author    Michaël Zasso <targos@protonmail.com>  2021-03-12 08:24:20 +0100
committer Michaël Zasso <targos@protonmail.com>  2021-03-15 15:54:50 +0100
commit    732ad99e47bae5deffa3a22d2ebe5500284106f0 (patch)
tree      759a6b072accf188f03c74a84e8256fe92f1925c /deps
parent    802b3e7cf9a5074a72bec75cf1c46758b81e04b1 (diff)
download  node-new-732ad99e47bae5deffa3a22d2ebe5500284106f0.tar.gz
deps: update V8 to 9.0.257.11
PR-URL: https://github.com/nodejs/node/pull/37587
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Diffstat (limited to 'deps')
-rw-r--r--deps/v8/AUTHORS12
-rw-r--r--deps/v8/BUILD.gn662
-rw-r--r--deps/v8/COMMON_OWNERS1
-rw-r--r--deps/v8/DEPS46
-rw-r--r--deps/v8/OWNERS1
-rw-r--r--deps/v8/PRESUBMIT.py8
-rw-r--r--deps/v8/RISCV_OWNERS3
-rw-r--r--deps/v8/base/trace_event/common/trace_event_common.h209
-rw-r--r--deps/v8/gni/proto_library.gni191
-rwxr-xr-xdeps/v8/gni/protoc.py51
-rw-r--r--deps/v8/gni/snapshot_toolchain.gni3
-rw-r--r--deps/v8/gni/v8.gni18
-rw-r--r--deps/v8/include/DEPS1
-rw-r--r--deps/v8/include/OWNERS1
-rw-r--r--deps/v8/include/cppgc/allocation.h19
-rw-r--r--deps/v8/include/cppgc/common.h15
-rw-r--r--deps/v8/include/cppgc/cross-thread-persistent.h46
-rw-r--r--deps/v8/include/cppgc/custom-space.h14
-rw-r--r--deps/v8/include/cppgc/ephemeron-pair.h5
-rw-r--r--deps/v8/include/cppgc/heap-consistency.h117
-rw-r--r--deps/v8/include/cppgc/heap-state.h59
-rw-r--r--deps/v8/include/cppgc/heap-statistics.h110
-rw-r--r--deps/v8/include/cppgc/internal/caged-heap-local-data.h3
-rw-r--r--deps/v8/include/cppgc/internal/persistent-node.h8
-rw-r--r--deps/v8/include/cppgc/internal/pointer-policies.h23
-rw-r--r--deps/v8/include/cppgc/internal/process-heap.h34
-rw-r--r--deps/v8/include/cppgc/internal/write-barrier.h246
-rw-r--r--deps/v8/include/cppgc/liveness-broker.h6
-rw-r--r--deps/v8/include/cppgc/macros.h2
-rw-r--r--deps/v8/include/cppgc/member.h33
-rw-r--r--deps/v8/include/cppgc/object-size-trait.h58
-rw-r--r--deps/v8/include/cppgc/persistent.h38
-rw-r--r--deps/v8/include/cppgc/platform.h11
-rw-r--r--deps/v8/include/cppgc/prefinalizer.h2
-rw-r--r--deps/v8/include/cppgc/process-heap-statistics.h36
-rw-r--r--deps/v8/include/cppgc/sentinel-pointer.h32
-rw-r--r--deps/v8/include/cppgc/testing.h50
-rw-r--r--deps/v8/include/cppgc/trace-trait.h2
-rw-r--r--deps/v8/include/cppgc/type-traits.h89
-rw-r--r--deps/v8/include/cppgc/visitor.h57
-rw-r--r--deps/v8/include/js_protocol.pdl3
-rw-r--r--deps/v8/include/v8-cppgc.h140
-rw-r--r--deps/v8/include/v8-fast-api-calls.h191
-rw-r--r--deps/v8/include/v8-metrics.h64
-rw-r--r--deps/v8/include/v8-profiler.h16
-rw-r--r--deps/v8/include/v8-unwinder-state.h2
-rw-r--r--deps/v8/include/v8-version.h8
-rw-r--r--deps/v8/include/v8.h349
-rw-r--r--deps/v8/include/v8config.h8
-rw-r--r--deps/v8/infra/mb/mb_config.pyl75
-rw-r--r--deps/v8/infra/testing/README.md2
-rw-r--r--deps/v8/infra/testing/builders.pyl98
-rw-r--r--deps/v8/samples/cppgc/cppgc-sample.cc6
-rw-r--r--deps/v8/samples/shell.cc2
-rw-r--r--deps/v8/src/DEPS2
-rw-r--r--deps/v8/src/api/api-inl.h108
-rw-r--r--deps/v8/src/api/api-macros-undef.h20
-rw-r--r--deps/v8/src/api/api-macros.h132
-rw-r--r--deps/v8/src/api/api-natives.cc10
-rw-r--r--deps/v8/src/api/api.cc1691
-rw-r--r--deps/v8/src/api/api.h4
-rw-r--r--deps/v8/src/ast/ast-function-literal-id-reindexer.cc4
-rw-r--r--deps/v8/src/ast/ast-traversal-visitor.h28
-rw-r--r--deps/v8/src/ast/ast-value-factory.h108
-rw-r--r--deps/v8/src/ast/ast.cc11
-rw-r--r--deps/v8/src/ast/ast.h187
-rw-r--r--deps/v8/src/ast/modules.cc61
-rw-r--r--deps/v8/src/ast/prettyprinter.cc91
-rw-r--r--deps/v8/src/ast/prettyprinter.h3
-rw-r--r--deps/v8/src/ast/scopes.cc111
-rw-r--r--deps/v8/src/ast/scopes.h65
-rw-r--r--deps/v8/src/base/bits.h20
-rw-r--r--deps/v8/src/base/build_config.h33
-rw-r--r--deps/v8/src/base/compiler-specific.h3
-rw-r--r--deps/v8/src/base/cpu.cc2
-rw-r--r--deps/v8/src/base/enum-set.h40
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc6
-rw-r--r--deps/v8/src/baseline/DEPS5
-rw-r--r--deps/v8/src/baseline/OWNERS6
-rw-r--r--deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h544
-rw-r--r--deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h116
-rw-r--r--deps/v8/src/baseline/baseline-assembler-inl.h134
-rw-r--r--deps/v8/src/baseline/baseline-assembler.h187
-rw-r--r--deps/v8/src/baseline/baseline-compiler.cc2180
-rw-r--r--deps/v8/src/baseline/baseline-compiler.h213
-rw-r--r--deps/v8/src/baseline/baseline.cc58
-rw-r--r--deps/v8/src/baseline/baseline.h25
-rw-r--r--deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h439
-rw-r--r--deps/v8/src/baseline/x64/baseline-compiler-x64-inl.h92
-rw-r--r--deps/v8/src/builtins/accessors.cc44
-rw-r--r--deps/v8/src/builtins/accessors.h2
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc347
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc712
-rw-r--r--deps/v8/src/builtins/array-join.tq2
-rw-r--r--deps/v8/src/builtins/array-reverse.tq2
-rw-r--r--deps/v8/src/builtins/array-slice.tq2
-rw-r--r--deps/v8/src/builtins/base.tq43
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.cc6
-rw-r--r--deps/v8/src/builtins/builtins-array.cc4
-rw-r--r--deps/v8/src/builtins/builtins-async-generator-gen.cc4
-rw-r--r--deps/v8/src/builtins/builtins-call-gen.cc89
-rw-r--r--deps/v8/src/builtins/builtins-call-gen.h11
-rw-r--r--deps/v8/src/builtins/builtins-callsite.cc178
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.cc173
-rw-r--r--deps/v8/src/builtins/builtins-conversion-gen.cc29
-rw-r--r--deps/v8/src/builtins/builtins-definitions.h70
-rw-r--r--deps/v8/src/builtins/builtins-ic-gen.cc20
-rw-r--r--deps/v8/src/builtins/builtins-internal-gen.cc41
-rw-r--r--deps/v8/src/builtins/builtins-intl.cc12
-rw-r--r--deps/v8/src/builtins/builtins-lazy-gen.cc31
-rw-r--r--deps/v8/src/builtins/builtins-number-gen.cc173
-rw-r--r--deps/v8/src/builtins/builtins-object-gen.cc19
-rw-r--r--deps/v8/src/builtins/builtins-object.cc5
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.cc253
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.h27
-rw-r--r--deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc13
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.cc456
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.h49
-rw-r--r--deps/v8/src/builtins/builtins-string.tq4
-rw-r--r--deps/v8/src/builtins/builtins-wasm-gen.cc33
-rw-r--r--deps/v8/src/builtins/builtins.cc9
-rw-r--r--deps/v8/src/builtins/builtins.h6
-rw-r--r--deps/v8/src/builtins/cast.tq2
-rw-r--r--deps/v8/src/builtins/constructor.tq29
-rw-r--r--deps/v8/src/builtins/frame-arguments.tq37
-rw-r--r--deps/v8/src/builtins/frames.tq26
-rw-r--r--deps/v8/src/builtins/generate-bytecodes-builtins-list.cc23
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc299
-rw-r--r--deps/v8/src/builtins/ic-callable.tq25
-rw-r--r--deps/v8/src/builtins/ic.tq7
-rw-r--r--deps/v8/src/builtins/internal.tq31
-rw-r--r--deps/v8/src/builtins/iterator.tq23
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc264
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc282
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc359
-rw-r--r--deps/v8/src/builtins/promise-constructor.tq5
-rw-r--r--deps/v8/src/builtins/promise-finally.tq5
-rw-r--r--deps/v8/src/builtins/promise-race.tq5
-rw-r--r--deps/v8/src/builtins/regexp-match.tq4
-rw-r--r--deps/v8/src/builtins/regexp-source.tq1
-rw-r--r--deps/v8/src/builtins/regexp.tq44
-rw-r--r--deps/v8/src/builtins/riscv64/builtins-riscv64.cc3316
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc368
-rw-r--r--deps/v8/src/builtins/string-endswith.tq2
-rw-r--r--deps/v8/src/builtins/string-includes.tq49
-rw-r--r--deps/v8/src/builtins/string-indexof.tq39
-rw-r--r--deps/v8/src/builtins/string-match-search.tq86
-rw-r--r--deps/v8/src/builtins/string-replaceall.tq46
-rw-r--r--deps/v8/src/builtins/wasm.tq38
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc529
-rw-r--r--deps/v8/src/codegen/OWNERS5
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm.cc36
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm.h22
-rw-r--r--deps/v8/src/codegen/arm/interface-descriptors-arm.cc26
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.cc90
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.h20
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64.cc22
-rw-r--r--deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc24
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h17
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.cc144
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.h39
-rw-r--r--deps/v8/src/codegen/arm64/register-arm64.h2
-rw-r--r--deps/v8/src/codegen/assembler-arch.h2
-rw-r--r--deps/v8/src/codegen/assembler-inl.h2
-rw-r--r--deps/v8/src/codegen/assembler.cc28
-rw-r--r--deps/v8/src/codegen/bailout-reason.h1
-rw-r--r--deps/v8/src/codegen/code-comments.cc15
-rw-r--r--deps/v8/src/codegen/code-comments.h3
-rw-r--r--deps/v8/src/codegen/code-factory.cc5
-rw-r--r--deps/v8/src/codegen/code-factory.h1
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.cc627
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.h401
-rw-r--r--deps/v8/src/codegen/compilation-cache.cc6
-rw-r--r--deps/v8/src/codegen/compiler.cc484
-rw-r--r--deps/v8/src/codegen/compiler.h23
-rw-r--r--deps/v8/src/codegen/constant-pool.cc249
-rw-r--r--deps/v8/src/codegen/constant-pool.h2
-rw-r--r--deps/v8/src/codegen/constants-arch.h2
-rw-r--r--deps/v8/src/codegen/cpu-features.h9
-rw-r--r--deps/v8/src/codegen/external-reference.cc131
-rw-r--r--deps/v8/src/codegen/external-reference.h20
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32-inl.h6
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.cc60
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.h25
-rw-r--r--deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc26
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.cc559
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.h103
-rw-r--r--deps/v8/src/codegen/interface-descriptors.cc129
-rw-r--r--deps/v8/src/codegen/interface-descriptors.h334
-rw-r--r--deps/v8/src/codegen/machine-type.h6
-rw-r--r--deps/v8/src/codegen/macro-assembler.h3
-rw-r--r--deps/v8/src/codegen/mips/assembler-mips.cc6
-rw-r--r--deps/v8/src/codegen/mips/interface-descriptors-mips.cc26
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.cc30
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.h6
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64.cc27
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64.h16
-rw-r--r--deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc26
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.cc137
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.h10
-rw-r--r--deps/v8/src/codegen/optimized-compilation-info.cc4
-rw-r--r--deps/v8/src/codegen/optimized-compilation-info.h20
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.cc6
-rw-r--r--deps/v8/src/codegen/ppc/constants-ppc.h4
-rw-r--r--deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc26
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.cc67
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.h14
-rw-r--r--deps/v8/src/codegen/register-arch.h2
-rw-r--r--deps/v8/src/codegen/register-configuration.cc2
-rw-r--r--deps/v8/src/codegen/reloc-info.cc3
-rw-r--r--deps/v8/src/codegen/reloc-info.h2
-rw-r--r--deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h261
-rw-r--r--deps/v8/src/codegen/riscv64/assembler-riscv64.cc3020
-rw-r--r--deps/v8/src/codegen/riscv64/assembler-riscv64.h1243
-rw-r--r--deps/v8/src/codegen/riscv64/constants-riscv64.cc201
-rw-r--r--deps/v8/src/codegen/riscv64/constants-riscv64.h1170
-rw-r--r--deps/v8/src/codegen/riscv64/cpu-riscv64.cc32
-rw-r--r--deps/v8/src/codegen/riscv64/interface-descriptors-riscv64.cc301
-rw-r--r--deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc4575
-rw-r--r--deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h1209
-rw-r--r--deps/v8/src/codegen/riscv64/register-riscv64.h346
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390.cc6
-rw-r--r--deps/v8/src/codegen/s390/interface-descriptors-s390.cc26
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.cc450
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.h113
-rw-r--r--deps/v8/src/codegen/safepoint-table.cc3
-rw-r--r--deps/v8/src/codegen/safepoint-table.h4
-rw-r--r--deps/v8/src/codegen/source-position-table.cc11
-rw-r--r--deps/v8/src/codegen/tnode.h2
-rw-r--r--deps/v8/src/codegen/turbo-assembler.cc2
-rw-r--r--deps/v8/src/codegen/turbo-assembler.h11
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64-inl.h2
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.cc37
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.h9
-rw-r--r--deps/v8/src/codegen/x64/interface-descriptors-x64.cc20
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.cc518
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.h66
-rw-r--r--deps/v8/src/codegen/x64/sse-instr.h7
-rw-r--r--deps/v8/src/common/assert-scope.cc21
-rw-r--r--deps/v8/src/common/assert-scope.h5
-rw-r--r--deps/v8/src/common/globals.h25
-rw-r--r--deps/v8/src/common/message-template.h10
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc2
-rw-r--r--deps/v8/src/compiler/OWNERS11
-rw-r--r--deps/v8/src/compiler/access-info.cc21
-rw-r--r--deps/v8/src/compiler/allocation-builder-inl.h22
-rw-r--r--deps/v8/src/compiler/allocation-builder.h6
-rw-r--r--deps/v8/src/compiler/backend/arm/code-generator-arm.cc267
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-codes-arm.h15
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc15
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc106
-rw-r--r--deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc275
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h13
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc13
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc139
-rw-r--r--deps/v8/src/compiler/backend/code-generator-impl.h6
-rw-r--r--deps/v8/src/compiler/backend/code-generator.cc143
-rw-r--r--deps/v8/src/compiler/backend/code-generator.h19
-rw-r--r--deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc546
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h62
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc62
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc269
-rw-r--r--deps/v8/src/compiler/backend/instruction-codes.h7
-rw-r--r--deps/v8/src/compiler/backend/instruction-scheduler.cc1
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.cc214
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.h9
-rw-r--r--deps/v8/src/compiler/backend/instruction.cc39
-rw-r--r--deps/v8/src/compiler/backend/instruction.h25
-rw-r--r--deps/v8/src/compiler/backend/mid-tier-register-allocator.cc196
-rw-r--r--deps/v8/src/compiler/backend/mips/code-generator-mips.cc192
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-codes-mips.h24
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc40
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc114
-rw-r--r--deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc204
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h26
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc42
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc166
-rw-r--r--deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc171
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h13
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc13
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc167
-rw-r--r--deps/v8/src/compiler/backend/register-allocator.cc2
-rw-r--r--deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc2775
-rw-r--r--deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h447
-rw-r--r--deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc1579
-rw-r--r--deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc3034
-rw-r--r--deps/v8/src/compiler/backend/s390/code-generator-s390.cc540
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-codes-s390.h25
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc25
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc139
-rw-r--r--deps/v8/src/compiler/backend/x64/code-generator-x64.cc350
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-codes-x64.h26
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc28
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc170
-rw-r--r--deps/v8/src/compiler/basic-block-instrumentor.cc38
-rw-r--r--deps/v8/src/compiler/bytecode-analysis.cc11
-rw-r--r--deps/v8/src/compiler/bytecode-analysis.h6
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc273
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.h7
-rw-r--r--deps/v8/src/compiler/c-linkage.cc42
-rw-r--r--deps/v8/src/compiler/code-assembler.cc14
-rw-r--r--deps/v8/src/compiler/code-assembler.h18
-rw-r--r--deps/v8/src/compiler/common-operator.cc13
-rw-r--r--deps/v8/src/compiler/common-operator.h126
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.cc26
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.h35
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc209
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.cc20
-rw-r--r--deps/v8/src/compiler/escape-analysis.cc8
-rw-r--r--deps/v8/src/compiler/escape-analysis.h2
-rw-r--r--deps/v8/src/compiler/frame-states.cc55
-rw-r--r--deps/v8/src/compiler/frame-states.h48
-rw-r--r--deps/v8/src/compiler/frame.h8
-rw-r--r--deps/v8/src/compiler/functional-list.h2
-rw-r--r--deps/v8/src/compiler/graph-assembler.h51
-rw-r--r--deps/v8/src/compiler/graph-reducer.cc19
-rw-r--r--deps/v8/src/compiler/graph-reducer.h10
-rw-r--r--deps/v8/src/compiler/graph-visualizer.cc2
-rw-r--r--deps/v8/src/compiler/heap-refs.h262
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc199
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h7
-rw-r--r--deps/v8/src/compiler/js-context-specialization.cc10
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc364
-rw-r--r--deps/v8/src/compiler/js-create-lowering.h29
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc15
-rw-r--r--deps/v8/src/compiler/js-graph.cc7
-rw-r--r--deps/v8/src/compiler/js-heap-broker.cc1437
-rw-r--r--deps/v8/src/compiler/js-heap-broker.h45
-rw-r--r--deps/v8/src/compiler/js-heap-copy-reducer.cc6
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc81
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.h13
-rw-r--r--deps/v8/src/compiler/js-inlining.cc159
-rw-r--r--deps/v8/src/compiler/js-inlining.h22
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc134
-rw-r--r--deps/v8/src/compiler/js-operator.cc55
-rw-r--r--deps/v8/src/compiler/js-operator.h74
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc33
-rw-r--r--deps/v8/src/compiler/linkage.cc63
-rw-r--r--deps/v8/src/compiler/linkage.h11
-rw-r--r--deps/v8/src/compiler/load-elimination.cc5
-rw-r--r--deps/v8/src/compiler/loop-analysis.cc87
-rw-r--r--deps/v8/src/compiler/loop-analysis.h82
-rw-r--r--deps/v8/src/compiler/loop-peeling.cc116
-rw-r--r--deps/v8/src/compiler/loop-peeling.h4
-rw-r--r--deps/v8/src/compiler/machine-graph-verifier.cc4
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc75
-rw-r--r--deps/v8/src/compiler/machine-operator.cc1286
-rw-r--r--deps/v8/src/compiler/machine-operator.h17
-rw-r--r--deps/v8/src/compiler/memory-optimizer.cc2
-rw-r--r--deps/v8/src/compiler/node-matchers.h2
-rw-r--r--deps/v8/src/compiler/node-observer.cc61
-rw-r--r--deps/v8/src/compiler/node-observer.h130
-rw-r--r--deps/v8/src/compiler/node-properties.cc3
-rw-r--r--deps/v8/src/compiler/node-properties.h6
-rw-r--r--deps/v8/src/compiler/node.cc4
-rw-r--r--deps/v8/src/compiler/node.h19
-rw-r--r--deps/v8/src/compiler/opcodes.h19
-rw-r--r--deps/v8/src/compiler/operator-properties.cc1
-rw-r--r--deps/v8/src/compiler/operator.h2
-rw-r--r--deps/v8/src/compiler/osr.cc4
-rw-r--r--deps/v8/src/compiler/persistent-map.h4
-rw-r--r--deps/v8/src/compiler/pipeline.cc211
-rw-r--r--deps/v8/src/compiler/pipeline.h2
-rw-r--r--deps/v8/src/compiler/processed-feedback.h2
-rw-r--r--deps/v8/src/compiler/property-access-builder.cc89
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc2
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h2
-rw-r--r--deps/v8/src/compiler/representation-change.cc2
-rw-r--r--deps/v8/src/compiler/scheduled-machine-lowering.cc2
-rw-r--r--deps/v8/src/compiler/serializer-for-background-compilation.cc85
-rw-r--r--deps/v8/src/compiler/serializer-for-background-compilation.h4
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.cc50
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc424
-rw-r--r--deps/v8/src/compiler/simplified-lowering.h18
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc31
-rw-r--r--deps/v8/src/compiler/simplified-operator.h12
-rw-r--r--deps/v8/src/compiler/state-values-utils.h11
-rw-r--r--deps/v8/src/compiler/typer.cc24
-rw-r--r--deps/v8/src/compiler/types.cc11
-rw-r--r--deps/v8/src/compiler/verifier.cc49
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc2064
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h99
-rw-r--r--deps/v8/src/d8/d8-console.cc1
-rw-r--r--deps/v8/src/d8/d8.cc381
-rw-r--r--deps/v8/src/d8/d8.h10
-rw-r--r--deps/v8/src/debug/arm/debug-arm.cc9
-rw-r--r--deps/v8/src/debug/arm64/debug-arm64.cc10
-rw-r--r--deps/v8/src/debug/debug-coverage.cc10
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc25
-rw-r--r--deps/v8/src/debug/debug-frames.cc5
-rw-r--r--deps/v8/src/debug/debug-frames.h4
-rw-r--r--deps/v8/src/debug/debug-interface.cc1176
-rw-r--r--deps/v8/src/debug/debug-interface.h28
-rw-r--r--deps/v8/src/debug/debug-property-iterator.cc53
-rw-r--r--deps/v8/src/debug/debug-property-iterator.h10
-rw-r--r--deps/v8/src/debug/debug-stack-trace-iterator.cc2
-rw-r--r--deps/v8/src/debug/debug-wasm-objects-inl.h30
-rw-r--r--deps/v8/src/debug/debug-wasm-objects.cc (renamed from deps/v8/src/debug/debug-wasm-support.cc)518
-rw-r--r--deps/v8/src/debug/debug-wasm-objects.h75
-rw-r--r--deps/v8/src/debug/debug-wasm-objects.tq7
-rw-r--r--deps/v8/src/debug/debug-wasm-support.h29
-rw-r--r--deps/v8/src/debug/debug.cc211
-rw-r--r--deps/v8/src/debug/debug.h3
-rw-r--r--deps/v8/src/debug/ia32/debug-ia32.cc12
-rw-r--r--deps/v8/src/debug/liveedit.cc2
-rw-r--r--deps/v8/src/debug/mips/debug-mips.cc6
-rw-r--r--deps/v8/src/debug/mips64/debug-mips64.cc6
-rw-r--r--deps/v8/src/debug/ppc/debug-ppc.cc9
-rw-r--r--deps/v8/src/debug/riscv64/debug-riscv64.cc55
-rw-r--r--deps/v8/src/debug/s390/debug-s390.cc9
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc2
-rw-r--r--deps/v8/src/debug/x64/debug-x64.cc12
-rw-r--r--deps/v8/src/deoptimizer/DEPS5
-rw-r--r--deps/v8/src/deoptimizer/OWNERS1
-rw-r--r--deps/v8/src/deoptimizer/deoptimized-frame-info.cc74
-rw-r--r--deps/v8/src/deoptimizer/deoptimized-frame-info.h70
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer-cfi-builtins.cc2
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.cc2590
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.h855
-rw-r--r--deps/v8/src/deoptimizer/frame-description.h230
-rw-r--r--deps/v8/src/deoptimizer/materialized-object-store.cc90
-rw-r--r--deps/v8/src/deoptimizer/materialized-object-store.h40
-rw-r--r--deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc7
-rw-r--r--deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc7
-rw-r--r--deps/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc41
-rw-r--r--deps/v8/src/deoptimizer/translated-state.cc2100
-rw-r--r--deps/v8/src/deoptimizer/translated-state.h451
-rw-r--r--deps/v8/src/deoptimizer/translation-array.cc356
-rw-r--r--deps/v8/src/deoptimizer/translation-array.h125
-rw-r--r--deps/v8/src/deoptimizer/translation-opcode.h71
-rw-r--r--deps/v8/src/diagnostics/arm/disasm-arm.cc4
-rw-r--r--deps/v8/src/diagnostics/arm/unwinder-arm.cc2
-rw-r--r--deps/v8/src/diagnostics/basic-block-profiler.cc21
-rw-r--r--deps/v8/src/diagnostics/basic-block-profiler.h4
-rw-r--r--deps/v8/src/diagnostics/ia32/disasm-ia32.cc58
-rw-r--r--deps/v8/src/diagnostics/objects-debug.cc153
-rw-r--r--deps/v8/src/diagnostics/objects-printer.cc284
-rw-r--r--deps/v8/src/diagnostics/perf-jit.cc19
-rw-r--r--deps/v8/src/diagnostics/perf-jit.h3
-rw-r--r--deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc1862
-rw-r--r--deps/v8/src/diagnostics/riscv64/unwinder-riscv64.cc12
-rw-r--r--deps/v8/src/diagnostics/unwinding-info-win64.cc31
-rw-r--r--deps/v8/src/diagnostics/x64/disasm-x64.cc76
-rw-r--r--deps/v8/src/execution/arm/frame-constants-arm.cc2
-rw-r--r--deps/v8/src/execution/arm/frame-constants-arm.h42
-rw-r--r--deps/v8/src/execution/arm/simulator-arm.cc53
-rw-r--r--deps/v8/src/execution/arm64/frame-constants-arm64.cc2
-rw-r--r--deps/v8/src/execution/arm64/frame-constants-arm64.h36
-rw-r--r--deps/v8/src/execution/arm64/simulator-arm64.cc580
-rw-r--r--deps/v8/src/execution/arm64/simulator-arm64.h14
-rw-r--r--deps/v8/src/execution/arm64/simulator-logic-arm64.cc1
-rw-r--r--deps/v8/src/execution/frame-constants.h83
-rw-r--r--deps/v8/src/execution/frames-inl.h70
-rw-r--r--deps/v8/src/execution/frames.cc232
-rw-r--r--deps/v8/src/execution/frames.h169
-rw-r--r--deps/v8/src/execution/ia32/frame-constants-ia32.cc2
-rw-r--r--deps/v8/src/execution/isolate-data.h2
-rw-r--r--deps/v8/src/execution/isolate-inl.h5
-rw-r--r--deps/v8/src/execution/isolate.cc491
-rw-r--r--deps/v8/src/execution/isolate.h160
-rw-r--r--deps/v8/src/execution/local-isolate.cc3
-rw-r--r--deps/v8/src/execution/local-isolate.h8
-rw-r--r--deps/v8/src/execution/messages.cc605
-rw-r--r--deps/v8/src/execution/messages.h212
-rw-r--r--deps/v8/src/execution/mips/frame-constants-mips.cc2
-rw-r--r--deps/v8/src/execution/mips64/frame-constants-mips64.cc2
-rw-r--r--deps/v8/src/execution/mips64/frame-constants-mips64.h3
-rw-r--r--deps/v8/src/execution/ppc/frame-constants-ppc.cc2
-rw-r--r--deps/v8/src/execution/ppc/frame-constants-ppc.h6
-rw-r--r--deps/v8/src/execution/protectors-inl.h1
-rw-r--r--deps/v8/src/execution/protectors.cc4
-rw-r--r--deps/v8/src/execution/riscv64/frame-constants-riscv64.cc32
-rw-r--r--deps/v8/src/execution/riscv64/frame-constants-riscv64.h86
-rw-r--r--deps/v8/src/execution/riscv64/simulator-riscv64.cc3750
-rw-r--r--deps/v8/src/execution/riscv64/simulator-riscv64.h820
-rw-r--r--deps/v8/src/execution/runtime-profiler.cc77
-rw-r--r--deps/v8/src/execution/runtime-profiler.h7
-rw-r--r--deps/v8/src/execution/s390/frame-constants-s390.cc2
-rw-r--r--deps/v8/src/execution/s390/frame-constants-s390.h5
-rw-r--r--deps/v8/src/execution/s390/simulator-s390.cc313
-rw-r--r--deps/v8/src/execution/s390/simulator-s390.h46
-rw-r--r--deps/v8/src/execution/simulator-base.h7
-rw-r--r--deps/v8/src/execution/simulator.h2
-rw-r--r--deps/v8/src/execution/x64/frame-constants-x64.cc2
-rw-r--r--deps/v8/src/flags/flag-definitions.h239
-rw-r--r--deps/v8/src/handles/handles-inl.h2
-rw-r--r--deps/v8/src/handles/handles.cc32
-rw-r--r--deps/v8/src/handles/handles.h1
-rw-r--r--deps/v8/src/handles/persistent-handles.cc2
-rw-r--r--deps/v8/src/heap/base/asm/riscv64/push_registers_asm.cc45
-rw-r--r--deps/v8/src/heap/collection-barrier.cc10
-rw-r--r--deps/v8/src/heap/concurrent-allocator-inl.h2
-rw-r--r--deps/v8/src/heap/concurrent-allocator.cc1
-rw-r--r--deps/v8/src/heap/concurrent-marking.cc33
-rw-r--r--deps/v8/src/heap/concurrent-marking.h4
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-heap.cc192
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-heap.h42
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-snapshot.cc8
-rw-r--r--deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc3
-rw-r--r--deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h2
-rw-r--r--deps/v8/src/heap/cppgc/caged-heap.cc2
-rw-r--r--deps/v8/src/heap/cppgc/compaction-worklists.h7
-rw-r--r--deps/v8/src/heap/cppgc/compactor.cc6
-rw-r--r--deps/v8/src/heap/cppgc/compactor.h7
-rw-r--r--deps/v8/src/heap/cppgc/concurrent-marker.cc12
-rw-r--r--deps/v8/src/heap/cppgc/default-platform.cc9
-rw-r--r--deps/v8/src/heap/cppgc/free-list.cc21
-rw-r--r--deps/v8/src/heap/cppgc/free-list.h3
-rw-r--r--deps/v8/src/heap/cppgc/garbage-collector.h7
-rw-r--r--deps/v8/src/heap/cppgc/gc-info-table.cc25
-rw-r--r--deps/v8/src/heap/cppgc/gc-info-table.h11
-rw-r--r--deps/v8/src/heap/cppgc/heap-base.cc68
-rw-r--r--deps/v8/src/heap/cppgc/heap-base.h57
-rw-r--r--deps/v8/src/heap/cppgc/heap-consistency.cc66
-rw-r--r--deps/v8/src/heap/cppgc/heap-object-header.h8
-rw-r--r--deps/v8/src/heap/cppgc/heap-page.cc17
-rw-r--r--deps/v8/src/heap/cppgc/heap-page.h5
-rw-r--r--deps/v8/src/heap/cppgc/heap-state.cc32
-rw-r--r--deps/v8/src/heap/cppgc/heap-statistics-collector.cc158
-rw-r--r--deps/v8/src/heap/cppgc/heap-statistics-collector.h35
-rw-r--r--deps/v8/src/heap/cppgc/heap.cc33
-rw-r--r--deps/v8/src/heap/cppgc/heap.h4
-rw-r--r--deps/v8/src/heap/cppgc/incremental-marking-schedule.cc7
-rw-r--r--deps/v8/src/heap/cppgc/incremental-marking-schedule.h4
-rw-r--r--deps/v8/src/heap/cppgc/marker.cc116
-rw-r--r--deps/v8/src/heap/cppgc/marker.h28
-rw-r--r--deps/v8/src/heap/cppgc/marking-state.cc3
-rw-r--r--deps/v8/src/heap/cppgc/marking-state.h19
-rw-r--r--deps/v8/src/heap/cppgc/marking-verifier.cc3
-rw-r--r--deps/v8/src/heap/cppgc/marking-visitor.cc8
-rw-r--r--deps/v8/src/heap/cppgc/marking-visitor.h2
-rw-r--r--deps/v8/src/heap/cppgc/marking-worklists.h1
-rw-r--r--deps/v8/src/heap/cppgc/metric-recorder.h69
-rw-r--r--deps/v8/src/heap/cppgc/object-allocator.cc36
-rw-r--r--deps/v8/src/heap/cppgc/object-allocator.h28
-rw-r--r--deps/v8/src/heap/cppgc/object-size-trait.cc36
-rw-r--r--deps/v8/src/heap/cppgc/object-start-bitmap.h4
-rw-r--r--deps/v8/src/heap/cppgc/persistent-node.cc22
-rw-r--r--deps/v8/src/heap/cppgc/platform.cc10
-rw-r--r--deps/v8/src/heap/cppgc/pointer-policies.cc10
-rw-r--r--deps/v8/src/heap/cppgc/prefinalizer-handler.cc2
-rw-r--r--deps/v8/src/heap/cppgc/process-heap-statistics.cc12
-rw-r--r--deps/v8/src/heap/cppgc/process-heap-statistics.h73
-rw-r--r--deps/v8/src/heap/cppgc/process-heap.cc4
-rw-r--r--deps/v8/src/heap/cppgc/stats-collector.cc139
-rw-r--r--deps/v8/src/heap/cppgc/stats-collector.h111
-rw-r--r--deps/v8/src/heap/cppgc/sweeper.cc160
-rw-r--r--deps/v8/src/heap/cppgc/sweeper.h8
-rw-r--r--deps/v8/src/heap/cppgc/testing.cc27
-rw-r--r--deps/v8/src/heap/cppgc/trace-event.h2
-rw-r--r--deps/v8/src/heap/cppgc/write-barrier.cc79
-rw-r--r--deps/v8/src/heap/cppgc/write-barrier.h22
-rw-r--r--deps/v8/src/heap/embedder-tracing.cc47
-rw-r--r--deps/v8/src/heap/embedder-tracing.h34
-rw-r--r--deps/v8/src/heap/factory-base.cc59
-rw-r--r--deps/v8/src/heap/factory-base.h11
-rw-r--r--deps/v8/src/heap/factory.cc215
-rw-r--r--deps/v8/src/heap/factory.h50
-rw-r--r--deps/v8/src/heap/gc-tracer.cc26
-rw-r--r--deps/v8/src/heap/heap-inl.h13
-rw-r--r--deps/v8/src/heap/heap.cc50
-rw-r--r--deps/v8/src/heap/heap.h22
-rw-r--r--deps/v8/src/heap/incremental-marking.cc36
-rw-r--r--deps/v8/src/heap/local-heap-inl.h3
-rw-r--r--deps/v8/src/heap/local-heap.cc34
-rw-r--r--deps/v8/src/heap/local-heap.h13
-rw-r--r--deps/v8/src/heap/mark-compact.cc100
-rw-r--r--deps/v8/src/heap/mark-compact.h10
-rw-r--r--deps/v8/src/heap/marking-barrier.cc27
-rw-r--r--deps/v8/src/heap/memory-allocator.cc21
-rw-r--r--deps/v8/src/heap/memory-chunk-layout.cc2
-rw-r--r--deps/v8/src/heap/new-spaces.cc2
-rw-r--r--deps/v8/src/heap/object-stats.cc30
-rw-r--r--deps/v8/src/heap/object-stats.h1
-rw-r--r--deps/v8/src/heap/objects-visiting.h81
-rw-r--r--deps/v8/src/heap/paged-spaces.h4
-rw-r--r--deps/v8/src/heap/safepoint.cc9
-rw-r--r--deps/v8/src/heap/scavenger.cc8
-rw-r--r--deps/v8/src/heap/setup-heap-internal.cc237
-rw-r--r--deps/v8/src/ic/accessor-assembler.cc392
-rw-r--r--deps/v8/src/ic/accessor-assembler.h15
-rw-r--r--deps/v8/src/ic/binary-op-assembler.cc123
-rw-r--r--deps/v8/src/ic/binary-op-assembler.h114
-rw-r--r--deps/v8/src/ic/call-optimization.cc15
-rw-r--r--deps/v8/src/ic/call-optimization.h9
-rw-r--r--deps/v8/src/ic/ic.cc39
-rw-r--r--deps/v8/src/ic/keyed-store-generic.cc23
-rw-r--r--deps/v8/src/ic/unary-op-assembler.cc63
-rw-r--r--deps/v8/src/ic/unary-op-assembler.h12
-rw-r--r--deps/v8/src/init/bootstrapper.cc175
-rw-r--r--deps/v8/src/init/heap-symbols.h77
-rw-r--r--deps/v8/src/init/v8.cc40
-rw-r--r--deps/v8/src/inspector/custom-preview.cc6
-rw-r--r--deps/v8/src/inspector/search-util.cc1
-rw-r--r--deps/v8/src/inspector/v8-console-message.cc7
-rw-r--r--deps/v8/src/inspector/v8-console.cc1
-rw-r--r--deps/v8/src/inspector/v8-debugger.cc6
-rw-r--r--deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc12
-rw-r--r--deps/v8/src/inspector/v8-inspector-impl.cc12
-rw-r--r--deps/v8/src/inspector/v8-inspector-impl.h2
-rw-r--r--deps/v8/src/inspector/v8-regex.cc13
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.cc7
-rw-r--r--deps/v8/src/inspector/value-mirror.cc50
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.cc170
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.h89
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc36
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h10
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.cc12
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.h5
-rw-r--r--deps/v8/src/interpreter/bytecode-array-random-iterator.cc2
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.cc3
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc495
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h9
-rw-r--r--deps/v8/src/interpreter/bytecode-node.h14
-rw-r--r--deps/v8/src/interpreter/bytecode-operands.cc27
-rw-r--r--deps/v8/src/interpreter/bytecode-operands.h54
-rw-r--r--deps/v8/src/interpreter/bytecode-register-optimizer.h6
-rw-r--r--deps/v8/src/interpreter/bytecode-register.cc9
-rw-r--r--deps/v8/src/interpreter/bytecode-register.h31
-rw-r--r--deps/v8/src/interpreter/bytecode-traits.h59
-rw-r--r--deps/v8/src/interpreter/bytecodes.cc21
-rw-r--r--deps/v8/src/interpreter/bytecodes.h589
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.h1
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc170
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h34
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.cc98
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics-generator.cc55
-rw-r--r--deps/v8/src/interpreter/interpreter.cc58
-rw-r--r--deps/v8/src/interpreter/interpreter.h7
-rw-r--r--deps/v8/src/json/json-parser.cc15
-rw-r--r--deps/v8/src/libsampler/sampler.cc31
-rw-r--r--deps/v8/src/logging/code-events.h12
-rw-r--r--deps/v8/src/logging/counters-definitions.h27
-rw-r--r--deps/v8/src/logging/counters.h5
-rw-r--r--deps/v8/src/logging/log-utils.cc2
-rw-r--r--deps/v8/src/logging/log-utils.h9
-rw-r--r--deps/v8/src/logging/log.cc112
-rw-r--r--deps/v8/src/logging/log.h21
-rw-r--r--deps/v8/src/objects/all-objects-inl.h4
-rw-r--r--deps/v8/src/objects/arguments.tq17
-rw-r--r--deps/v8/src/objects/backing-store.cc3
-rw-r--r--deps/v8/src/objects/code-inl.h87
-rw-r--r--deps/v8/src/objects/code-kind.cc2
-rw-r--r--deps/v8/src/objects/code-kind.h59
-rw-r--r--deps/v8/src/objects/code.cc267
-rw-r--r--deps/v8/src/objects/code.h19
-rw-r--r--deps/v8/src/objects/compressed-slots.h8
-rw-r--r--deps/v8/src/objects/contexts-inl.h37
-rw-r--r--deps/v8/src/objects/contexts.h24
-rw-r--r--deps/v8/src/objects/contexts.tq2
-rw-r--r--deps/v8/src/objects/debug-objects.cc35
-rw-r--r--deps/v8/src/objects/debug-objects.h7
-rw-r--r--deps/v8/src/objects/dictionary-inl.h11
-rw-r--r--deps/v8/src/objects/feedback-cell-inl.h5
-rw-r--r--deps/v8/src/objects/feedback-cell.h6
-rw-r--r--deps/v8/src/objects/feedback-vector-inl.h30
-rw-r--r--deps/v8/src/objects/feedback-vector.cc54
-rw-r--r--deps/v8/src/objects/feedback-vector.h28
-rw-r--r--deps/v8/src/objects/feedback-vector.tq3
-rw-r--r--deps/v8/src/objects/field-index-inl.h2
-rw-r--r--deps/v8/src/objects/fixed-array-inl.h41
-rw-r--r--deps/v8/src/objects/fixed-array.h30
-rw-r--r--deps/v8/src/objects/fixed-array.tq8
-rw-r--r--deps/v8/src/objects/frame-array-inl.h60
-rw-r--r--deps/v8/src/objects/frame-array.h116
-rw-r--r--deps/v8/src/objects/function-kind.h64
-rw-r--r--deps/v8/src/objects/heap-object.h1
-rw-r--r--deps/v8/src/objects/instance-type.h83
-rw-r--r--deps/v8/src/objects/intl-objects.cc14
-rw-r--r--deps/v8/src/objects/js-array-buffer-inl.h26
-rw-r--r--deps/v8/src/objects/js-array-buffer.h17
-rw-r--r--deps/v8/src/objects/js-array-buffer.tq43
-rw-r--r--deps/v8/src/objects/js-array-inl.h14
-rw-r--r--deps/v8/src/objects/js-array.h10
-rw-r--r--deps/v8/src/objects/js-array.tq4
-rw-r--r--deps/v8/src/objects/js-date-time-format.cc8
-rw-r--r--deps/v8/src/objects/js-display-names.cc7
-rw-r--r--deps/v8/src/objects/js-function-inl.h29
-rw-r--r--deps/v8/src/objects/js-function.cc231
-rw-r--r--deps/v8/src/objects/js-function.h33
-rw-r--r--deps/v8/src/objects/js-locale.cc56
-rw-r--r--deps/v8/src/objects/js-objects-inl.h72
-rw-r--r--deps/v8/src/objects/js-objects.cc349
-rw-r--r--deps/v8/src/objects/js-objects.h27
-rw-r--r--deps/v8/src/objects/js-promise.tq4
-rw-r--r--deps/v8/src/objects/js-regexp-inl.h6
-rw-r--r--deps/v8/src/objects/js-regexp.cc92
-rw-r--r--deps/v8/src/objects/js-regexp.h45
-rw-r--r--deps/v8/src/objects/js-regexp.tq10
-rw-r--r--deps/v8/src/objects/layout-descriptor-inl.h255
-rw-r--r--deps/v8/src/objects/layout-descriptor.cc288
-rw-r--r--deps/v8/src/objects/layout-descriptor.h175
-rw-r--r--deps/v8/src/objects/literal-objects-inl.h6
-rw-r--r--deps/v8/src/objects/literal-objects.cc62
-rw-r--r--deps/v8/src/objects/literal-objects.h12
-rw-r--r--deps/v8/src/objects/literal-objects.tq7
-rw-r--r--deps/v8/src/objects/lookup.cc247
-rw-r--r--deps/v8/src/objects/lookup.h31
-rw-r--r--deps/v8/src/objects/map-inl.h108
-rw-r--r--deps/v8/src/objects/map-updater.cc55
-rw-r--r--deps/v8/src/objects/map.cc188
-rw-r--r--deps/v8/src/objects/map.h291
-rw-r--r--deps/v8/src/objects/map.tq2
-rw-r--r--deps/v8/src/objects/module-inl.h3
-rw-r--r--deps/v8/src/objects/module.cc49
-rw-r--r--deps/v8/src/objects/module.h7
-rw-r--r--deps/v8/src/objects/module.tq1
-rw-r--r--deps/v8/src/objects/object-list-macros.h22
-rw-r--r--deps/v8/src/objects/object-macros.h61
-rw-r--r--deps/v8/src/objects/objects-body-descriptors-inl.h80
-rw-r--r--deps/v8/src/objects/objects-definitions.h13
-rw-r--r--deps/v8/src/objects/objects-inl.h38
-rw-r--r--deps/v8/src/objects/objects.cc214
-rw-r--r--deps/v8/src/objects/objects.h3
-rw-r--r--deps/v8/src/objects/osr-optimized-code-cache.cc15
-rw-r--r--deps/v8/src/objects/osr-optimized-code-cache.h15
-rw-r--r--deps/v8/src/objects/property-cell-inl.h34
-rw-r--r--deps/v8/src/objects/property-cell.h43
-rw-r--r--deps/v8/src/objects/property-details.h103
-rw-r--r--deps/v8/src/objects/scope-info-inl.h82
-rw-r--r--deps/v8/src/objects/scope-info.cc224
-rw-r--r--deps/v8/src/objects/scope-info.h98
-rw-r--r--deps/v8/src/objects/scope-info.tq157
-rw-r--r--deps/v8/src/objects/script-inl.h6
-rw-r--r--deps/v8/src/objects/script.h7
-rw-r--r--deps/v8/src/objects/script.tq2
-rw-r--r--deps/v8/src/objects/shared-function-info-inl.h94
-rw-r--r--deps/v8/src/objects/shared-function-info.cc5
-rw-r--r--deps/v8/src/objects/shared-function-info.h35
-rw-r--r--deps/v8/src/objects/shared-function-info.tq11
-rw-r--r--deps/v8/src/objects/source-text-module.cc34
-rw-r--r--deps/v8/src/objects/source-text-module.h4
-rw-r--r--deps/v8/src/objects/source-text-module.tq1
-rw-r--r--deps/v8/src/objects/stack-frame-info-inl.h24
-rw-r--r--deps/v8/src/objects/stack-frame-info.cc719
-rw-r--r--deps/v8/src/objects/stack-frame-info.h134
-rw-r--r--deps/v8/src/objects/stack-frame-info.tq41
-rw-r--r--deps/v8/src/objects/string-inl.h152
-rw-r--r--deps/v8/src/objects/string-table.cc6
-rw-r--r--deps/v8/src/objects/string.cc19
-rw-r--r--deps/v8/src/objects/string.h38
-rw-r--r--deps/v8/src/objects/string.tq86
-rw-r--r--deps/v8/src/objects/swiss-hash-table-helpers.h363
-rw-r--r--deps/v8/src/objects/swiss-name-dictionary-inl.h659
-rw-r--r--deps/v8/src/objects/swiss-name-dictionary.cc37
-rw-r--r--deps/v8/src/objects/swiss-name-dictionary.h284
-rw-r--r--deps/v8/src/objects/swiss-name-dictionary.tq15
-rw-r--r--deps/v8/src/objects/synthetic-module.cc22
-rw-r--r--deps/v8/src/objects/tagged-field.h2
-rw-r--r--deps/v8/src/objects/templates.tq2
-rw-r--r--deps/v8/src/objects/transitions-inl.h2
-rw-r--r--deps/v8/src/objects/transitions.cc7
-rw-r--r--deps/v8/src/objects/value-serializer.cc3
-rw-r--r--deps/v8/src/parsing/parser-base.h206
-rw-r--r--deps/v8/src/parsing/parser.cc59
-rw-r--r--deps/v8/src/parsing/parser.h15
-rw-r--r--deps/v8/src/parsing/preparse-data.cc6
-rw-r--r--deps/v8/src/parsing/preparser.h18
-rw-r--r--deps/v8/src/parsing/rewriter.cc5
-rw-r--r--deps/v8/src/profiler/cpu-profiler-inl.h4
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc26
-rw-r--r--deps/v8/src/profiler/cpu-profiler.h20
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc66
-rw-r--r--deps/v8/src/profiler/profile-generator-inl.h45
-rw-r--r--deps/v8/src/profiler/profile-generator.cc159
-rw-r--r--deps/v8/src/profiler/profile-generator.h91
-rw-r--r--deps/v8/src/profiler/profiler-listener.cc45
-rw-r--r--deps/v8/src/profiler/profiler-listener.h1
-rw-r--r--deps/v8/src/profiler/strings-storage.cc1
-rw-r--r--deps/v8/src/profiler/tick-sample.cc7
-rw-r--r--deps/v8/src/regexp/experimental/experimental.cc16
-rw-r--r--deps/v8/src/regexp/experimental/experimental.h10
-rw-r--r--deps/v8/src/regexp/regexp-bytecode-generator-inl.h14
-rw-r--r--deps/v8/src/regexp/regexp-bytecode-generator.cc6
-rw-r--r--deps/v8/src/regexp/regexp-bytecode-generator.h1
-rw-r--r--deps/v8/src/regexp/regexp-compiler.cc90
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-arch.h2
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.h1
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc9
-rw-r--r--deps/v8/src/regexp/regexp-parser.h6
-rw-r--r--deps/v8/src/regexp/regexp.cc26
-rw-r--r--deps/v8/src/regexp/regexp.h16
-rw-r--r--deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc1269
-rw-r--r--deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.h214
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc24
-rw-r--r--deps/v8/src/roots/roots-inl.h1
-rw-r--r--deps/v8/src/roots/roots.h14
-rw-r--r--deps/v8/src/runtime/runtime-atomics.cc5
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc94
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc125
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc42
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc73
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc47
-rw-r--r--deps/v8/src/runtime/runtime-module.cc15
-rw-r--r--deps/v8/src/runtime/runtime-object.cc39
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc89
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc10
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc66
-rw-r--r--deps/v8/src/runtime/runtime-test.cc134
-rw-r--r--deps/v8/src/runtime/runtime-trace.cc (renamed from deps/v8/src/runtime/runtime-interpreter.cc)66
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc150
-rw-r--r--deps/v8/src/runtime/runtime.h113
-rw-r--r--deps/v8/src/snapshot/context-serializer.cc4
-rw-r--r--deps/v8/src/snapshot/deserializer.cc4
-rw-r--r--deps/v8/src/snapshot/deserializer.h7
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-file-writer.cc13
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-file-writer.h8
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc9
-rw-r--r--deps/v8/src/snapshot/serializer.cc8
-rw-r--r--deps/v8/src/snapshot/snapshot.cc2
-rw-r--r--deps/v8/src/snapshot/startup-deserializer.cc2
-rw-r--r--deps/v8/src/strings/string-stream.cc9
-rw-r--r--deps/v8/src/torque/OWNERS6
-rw-r--r--deps/v8/src/torque/ast.h3
-rw-r--r--deps/v8/src/torque/cc-generator.cc129
-rw-r--r--deps/v8/src/torque/cc-generator.h7
-rw-r--r--deps/v8/src/torque/cfg.h11
-rw-r--r--deps/v8/src/torque/class-debug-reader-generator.cc87
-rw-r--r--deps/v8/src/torque/constants.h14
-rw-r--r--deps/v8/src/torque/csa-generator.cc97
-rw-r--r--deps/v8/src/torque/declarable.cc12
-rw-r--r--deps/v8/src/torque/declarable.h26
-rw-r--r--deps/v8/src/torque/declaration-visitor.cc8
-rw-r--r--deps/v8/src/torque/declarations.h9
-rw-r--r--deps/v8/src/torque/implementation-visitor.cc700
-rw-r--r--deps/v8/src/torque/implementation-visitor.h35
-rw-r--r--deps/v8/src/torque/instance-type-generator.cc47
-rw-r--r--deps/v8/src/torque/instructions.cc38
-rw-r--r--deps/v8/src/torque/instructions.h19
-rw-r--r--deps/v8/src/torque/runtime-macro-shims.h28
-rw-r--r--deps/v8/src/torque/source-positions.cc1
-rw-r--r--deps/v8/src/torque/torque-compiler.cc2
-rw-r--r--deps/v8/src/torque/torque-parser.cc40
-rw-r--r--deps/v8/src/torque/type-oracle.h4
-rw-r--r--deps/v8/src/torque/type-visitor.cc13
-rw-r--r--deps/v8/src/torque/types.cc37
-rw-r--r--deps/v8/src/torque/types.h24
-rw-r--r--deps/v8/src/tracing/trace-event.h3
-rw-r--r--deps/v8/src/trap-handler/handler-inside-posix.cc18
-rw-r--r--deps/v8/src/trap-handler/trap-handler.h2
-rw-r--r--deps/v8/src/utils/allocation.h8
-rw-r--r--deps/v8/src/utils/memcopy.h5
-rw-r--r--deps/v8/src/utils/utils.cc4
-rw-r--r--deps/v8/src/utils/utils.h38
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h437
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h414
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h531
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler-defs.h21
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.cc284
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.h320
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc2092
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.h6
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-register.h51
-rw-r--r--deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h344
-rw-r--r--deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h375
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h184
-rw-r--r--deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h2516
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h1113
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h471
-rw-r--r--deps/v8/src/wasm/c-api.cc101
-rw-r--r--deps/v8/src/wasm/compilation-environment.h2
-rw-r--r--deps/v8/src/wasm/decoder.h1
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h478
-rw-r--r--deps/v8/src/wasm/function-body-decoder.cc3
-rw-r--r--deps/v8/src/wasm/function-body-decoder.h3
-rw-r--r--deps/v8/src/wasm/function-compiler.cc12
-rw-r--r--deps/v8/src/wasm/graph-builder-interface.cc410
-rw-r--r--deps/v8/src/wasm/jump-table-assembler.cc40
-rw-r--r--deps/v8/src/wasm/jump-table-assembler.h5
-rw-r--r--deps/v8/src/wasm/local-decl-encoder.cc17
-rw-r--r--deps/v8/src/wasm/module-compiler.cc162
-rw-r--r--deps/v8/src/wasm/module-compiler.h8
-rw-r--r--deps/v8/src/wasm/module-decoder.cc143
-rw-r--r--deps/v8/src/wasm/module-decoder.h7
-rw-r--r--deps/v8/src/wasm/module-instantiate.cc262
-rw-r--r--deps/v8/src/wasm/value-type.cc34
-rw-r--r--deps/v8/src/wasm/value-type.h343
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.cc82
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.h41
-rw-r--r--deps/v8/src/wasm/wasm-constants.h6
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc241
-rw-r--r--deps/v8/src/wasm/wasm-debug.h102
-rw-r--r--deps/v8/src/wasm/wasm-engine.cc107
-rw-r--r--deps/v8/src/wasm/wasm-engine.h8
-rw-r--r--deps/v8/src/wasm/wasm-feature-flags.h8
-rw-r--r--deps/v8/src/wasm/wasm-features.cc6
-rw-r--r--deps/v8/src/wasm/wasm-features.h2
-rw-r--r--deps/v8/src/wasm/wasm-js.cc134
-rw-r--r--deps/v8/src/wasm/wasm-linkage.h12
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.cc58
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.h8
-rw-r--r--deps/v8/src/wasm/wasm-module.cc25
-rw-r--r--deps/v8/src/wasm/wasm-module.h30
-rw-r--r--deps/v8/src/wasm/wasm-objects-inl.h12
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc202
-rw-r--r--deps/v8/src/wasm/wasm-objects.h49
-rw-r--r--deps/v8/src/wasm/wasm-objects.tq2
-rw-r--r--deps/v8/src/wasm/wasm-opcodes-inl.h113
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc3
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h99
-rw-r--r--deps/v8/src/wasm/wasm-serialization.cc316
-rw-r--r--deps/v8/src/wasm/wasm-serialization.h3
-rw-r--r--deps/v8/src/wasm/wasm-subtyping.cc46
-rw-r--r--deps/v8/src/wasm/wasm-subtyping.h22
-rw-r--r--deps/v8/test/benchmarks/cpp/cppgc/allocation_perf.cc5
-rw-r--r--deps/v8/test/benchmarks/cpp/cppgc/trace_perf.cc1
-rwxr-xr-xdeps/v8/test/benchmarks/csuite/csuite.py13
-rw-r--r--deps/v8/test/cctest/BUILD.gn26
-rw-r--r--deps/v8/test/cctest/cctest-utils.h3
-rw-r--r--deps/v8/test/cctest/cctest.cc6
-rw-r--r--deps/v8/test/cctest/cctest.h16
-rw-r--r--deps/v8/test/cctest/cctest.status93
-rw-r--r--deps/v8/test/cctest/compiler/c-signature.h6
-rw-r--r--deps/v8/test/cctest/compiler/function-tester.cc4
-rw-r--r--deps/v8/test/cctest/compiler/node-observer-tester.h92
-rw-r--r--deps/v8/test/cctest/compiler/serializer-tester.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-basic-block-profiler.cc31
-rw-r--r--deps/v8/test/cctest/compiler/test-code-generator.cc108
-rw-r--r--deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-js-typed-lowering.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-run-machops.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-run-retpoline.cc7
-rw-r--r--deps/v8/test/cctest/compiler/test-run-tail-calls.cc7
-rw-r--r--deps/v8/test/cctest/compiler/test-sloppy-equality.cc141
-rw-r--r--deps/v8/test/cctest/heap/heap-utils.cc6
-rw-r--r--deps/v8/test/cctest/heap/heap-utils.h2
-rw-r--r--deps/v8/test/cctest/heap/test-concurrent-allocation.cc21
-rw-r--r--deps/v8/test/cctest/heap/test-embedder-tracing.cc28
-rw-r--r--deps/v8/test/cctest/heap/test-heap.cc86
-rw-r--r--deps/v8/test/cctest/heap/test-spaces.cc10
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc4
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden60
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden124
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden360
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden130
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden14
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden226
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden40
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden42
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden14
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden16
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden20
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden6
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden59
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden68
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden56
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden22
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden20
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden4
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden4
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden82
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden24
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden22
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden22
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden20
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden224
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden14
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden332
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden88
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden216
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden478
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden10
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/GenerateTestUndetectable.golden80
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden152
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden16
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden230
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden120
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden24
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden20
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden4
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden4
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden592
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden70
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden138
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden54
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden102
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden4
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden110
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden62
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorDeclaration.golden140
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFieldAccess.golden16
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden92
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden36
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodDeclaration.golden85
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden38
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreOneShot.golden144
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreWithoutOneShot.golden34
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden266
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden12
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden76
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden20
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden190
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden110
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden104
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodDeclaration.golden80
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StringConcat.golden64
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden36
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden234
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/TemplateLiterals.golden72
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden4
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden6
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden30
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden94
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden4
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden40
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden268
-rw-r--r--deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc2
-rw-r--r--deps/v8/test/cctest/interpreter/test-bytecode-generator.cc4
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter.cc47
-rw-r--r--deps/v8/test/cctest/test-accessor-assembler.cc2
-rw-r--r--deps/v8/test/cctest/test-api-interceptors.cc13
-rw-r--r--deps/v8/test/cctest/test-api-stack-traces.cc38
-rw-r--r--deps/v8/test/cctest/test-api-wasm.cc64
-rw-r--r--deps/v8/test/cctest/test-api.cc673
-rw-r--r--deps/v8/test/cctest/test-api.h131
-rw-r--r--deps/v8/test/cctest/test-assembler-arm64.cc22
-rw-r--r--deps/v8/test/cctest/test-assembler-riscv64.cc1874
-rw-r--r--deps/v8/test/cctest/test-code-stub-assembler.cc25
-rw-r--r--deps/v8/test/cctest/test-compiler.cc24
-rw-r--r--deps/v8/test/cctest/test-concurrent-descriptor-array.cc2
-rw-r--r--deps/v8/test/cctest/test-concurrent-feedback-vector.cc1
-rw-r--r--deps/v8/test/cctest/test-concurrent-js-array.cc137
-rw-r--r--deps/v8/test/cctest/test-concurrent-prototype.cc3
-rw-r--r--deps/v8/test/cctest/test-concurrent-string.cc12
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc148
-rw-r--r--deps/v8/test/cctest/test-debug-helper.cc41
-rw-r--r--deps/v8/test/cctest/test-debug.cc5
-rw-r--r--deps/v8/test/cctest/test-disasm-ia32.cc20
-rw-r--r--deps/v8/test/cctest/test-disasm-riscv64.cc523
-rw-r--r--deps/v8/test/cctest/test-disasm-x64.cc5
-rw-r--r--deps/v8/test/cctest/test-field-type-tracking.cc64
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc3
-rw-r--r--deps/v8/test/cctest/test-helper-riscv64.cc49
-rw-r--r--deps/v8/test/cctest/test-helper-riscv64.h334
-rw-r--r--deps/v8/test/cctest/test-icache.cc7
-rw-r--r--deps/v8/test/cctest/test-inobject-slack-tracking.cc12
-rw-r--r--deps/v8/test/cctest/test-js-to-wasm.cc999
-rw-r--r--deps/v8/test/cctest/test-local-handles.cc4
-rw-r--r--deps/v8/test/cctest/test-log.cc127
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-riscv64.cc1556
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-x64.cc21
-rw-r--r--deps/v8/test/cctest/test-modules.cc107
-rw-r--r--deps/v8/test/cctest/test-object.cc17
-rw-r--r--deps/v8/test/cctest/test-parsing.cc10
-rw-r--r--deps/v8/test/cctest/test-persistent-handles.cc3
-rw-r--r--deps/v8/test/cctest/test-pointer-auth-arm64.cc2
-rw-r--r--deps/v8/test/cctest/test-poison-disasm-arm64.cc4
-rw-r--r--deps/v8/test/cctest/test-profile-generator.cc111
-rw-r--r--deps/v8/test/cctest/test-property-details.cc72
-rw-r--r--deps/v8/test/cctest/test-regexp.cc95
-rw-r--r--deps/v8/test/cctest/test-sampler-api.cc4
-rw-r--r--deps/v8/test/cctest/test-serialize.cc34
-rw-r--r--deps/v8/test/cctest/test-simple-riscv64.cc253
-rw-r--r--deps/v8/test/cctest/test-strings.cc87
-rw-r--r--deps/v8/test/cctest/test-swiss-name-dictionary.cc81
-rw-r--r--deps/v8/test/cctest/test-unboxed-doubles.cc1642
-rw-r--r--deps/v8/test/cctest/test-unwinder-code-pages.cc40
-rw-r--r--deps/v8/test/cctest/torque/test-torque.cc52
-rw-r--r--deps/v8/test/cctest/wasm/test-gc.cc714
-rw-r--r--deps/v8/test/cctest/wasm/test-jump-table-assembler.cc8
-rw-r--r--deps/v8/test/cctest/wasm/test-liftoff-inspection.cc255
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc22
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc318
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-memory64.cc44
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-module.cc35
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc6
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-simd.cc234
-rw-r--r--deps/v8/test/cctest/wasm/test-streaming-compilation.cc22
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc8
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-serialization.cc18
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-stack.cc6
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.cc1
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.h8
-rw-r--r--deps/v8/test/common/assembler-tester.h42
-rw-r--r--deps/v8/test/common/wasm/test-signatures.h3
-rw-r--r--deps/v8/test/common/wasm/wasm-interpreter.cc677
-rw-r--r--deps/v8/test/common/wasm/wasm-interpreter.h12
-rw-r--r--deps/v8/test/common/wasm/wasm-macro-gen.h41
-rw-r--r--deps/v8/test/common/wasm/wasm-module-runner.cc58
-rw-r--r--deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm-unsupported-liftoff.js9
-rw-r--r--deps/v8/test/debugger/debugger.status15
-rw-r--r--deps/v8/test/fuzzer/inspector/regress-1166549189
-rw-r--r--deps/v8/test/fuzzer/regexp-builtins.cc6
-rw-r--r--deps/v8/test/fuzzer/wasm-compile.cc1435
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.cc14
-rw-r--r--deps/v8/test/inspector/cpu-profiler/console-profile-asm-js-expected.txt9
-rw-r--r--deps/v8/test/inspector/cpu-profiler/console-profile-asm-js.js94
-rw-r--r--deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js2
-rw-r--r--deps/v8/test/inspector/debugger/destructuring-expected.txt34
-rw-r--r--deps/v8/test/inspector/debugger/destructuring.js47
-rw-r--r--deps/v8/test/inspector/debugger/for-of-loops-expected.txt26
-rw-r--r--deps/v8/test/inspector/debugger/for-of-loops.js25
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-after-gc-expected.txt5
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-after-gc.js52
-rw-r--r--deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate-expected.txt18
-rw-r--r--deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate.js78
-rw-r--r--deps/v8/test/inspector/debugger/wasm-conditional-breakpoints-expected.txt66
-rw-r--r--deps/v8/test/inspector/debugger/wasm-conditional-breakpoints.js75
-rw-r--r--deps/v8/test/inspector/debugger/wasm-evaluate-on-call-frame-expected.txt96
-rw-r--r--deps/v8/test/inspector/debugger/wasm-evaluate-on-call-frame.js31
-rw-r--r--deps/v8/test/inspector/debugger/wasm-inspect-many-registers-expected.txt160
-rw-r--r--deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js5
-rw-r--r--deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint-expected.txt37
-rw-r--r--deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint.js68
-rw-r--r--deps/v8/test/inspector/debugger/wasm-memory-names.js31
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt306
-rw-r--r--deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt127
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stack-check-expected.txt10
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stack-check.js5
-rw-r--r--deps/v8/test/inspector/debugger/wasm-step-a-lot-expected.txt28
-rw-r--r--deps/v8/test/inspector/debugger/wasm-step-a-lot.js56
-rw-r--r--deps/v8/test/inspector/debugger/wasm-step-after-trap-expected.txt20
-rw-r--r--deps/v8/test/inspector/debugger/wasm-step-after-trap.js5
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-no-opcode-merging-expected.txt22
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-no-opcode-merging.js4
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt89
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js13
-rw-r--r--deps/v8/test/inspector/inspector.status36
-rw-r--r--deps/v8/test/inspector/isolate-data.cc4
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1080638-expected.txt0
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1080638.js28
-rw-r--r--deps/v8/test/inspector/runtime/console-message-omit-data-urls-expected.txt6
-rw-r--r--deps/v8/test/inspector/runtime/console-message-omit-data-urls.js63
-rw-r--r--deps/v8/test/inspector/runtime/get-properties-expected.txt10
-rw-r--r--deps/v8/test/inspector/runtime/get-properties.js19
-rw-r--r--deps/v8/test/inspector/tasks.cc4
-rw-r--r--deps/v8/test/inspector/wasm-inspector-test.js95
-rw-r--r--deps/v8/test/intl/date-format/UnwrapDateTimeFormatUseOrdinaryHasInstance.js15
-rw-r--r--deps/v8/test/intl/displaynames/languagecanonical.js71
-rw-r--r--deps/v8/test/intl/intl.status5
-rw-r--r--deps/v8/test/intl/number-format/UnwrapNumberFormatUseOrdinaryHasInstance.js15
-rw-r--r--deps/v8/test/intl/number-format/wont-crash-by-1-or-false.js6
-rw-r--r--deps/v8/test/intl/regress-11350.js49
-rw-r--r--deps/v8/test/intl/regress-1170305.js16
-rw-r--r--deps/v8/test/intl/regress-1177623.js5
-rw-r--r--deps/v8/test/intl/regress-1177812.js7
-rw-r--r--deps/v8/test/message/fail/class-fields-static-throw.out4
-rw-r--r--deps/v8/test/message/fail/modules-import-assertions-fail-1.mjs9
-rw-r--r--deps/v8/test/message/fail/modules-import-assertions-fail-1.out1
-rw-r--r--deps/v8/test/message/fail/modules-import-assertions-fail-2.mjs9
-rw-r--r--deps/v8/test/message/fail/modules-import-assertions-fail-2.out4
-rw-r--r--deps/v8/test/message/fail/modules-import-assertions-fail-3.mjs9
-rw-r--r--deps/v8/test/message/fail/modules-import-assertions-fail-3.out4
-rw-r--r--deps/v8/test/message/fail/modules-skip-1-import-assertions-fail.mjs7
-rw-r--r--deps/v8/test/message/fail/modules-skip-3-import-assertions-fail.json1
-rw-r--r--deps/v8/test/message/message.status15
-rw-r--r--deps/v8/test/message/wasm-trace-memory.js1
-rw-r--r--deps/v8/test/mjsunit/BUILD.gn1
-rw-r--r--deps/v8/test/mjsunit/array-bounds-check-removal.js80
-rw-r--r--deps/v8/test/mjsunit/baseline/cross-realm.js68
-rw-r--r--deps/v8/test/mjsunit/baseline/test-baseline-module-helper.mjs5
-rw-r--r--deps/v8/test/mjsunit/baseline/test-baseline-module.mjs24
-rw-r--r--deps/v8/test/mjsunit/baseline/test-baseline.js315
-rw-r--r--deps/v8/test/mjsunit/compiler/array-slice-clone.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/number-divide.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1177368.js20
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1177369.js28
-rw-r--r--deps/v8/test/mjsunit/concurrent-initial-prototype-change-1.js (renamed from deps/v8/test/mjsunit/concurrent-initial-prototype-change.js)8
-rw-r--r--deps/v8/test/mjsunit/concurrent-initial-prototype-change-2.js69
-rw-r--r--deps/v8/test/mjsunit/const-dict-tracking.js262
-rw-r--r--deps/v8/test/mjsunit/const-field-tracking-2.js2
-rw-r--r--deps/v8/test/mjsunit/d8/d8-fuzzable-worker.js69
-rw-r--r--deps/v8/test/mjsunit/dictionary-properties.js18
-rw-r--r--deps/v8/test/mjsunit/dictionary-prototypes.js3
-rw-r--r--deps/v8/test/mjsunit/es6/block-let-crankshaft-sloppy.js9
-rw-r--r--deps/v8/test/mjsunit/es6/class-computed-property-names-super.js2
-rw-r--r--deps/v8/test/mjsunit/es6/collections-constructor-iterator-side-effect.js2
-rw-r--r--deps/v8/test/mjsunit/es6/computed-property-names-super.js2
-rw-r--r--deps/v8/test/mjsunit/es6/home-object-in-context.js196
-rw-r--r--deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect3.js3
-rw-r--r--deps/v8/test/mjsunit/es6/object-literals-super.js53
-rw-r--r--deps/v8/test/mjsunit/es6/super-ic-opt-no-turboprop.js2
-rw-r--r--deps/v8/test/mjsunit/es6/super-ic-opt.js6
-rw-r--r--deps/v8/test/mjsunit/fast-prototype.js20
-rw-r--r--deps/v8/test/mjsunit/harmony/atomics-value-check.js11
-rw-r--r--deps/v8/test/mjsunit/harmony/atomics-waitasync-1thread-2timeout.js7
-rw-r--r--deps/v8/test/mjsunit/harmony/atomics-waitasync-1thread-buffer-out-of-scope-timeout.js7
-rw-r--r--deps/v8/test/mjsunit/harmony/atomics-waitasync-1thread-timeout.js7
-rw-r--r--deps/v8/test/mjsunit/harmony/atomics-waitasync-1thread-timeouts-and-no-timeouts.js11
-rw-r--r--deps/v8/test/mjsunit/harmony/atomics-waitasync-helpers.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/atomics-waitasync-worker-shutdown-before-wait-finished-2-waits.js20
-rw-r--r--deps/v8/test/mjsunit/harmony/atomics-waitasync-worker-shutdown-before-wait-finished-2-workers.js17
-rw-r--r--deps/v8/test/mjsunit/harmony/atomics-waitasync-worker-shutdown-before-wait-finished-no-timeout.js16
-rw-r--r--deps/v8/test/mjsunit/harmony/atomics-waitasync-worker-shutdown-before-wait-finished-timeout.js16
-rw-r--r--deps/v8/test/mjsunit/harmony/class-static-blocks.js134
-rw-r--r--deps/v8/test/mjsunit/harmony/futex.js27
-rw-r--r--deps/v8/test/mjsunit/harmony/import-from-fetch-errored.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-15-top-level-await.mjs2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-15.mjs2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-assertions-1.mjs9
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-assertions-2.mjs9
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-assertions-3.mjs9
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-assertions-4.mjs9
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-1.mjs12
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-10.mjs19
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-11.mjs19
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-2.mjs13
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-3.mjs13
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-4.mjs14
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-5.mjs12
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-6.mjs13
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-7.mjs63
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-8.mjs13
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-9.mjs13
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-1.json1
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-imports-json-1.mjs6
-rw-r--r--deps/v8/test/mjsunit/harmony/private-brand-checks.js567
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-match-indices-no-flag.js13
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-match-indices.js64
-rw-r--r--deps/v8/test/mjsunit/json2.js4
-rw-r--r--deps/v8/test/mjsunit/mjsunit.js16
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status355
-rw-r--r--deps/v8/test/mjsunit/object-seal.js8
-rw-r--r--deps/v8/test/mjsunit/regexp-linear-flag.js7
-rw-r--r--deps/v8/test/mjsunit/regexp-no-linear-flag.js4
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-575364.js (renamed from deps/v8/test/mjsunit/regress/regress-575364.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-592352.js (renamed from deps/v8/test/mjsunit/regress/regress-592352.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-599719.js (renamed from deps/v8/test/mjsunit/regress/regress-599719.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-599825.js (renamed from deps/v8/test/mjsunit/regress/regress-599825.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-608630.js (renamed from deps/v8/test/mjsunit/regress/regress-608630.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-613928.js (renamed from deps/v8/test/mjsunit/regress/regress-613928.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-617525.js (renamed from deps/v8/test/mjsunit/regress/regress-617525.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-617526.js (renamed from deps/v8/test/mjsunit/regress/regress-617526.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-617529.js (renamed from deps/v8/test/mjsunit/regress/regress-617529.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-618608.js (renamed from deps/v8/test/mjsunit/regress/regress-618608.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-6196.js (renamed from deps/v8/test/mjsunit/regress/regress-6196.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-6298.js (renamed from deps/v8/test/mjsunit/regress/regress-6298.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-6431.js (renamed from deps/v8/test/mjsunit/regress/regress-6431.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-6700.js (renamed from deps/v8/test/mjsunit/regress/regress-6700.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-6838-1.js (renamed from deps/v8/test/mjsunit/regress/regress-6838-1.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-6838-2.js (renamed from deps/v8/test/mjsunit/regress/regress-6838-2.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-6838-3.js (renamed from deps/v8/test/mjsunit/regress/regress-6838-3.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-6838-4.js (renamed from deps/v8/test/mjsunit/regress/regress-6838-4.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-775710.js (renamed from deps/v8/test/mjsunit/regress/wasm/regress-775710.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-7893.js (renamed from deps/v8/test/mjsunit/regress/regress-7893.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-8377.js (renamed from deps/v8/test/mjsunit/regress/regress-8377.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-8505.js (renamed from deps/v8/test/mjsunit/regress/wasm/regress-8505.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-9022.js (renamed from deps/v8/test/mjsunit/regress/regress-9022.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-crbug-1006592.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-1006592.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-crbug-714971.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-714971.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-crbug-715455.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-715455.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-crbug-719384.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-719384.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-crbug-721835.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-721835.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-crbug-722348.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-722348.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-crbug-759327.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-759327.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-crbug-771428.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-771428.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-crbug-898974.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-898974.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-crbug-934138.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-934138.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-crbug-969368.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-969368.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-crbug-976934.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-976934.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-wasm-crbug-599413.js (renamed from deps/v8/test/mjsunit/regress/regress-wasm-crbug-599413.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-wasm-crbug-618602.js (renamed from deps/v8/test/mjsunit/regress/regress-wasm-crbug-618602.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/async-generator-is-awaiting.js43
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1034322.js30
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1075514.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1163715.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1166138.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1168435.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1170261.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1172797.js48
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1176318.js59
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1176504.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1180012.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1181246.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2326.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5902.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-666046.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-7115.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-740694.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1158138.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1161847-1.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1161847-2.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1162473.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1166095.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1167918.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1167981.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1167988.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1168055.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1171195.js160
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1171600.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1177058.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-605060.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-11360.js212
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1034394.js (renamed from deps/v8/test/mjsunit/regress/regress-1034394.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1054466.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1065599.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1070078.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1081030.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-11335.js56
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1161555.js38
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1168116.js48
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1171788.js46
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1179025.js42
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1179065.js21
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1179182.js28
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1180690.js29
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-5888.js (renamed from deps/v8/test/mjsunit/regress/regress-5888.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-5911.js (renamed from deps/v8/test/mjsunit/regress/regress-5911.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-813440.js (renamed from deps/v8/test/mjsunit/regress/regress-813440.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-863810.js (renamed from deps/v8/test/mjsunit/regress/regress-863810.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-8896.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-8947.js (renamed from deps/v8/test/mjsunit/regress/regress-8947.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-9209.js (renamed from deps/v8/test/mjsunit/regress/regress-9209.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-9832.js (renamed from deps/v8/test/mjsunit/regress/regress-9832.js)1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-crbug-1047368.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-1047368.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-crbug-1168386.js26
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-crbug-1172912.js49
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-crbug-746835.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-746835.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-crbug-772056.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-772056.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-crbug-816961.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-816961.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-crbug-969498.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-969498.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-v8-9106.js (renamed from deps/v8/test/mjsunit/regress/regress-v8-9106.js)0
-rw-r--r--deps/v8/test/mjsunit/smi-mul-const.js1
-rw-r--r--deps/v8/test/mjsunit/stack-traces-class-fields.js4
-rw-r--r--deps/v8/test/mjsunit/stack-traces-custom.js51
-rw-r--r--deps/v8/test/mjsunit/string-external-cached.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/atomics-stress.js36
-rw-r--r--deps/v8/test/mjsunit/wasm/embenchen/box2d.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/embenchen/corrections.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/embenchen/lua_binarytrees.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-global.js54
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions.js17
-rw-r--r--deps/v8/test/mjsunit/wasm/externref.js62
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory-in-call.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-shared-memory.js249
-rw-r--r--deps/v8/test/mjsunit/wasm/js-api.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/loop-unrolling.js146
-rw-r--r--deps/v8/test/mjsunit/wasm/memory64.js83
-rw-r--r--deps/v8/test/mjsunit/wasm/module-memory.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/multi-value.js3
-rw-r--r--deps/v8/test/mjsunit/wasm/origin-trial-flags.js34
-rw-r--r--deps/v8/test/mjsunit/wasm/shared-arraybuffer-worker-simple-gc.js15
-rw-r--r--deps/v8/test/mjsunit/wasm/shared-memory-worker-gc.js15
-rw-r--r--deps/v8/test/mjsunit/wasm/stack.js40
-rw-r--r--deps/v8/test/mjsunit/wasm/tier-down-to-liftoff.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/trap-location.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/type-reflection-with-exnref.js21
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-module-builder.js63
-rw-r--r--deps/v8/test/mjsunit/wasm/worker-memory.js64
-rw-r--r--deps/v8/test/mjsunit/wasm/worker-module.js6
-rw-r--r--deps/v8/test/mjsunit/worker-ping-test.js74
-rw-r--r--deps/v8/test/mozilla/mozilla.status6
-rw-r--r--deps/v8/test/test262/OWNERS1
-rw-r--r--deps/v8/test/test262/test262.status10
-rw-r--r--deps/v8/test/torque/test-torque.tq63
-rw-r--r--deps/v8/test/unittests/BUILD.gn33
-rw-r--r--deps/v8/test/unittests/api/remote-object-unittest.cc4
-rw-r--r--deps/v8/test/unittests/api/v8-object-unittest.cc5
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-riscv64-unittest.cc64
-rw-r--r--deps/v8/test/unittests/codegen/source-position-table-unittest.cc10
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc16
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h7
-rw-r--r--deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc5
-rw-r--r--deps/v8/test/unittests/compiler/decompression-optimizer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc16
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc11
-rw-r--r--deps/v8/test/unittests/compiler/regalloc/mid-tier-register-allocator-unittest.cc26
-rw-r--r--deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc1589
-rw-r--r--deps/v8/test/unittests/heap/cppgc/concurrent-marking-unittest.cc6
-rw-r--r--deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc70
-rw-r--r--deps/v8/test/unittests/heap/cppgc/garbage-collected-unittest.cc48
-rw-r--r--deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/cppgc/heap-growing-unittest.cc18
-rw-r--r--deps/v8/test/unittests/heap/cppgc/heap-statistics-collector-unittest.cc130
-rw-r--r--deps/v8/test/unittests/heap/cppgc/heap-unittest.cc171
-rw-r--r--deps/v8/test/unittests/heap/cppgc/incremental-marking-schedule-unittest.cc8
-rw-r--r--deps/v8/test/unittests/heap/cppgc/marker-unittest.cc15
-rw-r--r--deps/v8/test/unittests/heap/cppgc/member-unittest.cc6
-rw-r--r--deps/v8/test/unittests/heap/cppgc/metric-recorder-unittest.cc324
-rw-r--r--deps/v8/test/unittests/heap/cppgc/minor-gc-unittest.cc4
-rw-r--r--deps/v8/test/unittests/heap/cppgc/object-size-trait-unittest.cc51
-rw-r--r--deps/v8/test/unittests/heap/cppgc/persistent-family-unittest.cc72
-rw-r--r--deps/v8/test/unittests/heap/cppgc/prefinalizer-unittest.cc71
-rw-r--r--deps/v8/test/unittests/heap/cppgc/run-all-unittests.cc23
-rw-r--r--deps/v8/test/unittests/heap/cppgc/stats-collector-scopes-unittest.cc57
-rw-r--r--deps/v8/test/unittests/heap/cppgc/stats-collector-unittest.cc17
-rw-r--r--deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc57
-rw-r--r--deps/v8/test/unittests/heap/cppgc/testing-unittest.cc55
-rw-r--r--deps/v8/test/unittests/heap/cppgc/tests.cc4
-rw-r--r--deps/v8/test/unittests/heap/cppgc/tests.h7
-rw-r--r--deps/v8/test/unittests/heap/cppgc/write-barrier-unittest.cc28
-rw-r--r--deps/v8/test/unittests/heap/heap-utils.cc44
-rw-r--r--deps/v8/test/unittests/heap/heap-utils.h12
-rw-r--r--deps/v8/test/unittests/heap/local-factory-unittest.cc1
-rw-r--r--deps/v8/test/unittests/heap/local-heap-unittest.cc113
-rw-r--r--deps/v8/test/unittests/heap/safepoint-unittest.cc9
-rw-r--r--deps/v8/test/unittests/heap/unified-heap-snapshot-unittest.cc10
-rw-r--r--deps/v8/test/unittests/heap/unified-heap-unittest.cc113
-rw-r--r--deps/v8/test/unittests/heap/unified-heap-utils.cc45
-rw-r--r--deps/v8/test/unittests/heap/unified-heap-utils.h32
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc16
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc40
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc118
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc34
-rw-r--r--deps/v8/test/unittests/interpreter/bytecodes-unittest.cc35
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc3
-rw-r--r--deps/v8/test/unittests/logging/counters-unittest.cc14
-rw-r--r--deps/v8/test/unittests/objects/object-unittest.cc20
-rw-r--r--deps/v8/test/unittests/objects/osr-optimized-code-cache-unittest.cc41
-rw-r--r--deps/v8/test/unittests/objects/value-serializer-unittest.cc21
-rw-r--r--deps/v8/test/unittests/run-all-unittests.cc2
-rw-r--r--deps/v8/test/unittests/torque/torque-unittest.cc1
-rw-r--r--deps/v8/test/unittests/unittests.status20
-rw-r--r--deps/v8/test/unittests/wasm/control-transfer-unittest.cc8
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc338
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc173
-rw-r--r--deps/v8/test/unittests/wasm/subtyping-unittest.cc112
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-posix-unittest.cc3
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc14
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc30
-rw-r--r--deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc22
-rw-r--r--deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc12
-rw-r--r--deps/v8/test/wasm-js/testcfg.py5
-rw-r--r--deps/v8/test/wasm-js/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-js/wasm-js.status6
-rw-r--r--deps/v8/test/wasm-spec-tests/testcfg.py5
-rw-r--r--deps/v8/test/wasm-spec-tests/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-spec-tests/wasm-spec-tests.status83
-rw-r--r--deps/v8/third_party/v8/builtins/OWNERS1
-rw-r--r--deps/v8/third_party/v8/builtins/array-sort.tq2
-rw-r--r--deps/v8/third_party/zlib/adler32.c4
-rw-r--r--deps/v8/third_party/zlib/google/zip_internal.cc6
-rw-r--r--deps/v8/third_party/zlib/google/zip_reader.cc12
-rw-r--r--deps/v8/tools/SourceMap.js382
-rw-r--r--deps/v8/tools/arguments.js78
-rwxr-xr-xdeps/v8/tools/callstats-from-telemetry.sh42
-rw-r--r--deps/v8/tools/callstats.html632
-rwxr-xr-xdeps/v8/tools/check-static-initializers.sh7
-rw-r--r--deps/v8/tools/clusterfuzz/v8_fuzz_flags.json3
-rw-r--r--deps/v8/tools/codemap.js320
-rw-r--r--deps/v8/tools/consarray.js92
-rw-r--r--deps/v8/tools/csvparser.js105
-rw-r--r--deps/v8/tools/debug_helper/BUILD.gn4
-rw-r--r--deps/v8/tools/debug_helper/debug-macro-shims.h103
-rwxr-xr-xdeps/v8/tools/dev/gm.py4
-rwxr-xr-xdeps/v8/tools/dev/v8gen.py8
-rwxr-xr-xdeps/v8/tools/gcmole/bootstrap.sh2
-rw-r--r--deps/v8/tools/gdbinit27
-rwxr-xr-xdeps/v8/tools/gen-v8-gn.py85
-rwxr-xr-xdeps/v8/tools/generate-header-include-checks.py2
-rwxr-xr-xdeps/v8/tools/grokdump.py5
-rw-r--r--deps/v8/tools/heap-stats/categories.js2
-rw-r--r--deps/v8/tools/inspect-d8.js30
-rw-r--r--deps/v8/tools/lldb_commands.py5
-rw-r--r--deps/v8/tools/logreader.js247
-rwxr-xr-xdeps/v8/tools/mb/mb.py93
-rwxr-xr-xdeps/v8/tools/mb/mb_unittest.py39
-rw-r--r--deps/v8/tools/parse-processor.html66
-rw-r--r--deps/v8/tools/parse-processor.mjs90
-rw-r--r--deps/v8/tools/profile.js1172
-rw-r--r--deps/v8/tools/profile.mjs49
-rw-r--r--deps/v8/tools/splaytree.js327
-rw-r--r--deps/v8/tools/system-analyzer/index.html10
-rw-r--r--deps/v8/tools/system-analyzer/index.mjs2
-rw-r--r--deps/v8/tools/system-analyzer/view/log-file-reader-template.html1
-rw-r--r--deps/v8/tools/system-analyzer/view/timeline/timeline-track-template.html2
-rw-r--r--deps/v8/tools/testrunner/base_runner.py28
-rw-r--r--deps/v8/tools/testrunner/local/junit_output.py49
-rw-r--r--deps/v8/tools/testrunner/local/statusfile.py2
-rw-r--r--deps/v8/tools/testrunner/local/variants.py21
-rwxr-xr-xdeps/v8/tools/testrunner/standard_runner.py2
-rw-r--r--deps/v8/tools/testrunner/testproc/fuzzer.py8
-rw-r--r--deps/v8/tools/testrunner/testproc/progress.py40
-rw-r--r--deps/v8/tools/tickprocessor-driver.js83
-rw-r--r--deps/v8/tools/tickprocessor.js977
-rw-r--r--deps/v8/tools/tickprocessor.mjs1286
-rwxr-xr-xdeps/v8/tools/unittests/run_tests_test.py1
-rw-r--r--deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json4
-rw-r--r--deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json4
-rw-r--r--deps/v8/tools/v8heapconst.py811
-rwxr-xr-xdeps/v8/tools/wasm/update-wasm-spec-tests.sh2
-rw-r--r--deps/v8/tools/whitespace.txt3
1487 files changed, 108537 insertions, 44350 deletions
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 2721a0015f..42a9f29d89 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -69,8 +69,10 @@ Ben Newman <ben@meteor.com>
Ben Noordhuis <info@bnoordhuis.nl>
Benjamin Tan <demoneaux@gmail.com>
Bert Belder <bertbelder@gmail.com>
+Brice Dobry <brice.dobry@futurewei.com>
Burcu Dogan <burcujdogan@gmail.com>
Caitlin Potter <caitpotter88@gmail.com>
+Chao Wang <chao.w@rioslab.org>
Craig Schlenter <craig.schlenter@gmail.com>
Charles Kerr <charles@charleskerr.com>
Chengzhong Wu <legendecas@gmail.com>
@@ -86,6 +88,7 @@ David Carlier <devnexen@gmail.com>
David Manouchehri <david@davidmanouchehri.com>
Deepak Mohan <hop2deep@gmail.com>
Deon Dior <diaoyuanjie@gmail.com>
+Derek Tu <derek.t@rioslab.org>
Dominic Farolini <domfarolino@gmail.com>
Douglas Crosher <dtc-v8@scieneer.com>
Dusan Milosavljevic <dusan.m.milosavljevic@gmail.com>
@@ -101,6 +104,7 @@ Gergely Nagy <ngg@ngg.hu>
Gilang Mentari Hamidy <gilang@hamidy.net>
Gus Caplan <me@gus.host>
Gwang Yoon Hwang <ryumiel@company100.net>
+Haichuan Wang <hc.opensource@gmail.com>
Hannu Trey <hannu.trey@gmail.com>
Henrique Ferreiro <henrique.ferreiro@gmail.com>
Hirofumi Mako <mkhrfm@gmail.com>
@@ -119,6 +123,7 @@ Jay Freeman <saurik@saurik.com>
James Pike <g00gle@chilon.net>
James M Snell <jasnell@gmail.com>
Javad Amiri <javad.amiri@anu.edu.au>
+Ji Qiu <qiuji@iscas.ac.cn>
Jianghua Yang <jianghua.yjh@alibaba-inc.com>
Jiawen Geng <technicalcute@gmail.com>
Jiaxun Yang <jiaxun.yang@flygoat.com>
@@ -169,6 +174,7 @@ Oliver Dunk <oliver@oliverdunk.com>
Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>
Peng Fei <pfgenyun@gmail.com>
+Peng Wu <peng.w@rioslab.org>
Peng-Yu Chen <pengyu@libstarrify.so>
Peter Rybin <peter.rybin@gmail.com>
Peter Varga <pvarga@inf.u-szeged.hu>
@@ -183,6 +189,7 @@ Raul Tambre <raul@tambre.ee>
Ray Glover <ray@rayglover.net>
Refael Ackermann <refack@gmail.com>
Rene Rebe <rene@exactcode.de>
+Reza Yazdani <ryazdani@futurewei.com>
Rick Waldron <waldron.rick@gmail.com>
Rob Wu <rob@robwu.nl>
Robert Meijer <robert.s.meijer@gmail.com>
@@ -203,7 +210,9 @@ Shawn Presser <shawnpresser@gmail.com>
Stefan Penner <stefan.penner@gmail.com>
Sylvestre Ledru <sledru@mozilla.com>
Taketoshi Aono <brn@b6n.ch>
+Tao Liqiang <taolq@outlook.com>
Teddy Katz <teddy.katz@gmail.com>
+Thomas Young <wenzhang5800@gmail.com>
Tiancheng "Timothy" Gu <timothygu99@gmail.com>
Tobias Burnus <burnus@net-b.de>
Tobias Nießen <tniessen@tnie.de>
@@ -214,12 +223,15 @@ Victor Costan <costan@gmail.com>
Vlad Burlik <vladbph@gmail.com>
Vladimir Krivosheev <develar@gmail.com>
Vladimir Shutoff <vovan@shutoff.ru>
+Wei Wu <lazyparser@gmail.com>
Wenlu Wang <kingwenlu@gmail.com>
Wiktor Garbacz <wiktor.garbacz@gmail.com>
Wouter Vermeiren <wouter.vermeiren@essensium.com>
+Xiaofang Zou <zouxiaofang@iscas.ac.cn>
Xiaoyin Liu <xiaoyin.l@outlook.com>
Yanbo Li <lybvinci@gmail.com>
Yannic Bonenberger <contact@yannic-bonenberger.com>
+Yi Wang <wangyi8848@gmail.com>
Yong Wang <ccyongwang@tencent.com>
Youfeng Hao <ajihyf@gmail.com>
Yu Yin <xwafish@gmail.com>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index f39529a3a9..a9ab6783fa 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -40,11 +40,6 @@ declare_args() {
# Sets -DV8_ENABLE_FUTURE.
v8_enable_future = false
- # Lite mode disables a number of performance optimizations to reduce memory
- # at the cost of performance.
- # Sets --DV8_LITE_MODE.
- v8_enable_lite_mode = false
-
# Sets -DSYSTEM_INSTRUMENTATION. Enables OS-dependent event tracing
v8_enable_system_instrumentation = false
@@ -115,13 +110,6 @@ declare_args() {
v8_enable_pointer_compression = ""
v8_enable_31bit_smis_on_64bit_arch = false
- # Disable arguments adaptor frame (sets -dV8_NO_ARGUMENTS_ADAPTOR).
- v8_disable_arguments_adaptor =
- v8_current_cpu == "x86" || v8_current_cpu == "x64" ||
- v8_current_cpu == "arm" || v8_current_cpu == "arm64" ||
- v8_current_cpu == "mipsel" || v8_current_cpu == "mips64el" ||
- v8_current_cpu == "ppc64" || v8_current_cpu == "s390x"
-
# Sets -dOBJECT_PRINT.
v8_enable_object_print = ""
@@ -131,8 +119,10 @@ declare_args() {
# Sets -dV8_ENABLE_CHECKS.
v8_enable_v8_checks = ""
- # Sets -dV8_TRACE_IGNITION.
+ # Sets -dV8_TRACE_UNOPTIMIZED.
+ v8_enable_trace_unoptimized = ""
v8_enable_trace_ignition = false
+ v8_enable_trace_baseline_exec = false
# Sets -dV8_TRACE_FEEDBACK_UPDATES.
v8_enable_trace_feedback_updates = false
@@ -292,6 +282,16 @@ declare_args() {
# Experimental feature for always keeping prototypes in dict/"slow" mode
# Sets -DV8_DICT_MODE_PROTOTYPES
v8_dict_mode_prototypes = false
+
+ # If enabled then macro definitions that are used in externally visible
+ # header files are placed in a separate header file v8-gn.h.
+ v8_generate_external_defines_header = false
+
+ # Experimental feature for tracking constness of properties in non-global
+ # dictionaries. Enabling this also always keeps prototypes in dict mode,
+ # meaning that they are not switched to fast mode.
+ # Sets -DV8_DICT_PROPERTY_CONST_TRACKING
+ v8_dict_property_const_tracking = false
}
# Derived defaults.
@@ -346,6 +346,14 @@ assert(!v8_enable_concurrent_marking || v8_enable_atomic_object_field_writes,
"Concurrent marking requires atomic object field writes.")
assert(!v8_enable_concurrent_marking || v8_enable_atomic_marking_state,
"Concurrent marking requires atomic marking state.")
+if (v8_enable_trace_unoptimized == "") {
+ v8_enable_trace_unoptimized =
+ v8_enable_trace_ignition || v8_enable_trace_baseline_exec
+}
+assert(!v8_enable_trace_ignition || v8_enable_trace_unoptimized,
+ "Ignition tracing requires unoptimized tracing to be enabled.")
+assert(!v8_enable_trace_baseline_exec || v8_enable_trace_unoptimized,
+ "Baseline tracing requires unoptimized tracing to be enabled.")
# Toggle pointer compression for correctness fuzzing when building the
# clang_x64_pointer_compression toolchain. We'll correctness-compare the
@@ -416,6 +424,7 @@ config("internal_config_base") {
".",
"include",
"$target_gen_dir",
+ "$target_gen_dir/include",
]
}
@@ -465,6 +474,7 @@ config("libbase_config") {
if (is_android && current_toolchain != host_toolchain) {
libs += [ "log" ]
}
+ include_dirs = [ "$target_gen_dir/include" ]
}
# Standalone cppgc cannot be built within chrome or with perfetto.
@@ -484,23 +494,31 @@ config("libsampler_config") {
include_dirs = [ "include" ]
}
-# This config should only be applied to code using V8 and not any V8 code
-# itself.
-config("external_config") {
+# This config is only applied to v8_headers and is the basis for external_config
+# but without setting the USING_V8_SHARED define, which means v8_headers can be
+# used inside v8 itself.
+config("headers_config") {
defines = []
configs = [
":v8_header_features",
":cppgc_header_features",
]
- if (is_component_build) {
- defines += [ "USING_V8_SHARED" ]
- }
include_dirs = [
"include",
"$target_gen_dir/include",
]
}
+# This config should only be applied to code using V8 and not any V8 code
+# itself.
+config("external_config") {
+ configs = [ ":headers_config" ]
+ defines = []
+ if (is_component_build) {
+ defines += [ "USING_V8_SHARED" ]
+ }
+}
+
# This config should only be applied to code that needs to be explicitly
# aware of whether we are using startup data or not.
config("external_startup_data") {
@@ -509,58 +527,99 @@ config("external_startup_data") {
}
}
+# List of defines that can appear in externally visible header files and that
+# are controlled by args.gn.
+external_v8_defines = [
+ "V8_ENABLE_CHECKS",
+ "V8_COMPRESS_POINTERS",
+ "V8_31BIT_SMIS_ON_64BIT_ARCH",
+ "V8_COMPRESS_ZONES",
+ "V8_HEAP_SANDBOX",
+ "V8_DEPRECATION_WARNINGS",
+ "V8_IMMINENT_DEPRECATION_WARNINGS",
+ "V8_NO_ARGUMENTS_ADAPTOR",
+ "V8_USE_PERFETTO",
+]
+
+enabled_external_v8_defines = []
+
+if (v8_enable_v8_checks) {
+ enabled_external_v8_defines += [ "V8_ENABLE_CHECKS" ]
+}
+if (v8_enable_pointer_compression) {
+ enabled_external_v8_defines += [ "V8_COMPRESS_POINTERS" ]
+}
+if (v8_enable_pointer_compression || v8_enable_31bit_smis_on_64bit_arch) {
+ enabled_external_v8_defines += [ "V8_31BIT_SMIS_ON_64BIT_ARCH" ]
+}
+if (v8_enable_zone_compression) {
+ enabled_external_v8_defines += [ "V8_COMPRESS_ZONES" ]
+}
+if (v8_enable_heap_sandbox) {
+ enabled_external_v8_defines += [ "V8_HEAP_SANDBOX" ]
+}
+if (v8_deprecation_warnings) {
+ enabled_external_v8_defines += [ "V8_DEPRECATION_WARNINGS" ]
+}
+if (v8_imminent_deprecation_warnings) {
+ enabled_external_v8_defines += [ "V8_IMMINENT_DEPRECATION_WARNINGS" ]
+}
+if (v8_use_perfetto) {
+ enabled_external_v8_defines += [ "V8_USE_PERFETTO" ]
+}
+
+disabled_external_v8_defines = external_v8_defines - enabled_external_v8_defines
+
# Put defines that are used in public headers here; public headers are
# defined in "v8_headers" and are included by embedders of V8.
config("v8_header_features") {
visibility = [ ":*" ]
- defines = []
-
- if (v8_enable_v8_checks) {
- defines += [ "V8_ENABLE_CHECKS" ] # Used in "include/v8.h".
- }
- if (v8_enable_pointer_compression) {
- defines += [ "V8_COMPRESS_POINTERS" ]
- }
- if (v8_enable_pointer_compression || v8_enable_31bit_smis_on_64bit_arch) {
- defines += [ "V8_31BIT_SMIS_ON_64BIT_ARCH" ]
- }
- if (v8_enable_zone_compression) {
- defines += [ "V8_COMPRESS_ZONES" ]
- }
- if (v8_enable_heap_sandbox) {
- defines += [ "V8_HEAP_SANDBOX" ]
- }
- if (v8_deprecation_warnings) {
- defines += [ "V8_DEPRECATION_WARNINGS" ]
- }
- if (v8_imminent_deprecation_warnings) {
- defines += [ "V8_IMMINENT_DEPRECATION_WARNINGS" ]
- }
- if (v8_disable_arguments_adaptor) {
- defines += [ "V8_NO_ARGUMENTS_ADAPTOR" ]
- }
- if (v8_use_perfetto) {
- defines += [ "V8_USE_PERFETTO" ]
+ if (v8_generate_external_defines_header) {
+ defines = [ "V8_GN_HEADER" ]
+ } else {
+ defines = enabled_external_v8_defines
}
}
+# List of defines that can appear in externally visible cppgc header files and
+# that are controlled by args.gn.
+external_cppgc_defines = [
+ "CPPGC_SUPPORTS_OBJECT_NAMES",
+ "CPPGC_CAGED_HEAP",
+ "CPPGC_YOUNG_GENERATION",
+]
+
+enabled_external_cppgc_defines = []
+
+if (cppgc_enable_object_names) {
+ enabled_external_cppgc_defines += [ "CPPGC_SUPPORTS_OBJECT_NAMES" ]
+}
+if (cppgc_enable_caged_heap) {
+ enabled_external_cppgc_defines += [ "CPPGC_CAGED_HEAP" ]
+}
+if (cppgc_enable_young_generation) {
+ enabled_external_cppgc_defines += [ "CPPGC_YOUNG_GENERATION" ]
+}
+
+disabled_external_cppgc_defines =
+ external_cppgc_defines - enabled_external_cppgc_defines
+
config("cppgc_header_features") {
visibility = [ ":*" ]
- defines = []
-
- if (cppgc_enable_object_names) {
- defines += [ "CPPGC_SUPPORTS_OBJECT_NAMES" ]
- }
- if (cppgc_enable_caged_heap) {
- defines += [ "CPPGC_CAGED_HEAP" ]
- }
- if (cppgc_enable_young_generation) {
- defines += [ "CPPGC_YOUNG_GENERATION" ]
+ if (v8_generate_external_defines_header) {
+ defines = [ "V8_GN_HEADER" ]
+ } else {
+ defines = enabled_external_cppgc_defines
}
}
+enabled_external_defines =
+ enabled_external_v8_defines + enabled_external_cppgc_defines
+disabled_external_defines =
+ disabled_external_v8_defines + disabled_external_cppgc_defines
+
# Put defines here that are only used in our internal files and NEVER in
# external headers that embedders (such as chromium and node) might include.
config("features") {
@@ -621,8 +680,8 @@ config("features") {
if (v8_enable_trace_maps) {
defines += [ "V8_TRACE_MAPS" ]
}
- if (v8_enable_trace_ignition) {
- defines += [ "V8_TRACE_IGNITION" ]
+ if (v8_enable_trace_unoptimized) {
+ defines += [ "V8_TRACE_UNOPTIMIZED" ]
}
if (v8_enable_trace_feedback_updates) {
defines += [ "V8_TRACE_FEEDBACK_UPDATES" ]
@@ -707,6 +766,12 @@ config("features") {
if (v8_etw_guid != "") {
defines += [ "V8_ETW_GUID=\"$v8_etw_guid\"" ]
}
+ if (v8_enable_webassembly) {
+ defines += [ "V8_ENABLE_WEBASSEMBLY" ]
+ }
+ if (v8_dict_property_const_tracking) {
+ defines += [ "V8_DICT_PROPERTY_CONST_TRACKING" ]
+ }
}
config("toolchain") {
@@ -865,6 +930,15 @@ config("toolchain") {
}
}
+ # Under simulator build, compiler will not provide __riscv_xlen. Define here
+ if (v8_current_cpu == "riscv64") {
+ defines += [ "V8_TARGET_ARCH_RISCV64" ]
+ defines += [ "__riscv_xlen=64" ]
+
+ #FIXME: Temporarily use MIPS macro for the building.
+ defines += [ "CAN_USE_FPU_INSTRUCTIONS" ]
+ }
+
if (v8_current_cpu == "x86") {
defines += [ "V8_TARGET_ARCH_IA32" ]
if (is_win) {
@@ -953,7 +1027,7 @@ config("toolchain") {
}
if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" ||
- v8_current_cpu == "mips64el") {
+ v8_current_cpu == "mips64el" || v8_current_cpu == "riscv64") {
cflags += [ "-Wshorten-64-to-32" ]
}
}
@@ -1144,6 +1218,7 @@ action("postmortem-metadata") {
"src/objects/primitive-heap-object.h",
"src/objects/primitive-heap-object-inl.h",
"src/objects/scope-info.h",
+ "src/objects/scope-info-inl.h",
"src/objects/script.h",
"src/objects/script-inl.h",
"src/objects/shared-function-info.cc",
@@ -1254,7 +1329,10 @@ torque_files = [
"src/builtins/string-at.tq",
"src/builtins/string-endswith.tq",
"src/builtins/string-html.tq",
+ "src/builtins/string-includes.tq",
+ "src/builtins/string-indexof.tq",
"src/builtins/string-iterator.tq",
+ "src/builtins/string-match-search.tq",
"src/builtins/string-pad.tq",
"src/builtins/string-repeat.tq",
"src/builtins/string-replaceall.tq",
@@ -1287,6 +1365,7 @@ torque_files = [
"src/builtins/typed-array.tq",
"src/builtins/wasm.tq",
"src/builtins/weak-ref.tq",
+ "src/debug/debug-wasm-objects.tq",
"src/ic/handler-configuration.tq",
"src/objects/allocation-site.tq",
"src/objects/api-callbacks.tq",
@@ -1339,6 +1418,7 @@ torque_files = [
"src/objects/stack-frame-info.tq",
"src/objects/string.tq",
"src/objects/struct.tq",
+ "src/objects/swiss-name-dictionary.tq",
"src/objects/synthetic-module.tq",
"src/objects/template-objects.tq",
"src/objects/templates.tq",
@@ -1397,22 +1477,24 @@ template("run_torque") {
files = [
"$target_gen_dir/torque-generated/bit-fields.h",
"$target_gen_dir/torque-generated/builtin-definitions.h",
- "$target_gen_dir/torque-generated/interface-descriptors.inc",
- "$target_gen_dir/torque-generated/factory.cc",
- "$target_gen_dir/torque-generated/factory.inc",
- "$target_gen_dir/torque-generated/field-offsets.h",
+ "$target_gen_dir/torque-generated/class-debug-readers.cc",
+ "$target_gen_dir/torque-generated/class-debug-readers.h",
+ "$target_gen_dir/torque-generated/class-forward-declarations.h",
"$target_gen_dir/torque-generated/class-verifiers.cc",
"$target_gen_dir/torque-generated/class-verifiers.h",
+ "$target_gen_dir/torque-generated/csa-types.h",
+ "$target_gen_dir/torque-generated/debug-macros.cc",
+ "$target_gen_dir/torque-generated/debug-macros.h",
"$target_gen_dir/torque-generated/enum-verifiers.cc",
- "$target_gen_dir/torque-generated/objects-printer.cc",
- "$target_gen_dir/torque-generated/objects-body-descriptors-inl.inc",
- "$target_gen_dir/torque-generated/class-debug-readers.cc",
- "$target_gen_dir/torque-generated/class-debug-readers.h",
"$target_gen_dir/torque-generated/exported-macros-assembler.cc",
"$target_gen_dir/torque-generated/exported-macros-assembler.h",
- "$target_gen_dir/torque-generated/csa-types.h",
+ "$target_gen_dir/torque-generated/factory.cc",
+ "$target_gen_dir/torque-generated/factory.inc",
+ "$target_gen_dir/torque-generated/field-offsets.h",
"$target_gen_dir/torque-generated/instance-types.h",
- "$target_gen_dir/torque-generated/class-forward-declarations.h",
+ "$target_gen_dir/torque-generated/interface-descriptors.inc",
+ "$target_gen_dir/torque-generated/objects-body-descriptors-inl.inc",
+ "$target_gen_dir/torque-generated/objects-printer.cc",
]
outputs = []
@@ -1758,6 +1840,8 @@ action("v8_dump_build_config") {
"v8_enable_verify_csa=$v8_enable_verify_csa",
"v8_enable_lite_mode=$v8_enable_lite_mode",
"v8_enable_pointer_compression=$v8_enable_pointer_compression",
+ "v8_enable_webassembly=$v8_enable_webassembly",
+ "v8_control_flow_integrity=$v8_control_flow_integrity",
"v8_target_cpu=\"$v8_target_cpu\"",
]
@@ -1832,6 +1916,8 @@ v8_source_set("v8_initializers") {
deps = [
":torque_generated_initializers",
+ ":v8_base_without_compiler",
+ ":v8_shared_internal_headers",
":v8_tracing",
]
@@ -1954,6 +2040,11 @@ v8_source_set("v8_initializers") {
### gcmole(arch:s390) ###
"src/builtins/s390/builtins-s390.cc",
]
+ } else if (v8_current_cpu == "riscv64") {
+ sources += [
+ ### gcmole(arch:riscv64) ###
+ "src/builtins/riscv64/builtins-riscv64.cc",
+ ]
}
if (!v8_enable_i18n_support) {
@@ -1967,6 +2058,7 @@ v8_source_set("v8_init") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
+ ":v8_base_without_compiler",
":v8_initializers",
":v8_tracing",
]
@@ -1993,21 +2085,35 @@ v8_header_set("v8_version") {
]
}
+v8_header_set("v8_config_headers") {
+ configs = [ ":internal_config" ]
+
+ sources = [
+ "include/v8-platform.h",
+ "include/v8config.h",
+ ]
+
+ deps = []
+
+ if (v8_generate_external_defines_header) {
+ sources += [ "$target_gen_dir/include/v8-gn.h" ]
+ deps += [ ":gen_v8_gn" ]
+ }
+}
+
# This is split out to be a non-code containing target that the Chromium browser
# can depend upon to get basic v8 types.
v8_header_set("v8_headers") {
configs = [ ":internal_config" ]
- public_configs = [
- ":v8_header_features",
- ":cppgc_header_features",
- ]
+ public_configs = [ ":headers_config" ]
sources = [
"include/v8-cppgc.h",
"include/v8-fast-api-calls.h",
"include/v8-internal.h",
+ "include/v8-profiler.h",
+ "include/v8-util.h",
"include/v8.h",
- "include/v8config.h",
]
sources += [
@@ -2018,23 +2124,59 @@ v8_header_set("v8_headers") {
"include/v8-wasm-trap-handler-win.h",
]
+ public_deps = [ ":v8_config_headers" ]
+
deps = [ ":v8_version" ]
}
-v8_source_set("v8_wrappers") {
+if (v8_generate_external_defines_header) {
+ action("gen_v8_gn") {
+ visibility = [ ":*" ]
+
+ script = "tools/gen-v8-gn.py"
+ outputs = [ "$target_gen_dir/include/v8-gn.h" ]
+
+ args = [
+ "-o",
+ rebase_path("$target_gen_dir/include/v8-gn.h", root_build_dir),
+ ]
+ foreach(define, enabled_external_defines) {
+ args += [
+ "-p",
+ define,
+ ]
+ }
+ foreach(define, disabled_external_defines) {
+ args += [
+ "-n",
+ define,
+ ]
+ }
+ }
+}
+
+v8_header_set("v8_wrappers") {
configs = [ ":internal_config" ]
sources = [ "src/base/platform/wrappers.h" ]
}
-# This is split out to share basic headers with Torque.
+# This is split out to share basic headers with Torque and everything else:(
v8_header_set("v8_shared_internal_headers") {
- visibility = [ ":*" ] # Only targets in this file can depend on this.
+ visibility = [
+ ":*",
+ "test/cctest:*",
+ "test/unittests:*",
+ "tools/debug_helper/:*",
+ ]
configs = [ ":internal_config" ]
sources = [ "src/common/globals.h" ]
- deps = [ ":v8_headers" ]
+ deps = [
+ ":v8_headers",
+ ":v8_libbase",
+ ]
}
v8_compiler_sources = [
@@ -2202,6 +2344,8 @@ v8_compiler_sources = [
"src/compiler/node-marker.h",
"src/compiler/node-matchers.cc",
"src/compiler/node-matchers.h",
+ "src/compiler/node-observer.cc",
+ "src/compiler/node-observer.h",
"src/compiler/node-origin-table.cc",
"src/compiler/node-origin-table.h",
"src/compiler/node-properties.cc",
@@ -2291,6 +2435,12 @@ v8_source_set("v8_compiler_opt") {
":v8_tracing",
]
+ deps = [
+ ":v8_base_without_compiler",
+ ":v8_libbase",
+ ":v8_shared_internal_headers",
+ ]
+
if (is_debug && !v8_optimized_debug && v8_enable_fast_mksnapshot) {
# The :no_optimize config is added to v8_add_configs in v8.gni.
remove_configs = [ "//build/config/compiler:no_optimize" ]
@@ -2316,6 +2466,12 @@ v8_source_set("v8_compiler") {
":v8_tracing",
]
+ deps = [
+ ":v8_base_without_compiler",
+ ":v8_libbase",
+ ":v8_shared_internal_headers",
+ ]
+
configs = [ ":internal_config" ]
}
@@ -2351,35 +2507,20 @@ v8_source_set("v8_base_without_compiler") {
### gcmole(all) ###
"$target_gen_dir/builtins-generated/bytecodes-builtins-list.h",
"include/cppgc/common.h",
- "include/v8-cppgc.h",
- "include/v8-fast-api-calls.h",
"include/v8-inspector-protocol.h",
"include/v8-inspector.h",
- "include/v8-internal.h",
"include/v8-metrics.h",
- "include/v8-platform.h",
- "include/v8-profiler.h",
"include/v8-unwinder-state.h",
- "include/v8-util.h",
"include/v8-wasm-trap-handler-posix.h",
- "include/v8.h",
- "include/v8config.h",
"src/api/api-arguments-inl.h",
"src/api/api-arguments.cc",
"src/api/api-arguments.h",
+ "src/api/api-inl.h",
+ "src/api/api-macros.h",
"src/api/api-natives.cc",
"src/api/api-natives.h",
"src/api/api.cc",
"src/api/api.h",
- "src/asmjs/asm-js.cc",
- "src/asmjs/asm-js.h",
- "src/asmjs/asm-names.h",
- "src/asmjs/asm-parser.cc",
- "src/asmjs/asm-parser.h",
- "src/asmjs/asm-scanner.cc",
- "src/asmjs/asm-scanner.h",
- "src/asmjs/asm-types.cc",
- "src/asmjs/asm-types.h",
"src/ast/ast-function-literal-id-reindexer.cc",
"src/ast/ast-function-literal-id-reindexer.h",
"src/ast/ast-source-ranges.h",
@@ -2398,6 +2539,12 @@ v8_source_set("v8_base_without_compiler") {
"src/ast/source-range-ast-visitor.h",
"src/ast/variables.cc",
"src/ast/variables.h",
+ "src/baseline/baseline-assembler-inl.h",
+ "src/baseline/baseline-assembler.h",
+ "src/baseline/baseline-compiler.cc",
+ "src/baseline/baseline-compiler.h",
+ "src/baseline/baseline.cc",
+ "src/baseline/baseline.h",
"src/builtins/accessors.cc",
"src/builtins/accessors.h",
"src/builtins/builtins-api.cc",
@@ -2529,6 +2676,7 @@ v8_source_set("v8_base_without_compiler") {
"src/debug/debug-evaluate.h",
"src/debug/debug-frames.cc",
"src/debug/debug-frames.h",
+ "src/debug/debug-interface.cc",
"src/debug/debug-interface.h",
"src/debug/debug-property-iterator.cc",
"src/debug/debug-property-iterator.h",
@@ -2540,8 +2688,9 @@ v8_source_set("v8_base_without_compiler") {
"src/debug/debug-stack-trace-iterator.h",
"src/debug/debug-type-profile.cc",
"src/debug/debug-type-profile.h",
- "src/debug/debug-wasm-support.cc",
- "src/debug/debug-wasm-support.h",
+ "src/debug/debug-wasm-objects-inl.h",
+ "src/debug/debug-wasm-objects.cc",
+ "src/debug/debug-wasm-objects.h",
"src/debug/debug.cc",
"src/debug/debug.h",
"src/debug/interface-types.h",
@@ -2549,8 +2698,18 @@ v8_source_set("v8_base_without_compiler") {
"src/debug/liveedit.h",
"src/deoptimizer/deoptimize-reason.cc",
"src/deoptimizer/deoptimize-reason.h",
+ "src/deoptimizer/deoptimized-frame-info.cc",
+ "src/deoptimizer/deoptimized-frame-info.h",
"src/deoptimizer/deoptimizer.cc",
"src/deoptimizer/deoptimizer.h",
+ "src/deoptimizer/frame-description.h",
+ "src/deoptimizer/materialized-object-store.cc",
+ "src/deoptimizer/materialized-object-store.h",
+ "src/deoptimizer/translated-state.cc",
+ "src/deoptimizer/translated-state.h",
+ "src/deoptimizer/translation-array.cc",
+ "src/deoptimizer/translation-array.h",
+ "src/deoptimizer/translation-opcode.h",
"src/diagnostics/basic-block-profiler.cc",
"src/diagnostics/basic-block-profiler.h",
"src/diagnostics/code-tracer.h",
@@ -2959,8 +3118,6 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/fixed-array.h",
"src/objects/foreign-inl.h",
"src/objects/foreign.h",
- "src/objects/frame-array-inl.h",
- "src/objects/frame-array.h",
"src/objects/free-space-inl.h",
"src/objects/free-space.h",
"src/objects/function-kind.h",
@@ -3040,9 +3197,6 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/js-weak-refs.h",
"src/objects/keys.cc",
"src/objects/keys.h",
- "src/objects/layout-descriptor-inl.h",
- "src/objects/layout-descriptor.cc",
- "src/objects/layout-descriptor.h",
"src/objects/literal-objects-inl.h",
"src/objects/literal-objects.cc",
"src/objects/literal-objects.h",
@@ -3103,6 +3257,7 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/prototype-info.h",
"src/objects/prototype.h",
"src/objects/regexp-match-info.h",
+ "src/objects/scope-info-inl.h",
"src/objects/scope-info.cc",
"src/objects/scope-info.h",
"src/objects/script-inl.h",
@@ -3130,6 +3285,10 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/string.h",
"src/objects/struct-inl.h",
"src/objects/struct.h",
+ "src/objects/swiss-hash-table-helpers.h",
+ "src/objects/swiss-name-dictionary-inl.h",
+ "src/objects/swiss-name-dictionary.cc",
+ "src/objects/swiss-name-dictionary.h",
"src/objects/synthetic-module-inl.h",
"src/objects/synthetic-module.cc",
"src/objects/synthetic-module.h",
@@ -3275,7 +3434,6 @@ v8_source_set("v8_base_without_compiler") {
"src/runtime/runtime-futex.cc",
"src/runtime/runtime-generator.cc",
"src/runtime/runtime-internal.cc",
- "src/runtime/runtime-interpreter.cc",
"src/runtime/runtime-intl.cc",
"src/runtime/runtime-literals.cc",
"src/runtime/runtime-module.cc",
@@ -3289,6 +3447,7 @@ v8_source_set("v8_base_without_compiler") {
"src/runtime/runtime-strings.cc",
"src/runtime/runtime-symbol.cc",
"src/runtime/runtime-test.cc",
+ "src/runtime/runtime-trace.cc",
"src/runtime/runtime-typedarray.cc",
"src/runtime/runtime-utils.h",
"src/runtime/runtime-wasm.cc",
@@ -3439,6 +3598,7 @@ v8_source_set("v8_base_without_compiler") {
"src/wasm/streaming-decoder.h",
"src/wasm/struct-types.h",
"src/wasm/sync-streaming-decoder.cc",
+ "src/wasm/value-type.cc",
"src/wasm/value-type.h",
"src/wasm/wasm-arguments.h",
"src/wasm/wasm-code-manager.cc",
@@ -3498,6 +3658,20 @@ v8_source_set("v8_base_without_compiler") {
"src/zone/zone.h",
]
+ if (v8_enable_webassembly) {
+ sources += [
+ "src/asmjs/asm-js.cc",
+ "src/asmjs/asm-js.h",
+ "src/asmjs/asm-names.h",
+ "src/asmjs/asm-parser.cc",
+ "src/asmjs/asm-parser.h",
+ "src/asmjs/asm-scanner.cc",
+ "src/asmjs/asm-scanner.h",
+ "src/asmjs/asm-types.cc",
+ "src/asmjs/asm-types.h",
+ ]
+ }
+
if (!v8_control_flow_integrity) {
sources += [ "src/execution/pointer-authentication-dummy.h" ]
}
@@ -3546,6 +3720,8 @@ v8_source_set("v8_base_without_compiler") {
if (v8_current_cpu == "x86") {
sources += [ ### gcmole(arch:ia32) ###
+ "src/baseline/ia32/baseline-assembler-ia32-inl.h",
+ "src/baseline/ia32/baseline-compiler-ia32-inl.h",
"src/codegen/ia32/assembler-ia32-inl.h",
"src/codegen/ia32/assembler-ia32.cc",
"src/codegen/ia32/assembler-ia32.h",
@@ -3572,6 +3748,8 @@ v8_source_set("v8_base_without_compiler") {
]
} else if (v8_current_cpu == "x64") {
sources += [ ### gcmole(arch:x64) ###
+ "src/baseline/x64/baseline-assembler-x64-inl.h",
+ "src/baseline/x64/baseline-compiler-x64-inl.h",
"src/codegen/x64/assembler-x64-inl.h",
"src/codegen/x64/assembler-x64.cc",
"src/codegen/x64/assembler-x64.h",
@@ -3622,6 +3800,8 @@ v8_source_set("v8_base_without_compiler") {
}
} else if (v8_current_cpu == "arm") {
sources += [ ### gcmole(arch:arm) ###
+ "src/baseline/arm/baseline-assembler-arm-inl.h",
+ "src/baseline/arm/baseline-compiler-arm-inl.h",
"src/codegen/arm/assembler-arm-inl.h",
"src/codegen/arm/assembler-arm.cc",
"src/codegen/arm/assembler-arm.h",
@@ -3653,6 +3833,8 @@ v8_source_set("v8_base_without_compiler") {
]
} else if (v8_current_cpu == "arm64") {
sources += [ ### gcmole(arch:arm64) ###
+ "src/baseline/arm64/baseline-assembler-arm64-inl.h",
+ "src/baseline/arm64/baseline-compiler-arm64-inl.h",
"src/codegen/arm64/assembler-arm64-inl.h",
"src/codegen/arm64/assembler-arm64.cc",
"src/codegen/arm64/assembler-arm64.h",
@@ -3697,6 +3879,13 @@ v8_source_set("v8_base_without_compiler") {
if (v8_control_flow_integrity) {
sources += [ "src/execution/arm64/pointer-authentication-arm64.h" ]
}
+ if (current_cpu == "arm64" && is_mac) {
+ sources += [
+ "src/trap-handler/handler-inside-posix.cc",
+ "src/trap-handler/handler-inside-posix.h",
+ "src/trap-handler/handler-outside-posix.cc",
+ ]
+ }
if (is_win) {
sources += [
"src/diagnostics/unwinding-info-win64.cc",
@@ -3705,6 +3894,8 @@ v8_source_set("v8_base_without_compiler") {
}
} else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
sources += [ ### gcmole(arch:mipsel) ###
+ "src/baseline/mips/baseline-assembler-mips-inl.h",
+ "src/baseline/mips/baseline-compiler-mips-inl.h",
"src/codegen/mips/assembler-mips-inl.h",
"src/codegen/mips/assembler-mips.cc",
"src/codegen/mips/assembler-mips.h",
@@ -3733,6 +3924,8 @@ v8_source_set("v8_base_without_compiler") {
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [ ### gcmole(arch:mips64el) ###
+ "src/baseline/mips64/baseline-assembler-mips64-inl.h",
+ "src/baseline/mips64/baseline-compiler-mips64-inl.h",
"src/codegen/mips64/assembler-mips64-inl.h",
"src/codegen/mips64/assembler-mips64.cc",
"src/codegen/mips64/assembler-mips64.h",
@@ -3761,6 +3954,8 @@ v8_source_set("v8_base_without_compiler") {
]
} else if (v8_current_cpu == "ppc") {
sources += [ ### gcmole(arch:ppc) ###
+ "src/baseline/ppc/baseline-assembler-ppc-inl.h",
+ "src/baseline/ppc/baseline-compiler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc.cc",
"src/codegen/ppc/assembler-ppc.h",
@@ -3792,6 +3987,8 @@ v8_source_set("v8_base_without_compiler") {
]
} else if (v8_current_cpu == "ppc64") {
sources += [ ### gcmole(arch:ppc64) ###
+ "src/baseline/ppc/baseline-assembler-ppc-inl.h",
+ "src/baseline/ppc/baseline-compiler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc.cc",
"src/codegen/ppc/assembler-ppc.h",
@@ -3823,6 +4020,8 @@ v8_source_set("v8_base_without_compiler") {
]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
sources += [ ### gcmole(arch:s390) ###
+ "src/baseline/s390/baseline-assembler-s390-inl.h",
+ "src/baseline/s390/baseline-compiler-s390-inl.h",
"src/codegen/s390/assembler-s390-inl.h",
"src/codegen/s390/assembler-s390.cc",
"src/codegen/s390/assembler-s390.h",
@@ -3852,6 +4051,34 @@ v8_source_set("v8_base_without_compiler") {
"src/regexp/s390/regexp-macro-assembler-s390.h",
"src/wasm/baseline/s390/liftoff-assembler-s390.h",
]
+ } else if (v8_current_cpu == "riscv64") {
+ sources += [ ### gcmole(arch:riscv64) ###
+ "src/codegen/riscv64/assembler-riscv64-inl.h",
+ "src/codegen/riscv64/assembler-riscv64.cc",
+ "src/codegen/riscv64/assembler-riscv64.h",
+ "src/codegen/riscv64/constants-riscv64.cc",
+ "src/codegen/riscv64/constants-riscv64.h",
+ "src/codegen/riscv64/cpu-riscv64.cc",
+ "src/codegen/riscv64/interface-descriptors-riscv64.cc",
+ "src/codegen/riscv64/macro-assembler-riscv64.cc",
+ "src/codegen/riscv64/macro-assembler-riscv64.h",
+ "src/codegen/riscv64/register-riscv64.h",
+ "src/compiler/backend/riscv64/code-generator-riscv64.cc",
+ "src/compiler/backend/riscv64/instruction-codes-riscv64.h",
+ "src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc",
+ "src/compiler/backend/riscv64/instruction-selector-riscv64.cc",
+ "src/debug/riscv64/debug-riscv64.cc",
+ "src/deoptimizer/riscv64/deoptimizer-riscv64.cc",
+ "src/diagnostics/riscv64/disasm-riscv64.cc",
+ "src/diagnostics/riscv64/unwinder-riscv64.cc",
+ "src/execution/riscv64/frame-constants-riscv64.cc",
+ "src/execution/riscv64/frame-constants-riscv64.h",
+ "src/execution/riscv64/simulator-riscv64.cc",
+ "src/execution/riscv64/simulator-riscv64.h",
+ "src/regexp/riscv64/regexp-macro-assembler-riscv64.cc",
+ "src/regexp/riscv64/regexp-macro-assembler-riscv64.h",
+ "src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h",
+ ]
}
configs = [
@@ -3876,6 +4103,7 @@ v8_source_set("v8_base_without_compiler") {
":cppgc_base",
":generate_bytecode_builtins_list",
":run_torque",
+ ":v8_headers",
":v8_maybe_icu",
]
@@ -3952,7 +4180,8 @@ v8_source_set("v8_base_without_compiler") {
if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel" ||
v8_current_cpu == "mips64" || v8_current_cpu == "mips64el" ||
v8_current_cpu == "ppc" || v8_current_cpu == "ppc64" ||
- v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
+ v8_current_cpu == "s390" || v8_current_cpu == "s390x" ||
+ v8_current_cpu == "riscv64") {
libs += [ "atomic" ]
}
@@ -4297,8 +4526,15 @@ v8_component("v8_libbase") {
sources += [ "src/base/ubsan.cc" ]
}
+ if (v8_current_cpu == "riscv64") {
+ libs += [ "atomic" ]
+ }
+
if (is_tsan && !build_with_chromium) {
data += [ "tools/sanitizers/tsan_suppressions.txt" ]
+
+ # llvm-symbolizer uses libstdc++ from the clang package.
+ data += [ "//third_party/llvm-build/Release+Asserts/lib/libstdc++.so.6" ]
}
# TODO(jochen): Add support for qnx, freebsd, openbsd, netbsd, and solaris.
@@ -4413,6 +4649,7 @@ v8_source_set("v8_cppgc_shared") {
"src/heap/base/stack.h",
"src/heap/base/worklist.cc",
"src/heap/base/worklist.h",
+ "src/heap/cppgc/sanitizers.h",
]
if (is_clang || !is_win) {
@@ -4432,6 +4669,8 @@ v8_source_set("v8_cppgc_shared") {
sources += [ "src/heap/base/asm/mips/push_registers_asm.cc" ]
} else if (current_cpu == "mips64el") {
sources += [ "src/heap/base/asm/mips64/push_registers_asm.cc" ]
+ } else if (current_cpu == "riscv64") {
+ sources += [ "src/heap/base/asm/riscv64/push_registers_asm.cc" ]
}
} else if (is_win) {
if (current_cpu == "x64") {
@@ -4465,6 +4704,7 @@ v8_header_set("cppgc_headers") {
"include/cppgc/visitor.h",
]
+ deps = [ ":cppgc_base" ]
public_deps = [ ":v8_headers" ]
}
@@ -4479,6 +4719,8 @@ v8_source_set("cppgc_base") {
"include/cppgc/ephemeron-pair.h",
"include/cppgc/garbage-collected.h",
"include/cppgc/heap-consistency.h",
+ "include/cppgc/heap-state.h",
+ "include/cppgc/heap-statistics.h",
"include/cppgc/heap.h",
"include/cppgc/internal/api-constants.h",
"include/cppgc/internal/atomic-entry-flag.h",
@@ -4489,20 +4731,21 @@ v8_source_set("cppgc_base") {
"include/cppgc/internal/persistent-node.h",
"include/cppgc/internal/pointer-policies.h",
"include/cppgc/internal/prefinalizer-handler.h",
- "include/cppgc/internal/process-heap.h",
"include/cppgc/internal/write-barrier.h",
"include/cppgc/liveness-broker.h",
"include/cppgc/macros.h",
"include/cppgc/member.h",
"include/cppgc/name-provider.h",
+ "include/cppgc/object-size-trait.h",
"include/cppgc/persistent.h",
"include/cppgc/platform.h",
"include/cppgc/prefinalizer.h",
+ "include/cppgc/process-heap-statistics.h",
+ "include/cppgc/sentinel-pointer.h",
"include/cppgc/source-location.h",
"include/cppgc/trace-trait.h",
"include/cppgc/type-traits.h",
"include/cppgc/visitor.h",
- "include/v8config.h",
"src/heap/cppgc/allocation.cc",
"src/heap/cppgc/compaction-worklists.cc",
"src/heap/cppgc/compaction-worklists.h",
@@ -4521,6 +4764,7 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/gc-invoker.h",
"src/heap/cppgc/heap-base.cc",
"src/heap/cppgc/heap-base.h",
+ "src/heap/cppgc/heap-consistency.cc",
"src/heap/cppgc/heap-growing.cc",
"src/heap/cppgc/heap-growing.h",
"src/heap/cppgc/heap-object-header.cc",
@@ -4529,6 +4773,9 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/heap-page.h",
"src/heap/cppgc/heap-space.cc",
"src/heap/cppgc/heap-space.h",
+ "src/heap/cppgc/heap-state.cc",
+ "src/heap/cppgc/heap-statistics-collector.cc",
+ "src/heap/cppgc/heap-statistics-collector.h",
"src/heap/cppgc/heap-visitor.h",
"src/heap/cppgc/heap.cc",
"src/heap/cppgc/heap.h",
@@ -4547,9 +4794,11 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/marking-visitor.h",
"src/heap/cppgc/marking-worklists.cc",
"src/heap/cppgc/marking-worklists.h",
+ "src/heap/cppgc/metric-recorder.h",
"src/heap/cppgc/name-trait.cc",
"src/heap/cppgc/object-allocator.cc",
"src/heap/cppgc/object-allocator.h",
+ "src/heap/cppgc/object-size-trait.cc",
"src/heap/cppgc/object-start-bitmap.h",
"src/heap/cppgc/page-memory.cc",
"src/heap/cppgc/page-memory.h",
@@ -4558,11 +4807,12 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/pointer-policies.cc",
"src/heap/cppgc/prefinalizer-handler.cc",
"src/heap/cppgc/prefinalizer-handler.h",
+ "src/heap/cppgc/process-heap-statistics.cc",
+ "src/heap/cppgc/process-heap-statistics.h",
"src/heap/cppgc/process-heap.cc",
"src/heap/cppgc/process-heap.h",
"src/heap/cppgc/raw-heap.cc",
"src/heap/cppgc/raw-heap.h",
- "src/heap/cppgc/sanitizers.h",
"src/heap/cppgc/source-location.cc",
"src/heap/cppgc/stats-collector.cc",
"src/heap/cppgc/stats-collector.h",
@@ -4575,6 +4825,7 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/virtual-memory.h",
"src/heap/cppgc/visitor.cc",
"src/heap/cppgc/write-barrier.cc",
+ "src/heap/cppgc/write-barrier.h",
]
if (cppgc_is_standalone) {
@@ -4598,12 +4849,29 @@ v8_source_set("cppgc_base") {
]
public_deps = [
+ ":v8_config_headers",
":v8_cppgc_shared",
":v8_libbase",
":v8_libplatform",
]
}
+v8_source_set("cppgc_base_for_testing") {
+ visibility = [ ":*" ]
+
+ sources = [
+ "include/cppgc/testing.h",
+ "src/heap/cppgc/testing.cc",
+ ]
+
+ configs = [
+ ":internal_config",
+ ":cppgc_base_config",
+ ]
+
+ public_deps = [ ":cppgc_base" ]
+}
+
###############################################################################
# Produce a single static library for embedders
#
@@ -4633,6 +4901,7 @@ v8_static_library("wee8") {
":v8_libbase",
":v8_libplatform",
":v8_libsampler",
+ ":v8_shared_internal_headers",
":v8_snapshot",
"//build/win:default_exe_manifest",
]
@@ -4664,6 +4933,7 @@ if (current_toolchain == v8_generator_toolchain) {
"src/builtins/generate-bytecodes-builtins-list.cc",
"src/interpreter/bytecode-operands.cc",
"src/interpreter/bytecode-operands.h",
+ "src/interpreter/bytecode-traits.h",
"src/interpreter/bytecodes.cc",
"src/interpreter/bytecodes.h",
]
@@ -4672,6 +4942,7 @@ if (current_toolchain == v8_generator_toolchain) {
deps = [
":v8_libbase",
+ ":v8_shared_internal_headers",
"//build/win:default_exe_manifest",
]
}
@@ -4712,6 +4983,7 @@ if (current_toolchain == v8_snapshot_toolchain) {
":v8_libbase",
":v8_libplatform",
":v8_maybe_icu",
+ ":v8_shared_internal_headers",
":v8_tracing",
":v8_wrappers",
"//build/win:default_exe_manifest",
@@ -4785,10 +5057,14 @@ if (v8_enable_i18n_support) {
v8_executable("gen-regexp-special-case") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
- sources = [ "src/regexp/gen-regexp-special-case.cc" ]
+ sources = [
+ "src/regexp/gen-regexp-special-case.cc",
+ "src/regexp/special-case.h",
+ ]
deps = [
":v8_libbase",
+ ":v8_shared_internal_headers",
"//build/win:default_exe_manifest",
"//third_party/icu",
]
@@ -4893,6 +5169,7 @@ if (is_fuchsia && !build_with_chromium) {
cr_fuchsia_package("d8_fuchsia_pkg") {
testonly = true
binary = ":d8"
+ manifest = "//build/config/fuchsia/tests-with-exec.cmx"
package_name_override = "d8"
}
@@ -4939,6 +5216,7 @@ if (is_component_build) {
sources = [ "src/utils/v8dll-main.cc" ]
public_deps = [
+ ":cppgc_base_for_testing",
":torque_base",
":torque_ls_base",
":v8_base",
@@ -4955,12 +5233,11 @@ if (is_component_build) {
v8_component("cppgc") {
public_deps = [ ":cppgc_base" ]
- configs = [ ":internal_config" ]
-
if (!cppgc_is_standalone) {
deps = [ ":v8" ]
}
+ configs = []
public_configs = [ ":external_config" ]
}
@@ -4968,9 +5245,12 @@ if (is_component_build) {
v8_component("cppgc_for_testing") {
testonly = true
- public_deps = [ ":cppgc_base" ]
+ public_deps = [
+ ":cppgc_base",
+ ":cppgc_base_for_testing",
+ ]
- configs = [ ":internal_config" ]
+ configs = []
public_configs = [ ":external_config" ]
}
}
@@ -4980,7 +5260,7 @@ if (is_component_build) {
public_deps = [ ":v8_cppgc_shared" ]
- configs = [ ":internal_config" ]
+ configs = []
public_configs = [ ":external_config" ]
}
} else {
@@ -4997,6 +5277,7 @@ if (is_component_build) {
testonly = true
public_deps = [
+ ":cppgc_base_for_testing",
":torque_base",
":torque_ls_base",
":v8_base",
@@ -5021,7 +5302,10 @@ if (is_component_build) {
group("cppgc_for_testing") {
testonly = true
- public_deps = [ ":cppgc_base" ]
+ public_deps = [
+ ":cppgc_base",
+ ":cppgc_base_for_testing",
+ ]
public_configs = [ ":external_config" ]
}
@@ -5149,6 +5433,10 @@ if (want_v8_shell) {
v8_executable("cppgc_sample") {
sources = [ "samples/cppgc/cppgc-sample.cc" ]
+ if (v8_current_cpu == "riscv64") {
+ libs = [ "atomic" ]
+ }
+
configs = [
# Note: don't use :internal_config here because this target will get
# the :external_config applied to it by virtue of depending on :cppgc, and
@@ -5277,6 +5565,8 @@ v8_source_set("wasm_test_common") {
deps = [
":generate_bytecode_builtins_list",
":run_torque",
+ ":v8_libbase",
+ ":v8_shared_internal_headers",
":v8_tracing",
]
@@ -5358,6 +5648,7 @@ v8_source_set("lib_wasm_fuzzer_common") {
":generate_bytecode_builtins_list",
":run_torque",
":v8_tracing",
+ ":wasm_test_common",
]
public_deps = [ ":v8_maybe_icu" ]
@@ -5468,28 +5759,77 @@ if (!build_with_chromium && v8_use_perfetto) {
sources = [
"third_party/protobuf/src/google/protobuf/any_lite.cc",
"third_party/protobuf/src/google/protobuf/arena.cc",
+ "third_party/protobuf/src/google/protobuf/arena.h",
+ "third_party/protobuf/src/google/protobuf/arena_impl.h",
+ "third_party/protobuf/src/google/protobuf/arenastring.h",
"third_party/protobuf/src/google/protobuf/extension_set.cc",
+ "third_party/protobuf/src/google/protobuf/extension_set.h",
+ "third_party/protobuf/src/google/protobuf/generated_enum_util.cc",
+ "third_party/protobuf/src/google/protobuf/generated_enum_util.h",
"third_party/protobuf/src/google/protobuf/generated_message_table_driven_lite.cc",
+ "third_party/protobuf/src/google/protobuf/generated_message_table_driven_lite.h",
"third_party/protobuf/src/google/protobuf/generated_message_util.cc",
+ "third_party/protobuf/src/google/protobuf/generated_message_util.h",
+ "third_party/protobuf/src/google/protobuf/has_bits.h",
"third_party/protobuf/src/google/protobuf/implicit_weak_message.cc",
+ "third_party/protobuf/src/google/protobuf/implicit_weak_message.h",
+ "third_party/protobuf/src/google/protobuf/inlined_string_field.h",
"third_party/protobuf/src/google/protobuf/io/coded_stream.cc",
+ "third_party/protobuf/src/google/protobuf/io/coded_stream.h",
+ "third_party/protobuf/src/google/protobuf/io/coded_stream_inl.h",
+ "third_party/protobuf/src/google/protobuf/io/io_win32.cc",
+ "third_party/protobuf/src/google/protobuf/io/io_win32.h",
"third_party/protobuf/src/google/protobuf/io/strtod.cc",
+ "third_party/protobuf/src/google/protobuf/io/strtod.h",
"third_party/protobuf/src/google/protobuf/io/zero_copy_stream.cc",
+ "third_party/protobuf/src/google/protobuf/io/zero_copy_stream.h",
+ "third_party/protobuf/src/google/protobuf/io/zero_copy_stream_impl.cc",
+ "third_party/protobuf/src/google/protobuf/io/zero_copy_stream_impl.h",
"third_party/protobuf/src/google/protobuf/io/zero_copy_stream_impl_lite.cc",
+ "third_party/protobuf/src/google/protobuf/io/zero_copy_stream_impl_lite.h",
+ "third_party/protobuf/src/google/protobuf/map.h",
+ "third_party/protobuf/src/google/protobuf/map_entry_lite.h",
+ "third_party/protobuf/src/google/protobuf/map_field_lite.h",
+ "third_party/protobuf/src/google/protobuf/map_type_handler.h",
"third_party/protobuf/src/google/protobuf/message_lite.cc",
+ "third_party/protobuf/src/google/protobuf/message_lite.h",
"third_party/protobuf/src/google/protobuf/repeated_field.cc",
+ "third_party/protobuf/src/google/protobuf/repeated_field.h",
"third_party/protobuf/src/google/protobuf/stubs/bytestream.cc",
+ "third_party/protobuf/src/google/protobuf/stubs/bytestream.h",
+ "third_party/protobuf/src/google/protobuf/stubs/callback.h",
+ "third_party/protobuf/src/google/protobuf/stubs/casts.h",
"third_party/protobuf/src/google/protobuf/stubs/common.cc",
+ "third_party/protobuf/src/google/protobuf/stubs/common.h",
+ "third_party/protobuf/src/google/protobuf/stubs/fastmem.h",
+ "third_party/protobuf/src/google/protobuf/stubs/hash.h",
"third_party/protobuf/src/google/protobuf/stubs/int128.cc",
- "third_party/protobuf/src/google/protobuf/stubs/io_win32.cc",
+ "third_party/protobuf/src/google/protobuf/stubs/int128.h",
+ "third_party/protobuf/src/google/protobuf/stubs/logging.h",
+ "third_party/protobuf/src/google/protobuf/stubs/macros.h",
+ "third_party/protobuf/src/google/protobuf/stubs/map_util.h",
+ "third_party/protobuf/src/google/protobuf/stubs/mutex.h",
+ "third_party/protobuf/src/google/protobuf/stubs/once.h",
+ "third_party/protobuf/src/google/protobuf/stubs/platform_macros.h",
+ "third_party/protobuf/src/google/protobuf/stubs/port.h",
"third_party/protobuf/src/google/protobuf/stubs/status.cc",
+ "third_party/protobuf/src/google/protobuf/stubs/status.h",
+ "third_party/protobuf/src/google/protobuf/stubs/status_macros.h",
"third_party/protobuf/src/google/protobuf/stubs/statusor.cc",
+ "third_party/protobuf/src/google/protobuf/stubs/statusor.h",
+ "third_party/protobuf/src/google/protobuf/stubs/stl_util.h",
"third_party/protobuf/src/google/protobuf/stubs/stringpiece.cc",
+ "third_party/protobuf/src/google/protobuf/stubs/stringpiece.h",
"third_party/protobuf/src/google/protobuf/stubs/stringprintf.cc",
+ "third_party/protobuf/src/google/protobuf/stubs/stringprintf.h",
"third_party/protobuf/src/google/protobuf/stubs/structurally_valid.cc",
"third_party/protobuf/src/google/protobuf/stubs/strutil.cc",
+ "third_party/protobuf/src/google/protobuf/stubs/strutil.h",
+ "third_party/protobuf/src/google/protobuf/stubs/template_util.h",
"third_party/protobuf/src/google/protobuf/stubs/time.cc",
+ "third_party/protobuf/src/google/protobuf/stubs/time.h",
"third_party/protobuf/src/google/protobuf/wire_format_lite.cc",
+ "third_party/protobuf/src/google/protobuf/wire_format_lite.h",
]
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [
@@ -5507,59 +5847,119 @@ if (!build_with_chromium && v8_use_perfetto) {
deps = [ ":protobuf_lite" ]
sources = [
"third_party/protobuf/src/google/protobuf/any.cc",
+ "third_party/protobuf/src/google/protobuf/any.h",
"third_party/protobuf/src/google/protobuf/any.pb.cc",
+ "third_party/protobuf/src/google/protobuf/any.pb.h",
"third_party/protobuf/src/google/protobuf/api.pb.cc",
+ "third_party/protobuf/src/google/protobuf/api.pb.h",
"third_party/protobuf/src/google/protobuf/compiler/importer.cc",
+ "third_party/protobuf/src/google/protobuf/compiler/importer.h",
"third_party/protobuf/src/google/protobuf/compiler/parser.cc",
+ "third_party/protobuf/src/google/protobuf/compiler/parser.h",
"third_party/protobuf/src/google/protobuf/descriptor.cc",
+ "third_party/protobuf/src/google/protobuf/descriptor.h",
"third_party/protobuf/src/google/protobuf/descriptor.pb.cc",
+ "third_party/protobuf/src/google/protobuf/descriptor.pb.h",
"third_party/protobuf/src/google/protobuf/descriptor_database.cc",
+ "third_party/protobuf/src/google/protobuf/descriptor_database.h",
"third_party/protobuf/src/google/protobuf/duration.pb.cc",
+ "third_party/protobuf/src/google/protobuf/duration.pb.h",
"third_party/protobuf/src/google/protobuf/dynamic_message.cc",
+ "third_party/protobuf/src/google/protobuf/dynamic_message.h",
"third_party/protobuf/src/google/protobuf/empty.pb.cc",
+ "third_party/protobuf/src/google/protobuf/empty.pb.h",
"third_party/protobuf/src/google/protobuf/extension_set_heavy.cc",
"third_party/protobuf/src/google/protobuf/field_mask.pb.cc",
+ "third_party/protobuf/src/google/protobuf/field_mask.pb.h",
+ "third_party/protobuf/src/google/protobuf/generated_enum_reflection.h",
"third_party/protobuf/src/google/protobuf/generated_message_reflection.cc",
- "third_party/protobuf/src/google/protobuf/generated_message_table_driven.cc",
+ "third_party/protobuf/src/google/protobuf/generated_message_reflection.h",
"third_party/protobuf/src/google/protobuf/io/gzip_stream.cc",
+ "third_party/protobuf/src/google/protobuf/io/gzip_stream.h",
"third_party/protobuf/src/google/protobuf/io/printer.cc",
+ "third_party/protobuf/src/google/protobuf/io/printer.h",
"third_party/protobuf/src/google/protobuf/io/tokenizer.cc",
- "third_party/protobuf/src/google/protobuf/io/zero_copy_stream_impl.cc",
+ "third_party/protobuf/src/google/protobuf/io/tokenizer.h",
+ "third_party/protobuf/src/google/protobuf/map_entry.h",
"third_party/protobuf/src/google/protobuf/map_field.cc",
+ "third_party/protobuf/src/google/protobuf/map_field.h",
+ "third_party/protobuf/src/google/protobuf/map_field_inl.h",
"third_party/protobuf/src/google/protobuf/message.cc",
+ "third_party/protobuf/src/google/protobuf/message.h",
+ "third_party/protobuf/src/google/protobuf/metadata.h",
+ "third_party/protobuf/src/google/protobuf/reflection.h",
+ "third_party/protobuf/src/google/protobuf/reflection_internal.h",
"third_party/protobuf/src/google/protobuf/reflection_ops.cc",
+ "third_party/protobuf/src/google/protobuf/reflection_ops.h",
"third_party/protobuf/src/google/protobuf/service.cc",
+ "third_party/protobuf/src/google/protobuf/service.h",
"third_party/protobuf/src/google/protobuf/source_context.pb.cc",
+ "third_party/protobuf/src/google/protobuf/source_context.pb.h",
"third_party/protobuf/src/google/protobuf/struct.pb.cc",
+ "third_party/protobuf/src/google/protobuf/struct.pb.h",
"third_party/protobuf/src/google/protobuf/stubs/mathlimits.cc",
+ "third_party/protobuf/src/google/protobuf/stubs/mathlimits.h",
+ "third_party/protobuf/src/google/protobuf/stubs/mathutil.h",
"third_party/protobuf/src/google/protobuf/stubs/substitute.cc",
+ "third_party/protobuf/src/google/protobuf/stubs/substitute.h",
"third_party/protobuf/src/google/protobuf/text_format.cc",
+ "third_party/protobuf/src/google/protobuf/text_format.h",
"third_party/protobuf/src/google/protobuf/timestamp.pb.cc",
+ "third_party/protobuf/src/google/protobuf/timestamp.pb.h",
"third_party/protobuf/src/google/protobuf/type.pb.cc",
+ "third_party/protobuf/src/google/protobuf/type.pb.h",
"third_party/protobuf/src/google/protobuf/unknown_field_set.cc",
- "third_party/protobuf/src/google/protobuf/util/delimited_message_util.cc",
+ "third_party/protobuf/src/google/protobuf/unknown_field_set.h",
"third_party/protobuf/src/google/protobuf/util/field_comparator.cc",
+ "third_party/protobuf/src/google/protobuf/util/field_comparator.h",
"third_party/protobuf/src/google/protobuf/util/field_mask_util.cc",
+ "third_party/protobuf/src/google/protobuf/util/field_mask_util.h",
+ "third_party/protobuf/src/google/protobuf/util/internal/constants.h",
"third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc",
+ "third_party/protobuf/src/google/protobuf/util/internal/datapiece.h",
"third_party/protobuf/src/google/protobuf/util/internal/default_value_objectwriter.cc",
+ "third_party/protobuf/src/google/protobuf/util/internal/default_value_objectwriter.h",
"third_party/protobuf/src/google/protobuf/util/internal/error_listener.cc",
+ "third_party/protobuf/src/google/protobuf/util/internal/error_listener.h",
"third_party/protobuf/src/google/protobuf/util/internal/field_mask_utility.cc",
+ "third_party/protobuf/src/google/protobuf/util/internal/field_mask_utility.h",
"third_party/protobuf/src/google/protobuf/util/internal/json_escaping.cc",
+ "third_party/protobuf/src/google/protobuf/util/internal/json_escaping.h",
"third_party/protobuf/src/google/protobuf/util/internal/json_objectwriter.cc",
+ "third_party/protobuf/src/google/protobuf/util/internal/json_objectwriter.h",
"third_party/protobuf/src/google/protobuf/util/internal/json_stream_parser.cc",
+ "third_party/protobuf/src/google/protobuf/util/internal/json_stream_parser.h",
+ "third_party/protobuf/src/google/protobuf/util/internal/location_tracker.h",
+ "third_party/protobuf/src/google/protobuf/util/internal/object_location_tracker.h",
+ "third_party/protobuf/src/google/protobuf/util/internal/object_source.h",
"third_party/protobuf/src/google/protobuf/util/internal/object_writer.cc",
+ "third_party/protobuf/src/google/protobuf/util/internal/object_writer.h",
"third_party/protobuf/src/google/protobuf/util/internal/proto_writer.cc",
+ "third_party/protobuf/src/google/protobuf/util/internal/proto_writer.h",
"third_party/protobuf/src/google/protobuf/util/internal/protostream_objectsource.cc",
+ "third_party/protobuf/src/google/protobuf/util/internal/protostream_objectsource.h",
"third_party/protobuf/src/google/protobuf/util/internal/protostream_objectwriter.cc",
+ "third_party/protobuf/src/google/protobuf/util/internal/protostream_objectwriter.h",
+ "third_party/protobuf/src/google/protobuf/util/internal/structured_objectwriter.h",
"third_party/protobuf/src/google/protobuf/util/internal/type_info.cc",
+ "third_party/protobuf/src/google/protobuf/util/internal/type_info.h",
"third_party/protobuf/src/google/protobuf/util/internal/type_info_test_helper.cc",
+ "third_party/protobuf/src/google/protobuf/util/internal/type_info_test_helper.h",
"third_party/protobuf/src/google/protobuf/util/internal/utility.cc",
+ "third_party/protobuf/src/google/protobuf/util/internal/utility.h",
"third_party/protobuf/src/google/protobuf/util/json_util.cc",
+ "third_party/protobuf/src/google/protobuf/util/json_util.h",
"third_party/protobuf/src/google/protobuf/util/message_differencer.cc",
+ "third_party/protobuf/src/google/protobuf/util/message_differencer.h",
"third_party/protobuf/src/google/protobuf/util/time_util.cc",
+ "third_party/protobuf/src/google/protobuf/util/time_util.h",
+ "third_party/protobuf/src/google/protobuf/util/type_resolver.h",
"third_party/protobuf/src/google/protobuf/util/type_resolver_util.cc",
+ "third_party/protobuf/src/google/protobuf/util/type_resolver_util.h",
"third_party/protobuf/src/google/protobuf/wire_format.cc",
+ "third_party/protobuf/src/google/protobuf/wire_format.h",
"third_party/protobuf/src/google/protobuf/wrappers.pb.cc",
+ "third_party/protobuf/src/google/protobuf/wrappers.pb.h",
]
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [
@@ -5577,25 +5977,47 @@ if (!build_with_chromium && v8_use_perfetto) {
deps = [ ":protobuf_full" ]
sources = [
"third_party/protobuf/src/google/protobuf/compiler/code_generator.cc",
+ "third_party/protobuf/src/google/protobuf/compiler/code_generator.h",
"third_party/protobuf/src/google/protobuf/compiler/command_line_interface.cc",
+ "third_party/protobuf/src/google/protobuf/compiler/command_line_interface.h",
"third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_enum.cc",
+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_enum.h",
"third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_enum_field.cc",
+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_enum_field.h",
"third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_extension.cc",
+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_extension.h",
"third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_field.cc",
+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_field.h",
"third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_file.cc",
+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_file.h",
"third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_generator.cc",
+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_generator.h",
"third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_helpers.cc",
+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_helpers.h",
"third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_map_field.cc",
+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_map_field.h",
"third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_message.cc",
+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_message.h",
"third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_message_field.cc",
+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_message_field.h",
+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_message_layout_helper.h",
+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_options.h",
"third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_padding_optimizer.cc",
+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_padding_optimizer.h",
"third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_primitive_field.cc",
+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_primitive_field.h",
"third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_service.cc",
+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_service.h",
"third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_string_field.cc",
+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_string_field.h",
"third_party/protobuf/src/google/protobuf/compiler/plugin.cc",
+ "third_party/protobuf/src/google/protobuf/compiler/plugin.h",
"third_party/protobuf/src/google/protobuf/compiler/plugin.pb.cc",
+ "third_party/protobuf/src/google/protobuf/compiler/plugin.pb.h",
"third_party/protobuf/src/google/protobuf/compiler/subprocess.cc",
+ "third_party/protobuf/src/google/protobuf/compiler/subprocess.h",
"third_party/protobuf/src/google/protobuf/compiler/zip_writer.cc",
+ "third_party/protobuf/src/google/protobuf/compiler/zip_writer.h",
]
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [
@@ -5632,7 +6054,7 @@ if (!build_with_chromium && v8_use_perfetto) {
"//third_party/perfetto/protos/perfetto/config:cpp",
"//third_party/perfetto/protos/perfetto/trace/track_event:zero",
"//third_party/perfetto/src/tracing:in_process_backend",
- "//third_party/perfetto/src/tracing:platform_posix",
+ "//third_party/perfetto/src/tracing:platform_impl",
]
}
} # if (!build_with_chromium && v8_use_perfetto)
diff --git a/deps/v8/COMMON_OWNERS b/deps/v8/COMMON_OWNERS
index 74026eaeff..a6aff24098 100644
--- a/deps/v8/COMMON_OWNERS
+++ b/deps/v8/COMMON_OWNERS
@@ -34,7 +34,6 @@ sigurds@chromium.org
solanes@chromium.org
syg@chromium.org
szuend@chromium.org
-tebbi@chromium.org
thibaudm@chromium.org
ulan@chromium.org
vahl@chromium.org
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index e6c2e762e0..48ddbad6af 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -47,10 +47,10 @@ vars = {
'checkout_google_benchmark' : False,
# GN CIPD package version.
- 'gn_version': 'git_revision:595e3be7c8381d4eeefce62a63ec12bae9ce5140',
+ 'gn_version': 'git_revision:dfcbc6fed0a8352696f92d67ccad54048ad182b3',
# luci-go CIPD package version.
- 'luci_go': 'git_revision:67aba6e3373bb0b9e3ef9871362045736cd29b6e',
+ 'luci_go': 'git_revision:fd10124659e991321df2f8a5d3749687b54ceb0a',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_build-tools_version
@@ -88,17 +88,17 @@ vars = {
deps = {
'build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + 'd5995537211ebc4d1bc37f215c25fa3781ba9d6e',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + '446bf3e5a00bfe4fd99d91cb76ec3b3a7b34d226',
'third_party/depot_tools':
- Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '82b992a1656d7d1cd0ee3cbea8ff609ffdfed380',
+ Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '5fe664f150beaf71104ce7787560fabdb55ebf5b',
'third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '899e18383fd732b47e6978db2b960a1b2a80179b',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'e05b663d1c50b4e9ecc3ff9325f5158f1d071471',
'third_party/instrumented_libraries':
- Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '4d3867052d35b2171f2edbb3466fa8f7e2d11319',
+ Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '0964a78c832d1d0f2669b020b073c38f67509cf2',
'buildtools':
- Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '235cfe435ca5a9826569ee4ef603e226216bd768',
+ Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '4c78ef9c38b683c5c5cbac70445378c2362cebfc',
'buildtools/clang_format/script':
- Var('chromium_url') + '/chromium/llvm-project/cfe/tools/clang-format.git' + '@' + '96636aa0e9f047f17447f2d45a094d0b59ed7917',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '99803d74e35962f63a775f29477882afd4d57d94',
'buildtools/linux64': {
'packages': [
{
@@ -120,11 +120,11 @@ deps = {
'condition': 'host_os == "mac"',
},
'buildtools/third_party/libc++/trunk':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + 'd9040c75cfea5928c804ab7c235fed06a63f743a',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '8fa87946779682841e21e2da977eccfb6cb3bded',
'buildtools/third_party/libc++abi/trunk':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '196ba1aaa8ac285d94f4ea8d9836390a45360533',
'buildtools/third_party/libunwind/trunk':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'd999d54f4bca789543a2eb6c995af2d9b5a1f3ed',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'a2cc4f8c554dedcb0c64cac5511b19c43f1f3d32',
'buildtools/win': {
'packages': [
{
@@ -136,13 +136,13 @@ deps = {
'condition': 'host_os == "win"',
},
'base/trace_event/common':
- Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'eb94f1c7aa96207f469008f29989a43feb2718f8',
+ Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '7af6071eddf11ad91fbd5df54138f9d3c6d980d5',
'third_party/android_ndk': {
- 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '27c0a8d090c666a50e40fceb4ee5b40b1a2d3f87',
+ 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '401019bf85744311b26c88ced255cd53401af8b7',
'condition': 'checkout_android',
},
'third_party/android_platform': {
- 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'ef64306e7772dea22df5f98102e6288da3510843',
+ 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'fdaa5e5b9f6384c3011ca8479d672e47528f743f',
'condition': 'checkout_android',
},
'third_party/android_sdk/public': {
@@ -184,7 +184,7 @@ deps = {
'dep_type': 'cipd',
},
'third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + 'd1a3011cd91205aa96b74b5dfc227d391e88108d',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + '81c9d30d7f1b3c1ab0f1856761f738cc81741322',
'condition': 'checkout_android',
},
'third_party/colorama/src': {
@@ -196,7 +196,7 @@ deps = {
'condition': 'checkout_fuchsia',
},
'third_party/googletest/src':
- Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '1b0cdaae57c046c87fb99cb4f69c312a7e794adb',
+ Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '1e315c5b1a62707fac9b8f1d4e03180ee7507f98',
'third_party/google_benchmark/src': {
'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + '7f27afe83b82f3a98baf58ef595814b9d42a5b2b',
'condition': 'checkout_google_benchmark',
@@ -206,15 +206,15 @@ deps = {
'third_party/markupsafe':
Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '0944e71f4b2cb9a871bcbe353f95e889b64a611a',
'tools/swarming_client':
- Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '1a072711d4388c62e02480fabc26c68c24494be9',
+ Var('chromium_url') + '/infra/luci/client-py.git' + '@' + 'a32a1607f6093d338f756c7e7c7b4333b0c50c9c',
'test/benchmarks/data':
Var('chromium_url') + '/v8/deps/third_party/benchmarks.git' + '@' + '05d7188267b4560491ff9155c5ee13e207ecd65f',
'test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'test/test262/data':
- Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'b2e9dff2816cceb5ee84c0c226c50a31d01a7297',
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'f6034ebe9fb92d4d3dea644b9225bdc18b44a7ab',
'test/test262/harness':
- Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '4555345a943d0c99a9461182705543fb171dda4b',
+ Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '278bcfaed0dcaa13936831fb1769d15e7c1e3b2b',
'third_party/qemu-linux-x64': {
'packages': [
{
@@ -239,7 +239,7 @@ deps = {
'packages': [
{
'package': 'fuchsia/third_party/aemu/linux-amd64',
- 'version': 'xAHa1IXmKteChkPvba9ezjSnKL7IyDePQRzWVUEAx9UC'
+ 'version': 'qI8e328VwkWv64EapCvG3Xj9_hDpKQFuJWeVdUHz7W0C'
},
],
'condition': 'host_os == "linux" and checkout_fuchsia',
@@ -256,7 +256,7 @@ deps = {
'dep_type': 'cipd',
},
'tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '2246bee280e908ac1fd27ab75e7d0021b14d875c',
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'cfd0f628093b7382ac054fb33e23fa9d9a278bc3',
'tools/luci-go': {
'packages': [
{
@@ -286,11 +286,11 @@ deps = {
'dep_type': 'cipd',
},
'third_party/perfetto':
- Var('android_url') + '/platform/external/perfetto.git' + '@' + '7cdc44f903d3bcfd1d0f67188bfa797a24756868',
+ Var('android_url') + '/platform/external/perfetto.git' + '@' + 'aa4385bc5997ecad4c633885e1b331b1115012fb',
'third_party/protobuf':
- Var('chromium_url') + '/external/github.com/google/protobuf'+ '@' + 'b68a347f56137b4b1a746e8c7438495a6ac1bd91',
+ Var('chromium_url') + '/external/github.com/google/protobuf'+ '@' + '6a59a2ad1f61d9696092f79b6d74368b4d7970a3',
'third_party/zlib':
- Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '2c183c9f93a328bfb3121284da13cf89a0f7e64a',
+ Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '348acca950b1d6de784a954f4fda0952046c652c',
'third_party/jsoncpp/source':
Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '9059f5cad030ba11d37818847443a53918c327b1',
'third_party/ittapi': {
diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS
index 3698d14dd9..2a478dbdc5 100644
--- a/deps/v8/OWNERS
+++ b/deps/v8/OWNERS
@@ -26,4 +26,5 @@ per-file WATCHLIST=file:COMMON_OWNERS
per-file *-mips*=file:MIPS_OWNERS
per-file *-mips64*=file:MIPS_OWNERS
per-file *-ppc*=file:PPC_OWNERS
+per-file *-riscv64*=file:RISCV_OWNERS
per-file *-s390*=file:S390_OWNERS
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index 113ed2fd61..2ee14d545e 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -64,6 +64,8 @@ _TEST_CODE_EXCLUDED_PATHS = (
r'src[\\\/]extensions[\\\/]gc-extension\.cc',
# Runtime functions used for testing.
r'src[\\\/]runtime[\\\/]runtime-test\.cc',
+ # Testing helpers.
+ r'src[\\\/]heap[\\\/]cppgc[\\\/]testing\.cc',
)
@@ -480,8 +482,10 @@ def _CheckNoexceptAnnotations(input_api, output_api):
def FilterFile(affected_file):
return input_api.FilterSourceFile(
affected_file,
- files_to_check=(r'src/.*', r'test/.*'))
-
+ files_to_check=(r'src[\\\/].*', r'test[\\\/].*'),
+ # Skip api.cc since we cannot easily add the 'noexcept' annotation to
+ # public methods.
+ files_to_skip=(r'src[\\\/]api[\\\/]api\.cc',))
# matches any class name.
class_name = r'\b([A-Z][A-Za-z0-9_:]*)(?:::\1)?'
diff --git a/deps/v8/RISCV_OWNERS b/deps/v8/RISCV_OWNERS
new file mode 100644
index 0000000000..f3240b500b
--- /dev/null
+++ b/deps/v8/RISCV_OWNERS
@@ -0,0 +1,3 @@
+brice.dobry@futurewei.com
+lazyparser@gmail.com
+peng.w@rioslab.org
diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h
index 120481f30f..9b6783bb35 100644
--- a/deps/v8/base/trace_event/common/trace_event_common.h
+++ b/deps/v8/base/trace_event/common/trace_event_common.h
@@ -5,16 +5,6 @@
#ifndef BASE_TRACE_EVENT_COMMON_TRACE_EVENT_COMMON_H_
#define BASE_TRACE_EVENT_COMMON_TRACE_EVENT_COMMON_H_
-// This header file defines the set of trace_event macros without specifying
-// how the events actually get collected and stored. If you need to expose trace
-// events to some other universe, you can copy-and-paste this file as well as
-// trace_event.h, modifying the macros contained there as necessary for the
-// target platform. The end result is that multiple libraries can funnel events
-// through to a shared trace event collector.
-
-// IMPORTANT: To avoid conflicts, if you need to modify this file for a library,
-// land your change in base/ first, and then copy-and-paste it.
-
// Trace events are for tracking application performance and resource usage.
// Macros are provided to track:
// Begin and end of function calls
@@ -194,6 +184,100 @@
// trace points would carry a significant performance cost of acquiring a lock
// and resolving the category.
+// There are currently two implementations of the tracing macros. The first is
+// Perfetto (https://perfetto.dev/), which implements a compatible set of
+// macros that we are migrating to. The Perfetto implementation is enabled
+// through the use_perfetto_client_library GN arg. If that flag is disabled, we
+// fall back to the legacy implementation in the latter half of this file (and
+// trace_event.h).
+// TODO(skyostil): Remove the legacy macro implementation.
+
+// Normally we'd use BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY) for this, but
+// because v8 includes trace_event_common.h directly (in non-Perfetto mode), we
+// can't depend on any other header files here.
+#if defined(BASE_USE_PERFETTO_CLIENT_LIBRARY)
+////////////////////////////////////////////////////////////////////////////////
+// Perfetto trace macros
+
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+
+// Export Perfetto symbols in the same way as //base symbols.
+#define PERFETTO_COMPONENT_EXPORT BASE_EXPORT
+
+// Enable legacy trace event macros (e.g., TRACE_EVENT{0,1,2}).
+#define PERFETTO_ENABLE_LEGACY_TRACE_EVENTS 1
+
+// Macros for reading the current trace time (bypassing any virtual time
+// overrides).
+#define TRACE_TIME_TICKS_NOW() ::base::subtle::TimeTicksNowIgnoringOverride()
+#define TRACE_TIME_NOW() ::base::subtle::TimeNowIgnoringOverride()
+
+// Implementation detail: trace event macros create temporary variables
+// to keep instrumentation overhead low. These macros give each temporary
+// variable a unique name based on the line number to prevent name collisions.
+#define INTERNAL_TRACE_EVENT_UID(name_prefix) PERFETTO_UID(name_prefix)
+
+// Special trace event macro to trace task execution with the location where it
+// was posted from.
+// TODO(skyostil): Convert this into a regular typed trace event.
+#define TRACE_TASK_EXECUTION(run_function, task) \
+ INTERNAL_TRACE_TASK_EXECUTION(run_function, task)
+
+// Special trace event macro to trace log messages.
+// TODO(skyostil): Convert this into a regular typed trace event.
+#define TRACE_LOG_MESSAGE(file, message, line) \
+ INTERNAL_TRACE_LOG_MESSAGE(file, message, line)
+
+// Declare debug annotation converters for base time types, so they can be
+// passed as trace event arguments.
+// TODO(skyostil): Serialize timestamps using perfetto::TracedValue instead.
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+class DebugAnnotation;
+} // namespace pbzero
+} // namespace protos
+namespace internal {
+
+void BASE_EXPORT
+WriteDebugAnnotation(protos::pbzero::DebugAnnotation* annotation,
+ ::base::TimeTicks);
+void BASE_EXPORT
+WriteDebugAnnotation(protos::pbzero::DebugAnnotation* annotation, ::base::Time);
+
+} // namespace internal
+} // namespace perfetto
+
+// Pull in the tracing macro definitions from Perfetto.
+#include "third_party/perfetto/include/perfetto/tracing.h"
+
+namespace perfetto {
+namespace legacy {
+
+template <>
+bool BASE_EXPORT ConvertThreadId(const ::base::PlatformThreadId& thread,
+ uint64_t* track_uuid_out,
+ int32_t* pid_override_out,
+ int32_t* tid_override_out);
+
+} // namespace legacy
+
+template <>
+BASE_EXPORT TraceTimestamp
+ConvertTimestampToTraceTimeNs(const ::base::TimeTicks& ticks);
+
+} // namespace perfetto
+
+#else // !defined(BASE_USE_PERFETTO_CLIENT_LIBRARY)
+////////////////////////////////////////////////////////////////////////////////
+// Legacy trace macros
+
+// What follows is the legacy TRACE_EVENT macro implementation, which is being
+// replaced by the Perfetto-based implementation above. New projects wishing to
+// enable tracing should use the Perfetto SDK. See
+// https://perfetto.dev/docs/instrumentation/tracing-sdk.
+
// Check that nobody includes this file directly. Clients are supposed to
// include the surrounding "trace_event.h" of their project instead.
#if defined(TRACE_EVENT0)
@@ -861,109 +945,6 @@
category_group, name, id, \
TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
-// Records a single FLOW_BEGIN event called "name" immediately, with 0, 1 or 2
-// associated arguments. If the category is not enabled, then this
-// does nothing.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-// - |id| is used to match the FLOW_BEGIN event with the FLOW_END event. FLOW
-// events are considered to match if their category_group, name and id values
-// all match. |id| must either be a pointer or an integer value up to 64 bits.
-// If it's a pointer, the bits will be xored with a hash of the process ID so
-// that the same pointer on two different processes will not collide.
-// FLOW events are different from ASYNC events in how they are drawn by the
-// tracing UI. A FLOW defines asynchronous data flow, such as posting a task
-// (FLOW_BEGIN) and later executing that task (FLOW_END). Expect FLOWs to be
-// drawn as lines or arrows from FLOW_BEGIN scopes to FLOW_END scopes. Similar
-// to ASYNC, a FLOW can consist of multiple phases. The first phase is defined
-// by the FLOW_BEGIN calls. Additional phases can be defined using the FLOW_STEP
-// macros. When the operation completes, call FLOW_END. An async operation can
-// span threads and processes, but all events in that operation must use the
-// same |name| and |id|. Each event can have its own args.
-#define TRACE_EVENT_FLOW_BEGIN0(category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
- category_group, name, id, \
- TRACE_EVENT_FLAG_NONE)
-#define TRACE_EVENT_FLOW_BEGIN1(category_group, name, id, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
- category_group, name, id, \
- TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
-#define TRACE_EVENT_FLOW_BEGIN2(category_group, name, id, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
- TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, id, \
- TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
-#define TRACE_EVENT_COPY_FLOW_BEGIN0(category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
- category_group, name, id, \
- TRACE_EVENT_FLAG_COPY)
-#define TRACE_EVENT_COPY_FLOW_BEGIN1(category_group, name, id, arg1_name, \
- arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
- category_group, name, id, \
- TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
-#define TRACE_EVENT_COPY_FLOW_BEGIN2(category_group, name, id, arg1_name, \
- arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
- TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, id, \
- TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
-
-// Records a single FLOW_STEP event for |step| immediately. If the category
-// is not enabled, then this does nothing. The |name| and |id| must match the
-// FLOW_BEGIN event above. The |step| param identifies this step within the
-// async event. This should be called at the beginning of the next phase of an
-// asynchronous operation.
-#define TRACE_EVENT_FLOW_STEP0(category_group, name, id, step) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
- category_group, name, id, \
- TRACE_EVENT_FLAG_NONE, "step", step)
-#define TRACE_EVENT_FLOW_STEP1(category_group, name, id, step, arg1_name, \
- arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
- TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, id, \
- TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
-#define TRACE_EVENT_COPY_FLOW_STEP0(category_group, name, id, step) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
- category_group, name, id, \
- TRACE_EVENT_FLAG_COPY, "step", step)
-#define TRACE_EVENT_COPY_FLOW_STEP1(category_group, name, id, step, arg1_name, \
- arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
- TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, id, \
- TRACE_EVENT_FLAG_COPY, "step", step, arg1_name, arg1_val)
-
-// Records a single FLOW_END event for "name" immediately. If the category
-// is not enabled, then this does nothing.
-#define TRACE_EVENT_FLOW_END0(category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
- name, id, TRACE_EVENT_FLAG_NONE)
-#define TRACE_EVENT_FLOW_END_BIND_TO_ENCLOSING0(category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
- name, id, \
- TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
-#define TRACE_EVENT_FLOW_END1(category_group, name, id, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
- name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
- arg1_val)
-#define TRACE_EVENT_FLOW_END2(category_group, name, id, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
- name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
- arg1_val, arg2_name, arg2_val)
-#define TRACE_EVENT_COPY_FLOW_END0(category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
- name, id, TRACE_EVENT_FLAG_COPY)
-#define TRACE_EVENT_COPY_FLOW_END1(category_group, name, id, arg1_name, \
- arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
- name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \
- arg1_val)
-#define TRACE_EVENT_COPY_FLOW_END2(category_group, name, id, arg1_name, \
- arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
- name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \
- arg1_val, arg2_name, arg2_val)
-
// Special trace event macro to trace task execution with the location where it
// was posted from.
#define TRACE_TASK_EXECUTION(run_function, task) \
@@ -1126,6 +1107,7 @@
#define TRACE_VALUE_TYPE_STRING (static_cast<unsigned char>(6))
#define TRACE_VALUE_TYPE_COPY_STRING (static_cast<unsigned char>(7))
#define TRACE_VALUE_TYPE_CONVERTABLE (static_cast<unsigned char>(8))
+#define TRACE_VALUE_TYPE_PROTO (static_cast<unsigned char>(9))
// Enum reflecting the scope of an INSTANT event. Must fit within
// TRACE_EVENT_FLAG_SCOPE_MASK.
@@ -1137,4 +1119,5 @@
#define TRACE_EVENT_SCOPE_NAME_PROCESS ('p')
#define TRACE_EVENT_SCOPE_NAME_THREAD ('t')
+#endif // !defined(BASE_USE_PERFETTO_CLIENT_LIBRARY)
#endif // BASE_TRACE_EVENT_COMMON_TRACE_EVENT_COMMON_H_
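
The Perfetto block added above keeps existing TRACE_EVENT call sites compiling unchanged (via PERFETTO_ENABLE_LEGACY_TRACE_EVENTS), whichever backend is selected at build time. A minimal sketch of such a call site follows; the function, category, and argument names are illustrative only and are not taken from this change, and the include goes through the embedding project's trace_event.h wrapper, as the header itself requires:

  // Illustrative caller; not part of this patch.
  #include "base/trace_event/trace_event.h"  // the project's wrapper header

  void CompileFunction() {
    // Scoped event: records a begin event here and the matching end event
    // when the enclosing scope exits.
    TRACE_EVENT0("v8", "CompileFunction");

    // Instant event with one argument; category and name strings must have
    // application lifetime (string literals are fine).
    TRACE_EVENT_INSTANT1("v8", "CacheLookup", TRACE_EVENT_SCOPE_THREAD,
                         "hit", true);
  }
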
diff --git a/deps/v8/gni/proto_library.gni b/deps/v8/gni/proto_library.gni
index 0b72d7b8a4..583057e0a5 100644
--- a/deps/v8/gni/proto_library.gni
+++ b/deps/v8/gni/proto_library.gni
@@ -7,16 +7,16 @@ import("//build_overrides/build.gni")
# This file should not be pulled in chromium builds.
assert(!build_with_chromium)
+if (host_os == "win") {
+ _host_executable_suffix = ".exe"
+} else {
+ _host_executable_suffix = ""
+}
+
template("proto_library") {
assert(defined(invoker.sources))
proto_sources = invoker.sources
- if (host_os == "win") {
- host_executable_suffix = ".exe"
- } else {
- host_executable_suffix = ""
- }
-
# All the proto imports should be relative to the project root.
proto_in_dir = "//"
if (defined(invoker.proto_in_dir)) {
@@ -32,6 +32,11 @@ template("proto_library") {
# generate_python = true.
assert(defined(invoker.generate_python) && !invoker.generate_python)
+ import_dirs = []
+ if (defined(invoker.import_dirs)) {
+ import_dirs = invoker.import_dirs
+ }
+
# If false will not generate the default .pb.{cc,h} files. Used for custom
# codegen plugins.
generate_cc = true
@@ -48,7 +53,7 @@ template("proto_library") {
plugin_host_label = invoker.generator_plugin_label + "($host_toolchain)"
plugin_path =
get_label_info(plugin_host_label, "root_out_dir") + "/" +
- get_label_info(plugin_host_label, "name") + host_executable_suffix
+ get_label_info(plugin_host_label, "name") + _host_executable_suffix
generate_with_plugin = true
} else if (defined(invoker.generator_plugin_script)) {
plugin_path = invoker.generator_plugin_script
@@ -68,14 +73,19 @@ template("proto_library") {
}
}
- cc_out_dir = "$root_gen_dir/" + proto_out_dir
- rel_cc_out_dir = rebase_path(cc_out_dir, root_build_dir)
+ out_dir = "$root_gen_dir/" + proto_out_dir
+ rel_out_dir = rebase_path(out_dir, root_build_dir)
+
+  # Prevent unused-variable errors when only generating the descriptor.
+ if (generate_descriptor != "") {
+ not_needed([ "rel_out_dir" ])
+ }
protos = rebase_path(proto_sources, proto_in_dir)
protogens = []
if (generate_descriptor != "") {
- protogens += [ "$root_gen_dir/" + generate_descriptor ]
+ protogens += [ "$out_dir/${generate_descriptor}" ]
}
foreach(proto, protos) {
@@ -83,44 +93,64 @@ template("proto_library") {
proto_name = get_path_info(proto, "name")
proto_path = proto_dir + "/" + proto_name
+      # Prevent unused-variable errors when only generating the descriptor.
+ if (generate_descriptor != "") {
+ not_needed([ "proto_path" ])
+ }
+
if (generate_cc) {
protogens += [
- "$cc_out_dir/$proto_path.pb.h",
- "$cc_out_dir/$proto_path.pb.cc",
+ "$out_dir/$proto_path.pb.h",
+ "$out_dir/$proto_path.pb.cc",
]
}
if (generate_with_plugin) {
foreach(suffix, generator_plugin_suffixes) {
- protogens += [ "$cc_out_dir/${proto_path}${suffix}" ]
+ protogens += [ "$out_dir/${proto_path}${suffix}" ]
}
}
}
config_name = "${target_name}_config"
- action_name = "${target_name}_gen"
- source_set_name = target_name
+ if (generate_descriptor == "") {
+ action_name = "${target_name}_gen"
+ source_set_name = target_name
+ } else {
+ action_name = target_name
+ }
config(config_name) {
- include_dirs = [ cc_out_dir ]
+ include_dirs = [ out_dir ]
}
# The XXX_gen action that generates the .pb.{cc,h} files.
action(action_name) {
- visibility = [ ":$source_set_name" ]
- script = "//build/gn_run_binary.py"
+ if (generate_descriptor == "") {
+ visibility = [ ":$source_set_name" ]
+ }
sources = proto_sources
outputs = get_path_info(protogens, "abspath")
protoc_label = "//:protoc($host_toolchain)"
protoc_path = get_label_info(protoc_label, "root_out_dir") + "/protoc" +
- host_executable_suffix
+ _host_executable_suffix
+ protoc_rebased_path = "./" + rebase_path(protoc_path, root_build_dir)
+ script = "//gni/protoc.py"
args = [
# Path should be rebased because |root_build_dir| for current toolchain
# may be different from |root_out_dir| of protoc built on host toolchain.
- "./" + rebase_path(protoc_path, root_build_dir),
+ protoc_rebased_path,
"--proto_path",
rebase_path(proto_in_dir, root_build_dir),
]
+
+ foreach(path, import_dirs) {
+ args += [
+ "--proto_path",
+ rebase_path(path, root_build_dir),
+ ]
+ }
+
if (generate_cc) {
cc_generator_options_ = ""
if (defined(invoker.cc_generator_options)) {
@@ -128,30 +158,27 @@ template("proto_library") {
}
args += [
"--cpp_out",
- cc_generator_options_ + rel_cc_out_dir,
+ cc_generator_options_ + rel_out_dir,
]
}
if (generate_descriptor != "") {
+ depfile = "$out_dir/$generate_descriptor.d"
args += [
"--include_imports",
"--descriptor_set_out",
- rebase_path("$root_gen_dir/" + generate_descriptor, root_build_dir),
+ rebase_path("$out_dir/$generate_descriptor", root_build_dir),
+ "--dependency_out",
+ rebase_path(depfile, root_build_dir),
]
}
- if (defined(invoker.import_dirs)) {
- foreach(path, invoker.import_dirs) {
- args += [ "--import-dir=" + rebase_path(path, root_build_dir) ]
- }
- }
-
if (generate_with_plugin) {
plugin_path_rebased = rebase_path(plugin_path, root_build_dir)
plugin_out_args = ""
if (defined(invoker.generator_plugin_options)) {
plugin_out_args += invoker.generator_plugin_options
}
- plugin_out_args += ":$rel_cc_out_dir"
+ plugin_out_args += ":$rel_out_dir"
args += [
"--plugin=protoc-gen-plugin=$plugin_path_rebased",
@@ -162,8 +189,15 @@ template("proto_library") {
args += rebase_path(proto_sources, root_build_dir)
inputs = [ protoc_path ]
-
deps = [ protoc_label ]
+
+  # TODO(hjd): Avoid adding to deps here.
+  # When we generate BUILD files we need to find the transitive proto
+  # dependencies, so also add link_deps to the actual deps so they show up
+  # in gn desc.
+ if (defined(invoker.link_deps)) {
+ deps += invoker.link_deps
+ }
if (generate_with_plugin) {
inputs += [ plugin_path ]
if (defined(plugin_host_label)) {
@@ -172,61 +206,64 @@ template("proto_library") {
}
}
- if (defined(invoker.proto_deps)) {
- deps += invoker.proto_deps
- }
if (defined(invoker.deps)) {
deps += invoker.deps
}
- } # action "${target_name}_gen"
+ } # action(action_name)
# The source_set that builds the generated .pb.cc files.
- source_set(target_name) {
- forward_variables_from(invoker,
- [
- "defines",
- "include_dirs",
- "public_configs",
- "testonly",
- "visibility",
- ])
-
- sources = get_target_outputs(":$action_name")
-
- # configs -= [ "//gn/standalone:extra_warnings" ]
- if (defined(invoker.extra_configs)) {
- configs += invoker.extra_configs
- }
+ if (generate_descriptor == "") {
+ source_set(source_set_name) {
+ forward_variables_from(invoker,
+ [
+ "defines",
+ "include_dirs",
+ "public_configs",
+ "testonly",
+ "visibility",
+ ])
+
+ sources = get_target_outputs(":$action_name")
+
+ if (defined(invoker.extra_configs)) {
+ configs += invoker.extra_configs
+ }
- if (!defined(invoker.public_configs)) {
- public_configs = []
- }
+ if (!defined(invoker.public_configs)) {
+ public_configs = []
+ }
- public_configs += [ "//:protobuf_gen_config" ]
+ public_configs += [
+ "//:protobuf_gen_config",
+ ":$config_name",
+ ]
- propagate_imports_configs = !defined(invoker.propagate_imports_configs) ||
- invoker.propagate_imports_configs
- if (propagate_imports_configs) {
- public_configs += [ ":$config_name" ]
- } else {
- # Embedder handles include directory propagation to dependents.
- configs += [ ":$config_name" ]
- }
+ # By default, propagate the config for |include_dirs| to dependent
+ # targets, so that public imports can be resolved to corresponding header
+ # files. In some cases, the embedder target handles include directory
+ # propagation itself, e.g. via a common config.
+ propagate_imports_configs = !defined(invoker.propagate_imports_configs) ||
+ invoker.propagate_imports_configs
+ if (propagate_imports_configs) {
+ public_configs += [ ":$config_name" ]
+ } else {
+ configs += [ ":$config_name" ]
+ }
- # Use protobuf_full only for tests.
- if (defined(invoker.use_protobuf_full) &&
- invoker.use_protobuf_full == true) {
- deps = [ "//:protobuf_full" ]
- } else {
- deps = [ "//:protobuf_lite" ]
- }
+ # Use protobuf_full only for tests.
+ if (defined(invoker.use_protobuf_full) &&
+ invoker.use_protobuf_full == true) {
+ deps = [ "//:protobuf_full" ]
+ } else if (generate_cc) {
+ deps = [ "//:protobuf_lite" ]
+ } else {
+ deps = []
+ }
- deps += [ ":$action_name" ]
- if (defined(invoker.deps)) {
- deps += invoker.deps
- }
- if (defined(invoker.link_deps)) {
- deps += invoker.link_deps
- }
- } # source_set(target_name)
+ deps += [ ":$action_name" ]
+ if (defined(invoker.deps)) {
+ deps += invoker.deps
+ }
+ } # source_set(source_set_name)
+ }
} # template
diff --git a/deps/v8/gni/protoc.py b/deps/v8/gni/protoc.py
new file mode 100755
index 0000000000..dc8920009d
--- /dev/null
+++ b/deps/v8/gni/protoc.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+# Copyright 2021 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Script to wrap protoc execution.
+
+This script exists to work around the broken depfile generation by protoc
+when generating descriptors."""
+
+from __future__ import print_function
+import argparse
+import os
+import sys
+import subprocess
+import tempfile
+import uuid
+
+from codecs import open
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--descriptor_set_out', default=None)
+ parser.add_argument('--dependency_out', default=None)
+ parser.add_argument('protoc')
+ args, remaining = parser.parse_known_args()
+
+ if args.dependency_out and args.descriptor_set_out:
+ tmp_path = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
+ custom = [
+ '--descriptor_set_out', args.descriptor_set_out, '--dependency_out',
+ tmp_path
+ ]
+ try:
+ cmd = [args.protoc] + custom + remaining
+ subprocess.check_call(cmd)
+ with open(tmp_path, 'rb') as tmp_rd:
+ dependency_data = tmp_rd.read().decode('utf-8')
+ finally:
+ if os.path.exists(tmp_path):
+ os.unlink(tmp_path)
+
+ with open(args.dependency_out, 'w', encoding='utf-8') as f:
+ f.write(args.descriptor_set_out + ":")
+ f.write(dependency_data)
+ else:
+ subprocess.check_call(sys.argv[1:])
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/deps/v8/gni/snapshot_toolchain.gni b/deps/v8/gni/snapshot_toolchain.gni
index b5fb1823b3..53963a048b 100644
--- a/deps/v8/gni/snapshot_toolchain.gni
+++ b/deps/v8/gni/snapshot_toolchain.gni
@@ -79,7 +79,8 @@ if (v8_snapshot_toolchain == "") {
if (v8_current_cpu == "x64" || v8_current_cpu == "x86") {
_cpus = v8_current_cpu
- } else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el") {
+ } else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el" ||
+ v8_current_cpu == "riscv64") {
if (is_win && v8_current_cpu == "arm64") {
# set _cpus to blank for Windows ARM64 so host_toolchain could be
# selected as snapshot toolchain later.
diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni
index 5651a178c2..9325baf996 100644
--- a/deps/v8/gni/v8.gni
+++ b/deps/v8/gni/v8.gni
@@ -64,6 +64,17 @@ declare_args() {
# Enable WebAssembly debugging via GDB-remote protocol.
v8_enable_wasm_gdb_remote_debugging = false
+ # Lite mode disables a number of performance optimizations to reduce memory
+ # at the cost of performance.
+ # Sets -DV8_LITE_MODE.
+ v8_enable_lite_mode = false
+
+ # Include support for WebAssembly. If disabled, the 'WebAssembly' global
+ # will not be available, and embedder APIs to generate WebAssembly modules
+ # will fail. Also, asm.js will not be translated to WebAssembly and will be
+ # executed as standard JavaScript instead.
+ v8_enable_webassembly = ""
+
# Add fuzzilli fuzzer support.
v8_fuzzilli = false
@@ -98,6 +109,13 @@ if (build_with_chromium && use_perfetto_client_library) {
v8_use_perfetto = true
}
+# WebAssembly is enabled by default, except in lite mode.
+if (v8_enable_webassembly == "") {
+ v8_enable_webassembly = !v8_enable_lite_mode
+}
+assert(!(v8_enable_webassembly && v8_enable_lite_mode),
+ "Webassembly is not available in lite mode.")
+
# Points to // in v8 stand-alone or to //v8/ in chromium. We need absolute
# paths for all configs in templates as they are shared in different
# subdirectories.
diff --git a/deps/v8/include/DEPS b/deps/v8/include/DEPS
index 9f4002059b..21ce3d9645 100644
--- a/deps/v8/include/DEPS
+++ b/deps/v8/include/DEPS
@@ -4,6 +4,7 @@ include_rules = [
"+cppgc/common.h",
# Used by v8-cppgc.h to bridge to cppgc.
"+cppgc/custom-space.h",
+ "+cppgc/heap-statistics.h",
"+cppgc/internal/write-barrier.h",
"+cppgc/visitor.h",
]
diff --git a/deps/v8/include/OWNERS b/deps/v8/include/OWNERS
index 19c21fbf52..cd5fd0535e 100644
--- a/deps/v8/include/OWNERS
+++ b/deps/v8/include/OWNERS
@@ -16,6 +16,7 @@ per-file v8-inspector-protocol.h=pfeldman@chromium.org
per-file v8-inspector-protocol.h=kozyatinskiy@chromium.org
per-file js_protocol.pdl=dgozman@chromium.org
per-file js_protocol.pdl=pfeldman@chromium.org
+per-file js_protocol.pdl=bmeurer@chromium.org
# For branch updates:
per-file v8-version.h=file:../INFRA_OWNERS
diff --git a/deps/v8/include/cppgc/allocation.h b/deps/v8/include/cppgc/allocation.h
index 1164f6925c..b6f9d3902b 100644
--- a/deps/v8/include/cppgc/allocation.h
+++ b/deps/v8/include/cppgc/allocation.h
@@ -64,6 +64,13 @@ template <typename T>
class MakeGarbageCollectedTraitBase
: private internal::MakeGarbageCollectedTraitInternal {
private:
+ static_assert(internal::IsGarbageCollectedType<T>::value,
+ "T needs to be a garbage collected object");
+ static_assert(!IsGarbageCollectedWithMixinTypeV<T> ||
+ sizeof(T) <=
+ internal::api_constants::kLargeObjectSizeThreshold,
+ "GarbageCollectedMixin may not be a large object");
+
template <typename U, typename CustomSpace>
struct SpacePolicy {
static void* Allocate(AllocationHandle& handle, size_t size) {
@@ -153,12 +160,6 @@ class MakeGarbageCollectedTrait : public MakeGarbageCollectedTraitBase<T> {
public:
template <typename... Args>
static T* Call(AllocationHandle& handle, Args&&... args) {
- static_assert(internal::IsGarbageCollectedType<T>::value,
- "T needs to be a garbage collected object");
- static_assert(
- !internal::IsGarbageCollectedMixinType<T>::value ||
- sizeof(T) <= internal::api_constants::kLargeObjectSizeThreshold,
- "GarbageCollectedMixin may not be a large object");
void* memory =
MakeGarbageCollectedTraitBase<T>::Allocate(handle, sizeof(T));
T* object = ::new (memory) T(std::forward<Args>(args)...);
@@ -169,12 +170,6 @@ class MakeGarbageCollectedTrait : public MakeGarbageCollectedTraitBase<T> {
template <typename... Args>
static T* Call(AllocationHandle& handle, AdditionalBytes additional_bytes,
Args&&... args) {
- static_assert(internal::IsGarbageCollectedType<T>::value,
- "T needs to be a garbage collected object");
- static_assert(
- !internal::IsGarbageCollectedMixinType<T>::value ||
- sizeof(T) <= internal::api_constants::kLargeObjectSizeThreshold,
- "GarbageCollectedMixin may not be a large object");
void* memory = MakeGarbageCollectedTraitBase<T>::Allocate(
handle, sizeof(T) + additional_bytes.value);
T* object = ::new (memory) T(std::forward<Args>(args)...);
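With this change the two static_asserts fire in MakeGarbageCollectedTraitBase rather than being repeated in both MakeGarbageCollectedTrait::Call overloads, so any allocation path going through the trait base is checked. A minimal sketch of an allocation the checks guard; the type and function names are illustrative only:

    #include "cppgc/allocation.h"
    #include "cppgc/garbage-collected.h"

    // Illustrative type; a non-GarbageCollected T would now fail the
    // static_assert in MakeGarbageCollectedTraitBase.
    class MyObject final : public cppgc::GarbageCollected<MyObject> {
     public:
      void Trace(cppgc::Visitor*) const {}
    };

    MyObject* AllocateOne(cppgc::AllocationHandle& handle) {
      return cppgc::MakeGarbageCollected<MyObject>(handle);
    }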
diff --git a/deps/v8/include/cppgc/common.h b/deps/v8/include/cppgc/common.h
index 1fff1a03fe..b6dbff3dd6 100644
--- a/deps/v8/include/cppgc/common.h
+++ b/deps/v8/include/cppgc/common.h
@@ -10,15 +10,18 @@
namespace cppgc {
-// Indicator for the stack state of the embedder.
+/**
+ * Indicator for the stack state of the embedder.
+ */
enum class EmbedderStackState {
+ /**
+ * Stack may contain interesting heap pointers.
+ */
kMayContainHeapPointers,
+ /**
+ * Stack does not contain any interesting heap pointers.
+ */
kNoHeapPointers,
- kUnknown V8_ENUM_DEPRECATED("Use kMayContainHeapPointers") =
- kMayContainHeapPointers,
- kNonEmpty V8_ENUM_DEPRECATED("Use kMayContainHeapPointers") =
- kMayContainHeapPointers,
- kEmpty V8_ENUM_DEPRECATED("Use kNoHeapPointers") = kNoHeapPointers,
};
} // namespace cppgc
diff --git a/deps/v8/include/cppgc/cross-thread-persistent.h b/deps/v8/include/cppgc/cross-thread-persistent.h
index 3d49d557c2..1f509d4b00 100644
--- a/deps/v8/include/cppgc/cross-thread-persistent.h
+++ b/deps/v8/include/cppgc/cross-thread-persistent.h
@@ -44,7 +44,6 @@ class BasicCrossThreadPersistent final : public PersistentBase,
T* raw, const SourceLocation& loc = SourceLocation::Current())
: PersistentBase(raw), LocationPolicy(loc) {
if (!IsValid(raw)) return;
- PersistentRegionLock guard;
PersistentRegion& region = this->GetPersistentRegion(raw);
SetNode(region.AllocateNode(this, &Trace));
this->CheckPointer(raw);
@@ -162,13 +161,24 @@ class BasicCrossThreadPersistent final : public PersistentBase,
// heterogeneous assignments between different Member and Persistent handles
// based on their actual types.
V8_CLANG_NO_SANITIZE("cfi-unrelated-cast") T* Get() const {
- return static_cast<T*>(GetValue());
+ return static_cast<T*>(const_cast<void*>(GetValue()));
}
/**
* Clears the stored object.
*/
- void Clear() { Assign(nullptr); }
+ void Clear() {
+ // Simplified version of `Assign()` to allow calling without a complete type
+ // `T`.
+ const void* old_value = GetValue();
+ if (IsValid(old_value)) {
+ PersistentRegionLock guard;
+ PersistentRegion& region = this->GetPersistentRegion(old_value);
+ region.FreeNode(GetNode());
+ SetNode(nullptr);
+ }
+ SetValue(nullptr);
+ }
/**
* Returns a pointer to the stored object and releases it.
@@ -209,8 +219,31 @@ class BasicCrossThreadPersistent final : public PersistentBase,
T* operator->() const { return Get(); }
T& operator*() const { return *Get(); }
+ template <typename U, typename OtherWeaknessPolicy = WeaknessPolicy,
+ typename OtherLocationPolicy = LocationPolicy,
+ typename OtherCheckingPolicy = CheckingPolicy>
+ BasicCrossThreadPersistent<U, OtherWeaknessPolicy, OtherLocationPolicy,
+ OtherCheckingPolicy>
+ To() const {
+ PersistentRegionLock guard;
+ return BasicCrossThreadPersistent<U, OtherWeaknessPolicy,
+ OtherLocationPolicy, OtherCheckingPolicy>(
+ static_cast<U*>(Get()));
+ }
+
+ template <typename U = T,
+ typename = typename std::enable_if<!BasicCrossThreadPersistent<
+ U, WeaknessPolicy>::IsStrongPersistent::value>::type>
+ BasicCrossThreadPersistent<U, internal::StrongCrossThreadPersistentPolicy>
+ Lock() const {
+ return BasicCrossThreadPersistent<
+ U, internal::StrongCrossThreadPersistentPolicy>(*this);
+ }
+
private:
- static bool IsValid(void* ptr) { return ptr && ptr != kSentinelPointer; }
+ static bool IsValid(const void* ptr) {
+ return ptr && ptr != kSentinelPointer;
+ }
static void Trace(Visitor* v, const void* ptr) {
const auto* handle = static_cast<const BasicCrossThreadPersistent*>(ptr);
@@ -218,7 +251,7 @@ class BasicCrossThreadPersistent final : public PersistentBase,
}
void Assign(T* ptr) {
- void* old_value = GetValue();
+ const void* old_value = GetValue();
if (IsValid(old_value)) {
PersistentRegionLock guard;
PersistentRegion& region = this->GetPersistentRegion(old_value);
@@ -238,7 +271,8 @@ class BasicCrossThreadPersistent final : public PersistentBase,
}
void AssignUnsafe(T* ptr) {
- void* old_value = GetValue();
+ PersistentRegionLock::AssertLocked();
+ const void* old_value = GetValue();
if (IsValid(old_value)) {
PersistentRegion& region = this->GetPersistentRegion(old_value);
if (IsValid(ptr) && (&region == &this->GetPersistentRegion(ptr))) {
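The added To() and Lock() helpers mirror the Persistent API: Lock() upgrades a weak cross-thread handle to a strong one under the persistent-region lock. A hedged sketch of the intended usage; MyObject is an assumed embedder type:

    #include "cppgc/cross-thread-persistent.h"

    class MyObject;  // assumed embedder type

    void UseFromAnotherThread(
        const cppgc::subtle::WeakCrossThreadPersistent<MyObject>& weak) {
      // Lock() takes the persistent-region lock and returns a strong handle,
      // which is empty if the object has already been reclaimed.
      cppgc::subtle::CrossThreadPersistent<MyObject> strong = weak.Lock();
      if (!strong) return;
      MyObject* object = strong.Get();
      // ... use |object| while |strong| keeps it alive ...
      (void)object;
    }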
diff --git a/deps/v8/include/cppgc/custom-space.h b/deps/v8/include/cppgc/custom-space.h
index 9a8cd876c3..757c4fde15 100644
--- a/deps/v8/include/cppgc/custom-space.h
+++ b/deps/v8/include/cppgc/custom-space.h
@@ -9,8 +9,11 @@
namespace cppgc {
+/**
+ * Index identifying a custom space.
+ */
struct CustomSpaceIndex {
- CustomSpaceIndex(size_t value) : value(value) {} // NOLINT
+ constexpr CustomSpaceIndex(size_t value) : value(value) {} // NOLINT
size_t value;
};
@@ -45,15 +48,18 @@ class CustomSpaceBase {
template <typename ConcreteCustomSpace>
class CustomSpace : public CustomSpaceBase {
public:
+ /**
+ * Compaction is only supported on spaces that manually manage slots
+ * recording.
+ */
+ static constexpr bool kSupportsCompaction = false;
+
CustomSpaceIndex GetCustomSpaceIndex() const final {
return ConcreteCustomSpace::kSpaceIndex;
}
bool IsCompactable() const final {
return ConcreteCustomSpace::kSupportsCompaction;
}
-
- protected:
- static constexpr bool kSupportsCompaction = false;
};
/**
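With kSupportsCompaction now public and defaulting to false, a custom space opts into compaction explicitly, and types are routed into it via the SpaceTrait specialization from the same header. A sketch under assumed names:

    #include "cppgc/custom-space.h"

    class CompactableSpace final : public cppgc::CustomSpace<CompactableSpace> {
     public:
      static constexpr cppgc::CustomSpaceIndex kSpaceIndex = 0;
      static constexpr bool kSupportsCompaction = true;  // explicit opt-in
    };

    class InCustomSpace;  // assumed GarbageCollected type

    namespace cppgc {
    template <>
    struct SpaceTrait<InCustomSpace> {
      using Space = CompactableSpace;
    };
    }  // namespace cppgc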
diff --git a/deps/v8/include/cppgc/ephemeron-pair.h b/deps/v8/include/cppgc/ephemeron-pair.h
index 47163d3071..e16cf1f0aa 100644
--- a/deps/v8/include/cppgc/ephemeron-pair.h
+++ b/deps/v8/include/cppgc/ephemeron-pair.h
@@ -5,6 +5,7 @@
#ifndef INCLUDE_CPPGC_EPHEMERON_PAIR_H_
#define INCLUDE_CPPGC_EPHEMERON_PAIR_H_
+#include "cppgc/liveness-broker.h"
#include "cppgc/member.h"
namespace cppgc {
@@ -18,6 +19,10 @@ struct EphemeronPair {
EphemeronPair(K* k, V* v) : key(k), value(v) {}
WeakMember<K> key;
Member<V> value;
+
+ void ClearValueIfKeyIsDead(const LivenessBroker& broker) {
+ if (!broker.IsHeapObjectAlive(key)) value = nullptr;
+ }
};
} // namespace cppgc
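ClearValueIfKeyIsDead() is meant to be invoked from a weak callback so the strong value reference is dropped once the weak key dies. A hedged sketch, assuming the usual RegisterWeakCallbackMethod pattern from cppgc/visitor.h and illustrative type names:

    #include "cppgc/ephemeron-pair.h"
    #include "cppgc/garbage-collected.h"
    #include "cppgc/visitor.h"

    class Key final : public cppgc::GarbageCollected<Key> {
     public:
      void Trace(cppgc::Visitor*) const {}
    };
    class Value final : public cppgc::GarbageCollected<Value> {
     public:
      void Trace(cppgc::Visitor*) const {}
    };

    class Holder final : public cppgc::GarbageCollected<Holder> {
     public:
      void Trace(cppgc::Visitor* visitor) const {
        visitor->Trace(pair_);
        visitor->RegisterWeakCallbackMethod<Holder, &Holder::ProcessWeak>(this);
      }
      void ProcessWeak(const cppgc::LivenessBroker& broker) {
        // Drops the strong value reference if the weak key has died.
        pair_.ClearValueIfKeyIsDead(broker);
      }

     private:
      cppgc::EphemeronPair<Key, Value> pair_{nullptr, nullptr};
    };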
diff --git a/deps/v8/include/cppgc/heap-consistency.h b/deps/v8/include/cppgc/heap-consistency.h
index 4a4eb10381..47caea1847 100644
--- a/deps/v8/include/cppgc/heap-consistency.h
+++ b/deps/v8/include/cppgc/heap-consistency.h
@@ -8,6 +8,7 @@
#include <cstddef>
#include "cppgc/internal/write-barrier.h"
+#include "cppgc/macros.h"
#include "cppgc/trace-trait.h"
#include "v8config.h" // NOLINT(build/include_directory)
@@ -49,17 +50,22 @@ class HeapConsistency final {
/**
* Gets the required write barrier type for a specific write.
*
- * \param slot Slot containing the pointer to some part of an object object
- * that has been allocated using `MakeGarbageCollected()`. Does not consider
- * the value of `slot`.
+ * \param slot Slot to some part of an object. The object does not need to
+ *   have been allocated using `MakeGarbageCollected()` but can also live
+ *   off-heap or on stack.
* \param params Parameters that may be used for actual write barrier calls.
* Only filled if return value indicates that a write barrier is needed. The
* contents of the `params` are an implementation detail.
+ * \param callback Callback returning the corresponding heap handle. The
+ * callback is only invoked if the heap cannot otherwise be figured out. The
+ * callback must not allocate.
* \returns whether a write barrier is needed and which barrier to invoke.
*/
+ template <typename HeapHandleCallback>
static V8_INLINE WriteBarrierType
- GetWriteBarrierType(const void* slot, WriteBarrierParams& params) {
- return internal::WriteBarrier::GetWriteBarrierType(slot, params);
+ GetWriteBarrierType(const void* slot, WriteBarrierParams& params,
+ HeapHandleCallback callback) {
+ return internal::WriteBarrier::GetWriteBarrierType(slot, params, callback);
}
/**
@@ -80,7 +86,6 @@ class HeapConsistency final {
* elements if they have not yet been processed.
*
* \param params The parameters retrieved from `GetWriteBarrierType()`.
- * \param heap The corresponding heap.
* \param first_element Pointer to the first element that should be processed.
* The slot itself must reside in an object that has been allocated using
* `MakeGarbageCollected()`.
@@ -91,11 +96,11 @@ class HeapConsistency final {
* element if necessary.
*/
static V8_INLINE void DijkstraWriteBarrierRange(
- const WriteBarrierParams& params, HeapHandle& heap,
- const void* first_element, size_t element_size, size_t number_of_elements,
+ const WriteBarrierParams& params, const void* first_element,
+ size_t element_size, size_t number_of_elements,
TraceCallback trace_callback) {
internal::WriteBarrier::DijkstraMarkingBarrierRange(
- params, heap, first_element, element_size, number_of_elements,
+ params, first_element, element_size, number_of_elements,
trace_callback);
}
@@ -131,6 +136,100 @@ class HeapConsistency final {
HeapConsistency() = delete;
};
+/**
+ * Disallows garbage collection finalizations. Any garbage collection triggers
+ * result in a crash when in this scope.
+ *
+ * Note that the garbage collector already covers paths that can lead to garbage
+ * collections, so user code does not require checking
+ * `IsGarbageCollectionAllowed()` before allocations.
+ */
+class V8_EXPORT V8_NODISCARD DisallowGarbageCollectionScope final {
+ CPPGC_STACK_ALLOCATED();
+
+ public:
+ /**
+ * \returns whether garbage collections are currently allowed.
+ */
+ static bool IsGarbageCollectionAllowed(HeapHandle& heap_handle);
+
+ /**
+ * Enters a disallow garbage collection scope. Must be paired with `Leave()`.
+ * Prefer a scope instance of `DisallowGarbageCollectionScope`.
+ *
+ * \param heap_handle The corresponding heap.
+ */
+ static void Enter(HeapHandle& heap_handle);
+
+ /**
+ * Leaves a disallow garbage collection scope. Must be paired with `Enter()`.
+ * Prefer a scope instance of `DisallowGarbageCollectionScope`.
+ *
+ * \param heap_handle The corresponding heap.
+ */
+ static void Leave(HeapHandle& heap_handle);
+
+ /**
+ * Constructs a scoped object that automatically enters and leaves a disallow
+ * garbage collection scope based on its lifetime.
+ *
+ * \param heap_handle The corresponding heap.
+ */
+ explicit DisallowGarbageCollectionScope(HeapHandle& heap_handle);
+ ~DisallowGarbageCollectionScope();
+
+ DisallowGarbageCollectionScope(const DisallowGarbageCollectionScope&) =
+ delete;
+ DisallowGarbageCollectionScope& operator=(
+ const DisallowGarbageCollectionScope&) = delete;
+
+ private:
+ HeapHandle& heap_handle_;
+};
+
+/**
+ * Avoids invoking garbage collection finalizations. Garbage collection phases
+ * that are already running are unaffected by this scope.
+ *
+ * Should only be used temporarily, as the scope has an impact on memory usage
+ * and follow-up garbage collections.
+ */
+class V8_EXPORT V8_NODISCARD NoGarbageCollectionScope final {
+ CPPGC_STACK_ALLOCATED();
+
+ public:
+ /**
+ * Enters a no garbage collection scope. Must be paired with `Leave()`. Prefer
+ * a scope instance of `NoGarbageCollectionScope`.
+ *
+ * \param heap_handle The corresponding heap.
+ */
+ static void Enter(HeapHandle& heap_handle);
+
+ /**
+ * Leaves a no garbage collection scope. Must be paired with `Enter()`. Prefer
+ * a scope instance of `NoGarbageCollectionScope`.
+ *
+ * \param heap_handle The corresponding heap.
+ */
+ static void Leave(HeapHandle& heap_handle);
+
+ /**
+ * Constructs a scoped object that automatically enters and leaves a no
+ * garbage collection scope based on its lifetime.
+ *
+ * \param heap_handle The corresponding heap.
+ */
+ explicit NoGarbageCollectionScope(HeapHandle& heap_handle);
+ ~NoGarbageCollectionScope();
+
+ NoGarbageCollectionScope(const NoGarbageCollectionScope&) = delete;
+ NoGarbageCollectionScope& operator=(const NoGarbageCollectionScope&) = delete;
+
+ private:
+ HeapHandle& heap_handle_;
+};
+
} // namespace subtle
} // namespace cppgc
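A hedged sketch of the two new scopes; obtaining the HeapHandle (e.g. from cppgc::Heap::GetHeapHandle()) is an assumption about the embedder setup:

    #include "cppgc/heap-consistency.h"

    void MutateWithoutGC(cppgc::HeapHandle& heap_handle) {
      // Any garbage collection triggered inside this scope crashes, which
      // makes GC-free invariants cheap to enforce.
      cppgc::subtle::DisallowGarbageCollectionScope no_gc(heap_handle);
      // ... work that must not observe a garbage collection ...
    }

    void DeferFinalizations(cppgc::HeapHandle& heap_handle) {
      // Finalizing garbage collections are avoided while this scope is alive;
      // memory usage may grow until it is left again.
      cppgc::subtle::NoGarbageCollectionScope no_finalization(heap_handle);
      // ... allocation-heavy section ...
    }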
diff --git a/deps/v8/include/cppgc/heap-state.h b/deps/v8/include/cppgc/heap-state.h
new file mode 100644
index 0000000000..0157282a56
--- /dev/null
+++ b/deps/v8/include/cppgc/heap-state.h
@@ -0,0 +1,59 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_HEAP_STATE_H_
+#define INCLUDE_CPPGC_HEAP_STATE_H_
+
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+
+class HeapHandle;
+
+namespace subtle {
+
+/**
+ * Helpers to peek into heap-internal state.
+ */
+class V8_EXPORT HeapState final {
+ public:
+ /**
+ * Returns whether the garbage collector is marking. This API is experimental
+ * and is expected to be removed in future.
+ *
+ * \param heap_handle The corresponding heap.
+ * \returns true if the garbage collector is currently marking, and false
+ * otherwise.
+ */
+ static bool IsMarking(const HeapHandle& heap_handle);
+
+  /**
+ * Returns whether the garbage collector is sweeping. This API is experimental
+ * and is expected to be removed in future.
+ *
+ * \param heap_handle The corresponding heap.
+ * \returns true if the garbage collector is currently sweeping, and false
+ * otherwise.
+ */
+ static bool IsSweeping(const HeapHandle& heap_handle);
+
+ /**
+ * Returns whether the garbage collector is in the atomic pause, i.e., the
+ * mutator is stopped from running. This API is experimental and is expected
+ * to be removed in future.
+ *
+ * \param heap_handle The corresponding heap.
+ * \returns true if the garbage collector is currently in the atomic pause,
+ * and false otherwise.
+ */
+ static bool IsInAtomicPause(const HeapHandle& heap_handle);
+
+ private:
+ HeapState() = delete;
+};
+
+} // namespace subtle
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_HEAP_STATE_H_
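A sketch of the new introspection helpers; how the HeapHandle is obtained (e.g. cppgc::Heap::GetHeapHandle()) depends on the embedder and is assumed here:

    #include "cppgc/heap-state.h"

    bool IsHeapBusy(const cppgc::HeapHandle& handle) {
      using cppgc::subtle::HeapState;
      // Read-only peeks into heap-internal state; all three are experimental.
      return HeapState::IsMarking(handle) || HeapState::IsSweeping(handle) ||
             HeapState::IsInAtomicPause(handle);
    }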
diff --git a/deps/v8/include/cppgc/heap-statistics.h b/deps/v8/include/cppgc/heap-statistics.h
new file mode 100644
index 0000000000..cf8d6633cc
--- /dev/null
+++ b/deps/v8/include/cppgc/heap-statistics.h
@@ -0,0 +1,110 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_HEAP_STATISTICS_H_
+#define INCLUDE_CPPGC_HEAP_STATISTICS_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace cppgc {
+
+/**
+ * `HeapStatistics` contains memory consumption and utilization statistics for a
+ * cppgc heap.
+ */
+struct HeapStatistics final {
+ /**
+ * Specifies the detail level of the heap statistics. Brief statistics contain
+ * only the top-level allocated and used memory statistics for the entire
+   * heap. Detailed statistics also contain a breakdown per space and page, as
+ * well as freelist statistics and object type histograms. Note that used
+ * memory reported by brief statistics and detailed statistics might differ
+ * slightly.
+ */
+ enum DetailLevel : uint8_t {
+ kBrief,
+ kDetailed,
+ };
+
+ /**
+ * Statistics of object types. For each type the statistics record its name,
+ * how many objects of that type were allocated, and the overall size used by
+ * these objects.
+ */
+ struct ObjectStatistics {
+ /** Number of distinct types in the heap. */
+ size_t num_types = 0;
+ /** Name of each type in the heap. */
+ std::vector<std::string> type_name;
+ /** Number of allocated objects per each type. */
+ std::vector<size_t> type_count;
+ /** Overall size of allocated objects per each type. */
+ std::vector<size_t> type_bytes;
+ };
+
+ /**
+ * Page granularity statistics. For each page the statistics record the
+ * allocated memory size and overall used memory size for the page.
+ */
+ struct PageStatistics {
+ /** Overall amount of memory allocated for the page. */
+ size_t physical_size_bytes = 0;
+ /** Amount of memory actually used on the page. */
+ size_t used_size_bytes = 0;
+ };
+
+ /**
+   * Statistics of the freelist (used only in non-large object spaces). For
+ * each bucket in the freelist the statistics record the bucket size, the
+ * number of freelist entries in the bucket, and the overall allocated memory
+ * consumed by these freelist entries.
+ */
+ struct FreeListStatistics {
+    /** Bucket sizes in the freelist. */
+    std::vector<size_t> bucket_size;
+    /** Number of freelist entries per bucket. */
+    std::vector<size_t> free_count;
+    /** Memory size consumed by freelist entries per bucket. */
+ std::vector<size_t> free_size;
+ };
+
+ /**
+ * Space granularity statistics. For each space the statistics record the
+ * space name, the amount of allocated memory and overall used memory for the
+ * space. The statistics also contain statistics for each of the space's
+ * pages, its freelist and the objects allocated on the space.
+ */
+ struct SpaceStatistics {
+ /** The space name */
+ std::string name;
+ /** Overall amount of memory allocated for the space. */
+ size_t physical_size_bytes = 0;
+ /** Amount of memory actually used on the space. */
+ size_t used_size_bytes = 0;
+ /** Statistics for each of the pages in the space. */
+ std::vector<PageStatistics> page_stats;
+ /** Statistics for the freelist of the space. */
+ FreeListStatistics free_list_stats;
+    /** Statistics for objects allocated on the space. Filled only when
+ * NameProvider::HideInternalNames() is false. */
+ ObjectStatistics object_stats;
+ };
+
+ /** Overall amount of memory allocated for the heap. */
+ size_t physical_size_bytes = 0;
+ /** Amount of memory actually used on the heap. */
+ size_t used_size_bytes = 0;
+ /** Detail level of this HeapStatistics. */
+ DetailLevel detail_level;
+
+ /** Statistics for each of the spaces in the heap. Filled only when
+ * detail_level is kDetailed. */
+ std::vector<SpaceStatistics> space_stats;
+};
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_HEAP_STATISTICS_H_
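The ObjectStatistics vectors are parallel arrays indexed by type, and space_stats is only populated at kDetailed level. A sketch that walks an already collected HeapStatistics; the embedder-facing collection entry point itself is not shown here:

    #include <cstdio>

    #include "cppgc/heap-statistics.h"

    void DumpSpaces(const cppgc::HeapStatistics& stats) {
      std::printf("heap: %zu allocated / %zu used bytes\n",
                  stats.physical_size_bytes, stats.used_size_bytes);
      if (stats.detail_level != cppgc::HeapStatistics::kDetailed) return;
      for (const auto& space : stats.space_stats) {
        std::printf("  %s: %zu allocated / %zu used, %zu pages\n",
                    space.name.c_str(), space.physical_size_bytes,
                    space.used_size_bytes, space.page_stats.size());
      }
    }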
diff --git a/deps/v8/include/cppgc/internal/caged-heap-local-data.h b/deps/v8/include/cppgc/internal/caged-heap-local-data.h
index 8c42147738..1fa60b6953 100644
--- a/deps/v8/include/cppgc/internal/caged-heap-local-data.h
+++ b/deps/v8/include/cppgc/internal/caged-heap-local-data.h
@@ -10,6 +10,7 @@
#include "cppgc/internal/api-constants.h"
#include "cppgc/internal/logging.h"
#include "cppgc/platform.h"
+#include "v8config.h" // NOLINT(build/include_directory)
namespace cppgc {
namespace internal {
@@ -54,7 +55,7 @@ static_assert(sizeof(AgeTable) == 1 * api_constants::kMB,
struct CagedHeapLocalData final {
explicit CagedHeapLocalData(HeapBase* heap_base) : heap_base(heap_base) {}
- bool is_marking_in_progress = false;
+ bool is_incremental_marking_in_progress = false;
HeapBase* heap_base = nullptr;
#if defined(CPPGC_YOUNG_GENERATION)
AgeTable age_table;
diff --git a/deps/v8/include/cppgc/internal/persistent-node.h b/deps/v8/include/cppgc/internal/persistent-node.h
index 685d8a2d6a..6524f326a5 100644
--- a/deps/v8/include/cppgc/internal/persistent-node.h
+++ b/deps/v8/include/cppgc/internal/persistent-node.h
@@ -90,23 +90,29 @@ class V8_EXPORT PersistentRegion final {
PersistentNode* node = free_list_head_;
free_list_head_ = free_list_head_->FreeListNext();
node->InitializeAsUsedNode(owner, trace);
+ nodes_in_use_++;
return node;
}
void FreeNode(PersistentNode* node) {
node->InitializeAsFreeNode(free_list_head_);
free_list_head_ = node;
+ CPPGC_DCHECK(nodes_in_use_ > 0);
+ nodes_in_use_--;
}
void Trace(Visitor*);
size_t NodesInUse() const;
+ void ClearAllUsedNodes();
+
private:
void EnsureNodeSlots();
std::vector<std::unique_ptr<PersistentNodeSlots>> nodes_;
PersistentNode* free_list_head_ = nullptr;
+ size_t nodes_in_use_ = 0;
};
// CrossThreadPersistent uses PersistentRegion but protects it using this lock
@@ -115,6 +121,8 @@ class V8_EXPORT PersistentRegionLock final {
public:
PersistentRegionLock();
~PersistentRegionLock();
+
+ static void AssertLocked();
};
} // namespace internal
diff --git a/deps/v8/include/cppgc/internal/pointer-policies.h b/deps/v8/include/cppgc/internal/pointer-policies.h
index 58f2515a3d..ea86a0a705 100644
--- a/deps/v8/include/cppgc/internal/pointer-policies.h
+++ b/deps/v8/include/cppgc/internal/pointer-policies.h
@@ -105,22 +105,22 @@ using DefaultLocationPolicy = IgnoreLocationPolicy;
struct StrongPersistentPolicy {
using IsStrongPersistent = std::true_type;
- static V8_EXPORT PersistentRegion& GetPersistentRegion(void* object);
+ static V8_EXPORT PersistentRegion& GetPersistentRegion(const void* object);
};
struct WeakPersistentPolicy {
using IsStrongPersistent = std::false_type;
- static V8_EXPORT PersistentRegion& GetPersistentRegion(void* object);
+ static V8_EXPORT PersistentRegion& GetPersistentRegion(const void* object);
};
struct StrongCrossThreadPersistentPolicy {
using IsStrongPersistent = std::true_type;
- static V8_EXPORT PersistentRegion& GetPersistentRegion(void* object);
+ static V8_EXPORT PersistentRegion& GetPersistentRegion(const void* object);
};
struct WeakCrossThreadPersistentPolicy {
using IsStrongPersistent = std::false_type;
- static V8_EXPORT PersistentRegion& GetPersistentRegion(void* object);
+ static V8_EXPORT PersistentRegion& GetPersistentRegion(const void* object);
};
// Forward declarations setting up the default policies.
@@ -136,23 +136,8 @@ template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy = DefaultCheckingPolicy>
class BasicMember;
-// Special tag type used to denote some sentinel member. The semantics of the
-// sentinel is defined by the embedder.
-struct SentinelPointer {
- template <typename T>
- operator T*() const { // NOLINT
- static constexpr intptr_t kSentinelValue = 1;
- return reinterpret_cast<T*>(kSentinelValue);
- }
- // Hidden friends.
- friend bool operator==(SentinelPointer, SentinelPointer) { return true; }
- friend bool operator!=(SentinelPointer, SentinelPointer) { return false; }
-};
-
} // namespace internal
-constexpr internal::SentinelPointer kSentinelPointer;
-
} // namespace cppgc
#endif // INCLUDE_CPPGC_INTERNAL_POINTER_POLICIES_H_
diff --git a/deps/v8/include/cppgc/internal/process-heap.h b/deps/v8/include/cppgc/internal/process-heap.h
deleted file mode 100644
index 0f742a50a9..0000000000
--- a/deps/v8/include/cppgc/internal/process-heap.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef INCLUDE_CPPGC_INTERNAL_PROCESS_HEAP_H_
-#define INCLUDE_CPPGC_INTERNAL_PROCESS_HEAP_H_
-
-#include "cppgc/internal/atomic-entry-flag.h"
-#include "v8config.h" // NOLINT(build/include_directory)
-
-namespace cppgc {
-namespace internal {
-
-class V8_EXPORT ProcessHeap final {
- public:
- static void EnterIncrementalOrConcurrentMarking() {
- concurrent_marking_flag_.Enter();
- }
- static void ExitIncrementalOrConcurrentMarking() {
- concurrent_marking_flag_.Exit();
- }
-
- static bool IsAnyIncrementalOrConcurrentMarking() {
- return concurrent_marking_flag_.MightBeEntered();
- }
-
- private:
- static AtomicEntryFlag concurrent_marking_flag_;
-};
-
-} // namespace internal
-} // namespace cppgc
-
-#endif // INCLUDE_CPPGC_INTERNAL_PROCESS_HEAP_H_
diff --git a/deps/v8/include/cppgc/internal/write-barrier.h b/deps/v8/include/cppgc/internal/write-barrier.h
index e3cc4c989d..f3aaedb1b8 100644
--- a/deps/v8/include/cppgc/internal/write-barrier.h
+++ b/deps/v8/include/cppgc/internal/write-barrier.h
@@ -5,8 +5,10 @@
#ifndef INCLUDE_CPPGC_INTERNAL_WRITE_BARRIER_H_
#define INCLUDE_CPPGC_INTERNAL_WRITE_BARRIER_H_
+#include "cppgc/heap-state.h"
#include "cppgc/internal/api-constants.h"
-#include "cppgc/internal/process-heap.h"
+#include "cppgc/internal/atomic-entry-flag.h"
+#include "cppgc/sentinel-pointer.h"
#include "cppgc/trace-trait.h"
#include "v8config.h" // NOLINT(build/include_directory)
@@ -32,17 +34,17 @@ class V8_EXPORT WriteBarrier final {
};
struct Params {
+ HeapHandle* heap = nullptr;
#if V8_ENABLE_CHECKS
Type type = Type::kNone;
#endif // !V8_ENABLE_CHECKS
#if defined(CPPGC_CAGED_HEAP)
- uintptr_t start;
-
+ uintptr_t start = 0;
CagedHeapLocalData& caged_heap() const {
return *reinterpret_cast<CagedHeapLocalData*>(start);
}
- uintptr_t slot_offset;
- uintptr_t value_offset;
+ uintptr_t slot_offset = 0;
+ uintptr_t value_offset = 0;
#endif // CPPGC_CAGED_HEAP
};
@@ -55,14 +57,19 @@ class V8_EXPORT WriteBarrier final {
static V8_INLINE Type GetWriteBarrierType(const void* slot, const void* value,
Params& params);
// Returns the required write barrier for a given `slot`.
- static V8_INLINE Type GetWriteBarrierType(const void* slot, Params& params);
+ template <typename HeapHandleCallback>
+ static V8_INLINE Type GetWriteBarrierType(const void* slot, Params& params,
+ HeapHandleCallback callback);
+
+ template <typename HeapHandleCallback>
+ static V8_INLINE Type GetWriteBarrierTypeForExternallyReferencedObject(
+ const void* value, Params& params, HeapHandleCallback callback);
static V8_INLINE void DijkstraMarkingBarrier(const Params& params,
const void* object);
static V8_INLINE void DijkstraMarkingBarrierRange(
- const Params& params, HeapHandle& heap, const void* first_element,
- size_t element_size, size_t number_of_elements,
- TraceCallback trace_callback);
+ const Params& params, const void* first_element, size_t element_size,
+ size_t number_of_elements, TraceCallback trace_callback);
static V8_INLINE void SteeleMarkingBarrier(const Params& params,
const void* object);
#if defined(CPPGC_YOUNG_GENERATION)
@@ -79,6 +86,13 @@ class V8_EXPORT WriteBarrier final {
static void CheckParams(Type expected_type, const Params& params) {}
#endif // !V8_ENABLE_CHECKS
+  // The IncrementalOrConcurrentMarkingFlagUpdater class allows cppgc
+  // internals to update |incremental_or_concurrent_marking_flag_|.
+ class IncrementalOrConcurrentMarkingFlagUpdater;
+ static bool IsAnyIncrementalOrConcurrentMarking() {
+ return incremental_or_concurrent_marking_flag_.MightBeEntered();
+ }
+
private:
WriteBarrier() = delete;
@@ -99,50 +113,52 @@ class V8_EXPORT WriteBarrier final {
static void SteeleMarkingBarrierSlowWithSentinelCheck(const void* value);
#if defined(CPPGC_YOUNG_GENERATION)
+ static CagedHeapLocalData& GetLocalData(HeapHandle&);
static void GenerationalBarrierSlow(const CagedHeapLocalData& local_data,
const AgeTable& ageTable,
const void* slot, uintptr_t value_offset);
#endif // CPPGC_YOUNG_GENERATION
+
+ static AtomicEntryFlag incremental_or_concurrent_marking_flag_;
};
+template <WriteBarrier::Type type>
+V8_INLINE WriteBarrier::Type SetAndReturnType(WriteBarrier::Params& params) {
+ if (type == WriteBarrier::Type::kNone) return WriteBarrier::Type::kNone;
+#if V8_ENABLE_CHECKS
+ params.type = type;
+#endif // !V8_ENABLE_CHECKS
+ return type;
+}
+
#if defined(CPPGC_CAGED_HEAP)
-class WriteBarrierTypeForCagedHeapPolicy final {
+class V8_EXPORT WriteBarrierTypeForCagedHeapPolicy final {
public:
- template <WriteBarrier::ValueMode value_mode>
+ template <WriteBarrier::ValueMode value_mode, typename HeapHandleCallback>
static V8_INLINE WriteBarrier::Type Get(const void* slot, const void* value,
- WriteBarrier::Params& params) {
- const bool have_caged_heap =
- value_mode == WriteBarrier::ValueMode::kValuePresent
- ? TryGetCagedHeap(slot, value, params)
- : TryGetCagedHeap(slot, slot, params);
- if (!have_caged_heap) {
+ WriteBarrier::Params& params,
+ HeapHandleCallback callback) {
+ return ValueModeDispatch<value_mode>::Get(slot, value, params, callback);
+ }
+
+ template <typename HeapHandleCallback>
+ static V8_INLINE WriteBarrier::Type GetForExternallyReferenced(
+ const void* value, WriteBarrier::Params& params, HeapHandleCallback) {
+ if (!TryGetCagedHeap(value, value, params)) {
return WriteBarrier::Type::kNone;
}
- if (V8_UNLIKELY(params.caged_heap().is_marking_in_progress)) {
-#if V8_ENABLE_CHECKS
- params.type = WriteBarrier::Type::kMarking;
-#endif // !V8_ENABLE_CHECKS
- return WriteBarrier::Type::kMarking;
- }
-#if defined(CPPGC_YOUNG_GENERATION)
- params.slot_offset = reinterpret_cast<uintptr_t>(slot) - params.start;
- if (value_mode == WriteBarrier::ValueMode::kValuePresent) {
- params.value_offset = reinterpret_cast<uintptr_t>(value) - params.start;
- } else {
- params.value_offset = 0;
+ if (V8_UNLIKELY(params.caged_heap().is_incremental_marking_in_progress)) {
+ return SetAndReturnType<WriteBarrier::Type::kMarking>(params);
}
-#if V8_ENABLE_CHECKS
- params.type = WriteBarrier::Type::kGenerational;
-#endif // !V8_ENABLE_CHECKS
- return WriteBarrier::Type::kGenerational;
-#else // !CPPGC_YOUNG_GENERATION
- return WriteBarrier::Type::kNone;
-#endif // !CPPGC_YOUNG_GENERATION
+ return SetAndReturnType<WriteBarrier::Type::kNone>(params);
}
private:
WriteBarrierTypeForCagedHeapPolicy() = delete;
+ template <WriteBarrier::ValueMode value_mode>
+ struct ValueModeDispatch;
+
static V8_INLINE bool TryGetCagedHeap(const void* slot, const void* value,
WriteBarrier::Params& params) {
params.start = reinterpret_cast<uintptr_t>(value) &
@@ -156,40 +172,165 @@ class WriteBarrierTypeForCagedHeapPolicy final {
}
return true;
}
+
+  // Returns whether marking is in progress. If marking is not in progress,
+ // sets the start of the cage accordingly.
+ //
+ // TODO(chromium:1056170): Create fast path on API.
+ static bool IsMarking(const HeapHandle&, WriteBarrier::Params&);
+};
+
+template <>
+struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch<
+ WriteBarrier::ValueMode::kValuePresent> {
+ template <typename HeapHandleCallback>
+ static V8_INLINE WriteBarrier::Type Get(const void* slot, const void* value,
+ WriteBarrier::Params& params,
+ HeapHandleCallback) {
+ bool within_cage = TryGetCagedHeap(slot, value, params);
+ if (!within_cage) {
+ return WriteBarrier::Type::kNone;
+ }
+ if (V8_LIKELY(!params.caged_heap().is_incremental_marking_in_progress)) {
+#if defined(CPPGC_YOUNG_GENERATION)
+ params.heap = reinterpret_cast<HeapHandle*>(params.start);
+ params.slot_offset = reinterpret_cast<uintptr_t>(slot) - params.start;
+ params.value_offset = reinterpret_cast<uintptr_t>(value) - params.start;
+ return SetAndReturnType<WriteBarrier::Type::kGenerational>(params);
+#else // !CPPGC_YOUNG_GENERATION
+ return SetAndReturnType<WriteBarrier::Type::kNone>(params);
+#endif // !CPPGC_YOUNG_GENERATION
+ }
+ params.heap = reinterpret_cast<HeapHandle*>(params.start);
+ return SetAndReturnType<WriteBarrier::Type::kMarking>(params);
+ }
};
+
+template <>
+struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch<
+ WriteBarrier::ValueMode::kNoValuePresent> {
+ template <typename HeapHandleCallback>
+ static V8_INLINE WriteBarrier::Type Get(const void* slot, const void*,
+ WriteBarrier::Params& params,
+ HeapHandleCallback callback) {
+#if defined(CPPGC_YOUNG_GENERATION)
+ HeapHandle& handle = callback();
+ if (V8_LIKELY(!IsMarking(handle, params))) {
+ // params.start is populated by IsMarking().
+ params.heap = &handle;
+ params.slot_offset = reinterpret_cast<uintptr_t>(slot) - params.start;
+ // params.value_offset stays 0.
+ if (params.slot_offset > api_constants::kCagedHeapReservationSize) {
+ // Check if slot is on stack.
+ return SetAndReturnType<WriteBarrier::Type::kNone>(params);
+ }
+ return SetAndReturnType<WriteBarrier::Type::kGenerational>(params);
+ }
+#else // !CPPGC_YOUNG_GENERATION
+ if (V8_LIKELY(!WriteBarrier::IsAnyIncrementalOrConcurrentMarking())) {
+ return SetAndReturnType<WriteBarrier::Type::kNone>(params);
+ }
+ HeapHandle& handle = callback();
+ if (V8_UNLIKELY(!subtle::HeapState::IsMarking(handle))) {
+ return SetAndReturnType<WriteBarrier::Type::kNone>(params);
+ }
+#endif // !CPPGC_YOUNG_GENERATION
+ params.heap = &handle;
+ return SetAndReturnType<WriteBarrier::Type::kMarking>(params);
+ }
+};
+
#endif // CPPGC_CAGED_HEAP
-class WriteBarrierTypeForNonCagedHeapPolicy final {
+class V8_EXPORT WriteBarrierTypeForNonCagedHeapPolicy final {
public:
- template <WriteBarrier::ValueMode value_mode>
+ template <WriteBarrier::ValueMode value_mode, typename HeapHandleCallback>
static V8_INLINE WriteBarrier::Type Get(const void* slot, const void* value,
- WriteBarrier::Params& params) {
- WriteBarrier::Type type =
- V8_LIKELY(!ProcessHeap::IsAnyIncrementalOrConcurrentMarking())
- ? WriteBarrier::Type::kNone
- : WriteBarrier::Type::kMarking;
-#if V8_ENABLE_CHECKS
- params.type = type;
-#endif // !V8_ENABLE_CHECKS
- return type;
+ WriteBarrier::Params& params,
+ HeapHandleCallback callback) {
+ return ValueModeDispatch<value_mode>::Get(slot, value, params, callback);
+ }
+
+ template <typename HeapHandleCallback>
+ static V8_INLINE WriteBarrier::Type GetForExternallyReferenced(
+ const void* value, WriteBarrier::Params& params,
+ HeapHandleCallback callback) {
+ // The slot will never be used in `Get()` below.
+ return Get<WriteBarrier::ValueMode::kValuePresent>(nullptr, value, params,
+ callback);
}
private:
+ template <WriteBarrier::ValueMode value_mode>
+ struct ValueModeDispatch;
+
+ // TODO(chromium:1056170): Create fast path on API.
+ static bool IsMarking(const void*, HeapHandle**);
+ // TODO(chromium:1056170): Create fast path on API.
+ static bool IsMarking(HeapHandle&);
+
WriteBarrierTypeForNonCagedHeapPolicy() = delete;
};
+template <>
+struct WriteBarrierTypeForNonCagedHeapPolicy::ValueModeDispatch<
+ WriteBarrier::ValueMode::kValuePresent> {
+ template <typename HeapHandleCallback>
+ static V8_INLINE WriteBarrier::Type Get(const void*, const void* object,
+ WriteBarrier::Params& params,
+ HeapHandleCallback callback) {
+ // The following check covers nullptr as well as sentinel pointer.
+ if (object <= static_cast<void*>(kSentinelPointer)) {
+ return WriteBarrier::Type::kNone;
+ }
+ if (IsMarking(object, &params.heap)) {
+ return SetAndReturnType<WriteBarrier::Type::kMarking>(params);
+ }
+ return SetAndReturnType<WriteBarrier::Type::kNone>(params);
+ }
+};
+
+template <>
+struct WriteBarrierTypeForNonCagedHeapPolicy::ValueModeDispatch<
+ WriteBarrier::ValueMode::kNoValuePresent> {
+ template <typename HeapHandleCallback>
+ static V8_INLINE WriteBarrier::Type Get(const void*, const void*,
+ WriteBarrier::Params& params,
+ HeapHandleCallback callback) {
+ if (V8_UNLIKELY(WriteBarrier::IsAnyIncrementalOrConcurrentMarking())) {
+ HeapHandle& handle = callback();
+ if (IsMarking(handle)) {
+ params.heap = &handle;
+ return SetAndReturnType<WriteBarrier::Type::kMarking>(params);
+ }
+ }
+ return WriteBarrier::Type::kNone;
+ }
+};
+
// static
WriteBarrier::Type WriteBarrier::GetWriteBarrierType(
const void* slot, const void* value, WriteBarrier::Params& params) {
return WriteBarrierTypePolicy::Get<ValueMode::kValuePresent>(slot, value,
- params);
+ params, []() {});
}
// static
+template <typename HeapHandleCallback>
WriteBarrier::Type WriteBarrier::GetWriteBarrierType(
- const void* slot, WriteBarrier::Params& params) {
- return WriteBarrierTypePolicy::Get<ValueMode::kNoValuePresent>(slot, nullptr,
- params);
+ const void* slot, WriteBarrier::Params& params,
+ HeapHandleCallback callback) {
+ return WriteBarrierTypePolicy::Get<ValueMode::kNoValuePresent>(
+ slot, nullptr, params, callback);
+}
+
+// static
+template <typename HeapHandleCallback>
+WriteBarrier::Type
+WriteBarrier::GetWriteBarrierTypeForExternallyReferencedObject(
+ const void* value, Params& params, HeapHandleCallback callback) {
+ return WriteBarrierTypePolicy::GetForExternallyReferenced(value, params,
+ callback);
}
// static
@@ -206,13 +347,12 @@ void WriteBarrier::DijkstraMarkingBarrier(const Params& params,
// static
void WriteBarrier::DijkstraMarkingBarrierRange(const Params& params,
- HeapHandle& heap,
const void* first_element,
size_t element_size,
size_t number_of_elements,
TraceCallback trace_callback) {
CheckParams(Type::kMarking, params);
- DijkstraMarkingBarrierRangeSlow(heap, first_element, element_size,
+ DijkstraMarkingBarrierRangeSlow(*params.heap, first_element, element_size,
number_of_elements, trace_callback);
}
diff --git a/deps/v8/include/cppgc/liveness-broker.h b/deps/v8/include/cppgc/liveness-broker.h
index b69a69535b..e449091280 100644
--- a/deps/v8/include/cppgc/liveness-broker.h
+++ b/deps/v8/include/cppgc/liveness-broker.h
@@ -50,6 +50,12 @@ class V8_EXPORT LivenessBroker final {
}
template <typename T>
+ bool IsHeapObjectAlive(const WeakMember<T>& weak_member) const {
+ return (weak_member != kSentinelPointer) &&
+ IsHeapObjectAlive<T>(weak_member.Get());
+ }
+
+ template <typename T>
bool IsHeapObjectAlive(const UntracedMember<T>& untraced_member) const {
return (untraced_member != kSentinelPointer) &&
IsHeapObjectAlive<T>(untraced_member.Get());
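The new WeakMember overload lets weak callbacks query liveness directly on the handle without spelling out Get() and the sentinel check. A short hedged sketch with illustrative names:

    #include "cppgc/garbage-collected.h"
    #include "cppgc/liveness-broker.h"
    #include "cppgc/member.h"

    class Node final : public cppgc::GarbageCollected<Node> {
     public:
      void Trace(cppgc::Visitor*) const {}
    };

    // Sketch of a custom weak callback body. The GC clears dead WeakMembers
    // itself; a callback like this only updates dependent bookkeeping.
    bool EdgeIsLive(const cppgc::LivenessBroker& broker,
                    const cppgc::WeakMember<Node>& weak_edge) {
      return broker.IsHeapObjectAlive(weak_edge);
    }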
diff --git a/deps/v8/include/cppgc/macros.h b/deps/v8/include/cppgc/macros.h
index c0b1814e29..70ab44c657 100644
--- a/deps/v8/include/cppgc/macros.h
+++ b/deps/v8/include/cppgc/macros.h
@@ -5,6 +5,8 @@
#ifndef INCLUDE_CPPGC_MACROS_H_
#define INCLUDE_CPPGC_MACROS_H_
+#include <stddef.h>
+
#include "cppgc/internal/compiler-specific.h"
namespace cppgc {
diff --git a/deps/v8/include/cppgc/member.h b/deps/v8/include/cppgc/member.h
index 84e81251c2..7b76bc4f75 100644
--- a/deps/v8/include/cppgc/member.h
+++ b/deps/v8/include/cppgc/member.h
@@ -10,6 +10,7 @@
#include <type_traits>
#include "cppgc/internal/pointer-policies.h"
+#include "cppgc/sentinel-pointer.h"
#include "cppgc/type-traits.h"
#include "v8config.h" // NOLINT(build/include_directory)
@@ -19,28 +20,30 @@ class Visitor;
namespace internal {
+// MemberBase always refers to the object as a const object and defers to
+// BasicMember for casting to the right type as needed.
class MemberBase {
protected:
MemberBase() = default;
- explicit MemberBase(void* value) : raw_(value) {}
+ explicit MemberBase(const void* value) : raw_(value) {}
- void** GetRawSlot() const { return &raw_; }
- void* GetRaw() const { return raw_; }
+ const void** GetRawSlot() const { return &raw_; }
+ const void* GetRaw() const { return raw_; }
void SetRaw(void* value) { raw_ = value; }
- void* GetRawAtomic() const {
- return reinterpret_cast<const std::atomic<void*>*>(&raw_)->load(
+ const void* GetRawAtomic() const {
+ return reinterpret_cast<const std::atomic<const void*>*>(&raw_)->load(
std::memory_order_relaxed);
}
- void SetRawAtomic(void* value) {
- reinterpret_cast<std::atomic<void*>*>(&raw_)->store(
+ void SetRawAtomic(const void* value) {
+ reinterpret_cast<std::atomic<const void*>*>(&raw_)->store(
value, std::memory_order_relaxed);
}
void ClearFromGC() const { raw_ = nullptr; }
private:
- mutable void* raw_ = nullptr;
+ mutable const void* raw_ = nullptr;
};
// The basic class from which all Member classes are 'generated'.
@@ -167,7 +170,11 @@ class BasicMember final : private MemberBase, private CheckingPolicy {
// based on their actual types.
V8_CLANG_NO_SANITIZE("cfi-unrelated-cast") T* Get() const {
// Executed by the mutator, hence non atomic load.
- return static_cast<T*>(MemberBase::GetRaw());
+ //
+ // The const_cast below removes the constness from MemberBase storage. The
+ // following static_cast re-adds any constness if specified through the
+ // user-visible template parameter T.
+ return static_cast<T*>(const_cast<void*>(MemberBase::GetRaw()));
}
void Clear() { SetRawAtomic(nullptr); }
@@ -179,12 +186,12 @@ class BasicMember final : private MemberBase, private CheckingPolicy {
}
const T** GetSlotForTesting() const {
- return reinterpret_cast<const T**>(const_cast<const void**>(GetRawSlot()));
+ return reinterpret_cast<const T**>(GetRawSlot());
}
private:
- T* GetRawAtomic() const {
- return static_cast<T*>(MemberBase::GetRawAtomic());
+ const T* GetRawAtomic() const {
+ return static_cast<const T*>(MemberBase::GetRawAtomic());
}
void InitializingWriteBarrier() const {
@@ -197,6 +204,8 @@ class BasicMember final : private MemberBase, private CheckingPolicy {
void ClearFromGC() const { MemberBase::ClearFromGC(); }
friend class cppgc::Visitor;
+ template <typename U>
+ friend struct cppgc::TraceTrait;
};
template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
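MemberBase now stores a const void* and Get() strips the internal constness before re-applying whatever constness T carries, so typical Member/WeakMember usage stays source-compatible. A minimal sketch with illustrative type names:

    #include "cppgc/garbage-collected.h"
    #include "cppgc/member.h"
    #include "cppgc/visitor.h"

    class Child final : public cppgc::GarbageCollected<Child> {
     public:
      void Trace(cppgc::Visitor*) const {}
    };

    class Parent final : public cppgc::GarbageCollected<Parent> {
     public:
      void Trace(cppgc::Visitor* visitor) const {
        visitor->Trace(strong_);
        visitor->Trace(weak_);
      }

     private:
      cppgc::Member<Child> strong_;    // keeps Child alive
      cppgc::WeakMember<Child> weak_;  // cleared by the GC when Child dies
    };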
diff --git a/deps/v8/include/cppgc/object-size-trait.h b/deps/v8/include/cppgc/object-size-trait.h
new file mode 100644
index 0000000000..35795596d3
--- /dev/null
+++ b/deps/v8/include/cppgc/object-size-trait.h
@@ -0,0 +1,58 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_OBJECT_SIZE_TRAIT_H_
+#define INCLUDE_CPPGC_OBJECT_SIZE_TRAIT_H_
+
+#include <cstddef>
+
+#include "cppgc/type-traits.h"
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+
+namespace internal {
+
+struct V8_EXPORT BaseObjectSizeTrait {
+ protected:
+ static size_t GetObjectSizeForGarbageCollected(const void*);
+ static size_t GetObjectSizeForGarbageCollectedMixin(const void*);
+};
+
+} // namespace internal
+
+namespace subtle {
+
+/**
+ * Trait specifying how to get the size of an object that was allocated using
+ * `MakeGarbageCollected()`. Also supports querying the size with an inner
+ * pointer to a mixin.
+ */
+template <typename T, bool = IsGarbageCollectedMixinTypeV<T>>
+struct ObjectSizeTrait;
+
+template <typename T>
+struct ObjectSizeTrait<T, false> : cppgc::internal::BaseObjectSizeTrait {
+ static_assert(sizeof(T), "T must be fully defined");
+ static_assert(IsGarbageCollectedTypeV<T>,
+ "T must be of type GarbageCollected or GarbageCollectedMixin");
+
+ static size_t GetSize(const T& object) {
+ return GetObjectSizeForGarbageCollected(&object);
+ }
+};
+
+template <typename T>
+struct ObjectSizeTrait<T, true> : cppgc::internal::BaseObjectSizeTrait {
+ static_assert(sizeof(T), "T must be fully defined");
+
+ static size_t GetSize(const T& object) {
+ return GetObjectSizeForGarbageCollectedMixin(&object);
+ }
+};
+
+} // namespace subtle
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_OBJECT_SIZE_TRAIT_H_
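A sketch of the new size query; MyObject stands in for any type allocated with MakeGarbageCollected():

    #include <cstddef>

    #include "cppgc/garbage-collected.h"
    #include "cppgc/object-size-trait.h"

    class MyObject final : public cppgc::GarbageCollected<MyObject> {
     public:
      void Trace(cppgc::Visitor*) const {}
    };

    size_t AllocatedSize(const MyObject& object) {
      // Also works through the mixin specialization for inner pointers to
      // GarbageCollectedMixin bases.
      return cppgc::subtle::ObjectSizeTrait<MyObject>::GetSize(object);
    }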
diff --git a/deps/v8/include/cppgc/persistent.h b/deps/v8/include/cppgc/persistent.h
index c2d8a7a5a6..d7aac723c0 100644
--- a/deps/v8/include/cppgc/persistent.h
+++ b/deps/v8/include/cppgc/persistent.h
@@ -9,6 +9,7 @@
#include "cppgc/internal/persistent-node.h"
#include "cppgc/internal/pointer-policies.h"
+#include "cppgc/sentinel-pointer.h"
#include "cppgc/source-location.h"
#include "cppgc/type-traits.h"
#include "cppgc/visitor.h"
@@ -20,13 +21,15 @@ class Visitor;
namespace internal {
+// PersistentBase always refers to the object as a const object and defers to
+// BasicPersistent for casting to the right type as needed.
class PersistentBase {
protected:
PersistentBase() = default;
- explicit PersistentBase(void* raw) : raw_(raw) {}
+ explicit PersistentBase(const void* raw) : raw_(raw) {}
- void* GetValue() const { return raw_; }
- void SetValue(void* value) { raw_ = value; }
+ const void* GetValue() const { return raw_; }
+ void SetValue(const void* value) { raw_ = value; }
PersistentNode* GetNode() const { return node_; }
void SetNode(PersistentNode* node) { node_ = node; }
@@ -39,7 +42,7 @@ class PersistentBase {
}
private:
- mutable void* raw_ = nullptr;
+ mutable const void* raw_ = nullptr;
mutable PersistentNode* node_ = nullptr;
friend class PersistentRegion;
@@ -178,7 +181,7 @@ class BasicPersistent final : public PersistentBase,
}
explicit operator bool() const { return Get(); }
- operator T*() const { return Get(); }
+ operator T*() const { return Get(); } // NOLINT
T* operator->() const { return Get(); }
T& operator*() const { return *Get(); }
@@ -186,10 +189,21 @@ class BasicPersistent final : public PersistentBase,
// heterogeneous assignments between different Member and Persistent handles
// based on their actual types.
V8_CLANG_NO_SANITIZE("cfi-unrelated-cast") T* Get() const {
- return static_cast<T*>(GetValue());
+ // The const_cast below removes the constness from PersistentBase storage.
+ // The following static_cast re-adds any constness if specified through the
+ // user-visible template parameter T.
+ return static_cast<T*>(const_cast<void*>(GetValue()));
}
- void Clear() { Assign(nullptr); }
+ void Clear() {
+ // Simplified version of `Assign()` to allow calling without a complete type
+ // `T`.
+ if (IsValid()) {
+ WeaknessPolicy::GetPersistentRegion(GetValue()).FreeNode(GetNode());
+ SetNode(nullptr);
+ }
+ SetValue(nullptr);
+ }
T* Release() {
T* result = Get();
@@ -197,6 +211,16 @@ class BasicPersistent final : public PersistentBase,
return result;
}
+ template <typename U, typename OtherWeaknessPolicy = WeaknessPolicy,
+ typename OtherLocationPolicy = LocationPolicy,
+ typename OtherCheckingPolicy = CheckingPolicy>
+ BasicPersistent<U, OtherWeaknessPolicy, OtherLocationPolicy,
+ OtherCheckingPolicy>
+ To() const {
+ return BasicPersistent<U, OtherWeaknessPolicy, OtherLocationPolicy,
+ OtherCheckingPolicy>(static_cast<U*>(Get()));
+ }
+
private:
static void Trace(Visitor* v, const void* ptr) {
const auto* persistent = static_cast<const BasicPersistent*>(ptr);
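The new To<U>() mirrors the cross-thread variant, and Clear() no longer routes through Assign(), so it can be called where T is only forward-declared. A hedged sketch with an assumed Base/Derived hierarchy:

    #include "cppgc/garbage-collected.h"
    #include "cppgc/persistent.h"

    class Base : public cppgc::GarbageCollected<Base> {
     public:
      void Trace(cppgc::Visitor*) const {}
    };
    class Derived final : public Base {};

    void Example(cppgc::Persistent<Derived>& handle) {
      cppgc::Persistent<Base> as_base = handle.To<Base>();  // upcast copy
      handle.Clear();  // frees the node; |as_base| still keeps the object alive
      (void)as_base;
    }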
diff --git a/deps/v8/include/cppgc/platform.h b/deps/v8/include/cppgc/platform.h
index 571aa80c3b..0d7377668c 100644
--- a/deps/v8/include/cppgc/platform.h
+++ b/deps/v8/include/cppgc/platform.h
@@ -126,11 +126,18 @@ class V8_EXPORT Platform {
/**
* Process-global initialization of the garbage collector. Must be called before
* creating a Heap.
+ *
+ * Can be called multiple times when paired with `ShutdownProcess()`.
+ *
+ * \param page_allocator The allocator used for maintaining metadata. Must not
+ * change between multiple calls to InitializeProcess.
*/
-V8_EXPORT void InitializeProcess(PageAllocator*);
+V8_EXPORT void InitializeProcess(PageAllocator* page_allocator);
/**
- * Must be called after destroying the last used heap.
+ * Must be called after destroying the last used heap. Some process-global
+ * metadata may not be returned and reused upon a subsequent
+ * `InitializeProcess()` call.
*/
V8_EXPORT void ShutdownProcess();
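
A hedged sketch of the documented pairing; `page_allocator` would typically be the embedder's `v8::Platform::GetPageAllocator()` result:

#include "cppgc/platform.h"

void RunWithCppgc(cppgc::PageAllocator* page_allocator) {
  cppgc::InitializeProcess(page_allocator);
  // ... create cppgc::Heap instances, allocate, collect ...
  // Only valid after the last heap has been destroyed; a later
  // InitializeProcess() call with the same allocator is allowed again.
  cppgc::ShutdownProcess();
}
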
diff --git a/deps/v8/include/cppgc/prefinalizer.h b/deps/v8/include/cppgc/prefinalizer.h
index 9b7bc0e594..29b18bef90 100644
--- a/deps/v8/include/cppgc/prefinalizer.h
+++ b/deps/v8/include/cppgc/prefinalizer.h
@@ -34,7 +34,7 @@ class PrefinalizerRegistration final {
public: \
static bool InvokePreFinalizer(const cppgc::LivenessBroker& liveness_broker, \
void* object) { \
- static_assert(cppgc::IsGarbageCollectedTypeV<Class>, \
+ static_assert(cppgc::IsGarbageCollectedOrMixinTypeV<Class>, \
"Only garbage collected objects can have prefinalizers"); \
Class* self = static_cast<Class*>(object); \
if (liveness_broker.IsHeapObjectAlive(self)) return false; \
diff --git a/deps/v8/include/cppgc/process-heap-statistics.h b/deps/v8/include/cppgc/process-heap-statistics.h
new file mode 100644
index 0000000000..774cc92f46
--- /dev/null
+++ b/deps/v8/include/cppgc/process-heap-statistics.h
@@ -0,0 +1,36 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_PROCESS_HEAP_STATISTICS_H_
+#define INCLUDE_CPPGC_PROCESS_HEAP_STATISTICS_H_
+
+#include <atomic>
+#include <cstddef>
+
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+namespace internal {
+class ProcessHeapStatisticsUpdater;
+} // namespace internal
+
+class V8_EXPORT ProcessHeapStatistics final {
+ public:
+ static size_t TotalAllocatedObjectSize() {
+ return total_allocated_object_size_.load(std::memory_order_relaxed);
+ }
+ static size_t TotalAllocatedSpace() {
+ return total_allocated_space_.load(std::memory_order_relaxed);
+ }
+
+ private:
+ static std::atomic_size_t total_allocated_space_;
+ static std::atomic_size_t total_allocated_object_size_;
+
+ friend class internal::ProcessHeapStatisticsUpdater;
+};
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_PROCESS_HEAP_STATISTICS_H_
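
The counters are process-wide and updated with relaxed atomics, so reading them only gives an approximate snapshot. A minimal sketch:

#include <cstdio>

#include "cppgc/process-heap-statistics.h"

void LogProcessHeapUsage() {
  // Both values are aggregated across all cppgc heaps in the process.
  std::printf("cppgc: %zu object bytes in %zu bytes of reserved space\n",
              cppgc::ProcessHeapStatistics::TotalAllocatedObjectSize(),
              cppgc::ProcessHeapStatistics::TotalAllocatedSpace());
}
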
diff --git a/deps/v8/include/cppgc/sentinel-pointer.h b/deps/v8/include/cppgc/sentinel-pointer.h
new file mode 100644
index 0000000000..f7915834e5
--- /dev/null
+++ b/deps/v8/include/cppgc/sentinel-pointer.h
@@ -0,0 +1,32 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_SENTINEL_POINTER_H_
+#define INCLUDE_CPPGC_SENTINEL_POINTER_H_
+
+#include <cstdint>
+
+namespace cppgc {
+namespace internal {
+
+// Special tag type used to denote some sentinel member. The semantics of the
+// sentinel is defined by the embedder.
+struct SentinelPointer {
+ template <typename T>
+ operator T*() const { // NOLINT
+ static constexpr intptr_t kSentinelValue = 1;
+ return reinterpret_cast<T*>(kSentinelValue);
+ }
+ // Hidden friends.
+ friend bool operator==(SentinelPointer, SentinelPointer) { return true; }
+ friend bool operator!=(SentinelPointer, SentinelPointer) { return false; }
+};
+
+} // namespace internal
+
+constexpr internal::SentinelPointer kSentinelPointer;
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_SENTINEL_POINTER_H_
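
`kSentinelPointer` converts to any pointer type and compares equal to itself, which lets embedders distinguish a deliberately cleared slot from one that was never set. A sketch with a hypothetical cache slot:

#include "cppgc/sentinel-pointer.h"

struct Slot {
  int* value = nullptr;
  // The sentinel's meaning is embedder-defined; here it marks deletion.
  void MarkDeleted() { value = cppgc::kSentinelPointer; }
  bool IsDeleted() const { return value == cppgc::kSentinelPointer; }
};
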
diff --git a/deps/v8/include/cppgc/testing.h b/deps/v8/include/cppgc/testing.h
new file mode 100644
index 0000000000..f93897a9aa
--- /dev/null
+++ b/deps/v8/include/cppgc/testing.h
@@ -0,0 +1,50 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_TESTING_H_
+#define INCLUDE_CPPGC_TESTING_H_
+
+#include "cppgc/common.h"
+#include "cppgc/macros.h"
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+
+class HeapHandle;
+
+/**
+ * Namespace contains testing helpers.
+ */
+namespace testing {
+
+/**
+ * Overrides the state of the stack with the provided value. Takes precedence
+ * over other parameters that set the stack state. Must not be nested.
+ */
+class V8_EXPORT V8_NODISCARD OverrideEmbedderStackStateScope final {
+ CPPGC_STACK_ALLOCATED();
+
+ public:
+ /**
+ * Constructs a scoped object that automatically enters and leaves the scope.
+ *
+ * \param heap_handle The corresponding heap.
+ */
+ explicit OverrideEmbedderStackStateScope(HeapHandle& heap_handle,
+ EmbedderStackState state);
+ ~OverrideEmbedderStackStateScope();
+
+ OverrideEmbedderStackStateScope(const OverrideEmbedderStackStateScope&) =
+ delete;
+ OverrideEmbedderStackStateScope& operator=(
+ const OverrideEmbedderStackStateScope&) = delete;
+
+ private:
+ HeapHandle& heap_handle_;
+};
+
+} // namespace testing
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_TESTING_H_
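
A sketch of how a test might use the new scope; `heap_handle` would come from `cppgc::Heap::GetHeapHandle()` (or `v8::CppHeap::GetHeapHandle()` for the unified heap):

#include "cppgc/common.h"
#include "cppgc/testing.h"

void CollectWithoutStackScan(cppgc::HeapHandle& heap_handle) {
  // Treat the stack as free of heap pointers so conservative scanning does
  // not keep otherwise unreachable test objects alive.
  cppgc::testing::OverrideEmbedderStackStateScope no_stack(
      heap_handle, cppgc::EmbedderStackState::kNoHeapPointers);
  // ... trigger a garbage collection within this scope ...
}
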
diff --git a/deps/v8/include/cppgc/trace-trait.h b/deps/v8/include/cppgc/trace-trait.h
index e33d3ad36b..83619b1d51 100644
--- a/deps/v8/include/cppgc/trace-trait.h
+++ b/deps/v8/include/cppgc/trace-trait.h
@@ -96,6 +96,8 @@ namespace internal {
template <typename T>
struct TraceTraitImpl<T, false> {
+ static_assert(IsGarbageCollectedTypeV<T>,
+ "T must be of type GarbageCollected or GarbageCollectedMixin");
static TraceDescriptor GetTraceDescriptor(const void* self) {
return {self, TraceTrait<T>::Trace};
}
diff --git a/deps/v8/include/cppgc/type-traits.h b/deps/v8/include/cppgc/type-traits.h
index c7d02db902..2b50a2164b 100644
--- a/deps/v8/include/cppgc/type-traits.h
+++ b/deps/v8/include/cppgc/type-traits.h
@@ -65,12 +65,12 @@ template <typename T>
constexpr bool IsTraceableV = IsTraceable<T>::value;
template <typename T, typename = void>
-struct IsGarbageCollectedMixinType : std::false_type {
+struct HasGarbageCollectedMixinTypeMarker : std::false_type {
static_assert(sizeof(T), "T must be fully defined");
};
template <typename T>
-struct IsGarbageCollectedMixinType<
+struct HasGarbageCollectedMixinTypeMarker<
T,
void_t<typename std::remove_const_t<T>::IsGarbageCollectedMixinTypeMarker>>
: std::true_type {
@@ -78,17 +78,56 @@ struct IsGarbageCollectedMixinType<
};
template <typename T, typename = void>
-struct IsGarbageCollectedType : IsGarbageCollectedMixinType<T> {
+struct HasGarbageCollectedTypeMarker : std::false_type {
static_assert(sizeof(T), "T must be fully defined");
};
template <typename T>
-struct IsGarbageCollectedType<
+struct HasGarbageCollectedTypeMarker<
T, void_t<typename std::remove_const_t<T>::IsGarbageCollectedTypeMarker>>
: std::true_type {
static_assert(sizeof(T), "T must be fully defined");
};
+template <typename T, bool = HasGarbageCollectedTypeMarker<T>::value,
+ bool = HasGarbageCollectedMixinTypeMarker<T>::value>
+struct IsGarbageCollectedMixinType : std::false_type {
+ static_assert(sizeof(T), "T must be fully defined");
+};
+
+template <typename T>
+struct IsGarbageCollectedMixinType<T, false, true> : std::true_type {
+ static_assert(sizeof(T), "T must be fully defined");
+};
+
+template <typename T, bool = HasGarbageCollectedTypeMarker<T>::value>
+struct IsGarbageCollectedType : std::false_type {
+ static_assert(sizeof(T), "T must be fully defined");
+};
+
+template <typename T>
+struct IsGarbageCollectedType<T, true> : std::true_type {
+ static_assert(sizeof(T), "T must be fully defined");
+};
+
+template <typename T>
+struct IsGarbageCollectedOrMixinType
+ : std::integral_constant<bool, IsGarbageCollectedType<T>::value ||
+ IsGarbageCollectedMixinType<T>::value> {
+ static_assert(sizeof(T), "T must be fully defined");
+};
+
+template <typename T, bool = (HasGarbageCollectedTypeMarker<T>::value &&
+ HasGarbageCollectedMixinTypeMarker<T>::value)>
+struct IsGarbageCollectedWithMixinType : std::false_type {
+ static_assert(sizeof(T), "T must be fully defined");
+};
+
+template <typename T>
+struct IsGarbageCollectedWithMixinType<T, true> : std::true_type {
+ static_assert(sizeof(T), "T must be fully defined");
+};
+
template <typename BasicMemberCandidate, typename WeaknessTag,
typename WriteBarrierPolicy>
struct IsSubclassOfBasicMemberTemplate {
@@ -127,18 +166,60 @@ struct IsUntracedMemberType<T, true> : std::true_type {};
} // namespace internal
+/**
+ * Value is true for types that inherit from `GarbageCollectedMixin` but not
+ * `GarbageCollected<T>` (i.e., they are free mixins), and false otherwise.
+ */
template <typename T>
constexpr bool IsGarbageCollectedMixinTypeV =
internal::IsGarbageCollectedMixinType<T>::value;
+
+/**
+ * Value is true for types that inherit from `GarbageCollected<T>`, and false
+ * otherwise.
+ */
template <typename T>
constexpr bool IsGarbageCollectedTypeV =
internal::IsGarbageCollectedType<T>::value;
+
+/**
+ * Value is true for types that inherit from either `GarbageCollected<T>` or
+ * `GarbageCollectedMixin`, and false otherwise.
+ */
+template <typename T>
+constexpr bool IsGarbageCollectedOrMixinTypeV =
+ internal::IsGarbageCollectedOrMixinType<T>::value;
+
+/**
+ * Value is true for types that inherit from `GarbageCollected<T>` and
+ * `GarbageCollectedMixin`, and false otherwise.
+ */
+template <typename T>
+constexpr bool IsGarbageCollectedWithMixinTypeV =
+ internal::IsGarbageCollectedWithMixinType<T>::value;
+
+/**
+ * Value is true for types of type `Member<T>`, and false otherwise.
+ */
template <typename T>
constexpr bool IsMemberTypeV = internal::IsMemberType<T>::value;
+
+/**
+ * Value is true for types of type `UntracedMember<T>`, and false otherwise.
+ */
template <typename T>
constexpr bool IsUntracedMemberTypeV = internal::IsUntracedMemberType<T>::value;
+
+/**
+ * Value is true for types of type `WeakMember<T>`, and false otherwise.
+ */
template <typename T>
constexpr bool IsWeakMemberTypeV = internal::IsWeakMemberType<T>::value;
+
+/**
+ * Value is true for types that are considered weak references, and false
+ * otherwise.
+ */
template <typename T>
constexpr bool IsWeakV = internal::IsWeak<T>::value;
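
The refined traits distinguish plain garbage-collected types, free mixins, and types that are both. A compile-time sketch with hypothetical types:

#include "cppgc/garbage-collected.h"
#include "cppgc/type-traits.h"

class Mixin : public cppgc::GarbageCollectedMixin {};
class GCed : public cppgc::GarbageCollected<GCed>, public Mixin {
 public:
  void Trace(cppgc::Visitor*) const override {}
};

static_assert(cppgc::IsGarbageCollectedTypeV<GCed>, "inherits GarbageCollected");
static_assert(cppgc::IsGarbageCollectedMixinTypeV<Mixin>, "a free mixin");
static_assert(!cppgc::IsGarbageCollectedMixinTypeV<GCed>,
              "true only for free mixins");
static_assert(cppgc::IsGarbageCollectedOrMixinTypeV<Mixin>, "either kind");
static_assert(cppgc::IsGarbageCollectedWithMixinTypeV<GCed>,
              "inherits both GarbageCollected and a mixin");
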
diff --git a/deps/v8/include/cppgc/visitor.h b/deps/v8/include/cppgc/visitor.h
index 74024c3d0e..95fd5fc842 100644
--- a/deps/v8/include/cppgc/visitor.h
+++ b/deps/v8/include/cppgc/visitor.h
@@ -62,6 +62,22 @@ class V8_EXPORT Visitor {
virtual ~Visitor() = default;
/**
+ * Trace method for raw pointers. Prefer the versions for managed pointers.
+ *
+ * \param t Raw pointer retaining an object.
+ */
+ template <typename T>
+ void Trace(const T* t) {
+ static_assert(sizeof(T), "Pointee type must be fully defined.");
+ static_assert(internal::IsGarbageCollectedOrMixinType<T>::value,
+ "T must be GarbageCollected or GarbageCollectedMixin type");
+ if (!t) {
+ return;
+ }
+ Visit(t, TraceTrait<T>::GetTraceDescriptor(t));
+ }
+
+ /**
* Trace method for Member.
*
* \param member Member reference retaining an object.
@@ -81,7 +97,7 @@ class V8_EXPORT Visitor {
template <typename T>
void Trace(const WeakMember<T>& weak_member) {
static_assert(sizeof(T), "Pointee type must be fully defined.");
- static_assert(internal::IsGarbageCollectedType<T>::value,
+ static_assert(internal::IsGarbageCollectedOrMixinType<T>::value,
"T must be GarbageCollected or GarbageCollectedMixin type");
static_assert(!internal::IsAllocatedOnCompactableSpace<T>::value,
"Weak references to compactable objects are not allowed");
@@ -135,7 +151,10 @@ class V8_EXPORT Visitor {
*/
template <typename K, typename V>
void Trace(const EphemeronPair<K, V>& ephemeron_pair) {
- TraceEphemeron(ephemeron_pair.key, ephemeron_pair.value.GetRawAtomic());
+ TraceEphemeron(ephemeron_pair.key, &ephemeron_pair.value);
+ RegisterWeakCallbackMethod<EphemeronPair<K, V>,
+ &EphemeronPair<K, V>::ClearValueIfKeyIsDead>(
+ &ephemeron_pair);
}
/**
@@ -147,8 +166,14 @@ class V8_EXPORT Visitor {
*/
template <typename K, typename V>
void TraceEphemeron(const WeakMember<K>& key, const V* value) {
+ const K* k = key.GetRawAtomic();
+ if (!k) return;
TraceDescriptor value_desc = TraceTrait<V>::GetTraceDescriptor(value);
- VisitEphemeron(key, value_desc);
+ // `value` must always be non-null. `value_desc.base_object_payload` may be
+ // null in the case that value is not a garbage-collected object but only
+ // traceable.
+ CPPGC_DCHECK(value);
+ VisitEphemeron(key, value, value_desc);
}
/**
@@ -229,7 +254,8 @@ class V8_EXPORT Visitor {
virtual void VisitRoot(const void*, TraceDescriptor, const SourceLocation&) {}
virtual void VisitWeakRoot(const void* self, TraceDescriptor, WeakCallback,
const void* weak_root, const SourceLocation&) {}
- virtual void VisitEphemeron(const void* key, TraceDescriptor value_desc) {}
+ virtual void VisitEphemeron(const void* key, const void* value,
+ TraceDescriptor value_desc) {}
virtual void VisitWeakContainer(const void* self, TraceDescriptor strong_desc,
TraceDescriptor weak_desc,
WeakCallback callback, const void* data) {}
@@ -261,7 +287,7 @@ class V8_EXPORT Visitor {
using PointeeType = typename Persistent::PointeeType;
static_assert(sizeof(PointeeType),
"Persistent's pointee type must be fully defined");
- static_assert(internal::IsGarbageCollectedType<PointeeType>::value,
+ static_assert(internal::IsGarbageCollectedOrMixinType<PointeeType>::value,
"Persistent's pointee type must be GarbageCollected or "
"GarbageCollectedMixin");
if (!p.Get()) {
@@ -278,7 +304,7 @@ class V8_EXPORT Visitor {
using PointeeType = typename WeakPersistent::PointeeType;
static_assert(sizeof(PointeeType),
"Persistent's pointee type must be fully defined");
- static_assert(internal::IsGarbageCollectedType<PointeeType>::value,
+ static_assert(internal::IsGarbageCollectedOrMixinType<PointeeType>::value,
"Persistent's pointee type must be GarbageCollected or "
"GarbageCollectedMixin");
static_assert(!internal::IsAllocatedOnCompactableSpace<PointeeType>::value,
@@ -287,17 +313,6 @@ class V8_EXPORT Visitor {
&HandleWeak<WeakPersistent>, &p, loc);
}
- template <typename T>
- void Trace(const T* t) {
- static_assert(sizeof(T), "Pointee type must be fully defined.");
- static_assert(internal::IsGarbageCollectedType<T>::value,
- "T must be GarbageCollected or GarbageCollectedMixin type");
- if (!t) {
- return;
- }
- Visit(t, TraceTrait<T>::GetTraceDescriptor(t));
- }
-
#if V8_ENABLE_CHECKS
void CheckObjectNotInConstruction(const void* address);
#endif // V8_ENABLE_CHECKS
@@ -312,6 +327,14 @@ class V8_EXPORT Visitor {
friend class internal::VisitorBase;
};
+template <typename T>
+struct TraceTrait<Member<T>> {
+ static TraceDescriptor GetTraceDescriptor(const void* self) {
+ return TraceTrait<T>::GetTraceDescriptor(
+ static_cast<const Member<T>*>(self)->GetRawAtomic());
+ }
+};
+
} // namespace cppgc
#endif // INCLUDE_CPPGC_VISITOR_H_
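
A sketch of a Trace method exercising the overloads touched above: the now-public raw-pointer overload, Member tracing, and the EphemeronPair handling that registers the key-dependent clearing callback. All types are hypothetical:

#include "cppgc/ephemeron-pair.h"
#include "cppgc/garbage-collected.h"
#include "cppgc/member.h"
#include "cppgc/visitor.h"

class Value : public cppgc::GarbageCollected<Value> {
 public:
  void Trace(cppgc::Visitor*) const {}
};

class Holder final : public cppgc::GarbageCollected<Holder> {
 public:
  void Trace(cppgc::Visitor* visitor) const {
    visitor->Trace(member_);     // Member<T> overload.
    visitor->Trace(raw_);        // Raw-pointer overload, now public.
    visitor->Trace(ephemeron_);  // Value is traced only while key is alive.
  }

 private:
  cppgc::Member<Value> member_;
  const Value* raw_ = nullptr;  // Strongly marks the pointee when traced.
  cppgc::EphemeronPair<Value, Value> ephemeron_{nullptr, nullptr};
};
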
diff --git a/deps/v8/include/js_protocol.pdl b/deps/v8/include/js_protocol.pdl
index 42470d88ef..666952f27b 100644
--- a/deps/v8/include/js_protocol.pdl
+++ b/deps/v8/include/js_protocol.pdl
@@ -1030,6 +1030,7 @@ domain Runtime
arraybuffer
dataview
webassemblymemory
+ wasmvalue
# Object class (constructor) name. Specified for `object` type values only.
optional string className
# Remote object value in case of primitive values or JSON values (if it was requested).
@@ -1088,6 +1089,7 @@ domain Runtime
arraybuffer
dataview
webassemblymemory
+ wasmvalue
# String representation of the object.
optional string description
# True iff some of the properties or entries of the original object did not fit.
@@ -1136,6 +1138,7 @@ domain Runtime
arraybuffer
dataview
webassemblymemory
+ wasmvalue
experimental type EntryPreview extends object
properties
diff --git a/deps/v8/include/v8-cppgc.h b/deps/v8/include/v8-cppgc.h
index 5a3712dd63..2c22193046 100644
--- a/deps/v8/include/v8-cppgc.h
+++ b/deps/v8/include/v8-cppgc.h
@@ -5,10 +5,12 @@
#ifndef INCLUDE_V8_CPPGC_H_
#define INCLUDE_V8_CPPGC_H_
+#include <cstdint>
#include <memory>
#include <vector>
#include "cppgc/custom-space.h"
+#include "cppgc/heap-statistics.h"
#include "cppgc/internal/write-barrier.h"
#include "cppgc/visitor.h"
#include "v8-internal.h" // NOLINT(build/include_directory)
@@ -25,11 +27,56 @@ namespace internal {
class CppHeap;
} // namespace internal
+/**
+ * Describes how V8 wrapper objects maintain references to garbage-collected C++
+ * objects.
+ */
+struct WrapperDescriptor final {
+ /**
+ * The index used on `v8::Object::SetAlignedPointerFromInternalField()` and
+ * related APIs to add additional data to an object which is used to identify
+ * JS->C++ references.
+ */
+ using InternalFieldIndex = int;
+
+ /**
+ * Unknown embedder id. The value is reserved for internal usage and must not
+ * be used with `CppHeap`.
+ */
+ static constexpr uint16_t kUnknownEmbedderId = UINT16_MAX;
+
+ constexpr WrapperDescriptor(InternalFieldIndex wrappable_type_index,
+ InternalFieldIndex wrappable_instance_index,
+ uint16_t embedder_id_for_garbage_collected)
+ : wrappable_type_index(wrappable_type_index),
+ wrappable_instance_index(wrappable_instance_index),
+ embedder_id_for_garbage_collected(embedder_id_for_garbage_collected) {}
+
+ /**
+ * Index of the wrappable type.
+ */
+ InternalFieldIndex wrappable_type_index;
+
+ /**
+ * Index of the wrappable instance.
+ */
+ InternalFieldIndex wrappable_instance_index;
+
+ /**
+ * Embedder id identifying instances of garbage-collected objects. It is
+ * expected that the first field of the wrappable type is a uint16_t holding
+ * the id. Only references to instances of wrappable types with an id of
+ * `embedder_id_for_garbage_collected` will be considered by CppHeap.
+ */
+ uint16_t embedder_id_for_garbage_collected;
+};
+
struct V8_EXPORT CppHeapCreateParams {
CppHeapCreateParams(const CppHeapCreateParams&) = delete;
CppHeapCreateParams& operator=(const CppHeapCreateParams&) = delete;
std::vector<std::unique_ptr<cppgc::CustomSpaceBase>> custom_spaces;
+ WrapperDescriptor wrapper_descriptor;
};
/**
@@ -37,6 +84,9 @@ struct V8_EXPORT CppHeapCreateParams {
*/
class V8_EXPORT CppHeap {
public:
+ static std::unique_ptr<CppHeap> Create(v8::Platform* platform,
+ const CppHeapCreateParams& params);
+
virtual ~CppHeap() = default;
/**
@@ -51,6 +101,23 @@ class V8_EXPORT CppHeap {
*/
cppgc::HeapHandle& GetHeapHandle();
+ /**
+ * Terminate clears all roots and performs multiple garbage collections to
+ * reclaim potentially newly created objects in destructors.
+ *
+ * After this call, object allocation is prohibited.
+ */
+ void Terminate();
+
+ /**
+ * \param detail_level specifies whether detailed statistics or only a brief
+ * summary should be returned.
+ * \returns current CppHeap statistics regarding memory consumption
+ * and utilization.
+ */
+ cppgc::HeapStatistics CollectStatistics(
+ cppgc::HeapStatistics::DetailLevel detail_level);
+
private:
CppHeap() = default;
@@ -78,7 +145,7 @@ class JSVisitor : public cppgc::Visitor {
* Consistency helpers that aid in maintaining a consistent internal state of
* the garbage collector.
*/
-class JSHeapConsistency final {
+class V8_EXPORT JSHeapConsistency final {
public:
using WriteBarrierParams = cppgc::internal::WriteBarrier::Params;
using WriteBarrierType = cppgc::internal::WriteBarrier::Type;
@@ -86,16 +153,65 @@ class JSHeapConsistency final {
/**
* Gets the required write barrier type for a specific write.
*
+ * Note: Handling for C++ to JS references.
+ *
* \param ref The reference being written to.
* \param params Parameters that may be used for actual write barrier calls.
* Only filled if return value indicates that a write barrier is needed. The
* contents of the `params` are an implementation detail.
+ * \param callback Callback returning the corresponding heap handle. The
+ * callback is only invoked if the heap cannot otherwise be figured out. The
+ * callback must not allocate.
* \returns whether a write barrier is needed and which barrier to invoke.
*/
- static V8_INLINE WriteBarrierType GetWriteBarrierType(
- const TracedReferenceBase& ref, WriteBarrierParams& params) {
+ template <typename HeapHandleCallback>
+ static V8_INLINE WriteBarrierType
+ GetWriteBarrierType(const TracedReferenceBase& ref,
+ WriteBarrierParams& params, HeapHandleCallback callback) {
if (ref.IsEmpty()) return WriteBarrierType::kNone;
- return cppgc::internal::WriteBarrier::GetWriteBarrierType(&ref, params);
+
+ if (V8_LIKELY(!cppgc::internal::WriteBarrier::
+ IsAnyIncrementalOrConcurrentMarking())) {
+ return cppgc::internal::WriteBarrier::Type::kNone;
+ }
+ cppgc::HeapHandle& handle = callback();
+ if (!cppgc::subtle::HeapState::IsMarking(handle)) {
+ return cppgc::internal::WriteBarrier::Type::kNone;
+ }
+ params.heap = &handle;
+#if V8_ENABLE_CHECKS
+ params.type = cppgc::internal::WriteBarrier::Type::kMarking;
+#endif // V8_ENABLE_CHECKS
+ return cppgc::internal::WriteBarrier::Type::kMarking;
+ }
+
+ /**
+ * Gets the required write barrier type for a specific write.
+ *
+ * Note: Handling for JS to C++ references.
+ *
+ * \param wrapper The wrapper that has been written into.
+ * \param wrapper_index The wrapper index in `wrapper` that has been written
+ * into.
+ * \param wrappable The value that was written.
+ * \param params Parameters that may be used for actual write barrier calls.
+ * Only filled if return value indicates that a write barrier is needed. The
+ * contents of the `params` are an implementation detail.
+ * \param callback Callback returning the corresponding heap handle. The
+ * callback is only invoked if the heap cannot otherwise be figured out. The
+ * callback must not allocate.
+ * \returns whether a write barrier is needed and which barrier to invoke.
+ */
+ template <typename HeapHandleCallback>
+ static V8_INLINE WriteBarrierType GetWriteBarrierType(
+ v8::Local<v8::Object>& wrapper, int wrapper_index, const void* wrappable,
+ WriteBarrierParams& params, HeapHandleCallback callback) {
+#if V8_ENABLE_CHECKS
+ CheckWrapper(wrapper, wrapper_index, wrappable);
+#endif // V8_ENABLE_CHECKS
+ return cppgc::internal::WriteBarrier::
+ GetWriteBarrierTypeForExternallyReferencedObject(wrappable, params,
+ callback);
}
/**
@@ -114,6 +230,20 @@ class JSHeapConsistency final {
}
/**
+ * Conservative Dijkstra-style write barrier that processes an object if it
+ * has not yet been processed.
+ *
+ * \param params The parameters retrieved from `GetWriteBarrierType()`.
+ * \param object The pointer to the object. May be an interior pointer to
+ * an interface of the actual object.
+ */
+ static V8_INLINE void DijkstraMarkingBarrier(const WriteBarrierParams& params,
+ cppgc::HeapHandle& heap_handle,
+ const void* object) {
+ cppgc::internal::WriteBarrier::DijkstraMarkingBarrier(params, object);
+ }
+
+ /**
* Generational barrier for maintaining consistency when running with multiple
* generations.
*
@@ -126,6 +256,8 @@ class JSHeapConsistency final {
private:
JSHeapConsistency() = delete;
+ static void CheckWrapper(v8::Local<v8::Object>&, int, const void*);
+
static void DijkstraMarkingBarrierSlow(cppgc::HeapHandle&,
const TracedReferenceBase& ref);
};
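
A sketch of the new embedder flow: create a CppHeap with a WrapperDescriptor and attach it to an isolate (see the AttachCppHeap()/DetachCppHeap() additions to v8.h further down). The field indices and embedder id are illustrative and must match the embedder's wrapper layout:

#include <memory>

#include "v8-cppgc.h"
#include "v8.h"

std::unique_ptr<v8::CppHeap> CreateAndAttachCppHeap(v8::Platform* platform,
                                                    v8::Isolate* isolate) {
  v8::CppHeapCreateParams params{/*custom_spaces=*/{},
                                 v8::WrapperDescriptor(
                                     /*wrappable_type_index=*/0,
                                     /*wrappable_instance_index=*/1,
                                     /*embedder_id_for_garbage_collected=*/1)};
  std::unique_ptr<v8::CppHeap> heap = v8::CppHeap::Create(platform, params);
  isolate->AttachCppHeap(heap.get());  // The embedder retains ownership.
  return heap;  // Must stay alive for as long as it is attached.
}
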
diff --git a/deps/v8/include/v8-fast-api-calls.h b/deps/v8/include/v8-fast-api-calls.h
index 2dea8db271..ca5fc764a3 100644
--- a/deps/v8/include/v8-fast-api-calls.h
+++ b/deps/v8/include/v8-fast-api-calls.h
@@ -148,7 +148,13 @@
* receiver := the {embedder_object} from above
* param := 42
*
- * Currently only void return types are supported.
+ * Currently supported return types:
+ * - void
+ * - bool
+ * - int32_t
+ * - uint32_t
+ * - float32_t
+ * - float64_t
* Currently supported argument types:
* - pointer to an embedder type
* - bool
@@ -187,7 +193,7 @@ namespace v8 {
class CTypeInfo {
public:
- enum class Type : char {
+ enum class Type : uint8_t {
kVoid,
kBool,
kInt32,
@@ -199,57 +205,31 @@ class CTypeInfo {
kV8Value,
};
+ // kCallbackOptionsType and kInvalidType are not part of the Type enum
+ // because they are only used internally. Use the values 255 and 254, which
+ // are larger than any valid Type enum value.
+ static constexpr Type kCallbackOptionsType = Type(255);
+ static constexpr Type kInvalidType = Type(254);
+
enum class ArgFlags : uint8_t {
kNone = 0,
- kIsArrayBit = 1 << 0, // This argument is first in an array of values.
};
- static CTypeInfo FromWrapperType(ArgFlags flags = ArgFlags::kNone) {
- return CTypeInfo(static_cast<int>(flags) | kIsWrapperTypeBit);
- }
-
- static constexpr CTypeInfo FromCType(Type ctype,
- ArgFlags flags = ArgFlags::kNone) {
- // TODO(mslekova): Refactor the manual bit manipulations to use
- // PointerWithPayload instead.
- // ctype cannot be Type::kV8Value.
- return CTypeInfo(
- ((static_cast<uintptr_t>(ctype) << kTypeOffset) & kTypeMask) |
- static_cast<int>(flags));
- }
-
- const void* GetWrapperInfo() const;
+ explicit constexpr CTypeInfo(Type type, ArgFlags flags = ArgFlags::kNone)
+ : type_(type), flags_(flags) {}
- constexpr Type GetType() const {
- if (payload_ & kIsWrapperTypeBit) {
- return Type::kV8Value;
- }
- return static_cast<Type>((payload_ & kTypeMask) >> kTypeOffset);
- }
+ constexpr Type GetType() const { return type_; }
- constexpr bool IsArray() const {
- return payload_ & static_cast<int>(ArgFlags::kIsArrayBit);
- }
+ constexpr ArgFlags GetFlags() const { return flags_; }
static const CTypeInfo& Invalid() {
- static CTypeInfo invalid = CTypeInfo(0);
+ static CTypeInfo invalid = CTypeInfo(kInvalidType);
return invalid;
}
private:
- explicit constexpr CTypeInfo(uintptr_t payload) : payload_(payload) {}
-
- // That must be the last bit after ArgFlags.
- static constexpr uintptr_t kIsWrapperTypeBit = 1 << 1;
- static constexpr uintptr_t kWrapperTypeInfoMask = static_cast<uintptr_t>(~0)
- << 2;
-
- static constexpr unsigned int kTypeOffset = kIsWrapperTypeBit;
- static constexpr unsigned int kTypeSize = 8 - kTypeOffset;
- static constexpr uintptr_t kTypeMask =
- (~(static_cast<uintptr_t>(~0) << kTypeSize)) << kTypeOffset;
-
- const uintptr_t payload_;
+ Type type_;
+ ArgFlags flags_;
};
class CFunctionInfo {
@@ -257,27 +237,50 @@ class CFunctionInfo {
virtual const CTypeInfo& ReturnInfo() const = 0;
virtual unsigned int ArgumentCount() const = 0;
virtual const CTypeInfo& ArgumentInfo(unsigned int index) const = 0;
+ virtual bool HasOptions() const = 0;
};
struct ApiObject {
uintptr_t address;
};
-namespace internal {
+/**
+ * A struct which may be passed to a fast call callback, like so:
+ * \code
+ * void FastMethodWithOptions(int param, FastApiCallbackOptions& options);
+ * \endcode
+ */
+struct FastApiCallbackOptions {
+ /**
+ * If the callback wants to signal an error condition or to perform an
+ * allocation, it must set options.fallback to true and do an early return
+ * from the fast method. Then V8 checks the value of options.fallback and if
+ * it's true, falls back to executing the SlowCallback, which is capable of
+ * reporting the error (either by throwing a JS exception or logging to the
+ * console) or doing the allocation. It's the embedder's responsibility to
+ * ensure that the fast callback is idempotent up to the point where error and
+ * fallback conditions are checked, because otherwise executing the slow
+ * callback might produce visible side-effects twice.
+ */
+ bool fallback;
-template <typename T>
-struct GetCType {
- static constexpr CTypeInfo Get() {
- return CTypeInfo::FromCType(CTypeInfo::Type::kV8Value);
- }
+ /**
+ * The `data` passed to the FunctionTemplate constructor, or `undefined`.
+ */
+ const ApiObject data;
};
-#define SPECIALIZE_GET_C_TYPE_FOR(ctype, ctypeinfo) \
- template <> \
- struct GetCType<ctype> { \
- static constexpr CTypeInfo Get() { \
- return CTypeInfo::FromCType(CTypeInfo::Type::ctypeinfo); \
- } \
+namespace internal {
+
+template <typename T>
+struct GetCType;
+
+#define SPECIALIZE_GET_C_TYPE_FOR(ctype, ctypeinfo) \
+ template <> \
+ struct GetCType<ctype> { \
+ static constexpr CTypeInfo Get() { \
+ return CTypeInfo(CTypeInfo::Type::ctypeinfo); \
+ } \
};
#define SUPPORTED_C_TYPES(V) \
@@ -293,55 +296,45 @@ struct GetCType {
SUPPORTED_C_TYPES(SPECIALIZE_GET_C_TYPE_FOR)
-// T* where T is a primitive (array of primitives).
-template <typename T, typename = void>
-struct GetCTypePointerImpl {
- static constexpr CTypeInfo Get() {
- return CTypeInfo::FromCType(GetCType<T>::Get().GetType(),
- CTypeInfo::ArgFlags::kIsArrayBit);
- }
-};
-
-// T* where T is an API object.
-template <typename T>
-struct GetCTypePointerImpl<T, void> {
- static constexpr CTypeInfo Get() { return CTypeInfo::FromWrapperType(); }
-};
-
-// T** where T is a primitive. Not allowed.
-template <typename T, typename = void>
-struct GetCTypePointerPointerImpl {
- static_assert(sizeof(T**) != sizeof(T**), "Unsupported type");
-};
-
-// T** where T is an API object (array of API objects).
-template <typename T>
-struct GetCTypePointerPointerImpl<T, void> {
+template <>
+struct GetCType<FastApiCallbackOptions&> {
static constexpr CTypeInfo Get() {
- return CTypeInfo::FromWrapperType(CTypeInfo::ArgFlags::kIsArrayBit);
+ return CTypeInfo(CTypeInfo::kCallbackOptionsType);
}
};
-template <typename T>
-struct GetCType<T**> : public GetCTypePointerPointerImpl<T> {};
-
-template <typename T>
-struct GetCType<T*> : public GetCTypePointerImpl<T> {};
+// Helper to count the number of occurrences of `T` in `List`.
+template <typename T, typename... List>
+struct count : std::integral_constant<int, 0> {};
+template <typename T, typename... Args>
+struct count<T, T, Args...>
+ : std::integral_constant<std::size_t, 1 + count<T, Args...>::value> {};
+template <typename T, typename U, typename... Args>
+struct count<T, U, Args...> : count<T, Args...> {};
-template <typename R, bool RaisesException, typename... Args>
+template <typename R, typename... Args>
class CFunctionInfoImpl : public CFunctionInfo {
public:
- static constexpr int kFallbackArgCount = (RaisesException ? 1 : 0);
+ static constexpr int kOptionsArgCount =
+ count<FastApiCallbackOptions&, Args...>();
static constexpr int kReceiverCount = 1;
CFunctionInfoImpl()
: return_info_(internal::GetCType<R>::Get()),
- arg_count_(sizeof...(Args) - kFallbackArgCount),
+ arg_count_(sizeof...(Args) - kOptionsArgCount),
arg_info_{internal::GetCType<Args>::Get()...} {
- static_assert(sizeof...(Args) >= kFallbackArgCount + kReceiverCount,
+ static_assert(kOptionsArgCount == 0 || kOptionsArgCount == 1,
+ "Only one options parameter is supported.");
+ static_assert(sizeof...(Args) >= kOptionsArgCount + kReceiverCount,
"The receiver or the fallback argument is missing.");
- static_assert(
- internal::GetCType<R>::Get().GetType() == CTypeInfo::Type::kVoid,
- "Only void return types are currently supported.");
+ constexpr CTypeInfo::Type type = internal::GetCType<R>::Get().GetType();
+ static_assert(type == CTypeInfo::Type::kVoid ||
+ type == CTypeInfo::Type::kBool ||
+ type == CTypeInfo::Type::kInt32 ||
+ type == CTypeInfo::Type::kUint32 ||
+ type == CTypeInfo::Type::kFloat32 ||
+ type == CTypeInfo::Type::kFloat64,
+ "64-bit int and api object values are not currently "
+ "supported return types.");
}
const CTypeInfo& ReturnInfo() const override { return return_info_; }
@@ -352,6 +345,7 @@ class CFunctionInfoImpl : public CFunctionInfo {
}
return arg_info_[index];
}
+ bool HasOptions() const override { return kOptionsArgCount == 1; }
private:
const CTypeInfo return_info_;
@@ -382,8 +376,9 @@ class V8_EXPORT CFunction {
}
template <typename F>
+ V8_DEPRECATED("Use CFunction::Make instead.")
static CFunction MakeWithFallbackSupport(F* func) {
- return ArgUnwrap<F*>::MakeWithFallbackSupport(func);
+ return ArgUnwrap<F*>::Make(func);
}
template <typename F>
@@ -397,9 +392,9 @@ class V8_EXPORT CFunction {
CFunction(const void* address, const CFunctionInfo* type_info);
- template <typename R, bool RaisesException, typename... Args>
+ template <typename R, typename... Args>
static CFunctionInfo* GetCFunctionInfo() {
- static internal::CFunctionInfoImpl<R, RaisesException, Args...> instance;
+ static internal::CFunctionInfoImpl<R, Args...> instance;
return &instance;
}
@@ -414,19 +409,11 @@ class V8_EXPORT CFunction {
public:
static CFunction Make(R (*func)(Args...)) {
return CFunction(reinterpret_cast<const void*>(func),
- GetCFunctionInfo<R, false, Args...>());
- }
- static CFunction MakeWithFallbackSupport(R (*func)(Args...)) {
- return CFunction(reinterpret_cast<const void*>(func),
- GetCFunctionInfo<R, true, Args...>());
+ GetCFunctionInfo<R, Args...>());
}
};
};
-struct FastApiCallbackOptions {
- bool fallback;
-};
-
} // namespace v8
#endif // INCLUDE_V8_FAST_API_CALLS_H_
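
With the reworked CTypeInfo, a fast callback can now return a primitive value and receive the options struct as its trailing parameter. A hedged sketch (the function itself and its wiring into a FunctionTemplate are illustrative):

#include <cstdint>

#include "v8-fast-api-calls.h"

// The receiver arrives as v8::ApiObject; the options reference maps to
// kCallbackOptionsType and is not counted as a JS-visible argument.
static int32_t FastAdd(v8::ApiObject receiver, int32_t a, int32_t b,
                       v8::FastApiCallbackOptions& options) {
  int64_t sum = static_cast<int64_t>(a) + b;
  if (sum > INT32_MAX || sum < INT32_MIN) {
    options.fallback = true;  // Let the slow callback report the overflow.
    return 0;
  }
  return static_cast<int32_t>(sum);
}

static const v8::CFunction kFastAdd = v8::CFunction::Make(FastAdd);

The resulting CFunction would then be handed to the FunctionTemplate together with the regular (slow) callback, as before.
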
diff --git a/deps/v8/include/v8-metrics.h b/deps/v8/include/v8-metrics.h
index 1e2bd50acf..0217f40d63 100644
--- a/deps/v8/include/v8-metrics.h
+++ b/deps/v8/include/v8-metrics.h
@@ -10,6 +10,58 @@
namespace v8 {
namespace metrics {
+struct GarbageCollectionPhases {
+ int64_t compact_wall_clock_duration_in_us = -1;
+ int64_t mark_wall_clock_duration_in_us = -1;
+ int64_t sweep_wall_clock_duration_in_us = -1;
+ int64_t weak_wall_clock_duration_in_us = -1;
+};
+
+struct GarbageCollectionSizes {
+ int64_t bytes_before = -1;
+ int64_t bytes_after = -1;
+ int64_t bytes_freed = -1;
+};
+
+struct GarbageCollectionFullCycle {
+ GarbageCollectionPhases total;
+ GarbageCollectionPhases total_cpp;
+ GarbageCollectionPhases main_thread;
+ GarbageCollectionPhases main_thread_cpp;
+ GarbageCollectionPhases main_thread_atomic;
+ GarbageCollectionPhases main_thread_atomic_cpp;
+ GarbageCollectionPhases main_thread_incremental;
+ GarbageCollectionPhases main_thread_incremental_cpp;
+ GarbageCollectionSizes objects;
+ GarbageCollectionSizes objects_cpp;
+ GarbageCollectionSizes memory;
+ GarbageCollectionSizes memory_cpp;
+ double collection_rate_in_percent;
+ double collection_rate_cpp_in_percent;
+ double efficiency_in_bytes_per_us;
+ double efficiency_cpp_in_bytes_per_us;
+ double main_thread_efficiency_in_bytes_per_us;
+ double main_thread_efficiency_cpp_in_bytes_per_us;
+};
+
+struct GarbageCollectionFullMainThreadIncrementalMark {
+ int64_t wall_clock_duration_in_us = -1;
+ int64_t cpp_wall_clock_duration_in_us = -1;
+};
+
+struct GarbageCollectionFullMainThreadIncrementalSweep {
+ int64_t wall_clock_duration_in_us = -1;
+ int64_t cpp_wall_clock_duration_in_us = -1;
+};
+
+struct GarbageCollectionYoungCycle {
+ int64_t total_wall_clock_duration_in_us = -1;
+ int64_t main_thread_wall_clock_duration_in_us = -1;
+ double collection_rate_in_percent;
+ double efficiency_in_bytes_per_us;
+ double main_thread_efficiency_in_bytes_per_us;
+};
+
struct WasmModuleDecoded {
bool async = false;
bool streamed = false;
@@ -48,10 +100,14 @@ struct WasmModulesPerIsolate {
size_t count = 0;
};
-#define V8_MAIN_THREAD_METRICS_EVENTS(V) \
- V(WasmModuleDecoded) \
- V(WasmModuleCompiled) \
- V(WasmModuleInstantiated) \
+#define V8_MAIN_THREAD_METRICS_EVENTS(V) \
+ V(GarbageCollectionFullCycle) \
+ V(GarbageCollectionFullMainThreadIncrementalMark) \
+ V(GarbageCollectionFullMainThreadIncrementalSweep) \
+ V(GarbageCollectionYoungCycle) \
+ V(WasmModuleDecoded) \
+ V(WasmModuleCompiled) \
+ V(WasmModuleInstantiated) \
V(WasmModuleTieredUp)
#define V8_THREAD_SAFE_METRICS_EVENTS(V) V(WasmModulesPerIsolate)
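
A sketch of a recorder consuming the new GC events; it assumes the v8::metrics::Recorder interface declared elsewhere in this header, whose AddMainThreadEvent() overloads are generated from the macro above:

#include "v8-metrics.h"

class GcMetricsRecorder : public v8::metrics::Recorder {
 public:
  void AddMainThreadEvent(const v8::metrics::GarbageCollectionFullCycle& event,
                          ContextId context_id) override {
    // Durations are in microseconds; -1 means the phase was not measured.
    if (event.main_thread.mark_wall_clock_duration_in_us >= 0) {
      // ... forward the marking time to the embedder's metrics backend ...
    }
  }
};
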
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 74b6df884d..85d3f8a482 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -259,6 +259,17 @@ enum class CpuProfilingStatus {
};
/**
+ * Delegate notified when the sample limit is reached and samples are discarded.
+ */
+class V8_EXPORT DiscardedSamplesDelegate {
+ public:
+ DiscardedSamplesDelegate() {}
+
+ virtual ~DiscardedSamplesDelegate() = default;
+ virtual void Notify() = 0;
+};
+
+/**
* Optional profiling attributes.
*/
class V8_EXPORT CpuProfilingOptions {
@@ -346,8 +357,9 @@ class V8_EXPORT CpuProfiler {
* profiles may be collected at once. Attempts to start collecting several
* profiles with the same title are silently ignored.
*/
- CpuProfilingStatus StartProfiling(Local<String> title,
- CpuProfilingOptions options);
+ CpuProfilingStatus StartProfiling(
+ Local<String> title, CpuProfilingOptions options,
+ std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
/**
* Starts profiling with the same semantics as above, except with expanded
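
A sketch of the new delegate hook; the profiler setup around it is the usual boilerplate and the sample limit is illustrative:

#include <memory>

#include "v8-profiler.h"

class LoggingDelegate final : public v8::DiscardedSamplesDelegate {
 public:
  void Notify() override {
    // Invoked once the configured max_samples is reached and further
    // samples start being discarded.
  }
};

void StartBoundedProfile(v8::CpuProfiler* profiler,
                         v8::Local<v8::String> title) {
  v8::CpuProfilingOptions options(v8::kLeafNodeLineNumbers,
                                  /*max_samples=*/10000);
  profiler->StartProfiling(title, options,
                           std::make_unique<LoggingDelegate>());
}
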
diff --git a/deps/v8/include/v8-unwinder-state.h b/deps/v8/include/v8-unwinder-state.h
index ed9988711b..00f8b8b176 100644
--- a/deps/v8/include/v8-unwinder-state.h
+++ b/deps/v8/include/v8-unwinder-state.h
@@ -19,7 +19,7 @@ struct CalleeSavedRegisters {
};
#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || \
- V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390
+ V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_S390
struct CalleeSavedRegisters {};
#else
#error Target architecture was not detected as supported by v8
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 67c1c84b32..ec959c40b4 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -8,10 +8,10 @@
// These macros define the version number for the current version.
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
-#define V8_MAJOR_VERSION 8
-#define V8_MINOR_VERSION 9
-#define V8_BUILD_NUMBER 255
-#define V8_PATCH_LEVEL 19
+#define V8_MAJOR_VERSION 9
+#define V8_MINOR_VERSION 0
+#define V8_BUILD_NUMBER 257
+#define V8_PATCH_LEVEL 11
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index b1cc054d76..7cb19bbede 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -47,11 +47,12 @@ class BigIntObject;
class Boolean;
class BooleanObject;
class CFunction;
+class CallHandlerHelper;
class Context;
class CppHeap;
-struct CppHeapCreateParams;
class Data;
class Date;
+class EscapableHandleScope;
class External;
class Function;
class FunctionTemplate;
@@ -60,8 +61,7 @@ class ImplementationUtilities;
class Int32;
class Integer;
class Isolate;
-template <class T>
-class Maybe;
+class Isolate;
class MicrotaskQueue;
class Name;
class Number;
@@ -71,6 +71,8 @@ class ObjectOperationDescriptor;
class ObjectTemplate;
class Platform;
class Primitive;
+class PrimitiveArray;
+class Private;
class Promise;
class PropertyDescriptor;
class Proxy;
@@ -78,75 +80,72 @@ class RawOperationDescriptor;
class Script;
class SharedArrayBuffer;
class Signature;
-class StartupData;
class StackFrame;
class StackTrace;
+class StartupData;
class String;
class StringObject;
class Symbol;
class SymbolObject;
class TracedReferenceBase;
-class PrimitiveArray;
-class Private;
class Uint32;
class Utils;
class Value;
class WasmMemoryObject;
class WasmModuleObject;
-template <class T> class Local;
-template <class T>
-class MaybeLocal;
-template <class T> class Eternal;
+template <class K, class V, class T>
+class GlobalValueMap;
+template <class K, class V, class T>
+class PersistentValueMapBase;
template<class T> class NonCopyablePersistentTraits;
-template<class T> class PersistentBase;
-template <class T, class M = NonCopyablePersistentTraits<T> >
+template <class T, class M = NonCopyablePersistentTraits<T>>
class Persistent;
template <class T>
+class BasicTracedReference;
+template <class T>
+class Eternal;
+template <class T>
class Global;
template <class T>
+class Local;
+template <class T>
+class Maybe;
+template <class T>
+class MaybeLocal;
+template <class T>
class TracedGlobal;
template <class T>
class TracedReference;
-template <class T>
-class BasicTracedReference;
template<class K, class V, class T> class PersistentValueMap;
-template <class K, class V, class T>
-class PersistentValueMapBase;
-template <class K, class V, class T>
-class GlobalValueMap;
-template<class V, class T> class PersistentValueVector;
template<class T, class P> class WeakCallbackObject;
-class FunctionTemplate;
-class ObjectTemplate;
+template <class T>
+class PersistentBase;
+template <class V, class T>
+class PersistentValueVector;
template<typename T> class FunctionCallbackInfo;
template<typename T> class PropertyCallbackInfo;
-class StackTrace;
-class StackFrame;
-class Isolate;
-class CallHandlerHelper;
-class EscapableHandleScope;
template<typename T> class ReturnValue;
namespace internal {
-enum class ArgumentsType;
-template <ArgumentsType>
-class Arguments;
class BasicTracedReferenceExtractor;
-template <typename T>
-class CustomArguments;
+class ExternalString;
class FunctionCallbackArguments;
class GlobalHandles;
class Heap;
class HeapObject;
-class ExternalString;
class Isolate;
class LocalEmbedderHeapTracer;
class MicrotaskQueue;
class PropertyCallbackArguments;
class ReadOnlyHeap;
class ScopedExternalStringLock;
-struct ScriptStreamingData;
class ThreadLocalTop;
+struct ScriptStreamingData;
+enum class ArgumentsType;
+template <ArgumentsType>
+class Arguments;
+template <typename T>
+class CustomArguments;
namespace wasm {
class NativeModule;
@@ -1343,6 +1342,11 @@ class V8_EXPORT Data {
*/
bool IsFunctionTemplate() const;
+ /**
+ * Returns true if this data is a |v8::Context|.
+ */
+ bool IsContext() const;
+
private:
Data();
};
@@ -1423,9 +1427,7 @@ class ScriptOriginOptions {
*/
class ScriptOrigin {
public:
-#if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */
- V8_DEPRECATE_SOON("Use constructor with primitvie C++ types")
-#endif
+ V8_DEPRECATE_SOON("Use constructor with primitive C++ types")
V8_INLINE explicit ScriptOrigin(
Local<Value> resource_name, Local<Integer> resource_line_offset,
Local<Integer> resource_column_offset,
@@ -1436,6 +1438,7 @@ class ScriptOrigin {
Local<Boolean> is_wasm = Local<Boolean>(),
Local<Boolean> is_module = Local<Boolean>(),
Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>());
+ V8_DEPRECATE_SOON("Use constructor that takes an isolate")
V8_INLINE explicit ScriptOrigin(
Local<Value> resource_name, int resource_line_offset = 0,
int resource_column_offset = 0,
@@ -1444,6 +1447,14 @@ class ScriptOrigin {
bool resource_is_opaque = false, bool is_wasm = false,
bool is_module = false,
Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>());
+ V8_INLINE explicit ScriptOrigin(
+ Isolate* isolate, Local<Value> resource_name,
+ int resource_line_offset = 0, int resource_column_offset = 0,
+ bool resource_is_shared_cross_origin = false, int script_id = -1,
+ Local<Value> source_map_url = Local<Value>(),
+ bool resource_is_opaque = false, bool is_wasm = false,
+ bool is_module = false,
+ Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>());
V8_INLINE Local<Value> ResourceName() const;
V8_DEPRECATE_SOON("Use getter with primitvie C++ types.")
@@ -1552,6 +1563,13 @@ class V8_EXPORT ModuleRequest : public Data {
* The keys and values are of type v8::String, and the source offsets are of
* type Int32. Use Module::SourceOffsetToLocation to convert the source
* offsets to Locations with line/column numbers.
+ *
+ * All assertions present in the module request will be supplied in this
+ * list, regardless of whether they are supported by the host. Per
+ * https://tc39.es/proposal-import-assertions/#sec-hostgetsupportedimportassertions,
+ * hosts are expected to ignore assertions that they do not support (as
+ * opposed to, for example, triggering an error if an unsupported assertion is
+ * present).
*/
Local<FixedArray> GetImportAssertions() const;
@@ -1631,7 +1649,7 @@ class V8_EXPORT Module : public Data {
*/
int GetIdentityHash() const;
- using ResolveCallback =
+ using ResolveCallback V8_DEPRECATE_SOON("Use ResolveModuleCallback") =
MaybeLocal<Module> (*)(Local<Context> context, Local<String> specifier,
Local<Module> referrer);
using ResolveModuleCallback = MaybeLocal<Module> (*)(
@@ -1706,7 +1724,7 @@ class V8_EXPORT Module : public Data {
/*
* Callback defined in the embedder. This is responsible for setting
* the module's exported values with calls to SetSyntheticModuleExport().
- * The callback must return a Value to indicate success (where no
+ * The callback must return a resolved Promise to indicate success (where no
* exception was thrown) and return an empy MaybeLocal to indicate falure
* (where an exception was thrown).
*/
@@ -1920,11 +1938,9 @@ class V8_EXPORT ScriptCompiler {
public:
enum Encoding { ONE_BYTE, TWO_BYTE, UTF8 };
-#if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */
V8_DEPRECATED(
"This class takes ownership of source_stream, so use the constructor "
"taking a unique_ptr to make these semantics clearer")
-#endif
StreamedSource(ExternalSourceStream* source_stream, Encoding encoding);
StreamedSource(std::unique_ptr<ExternalSourceStream> source_stream,
Encoding encoding);
@@ -3307,7 +3323,8 @@ class V8_EXPORT String : public Name {
~ExternalStringResource() override = default;
/**
- * The string data from the underlying buffer.
+ * The string data from the underlying buffer. If the resource is cacheable
+ * then data() must return the same value for all invocations.
*/
virtual const uint16_t* data() const = 0;
@@ -3316,8 +3333,29 @@ class V8_EXPORT String : public Name {
*/
virtual size_t length() const = 0;
+ /**
+ * Returns the cached data from the underlying buffer. This method can be
+ * called only for cacheable resources (i.e. IsCacheable() == true) and only
+ * after UpdateDataCache() was called.
+ */
+ const uint16_t* cached_data() const {
+ CheckCachedDataInvariants();
+ return cached_data_;
+ }
+
+ /**
+ * Update {cached_data_} with the data from the underlying buffer. This can
+ * be called only for cacheable resources.
+ */
+ void UpdateDataCache();
+
protected:
ExternalStringResource() = default;
+
+ private:
+ void CheckCachedDataInvariants() const;
+
+ const uint16_t* cached_data_ = nullptr;
};
/**
@@ -3338,12 +3376,39 @@ class V8_EXPORT String : public Name {
* buffer.
*/
~ExternalOneByteStringResource() override = default;
- /** The string data from the underlying buffer.*/
+
+ /**
+ * The string data from the underlying buffer. If the resource is cacheable
+ * then data() must return the same value for all invocations.
+ */
virtual const char* data() const = 0;
+
/** The number of Latin-1 characters in the string.*/
virtual size_t length() const = 0;
+
+ /**
+ * Returns the cached data from the underlying buffer. Calling this for an
+ * uncacheable resource, or before UpdateDataCache() has been called, results
+ * in undefined behaviour.
+ */
+ const char* cached_data() const {
+ CheckCachedDataInvariants();
+ return cached_data_;
+ }
+
+ /**
+ * Update {cached_data_} with the data from the underlying buffer. This can
+ * be called only for cacheable resources.
+ */
+ void UpdateDataCache();
+
protected:
ExternalOneByteStringResource() = default;
+
+ private:
+ void CheckCachedDataInvariants() const;
+
+ const char* cached_data_ = nullptr;
};
/**
@@ -4206,12 +4271,18 @@ class V8_EXPORT Object : public Value {
/**
* Returns the context in which the object was created.
*/
+ V8_DEPRECATE_SOON("Use MaybeLocal<Context> GetCreationContext()")
Local<Context> CreationContext();
+ MaybeLocal<Context> GetCreationContext();
/** Same as above, but works for Persistents */
- V8_INLINE static Local<Context> CreationContext(
+ V8_DEPRECATE_SOON(
+ "Use MaybeLocal<Context> GetCreationContext(const "
+ "PersistentBase<Object>& object)")
+ static Local<Context> CreationContext(const PersistentBase<Object>& object);
+ V8_INLINE static MaybeLocal<Context> GetCreationContext(
const PersistentBase<Object>& object) {
- return object.val_->CreationContext();
+ return object.val_->GetCreationContext();
}
/**
@@ -4684,6 +4755,11 @@ class V8_EXPORT Function : public Object {
* User-defined name assigned to the "displayName" property of this function.
* Used to facilitate debugging and profiling of JavaScript code.
*/
+ V8_DEPRECATED(
+ "Use v8::Object::Get() instead to look up \"displayName\". "
+ "V8 and DevTools no longer use \"displayName\" in stack "
+ "traces, but the standard \"name\" property. "
+ "See http://crbug.com/1177685.")
Local<Value> GetDisplayName() const;
/**
@@ -5388,7 +5464,7 @@ class V8_EXPORT ArrayBuffer : public Object {
* |Allocator::Free| once all ArrayBuffers referencing it are collected by
* the garbage collector.
*/
- V8_DEPRECATE_SOON(
+ V8_DEPRECATED(
"Use the version that takes a BackingStore. "
"See http://crbug.com/v8/9908.")
static Local<ArrayBuffer> New(
@@ -5437,7 +5513,7 @@ class V8_EXPORT ArrayBuffer : public Object {
* Returns true if ArrayBuffer is externalized, that is, does not
* own its memory block.
*/
- V8_DEPRECATE_SOON(
+ V8_DEPRECATED(
"With v8::BackingStore externalized ArrayBuffers are "
"the same as ordinary ArrayBuffers. See http://crbug.com/v8/9908.")
bool IsExternal() const;
@@ -5465,8 +5541,7 @@ class V8_EXPORT ArrayBuffer : public Object {
* deleter, which will call ArrayBuffer::Allocator::Free if the buffer
* was allocated with ArrayBuffer::Allocator::Allocate.
*/
- V8_DEPRECATE_SOON(
- "Use GetBackingStore or Detach. See http://crbug.com/v8/9908.")
+ V8_DEPRECATED("Use GetBackingStore or Detach. See http://crbug.com/v8/9908.")
Contents Externalize();
/**
@@ -5476,7 +5551,7 @@ class V8_EXPORT ArrayBuffer : public Object {
* With the new lifetime management of backing stores there is no need for
* externalizing, so this function exists only to make the transition easier.
*/
- V8_DEPRECATE_SOON("This will be removed together with IsExternal.")
+ V8_DEPRECATED("This will be removed together with IsExternal.")
void Externalize(const std::shared_ptr<BackingStore>& backing_store);
/**
@@ -5487,7 +5562,7 @@ class V8_EXPORT ArrayBuffer : public Object {
* The embedder should make sure to hold a strong reference to the
* ArrayBuffer while accessing this pointer.
*/
- V8_DEPRECATE_SOON("Use GetBackingStore. See http://crbug.com/v8/9908.")
+ V8_DEPRECATED("Use GetBackingStore. See http://crbug.com/v8/9908.")
Contents GetContents();
/**
@@ -5871,7 +5946,7 @@ class V8_EXPORT SharedArrayBuffer : public Object {
* specified. The memory block will not be reclaimed when a created
* SharedArrayBuffer is garbage-collected.
*/
- V8_DEPRECATE_SOON(
+ V8_DEPRECATED(
"Use the version that takes a BackingStore. "
"See http://crbug.com/v8/9908.")
static Local<SharedArrayBuffer> New(
@@ -5931,7 +6006,7 @@ class V8_EXPORT SharedArrayBuffer : public Object {
* Returns true if SharedArrayBuffer is externalized, that is, does not
* own its memory block.
*/
- V8_DEPRECATE_SOON(
+ V8_DEPRECATED(
"With v8::BackingStore externalized SharedArrayBuffers are the same "
"as ordinary SharedArrayBuffers. See http://crbug.com/v8/9908.")
bool IsExternal() const;
@@ -5948,8 +6023,7 @@ class V8_EXPORT SharedArrayBuffer : public Object {
* v8::Isolate::CreateParams::array_buffer_allocator.
*
*/
- V8_DEPRECATE_SOON(
- "Use GetBackingStore or Detach. See http://crbug.com/v8/9908.")
+ V8_DEPRECATED("Use GetBackingStore or Detach. See http://crbug.com/v8/9908.")
Contents Externalize();
/**
@@ -5959,7 +6033,7 @@ class V8_EXPORT SharedArrayBuffer : public Object {
* With the new lifetime management of backing stores there is no need for
* externalizing, so this function exists only to make the transition easier.
*/
- V8_DEPRECATE_SOON("This will be removed together with IsExternal.")
+ V8_DEPRECATED("This will be removed together with IsExternal.")
void Externalize(const std::shared_ptr<BackingStore>& backing_store);
/**
@@ -5974,7 +6048,7 @@ class V8_EXPORT SharedArrayBuffer : public Object {
* by the allocator specified in
* v8::Isolate::CreateParams::array_buffer_allocator.
*/
- V8_DEPRECATE_SOON("Use GetBackingStore. See http://crbug.com/v8/9908.")
+ V8_DEPRECATED("Use GetBackingStore. See http://crbug.com/v8/9908.")
Contents GetContents();
/**
@@ -6119,9 +6193,10 @@ class V8_EXPORT RegExp : public Object {
kUnicode = 1 << 4,
kDotAll = 1 << 5,
kLinear = 1 << 6,
+ kHasIndices = 1 << 7,
};
- static constexpr int kFlagCount = 7;
+ static constexpr int kFlagCount = 8;
/**
* Creates a regular expression from the given pattern string and
@@ -7418,7 +7493,39 @@ using CallCompletedCallback = void (*)(Isolate*);
* The specifier is the name of the module that should be imported.
*
* The embedder must compile, instantiate, evaluate the Module, and
- * obtain it's namespace object.
+ * obtain its namespace object.
+ *
+ * The Promise returned from this function is forwarded to userland
+ * JavaScript. The embedder must resolve this promise with the module
+ * namespace object. In case of an exception, the embedder must reject
+ * this promise with the exception. If the promise creation itself
+ * fails (e.g. due to stack overflow), the embedder must propagate
+ * that exception by returning an empty MaybeLocal.
+ */
+using HostImportModuleDynamicallyCallback V8_DEPRECATE_SOON(
+ "Use HostImportModuleDynamicallyWithImportAssertionsCallback instead") =
+ MaybeLocal<Promise> (*)(Local<Context> context,
+ Local<ScriptOrModule> referrer,
+ Local<String> specifier);
+
+/**
+ * HostImportModuleDynamicallyWithImportAssertionsCallback is called when we
+ * require the embedder to load a module. This is used as part of the dynamic
+ * import syntax.
+ *
+ * The referrer contains metadata about the script/module that calls
+ * import.
+ *
+ * The specifier is the name of the module that should be imported.
+ *
+ * The import_assertions are import assertions for this request in the form:
+ * [key1, value1, key2, value2, ...] where the keys and values are of type
+ * v8::String. Note, unlike the FixedArray passed to ResolveModuleCallback and
+ * returned from ModuleRequest::GetImportAssertions(), this array does not
+ * contain the source Locations of the assertions.
+ *
+ * The embedder must compile, instantiate, evaluate the Module, and
+ * obtain its namespace object.
*
* The Promise returned from this function is forwarded to userland
* JavaScript. The embedder must resolve this promise with the module
@@ -7427,9 +7534,11 @@ using CallCompletedCallback = void (*)(Isolate*);
* fails (e.g. due to stack overflow), the embedder must propagate
* that exception by returning an empty MaybeLocal.
*/
-using HostImportModuleDynamicallyCallback = MaybeLocal<Promise> (*)(
- Local<Context> context, Local<ScriptOrModule> referrer,
- Local<String> specifier);
+using HostImportModuleDynamicallyWithImportAssertionsCallback =
+ MaybeLocal<Promise> (*)(Local<Context> context,
+ Local<ScriptOrModule> referrer,
+ Local<String> specifier,
+ Local<FixedArray> import_assertions);
/**
* HostInitializeImportMetaObjectCallback is called the first time import.meta
@@ -7689,9 +7798,6 @@ using ApiImplementationCallback = void (*)(const FunctionCallbackInfo<Value>&);
// --- Callback for WebAssembly.compileStreaming ---
using WasmStreamingCallback = void (*)(const FunctionCallbackInfo<Value>&);
-// --- Callback for checking if WebAssembly threads are enabled ---
-using WasmThreadsEnabledCallback = bool (*)(Local<Context> context);
-
// --- Callback for loading source map file for Wasm profiling support
using WasmLoadSourceMapCallback = Local<String> (*)(Isolate* isolate,
const char* name);
@@ -7699,6 +7805,9 @@ using WasmLoadSourceMapCallback = Local<String> (*)(Isolate* isolate,
// --- Callback for checking if WebAssembly Simd is enabled ---
using WasmSimdEnabledCallback = bool (*)(Local<Context> context);
+// --- Callback for checking if WebAssembly exceptions are enabled ---
+using WasmExceptionsEnabledCallback = bool (*)(Local<Context> context);
+
// --- Garbage Collection Callbacks ---
/**
@@ -8417,28 +8526,9 @@ class V8_EXPORT Isolate {
int embedder_wrapper_type_index = -1;
int embedder_wrapper_object_index = -1;
- /**
- * If parameters are set, V8 creates a managed C++ heap as extension to its
- * JavaScript heap.
- *
- * See v8::Isolate::GetCppHeap() for working with the heap.
- *
- * This is an experimental feature and may still change significantly.
- */
- std::shared_ptr<CppHeapCreateParams> cpp_heap_params;
-
- /**
- * This list is provided by the embedder to indicate which import assertions
- * they want to handle. Only import assertions whose keys are present in
- * supported_import_assertions will be included in the import assertions
- * lists of ModuleRequests that will be passed to the embedder. If
- * supported_import_assertions is left empty, then the embedder will not
- * receive any import assertions.
- *
- * This corresponds to the list returned by the HostGetSupportedAssertions
- * host-defined abstract operation:
- * https://tc39.es/proposal-import-assertions/#sec-hostgetsupportedimportassertions
- */
+ V8_DEPRECATED(
+ "Setting this has no effect. Embedders should ignore import assertions "
+ "that they do not use.")
std::vector<std::string> supported_import_assertions;
};
@@ -8480,7 +8570,11 @@ class V8_EXPORT Isolate {
private:
OnFailure on_failure_;
- void* internal_;
+ Isolate* isolate_;
+
+ bool was_execution_allowed_assert_;
+ bool was_execution_allowed_throws_;
+ bool was_execution_allowed_dump_;
};
/**
@@ -8498,9 +8592,10 @@ class V8_EXPORT Isolate {
const AllowJavascriptExecutionScope&) = delete;
private:
- void* internal_throws_;
- void* internal_assert_;
- void* internal_dump_;
+ Isolate* isolate_;
+ bool was_execution_allowed_assert_;
+ bool was_execution_allowed_throws_;
+ bool was_execution_allowed_dump_;
};
/**
@@ -8590,8 +8685,8 @@ class V8_EXPORT Isolate {
kArrayInstanceProtoModified = 27,
kArrayInstanceConstructorModified = 28,
kLegacyFunctionDeclaration = 29,
- kRegExpPrototypeSourceGetter = 30,
- kRegExpPrototypeOldFlagGetter = 31,
+ kRegExpPrototypeSourceGetter = 30, // Unused.
+ kRegExpPrototypeOldFlagGetter = 31, // Unused.
kDecimalWithLeadingZeroInStrictMode = 32,
kLegacyDateParser = 33,
kDefineGetterOrSetterWouldThrow = 34,
@@ -8669,8 +8764,9 @@ class V8_EXPORT Isolate {
kWasmSimdOpcodes = 106,
kVarRedeclaredCatchBinding = 107,
kWasmRefTypes = 108,
- kWasmBulkMemory = 109,
+ kWasmBulkMemory = 109, // Unused.
kWasmMultiValue = 110,
+ kWasmExceptionHandling = 111,
// If you add new values here, you'll also need to update Chromium's:
// web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to
@@ -8763,10 +8859,20 @@ class V8_EXPORT Isolate {
* This specifies the callback called by the upcoming dynamic
* import() language feature to load modules.
*/
+ V8_DEPRECATE_SOON(
+ "Use the version of SetHostImportModuleDynamicallyCallback that takes a "
+ "HostImportModuleDynamicallyWithImportAssertionsCallback instead")
void SetHostImportModuleDynamicallyCallback(
HostImportModuleDynamicallyCallback callback);
/**
+ * This specifies the callback called by the upcoming dynamic
+ * import() language feature to load modules.
+ */
+ void SetHostImportModuleDynamicallyCallback(
+ HostImportModuleDynamicallyWithImportAssertionsCallback callback);
+
+ /**
* This specifies the callback called by the upcoming import.meta
* language feature to retrieve host-defined meta data for a module.
*/
@@ -9041,8 +9147,26 @@ class V8_EXPORT Isolate {
EmbedderHeapTracer* GetEmbedderHeapTracer();
/**
- * \returns the C++ heap managed by V8. Only available if the Isolate was
- * created with proper CreatePrams::cpp_heap_params option.
+ * Attaches a managed C++ heap as an extension to the JavaScript heap. The
+ * embedder maintains ownership of the CppHeap. At most one C++ heap can be
+ * attached to V8.
+ *
+ * This is an experimental feature and may still change significantly.
+ */
+ void AttachCppHeap(CppHeap*);
+
+ /**
+ * Detaches a managed C++ heap if one was attached using `AttachCppHeap()`.
+ *
+ * This is an experimental feature and may still change significantly.
+ */
+ void DetachCppHeap();
+
+ /**
+   * \returns the C++ heap managed by V8. Only available if such a heap has
+   * been attached using `AttachCppHeap()`.
+   *
+   * This is an experimental feature and may still change significantly.
*/
CppHeap* GetCppHeap() const;
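
A minimal sketch of the new attach/detach flow, assuming `cpp_heap` points to a v8::CppHeap the embedder created through the cppgc APIs in v8-cppgc.h (creation is not shown in this hunk):

```cpp
#include "v8-cppgc.h"
#include "v8.h"

// Sketch only: the embedder owns `cpp_heap` the whole time; V8 merely uses
// it while attached, and at most one C++ heap may be attached per isolate.
void RunWithCppHeap(v8::Isolate* isolate, v8::CppHeap* cpp_heap) {
  isolate->AttachCppHeap(cpp_heap);
  // While attached, the same heap is visible through GetCppHeap().
  v8::CppHeap* attached = isolate->GetCppHeap();
  (void)attached;
  // ... allocate cppgc-managed objects associated with this isolate ...
  isolate->DetachCppHeap();  // the embedder still owns and destroys cpp_heap
}
```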
@@ -9580,12 +9704,12 @@ class V8_EXPORT Isolate {
void SetWasmStreamingCallback(WasmStreamingCallback callback);
- void SetWasmThreadsEnabledCallback(WasmThreadsEnabledCallback callback);
-
void SetWasmLoadSourceMapCallback(WasmLoadSourceMapCallback callback);
void SetWasmSimdEnabledCallback(WasmSimdEnabledCallback callback);
+ void SetWasmExceptionsEnabledCallback(WasmExceptionsEnabledCallback callback);
+
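
Registering the new callback can be as small as a captureless lambda, which converts to the required function pointer type; a sketch, assuming a live isolate:

```cpp
// Sketch: report WebAssembly exception handling as enabled in every context.
void EnableWasmExceptions(v8::Isolate* isolate) {
  isolate->SetWasmExceptionsEnabledCallback(
      [](v8::Local<v8::Context>) { return true; });
}
```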
/**
* Check if V8 is dead and therefore unusable. This is the case after
* fatal errors such as out-of-memory situations.
@@ -10434,7 +10558,7 @@ class V8_EXPORT ExtensionConfiguration {
* A sandboxed execution context with its own set of built-in objects
* and functions.
*/
-class V8_EXPORT Context {
+class V8_EXPORT Context : public Data {
public:
/**
* Returns the global proxy object.
@@ -10711,18 +10835,21 @@ class V8_EXPORT Context {
const BackupIncumbentScope* prev_ = nullptr;
};
+ V8_INLINE static Context* Cast(Data* data);
+
private:
friend class Value;
friend class Script;
friend class Object;
friend class Function;
+ static void CheckCast(Data* obj);
+
internal::Address* GetDataFromSnapshotOnce(size_t index);
Local<Value> SlowGetEmbedderData(int index);
void* SlowGetAlignedPointerFromEmbedderData(int index);
};
-
/**
* Multiple threads in V8 are allowed, but only one thread at a time is allowed
* to use any given V8 isolate, see the comments in the Isolate class. The
@@ -11484,7 +11611,7 @@ ScriptOrigin::ScriptOrigin(
Local<Boolean> is_opaque, Local<Boolean> is_wasm, Local<Boolean> is_module,
Local<PrimitiveArray> host_defined_options)
: ScriptOrigin(
- resource_name,
+ Isolate::GetCurrent(), resource_name,
line_offset.IsEmpty() ? 0 : static_cast<int>(line_offset->Value()),
column_offset.IsEmpty() ? 0
: static_cast<int>(column_offset->Value()),
@@ -11508,6 +11635,21 @@ ScriptOrigin::ScriptOrigin(Local<Value> resource_name, int line_offset,
source_map_url_(source_map_url),
host_defined_options_(host_defined_options) {}
+ScriptOrigin::ScriptOrigin(Isolate* isolate, Local<Value> resource_name,
+ int line_offset, int column_offset,
+ bool is_shared_cross_origin, int script_id,
+ Local<Value> source_map_url, bool is_opaque,
+ bool is_wasm, bool is_module,
+ Local<PrimitiveArray> host_defined_options)
+ : isolate_(isolate),
+ resource_name_(resource_name),
+ resource_line_offset_(line_offset),
+ resource_column_offset_(column_offset),
+ options_(is_shared_cross_origin, is_opaque, is_wasm, is_module),
+ script_id_(script_id),
+ source_map_url_(source_map_url),
+ host_defined_options_(host_defined_options) {}
+
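
This is the constructor the updated samples/shell.cc below relies on; a minimal sketch with every trailing parameter left at its default:

```cpp
// Sketch: build a ScriptOrigin with the Isolate passed explicitly, mirroring
// the old constructor, which now forwards via Isolate::GetCurrent().
v8::ScriptOrigin MakeOrigin(v8::Isolate* isolate,
                            v8::Local<v8::Value> resource_name) {
  return v8::ScriptOrigin(isolate, resource_name);
}
```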
Local<Value> ScriptOrigin::ResourceName() const { return resource_name_; }
Local<PrimitiveArray> ScriptOrigin::HostDefinedOptions() const {
@@ -11876,6 +12018,13 @@ BigInt* BigInt::Cast(v8::Data* data) {
return static_cast<BigInt*>(data);
}
+Context* Context::Cast(v8::Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return static_cast<Context*>(data);
+}
+
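
With Context now deriving from Data and gaining a Cast, a Local&lt;Data&gt; can be downcast back to a Context; a minimal sketch:

```cpp
// Sketch: downcast a generic Data handle to a Context. When V8_ENABLE_CHECKS
// is defined, Context::CheckCast verifies that the Data really is a Context.
v8::Local<v8::Context> AsContext(v8::Local<v8::Data> data) {
  return data.As<v8::Context>();
}
```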
Date* Date::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index 0886f691d5..acd34d7a1f 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -5,6 +5,14 @@
#ifndef V8CONFIG_H_
#define V8CONFIG_H_
+#ifdef V8_GN_HEADER
+#if __cplusplus >= 201703L && !__has_include("v8-gn.h")
+#error Missing v8-gn.h. The configuration for v8 is missing from the include \
+path. Add it with -I<path> to the command line
+#endif
+#include "v8-gn.h" // NOLINT(build/include_directory)
+#endif
+
// clang-format off
// Platform headers for feature detection below.
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index b95f7d2fbf..2e66b1c99e 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -7,7 +7,7 @@
# config names (where each config name is a key in the 'configs' dict,
# below). MB uses this dict to look up which config to use for a given bot.
# Bots are ordered by appearance on waterfall.
- 'masters': {
+ 'builder_groups': {
'developer_default': {
'android.arm.debug': 'default_debug_android_arm',
'android.arm.optdebug': 'default_optdebug_android_arm',
@@ -33,6 +33,12 @@
'ppc64.debug.sim': 'default_debug_ppc64_sim',
'ppc64.optdebug.sim': 'default_optdebug_ppc64_sim',
'ppc64.release.sim': 'default_release_ppc64_sim',
+ 'riscv64.debug': 'default_debug_riscv64',
+ 'riscv64.optdebug': 'default_optdebug_riscv64',
+ 'riscv64.release': 'default_release_riscv64',
+ 'riscv64.debug.sim': 'default_debug_riscv64_sim',
+ 'riscv64.optdebug.sim': 'default_optdebug_riscv64_sim',
+ 'riscv64.release.sim': 'default_release_riscv64_sim',
's390x.debug': 'default_debug_s390x',
's390x.optdebug': 'default_optdebug_s390x',
's390x.release': 'default_release_s390x',
@@ -57,11 +63,13 @@
# Linux64.
'V8 Linux64 - builder': 'release_x64',
'V8 Linux64 - debug builder': 'debug_x64',
+ 'V8 Linux64 - dict tracking - debug - builder': 'debug_x64_dict_tracking_trybot',
'V8 Linux64 - custom snapshot - debug builder': 'debug_x64_custom',
'V8 Linux64 - internal snapshot': 'release_x64_internal',
'V8 Linux64 - debug - header includes': 'debug_x64_header_includes',
'V8 Linux64 - shared': 'release_x64_shared_verify_heap',
'V8 Linux64 - verify csa': 'release_x64_verify_csa',
+ 'V8 Linux64 - no wasm - builder': 'release_x64_webassembly_disabled',
# Windows.
'V8 Win32 - builder': 'release_x86_minimal_symbols',
'V8 Win32 - debug builder': 'debug_x86_minimal_symbols',
@@ -175,38 +183,6 @@
# RISC-V
'V8 Linux - riscv64 - sim - builder': 'release_simulate_riscv64',
},
- 'client.v8.branches': {
- 'V8 Linux - previous branch': 'release_x86',
- 'V8 Linux - previous branch - debug': 'debug_x86',
- 'V8 Linux - beta branch': 'release_x86',
- 'V8 Linux - beta branch - debug': 'debug_x86',
- 'V8 Linux - stable branch': 'release_x86',
- 'V8 Linux - stable branch - debug': 'debug_x86',
- 'V8 Linux64 - previous branch': 'release_x64',
- 'V8 Linux64 - previous branch - debug': 'debug_x64',
- 'V8 Linux64 - beta branch': 'release_x64',
- 'V8 Linux64 - beta branch - debug': 'debug_x64',
- 'V8 Linux64 - stable branch': 'release_x64',
- 'V8 Linux64 - stable branch - debug': 'debug_x64',
- 'V8 arm - sim - previous branch': 'release_simulate_arm',
- 'V8 arm - sim - previous branch - debug': 'debug_simulate_arm',
- 'V8 arm - sim - beta branch': 'release_simulate_arm',
- 'V8 arm - sim - beta branch - debug': 'debug_simulate_arm',
- 'V8 arm - sim - stable branch': 'release_simulate_arm',
- 'V8 arm - sim - stable branch - debug': 'debug_simulate_arm',
- 'V8 mips64el - sim - previous branch': 'release_simulate_mips64el',
- 'V8 mips64el - sim - beta branch': 'release_simulate_mips64el',
- 'V8 mips64el - sim - stable branch': 'release_simulate_mips64el',
- 'V8 mipsel - sim - previous branch': 'release_simulate_mipsel',
- 'V8 mipsel - sim - beta branch': 'release_simulate_mipsel',
- 'V8 mipsel - sim - stable branch': 'release_simulate_mipsel',
- 'V8 ppc64 - sim - previous branch': 'release_simulate_ppc64',
- 'V8 ppc64 - sim - beta branch': 'release_simulate_ppc64',
- 'V8 ppc64 - sim - stable branch': 'release_simulate_ppc64',
- 'V8 s390x - sim - previous branch': 'release_simulate_s390x',
- 'V8 s390x - sim - beta branch': 'release_simulate_s390x',
- 'V8 s390x - sim - stable branch': 'release_simulate_s390x',
- },
'tryserver.v8': {
'v8_android_arm_compile_rel': 'release_android_arm',
'v8_android_arm64_compile_dbg': 'debug_android_arm64',
@@ -228,6 +204,7 @@
'v8_linux64_arm64_pointer_compression_rel_ng':
'release_simulate_arm64_pointer_compression',
'v8_linux64_dbg_ng': 'debug_x64_trybot',
+ 'v8_linux64_dict_tracking_dbg_ng': 'debug_x64_dict_tracking_trybot',
'v8_linux64_gc_stress_custom_snapshot_dbg_ng': 'debug_x64_trybot_custom',
'v8_linux64_gcc_compile_dbg': 'debug_x64_gcc',
'v8_linux64_header_includes_dbg': 'debug_x64_header_includes',
@@ -237,6 +214,7 @@
'v8_linux64_pointer_compression_rel_ng': 'release_x64_pointer_compression',
'v8_linux64_rel_ng': 'release_x64_test_features_trybot',
'v8_linux64_shared_compile_rel': 'release_x64_shared_verify_heap',
+ 'v8_linux64_no_wasm_compile_rel': 'release_x64_webassembly_disabled',
'v8_linux64_verify_csa_rel_ng': 'release_x64_verify_csa',
'v8_linux64_asan_rel_ng': 'release_x64_asan_minimal_symbols',
'v8_linux64_cfi_rel_ng': 'release_x64_cfi',
@@ -262,6 +240,7 @@
'v8_mac_arm64_rel_ng': 'release_arm64',
'v8_mac_arm64_dbg_ng': 'debug_arm64',
'v8_mac_arm64_full_dbg_ng': 'full_debug_arm64',
+ 'v8_mac_arm64_compile_rel': 'release_arm64',
'v8_mac_arm64_sim_rel_ng': 'release_simulate_arm64_trybot',
'v8_mac_arm64_sim_dbg_ng': 'debug_simulate_arm64',
'v8_mac_arm64_sim_nodcheck_rel_ng': 'release_simulate_arm64',
@@ -320,6 +299,18 @@
'debug', 'simulate_mips64el', 'v8_enable_slow_dchecks'],
'default_release_mips64el': [
'release', 'simulate_mips64el'],
+ 'default_debug_riscv64': [
+ 'debug', 'riscv64', 'gcc', 'v8_enable_slow_dchecks', 'v8_full_debug'],
+ 'default_optdebug_riscv64': [
+ 'debug', 'riscv64', 'gcc', 'v8_enable_slow_dchecks'],
+ 'default_release_riscv64': [
+ 'release', 'riscv64', 'gcc'],
+ 'default_debug_riscv64_sim': [
+ 'debug', 'simulate_riscv64', 'v8_enable_slow_dchecks', 'v8_full_debug'],
+ 'default_optdebug_riscv64_sim': [
+ 'debug', 'simulate_riscv64', 'v8_enable_slow_dchecks'],
+ 'default_release_riscv64_sim': [
+ 'release', 'simulate_riscv64'],
'default_debug_ppc64': [
'debug', 'ppc64', 'gcc', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_ppc64': [
@@ -517,6 +508,8 @@
'release_x64_verify_csa': [
'release_bot', 'x64', 'dcheck_always_on',
'v8_enable_slow_dchecks', 'v8_verify_csa'],
+ 'release_x64_webassembly_disabled': [
+ 'release_bot', 'x64', 'webassembly_disabled'],
# Official configs for x64.
'official_x64': [
@@ -544,6 +537,8 @@
'debug_bot', 'x64', 'perfetto'],
'debug_x64_trybot': [
'debug_trybot', 'x64'],
+ 'debug_x64_dict_tracking_trybot': [
+ 'debug_trybot', 'x64', 'v8_enable_dict_property_const_tracking'],
'debug_x64_trybot_custom': [
'debug_trybot', 'x64', 'v8_snapshot_custom'],
'full_debug_x64': [
@@ -564,8 +559,6 @@
'debug', 'x86', 'goma', 'v8_enable_slow_dchecks', 'v8_full_debug'],
# Release configs for x86.
- 'release_x86': [
- 'release_bot', 'x86'],
'release_x86_gcc': [
'release_bot_no_goma', 'x86', 'gcc', 'v8_check_header_includes'],
'release_x86_gcc_minimal_symbols': [
@@ -848,6 +841,14 @@
'gn_args': 'v8_enable_slow_dchecks=true',
},
+ 'webassembly_disabled': {
+ 'gn_args': 'v8_enable_webassembly=false',
+ },
+
+ 'v8_enable_dict_property_const_tracking': {
+ 'gn_args': 'v8_dict_property_const_tracking=true',
+ },
+
'v8_disable_pointer_compression': {
'gn_args': 'v8_enable_pointer_compression=false',
},
@@ -901,6 +902,10 @@
'gn_args': 'target_cpu="ppc64" use_custom_libcxx=false',
},
+ 'riscv64': {
+ 'gn_args': 'target_cpu="riscv64" use_custom_libcxx=false',
+ },
+
'x64': {
'gn_args': 'target_cpu="x64"',
},
diff --git a/deps/v8/infra/testing/README.md b/deps/v8/infra/testing/README.md
index 438ba2e6d0..67231e9d64 100644
--- a/deps/v8/infra/testing/README.md
+++ b/deps/v8/infra/testing/README.md
@@ -34,7 +34,7 @@ The structure of the file is:
```
The `<buildername>` is a string name of the builder to execute the tests.
`<test-spec name>` is a label defining a test specification matching the
-[infra-side](https://chromium.googlesource.com/chromium/tools/build/+/master/scripts/slave/recipe_modules/v8/testing.py#58).
+[infra-side](https://chromium.googlesource.com/chromium/tools/build/+/refs/heads/master/recipes/recipe_modules/v8/testing.py).
The optional `suffix` will be appended to test-step names for disambiguation.
The optional `variant` is a testing variant specified
[here](https://chromium.googlesource.com/v8/v8/+/master/tools/testrunner/local/variants.py).
diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl
index bffd21d01b..7617d885de 100644
--- a/deps/v8/infra/testing/builders.pyl
+++ b/deps/v8/infra/testing/builders.pyl
@@ -39,11 +39,10 @@
'os': 'Android',
},
'tests': [
- {'name': 'benchmarks', 'variant': 'default'},
+ {'name': 'mozilla', 'variant': 'default'},
+ {'name': 'test262', 'variant': 'default', 'shards': 10},
{'name': 'v8testing', 'variant': 'default', 'shards': 4},
{'name': 'v8testing', 'variant': 'trusted', 'shards': 4},
- {'name': 'mozilla', 'variant': 'default'},
- {'name': 'test262', 'variant': 'default', 'shards': 9},
],
},
##############################################################################
@@ -259,7 +258,7 @@
{'name': 'mozilla'},
{'name': 'test262', 'variant': 'default'},
{'name': 'v8testing', 'shards': 7},
- {'name': 'v8testing', 'variant': 'extra', 'shards': 6},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 7},
{'name': 'v8testing', 'variant': 'trusted', 'shards': 2},
],
},
@@ -280,7 +279,7 @@
{'name': 'mozilla', 'shards': 2},
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 10},
- {'name': 'v8testing', 'variant': 'extra', 'shards': 8},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 10},
{'name': 'v8testing', 'variant': 'trusted', 'shards': 2},
],
},
@@ -294,7 +293,6 @@
{'name': 'test262', 'shards': 7},
{'name': 'v8testing', 'shards': 3},
{'name': 'v8testing', 'variant': 'extra', 'shards': 3},
- {'name': 'v8testing', 'variant': 'no_local_heaps'},
{'name': 'v8testing', 'variant': 'slow_path'},
],
},
@@ -329,6 +327,16 @@
{'name': 'v8testing', 'variant': 'no_lfa'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation'},
+ {'name': 'v8testing', 'variant': 'stress_concurrent_inlining'},
+ ],
+ },
+ 'v8_linux64_dict_tracking_dbg_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'cpu': 'x86-64-avx2',
+ 'os': 'Ubuntu-16.04',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 3},
],
},
'v8_linux64_fuzzilli_ng_triggered': {
@@ -357,9 +365,6 @@
'tests': [
# Infra staging.
{'name': 'v8testing', 'variant': 'infra_staging', 'shards': 2},
- # Native context independent code.
- {'name': 'v8testing', 'variant': 'nci'},
- {'name': 'v8testing', 'variant': 'nci_as_midtier'},
# Stress sampling.
{'name': 'mjsunit', 'variant': 'stress_sampling'},
{'name': 'webkit', 'variant': 'stress_sampling'},
@@ -449,7 +454,6 @@
{'name': 'test262', 'variant': 'default', 'shards': 3},
{'name': 'v8testing', 'shards': 5},
{'name': 'v8testing', 'variant': 'extra', 'shards': 3},
- {'name': 'v8testing', 'variant': 'no_local_heaps'},
{'name': 'v8testing', 'variant': 'slow_path'},
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation'},
],
@@ -469,6 +473,7 @@
'test_args': ['--extra-flags=--future'],
'shards': 6,
},
+ {'name': 'v8testing', 'variant': 'stress_concurrent_inlining'},
],
},
'v8_linux64_tsan_isolates_rel_ng_triggered': {
@@ -506,7 +511,7 @@
{'name': 'mozilla', 'shards': 4},
{'name': 'test262', 'variant': 'default', 'shards': 4},
{'name': 'v8testing', 'shards': 14},
- {'name': 'v8testing', 'variant': 'extra', 'shards': 10},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 12},
{'name': 'v8testing', 'variant': 'trusted', 'shards': 5},
],
},
@@ -527,7 +532,7 @@
{'name': 'mozilla', 'shards': 4},
{'name': 'test262', 'variant': 'default', 'shards': 4},
{'name': 'v8testing', 'shards': 14},
- {'name': 'v8testing', 'variant': 'extra', 'shards': 10},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 12},
{'name': 'v8testing', 'variant': 'trusted', 'shards': 5},
],
},
@@ -650,46 +655,42 @@
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Mac-10.15',
- 'gpu': 'none',
},
'tests': [
- {'name': 'v8testing', 'shards': 4},
+ {'name': 'v8testing', 'shards': 8},
],
},
'v8_mac64_dbg_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Mac-10.15',
- 'gpu': 'none',
},
'tests': [
{'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default', 'shards': 2},
- {'name': 'v8testing', 'shards': 3},
- {'name': 'v8testing', 'variant': 'extra', 'shards': 2},
+ {'name': 'test262', 'variant': 'default', 'shards': 4},
+ {'name': 'v8testing', 'shards': 6},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 6},
],
},
'v8_mac64_gc_stress_dbg_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Mac-10.15',
- 'gpu': 'none',
},
'tests': [
- {'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 4},
+ {'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 6},
],
},
'v8_mac64_rel_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Mac-10.15',
- 'gpu': 'none',
},
'tests': [
{'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default', 'shards': 2},
- {'name': 'v8testing', 'shards': 2},
- {'name': 'v8testing', 'variant': 'extra'},
+ {'name': 'test262', 'variant': 'default', 'shards': 3},
+ {'name': 'v8testing', 'shards': 3},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 3},
],
},
'v8_mac_arm64_rel_ng_triggered': {
@@ -1093,6 +1094,7 @@
{'name': 'v8testing', 'variant': 'slow_path'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation'},
+ {'name': 'v8testing', 'variant': 'stress_concurrent_inlining'},
# Noavx.
{
'name': 'mozilla',
@@ -1113,6 +1115,15 @@
},
],
},
+ 'V8 Linux64 - dict tracking - debug': {
+ 'swarming_dimensions': {
+ 'cpu': 'x86-64-avx2',
+ 'os': 'Ubuntu-16.04',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 3},
+ ],
+ },
'V8 Linux64 - debug - fyi': {
'swarming_dimensions' : {
'os': 'Ubuntu-16.04',
@@ -1120,9 +1131,6 @@
'tests': [
# Infra staging.
{'name': 'v8testing', 'variant': 'infra_staging', 'shards': 2},
- # Native context independent code.
- {'name': 'v8testing', 'variant': 'nci'},
- {'name': 'v8testing', 'variant': 'nci_as_midtier'},
# Stress sampling.
{'name': 'mjsunit', 'variant': 'stress_sampling'},
{'name': 'webkit', 'variant': 'stress_sampling'},
@@ -1152,9 +1160,6 @@
'tests': [
# Infra staging.
{'name': 'v8testing', 'variant': 'infra_staging'},
- # Native context independent code.
- {'name': 'v8testing', 'variant': 'nci'},
- {'name': 'v8testing', 'variant': 'nci_as_midtier'},
# Stress sampling.
{'name': 'mjsunit', 'variant': 'stress_sampling'},
{'name': 'webkit', 'variant': 'stress_sampling'},
@@ -1214,7 +1219,6 @@
{'name': 'test262', 'shards': 5},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
- {'name': 'v8testing', 'variant': 'no_local_heaps', 'shards': 1},
{'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
],
},
@@ -1240,7 +1244,6 @@
{'name': 'test262', 'variant': 'default', 'shards': 3},
{'name': 'v8testing', 'shards': 5},
{'name': 'v8testing', 'variant': 'extra', 'shards': 3},
- {'name': 'v8testing', 'variant': 'no_local_heaps', 'shards': 1},
{'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation', 'shards': 1},
],
@@ -1299,6 +1302,7 @@
'test_args': ['--extra-flags=--future'],
'shards': 6,
},
+ {'name': 'v8testing', 'variant': 'stress_concurrent_inlining'},
],
},
'V8 Linux64 UBSan': {
@@ -1316,46 +1320,42 @@
'swarming_dimensions': {
'cpu': 'x86-64',
'os': 'Mac-10.15',
- 'gpu': 'none',
},
'tests': [
{'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default', 'shards': 2},
- {'name': 'v8testing', 'shards': 2},
- {'name': 'v8testing', 'variant': 'extra'},
+ {'name': 'test262', 'variant': 'default', 'shards': 3},
+ {'name': 'v8testing', 'shards': 3},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 3},
],
},
'V8 Mac64 - debug': {
'swarming_dimensions': {
'cpu': 'x86-64',
'os': 'Mac-10.15',
- 'gpu': 'none',
},
'tests': [
{'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default', 'shards': 2},
- {'name': 'v8testing', 'shards': 4},
- {'name': 'v8testing', 'variant': 'extra', 'shards': 2},
+ {'name': 'test262', 'variant': 'default', 'shards': 4},
+ {'name': 'v8testing', 'shards': 6},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 6},
],
},
'V8 Mac64 ASAN': {
'swarming_dimensions': {
'cpu': 'x86-64',
'os': 'Mac-10.15',
- 'gpu': 'none',
},
'tests': [
- {'name': 'v8testing', 'shards': 5},
+ {'name': 'v8testing', 'shards': 10},
],
},
'V8 Mac64 GC Stress': {
'swarming_dimensions': {
'cpu': 'x86-64',
'os': 'Mac-10.15',
- 'gpu': 'none',
},
'tests': [
- {'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 4},
+ {'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 6},
],
},
'V8 Mac - arm64 - release': {
@@ -1476,9 +1476,9 @@
},
'tests': [
{'name': 'mozilla', 'variant': 'default'},
- {'name': 'test262', 'variant': 'default', 'shards': 8},
- {'name': 'v8testing', 'variant': 'default', 'shards': 3},
- {'name': 'v8testing', 'variant': 'trusted', 'shards': 3},
+ {'name': 'test262', 'variant': 'default', 'shards': 10},
+ {'name': 'v8testing', 'variant': 'default', 'shards': 4},
+ {'name': 'v8testing', 'variant': 'trusted', 'shards': 4},
],
},
'V8 Arm': {
@@ -1616,7 +1616,7 @@
{'name': 'mozilla', 'shards': 6},
{'name': 'test262', 'variant': 'default'},
{'name': 'v8testing', 'shards': 10},
- {'name': 'v8testing', 'variant': 'extra', 'shards': 8},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 10},
{'name': 'v8testing', 'variant': 'trusted', 'shards': 4},
# Armv8-a.
{
@@ -1702,7 +1702,7 @@
{'name': 'mozilla', 'shards': 2},
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 12},
- {'name': 'v8testing', 'variant': 'extra', 'shards': 9},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 11},
{'name': 'v8testing', 'variant': 'trusted', 'shards': 2},
],
},
diff --git a/deps/v8/samples/cppgc/cppgc-sample.cc b/deps/v8/samples/cppgc/cppgc-sample.cc
index befda96197..d76c16a553 100644
--- a/deps/v8/samples/cppgc/cppgc-sample.cc
+++ b/deps/v8/samples/cppgc/cppgc-sample.cc
@@ -46,11 +46,7 @@ int main(int argc, char* argv[]) {
// backend allocation.
auto cppgc_platform = std::make_shared<cppgc::DefaultPlatform>();
// Initialize the process. This must happen before any cppgc::Heap::Create()
- // calls. cppgc::DefaultPlatform::InitializeProcess initializes both cppgc
- // and v8 (if cppgc is not used as a standalone) as needed.
- // If using a platform other than cppgc::DefaultPlatform, should call
- // cppgc::InitializeProcess (for standalone builds) or
- // v8::V8::InitializePlatform (for non-standalone builds) directly.
+ // calls.
cppgc::DefaultPlatform::InitializeProcess(cppgc_platform.get());
// Create a managed heap.
std::unique_ptr<cppgc::Heap> heap = cppgc::Heap::Create(cppgc_platform);
diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc
index 4cff77cc04..e844ca51bf 100644
--- a/deps/v8/samples/shell.cc
+++ b/deps/v8/samples/shell.cc
@@ -314,7 +314,7 @@ bool ExecuteString(v8::Isolate* isolate, v8::Local<v8::String> source,
bool report_exceptions) {
v8::HandleScope handle_scope(isolate);
v8::TryCatch try_catch(isolate);
- v8::ScriptOrigin origin(name);
+ v8::ScriptOrigin origin(isolate, name);
v8::Local<v8::Context> context(isolate->GetCurrentContext());
v8::Local<v8::Script> script;
if (!v8::Script::Compile(context, source, &origin).ToLocal(&script)) {
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index 42d0c2d8a4..2bdba94b46 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -3,6 +3,8 @@ include_rules = [
"+src",
"-src/asmjs",
"+src/asmjs/asm-js.h",
+ "-src/baseline",
+ "+src/baseline/baseline.h",
"-src/compiler",
"+src/compiler/pipeline.h",
"+src/compiler/code-assembler.h",
diff --git a/deps/v8/src/api/api-inl.h b/deps/v8/src/api/api-inl.h
index b8a5c19532..10c8fb064d 100644
--- a/deps/v8/src/api/api-inl.h
+++ b/deps/v8/src/api/api-inl.h
@@ -6,6 +6,7 @@
#define V8_API_API_INL_H_
#include "src/api/api.h"
+#include "src/execution/microtask-queue.h"
#include "src/handles/handles-inl.h"
#include "src/objects/foreign-inl.h"
#include "src/objects/js-weak-refs.h"
@@ -96,7 +97,7 @@ MAKE_TO_LOCAL(AccessorSignatureToLocal, FunctionTemplateInfo, AccessorSignature)
MAKE_TO_LOCAL(MessageToLocal, Object, Message)
MAKE_TO_LOCAL(PromiseToLocal, JSObject, Promise)
MAKE_TO_LOCAL(StackTraceToLocal, FixedArray, StackTrace)
-MAKE_TO_LOCAL(StackFrameToLocal, StackTraceFrame, StackFrame)
+MAKE_TO_LOCAL(StackFrameToLocal, StackFrameInfo, StackFrame)
MAKE_TO_LOCAL(NumberToLocal, Object, Number)
MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
@@ -131,6 +132,111 @@ OPEN_HANDLE_LIST(MAKE_OPEN_HANDLE)
#undef MAKE_OPEN_HANDLE
#undef OPEN_HANDLE_LIST
+template <bool do_callback>
+class V8_NODISCARD CallDepthScope {
+ public:
+ CallDepthScope(i::Isolate* isolate, Local<Context> context)
+ : isolate_(isolate),
+ context_(context),
+ escaped_(false),
+ safe_for_termination_(isolate->next_v8_call_is_safe_for_termination()),
+ interrupts_scope_(isolate_, i::StackGuard::TERMINATE_EXECUTION,
+ isolate_->only_terminate_in_safe_scope()
+ ? (safe_for_termination_
+ ? i::InterruptsScope::kRunInterrupts
+ : i::InterruptsScope::kPostponeInterrupts)
+ : i::InterruptsScope::kNoop) {
+ isolate_->thread_local_top()->IncrementCallDepth(this);
+ isolate_->set_next_v8_call_is_safe_for_termination(false);
+ if (!context.IsEmpty()) {
+ i::Handle<i::Context> env = Utils::OpenHandle(*context);
+ i::HandleScopeImplementer* impl = isolate->handle_scope_implementer();
+ if (!isolate->context().is_null() &&
+ isolate->context().native_context() == env->native_context()) {
+ context_ = Local<Context>();
+ } else {
+ impl->SaveContext(isolate->context());
+ isolate->set_context(*env);
+ }
+ }
+ if (do_callback) isolate_->FireBeforeCallEnteredCallback();
+ }
+ ~CallDepthScope() {
+ i::MicrotaskQueue* microtask_queue = isolate_->default_microtask_queue();
+ if (!context_.IsEmpty()) {
+ i::HandleScopeImplementer* impl = isolate_->handle_scope_implementer();
+ isolate_->set_context(impl->RestoreContext());
+
+ i::Handle<i::Context> env = Utils::OpenHandle(*context_);
+ microtask_queue = env->native_context().microtask_queue();
+ }
+ if (!escaped_) isolate_->thread_local_top()->DecrementCallDepth(this);
+ if (do_callback) isolate_->FireCallCompletedCallback(microtask_queue);
+// TODO(jochen): This should be #ifdef DEBUG
+#ifdef V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY
+ if (do_callback) {
+ if (microtask_queue && microtask_queue->microtasks_policy() ==
+ v8::MicrotasksPolicy::kScoped) {
+ DCHECK(microtask_queue->GetMicrotasksScopeDepth() ||
+ !microtask_queue->DebugMicrotasksScopeDepthIsZero());
+ }
+ }
+#endif
+ DCHECK(CheckKeptObjectsClearedAfterMicrotaskCheckpoint(microtask_queue));
+ isolate_->set_next_v8_call_is_safe_for_termination(safe_for_termination_);
+ }
+
+ CallDepthScope(const CallDepthScope&) = delete;
+ CallDepthScope& operator=(const CallDepthScope&) = delete;
+
+ void Escape() {
+ DCHECK(!escaped_);
+ escaped_ = true;
+ auto thread_local_top = isolate_->thread_local_top();
+ thread_local_top->DecrementCallDepth(this);
+ bool clear_exception = thread_local_top->CallDepthIsZero() &&
+ thread_local_top->try_catch_handler_ == nullptr;
+ isolate_->OptionalRescheduleException(clear_exception);
+ }
+
+ private:
+ bool CheckKeptObjectsClearedAfterMicrotaskCheckpoint(
+ i::MicrotaskQueue* microtask_queue) {
+ bool did_perform_microtask_checkpoint =
+ isolate_->thread_local_top()->CallDepthIsZero() && do_callback &&
+ microtask_queue &&
+ microtask_queue->microtasks_policy() == MicrotasksPolicy::kAuto;
+ return !did_perform_microtask_checkpoint ||
+ isolate_->heap()->weak_refs_keep_during_job().IsUndefined(isolate_);
+ }
+
+ i::Isolate* const isolate_;
+ Local<Context> context_;
+ bool escaped_;
+ bool do_callback_;
+ bool safe_for_termination_;
+ i::InterruptsScope interrupts_scope_;
+ i::Address previous_stack_height_;
+
+ friend class i::ThreadLocalTop;
+
+ DISALLOW_NEW_AND_DELETE()
+};
+
+class V8_NODISCARD InternalEscapableScope : public EscapableHandleScope {
+ public:
+ explicit inline InternalEscapableScope(i::Isolate* isolate)
+ : EscapableHandleScope(reinterpret_cast<v8::Isolate*>(isolate)) {}
+};
+
+inline bool IsExecutionTerminatingCheck(i::Isolate* isolate) {
+ if (isolate->has_scheduled_exception()) {
+ return isolate->scheduled_exception() ==
+ i::ReadOnlyRoots(isolate).termination_exception();
+ }
+ return false;
+}
+
namespace internal {
Handle<Context> HandleScopeImplementer::LastEnteredContext() {
diff --git a/deps/v8/src/api/api-macros-undef.h b/deps/v8/src/api/api-macros-undef.h
new file mode 100644
index 0000000000..d3eea83a5f
--- /dev/null
+++ b/deps/v8/src/api/api-macros-undef.h
@@ -0,0 +1,20 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD
+
+#undef LOG_API
+#undef ENTER_V8_DO_NOT_USE
+#undef ENTER_V8_HELPER_DO_NOT_USE
+#undef PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE
+#undef PREPARE_FOR_EXECUTION_WITH_CONTEXT
+#undef PREPARE_FOR_EXECUTION
+#undef ENTER_V8
+#undef ENTER_V8_NO_SCRIPT
+#undef ENTER_V8_NO_SCRIPT_NO_EXCEPTION
+#undef ENTER_V8_FOR_NEW_CONTEXT
+#undef EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE
+#undef RETURN_ON_FAILED_EXECUTION
+#undef RETURN_ON_FAILED_EXECUTION_PRIMITIVE
+#undef RETURN_ESCAPED
diff --git a/deps/v8/src/api/api-macros.h b/deps/v8/src/api/api-macros.h
new file mode 100644
index 0000000000..b126e1cd5a
--- /dev/null
+++ b/deps/v8/src/api/api-macros.h
@@ -0,0 +1,132 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Note 1: Any file that includes this one should include api-macros-undef.h
+// at the bottom.
+
+// Note 2: This file is deliberately missing the include guards (the undeffing
+// approach wouldn't work otherwise).
+//
+// PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD
+
+/*
+ * Most API methods should use one of the three macros:
+ *
+ * ENTER_V8, ENTER_V8_NO_SCRIPT, ENTER_V8_NO_SCRIPT_NO_EXCEPTION.
+ *
+ * The latter two assume that no script is executed; the last one additionally
+ * assumes that no exceptions are scheduled. Creating a pending exception and
+ * removing it before returning is ok.
+ *
+ * Exceptions should be handled by invoking one of the
+ * RETURN_ON_FAILED_EXECUTION* macros.
+ *
+ * Don't use macros with DO_NOT_USE in their name.
+ *
+ * TODO(jochen): Document debugger specific macros.
+ * TODO(jochen): Document LOG_API and other RuntimeCallStats macros.
+ * TODO(jochen): All API methods should invoke one of the ENTER_V8* macros.
+ * TODO(jochen): Remove calls from API methods to DO_NOT_USE macros.
+ */
+
+#define LOG_API(isolate, class_name, function_name) \
+ i::RuntimeCallTimerScope _runtime_timer( \
+ isolate, i::RuntimeCallCounterId::kAPI_##class_name##_##function_name); \
+ LOG(isolate, ApiEntryCall("v8::" #class_name "::" #function_name))
+
+#define ENTER_V8_DO_NOT_USE(isolate) i::VMState<v8::OTHER> __state__((isolate))
+
+#define ENTER_V8_HELPER_DO_NOT_USE(isolate, context, class_name, \
+ function_name, bailout_value, \
+ HandleScopeClass, do_callback) \
+ if (IsExecutionTerminatingCheck(isolate)) { \
+ return bailout_value; \
+ } \
+ HandleScopeClass handle_scope(isolate); \
+ CallDepthScope<do_callback> call_depth_scope(isolate, context); \
+ LOG_API(isolate, class_name, function_name); \
+ i::VMState<v8::OTHER> __state__((isolate)); \
+ bool has_pending_exception = false
+
+#define PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(isolate, T) \
+ if (IsExecutionTerminatingCheck(isolate)) { \
+ return MaybeLocal<T>(); \
+ } \
+ InternalEscapableScope handle_scope(isolate); \
+ CallDepthScope<false> call_depth_scope(isolate, v8::Local<v8::Context>()); \
+ i::VMState<v8::OTHER> __state__((isolate)); \
+ bool has_pending_exception = false
+
+#define PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \
+ bailout_value, HandleScopeClass, \
+ do_callback) \
+ auto isolate = context.IsEmpty() \
+ ? i::Isolate::Current() \
+ : reinterpret_cast<i::Isolate*>(context->GetIsolate()); \
+ ENTER_V8_HELPER_DO_NOT_USE(isolate, context, class_name, function_name, \
+ bailout_value, HandleScopeClass, do_callback);
+
+#define PREPARE_FOR_EXECUTION(context, class_name, function_name, T) \
+ PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \
+ MaybeLocal<T>(), InternalEscapableScope, \
+ false)
+
+#define ENTER_V8(isolate, context, class_name, function_name, bailout_value, \
+ HandleScopeClass) \
+ ENTER_V8_HELPER_DO_NOT_USE(isolate, context, class_name, function_name, \
+ bailout_value, HandleScopeClass, true)
+
+#ifdef DEBUG
+#define ENTER_V8_NO_SCRIPT(isolate, context, class_name, function_name, \
+ bailout_value, HandleScopeClass) \
+ ENTER_V8_HELPER_DO_NOT_USE(isolate, context, class_name, function_name, \
+ bailout_value, HandleScopeClass, false); \
+ i::DisallowJavascriptExecutionDebugOnly __no_script__((isolate))
+
+// Lightweight version for APIs that don't require an active context.
+#define ASSERT_NO_SCRIPT_NO_EXCEPTION(isolate) \
+ i::DisallowJavascriptExecutionDebugOnly __no_script__((isolate)); \
+ i::DisallowExceptions __no_exceptions__((isolate))
+
+#define ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate) \
+ i::VMState<v8::OTHER> __state__((isolate)); \
+ ASSERT_NO_SCRIPT_NO_EXCEPTION(isolate)
+
+#define ENTER_V8_FOR_NEW_CONTEXT(isolate) \
+ i::VMState<v8::OTHER> __state__((isolate)); \
+ i::DisallowExceptions __no_exceptions__((isolate))
+#else
+#define ENTER_V8_NO_SCRIPT(isolate, context, class_name, function_name, \
+ bailout_value, HandleScopeClass) \
+ ENTER_V8_HELPER_DO_NOT_USE(isolate, context, class_name, function_name, \
+ bailout_value, HandleScopeClass, false)
+
+#define ASSERT_NO_SCRIPT_NO_EXCEPTION(isolate)
+
+#define ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate) \
+ i::VMState<v8::OTHER> __state__((isolate));
+
+#define ENTER_V8_FOR_NEW_CONTEXT(isolate) \
+ i::VMState<v8::OTHER> __state__((isolate));
+#endif // DEBUG
+
+#define EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE(isolate, value) \
+ do { \
+ if (has_pending_exception) { \
+ call_depth_scope.Escape(); \
+ return value; \
+ } \
+ } while (false)
+
+#define RETURN_ON_FAILED_EXECUTION(T) \
+ EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE(isolate, MaybeLocal<T>())
+
+#define RETURN_ON_FAILED_EXECUTION_PRIMITIVE(T) \
+ EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE(isolate, Nothing<T>())
+
+#define RETURN_ESCAPED(value) return handle_scope.Escape(value);
+
+// TODO(jochen): This should be #ifdef DEBUG
+#ifdef V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY
+#endif
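
For orientation, a sketch of the shape an api.cc method takes with these macros. The class, the method, and SomeInternalOperation (returning an i::MaybeHandle&lt;i::Object&gt;) are hypothetical, and the body only compiles inside api.cc where a matching RuntimeCallCounterId exists:

```cpp
// Hypothetical api.cc method (sketch only). PREPARE_FOR_EXECUTION installs
// the handle scope, the CallDepthScope and `has_pending_exception`; the
// RETURN_* macros consume them on the failure and success paths.
MaybeLocal<Value> HypotheticalClass::HypotheticalMethod(Local<Context> context,
                                                        Local<Value> input) {
  PREPARE_FOR_EXECUTION(context, HypotheticalClass, HypotheticalMethod, Value);
  Local<Value> result;
  has_pending_exception = !ToLocal<Value>(
      SomeInternalOperation(isolate, Utils::OpenHandle(*input)), &result);
  RETURN_ON_FAILED_EXECUTION(Value);
  RETURN_ESCAPED(result);
}
```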
diff --git a/deps/v8/src/api/api-natives.cc b/deps/v8/src/api/api-natives.cc
index 43d0b3a166..584d326aaf 100644
--- a/deps/v8/src/api/api-natives.cc
+++ b/deps/v8/src/api/api-natives.cc
@@ -603,7 +603,7 @@ MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
Handle<Name> name, Handle<Object> value,
PropertyAttributes attributes) {
- PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
+ PropertyDetails details(kData, attributes, PropertyConstness::kMutable);
auto details_handle = handle(details.AsSmi(), isolate);
Handle<Object> data[] = {name, details_handle, value};
AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@@ -614,7 +614,7 @@ void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
PropertyAttributes attributes) {
auto value = handle(Smi::FromInt(intrinsic), isolate);
auto intrinsic_marker = isolate->factory()->true_value();
- PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
+ PropertyDetails details(kData, attributes, PropertyConstness::kMutable);
auto details_handle = handle(details.AsSmi(), isolate);
Handle<Object> data[] = {name, intrinsic_marker, details_handle, value};
AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@@ -626,7 +626,7 @@ void ApiNatives::AddAccessorProperty(Isolate* isolate,
Handle<FunctionTemplateInfo> getter,
Handle<FunctionTemplateInfo> setter,
PropertyAttributes attributes) {
- PropertyDetails details(kAccessor, attributes, PropertyCellType::kNoCell);
+ PropertyDetails details(kAccessor, attributes, PropertyConstness::kMutable);
auto details_handle = handle(details.AsSmi(), isolate);
Handle<Object> data[] = {name, details_handle, getter, setter};
AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@@ -694,8 +694,8 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
immutable_proto = GetInstanceTemplate->immutable_proto();
}
- // JS_FUNCTION_TYPE requires information about the prototype slot.
- DCHECK_NE(JS_FUNCTION_TYPE, type);
+ // JSFunction requires information about the prototype slot.
+ DCHECK(!InstanceTypeChecker::IsJSFunction(type));
int instance_size = JSObject::GetHeaderSize(type) +
kEmbedderDataSlotSize * embedder_field_count;
diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc
index 9b19d155bd..a56b7e1a7d 100644
--- a/deps/v8/src/api/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -34,10 +34,6 @@
#include "src/common/globals.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/date/date.h"
-#include "src/debug/debug-coverage.h"
-#include "src/debug/debug-evaluate.h"
-#include "src/debug/debug-type-profile.h"
-#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/diagnostics/gdb-jit.h"
@@ -68,12 +64,10 @@
#include "src/objects/contexts.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/embedder-data-slot-inl.h"
-#include "src/objects/frame-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-object.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
-#include "src/objects/js-generator-inl.h"
#include "src/objects/js-promise-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/js-weak-refs-inl.h"
@@ -139,6 +133,9 @@
#endif // V8_OS_WIN64
#endif // V8_OS_WIN
+// Has to be the last include (doesn't have include guards):
+#include "src/api/api-macros.h"
+
#define TRACE_BS(...) \
do { \
if (i::FLAG_trace_backing_store) PrintF(__VA_ARGS__); \
@@ -146,229 +143,6 @@
namespace v8 {
-/*
- * Most API methods should use one of the three macros:
- *
- * ENTER_V8, ENTER_V8_NO_SCRIPT, ENTER_V8_NO_SCRIPT_NO_EXCEPTION.
- *
- * The latter two assume that no script is executed, and no exceptions are
- * scheduled in addition (respectively). Creating a pending exception and
- * removing it before returning is ok.
- *
- * Exceptions should be handled either by invoking one of the
- * RETURN_ON_FAILED_EXECUTION* macros.
- *
- * Don't use macros with DO_NOT_USE in their name.
- *
- * TODO(jochen): Document debugger specific macros.
- * TODO(jochen): Document LOG_API and other RuntimeCallStats macros.
- * TODO(jochen): All API methods should invoke one of the ENTER_V8* macros.
- * TODO(jochen): Remove calls form API methods to DO_NOT_USE macros.
- */
-
-#define LOG_API(isolate, class_name, function_name) \
- i::RuntimeCallTimerScope _runtime_timer( \
- isolate, i::RuntimeCallCounterId::kAPI_##class_name##_##function_name); \
- LOG(isolate, ApiEntryCall("v8::" #class_name "::" #function_name))
-
-#define ENTER_V8_DO_NOT_USE(isolate) i::VMState<v8::OTHER> __state__((isolate))
-
-#define ENTER_V8_HELPER_DO_NOT_USE(isolate, context, class_name, \
- function_name, bailout_value, \
- HandleScopeClass, do_callback) \
- if (IsExecutionTerminatingCheck(isolate)) { \
- return bailout_value; \
- } \
- HandleScopeClass handle_scope(isolate); \
- CallDepthScope<do_callback> call_depth_scope(isolate, context); \
- LOG_API(isolate, class_name, function_name); \
- i::VMState<v8::OTHER> __state__((isolate)); \
- bool has_pending_exception = false
-
-#define PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(isolate, T) \
- if (IsExecutionTerminatingCheck(isolate)) { \
- return MaybeLocal<T>(); \
- } \
- InternalEscapableScope handle_scope(isolate); \
- CallDepthScope<false> call_depth_scope(isolate, v8::Local<v8::Context>()); \
- i::VMState<v8::OTHER> __state__((isolate)); \
- bool has_pending_exception = false
-
-#define PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \
- bailout_value, HandleScopeClass, \
- do_callback) \
- auto isolate = context.IsEmpty() \
- ? i::Isolate::Current() \
- : reinterpret_cast<i::Isolate*>(context->GetIsolate()); \
- ENTER_V8_HELPER_DO_NOT_USE(isolate, context, class_name, function_name, \
- bailout_value, HandleScopeClass, do_callback);
-
-#define PREPARE_FOR_EXECUTION(context, class_name, function_name, T) \
- PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \
- MaybeLocal<T>(), InternalEscapableScope, \
- false)
-
-#define ENTER_V8(isolate, context, class_name, function_name, bailout_value, \
- HandleScopeClass) \
- ENTER_V8_HELPER_DO_NOT_USE(isolate, context, class_name, function_name, \
- bailout_value, HandleScopeClass, true)
-
-#ifdef DEBUG
-#define ENTER_V8_NO_SCRIPT(isolate, context, class_name, function_name, \
- bailout_value, HandleScopeClass) \
- ENTER_V8_HELPER_DO_NOT_USE(isolate, context, class_name, function_name, \
- bailout_value, HandleScopeClass, false); \
- i::DisallowJavascriptExecutionDebugOnly __no_script__((isolate))
-
-// Lightweight version for APIs that don't require an active context.
-#define ASSERT_NO_SCRIPT_NO_EXCEPTION(isolate) \
- i::DisallowJavascriptExecutionDebugOnly __no_script__((isolate)); \
- i::DisallowExceptions __no_exceptions__((isolate))
-
-#define ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate) \
- i::VMState<v8::OTHER> __state__((isolate)); \
- ASSERT_NO_SCRIPT_NO_EXCEPTION(isolate)
-
-#define ENTER_V8_FOR_NEW_CONTEXT(isolate) \
- i::VMState<v8::OTHER> __state__((isolate)); \
- i::DisallowExceptions __no_exceptions__((isolate))
-#else
-#define ENTER_V8_NO_SCRIPT(isolate, context, class_name, function_name, \
- bailout_value, HandleScopeClass) \
- ENTER_V8_HELPER_DO_NOT_USE(isolate, context, class_name, function_name, \
- bailout_value, HandleScopeClass, false)
-
-#define ASSERT_NO_SCRIPT_NO_EXCEPTION(isolate)
-
-#define ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate) \
- i::VMState<v8::OTHER> __state__((isolate));
-
-#define ENTER_V8_FOR_NEW_CONTEXT(isolate) \
- i::VMState<v8::OTHER> __state__((isolate));
-#endif // DEBUG
-
-#define EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE(isolate, value) \
- do { \
- if (has_pending_exception) { \
- call_depth_scope.Escape(); \
- return value; \
- } \
- } while (false)
-
-#define RETURN_ON_FAILED_EXECUTION(T) \
- EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE(isolate, MaybeLocal<T>())
-
-#define RETURN_ON_FAILED_EXECUTION_PRIMITIVE(T) \
- EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE(isolate, Nothing<T>())
-
-#define RETURN_ESCAPED(value) return handle_scope.Escape(value);
-
-namespace {
-
-class V8_NODISCARD InternalEscapableScope : public v8::EscapableHandleScope {
- public:
- explicit inline InternalEscapableScope(i::Isolate* isolate)
- : v8::EscapableHandleScope(reinterpret_cast<v8::Isolate*>(isolate)) {}
-};
-
-// TODO(jochen): This should be #ifdef DEBUG
-#ifdef V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY
-void CheckMicrotasksScopesConsistency(i::MicrotaskQueue* microtask_queue) {
- if (microtask_queue &&
- microtask_queue->microtasks_policy() == v8::MicrotasksPolicy::kScoped) {
- DCHECK(microtask_queue->GetMicrotasksScopeDepth() ||
- !microtask_queue->DebugMicrotasksScopeDepthIsZero());
- }
-}
-#endif
-
-template <bool do_callback>
-class V8_NODISCARD CallDepthScope {
- public:
- CallDepthScope(i::Isolate* isolate, Local<Context> context)
- : isolate_(isolate),
- context_(context),
- escaped_(false),
- safe_for_termination_(isolate->next_v8_call_is_safe_for_termination()),
- interrupts_scope_(isolate_, i::StackGuard::TERMINATE_EXECUTION,
- isolate_->only_terminate_in_safe_scope()
- ? (safe_for_termination_
- ? i::InterruptsScope::kRunInterrupts
- : i::InterruptsScope::kPostponeInterrupts)
- : i::InterruptsScope::kNoop) {
- isolate_->thread_local_top()->IncrementCallDepth(this);
- isolate_->set_next_v8_call_is_safe_for_termination(false);
- if (!context.IsEmpty()) {
- i::Handle<i::Context> env = Utils::OpenHandle(*context);
- i::HandleScopeImplementer* impl = isolate->handle_scope_implementer();
- if (!isolate->context().is_null() &&
- isolate->context().native_context() == env->native_context()) {
- context_ = Local<Context>();
- } else {
- impl->SaveContext(isolate->context());
- isolate->set_context(*env);
- }
- }
- if (do_callback) isolate_->FireBeforeCallEnteredCallback();
- }
- ~CallDepthScope() {
- i::MicrotaskQueue* microtask_queue = isolate_->default_microtask_queue();
- if (!context_.IsEmpty()) {
- i::HandleScopeImplementer* impl = isolate_->handle_scope_implementer();
- isolate_->set_context(impl->RestoreContext());
-
- i::Handle<i::Context> env = Utils::OpenHandle(*context_);
- microtask_queue = env->native_context().microtask_queue();
- }
- if (!escaped_) isolate_->thread_local_top()->DecrementCallDepth(this);
- if (do_callback) isolate_->FireCallCompletedCallback(microtask_queue);
-// TODO(jochen): This should be #ifdef DEBUG
-#ifdef V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY
- if (do_callback) CheckMicrotasksScopesConsistency(microtask_queue);
-#endif
- DCHECK(CheckKeptObjectsClearedAfterMicrotaskCheckpoint(microtask_queue));
- isolate_->set_next_v8_call_is_safe_for_termination(safe_for_termination_);
- }
-
- CallDepthScope(const CallDepthScope&) = delete;
- CallDepthScope& operator=(const CallDepthScope&) = delete;
-
- void Escape() {
- DCHECK(!escaped_);
- escaped_ = true;
- auto thread_local_top = isolate_->thread_local_top();
- thread_local_top->DecrementCallDepth(this);
- bool clear_exception = thread_local_top->CallDepthIsZero() &&
- thread_local_top->try_catch_handler_ == nullptr;
- isolate_->OptionalRescheduleException(clear_exception);
- }
-
- private:
- bool CheckKeptObjectsClearedAfterMicrotaskCheckpoint(
- i::MicrotaskQueue* microtask_queue) {
- bool did_perform_microtask_checkpoint =
- isolate_->thread_local_top()->CallDepthIsZero() &&
- do_callback && microtask_queue &&
- microtask_queue->microtasks_policy() == MicrotasksPolicy::kAuto;
- return !did_perform_microtask_checkpoint ||
- isolate_->heap()->weak_refs_keep_during_job().IsUndefined(isolate_);
- }
-
- i::Isolate* const isolate_;
- Local<Context> context_;
- bool escaped_;
- bool do_callback_;
- bool safe_for_termination_;
- i::InterruptsScope interrupts_scope_;
- i::Address previous_stack_height_;
-
- friend class i::ThreadLocalTop;
-
- DISALLOW_NEW_AND_DELETE()
-};
-
-} // namespace
-
static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate,
i::Handle<i::Script> script) {
i::Handle<i::Object> scriptName(script->GetNameOrSourceURL(), isolate);
@@ -377,8 +151,9 @@ static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate,
isolate);
ScriptOriginOptions options(script->origin_options());
v8::ScriptOrigin origin(
- Utils::ToLocal(scriptName), script->line_offset(),
- script->column_offset(), options.IsSharedCrossOrigin(), script->id(),
+ reinterpret_cast<v8::Isolate*>(isolate), Utils::ToLocal(scriptName),
+ script->line_offset(), script->column_offset(),
+ options.IsSharedCrossOrigin(), script->id(),
Utils::ToLocal(source_map_url), options.IsOpaque(),
script->type() == i::Script::TYPE_WASM, options.IsModule(),
Utils::PrimitiveArrayToLocal(host_defined_options));
@@ -533,14 +308,6 @@ void Utils::ReportOOMFailure(i::Isolate* isolate, const char* location,
isolate->SignalFatalError();
}
-static inline bool IsExecutionTerminatingCheck(i::Isolate* isolate) {
- if (isolate->has_scheduled_exception()) {
- return isolate->scheduled_exception() ==
- i::ReadOnlyRoots(isolate).termination_exception();
- }
- return false;
-}
-
void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) {
i::V8::SetSnapshotBlob(snapshot_blob);
}
@@ -1182,6 +949,8 @@ bool Data::IsFunctionTemplate() const {
return Utils::OpenHandle(this)->IsFunctionTemplateInfo();
}
+bool Data::IsContext() const { return Utils::OpenHandle(this)->IsContext(); }
+
void Context::Enter() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Isolate* isolate = env->GetIsolate();
@@ -1314,9 +1083,9 @@ void Context::SetAlignedPointerInEmbedderData(int index, void* value) {
// --- T e m p l a t e ---
-static void InitializeTemplate(i::Handle<i::TemplateInfo> that, int type) {
- that->set_number_of_properties(0);
- that->set_tag(type);
+static void InitializeTemplate(i::TemplateInfo that, int type) {
+ that.set_number_of_properties(0);
+ that.set_tag(type);
}
void Template::Set(v8::Local<Name> name, v8::Local<Data> value,
@@ -1364,10 +1133,9 @@ void Template::SetAccessorProperty(v8::Local<v8::Name> name,
}
// --- F u n c t i o n T e m p l a t e ---
-static void InitializeFunctionTemplate(
- i::Handle<i::FunctionTemplateInfo> info) {
+static void InitializeFunctionTemplate(i::FunctionTemplateInfo info) {
InitializeTemplate(info, Consts::FUNCTION_TEMPLATE);
- info->set_flag(0);
+ info.set_flag(0);
}
static Local<ObjectTemplate> ObjectTemplateNew(
@@ -1419,7 +1187,8 @@ void FunctionTemplate::Inherit(v8::Local<FunctionTemplate> value) {
static Local<FunctionTemplate> FunctionTemplateNew(
i::Isolate* isolate, FunctionCallback callback, v8::Local<Value> data,
- v8::Local<Signature> signature, int length, bool do_not_cache,
+ v8::Local<Signature> signature, int length, ConstructorBehavior behavior,
+ bool do_not_cache,
v8::Local<Private> cached_property_name = v8::Local<Private>(),
SideEffectType side_effect_type = SideEffectType::kHasSideEffect,
const CFunction* c_function = nullptr) {
@@ -1430,29 +1199,31 @@ static Local<FunctionTemplate> FunctionTemplateNew(
{
// Disallow GC until all fields of obj have acceptable types.
i::DisallowGarbageCollection no_gc;
- InitializeFunctionTemplate(obj);
- obj->set_length(length);
- obj->set_do_not_cache(do_not_cache);
+ i::FunctionTemplateInfo raw = *obj;
+ InitializeFunctionTemplate(raw);
+ raw.set_length(length);
+ raw.set_do_not_cache(do_not_cache);
int next_serial_number = i::FunctionTemplateInfo::kInvalidSerialNumber;
if (!do_not_cache) {
next_serial_number = isolate->heap()->GetNextTemplateSerialNumber();
}
- obj->set_serial_number(next_serial_number);
+ raw.set_serial_number(next_serial_number);
+ raw.set_undetectable(false);
+ raw.set_needs_access_check(false);
+ raw.set_accept_any_receiver(true);
+ if (!signature.IsEmpty()) {
+ raw.set_signature(*Utils::OpenHandle(*signature));
+ }
+ raw.set_cached_property_name(
+ cached_property_name.IsEmpty()
+ ? i::ReadOnlyRoots(isolate).the_hole_value()
+ : *Utils::OpenHandle(*cached_property_name));
+ if (behavior == ConstructorBehavior::kThrow) raw.set_remove_prototype(true);
}
if (callback != nullptr) {
Utils::ToLocal(obj)->SetCallHandler(callback, data, side_effect_type,
c_function);
}
- obj->set_undetectable(false);
- obj->set_needs_access_check(false);
- obj->set_accept_any_receiver(true);
- if (!signature.IsEmpty()) {
- obj->set_signature(*Utils::OpenHandle(*signature));
- }
- obj->set_cached_property_name(
- cached_property_name.IsEmpty()
- ? i::ReadOnlyRoots(isolate).the_hole_value()
- : *Utils::OpenHandle(*cached_property_name));
return Utils::ToLocal(obj);
}
@@ -1465,10 +1236,9 @@ Local<FunctionTemplate> FunctionTemplate::New(
// function templates when the isolate is created for serialization.
LOG_API(i_isolate, FunctionTemplate, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- auto templ =
- FunctionTemplateNew(i_isolate, callback, data, signature, length, false,
- Local<Private>(), side_effect_type, c_function);
- if (behavior == ConstructorBehavior::kThrow) templ->RemovePrototype();
+ auto templ = FunctionTemplateNew(i_isolate, callback, data, signature, length,
+ behavior, false, Local<Private>(),
+ side_effect_type, c_function);
return templ;
}
@@ -1480,7 +1250,8 @@ Local<FunctionTemplate> FunctionTemplate::NewWithCache(
LOG_API(i_isolate, FunctionTemplate, NewWithCache);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
return FunctionTemplateNew(i_isolate, callback, data, signature, length,
- false, cache_property, side_effect_type);
+ ConstructorBehavior::kAllow, false, cache_property,
+ side_effect_type);
}
Local<Signature> Signature::New(Isolate* isolate,
@@ -1651,16 +1422,18 @@ static Local<ObjectTemplate> ObjectTemplateNew(
{
// Disallow GC until all fields of obj have acceptable types.
i::DisallowGarbageCollection no_gc;
- InitializeTemplate(obj, Consts::OBJECT_TEMPLATE);
+ i::ObjectTemplateInfo raw = *obj;
+ InitializeTemplate(raw, Consts::OBJECT_TEMPLATE);
+ raw.set_data(0);
int next_serial_number = 0;
if (!do_not_cache) {
next_serial_number = isolate->heap()->GetNextTemplateSerialNumber();
}
- obj->set_serial_number(next_serial_number);
- obj->set_data(0);
+ raw.set_serial_number(next_serial_number);
+ if (!constructor.IsEmpty()) {
+ raw.set_constructor(*Utils::OpenHandle(*constructor));
+ }
}
- if (!constructor.IsEmpty())
- obj->set_constructor(*Utils::OpenHandle(*constructor));
return Utils::ToLocal(obj);
}
@@ -3123,11 +2896,9 @@ void Message::PrintCurrentStackTrace(Isolate* isolate, FILE* out) {
Local<StackFrame> StackTrace::GetFrame(Isolate* v8_isolate,
uint32_t index) const {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- EscapableHandleScope scope(v8_isolate);
- auto obj = handle(Utils::OpenHandle(this)->get(index), isolate);
- auto frame = i::Handle<i::StackTraceFrame>::cast(obj);
- return scope.Escape(Utils::StackFrameToLocal(frame));
+ i::Handle<i::StackFrameInfo> frame(
+ i::StackFrameInfo::cast(Utils::OpenHandle(this)->get(index)), isolate);
+ return Utils::StackFrameToLocal(frame);
}
int StackTrace::GetFrameCount() const {
@@ -3147,65 +2918,50 @@ Local<StackTrace> StackTrace::CurrentStackTrace(Isolate* isolate,
// --- S t a c k F r a m e ---
int StackFrame::GetLineNumber() const {
- return i::StackTraceFrame::GetOneBasedLineNumber(Utils::OpenHandle(this));
+ return i::StackFrameInfo::GetLineNumber(Utils::OpenHandle(this));
}
int StackFrame::GetColumn() const {
- return i::StackTraceFrame::GetOneBasedColumnNumber(Utils::OpenHandle(this));
+ return i::StackFrameInfo::GetColumnNumber(Utils::OpenHandle(this));
}
int StackFrame::GetScriptId() const {
- return i::StackTraceFrame::GetScriptId(Utils::OpenHandle(this));
+ return Utils::OpenHandle(this)->GetScriptId();
}
Local<String> StackFrame::GetScriptName() const {
auto self = Utils::OpenHandle(this);
- i::Isolate* isolate = self->GetIsolate();
- ASSERT_NO_SCRIPT_NO_EXCEPTION(isolate);
- EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
- i::Handle<i::Object> name = i::StackTraceFrame::GetFileName(self);
- return name->IsString()
- ? scope.Escape(Local<String>::Cast(Utils::ToLocal(name)))
- : Local<String>();
+ auto isolate = self->GetIsolate();
+ i::Handle<i::Object> name(self->GetScriptName(), isolate);
+ if (!name->IsString()) return {};
+ return Local<String>::Cast(Utils::ToLocal(name));
}
Local<String> StackFrame::GetScriptNameOrSourceURL() const {
auto self = Utils::OpenHandle(this);
- i::Isolate* isolate = self->GetIsolate();
- ASSERT_NO_SCRIPT_NO_EXCEPTION(isolate);
- EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
- i::Handle<i::Object> name =
- i::StackTraceFrame::GetScriptNameOrSourceUrl(self);
- return name->IsString()
- ? scope.Escape(Local<String>::Cast(Utils::ToLocal(name)))
- : Local<String>();
+ auto isolate = self->GetIsolate();
+ i::Handle<i::Object> name_or_url(self->GetScriptNameOrSourceURL(), isolate);
+ if (!name_or_url->IsString()) return {};
+ return Local<String>::Cast(Utils::ToLocal(name_or_url));
}
Local<String> StackFrame::GetFunctionName() const {
auto self = Utils::OpenHandle(this);
- i::Isolate* isolate = self->GetIsolate();
- ASSERT_NO_SCRIPT_NO_EXCEPTION(isolate);
- EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
- i::Handle<i::Object> name = i::StackTraceFrame::GetFunctionName(self);
- return name->IsString()
- ? scope.Escape(Local<String>::Cast(Utils::ToLocal(name)))
- : Local<String>();
+ auto name = i::StackFrameInfo::GetFunctionName(self);
+ if (!name->IsString()) return {};
+ return Local<String>::Cast(Utils::ToLocal(name));
}
-bool StackFrame::IsEval() const {
- return i::StackTraceFrame::IsEval(Utils::OpenHandle(this));
-}
+bool StackFrame::IsEval() const { return Utils::OpenHandle(this)->IsEval(); }
bool StackFrame::IsConstructor() const {
- return i::StackTraceFrame::IsConstructor(Utils::OpenHandle(this));
+ return Utils::OpenHandle(this)->IsConstructor();
}
-bool StackFrame::IsWasm() const {
- return i::StackTraceFrame::IsWasm(Utils::OpenHandle(this));
-}
+bool StackFrame::IsWasm() const { return Utils::OpenHandle(this)->IsWasm(); }
bool StackFrame::IsUserJavaScript() const {
- return i::StackTraceFrame::IsUserJavaScript(Utils::OpenHandle(this));
+ return Utils::OpenHandle(this)->IsUserJavaScript();
}
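(Editor's note, not part of the diff: the StackFrame getters above now read straight from the new i::StackFrameInfo without extra handle scopes. A minimal sketch of how an embedder might consume them; the function name is illustrative only:

void DumpCurrentStack(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);
  v8::Local<v8::StackTrace> trace = v8::StackTrace::CurrentStackTrace(
      isolate, /*frame_limit=*/10, v8::StackTrace::kDetailed);
  for (int i = 0; i < trace->GetFrameCount(); ++i) {
    v8::Local<v8::StackFrame> frame =
        trace->GetFrame(isolate, static_cast<uint32_t>(i));
    int line = frame->GetLineNumber();                     // one-based
    int column = frame->GetColumn();                       // one-based
    v8::Local<v8::String> fn = frame->GetFunctionName();   // may be empty
    (void)line; (void)column; (void)fn;                    // log as needed
  }
}
)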
// --- J S O N ---
@@ -3876,6 +3632,12 @@ void v8::BigInt::CheckCast(v8::Data* that) {
"Value is not a BigInt");
}
+void v8::Context::CheckCast(v8::Data* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ Utils::ApiCheck(obj->IsContext(), "v8::Context::Cast",
+ "Value is not a Context");
+}
+
void v8::Array::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsJSArray(), "v8::Array::Cast", "Value is not an Array");
@@ -3915,12 +3677,6 @@ void v8::WasmModuleObject::CheckCast(Value* that) {
"Value is not a WasmModuleObject");
}
-void v8::debug::AccessorPair::CheckCast(Value* that) {
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsAccessorPair(), "v8::AccessorPair::Cast",
- "Value is not a debug::AccessorPair");
-}
-
v8::BackingStore::~BackingStore() {
auto i_this = reinterpret_cast<const i::BackingStore*>(this);
i_this->~BackingStore(); // manually call internal destructor
@@ -4954,10 +4710,36 @@ Local<v8::Object> v8::Object::Clone() {
return Utils::ToLocal(result);
}
+namespace {
+Local<v8::Context> CreationContextImpl(i::Handle<i::JSReceiver> self) {
+ i::Handle<i::Context> context;
+ if (self->GetCreationContext().ToHandle(&context)) {
+ return Utils::ToLocal(context);
+ }
+
+ return Local<v8::Context>();
+}
+} // namespace
+
Local<v8::Context> v8::Object::CreationContext() {
auto self = Utils::OpenHandle(this);
- i::Handle<i::Context> context = self->GetCreationContext();
- return Utils::ToLocal(context);
+ return CreationContextImpl(self);
+}
+
+Local<v8::Context> v8::Object::CreationContext(
+ const PersistentBase<Object>& object) {
+ auto self = Utils::OpenHandle(object.val_);
+ return CreationContextImpl(self);
+}
+
+MaybeLocal<v8::Context> v8::Object::GetCreationContext() {
+ auto self = Utils::OpenHandle(this);
+ i::Handle<i::Context> context;
+ if (self->GetCreationContext().ToHandle(&context)) {
+ return Utils::ToLocal(context);
+ }
+
+ return MaybeLocal<v8::Context>();
}
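(Editor's note, not part of the diff: GetCreationContext() is the new MaybeLocal-returning replacement for the CreationContext() overloads kept above for compatibility. A hedged usage sketch; the helper name is made up for illustration:

v8::MaybeLocal<v8::Context> ContextOf(v8::Local<v8::Object> object) {
  v8::Local<v8::Context> context;
  if (object->GetCreationContext().ToLocal(&context)) {
    return context;  // `object` was instantiated in this context.
  }
  return {};  // No creation context, e.g. for remote/detached objects.
}
)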
int v8::Object::GetIdentityHash() {
@@ -5034,8 +4816,7 @@ MaybeLocal<Function> Function::New(Local<Context> context,
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
auto templ =
FunctionTemplateNew(isolate, callback, data, Local<Signature>(), length,
- true, Local<Private>(), side_effect_type);
- if (behavior == ConstructorBehavior::kThrow) templ->RemovePrototype();
+ behavior, true, Local<Private>(), side_effect_type);
return templ->GetFunction(context);
}
@@ -5172,7 +4953,7 @@ Local<Value> Function::GetDisplayName() const {
}
auto func = i::Handle<i::JSFunction>::cast(self);
i::Handle<i::String> property_name =
- isolate->factory()->display_name_string();
+ isolate->factory()->InternalizeString(i::StaticCharVector("displayName"));
i::Handle<i::Object> value =
i::JSReceiver::GetDataProperty(func, property_name);
if (value->IsString()) {
@@ -5184,14 +4965,15 @@ Local<Value> Function::GetDisplayName() const {
ScriptOrigin Function::GetScriptOrigin() const {
auto self = Utils::OpenHandle(this);
- if (!self->IsJSFunction()) return v8::ScriptOrigin(Local<Value>());
+ auto isolate = reinterpret_cast<v8::Isolate*>(self->GetIsolate());
+ if (!self->IsJSFunction()) return v8::ScriptOrigin(isolate, Local<Value>());
auto func = i::Handle<i::JSFunction>::cast(self);
if (func->shared().script().IsScript()) {
i::Handle<i::Script> script(i::Script::cast(func->shared().script()),
func->GetIsolate());
return GetScriptOriginForScript(func->GetIsolate(), script);
}
- return v8::ScriptOrigin(Local<Value>());
+ return v8::ScriptOrigin(isolate, Local<Value>());
}
const int Function::kLineOffsetNotFound = -1;
@@ -5671,6 +5453,24 @@ String::ExternalStringResource* String::GetExternalStringResourceSlow() const {
return nullptr;
}
+void String::ExternalStringResource::UpdateDataCache() {
+ DCHECK(IsCacheable());
+ cached_data_ = data();
+}
+
+void String::ExternalStringResource::CheckCachedDataInvariants() const {
+ DCHECK(IsCacheable() && cached_data_ != nullptr);
+}
+
+void String::ExternalOneByteStringResource::UpdateDataCache() {
+ DCHECK(IsCacheable());
+ cached_data_ = data();
+}
+
+void String::ExternalOneByteStringResource::CheckCachedDataInvariants() const {
+ DCHECK(IsCacheable() && cached_data_ != nullptr);
+}
+
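(Editor's note, not part of the diff: the new UpdateDataCache()/CheckCachedDataInvariants() helpers cache the pointer returned by a resource's data(). For context, a minimal external resource that such a cache would observe might look like the sketch below, assuming the default IsCacheable() implementation returning true is kept:

class StaticOneByteResource : public v8::String::ExternalOneByteStringResource {
 public:
  StaticOneByteResource(const char* str, size_t length)
      : str_(str), length_(length) {}
  const char* data() const override { return str_; }   // value that gets cached
  size_t length() const override { return length_; }
 private:
  const char* str_;
  size_t length_;
};
)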
String::ExternalStringResourceBase* String::GetExternalStringResourceBaseSlow(
String::Encoding* encoding_out) const {
i::DisallowGarbageCollection no_gc;
@@ -5897,7 +5697,13 @@ bool v8::V8::Initialize(const int build_config) {
#if V8_OS_LINUX || V8_OS_MACOSX
bool TryHandleWebAssemblyTrapPosix(int sig_code, siginfo_t* info,
void* context) {
-#if V8_TARGET_ARCH_X64 && !V8_OS_ANDROID
+ // When the target code runs on the V8 arm simulator, the trap handler does
+ // not behave as expected: the instruction pointer points inside the simulator
+ // code rather than the wasm code, so the trap handler cannot find the landing
+ // pad and lets the process crash. Therefore, only enable trap handlers if
+ // the host and target arch are the same.
+#if (V8_TARGET_ARCH_X64 && !V8_OS_ANDROID) || \
+ (V8_HOST_ARCH_ARM64 && V8_TARGET_ARCH_ARM64 && V8_OS_MACOSX)
return i::trap_handler::TryHandleSignal(sig_code, info, context);
#else
return false;
@@ -7018,6 +6824,7 @@ REGEXP_FLAG_ASSERT_EQ(kIgnoreCase);
REGEXP_FLAG_ASSERT_EQ(kMultiline);
REGEXP_FLAG_ASSERT_EQ(kSticky);
REGEXP_FLAG_ASSERT_EQ(kUnicode);
+REGEXP_FLAG_ASSERT_EQ(kHasIndices);
REGEXP_FLAG_ASSERT_EQ(kLinear);
#undef REGEXP_FLAG_ASSERT_EQ
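(Editor's note, not part of the diff: the new assert covers the `d` (hasIndices) regexp flag. Since the macro checks that the public v8::RegExp::kHasIndices constant matches the internal flag, an API-level sketch would be:

v8::MaybeLocal<v8::RegExp> MakeIndicesRegExp(v8::Local<v8::Context> context,
                                             v8::Local<v8::String> pattern) {
  // Roughly equivalent to compiling the pattern with the /d flag in JS.
  return v8::RegExp::New(context, pattern, v8::RegExp::kHasIndices);
}
)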
@@ -8442,6 +8249,16 @@ EmbedderHeapTracer* Isolate::GetEmbedderHeapTracer() {
return isolate->heap()->GetEmbedderHeapTracer();
}
+void Isolate::AttachCppHeap(CppHeap* cpp_heap) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->AttachCppHeap(cpp_heap);
+}
+
+void Isolate::DetachCppHeap() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->DetachCppHeap();
+}
+
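(Editor's note, not part of the diff: AttachCppHeap()/DetachCppHeap() expose the heap hookup that previously happened via cpp_heap_params during Isolate::Initialize(), whose removal appears in a later hunk. A minimal sketch of the pairing, with CppHeap construction elided:

void UseCppHeap(v8::Isolate* isolate, v8::CppHeap* cpp_heap) {
  isolate->AttachCppHeap(cpp_heap);
  // ... allocate and trace cppgc-managed C++ objects tied to this isolate ...
  isolate->DetachCppHeap();  // typically detached before disposing the isolate
}
)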
CppHeap* Isolate::GetCppHeap() const {
const i::Isolate* isolate = reinterpret_cast<const i::Isolate*>(this);
return isolate->heap()->cpp_heap();
@@ -8553,13 +8370,7 @@ void Isolate::Initialize(Isolate* isolate,
i_isolate->set_api_external_references(params.external_references);
i_isolate->set_allow_atomics_wait(params.allow_atomics_wait);
- i_isolate->set_supported_import_assertions(
- params.supported_import_assertions);
-
i_isolate->heap()->ConfigureHeap(params.constraints);
- if (params.cpp_heap_params) {
- i_isolate->heap()->ConfigureCppHeap(params.cpp_heap_params);
- }
if (params.constraints.stack_limit() != nullptr) {
uintptr_t limit =
reinterpret_cast<uintptr_t>(params.constraints.stack_limit());
@@ -8641,7 +8452,13 @@ void Isolate::SetAbortOnUncaughtExceptionCallback(
}
void Isolate::SetHostImportModuleDynamicallyCallback(
- HostImportModuleDynamicallyCallback callback) {
+ i::Isolate::DeprecatedHostImportModuleDynamicallyCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->SetHostImportModuleDynamicallyCallback(callback);
+}
+
+void Isolate::SetHostImportModuleDynamicallyCallback(
+ HostImportModuleDynamicallyWithImportAssertionsCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->SetHostImportModuleDynamicallyCallback(callback);
}
@@ -8660,21 +8477,20 @@ void Isolate::SetPrepareStackTraceCallback(PrepareStackTraceCallback callback) {
Isolate::DisallowJavascriptExecutionScope::DisallowJavascriptExecutionScope(
Isolate* isolate,
Isolate::DisallowJavascriptExecutionScope::OnFailure on_failure)
- : on_failure_(on_failure) {
+ : on_failure_(on_failure), isolate_(isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
switch (on_failure_) {
case CRASH_ON_FAILURE:
- internal_ = reinterpret_cast<void*>(
- new i::DisallowJavascriptExecution(i_isolate));
+ i::DisallowJavascriptExecution::Open(i_isolate,
+ &was_execution_allowed_assert_);
break;
case THROW_ON_FAILURE:
- DCHECK_EQ(THROW_ON_FAILURE, on_failure);
- internal_ =
- reinterpret_cast<void*>(new i::ThrowOnJavascriptExecution(i_isolate));
+ i::ThrowOnJavascriptExecution::Open(i_isolate,
+ &was_execution_allowed_throws_);
break;
case DUMP_ON_FAILURE:
- internal_ =
- reinterpret_cast<void*>(new i::DumpOnJavascriptExecution(i_isolate));
+ i::DumpOnJavascriptExecution::Open(i_isolate,
+ &was_execution_allowed_dump_);
break;
default:
UNREACHABLE();
@@ -8682,15 +8498,19 @@ Isolate::DisallowJavascriptExecutionScope::DisallowJavascriptExecutionScope(
}
Isolate::DisallowJavascriptExecutionScope::~DisallowJavascriptExecutionScope() {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate_);
switch (on_failure_) {
case CRASH_ON_FAILURE:
- delete reinterpret_cast<i::DisallowJavascriptExecution*>(internal_);
+ i::DisallowJavascriptExecution::Close(i_isolate,
+ was_execution_allowed_assert_);
break;
case THROW_ON_FAILURE:
- delete reinterpret_cast<i::ThrowOnJavascriptExecution*>(internal_);
+ i::ThrowOnJavascriptExecution::Close(i_isolate,
+ was_execution_allowed_throws_);
break;
case DUMP_ON_FAILURE:
- delete reinterpret_cast<i::DumpOnJavascriptExecution*>(internal_);
+ i::DumpOnJavascriptExecution::Close(i_isolate,
+ was_execution_allowed_dump_);
break;
default:
UNREACHABLE();
@@ -8698,20 +8518,21 @@ Isolate::DisallowJavascriptExecutionScope::~DisallowJavascriptExecutionScope() {
}
Isolate::AllowJavascriptExecutionScope::AllowJavascriptExecutionScope(
- Isolate* isolate) {
+ Isolate* isolate)
+ : isolate_(isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- internal_assert_ =
- reinterpret_cast<void*>(new i::AllowJavascriptExecution(i_isolate));
- internal_throws_ =
- reinterpret_cast<void*>(new i::NoThrowOnJavascriptExecution(i_isolate));
- internal_dump_ =
- reinterpret_cast<void*>(new i::NoDumpOnJavascriptExecution(i_isolate));
+ i::AllowJavascriptExecution::Open(i_isolate, &was_execution_allowed_assert_);
+ i::NoThrowOnJavascriptExecution::Open(i_isolate,
+ &was_execution_allowed_throws_);
+ i::NoDumpOnJavascriptExecution::Open(i_isolate, &was_execution_allowed_dump_);
}
Isolate::AllowJavascriptExecutionScope::~AllowJavascriptExecutionScope() {
- delete reinterpret_cast<i::AllowJavascriptExecution*>(internal_assert_);
- delete reinterpret_cast<i::NoThrowOnJavascriptExecution*>(internal_throws_);
- delete reinterpret_cast<i::NoDumpOnJavascriptExecution*>(internal_dump_);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate_);
+ i::AllowJavascriptExecution::Close(i_isolate, was_execution_allowed_assert_);
+ i::NoThrowOnJavascriptExecution::Close(i_isolate,
+ was_execution_allowed_throws_);
+ i::NoDumpOnJavascriptExecution::Close(i_isolate, was_execution_allowed_dump_);
}
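(Editor's note, not part of the diff: the scopes above now use the static Open/Close helpers instead of heap-allocating per-assert guard objects; their embedder-facing usage is unchanged. A short sketch:

void RunWithoutJs(v8::Isolate* isolate) {
  v8::Isolate::DisallowJavascriptExecutionScope no_js(
      isolate,
      v8::Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);
  // Any attempt to execute JavaScript within this scope throws
  // instead of running.
}
)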
Isolate::SuppressMicrotaskExecutionScope::SuppressMicrotaskExecutionScope(
@@ -9249,15 +9070,15 @@ CALLBACK_SETTER(WasmInstanceCallback, ExtensionCallback, wasm_instance_callback)
CALLBACK_SETTER(WasmStreamingCallback, WasmStreamingCallback,
wasm_streaming_callback)
-CALLBACK_SETTER(WasmThreadsEnabledCallback, WasmThreadsEnabledCallback,
- wasm_threads_enabled_callback)
-
CALLBACK_SETTER(WasmLoadSourceMapCallback, WasmLoadSourceMapCallback,
wasm_load_source_map_callback)
CALLBACK_SETTER(WasmSimdEnabledCallback, WasmSimdEnabledCallback,
wasm_simd_enabled_callback)
+CALLBACK_SETTER(WasmExceptionsEnabledCallback, WasmExceptionsEnabledCallback,
+ wasm_exceptions_enabled_callback)
+
void Isolate::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
void* data) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -9538,729 +9359,6 @@ Local<StackTrace> Exception::GetStackTrace(Local<Value> exception) {
return Utils::StackTraceToLocal(isolate->GetDetailedStackTrace(js_obj));
}
-// --- D e b u g S u p p o r t ---
-
-void debug::SetContextId(Local<Context> context, int id) {
- Utils::OpenHandle(*context)->set_debug_context_id(i::Smi::FromInt(id));
-}
-
-int debug::GetContextId(Local<Context> context) {
- i::Object value = Utils::OpenHandle(*context)->debug_context_id();
- return (value.IsSmi()) ? i::Smi::ToInt(value) : 0;
-}
-
-void debug::SetInspector(Isolate* isolate,
- v8_inspector::V8Inspector* inspector) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i_isolate->set_inspector(inspector);
-}
-
-v8_inspector::V8Inspector* debug::GetInspector(Isolate* isolate) {
- return reinterpret_cast<i::Isolate*>(isolate)->inspector();
-}
-
-void debug::SetBreakOnNextFunctionCall(Isolate* isolate) {
- reinterpret_cast<i::Isolate*>(isolate)->debug()->SetBreakOnNextFunctionCall();
-}
-
-void debug::ClearBreakOnNextFunctionCall(Isolate* isolate) {
- reinterpret_cast<i::Isolate*>(isolate)
- ->debug()
- ->ClearBreakOnNextFunctionCall();
-}
-
-MaybeLocal<Array> debug::GetInternalProperties(Isolate* v8_isolate,
- Local<Value> value) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- i::Handle<i::Object> val = Utils::OpenHandle(*value);
- i::Handle<i::JSArray> result;
- if (!i::Runtime::GetInternalProperties(isolate, val).ToHandle(&result))
- return MaybeLocal<Array>();
- return Utils::ToLocal(result);
-}
-
-namespace {
-void CollectPrivateMethodsAndAccessorsFromContext(
- i::Isolate* isolate, i::Handle<i::Context> context,
- i::IsStaticFlag is_static_flag, std::vector<Local<Value>>* names_out,
- std::vector<Local<Value>>* values_out) {
- i::Handle<i::ScopeInfo> scope_info(context->scope_info(), isolate);
- int local_count = scope_info->ContextLocalCount();
- for (int j = 0; j < local_count; ++j) {
- i::VariableMode mode = scope_info->ContextLocalMode(j);
- i::IsStaticFlag flag = scope_info->ContextLocalIsStaticFlag(j);
- if (!i::IsPrivateMethodOrAccessorVariableMode(mode) ||
- flag != is_static_flag) {
- continue;
- }
-
- i::Handle<i::String> name(scope_info->ContextLocalName(j), isolate);
- int context_index = scope_info->ContextHeaderLength() + j;
- i::Handle<i::Object> slot_value(context->get(context_index), isolate);
- DCHECK_IMPLIES(mode == i::VariableMode::kPrivateMethod,
- slot_value->IsJSFunction());
- DCHECK_IMPLIES(mode != i::VariableMode::kPrivateMethod,
- slot_value->IsAccessorPair());
- names_out->push_back(Utils::ToLocal(name));
- values_out->push_back(Utils::ToLocal(slot_value));
- }
-}
-} // anonymous namespace
-
-bool debug::GetPrivateMembers(Local<Context> context, Local<Object> value,
- std::vector<Local<Value>>* names_out,
- std::vector<Local<Value>>* values_out) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
- LOG_API(isolate, debug, GetPrivateMembers);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- i::Handle<i::JSReceiver> receiver = Utils::OpenHandle(*value);
- i::Handle<i::JSArray> names;
- i::Handle<i::FixedArray> values;
-
- i::PropertyFilter key_filter =
- static_cast<i::PropertyFilter>(i::PropertyFilter::PRIVATE_NAMES_ONLY);
- i::Handle<i::FixedArray> keys;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, keys,
- i::KeyAccumulator::GetKeys(receiver, i::KeyCollectionMode::kOwnOnly,
- key_filter,
- i::GetKeysConversion::kConvertToString),
- false);
-
- // Estimate number of private fields and private instance methods/accessors.
- int private_entries_count = 0;
- for (int i = 0; i < keys->length(); ++i) {
- // Exclude the private brand symbols.
- i::Handle<i::Symbol> key(i::Symbol::cast(keys->get(i)), isolate);
- if (key->is_private_brand()) {
- i::Handle<i::Object> value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value, i::Object::GetProperty(isolate, receiver, key),
- false);
-
- i::Handle<i::Context> context(i::Context::cast(*value), isolate);
- i::Handle<i::ScopeInfo> scope_info(context->scope_info(), isolate);
- // At least one slot contains the brand symbol so it does not count.
- private_entries_count += (scope_info->ContextLocalCount() - 1);
- } else {
- private_entries_count++;
- }
- }
-
- // Estimate number of static private methods/accessors for classes.
- bool has_static_private_methods_or_accessors = false;
- if (receiver->IsJSFunction()) {
- i::Handle<i::JSFunction> func(i::JSFunction::cast(*receiver), isolate);
- i::Handle<i::SharedFunctionInfo> shared(func->shared(), isolate);
- if (shared->is_class_constructor() &&
- shared->has_static_private_methods_or_accessors()) {
- has_static_private_methods_or_accessors = true;
- i::Handle<i::Context> context(func->context(), isolate);
- i::Handle<i::ScopeInfo> scope_info(context->scope_info(), isolate);
- int local_count = scope_info->ContextLocalCount();
- for (int j = 0; j < local_count; ++j) {
- i::VariableMode mode = scope_info->ContextLocalMode(j);
- i::IsStaticFlag is_static_flag =
- scope_info->ContextLocalIsStaticFlag(j);
- if (i::IsPrivateMethodOrAccessorVariableMode(mode) &&
- is_static_flag == i::IsStaticFlag::kStatic) {
- private_entries_count += local_count;
- break;
- }
- }
- }
- }
-
- DCHECK(names_out->empty());
- names_out->reserve(private_entries_count);
- DCHECK(values_out->empty());
- values_out->reserve(private_entries_count);
-
- if (has_static_private_methods_or_accessors) {
- i::Handle<i::Context> context(i::JSFunction::cast(*receiver).context(),
- isolate);
- CollectPrivateMethodsAndAccessorsFromContext(
- isolate, context, i::IsStaticFlag::kStatic, names_out, values_out);
- }
-
- for (int i = 0; i < keys->length(); ++i) {
- i::Handle<i::Object> obj_key(keys->get(i), isolate);
- i::Handle<i::Symbol> key(i::Symbol::cast(*obj_key), isolate);
- CHECK(key->is_private_name());
- i::Handle<i::Object> value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value, i::Object::GetProperty(isolate, receiver, key), false);
-
- if (key->is_private_brand()) {
- DCHECK(value->IsContext());
- i::Handle<i::Context> context(i::Context::cast(*value), isolate);
- CollectPrivateMethodsAndAccessorsFromContext(
- isolate, context, i::IsStaticFlag::kNotStatic, names_out, values_out);
- } else { // Private fields
- i::Handle<i::String> name(
- i::String::cast(i::Symbol::cast(*key).description()), isolate);
- names_out->push_back(Utils::ToLocal(name));
- values_out->push_back(Utils::ToLocal(value));
- }
- }
-
- DCHECK_EQ(names_out->size(), values_out->size());
- DCHECK_LE(names_out->size(), private_entries_count);
- return true;
-}
-
-Local<Context> debug::GetCreationContext(Local<Object> value) {
- i::Handle<i::Object> val = Utils::OpenHandle(*value);
- if (val->IsJSGlobalProxy()) {
- return Local<Context>();
- }
- return value->CreationContext();
-}
-
-void debug::ChangeBreakOnException(Isolate* isolate, ExceptionBreakState type) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- internal_isolate->debug()->ChangeBreakOnException(
- i::BreakException, type == BreakOnAnyException);
- internal_isolate->debug()->ChangeBreakOnException(i::BreakUncaughtException,
- type != NoBreakOnException);
-}
-
-void debug::SetBreakPointsActive(Isolate* v8_isolate, bool is_active) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- isolate->debug()->set_break_points_active(is_active);
-}
-
-void debug::PrepareStep(Isolate* v8_isolate, StepAction action) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8_DO_NOT_USE(isolate);
- CHECK(isolate->debug()->CheckExecutionState());
- // Clear all current stepping setup.
- isolate->debug()->ClearStepping();
- // Prepare step.
- isolate->debug()->PrepareStep(static_cast<i::StepAction>(action));
-}
-
-void debug::ClearStepping(Isolate* v8_isolate) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- // Clear all current stepping setup.
- isolate->debug()->ClearStepping();
-}
-
-void debug::BreakRightNow(Isolate* v8_isolate) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8_DO_NOT_USE(isolate);
- isolate->debug()->HandleDebugBreak(i::kIgnoreIfAllFramesBlackboxed);
-}
-
-void debug::SetTerminateOnResume(Isolate* v8_isolate) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- isolate->debug()->SetTerminateOnResume();
-}
-
-bool debug::CanBreakProgram(Isolate* v8_isolate) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8_DO_NOT_USE(isolate);
- // We cannot break a program if we are currently running a regexp.
- // TODO(yangguo): fix this exception.
- return !isolate->regexp_stack()->is_in_use() &&
- isolate->debug()->AllFramesOnStackAreBlackboxed();
-}
-
-v8::Isolate* debug::Script::GetIsolate() const {
- return reinterpret_cast<v8::Isolate*>(Utils::OpenHandle(this)->GetIsolate());
-}
-
-ScriptOriginOptions debug::Script::OriginOptions() const {
- return Utils::OpenHandle(this)->origin_options();
-}
-
-bool debug::Script::WasCompiled() const {
- return Utils::OpenHandle(this)->compilation_state() ==
- i::Script::COMPILATION_STATE_COMPILED;
-}
-
-bool debug::Script::IsEmbedded() const {
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- return script->context_data() ==
- script->GetReadOnlyRoots().uninitialized_symbol();
-}
-
-int debug::Script::Id() const { return Utils::OpenHandle(this)->id(); }
-
-int debug::Script::LineOffset() const {
- return Utils::OpenHandle(this)->line_offset();
-}
-
-int debug::Script::ColumnOffset() const {
- return Utils::OpenHandle(this)->column_offset();
-}
-
-std::vector<int> debug::Script::LineEnds() const {
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- if (script->type() == i::Script::TYPE_WASM) return std::vector<int>();
-
- i::Isolate* isolate = script->GetIsolate();
- i::HandleScope scope(isolate);
- i::Script::InitLineEnds(isolate, script);
- CHECK(script->line_ends().IsFixedArray());
- i::Handle<i::FixedArray> line_ends(i::FixedArray::cast(script->line_ends()),
- isolate);
- std::vector<int> result(line_ends->length());
- for (int i = 0; i < line_ends->length(); ++i) {
- i::Smi line_end = i::Smi::cast(line_ends->get(i));
- result[i] = line_end.value();
- }
- return result;
-}
-
-MaybeLocal<String> debug::Script::Name() const {
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- i::Isolate* isolate = script->GetIsolate();
- i::HandleScope handle_scope(isolate);
- i::Handle<i::Object> value(script->name(), isolate);
- if (!value->IsString()) return MaybeLocal<String>();
- return Utils::ToLocal(
- handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
-}
-
-MaybeLocal<String> debug::Script::SourceURL() const {
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- i::Isolate* isolate = script->GetIsolate();
- i::HandleScope handle_scope(isolate);
- i::Handle<i::Object> value(script->source_url(), isolate);
- if (!value->IsString()) return MaybeLocal<String>();
- return Utils::ToLocal(
- handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
-}
-
-MaybeLocal<String> debug::Script::SourceMappingURL() const {
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- i::Isolate* isolate = script->GetIsolate();
- i::HandleScope handle_scope(isolate);
- i::Handle<i::Object> value(script->source_mapping_url(), isolate);
- if (!value->IsString()) return MaybeLocal<String>();
- return Utils::ToLocal(
- handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
-}
-
-Maybe<int> debug::Script::ContextId() const {
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- i::Isolate* isolate = script->GetIsolate();
- i::HandleScope handle_scope(isolate);
- i::Object value = script->context_data();
- if (value.IsSmi()) return Just(i::Smi::ToInt(value));
- return Nothing<int>();
-}
-
-MaybeLocal<String> debug::Script::Source() const {
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- i::Isolate* isolate = script->GetIsolate();
- i::HandleScope handle_scope(isolate);
- i::Handle<i::Object> value(script->source(), isolate);
- if (!value->IsString()) return MaybeLocal<String>();
- return Utils::ToLocal(
- handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
-}
-
-bool debug::Script::IsWasm() const {
- return Utils::OpenHandle(this)->type() == i::Script::TYPE_WASM;
-}
-
-bool debug::Script::IsModule() const {
- return Utils::OpenHandle(this)->origin_options().IsModule();
-}
-
-namespace {
-int GetSmiValue(i::Handle<i::FixedArray> array, int index) {
- return i::Smi::ToInt(array->get(index));
-}
-
-bool CompareBreakLocation(const i::BreakLocation& loc1,
- const i::BreakLocation& loc2) {
- return loc1.position() < loc2.position();
-}
-} // namespace
-
-bool debug::Script::GetPossibleBreakpoints(
- const debug::Location& start, const debug::Location& end,
- bool restrict_to_function,
- std::vector<debug::BreakLocation>* locations) const {
- CHECK(!start.IsEmpty());
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- if (script->type() == i::Script::TYPE_WASM) {
- i::wasm::NativeModule* native_module = script->wasm_native_module();
- return i::WasmScript::GetPossibleBreakpoints(native_module, start, end,
- locations);
- }
-
- i::Isolate* isolate = script->GetIsolate();
- i::Script::InitLineEnds(isolate, script);
- CHECK(script->line_ends().IsFixedArray());
- i::Handle<i::FixedArray> line_ends =
- i::Handle<i::FixedArray>::cast(i::handle(script->line_ends(), isolate));
- CHECK(line_ends->length());
-
- int start_offset = GetSourceOffset(start);
- int end_offset = end.IsEmpty()
- ? GetSmiValue(line_ends, line_ends->length() - 1) + 1
- : GetSourceOffset(end);
- if (start_offset >= end_offset) return true;
-
- std::vector<i::BreakLocation> v8_locations;
- if (!isolate->debug()->GetPossibleBreakpoints(
- script, start_offset, end_offset, restrict_to_function,
- &v8_locations)) {
- return false;
- }
-
- std::sort(v8_locations.begin(), v8_locations.end(), CompareBreakLocation);
- int current_line_end_index = 0;
- for (const auto& v8_location : v8_locations) {
- int offset = v8_location.position();
- while (offset > GetSmiValue(line_ends, current_line_end_index)) {
- ++current_line_end_index;
- CHECK(current_line_end_index < line_ends->length());
- }
- int line_offset = 0;
-
- if (current_line_end_index > 0) {
- line_offset = GetSmiValue(line_ends, current_line_end_index - 1) + 1;
- }
- locations->emplace_back(
- current_line_end_index + script->line_offset(),
- offset - line_offset +
- (current_line_end_index == 0 ? script->column_offset() : 0),
- v8_location.type());
- }
- return true;
-}
-
-int debug::Script::GetSourceOffset(const debug::Location& location) const {
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- if (script->type() == i::Script::TYPE_WASM) {
- DCHECK_EQ(0, location.GetLineNumber());
- return location.GetColumnNumber();
- }
-
- int line = std::max(location.GetLineNumber() - script->line_offset(), 0);
- int column = location.GetColumnNumber();
- if (line == 0) {
- column = std::max(0, column - script->column_offset());
- }
-
- i::Script::InitLineEnds(script->GetIsolate(), script);
- CHECK(script->line_ends().IsFixedArray());
- i::Handle<i::FixedArray> line_ends = i::Handle<i::FixedArray>::cast(
- i::handle(script->line_ends(), script->GetIsolate()));
- CHECK(line_ends->length());
- if (line >= line_ends->length())
- return GetSmiValue(line_ends, line_ends->length() - 1);
- int line_offset = GetSmiValue(line_ends, line);
- if (line == 0) return std::min(column, line_offset);
- int prev_line_offset = GetSmiValue(line_ends, line - 1);
- return std::min(prev_line_offset + column + 1, line_offset);
-}
-
-v8::debug::Location debug::Script::GetSourceLocation(int offset) const {
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- i::Script::PositionInfo info;
- i::Script::GetPositionInfo(script, offset, &info, i::Script::WITH_OFFSET);
- return debug::Location(info.line, info.column);
-}
-
-bool debug::Script::SetScriptSource(v8::Local<v8::String> newSource,
- bool preview,
- debug::LiveEditResult* result) const {
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- i::Isolate* isolate = script->GetIsolate();
- return isolate->debug()->SetScriptSource(
- script, Utils::OpenHandle(*newSource), preview, result);
-}
-
-bool debug::Script::SetBreakpoint(v8::Local<v8::String> condition,
- debug::Location* location,
- debug::BreakpointId* id) const {
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- i::Isolate* isolate = script->GetIsolate();
- int offset = GetSourceOffset(*location);
- if (!isolate->debug()->SetBreakPointForScript(
- script, Utils::OpenHandle(*condition), &offset, id)) {
- return false;
- }
- *location = GetSourceLocation(offset);
- return true;
-}
-
-bool debug::Script::SetBreakpointOnScriptEntry(BreakpointId* id) const {
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- i::Isolate* isolate = script->GetIsolate();
- i::SharedFunctionInfo::ScriptIterator it(isolate, *script);
- for (i::SharedFunctionInfo sfi = it.Next(); !sfi.is_null(); sfi = it.Next()) {
- if (sfi.is_toplevel()) {
- return isolate->debug()->SetBreakpointForFunction(
- handle(sfi, isolate), isolate->factory()->empty_string(), id);
- }
- }
- return false;
-}
-
-void debug::Script::RemoveWasmBreakpoint(debug::BreakpointId id) {
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- i::Isolate* isolate = script->GetIsolate();
- isolate->debug()->RemoveBreakpointForWasmScript(script, id);
-}
-
-void debug::RemoveBreakpoint(Isolate* v8_isolate, BreakpointId id) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- i::HandleScope handle_scope(isolate);
- isolate->debug()->RemoveBreakpoint(id);
-}
-
-v8::Platform* debug::GetCurrentPlatform() {
- return i::V8::GetCurrentPlatform();
-}
-
-void debug::ForceGarbageCollection(
- v8::Isolate* isolate,
- v8::EmbedderHeapTracer::EmbedderStackState embedder_stack_state) {
- i::Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
- heap->SetEmbedderStackStateForNextFinalization(embedder_stack_state);
- isolate->LowMemoryNotification();
-}
-
-debug::WasmScript* debug::WasmScript::Cast(debug::Script* script) {
- CHECK(script->IsWasm());
- return static_cast<WasmScript*>(script);
-}
-
-debug::WasmScript::DebugSymbolsType debug::WasmScript::GetDebugSymbolType()
- const {
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- DCHECK_EQ(i::Script::TYPE_WASM, script->type());
- switch (script->wasm_native_module()->module()->debug_symbols.type) {
- case i::wasm::WasmDebugSymbols::Type::None:
- return debug::WasmScript::DebugSymbolsType::None;
- case i::wasm::WasmDebugSymbols::Type::EmbeddedDWARF:
- return debug::WasmScript::DebugSymbolsType::EmbeddedDWARF;
- case i::wasm::WasmDebugSymbols::Type::ExternalDWARF:
- return debug::WasmScript::DebugSymbolsType::ExternalDWARF;
- case i::wasm::WasmDebugSymbols::Type::SourceMap:
- return debug::WasmScript::DebugSymbolsType::SourceMap;
- }
-}
-
-MemorySpan<const char> debug::WasmScript::ExternalSymbolsURL() const {
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- DCHECK_EQ(i::Script::TYPE_WASM, script->type());
-
- const i::wasm::WasmDebugSymbols& symbols =
- script->wasm_native_module()->module()->debug_symbols;
- if (symbols.external_url.is_empty()) return {};
-
- internal::wasm::ModuleWireBytes wire_bytes(
- script->wasm_native_module()->wire_bytes());
- i::wasm::WasmName external_url =
- wire_bytes.GetNameOrNull(symbols.external_url);
- return {external_url.data(), external_url.size()};
-}
-
-int debug::WasmScript::NumFunctions() const {
- i::DisallowGarbageCollection no_gc;
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- DCHECK_EQ(i::Script::TYPE_WASM, script->type());
- i::wasm::NativeModule* native_module = script->wasm_native_module();
- const i::wasm::WasmModule* module = native_module->module();
- DCHECK_GE(i::kMaxInt, module->functions.size());
- return static_cast<int>(module->functions.size());
-}
-
-int debug::WasmScript::NumImportedFunctions() const {
- i::DisallowGarbageCollection no_gc;
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- DCHECK_EQ(i::Script::TYPE_WASM, script->type());
- i::wasm::NativeModule* native_module = script->wasm_native_module();
- const i::wasm::WasmModule* module = native_module->module();
- DCHECK_GE(i::kMaxInt, module->num_imported_functions);
- return static_cast<int>(module->num_imported_functions);
-}
-
-MemorySpan<const uint8_t> debug::WasmScript::Bytecode() const {
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- i::Vector<const uint8_t> wire_bytes =
- script->wasm_native_module()->wire_bytes();
- return {wire_bytes.begin(), wire_bytes.size()};
-}
-
-std::pair<int, int> debug::WasmScript::GetFunctionRange(
- int function_index) const {
- i::DisallowGarbageCollection no_gc;
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- DCHECK_EQ(i::Script::TYPE_WASM, script->type());
- i::wasm::NativeModule* native_module = script->wasm_native_module();
- const i::wasm::WasmModule* module = native_module->module();
- DCHECK_LE(0, function_index);
- DCHECK_GT(module->functions.size(), function_index);
- const i::wasm::WasmFunction& func = module->functions[function_index];
- DCHECK_GE(i::kMaxInt, func.code.offset());
- DCHECK_GE(i::kMaxInt, func.code.end_offset());
- return std::make_pair(static_cast<int>(func.code.offset()),
- static_cast<int>(func.code.end_offset()));
-}
-
-int debug::WasmScript::GetContainingFunction(int byte_offset) const {
- i::DisallowGarbageCollection no_gc;
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- DCHECK_EQ(i::Script::TYPE_WASM, script->type());
- i::wasm::NativeModule* native_module = script->wasm_native_module();
- const i::wasm::WasmModule* module = native_module->module();
- DCHECK_LE(0, byte_offset);
-
- return i::wasm::GetContainingWasmFunction(module, byte_offset);
-}
-
-uint32_t debug::WasmScript::GetFunctionHash(int function_index) {
- i::DisallowGarbageCollection no_gc;
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- DCHECK_EQ(i::Script::TYPE_WASM, script->type());
- i::wasm::NativeModule* native_module = script->wasm_native_module();
- const i::wasm::WasmModule* module = native_module->module();
- DCHECK_LE(0, function_index);
- DCHECK_GT(module->functions.size(), function_index);
- const i::wasm::WasmFunction& func = module->functions[function_index];
- i::wasm::ModuleWireBytes wire_bytes(native_module->wire_bytes());
- i::Vector<const i::byte> function_bytes = wire_bytes.GetFunctionBytes(&func);
- // TODO(herhut): Maybe also take module, name and signature into account.
- return i::StringHasher::HashSequentialString(function_bytes.begin(),
- function_bytes.length(), 0);
-}
-
-int debug::WasmScript::CodeOffset() const {
- i::Handle<i::Script> script = Utils::OpenHandle(this);
- DCHECK_EQ(i::Script::TYPE_WASM, script->type());
- i::wasm::NativeModule* native_module = script->wasm_native_module();
- const i::wasm::WasmModule* module = native_module->module();
-
- // If the module contains at least one function, the code offset must have
- // been initialized, and it cannot be zero.
- DCHECK_IMPLIES(module->num_declared_functions > 0,
- module->code.offset() != 0);
- return module->code.offset();
-}
-
-debug::Location::Location(int line_number, int column_number)
- : line_number_(line_number),
- column_number_(column_number),
- is_empty_(false) {}
-
-debug::Location::Location()
- : line_number_(v8::Function::kLineOffsetNotFound),
- column_number_(v8::Function::kLineOffsetNotFound),
- is_empty_(true) {}
-
-int debug::Location::GetLineNumber() const {
- DCHECK(!IsEmpty());
- return line_number_;
-}
-
-int debug::Location::GetColumnNumber() const {
- DCHECK(!IsEmpty());
- return column_number_;
-}
-
-bool debug::Location::IsEmpty() const { return is_empty_; }
-
-void debug::GetLoadedScripts(v8::Isolate* v8_isolate,
- PersistentValueVector<debug::Script>& scripts) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- {
- i::DisallowGarbageCollection no_gc;
- i::Script::Iterator iterator(isolate);
- for (i::Script script = iterator.Next(); !script.is_null();
- script = iterator.Next()) {
- if (script.type() == i::Script::TYPE_NORMAL ||
- script.type() == i::Script::TYPE_WASM) {
- if (script.HasValidSource()) {
- i::HandleScope handle_scope(isolate);
- i::Handle<i::Script> script_handle(script, isolate);
- scripts.Append(ToApiHandle<Script>(script_handle));
- }
- }
- }
- }
-}
-
-MaybeLocal<UnboundScript> debug::CompileInspectorScript(Isolate* v8_isolate,
- Local<String> source) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(isolate, UnboundScript);
- i::Handle<i::String> str = Utils::OpenHandle(*source);
- i::Handle<i::SharedFunctionInfo> result;
- {
- ScriptOriginOptions origin_options;
- i::ScriptData* script_data = nullptr;
- i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
- i::Compiler::GetSharedFunctionInfoForScript(
- isolate, str, i::Compiler::ScriptDetails(), origin_options, nullptr,
- script_data, ScriptCompiler::kNoCompileOptions,
- ScriptCompiler::kNoCacheBecauseInspector,
- i::FLAG_expose_inspector_scripts ? i::NOT_NATIVES_CODE
- : i::INSPECTOR_CODE);
- has_pending_exception = !maybe_function_info.ToHandle(&result);
- RETURN_ON_FAILED_EXECUTION(UnboundScript);
- }
- RETURN_ESCAPED(ToApiHandle<UnboundScript>(result));
-}
-
-void debug::TierDownAllModulesPerIsolate(Isolate* v8_isolate) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- isolate->wasm_engine()->TierDownAllModulesPerIsolate(isolate);
-}
-
-void debug::TierUpAllModulesPerIsolate(Isolate* v8_isolate) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- isolate->wasm_engine()->TierUpAllModulesPerIsolate(isolate);
-}
-
-void debug::SetDebugDelegate(Isolate* v8_isolate,
- debug::DebugDelegate* delegate) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- isolate->debug()->SetDebugDelegate(delegate);
-}
-
-void debug::SetAsyncEventDelegate(Isolate* v8_isolate,
- debug::AsyncEventDelegate* delegate) {
- reinterpret_cast<i::Isolate*>(v8_isolate)->set_async_event_delegate(delegate);
-}
-
-void debug::ResetBlackboxedStateCache(Isolate* v8_isolate,
- v8::Local<debug::Script> script) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- i::DisallowGarbageCollection no_gc;
- i::SharedFunctionInfo::ScriptIterator iter(isolate,
- *Utils::OpenHandle(*script));
- for (i::SharedFunctionInfo info = iter.Next(); !info.is_null();
- info = iter.Next()) {
- if (info.HasDebugInfo()) {
- info.GetDebugInfo().set_computed_debug_is_blackboxed(false);
- }
- }
-}
-
-int debug::EstimatedValueSize(Isolate* v8_isolate, v8::Local<v8::Value> value) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- i::Handle<i::Object> object = Utils::OpenHandle(*value);
- if (object->IsSmi()) return i::kTaggedSize;
- CHECK(object->IsHeapObject());
- return i::Handle<i::HeapObject>::cast(object)->Size();
-}
-
v8::MaybeLocal<v8::Array> v8::Object::PreviewEntries(bool* is_key_value) {
if (IsMap()) {
*is_key_value = true;
@@ -10301,206 +9399,6 @@ v8::MaybeLocal<v8::Array> v8::Object::PreviewEntries(bool* is_key_value) {
return v8::MaybeLocal<v8::Array>();
}
-Local<Function> debug::GetBuiltin(Isolate* v8_isolate, Builtin builtin) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- i::HandleScope handle_scope(isolate);
-
- CHECK_EQ(builtin, kStringToLowerCase);
- i::Builtins::Name builtin_id = i::Builtins::kStringPrototypeToLocaleLowerCase;
-
- i::Factory* factory = isolate->factory();
- i::Handle<i::String> name = isolate->factory()->empty_string();
- i::Handle<i::NativeContext> context(isolate->native_context());
- i::Handle<i::SharedFunctionInfo> info =
- factory->NewSharedFunctionInfoForBuiltin(name, builtin_id);
- info->set_language_mode(i::LanguageMode::kStrict);
- i::Handle<i::JSFunction> fun =
- i::Factory::JSFunctionBuilder{isolate, info, context}
- .set_map(isolate->strict_function_without_prototype_map())
- .Build();
-
- fun->shared().set_internal_formal_parameter_count(0);
- fun->shared().set_length(0);
- return Utils::ToLocal(handle_scope.CloseAndEscape(fun));
-}
-
-void debug::SetConsoleDelegate(Isolate* v8_isolate, ConsoleDelegate* delegate) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- isolate->set_console_delegate(delegate);
-}
-
-debug::ConsoleCallArguments::ConsoleCallArguments(
- const v8::FunctionCallbackInfo<v8::Value>& info)
- : v8::FunctionCallbackInfo<v8::Value>(nullptr, info.values_, info.length_) {
-}
-
-debug::ConsoleCallArguments::ConsoleCallArguments(
- const internal::BuiltinArguments& args)
- : v8::FunctionCallbackInfo<v8::Value>(
- nullptr,
- // Drop the first argument (receiver, i.e. the "console" object).
- args.length() > 1 ? args.address_of_first_argument() : nullptr,
- args.length() - 1) {}
-
-// Marked V8_DEPRECATED.
-int debug::GetStackFrameId(v8::Local<v8::StackFrame> frame) { return 0; }
-
-v8::Local<v8::StackTrace> debug::GetDetailedStackTrace(
- Isolate* v8_isolate, v8::Local<v8::Object> v8_error) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- i::Handle<i::JSReceiver> error = Utils::OpenHandle(*v8_error);
- if (!error->IsJSObject()) {
- return v8::Local<v8::StackTrace>();
- }
- i::Handle<i::FixedArray> stack_trace =
- isolate->GetDetailedStackTrace(i::Handle<i::JSObject>::cast(error));
- return Utils::StackTraceToLocal(stack_trace);
-}
-
-MaybeLocal<debug::Script> debug::GeneratorObject::Script() {
- i::Handle<i::JSGeneratorObject> obj = Utils::OpenHandle(this);
- i::Object maybe_script = obj->function().shared().script();
- if (!maybe_script.IsScript()) return MaybeLocal<debug::Script>();
- i::Handle<i::Script> script(i::Script::cast(maybe_script), obj->GetIsolate());
- return ToApiHandle<debug::Script>(script);
-}
-
-Local<Function> debug::GeneratorObject::Function() {
- i::Handle<i::JSGeneratorObject> obj = Utils::OpenHandle(this);
- return Utils::ToLocal(handle(obj->function(), obj->GetIsolate()));
-}
-
-debug::Location debug::GeneratorObject::SuspendedLocation() {
- i::Handle<i::JSGeneratorObject> obj = Utils::OpenHandle(this);
- CHECK(obj->is_suspended());
- i::Object maybe_script = obj->function().shared().script();
- if (!maybe_script.IsScript()) return debug::Location();
- i::Isolate* isolate = obj->GetIsolate();
- i::Handle<i::Script> script(i::Script::cast(maybe_script), isolate);
- i::Script::PositionInfo info;
- i::SharedFunctionInfo::EnsureSourcePositionsAvailable(
- isolate, i::handle(obj->function().shared(), isolate));
- i::Script::GetPositionInfo(script, obj->source_position(), &info,
- i::Script::WITH_OFFSET);
- return debug::Location(info.line, info.column);
-}
-
-bool debug::GeneratorObject::IsSuspended() {
- return Utils::OpenHandle(this)->is_suspended();
-}
-
-v8::Local<debug::GeneratorObject> debug::GeneratorObject::Cast(
- v8::Local<v8::Value> value) {
- CHECK(value->IsGeneratorObject());
- return ToApiHandle<debug::GeneratorObject>(Utils::OpenHandle(*value));
-}
-
-MaybeLocal<v8::Value> debug::EvaluateGlobal(v8::Isolate* isolate,
- v8::Local<v8::String> source,
- EvaluateGlobalMode mode,
- bool repl) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(internal_isolate, Value);
- i::REPLMode repl_mode = repl ? i::REPLMode::kYes : i::REPLMode::kNo;
- Local<Value> result;
- has_pending_exception = !ToLocal<Value>(
- i::DebugEvaluate::Global(internal_isolate, Utils::OpenHandle(*source),
- mode, repl_mode),
- &result);
- RETURN_ON_FAILED_EXECUTION(Value);
- RETURN_ESCAPED(result);
-}
-
-void debug::QueryObjects(v8::Local<v8::Context> v8_context,
- QueryObjectPredicate* predicate,
- PersistentValueVector<v8::Object>* objects) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_context->GetIsolate());
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- isolate->heap_profiler()->QueryObjects(Utils::OpenHandle(*v8_context),
- predicate, objects);
-}
-
-void debug::GlobalLexicalScopeNames(
- v8::Local<v8::Context> v8_context,
- v8::PersistentValueVector<v8::String>* names) {
- i::Handle<i::Context> context = Utils::OpenHandle(*v8_context);
- i::Isolate* isolate = context->GetIsolate();
- i::Handle<i::ScriptContextTable> table(
- context->global_object().native_context().script_context_table(),
- isolate);
- for (int i = 0; i < table->synchronized_used(); i++) {
- i::Handle<i::Context> context =
- i::ScriptContextTable::GetContext(isolate, table, i);
- DCHECK(context->IsScriptContext());
- i::Handle<i::ScopeInfo> scope_info(context->scope_info(), isolate);
- int local_count = scope_info->ContextLocalCount();
- for (int j = 0; j < local_count; ++j) {
- i::String name = scope_info->ContextLocalName(j);
- if (i::ScopeInfo::VariableIsSynthetic(name)) continue;
- names->Append(Utils::ToLocal(handle(name, isolate)));
- }
- }
-}
-
-void debug::SetReturnValue(v8::Isolate* v8_isolate,
- v8::Local<v8::Value> value) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- isolate->debug()->set_return_value(*Utils::OpenHandle(*value));
-}
-
-int64_t debug::GetNextRandomInt64(v8::Isolate* v8_isolate) {
- return reinterpret_cast<i::Isolate*>(v8_isolate)
- ->random_number_generator()
- ->NextInt64();
-}
-
-void debug::EnumerateRuntimeCallCounters(v8::Isolate* v8_isolate,
- RuntimeCallCounterCallback callback) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- if (isolate->counters()) {
- isolate->counters()->runtime_call_stats()->EnumerateCounters(callback);
- }
-}
-
-int debug::GetDebuggingId(v8::Local<v8::Function> function) {
- i::Handle<i::JSReceiver> callable = v8::Utils::OpenHandle(*function);
- if (!callable->IsJSFunction()) return i::DebugInfo::kNoDebuggingId;
- i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(callable);
- int id = func->GetIsolate()->debug()->GetFunctionDebuggingId(func);
- DCHECK_NE(i::DebugInfo::kNoDebuggingId, id);
- return id;
-}
-
-bool debug::SetFunctionBreakpoint(v8::Local<v8::Function> function,
- v8::Local<v8::String> condition,
- BreakpointId* id) {
- i::Handle<i::JSReceiver> callable = Utils::OpenHandle(*function);
- if (!callable->IsJSFunction()) return false;
- i::Handle<i::JSFunction> jsfunction =
- i::Handle<i::JSFunction>::cast(callable);
- i::Isolate* isolate = jsfunction->GetIsolate();
- i::Handle<i::String> condition_string =
- condition.IsEmpty() ? isolate->factory()->empty_string()
- : Utils::OpenHandle(*condition);
- return isolate->debug()->SetBreakpointForFunction(
- handle(jsfunction->shared(), isolate), condition_string, id);
-}
-
-debug::PostponeInterruptsScope::PostponeInterruptsScope(v8::Isolate* isolate)
- : scope_(
- new i::PostponeInterruptsScope(reinterpret_cast<i::Isolate*>(isolate),
- i::StackGuard::API_INTERRUPT)) {}
-
-debug::PostponeInterruptsScope::~PostponeInterruptsScope() = default;
-
-debug::DisableBreakScope::DisableBreakScope(v8::Isolate* isolate)
- : scope_(std::make_unique<i::DisableBreak>(
- reinterpret_cast<i::Isolate*>(isolate)->debug())) {}
-
-debug::DisableBreakScope::~DisableBreakScope() = default;
-
Local<String> CpuProfileNode::GetFunctionName() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
i::Isolate* isolate = node->isolate();
@@ -10510,193 +9408,6 @@ Local<String> CpuProfileNode::GetFunctionName() const {
return ToApiHandle<String>(name);
}
-int debug::Coverage::BlockData::StartOffset() const { return block_->start; }
-int debug::Coverage::BlockData::EndOffset() const { return block_->end; }
-uint32_t debug::Coverage::BlockData::Count() const { return block_->count; }
-
-int debug::Coverage::FunctionData::StartOffset() const {
- return function_->start;
-}
-int debug::Coverage::FunctionData::EndOffset() const { return function_->end; }
-uint32_t debug::Coverage::FunctionData::Count() const {
- return function_->count;
-}
-
-MaybeLocal<String> debug::Coverage::FunctionData::Name() const {
- return ToApiHandle<String>(function_->name);
-}
-
-size_t debug::Coverage::FunctionData::BlockCount() const {
- return function_->blocks.size();
-}
-
-bool debug::Coverage::FunctionData::HasBlockCoverage() const {
- return function_->has_block_coverage;
-}
-
-debug::Coverage::BlockData debug::Coverage::FunctionData::GetBlockData(
- size_t i) const {
- return BlockData(&function_->blocks.at(i), coverage_);
-}
-
-Local<debug::Script> debug::Coverage::ScriptData::GetScript() const {
- return ToApiHandle<debug::Script>(script_->script);
-}
-
-size_t debug::Coverage::ScriptData::FunctionCount() const {
- return script_->functions.size();
-}
-
-debug::Coverage::FunctionData debug::Coverage::ScriptData::GetFunctionData(
- size_t i) const {
- return FunctionData(&script_->functions.at(i), coverage_);
-}
-
-debug::Coverage::ScriptData::ScriptData(size_t index,
- std::shared_ptr<i::Coverage> coverage)
- : script_(&coverage->at(index)), coverage_(std::move(coverage)) {}
-
-size_t debug::Coverage::ScriptCount() const { return coverage_->size(); }
-
-debug::Coverage::ScriptData debug::Coverage::GetScriptData(size_t i) const {
- return ScriptData(i, coverage_);
-}
-
-debug::Coverage debug::Coverage::CollectPrecise(Isolate* isolate) {
- return Coverage(
- i::Coverage::CollectPrecise(reinterpret_cast<i::Isolate*>(isolate)));
-}
-
-debug::Coverage debug::Coverage::CollectBestEffort(Isolate* isolate) {
- return Coverage(
- i::Coverage::CollectBestEffort(reinterpret_cast<i::Isolate*>(isolate)));
-}
-
-void debug::Coverage::SelectMode(Isolate* isolate, debug::CoverageMode mode) {
- i::Coverage::SelectMode(reinterpret_cast<i::Isolate*>(isolate), mode);
-}
-
-int debug::TypeProfile::Entry::SourcePosition() const {
- return entry_->position;
-}
-
-std::vector<MaybeLocal<String>> debug::TypeProfile::Entry::Types() const {
- std::vector<MaybeLocal<String>> result;
- for (const internal::Handle<internal::String>& type : entry_->types) {
- result.emplace_back(ToApiHandle<String>(type));
- }
- return result;
-}
-
-debug::TypeProfile::ScriptData::ScriptData(
- size_t index, std::shared_ptr<i::TypeProfile> type_profile)
- : script_(&type_profile->at(index)),
- type_profile_(std::move(type_profile)) {}
-
-Local<debug::Script> debug::TypeProfile::ScriptData::GetScript() const {
- return ToApiHandle<debug::Script>(script_->script);
-}
-
-std::vector<debug::TypeProfile::Entry> debug::TypeProfile::ScriptData::Entries()
- const {
- std::vector<debug::TypeProfile::Entry> result;
- for (const internal::TypeProfileEntry& entry : script_->entries) {
- result.push_back(debug::TypeProfile::Entry(&entry, type_profile_));
- }
- return result;
-}
-
-debug::TypeProfile debug::TypeProfile::Collect(Isolate* isolate) {
- return TypeProfile(
- i::TypeProfile::Collect(reinterpret_cast<i::Isolate*>(isolate)));
-}
-
-void debug::TypeProfile::SelectMode(Isolate* isolate,
- debug::TypeProfileMode mode) {
- i::TypeProfile::SelectMode(reinterpret_cast<i::Isolate*>(isolate), mode);
-}
-
-size_t debug::TypeProfile::ScriptCount() const { return type_profile_->size(); }
-
-debug::TypeProfile::ScriptData debug::TypeProfile::GetScriptData(
- size_t i) const {
- return ScriptData(i, type_profile_);
-}
-
-v8::MaybeLocal<v8::Value> debug::WeakMap::Get(v8::Local<v8::Context> context,
- v8::Local<v8::Value> key) {
- PREPARE_FOR_EXECUTION(context, WeakMap, Get, Value);
- auto self = Utils::OpenHandle(this);
- Local<Value> result;
- i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
- has_pending_exception =
- !ToLocal<Value>(i::Execution::CallBuiltin(isolate, isolate->weakmap_get(),
- self, arraysize(argv), argv),
- &result);
- RETURN_ON_FAILED_EXECUTION(Value);
- RETURN_ESCAPED(result);
-}
-
-v8::MaybeLocal<debug::WeakMap> debug::WeakMap::Set(
- v8::Local<v8::Context> context, v8::Local<v8::Value> key,
- v8::Local<v8::Value> value) {
- PREPARE_FOR_EXECUTION(context, WeakMap, Set, WeakMap);
- auto self = Utils::OpenHandle(this);
- i::Handle<i::Object> result;
- i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key),
- Utils::OpenHandle(*value)};
- has_pending_exception =
- !i::Execution::CallBuiltin(isolate, isolate->weakmap_set(), self,
- arraysize(argv), argv)
- .ToHandle(&result);
- RETURN_ON_FAILED_EXECUTION(WeakMap);
- RETURN_ESCAPED(Local<WeakMap>::Cast(Utils::ToLocal(result)));
-}
-
-Local<debug::WeakMap> debug::WeakMap::New(v8::Isolate* isolate) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, WeakMap, New);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- i::Handle<i::JSWeakMap> obj = i_isolate->factory()->NewJSWeakMap();
- return ToApiHandle<debug::WeakMap>(obj);
-}
-
-debug::WeakMap* debug::WeakMap::Cast(v8::Value* value) {
- return static_cast<debug::WeakMap*>(value);
-}
-
-Local<Value> debug::AccessorPair::getter() {
- i::Handle<i::AccessorPair> accessors = Utils::OpenHandle(this);
- i::Isolate* isolate = accessors->GetIsolate();
- i::Handle<i::Object> getter(accessors->getter(), isolate);
- return Utils::ToLocal(getter);
-}
-
-Local<Value> debug::AccessorPair::setter() {
- i::Handle<i::AccessorPair> accessors = Utils::OpenHandle(this);
- i::Isolate* isolate = accessors->GetIsolate();
- i::Handle<i::Object> setter(accessors->setter(), isolate);
- return Utils::ToLocal(setter);
-}
-
-bool debug::AccessorPair::IsAccessorPair(Local<Value> that) {
- i::Handle<i::Object> obj = Utils::OpenHandle(*that);
- return obj->IsAccessorPair();
-}
-
-MaybeLocal<Message> debug::GetMessageFromPromise(Local<Promise> p) {
- i::Handle<i::JSPromise> promise = Utils::OpenHandle(*p);
- i::Isolate* isolate = promise->GetIsolate();
-
- i::Handle<i::Symbol> key = isolate->factory()->promise_debug_message_symbol();
- i::Handle<i::Object> maybeMessage =
- i::JSReceiver::GetDataProperty(promise, key);
-
- if (!maybeMessage->IsJSMessageObject(isolate)) return MaybeLocal<Message>();
- return ToApiHandle<Message>(
- i::Handle<i::JSMessageObject>::cast(maybeMessage));
-}
-
const char* CpuProfileNode::GetFunctionNameStr() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
return node->entry()->name();
@@ -10809,6 +9520,9 @@ const CpuProfileNode* CpuProfile::GetSample(int index) const {
return reinterpret_cast<const CpuProfileNode*>(profile->sample(index).node);
}
+const int CpuProfileNode::kNoLineNumberInfo;
+const int CpuProfileNode::kNoColumnNumberInfo;
+
int64_t CpuProfile::GetSampleTimestamp(int index) const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
return profile->sample(index).timestamp.since_origin().InMicroseconds();
@@ -10861,10 +9575,11 @@ void CpuProfiler::SetUsePreciseSampling(bool use_precise_sampling) {
use_precise_sampling);
}
-CpuProfilingStatus CpuProfiler::StartProfiling(Local<String> title,
- CpuProfilingOptions options) {
+CpuProfilingStatus CpuProfiler::StartProfiling(
+ Local<String> title, CpuProfilingOptions options,
+ std::unique_ptr<DiscardedSamplesDelegate> delegate) {
return reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
- *Utils::OpenHandle(*title), options);
+ *Utils::OpenHandle(*title), options, std::move(delegate));
}
CpuProfilingStatus CpuProfiler::StartProfiling(Local<String> title,
@@ -11266,44 +9981,19 @@ void EmbedderHeapTracer::ResetHandleInNonTracingGC(
UNREACHABLE();
}
-const void* CTypeInfo::GetWrapperInfo() const {
- DCHECK(payload_ & kWrapperTypeInfoMask);
- return reinterpret_cast<const void*>(payload_ & kWrapperTypeInfoMask);
-}
-
CFunction::CFunction(const void* address, const CFunctionInfo* type_info)
: address_(address), type_info_(type_info) {
CHECK_NOT_NULL(address_);
CHECK_NOT_NULL(type_info_);
- for (unsigned int i = 0; i < type_info_->ArgumentCount(); ++i) {
- if (type_info_->ArgumentInfo(i).IsArray()) {
- // Array args require an integer passed for their length
- // as the next argument.
- DCHECK_LT(i + 1, type_info_->ArgumentCount());
- switch (type_info_->ArgumentInfo(i + 1).GetType()) {
- case CTypeInfo::Type::kInt32:
- case CTypeInfo::Type::kUint32:
- case CTypeInfo::Type::kInt64:
- case CTypeInfo::Type::kUint64:
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- }
}
RegisterState::RegisterState()
: pc(nullptr), sp(nullptr), fp(nullptr), lr(nullptr) {}
RegisterState::~RegisterState() = default;
-RegisterState::RegisterState(const RegisterState& other) V8_NOEXCEPT {
- *this = other;
-}
+RegisterState::RegisterState(const RegisterState& other) { *this = other; }
-RegisterState& RegisterState::operator=(const RegisterState& other)
- V8_NOEXCEPT {
+RegisterState& RegisterState::operator=(const RegisterState& other) {
if (&other != this) {
pc = other.pc;
sp = other.sp;
@@ -11502,20 +10192,6 @@ void InvokeFinalizationRegistryCleanupFromTask(
}
// Undefine macros for jumbo build.
-#undef LOG_API
-#undef ENTER_V8_DO_NOT_USE
-#undef ENTER_V8_HELPER_DO_NOT_USE
-#undef PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE
-#undef PREPARE_FOR_EXECUTION_WITH_CONTEXT
-#undef PREPARE_FOR_EXECUTION
-#undef ENTER_V8
-#undef ENTER_V8_NO_SCRIPT
-#undef ENTER_V8_NO_SCRIPT_NO_EXCEPTION
-#undef ENTER_V8_FOR_NEW_CONTEXT
-#undef EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE
-#undef RETURN_ON_FAILED_EXECUTION
-#undef RETURN_ON_FAILED_EXECUTION_PRIMITIVE
-#undef RETURN_ESCAPED
#undef SET_FIELD_WRAPPED
#undef NEW_STRING
#undef CALLBACK_SETTER
@@ -11524,3 +10200,4 @@ void InvokeFinalizationRegistryCleanupFromTask(
} // namespace v8
#undef TRACE_BS
+#include "src/api/api-macros-undef.h"
diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h
index bf9afb7959..b323f71bd8 100644
--- a/deps/v8/src/api/api.h
+++ b/deps/v8/src/api/api.h
@@ -122,7 +122,7 @@ class RegisteredExtension {
V(Context, Context) \
V(External, Object) \
V(StackTrace, FixedArray) \
- V(StackFrame, StackTraceFrame) \
+ V(StackFrame, StackFrameInfo) \
V(Proxy, JSProxy) \
V(debug::GeneratorObject, JSGeneratorObject) \
V(debug::Script, Script) \
@@ -218,7 +218,7 @@ class Utils {
static inline Local<StackTrace> StackTraceToLocal(
v8::internal::Handle<v8::internal::FixedArray> obj);
static inline Local<StackFrame> StackFrameToLocal(
- v8::internal::Handle<v8::internal::StackTraceFrame> obj);
+ v8::internal::Handle<v8::internal::StackFrameInfo> obj);
static inline Local<Number> NumberToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<Integer> IntegerToLocal(
diff --git a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc
index 8c9318bfe7..298e9ea14f 100644
--- a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc
+++ b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc
@@ -40,8 +40,8 @@ void AstFunctionLiteralIdReindexer::VisitClassLiteral(ClassLiteral* expr) {
Visit(expr->extends());
}
Visit(expr->constructor());
- if (expr->static_fields_initializer() != nullptr) {
- Visit(expr->static_fields_initializer());
+ if (expr->static_initializer() != nullptr) {
+ Visit(expr->static_initializer());
}
if (expr->instance_members_initializer_function() != nullptr) {
Visit(expr->instance_members_initializer_function());
diff --git a/deps/v8/src/ast/ast-traversal-visitor.h b/deps/v8/src/ast/ast-traversal-visitor.h
index c80b29975b..a9e3500931 100644
--- a/deps/v8/src/ast/ast-traversal-visitor.h
+++ b/deps/v8/src/ast/ast-traversal-visitor.h
@@ -469,8 +469,8 @@ void AstTraversalVisitor<Subclass>::VisitClassLiteral(ClassLiteral* expr) {
RECURSE_EXPRESSION(Visit(expr->extends()));
}
RECURSE_EXPRESSION(Visit(expr->constructor()));
- if (expr->static_fields_initializer() != nullptr) {
- RECURSE_EXPRESSION(Visit(expr->static_fields_initializer()));
+ if (expr->static_initializer() != nullptr) {
+ RECURSE_EXPRESSION(Visit(expr->static_initializer()));
}
if (expr->instance_members_initializer_function() != nullptr) {
RECURSE_EXPRESSION(Visit(expr->instance_members_initializer_function()));
@@ -506,6 +506,29 @@ void AstTraversalVisitor<Subclass>::VisitInitializeClassMembersStatement(
}
template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitInitializeClassStaticElementsStatement(
+ InitializeClassStaticElementsStatement* stmt) {
+ PROCESS_NODE(stmt);
+ ZonePtrList<ClassLiteral::StaticElement>* elements = stmt->elements();
+ for (int i = 0; i < elements->length(); ++i) {
+ ClassLiteral::StaticElement* element = elements->at(i);
+ switch (element->kind()) {
+ case ClassLiteral::StaticElement::PROPERTY: {
+ ClassLiteral::Property* prop = element->property();
+ if (!prop->key()->IsLiteral()) {
+ RECURSE(Visit(prop->key()));
+ }
+ RECURSE(Visit(prop->value()));
+ break;
+ }
+ case ClassLiteral::StaticElement::STATIC_BLOCK:
+ RECURSE(Visit(element->static_block()));
+ break;
+ }
+ }
+}
+
+template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitSpread(Spread* expr) {
PROCESS_EXPRESSION(expr);
RECURSE_EXPRESSION(Visit(expr->expression()));
@@ -546,7 +569,6 @@ template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitSuperPropertyReference(
SuperPropertyReference* expr) {
PROCESS_EXPRESSION(expr);
- RECURSE_EXPRESSION(Visit(expr->home_object()));
}
template <class Subclass>
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index 115f23ee0d..b66e11f99f 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -200,8 +200,6 @@ class AstConsString final : public ZoneObject {
Segment segment_;
};
-enum class AstSymbol : uint8_t { kHomeObjectSymbol };
-
class AstBigInt {
public:
// |bigint| must be a NUL-terminated string of ASCII characters
@@ -229,58 +227,60 @@ using AstRawStringMap =
base::DefaultAllocationPolicy>;
// For generating constants.
-#define AST_STRING_CONSTANTS(F) \
- F(anonymous, "anonymous") \
- F(anonymous_function, "(anonymous function)") \
- F(arguments, "arguments") \
- F(as, "as") \
- F(assert, "assert") \
- F(async, "async") \
- F(await, "await") \
- F(bigint, "bigint") \
- F(boolean, "boolean") \
- F(computed, "<computed>") \
- F(dot_brand, ".brand") \
- F(constructor, "constructor") \
- F(default, "default") \
- F(done, "done") \
- F(dot, ".") \
- F(dot_default, ".default") \
- F(dot_for, ".for") \
- F(dot_generator_object, ".generator_object") \
- F(dot_result, ".result") \
- F(dot_repl_result, ".repl_result") \
- F(dot_switch_tag, ".switch_tag") \
- F(dot_catch, ".catch") \
- F(empty, "") \
- F(eval, "eval") \
- F(from, "from") \
- F(function, "function") \
- F(get, "get") \
- F(get_space, "get ") \
- F(length, "length") \
- F(let, "let") \
- F(meta, "meta") \
- F(name, "name") \
- F(native, "native") \
- F(new_target, ".new.target") \
- F(next, "next") \
- F(number, "number") \
- F(object, "object") \
- F(of, "of") \
- F(private_constructor, "#constructor") \
- F(proto, "__proto__") \
- F(prototype, "prototype") \
- F(return, "return") \
- F(set, "set") \
- F(set_space, "set ") \
- F(string, "string") \
- F(symbol, "symbol") \
- F(target, "target") \
- F(this, "this") \
- F(this_function, ".this_function") \
- F(throw, "throw") \
- F(undefined, "undefined") \
+#define AST_STRING_CONSTANTS(F) \
+ F(anonymous, "anonymous") \
+ F(anonymous_function, "(anonymous function)") \
+ F(arguments, "arguments") \
+ F(as, "as") \
+ F(assert, "assert") \
+ F(async, "async") \
+ F(await, "await") \
+ F(bigint, "bigint") \
+ F(boolean, "boolean") \
+ F(computed, "<computed>") \
+ F(dot_brand, ".brand") \
+ F(constructor, "constructor") \
+ F(default, "default") \
+ F(done, "done") \
+ F(dot, ".") \
+ F(dot_default, ".default") \
+ F(dot_for, ".for") \
+ F(dot_generator_object, ".generator_object") \
+ F(dot_home_object, ".home_object") \
+ F(dot_result, ".result") \
+ F(dot_repl_result, ".repl_result") \
+ F(dot_static_home_object, ".static_home_object") \
+ F(dot_switch_tag, ".switch_tag") \
+ F(dot_catch, ".catch") \
+ F(empty, "") \
+ F(eval, "eval") \
+ F(from, "from") \
+ F(function, "function") \
+ F(get, "get") \
+ F(get_space, "get ") \
+ F(length, "length") \
+ F(let, "let") \
+ F(meta, "meta") \
+ F(name, "name") \
+ F(native, "native") \
+ F(new_target, ".new.target") \
+ F(next, "next") \
+ F(number, "number") \
+ F(object, "object") \
+ F(of, "of") \
+ F(private_constructor, "#constructor") \
+ F(proto, "__proto__") \
+ F(prototype, "prototype") \
+ F(return, "return") \
+ F(set, "set") \
+ F(set_space, "set ") \
+ F(string, "string") \
+ F(symbol, "symbol") \
+ F(target, "target") \
+ F(this, "this") \
+ F(this_function, ".this_function") \
+ F(throw, "throw") \
+ F(undefined, "undefined") \
F(value, "value")
class AstStringConstants final {
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index 835f2f42de..9eddb14e61 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -236,12 +236,6 @@ LanguageMode FunctionLiteral::language_mode() const {
FunctionKind FunctionLiteral::kind() const { return scope()->function_kind(); }
-bool FunctionLiteral::NeedsHomeObject(Expression* expr) {
- if (expr == nullptr || !expr->IsFunctionLiteral()) return false;
- DCHECK_NOT_NULL(expr->AsFunctionLiteral()->scope());
- return expr->AsFunctionLiteral()->scope()->NeedsHomeObject();
-}
-
std::unique_ptr<char[]> FunctionLiteral::GetDebugName() const {
const AstConsString* cons_string;
if (raw_name_ != nullptr && !raw_name_->IsEmpty()) {
@@ -581,7 +575,6 @@ int ArrayLiteral::InitDepthAndFlags() {
break;
case Literal::kBigInt:
case Literal::kString:
- case Literal::kSymbol:
case Literal::kBoolean:
case Literal::kUndefined:
case Literal::kNull:
@@ -981,8 +974,6 @@ Handle<Object> Literal::BuildValue(LocalIsolate* isolate) const {
number_);
case kString:
return string_->string();
- case kSymbol:
- return isolate->factory()->home_object_symbol();
case kBoolean:
return isolate->factory()->ToBoolean(boolean_);
case kNull:
@@ -1028,8 +1019,6 @@ bool Literal::ToBooleanIsTrue() const {
}
return false;
}
- case kSymbol:
- return true;
case kTheHole:
UNREACHABLE();
}
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 3b79dce992..50a0c55d4d 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -54,21 +54,22 @@ namespace internal {
V(Block) \
V(SwitchStatement)
-#define STATEMENT_NODE_LIST(V) \
- ITERATION_NODE_LIST(V) \
- BREAKABLE_NODE_LIST(V) \
- V(ExpressionStatement) \
- V(EmptyStatement) \
- V(SloppyBlockFunctionStatement) \
- V(IfStatement) \
- V(ContinueStatement) \
- V(BreakStatement) \
- V(ReturnStatement) \
- V(WithStatement) \
- V(TryCatchStatement) \
- V(TryFinallyStatement) \
- V(DebuggerStatement) \
- V(InitializeClassMembersStatement)
+#define STATEMENT_NODE_LIST(V) \
+ ITERATION_NODE_LIST(V) \
+ BREAKABLE_NODE_LIST(V) \
+ V(ExpressionStatement) \
+ V(EmptyStatement) \
+ V(SloppyBlockFunctionStatement) \
+ V(IfStatement) \
+ V(ContinueStatement) \
+ V(BreakStatement) \
+ V(ReturnStatement) \
+ V(WithStatement) \
+ V(TryCatchStatement) \
+ V(TryFinallyStatement) \
+ V(DebuggerStatement) \
+ V(InitializeClassMembersStatement) \
+ V(InitializeClassStaticElementsStatement)
#define LITERAL_NODE_LIST(V) \
V(RegExpLiteral) \
@@ -919,7 +920,6 @@ class Literal final : public Expression {
kHeapNumber,
kBigInt,
kString,
- kSymbol,
kBoolean,
kUndefined,
kNull,
@@ -974,11 +974,6 @@ class Literal final : public Expression {
return string_;
}
- AstSymbol AsSymbol() {
- DCHECK_EQ(type(), kSymbol);
- return symbol_;
- }
-
V8_EXPORT_PRIVATE bool ToBooleanIsTrue() const;
bool ToBooleanIsFalse() const { return !ToBooleanIsTrue(); }
@@ -1019,11 +1014,6 @@ class Literal final : public Expression {
bit_field_ = TypeField::update(bit_field_, kString);
}
- Literal(AstSymbol symbol, int position)
- : Expression(position, kLiteral), symbol_(symbol) {
- bit_field_ = TypeField::update(bit_field_, kSymbol);
- }
-
Literal(bool boolean, int position)
: Expression(position, kLiteral), boolean_(boolean) {
bit_field_ = TypeField::update(bit_field_, kBoolean);
@@ -1038,7 +1028,6 @@ class Literal final : public Expression {
const AstRawString* string_;
int smi_;
double number_;
- AstSymbol symbol_;
AstBigInt bigint_;
bool boolean_;
};
@@ -1307,6 +1296,8 @@ class ObjectLiteral final : public AggregateLiteral {
return flags;
}
+ Variable* home_object() const { return home_object_; }
+
enum Flags {
kFastElements = 1 << 3,
kHasNullPrototype = 1 << 4,
@@ -1321,10 +1312,11 @@ class ObjectLiteral final : public AggregateLiteral {
ObjectLiteral(Zone* zone, const ScopedPtrList<Property>& properties,
uint32_t boilerplate_properties, int pos,
- bool has_rest_property)
+ bool has_rest_property, Variable* home_object)
: AggregateLiteral(pos, kObjectLiteral),
boilerplate_properties_(boilerplate_properties),
- properties_(properties.ToConstVector(), zone) {
+ properties_(properties.ToConstVector(), zone),
+ home_object_(home_object) {
bit_field_ |= HasElementsField::encode(false) |
HasRestPropertyField::encode(has_rest_property) |
FastElementsField::encode(false) |
@@ -1345,6 +1337,7 @@ class ObjectLiteral final : public AggregateLiteral {
uint32_t boilerplate_properties_;
Handle<ObjectBoilerplateDescription> boilerplate_description_;
ZoneList<Property*> properties_;
+ Variable* home_object_;
using HasElementsField = AggregateLiteral::NextBitField<bool, 1>;
using HasRestPropertyField = HasElementsField::Next<bool, 1>;
@@ -2141,8 +2134,6 @@ class FunctionLiteral final : public Expression {
}
V8_EXPORT_PRIVATE LanguageMode language_mode() const;
- static bool NeedsHomeObject(Expression* expr);
-
void add_expected_properties(int number_properties) {
expected_property_count_ += number_properties;
}
@@ -2363,11 +2354,6 @@ class ClassLiteralProperty final : public LiteralProperty {
return private_or_computed_name_var_;
}
- bool NeedsHomeObjectOnClassPrototype() const {
- return is_private() && kind_ == METHOD &&
- FunctionLiteral::NeedsHomeObject(value_);
- }
-
private:
friend class AstNodeFactory;
friend Zone;
@@ -2381,6 +2367,40 @@ class ClassLiteralProperty final : public LiteralProperty {
Variable* private_or_computed_name_var_;
};
+class ClassLiteralStaticElement final : public ZoneObject {
+ public:
+ enum Kind : uint8_t { PROPERTY, STATIC_BLOCK };
+
+ Kind kind() const { return kind_; }
+
+ ClassLiteralProperty* property() const {
+ DCHECK(kind() == PROPERTY);
+ return property_;
+ }
+
+ Block* static_block() const {
+ DCHECK(kind() == STATIC_BLOCK);
+ return static_block_;
+ }
+
+ private:
+ friend class AstNodeFactory;
+ friend Zone;
+
+ explicit ClassLiteralStaticElement(ClassLiteralProperty* property)
+ : kind_(PROPERTY), property_(property) {}
+
+ explicit ClassLiteralStaticElement(Block* static_block)
+ : kind_(STATIC_BLOCK), static_block_(static_block) {}
+
+ Kind kind_;
+
+ union {
+ ClassLiteralProperty* property_;
+ Block* static_block_;
+ };
+};
+
class InitializeClassMembersStatement final : public Statement {
public:
using Property = ClassLiteralProperty;
@@ -2397,9 +2417,28 @@ class InitializeClassMembersStatement final : public Statement {
ZonePtrList<Property>* fields_;
};
+class InitializeClassStaticElementsStatement final : public Statement {
+ public:
+ using StaticElement = ClassLiteralStaticElement;
+
+ ZonePtrList<StaticElement>* elements() const { return elements_; }
+
+ private:
+ friend class AstNodeFactory;
+ friend Zone;
+
+ InitializeClassStaticElementsStatement(ZonePtrList<StaticElement>* elements,
+ int pos)
+ : Statement(pos, kInitializeClassStaticElementsStatement),
+ elements_(elements) {}
+
+ ZonePtrList<StaticElement>* elements_;
+};
+
class ClassLiteral final : public Expression {
public:
using Property = ClassLiteralProperty;
+ using StaticElement = ClassLiteralStaticElement;
ClassScope* scope() const { return scope_; }
Expression* extends() const { return extends_; }
@@ -2425,14 +2464,16 @@ class ClassLiteral final : public Expression {
return is_anonymous_expression();
}
- FunctionLiteral* static_fields_initializer() const {
- return static_fields_initializer_;
- }
+ FunctionLiteral* static_initializer() const { return static_initializer_; }
FunctionLiteral* instance_members_initializer_function() const {
return instance_members_initializer_function_;
}
+ Variable* home_object() const { return home_object_; }
+
+ Variable* static_home_object() const { return static_home_object_; }
+
private:
friend class AstNodeFactory;
friend Zone;
@@ -2441,11 +2482,12 @@ class ClassLiteral final : public Expression {
FunctionLiteral* constructor,
ZonePtrList<Property>* public_members,
ZonePtrList<Property>* private_members,
- FunctionLiteral* static_fields_initializer,
+ FunctionLiteral* static_initializer,
FunctionLiteral* instance_members_initializer_function,
int start_position, int end_position,
bool has_name_static_property, bool has_static_computed_names,
- bool is_anonymous, bool has_private_methods)
+ bool is_anonymous, bool has_private_methods,
+ Variable* home_object, Variable* static_home_object)
: Expression(start_position, kClassLiteral),
end_position_(end_position),
scope_(scope),
@@ -2453,9 +2495,11 @@ class ClassLiteral final : public Expression {
constructor_(constructor),
public_members_(public_members),
private_members_(private_members),
- static_fields_initializer_(static_fields_initializer),
+ static_initializer_(static_initializer),
instance_members_initializer_function_(
- instance_members_initializer_function) {
+ instance_members_initializer_function),
+ home_object_(home_object),
+ static_home_object_(static_home_object) {
bit_field_ |= HasNameStaticProperty::encode(has_name_static_property) |
HasStaticComputedNames::encode(has_static_computed_names) |
IsAnonymousExpression::encode(is_anonymous) |
@@ -2468,12 +2512,14 @@ class ClassLiteral final : public Expression {
FunctionLiteral* constructor_;
ZonePtrList<Property>* public_members_;
ZonePtrList<Property>* private_members_;
- FunctionLiteral* static_fields_initializer_;
+ FunctionLiteral* static_initializer_;
FunctionLiteral* instance_members_initializer_function_;
using HasNameStaticProperty = Expression::NextBitField<bool, 1>;
using HasStaticComputedNames = HasNameStaticProperty::Next<bool, 1>;
using IsAnonymousExpression = HasStaticComputedNames::Next<bool, 1>;
using HasPrivateMethods = IsAnonymousExpression::Next<bool, 1>;
+ Variable* home_object_;
+ Variable* static_home_object_;
};
@@ -2500,19 +2546,16 @@ class NativeFunctionLiteral final : public Expression {
class SuperPropertyReference final : public Expression {
public:
- Expression* home_object() const { return home_object_; }
+ VariableProxy* home_object() const { return home_object_; }
private:
friend class AstNodeFactory;
friend Zone;
- // We take in ThisExpression* only as a proof that it was accessed.
- SuperPropertyReference(Expression* home_object, int pos)
- : Expression(pos, kSuperPropertyReference), home_object_(home_object) {
- DCHECK(home_object->IsProperty());
- }
+ explicit SuperPropertyReference(VariableProxy* home_object, int pos)
+ : Expression(pos, kSuperPropertyReference), home_object_(home_object) {}
- Expression* home_object_;
+ VariableProxy* home_object_;
};
@@ -2926,11 +2969,6 @@ class AstNodeFactory final {
return zone_->New<Literal>(string, pos);
}
- // A JavaScript symbol (ECMA-262 edition 6).
- Literal* NewSymbolLiteral(AstSymbol symbol, int pos) {
- return zone_->New<Literal>(symbol, pos);
- }
-
Literal* NewNumberLiteral(double number, int pos);
Literal* NewSmiLiteral(int number, int pos) {
@@ -2959,9 +2997,10 @@ class AstNodeFactory final {
ObjectLiteral* NewObjectLiteral(
const ScopedPtrList<ObjectLiteral::Property>& properties,
- uint32_t boilerplate_properties, int pos, bool has_rest_property) {
+ uint32_t boilerplate_properties, int pos, bool has_rest_property,
+ Variable* home_object = nullptr) {
return zone_->New<ObjectLiteral>(zone_, properties, boilerplate_properties,
- pos, has_rest_property);
+ pos, has_rest_property, home_object);
}
ObjectLiteral::Property* NewObjectLiteralProperty(
@@ -3187,20 +3226,32 @@ class AstNodeFactory final {
is_computed_name, is_private);
}
+ ClassLiteral::StaticElement* NewClassLiteralStaticElement(
+ ClassLiteral::Property* property) {
+ return zone_->New<ClassLiteral::StaticElement>(property);
+ }
+
+ ClassLiteral::StaticElement* NewClassLiteralStaticElement(
+ Block* static_block) {
+ return zone_->New<ClassLiteral::StaticElement>(static_block);
+ }
+
ClassLiteral* NewClassLiteral(
ClassScope* scope, Expression* extends, FunctionLiteral* constructor,
ZonePtrList<ClassLiteral::Property>* public_members,
ZonePtrList<ClassLiteral::Property>* private_members,
- FunctionLiteral* static_fields_initializer,
+ FunctionLiteral* static_initializer,
FunctionLiteral* instance_members_initializer_function,
int start_position, int end_position, bool has_name_static_property,
bool has_static_computed_names, bool is_anonymous,
- bool has_private_methods) {
+ bool has_private_methods, Variable* home_object,
+ Variable* static_home_object) {
return zone_->New<ClassLiteral>(
scope, extends, constructor, public_members, private_members,
- static_fields_initializer, instance_members_initializer_function,
+ static_initializer, instance_members_initializer_function,
start_position, end_position, has_name_static_property,
- has_static_computed_names, is_anonymous, has_private_methods);
+ has_static_computed_names, is_anonymous, has_private_methods,
+ home_object, static_home_object);
}
NativeFunctionLiteral* NewNativeFunctionLiteral(const AstRawString* name,
@@ -3209,9 +3260,9 @@ class AstNodeFactory final {
return zone_->New<NativeFunctionLiteral>(name, extension, pos);
}
- SuperPropertyReference* NewSuperPropertyReference(Expression* home_object,
- int pos) {
- return zone_->New<SuperPropertyReference>(home_object, pos);
+ SuperPropertyReference* NewSuperPropertyReference(
+ VariableProxy* home_object_var, int pos) {
+ return zone_->New<SuperPropertyReference>(home_object_var, pos);
}
SuperCallReference* NewSuperCallReference(VariableProxy* new_target_var,
@@ -3253,6 +3304,12 @@ class AstNodeFactory final {
return zone_->New<InitializeClassMembersStatement>(args, pos);
}
+ InitializeClassStaticElementsStatement*
+ NewInitializeClassStaticElementsStatement(
+ ZonePtrList<ClassLiteral::StaticElement>* args, int pos) {
+ return zone_->New<InitializeClassStaticElementsStatement>(args, pos);
+ }
+
Zone* zone() const { return zone_; }
private:
diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc
index 162e263e4d..62dc619141 100644
--- a/deps/v8/src/ast/modules.cc
+++ b/deps/v8/src/ast/modules.cc
@@ -127,62 +127,23 @@ Handle<PrimitiveHeapObject> ToStringOrUndefined(LocalIsolate* isolate,
template <typename LocalIsolate>
Handle<ModuleRequest> SourceTextModuleDescriptor::AstModuleRequest::Serialize(
LocalIsolate* isolate) const {
- // Copy the assertions to a FixedArray, filtering out as we go the import
- // assertions that are not supported by the embedder.
- //
- // This is O(m * n) where m is the number of import assertions in this
- // request and n is the number of supported assertions. Both m and n are
- // expected to be quite small, with m being 0 in the common case and n
- // currently expected to be at most 1 since "type" is the only import
- // assertion in use at the time of this writing. So for now we go with
- // this simple nested loop approach, which should not result in a larger
- // than necessary performance cost unless more than a few additional
- // import assertions are standardized.
- //
- // The import assertions will be stored in the new array in the form: [key1,
- // value1, location1, key2, value2, location2, ...].
- const std::vector<std::string>& supported_assertions =
- isolate->supported_import_assertions();
+ // The import assertions will be stored in this array in the form:
+ // [key1, value1, location1, key2, value2, location2, ...]
Handle<FixedArray> import_assertions_array =
isolate->factory()->NewFixedArray(static_cast<int>(
import_assertions()->size() * ModuleRequest::kAssertionEntrySize));
- int filtered_assertions_index = 0;
- for (auto iter = import_assertions()->cbegin();
- iter != import_assertions()->cend(); ++iter) {
- for (const std::string& supported_assertion : supported_assertions) {
- Handle<String> assertion_key = iter->first->string();
- if (assertion_key->IsEqualTo(Vector<const char>(
- supported_assertion.c_str(), supported_assertion.length()))) {
- import_assertions_array->set(filtered_assertions_index,
- *iter->first->string());
- import_assertions_array->set(filtered_assertions_index + 1,
- *iter->second.first->string());
- import_assertions_array->set(filtered_assertions_index + 2,
- Smi::FromInt(iter->second.second.beg_pos));
- filtered_assertions_index += ModuleRequest::kAssertionEntrySize;
- break;
- }
- }
- }
- Handle<FixedArray> shortened_import_assertions_array;
- if (filtered_assertions_index < import_assertions_array->length()) {
- // If we did filter out any assertions, create a FixedArray with the correct
- // length. This should be rare, since it would be unexpected for developers
- // to commonly use unsupported assertions. Note, we don't use
- // FixedArray::ShrinkOrEmpty here since FixedArray::Shrink isn't available
- // on a LocalIsolate/LocalHeap.
- shortened_import_assertions_array =
- isolate->factory()->NewFixedArray(filtered_assertions_index);
- import_assertions_array->CopyTo(0, *shortened_import_assertions_array, 0,
- filtered_assertions_index);
- } else {
- shortened_import_assertions_array = import_assertions_array;
+ int i = 0;
+ for (auto iter = import_assertions()->cbegin();
+ iter != import_assertions()->cend();
+ ++iter, i += ModuleRequest::kAssertionEntrySize) {
+ import_assertions_array->set(i, *iter->first->string());
+ import_assertions_array->set(i + 1, *iter->second.first->string());
+ import_assertions_array->set(i + 2,
+ Smi::FromInt(iter->second.second.beg_pos));
}
-
return v8::internal::ModuleRequest::New(isolate, specifier()->string(),
- shortened_import_assertions_array,
- position());
+ import_assertions_array, position());
}
template Handle<ModuleRequest>
SourceTextModuleDescriptor::AstModuleRequest::Serialize(Isolate* isolate) const;
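The rewritten AstModuleRequest::Serialize above drops the embedder-side filtering and simply packs every import assertion into a flat array as consecutive (key, value, source position) triples. A minimal standalone sketch of that layout, using hypothetical stand-in types rather than V8's FixedArray and handles:

#include <cstddef>
#include <string>
#include <tuple>
#include <vector>

// Hypothetical stand-in for one slot of the flat array.
struct Slot {
  std::string text;   // key or value slot
  int position = -1;  // only meaningful for the position slot
};

// Entry i occupies slots 3*i (key), 3*i + 1 (value) and 3*i + 2 (position).
std::vector<Slot> Flatten(
    const std::vector<std::tuple<std::string, std::string, int>>& assertions) {
  std::vector<Slot> flat(assertions.size() * 3);
  for (std::size_t i = 0; i < assertions.size(); ++i) {
    flat[3 * i].text = std::get<0>(assertions[i]);
    flat[3 * i + 1].text = std::get<1>(assertions[i]);
    flat[3 * i + 2].position = std::get<2>(assertions[i]);
  }
  return flat;
}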
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index 595b057010..cb8d2ec75a 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -235,6 +235,18 @@ void CallPrinter::VisitInitializeClassMembersStatement(
}
}
+void CallPrinter::VisitInitializeClassStaticElementsStatement(
+ InitializeClassStaticElementsStatement* node) {
+ for (int i = 0; i < node->elements()->length(); i++) {
+ ClassLiteral::StaticElement* element = node->elements()->at(i);
+ if (element->kind() == ClassLiteral::StaticElement::PROPERTY) {
+ Find(element->property()->value());
+ } else {
+ Find(element->static_block());
+ }
+ }
+}
+
void CallPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {}
@@ -256,6 +268,7 @@ void CallPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
Print("/");
PrintLiteral(node->pattern(), false);
Print("/");
+ if (node->flags() & RegExp::kHasIndices) Print("d");
if (node->flags() & RegExp::kGlobal) Print("g");
if (node->flags() & RegExp::kIgnoreCase) Print("i");
if (node->flags() & RegExp::kLinear) Print("l");
@@ -662,14 +675,6 @@ void AstPrinter::PrintLiteral(Literal* literal, bool quote) {
case Literal::kString:
PrintLiteral(literal->AsRawString(), quote);
break;
- case Literal::kSymbol:
- const char* symbol;
- switch (literal->AsSymbol()) {
- case AstSymbol::kHomeObjectSymbol:
- symbol = "HomeObjectSymbol";
- }
- Print("%s", symbol);
- break;
case Literal::kSmi:
Print("%d", Smi::ToInt(literal->AsSmiLiteral()));
break;
@@ -1093,9 +1098,8 @@ void AstPrinter::VisitClassLiteral(ClassLiteral* node) {
PrintLiteralWithModeIndented("BRAND", brand, brand->raw_name());
}
}
- if (node->static_fields_initializer() != nullptr) {
- PrintIndentedVisit("STATIC FIELDS INITIALIZER",
- node->static_fields_initializer());
+ if (node->static_initializer() != nullptr) {
+ PrintIndentedVisit("STATIC INITIALIZER", node->static_initializer());
}
if (node->instance_members_initializer_function() != nullptr) {
PrintIndentedVisit("INSTANCE MEMBERS INITIALIZER",
@@ -1111,35 +1115,59 @@ void AstPrinter::VisitInitializeClassMembersStatement(
PrintClassProperties(node->fields());
}
+void AstPrinter::VisitInitializeClassStaticElementsStatement(
+ InitializeClassStaticElementsStatement* node) {
+ IndentedScope indent(this, "INITIALIZE CLASS STATIC ELEMENTS",
+ node->position());
+ PrintClassStaticElements(node->elements());
+}
+
+void AstPrinter::PrintClassProperty(ClassLiteral::Property* property) {
+ const char* prop_kind = nullptr;
+ switch (property->kind()) {
+ case ClassLiteral::Property::METHOD:
+ prop_kind = "METHOD";
+ break;
+ case ClassLiteral::Property::GETTER:
+ prop_kind = "GETTER";
+ break;
+ case ClassLiteral::Property::SETTER:
+ prop_kind = "SETTER";
+ break;
+ case ClassLiteral::Property::FIELD:
+ prop_kind = "FIELD";
+ break;
+ }
+ EmbeddedVector<char, 128> buf;
+ SNPrintF(buf, "PROPERTY%s%s - %s", property->is_static() ? " - STATIC" : "",
+ property->is_private() ? " - PRIVATE" : " - PUBLIC", prop_kind);
+ IndentedScope prop(this, buf.begin());
+ PrintIndentedVisit("KEY", property->key());
+ PrintIndentedVisit("VALUE", property->value());
+}
+
void AstPrinter::PrintClassProperties(
const ZonePtrList<ClassLiteral::Property>* properties) {
for (int i = 0; i < properties->length(); i++) {
- ClassLiteral::Property* property = properties->at(i);
- const char* prop_kind = nullptr;
- switch (property->kind()) {
- case ClassLiteral::Property::METHOD:
- prop_kind = "METHOD";
- break;
- case ClassLiteral::Property::GETTER:
- prop_kind = "GETTER";
- break;
- case ClassLiteral::Property::SETTER:
- prop_kind = "SETTER";
+ PrintClassProperty(properties->at(i));
+ }
+}
+
+void AstPrinter::PrintClassStaticElements(
+ const ZonePtrList<ClassLiteral::StaticElement>* static_elements) {
+ for (int i = 0; i < static_elements->length(); i++) {
+ ClassLiteral::StaticElement* element = static_elements->at(i);
+ switch (element->kind()) {
+ case ClassLiteral::StaticElement::PROPERTY:
+ PrintClassProperty(element->property());
break;
- case ClassLiteral::Property::FIELD:
- prop_kind = "FIELD";
+ case ClassLiteral::StaticElement::STATIC_BLOCK:
+ PrintIndentedVisit("STATIC BLOCK", element->static_block());
break;
}
- EmbeddedVector<char, 128> buf;
- SNPrintF(buf, "PROPERTY%s%s - %s", property->is_static() ? " - STATIC" : "",
- property->is_private() ? " - PRIVATE" : " - PUBLIC", prop_kind);
- IndentedScope prop(this, buf.begin());
- PrintIndentedVisit("KEY", properties->at(i)->key());
- PrintIndentedVisit("VALUE", properties->at(i)->value());
}
}
-
void AstPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {
IndentedScope indent(this, "NATIVE FUNC LITERAL", node->position());
PrintLiteralIndented("NAME", node->raw_name(), false);
@@ -1164,6 +1192,7 @@ void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
PrintLiteralIndented("PATTERN", node->raw_pattern(), false);
int i = 0;
EmbeddedVector<char, 128> buf;
+ if (node->flags() & RegExp::kHasIndices) buf[i++] = 'd';
if (node->flags() & RegExp::kGlobal) buf[i++] = 'g';
if (node->flags() & RegExp::kIgnoreCase) buf[i++] = 'i';
if (node->flags() & RegExp::kLinear) buf[i++] = 'l';
diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h
index 4b939c7d17..e26d98e7a3 100644
--- a/deps/v8/src/ast/prettyprinter.h
+++ b/deps/v8/src/ast/prettyprinter.h
@@ -133,8 +133,11 @@ class AstPrinter final : public AstVisitor<AstPrinter> {
const char* prefix = "");
void PrintObjectProperties(
const ZonePtrList<ObjectLiteral::Property>* properties);
+ void PrintClassProperty(ClassLiteral::Property* property);
void PrintClassProperties(
const ZonePtrList<ClassLiteral::Property>* properties);
+ void PrintClassStaticElements(
+ const ZonePtrList<ClassLiteral::StaticElement>* static_elements);
void inc_indent() { indent_++; }
void dec_indent() { indent_--; }
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index 2644184a43..4e396c457f 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -113,6 +113,32 @@ Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type)
outer_scope_->AddInnerScope(this);
}
+Variable* Scope::DeclareHomeObjectVariable(AstValueFactory* ast_value_factory) {
+ bool was_added;
+ Variable* home_object_variable = Declare(
+ zone(), ast_value_factory->dot_home_object_string(), VariableMode::kConst,
+ NORMAL_VARIABLE, InitializationFlag::kCreatedInitialized,
+ MaybeAssignedFlag::kNotAssigned, &was_added);
+ DCHECK(was_added);
+ home_object_variable->set_is_used();
+ home_object_variable->ForceContextAllocation();
+ return home_object_variable;
+}
+
+Variable* Scope::DeclareStaticHomeObjectVariable(
+ AstValueFactory* ast_value_factory) {
+ bool was_added;
+ Variable* static_home_object_variable =
+ Declare(zone(), ast_value_factory->dot_static_home_object_string(),
+ VariableMode::kConst, NORMAL_VARIABLE,
+ InitializationFlag::kCreatedInitialized,
+ MaybeAssignedFlag::kNotAssigned, &was_added);
+ DCHECK(was_added);
+ static_home_object_variable->set_is_used();
+ static_home_object_variable->ForceContextAllocation();
+ return static_home_object_variable;
+}
+
DeclarationScope::DeclarationScope(Zone* zone,
AstValueFactory* ast_value_factory,
REPLMode repl_mode)
@@ -148,7 +174,7 @@ ModuleScope::ModuleScope(DeclarationScope* script_scope,
ModuleScope::ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info,
AstValueFactory* avfactory)
- : DeclarationScope(avfactory->zone(), MODULE_SCOPE, scope_info),
+ : DeclarationScope(avfactory->zone(), MODULE_SCOPE, avfactory, scope_info),
module_descriptor_(nullptr) {
set_language_mode(LanguageMode::kStrict);
}
@@ -163,7 +189,7 @@ ClassScope::ClassScope(Zone* zone, Scope* outer_scope, bool is_anonymous)
ClassScope::ClassScope(Isolate* isolate, Zone* zone,
AstValueFactory* ast_value_factory,
Handle<ScopeInfo> scope_info)
- : Scope(zone, CLASS_SCOPE, scope_info),
+ : Scope(zone, CLASS_SCOPE, ast_value_factory, scope_info),
rare_data_and_is_parsing_heritage_(nullptr) {
set_language_mode(LanguageMode::kStrict);
if (scope_info->HasClassBrand()) {
@@ -193,7 +219,8 @@ ClassScope::ClassScope(Isolate* isolate, Zone* zone,
}
}
-Scope::Scope(Zone* zone, ScopeType scope_type, Handle<ScopeInfo> scope_info)
+Scope::Scope(Zone* zone, ScopeType scope_type,
+ AstValueFactory* ast_value_factory, Handle<ScopeInfo> scope_info)
: outer_scope_(nullptr),
variables_(zone),
scope_info_(scope_info),
@@ -210,11 +237,31 @@ Scope::Scope(Zone* zone, ScopeType scope_type, Handle<ScopeInfo> scope_info)
// We don't really need to use the preparsed scope data; this is just to
// shorten the recursion in SetMustUsePreparseData.
must_use_preparsed_scope_data_ = true;
+
+ if (scope_type == BLOCK_SCOPE) {
+    // Set is_block_scope_for_object_literal_ based on the existence of the home
+ // object variable (we don't store it explicitly).
+ VariableMode mode;
+ InitializationFlag init_flag;
+ MaybeAssignedFlag maybe_assigned_flag;
+ IsStaticFlag is_static_flag;
+
+ DCHECK_NOT_NULL(ast_value_factory);
+ int home_object_index = ScopeInfo::ContextSlotIndex(
+ *scope_info, *(ast_value_factory->dot_home_object_string()->string()),
+ &mode, &init_flag, &maybe_assigned_flag, &is_static_flag);
+ DCHECK_IMPLIES(home_object_index >= 0,
+ scope_type == CLASS_SCOPE || scope_type == BLOCK_SCOPE);
+ if (home_object_index >= 0) {
+ is_block_scope_for_object_literal_ = true;
+ }
+ }
}
DeclarationScope::DeclarationScope(Zone* zone, ScopeType scope_type,
+ AstValueFactory* ast_value_factory,
Handle<ScopeInfo> scope_info)
- : Scope(zone, scope_type, scope_info),
+ : Scope(zone, scope_type, ast_value_factory, scope_info),
function_kind_(scope_info->function_kind()),
params_(0, zone) {
DCHECK_NE(scope_type, SCRIPT_SCOPE);
@@ -252,7 +299,7 @@ void DeclarationScope::SetDefaults() {
is_asm_module_ = false;
force_eager_compilation_ = false;
has_arguments_parameter_ = false;
- scope_uses_super_property_ = false;
+ uses_super_property_ = false;
has_checked_syntax_ = false;
has_this_reference_ = false;
has_this_declaration_ =
@@ -308,6 +355,9 @@ void Scope::SetDefaults() {
deserialized_scope_uses_external_cache_ = false;
+ needs_home_object_ = false;
+ is_block_scope_for_object_literal_ = false;
+
num_stack_slots_ = 0;
num_heap_slots_ = ContextHeaderLength();
@@ -357,13 +407,14 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
while (!scope_info.is_null()) {
if (scope_info.scope_type() == WITH_SCOPE) {
if (scope_info.IsDebugEvaluateScope()) {
- outer_scope = zone->New<DeclarationScope>(zone, FUNCTION_SCOPE,
- handle(scope_info, isolate));
+ outer_scope =
+ zone->New<DeclarationScope>(zone, FUNCTION_SCOPE, ast_value_factory,
+ handle(scope_info, isolate));
outer_scope->set_is_debug_evaluate_scope();
} else {
// For scope analysis, debug-evaluate is equivalent to a with scope.
- outer_scope =
- zone->New<Scope>(zone, WITH_SCOPE, handle(scope_info, isolate));
+ outer_scope = zone->New<Scope>(zone, WITH_SCOPE, ast_value_factory,
+ handle(scope_info, isolate));
}
} else if (scope_info.scope_type() == SCRIPT_SCOPE) {
@@ -377,24 +428,24 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
DCHECK(!scope_info.HasOuterScopeInfo());
break;
} else if (scope_info.scope_type() == FUNCTION_SCOPE) {
- outer_scope = zone->New<DeclarationScope>(zone, FUNCTION_SCOPE,
- handle(scope_info, isolate));
+ outer_scope = zone->New<DeclarationScope>(
+ zone, FUNCTION_SCOPE, ast_value_factory, handle(scope_info, isolate));
if (scope_info.IsAsmModule()) {
outer_scope->AsDeclarationScope()->set_is_asm_module();
}
} else if (scope_info.scope_type() == EVAL_SCOPE) {
- outer_scope = zone->New<DeclarationScope>(zone, EVAL_SCOPE,
- handle(scope_info, isolate));
+ outer_scope = zone->New<DeclarationScope>(
+ zone, EVAL_SCOPE, ast_value_factory, handle(scope_info, isolate));
} else if (scope_info.scope_type() == CLASS_SCOPE) {
outer_scope = zone->New<ClassScope>(isolate, zone, ast_value_factory,
handle(scope_info, isolate));
} else if (scope_info.scope_type() == BLOCK_SCOPE) {
if (scope_info.is_declaration_scope()) {
- outer_scope = zone->New<DeclarationScope>(zone, BLOCK_SCOPE,
- handle(scope_info, isolate));
+ outer_scope = zone->New<DeclarationScope>(
+ zone, BLOCK_SCOPE, ast_value_factory, handle(scope_info, isolate));
} else {
- outer_scope =
- zone->New<Scope>(zone, BLOCK_SCOPE, handle(scope_info, isolate));
+ outer_scope = zone->New<Scope>(zone, BLOCK_SCOPE, ast_value_factory,
+ handle(scope_info, isolate));
}
} else if (scope_info.scope_type() == MODULE_SCOPE) {
outer_scope = zone->New<ModuleScope>(isolate, handle(scope_info, isolate),
@@ -1378,6 +1429,29 @@ DeclarationScope* Scope::GetReceiverScope() {
return scope->AsDeclarationScope();
}
+Scope* Scope::GetHomeObjectScope() {
+ Scope* scope = this;
+ while (scope != nullptr && !scope->is_home_object_scope()) {
+ if (scope->is_function_scope()) {
+ FunctionKind function_kind = scope->AsDeclarationScope()->function_kind();
+ // "super" in arrow functions binds outside the arrow function. But if we
+      // find a function which doesn't bind "super" (is not a method etc.) and is
+      // not an arrow function, we know "super" here doesn't bind anywhere and
+ // we can return nullptr.
+ if (!IsArrowFunction(function_kind) && !BindsSuper(function_kind)) {
+ return nullptr;
+ }
+ }
+ if (scope->private_name_lookup_skips_outer_class()) {
+ DCHECK(scope->outer_scope()->is_class_scope());
+ scope = scope->outer_scope()->outer_scope();
+ } else {
+ scope = scope->outer_scope();
+ }
+ }
+ return scope;
+}
+
DeclarationScope* Scope::GetScriptScope() {
Scope* scope = this;
while (!scope->is_script_scope()) {
@@ -1781,9 +1855,6 @@ void Scope::Print(int n) {
AsDeclarationScope()->sloppy_eval_can_extend_vars()) {
Indent(n1, "// scope calls sloppy 'eval'\n");
}
- if (is_declaration_scope() && AsDeclarationScope()->NeedsHomeObject()) {
- Indent(n1, "// scope needs home object\n");
- }
if (private_name_lookup_skips_outer_class()) {
Indent(n1, "// scope skips outer class for #-names\n");
}
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index e731d4c46a..eb97c95b32 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -225,6 +225,9 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
VariableKind kind = NORMAL_VARIABLE);
Variable* DeclareCatchVariableName(const AstRawString* name);
+ Variable* DeclareHomeObjectVariable(AstValueFactory* ast_value_factory);
+ Variable* DeclareStaticHomeObjectVariable(AstValueFactory* ast_value_factory);
+
// Declarations list.
base::ThreadedList<Declaration>* declarations() { return &decls_; }
@@ -369,6 +372,18 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
bool is_with_scope() const { return scope_type_ == WITH_SCOPE; }
bool is_declaration_scope() const { return is_declaration_scope_; }
bool is_class_scope() const { return scope_type_ == CLASS_SCOPE; }
+ bool is_home_object_scope() const {
+ return is_class_scope() ||
+ (is_block_scope() && is_block_scope_for_object_literal_);
+ }
+ bool is_block_scope_for_object_literal() const {
+ DCHECK_IMPLIES(is_block_scope_for_object_literal_, is_block_scope());
+ return is_block_scope_for_object_literal_;
+ }
+ void set_is_block_scope_for_object_literal() {
+ DCHECK(is_block_scope());
+ is_block_scope_for_object_literal_ = true;
+ }
bool inner_scope_calls_eval() const { return inner_scope_calls_eval_; }
bool private_name_lookup_skips_outer_class() const {
@@ -525,6 +540,10 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// 'this' is bound, and what determines the function kind.
DeclarationScope* GetReceiverScope();
+ // Find the first class scope or object literal block scope. This is where
+ // 'super' is bound.
+ Scope* GetHomeObjectScope();
+
DeclarationScope* GetScriptScope();
// Find the innermost outer scope that needs a context.
@@ -570,6 +589,16 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
return deserialized_scope_uses_external_cache_;
}
+ bool needs_home_object() const {
+ DCHECK(is_home_object_scope());
+ return needs_home_object_;
+ }
+
+ void set_needs_home_object() {
+ DCHECK(is_home_object_scope());
+ needs_home_object_ = true;
+ }
+
bool RemoveInnerScope(Scope* inner_scope) {
DCHECK_NOT_NULL(inner_scope);
if (inner_scope == inner_scope_) {
@@ -691,7 +720,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
MaybeHandle<ScopeInfo> outer_scope);
// Construct a scope based on the scope info.
- Scope(Zone* zone, ScopeType type, Handle<ScopeInfo> scope_info);
+ Scope(Zone* zone, ScopeType type, AstValueFactory* ast_value_factory,
+ Handle<ScopeInfo> scope_info);
// Construct a catch scope with a binding for the name.
Scope(Zone* zone, const AstRawString* catch_variable_name,
@@ -808,6 +838,9 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// the compilation of the eval will have the "with" scope as the first scope
// with this flag enabled.
bool deserialized_scope_uses_external_cache_ : 1;
+
+ bool needs_home_object_ : 1;
+ bool is_block_scope_for_object_literal_ : 1;
};
class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
@@ -815,6 +848,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
DeclarationScope(Zone* zone, Scope* outer_scope, ScopeType scope_type,
FunctionKind function_kind = kNormalFunction);
DeclarationScope(Zone* zone, ScopeType scope_type,
+ AstValueFactory* ast_value_factory,
Handle<ScopeInfo> scope_info);
// Creates a script scope.
DeclarationScope(Zone* zone, AstValueFactory* ast_value_factory,
@@ -822,24 +856,21 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
FunctionKind function_kind() const { return function_kind_; }
- bool is_arrow_scope() const {
- return is_function_scope() && IsArrowFunction(function_kind_);
- }
-
// Inform the scope that the corresponding code uses "super".
void RecordSuperPropertyUsage() {
DCHECK(IsConciseMethod(function_kind()) ||
IsAccessorFunction(function_kind()) ||
IsClassConstructor(function_kind()));
- scope_uses_super_property_ = true;
+ uses_super_property_ = true;
+ Scope* home_object_scope = GetHomeObjectScope();
+ DCHECK_NOT_NULL(home_object_scope);
+ home_object_scope->set_needs_home_object();
}
- // Does this scope access "super" property (super.foo).
- bool NeedsHomeObject() const {
- return scope_uses_super_property_ ||
- (inner_scope_calls_eval_ && (IsConciseMethod(function_kind()) ||
- IsAccessorFunction(function_kind()) ||
- IsClassConstructor(function_kind())));
+ bool uses_super_property() const { return uses_super_property_; }
+
+ bool is_arrow_scope() const {
+ return is_function_scope() && IsArrowFunction(function_kind_);
}
// Inform the scope and outer scopes that the corresponding code contains an
@@ -1219,7 +1250,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// This scope has a parameter called "arguments".
bool has_arguments_parameter_ : 1;
// This scope uses "super" property ('super.foo').
- bool scope_uses_super_property_ : 1;
+ bool uses_super_property_ : 1;
bool should_eager_compile_ : 1;
// Set to true after we have finished lazy parsing the scope.
bool was_lazily_parsed_ : 1;
@@ -1298,6 +1329,14 @@ void Scope::RecordEvalCall() {
calls_eval_ = true;
GetDeclarationScope()->RecordDeclarationScopeEvalCall();
RecordInnerScopeEvalCall();
+ // The eval contents might access "super" (if it's inside a function that
+ // binds super).
+ DeclarationScope* receiver_scope = GetReceiverScope();
+ DCHECK(!receiver_scope->is_arrow_scope());
+ FunctionKind function_kind = receiver_scope->function_kind();
+ if (BindsSuper(function_kind)) {
+ receiver_scope->RecordSuperPropertyUsage();
+ }
}
Scope::Snapshot::Snapshot(Scope* scope)
diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h
index cf4b77fa18..b137f73936 100644
--- a/deps/v8/src/base/bits.h
+++ b/deps/v8/src/base/bits.h
@@ -107,6 +107,8 @@ inline constexpr unsigned CountLeadingZeros64(uint64_t value) {
// CountTrailingZeros(value) returns the number of zero bits preceding the
// least significant 1 bit in |value| if |value| is non-zero, otherwise it
// returns {sizeof(T) * 8}.
+// See CountTrailingZerosNonZero for an optimized version for the case that
+// |value| is guaranteed to be non-zero.
template <typename T, unsigned bits = sizeof(T) * 8>
inline constexpr
typename std::enable_if<std::is_integral<T>::value && sizeof(T) <= 8,
@@ -133,6 +135,24 @@ inline constexpr unsigned CountTrailingZeros64(uint64_t value) {
return CountTrailingZeros(value);
}
+// CountTrailingZerosNonZero(value) returns the number of zero bits preceding
+// the least significant 1 bit in |value| if |value| is non-zero, otherwise the
+// behavior is undefined.
+// See CountTrailingZeros for an alternative version that allows |value| == 0.
+template <typename T, unsigned bits = sizeof(T) * 8>
+inline constexpr
+ typename std::enable_if<std::is_integral<T>::value && sizeof(T) <= 8,
+ unsigned>::type
+ CountTrailingZerosNonZero(T value) {
+ CONSTEXPR_DCHECK(value != 0);
+#if V8_HAS_BUILTIN_CTZ
+ return bits == 64 ? __builtin_ctzll(static_cast<uint64_t>(value))
+ : __builtin_ctz(static_cast<uint32_t>(value));
+#else
+ return CountTrailingZeros<T, bits>(value);
+#endif
+}
+
// Returns true iff |value| is a power of 2.
template <typename T,
typename = typename std::enable_if<std::is_integral<T>::value ||
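The new CountTrailingZerosNonZero above is a tighter variant of CountTrailingZeros that asserts its argument is non-zero so it can call the ctz intrinsic unconditionally. A minimal standalone sketch of the same pattern, assuming a GCC/Clang-style __builtin_ctz and written outside V8's templates:

#include <cassert>
#include <cstdint>

// Precondition: value != 0 (mirrors the CONSTEXPR_DCHECK in the helper above).
inline unsigned CountTrailingZerosNonZero32(uint32_t value) {
  assert(value != 0);
#if defined(__GNUC__) || defined(__clang__)
  return static_cast<unsigned>(__builtin_ctz(value));
#else
  // Portable fallback; terminates because value is non-zero.
  unsigned count = 0;
  while ((value & 1u) == 0) {
    value >>= 1;
    ++count;
  }
  return count;
#endif
}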
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index 2bfbe1ba32..21db0b6e66 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -46,38 +46,44 @@
#else
#define V8_HOST_ARCH_32_BIT 1
#endif
+#elif defined(__riscv) || defined(__riscv__)
+#if __riscv_xlen == 64
+#define V8_HOST_ARCH_RISCV64 1
+#define V8_HOST_ARCH_64_BIT 1
+#else
+#error "Cannot detect Riscv's bitwidth"
+#endif
#else
#error "Host architecture was not detected as supported by v8"
#endif
-#if defined(__ARM_ARCH_7A__) || \
- defined(__ARM_ARCH_7R__) || \
+#if defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || \
defined(__ARM_ARCH_7__)
-# define CAN_USE_ARMV7_INSTRUCTIONS 1
+#define CAN_USE_ARMV7_INSTRUCTIONS 1
#ifdef __ARM_ARCH_EXT_IDIV__
#define CAN_USE_SUDIV 1
#endif
-# ifndef CAN_USE_VFP3_INSTRUCTIONS
+#ifndef CAN_USE_VFP3_INSTRUCTIONS
#define CAN_USE_VFP3_INSTRUCTIONS 1
-# endif
+#endif
#endif
#if defined(__ARM_ARCH_8A__)
#define CAN_USE_ARMV7_INSTRUCTIONS 1
#define CAN_USE_SUDIV 1
-# define CAN_USE_ARMV8_INSTRUCTIONS 1
+#define CAN_USE_ARMV8_INSTRUCTIONS 1
#ifndef CAN_USE_VFP3_INSTRUCTIONS
#define CAN_USE_VFP3_INSTRUCTIONS 1
#endif
#endif
-
// Target architecture detection. This may be set externally. If not, detect
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \
!V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64 && \
- !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390
+ !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 && \
+ !V8_TARGET_ARCH_RISCV64
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
@@ -94,6 +100,10 @@
#define V8_TARGET_ARCH_PPC64 1
#elif defined(_ARCH_PPC)
#define V8_TARGET_ARCH_PPC 1
+#elif defined(__riscv) || defined(__riscv__)
+#if __riscv_xlen == 64
+#define V8_TARGET_ARCH_RISCV64 1
+#endif
#else
#error Target architecture was not detected as supported by v8
#endif
@@ -128,6 +138,8 @@
#else
#define V8_TARGET_ARCH_32_BIT 1
#endif
+#elif V8_TARGET_ARCH_RISCV64
+#define V8_TARGET_ARCH_64_BIT 1
#else
#error Unknown target architecture pointer size
#endif
@@ -156,6 +168,9 @@
#if (V8_TARGET_ARCH_MIPS64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_MIPS64))
#error Target architecture mips64 is only supported on mips64 and x64 host
#endif
+#if (V8_TARGET_ARCH_RISCV64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_RISCV64))
+#error Target architecture riscv64 is only supported on riscv64 and x64 host
+#endif
// Determine architecture endianness.
#if V8_TARGET_ARCH_IA32
@@ -190,6 +205,8 @@
#else
#define V8_TARGET_BIG_ENDIAN 1
#endif
+#elif V8_TARGET_ARCH_RISCV64
+#define V8_TARGET_LITTLE_ENDIAN 1
#else
#error Unknown target architecture endianness
#endif
diff --git a/deps/v8/src/base/compiler-specific.h b/deps/v8/src/base/compiler-specific.h
index 49ce128a4a..f7e2e0e14d 100644
--- a/deps/v8/src/base/compiler-specific.h
+++ b/deps/v8/src/base/compiler-specific.h
@@ -98,7 +98,8 @@
// there.
#if ((!defined(V8_CC_GNU) && !defined(V8_CC_MSVC) && \
!defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) && \
- !defined(V8_TARGET_ARCH_PPC) && !defined(V8_TARGET_ARCH_PPC64)) || \
+ !defined(V8_TARGET_ARCH_PPC) && !defined(V8_TARGET_ARCH_PPC64) && \
+ !defined(V8_TARGET_ARCH_RISCV64)) || \
(defined(__clang__) && __cplusplus > 201300L))
#define V8_NOEXCEPT noexcept
#else
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index 45700b707f..abfd048ee4 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -740,7 +740,7 @@ CPU::CPU()
has_jscvt_ = HasListItem(features, "jscvt");
delete[] features;
}
-#elif V8_OS_MAC
+#elif V8_OS_MACOSX
// ARM64 Macs always have JSCVT.
has_jscvt_ = true;
#endif // V8_OS_WIN
diff --git a/deps/v8/src/base/enum-set.h b/deps/v8/src/base/enum-set.h
index 2415f1c500..f623198c2d 100644
--- a/deps/v8/src/base/enum-set.h
+++ b/deps/v8/src/base/enum-set.h
@@ -27,31 +27,47 @@ class EnumSet {
bits_ = bits;
}
- bool empty() const { return bits_ == 0; }
- bool contains(E element) const { return (bits_ & Mask(element)) != 0; }
- bool contains_any(EnumSet set) const { return (bits_ & set.bits_) != 0; }
+ constexpr bool empty() const { return bits_ == 0; }
+ constexpr bool contains(E element) const {
+ return (bits_ & Mask(element)) != 0;
+ }
+ constexpr bool contains_any(EnumSet set) const {
+ return (bits_ & set.bits_) != 0;
+ }
void Add(E element) { bits_ |= Mask(element); }
void Add(EnumSet set) { bits_ |= set.bits_; }
void Remove(E element) { bits_ &= ~Mask(element); }
void Remove(EnumSet set) { bits_ &= ~set.bits_; }
void RemoveAll() { bits_ = 0; }
void Intersect(EnumSet set) { bits_ &= set.bits_; }
- T ToIntegral() const { return bits_; }
+ constexpr T ToIntegral() const { return bits_; }
- bool operator==(EnumSet set) const { return bits_ == set.bits_; }
- bool operator!=(EnumSet set) const { return bits_ != set.bits_; }
+ constexpr bool operator==(EnumSet set) const { return bits_ == set.bits_; }
+ constexpr bool operator!=(EnumSet set) const { return bits_ != set.bits_; }
- EnumSet operator|(EnumSet set) const { return EnumSet(bits_ | set.bits_); }
- EnumSet operator&(EnumSet set) const { return EnumSet(bits_ & set.bits_); }
- EnumSet operator-(EnumSet set) const { return EnumSet(bits_ & ~set.bits_); }
+ constexpr EnumSet operator|(EnumSet set) const {
+ return EnumSet(bits_ | set.bits_);
+ }
+ constexpr EnumSet operator&(EnumSet set) const {
+ return EnumSet(bits_ & set.bits_);
+ }
+ constexpr EnumSet operator-(EnumSet set) const {
+ return EnumSet(bits_ & ~set.bits_);
+ }
EnumSet& operator|=(EnumSet set) { return *this = *this | set; }
EnumSet& operator&=(EnumSet set) { return *this = *this & set; }
EnumSet& operator-=(EnumSet set) { return *this = *this - set; }
- EnumSet operator|(E element) const { return EnumSet(bits_ | Mask(element)); }
- EnumSet operator&(E element) const { return EnumSet(bits_ & Mask(element)); }
- EnumSet operator-(E element) const { return EnumSet(bits_ & ~Mask(element)); }
+ constexpr EnumSet operator|(E element) const {
+ return EnumSet(bits_ | Mask(element));
+ }
+ constexpr EnumSet operator&(E element) const {
+ return EnumSet(bits_ & Mask(element));
+ }
+ constexpr EnumSet operator-(E element) const {
+ return EnumSet(bits_ & ~Mask(element));
+ }
EnumSet& operator|=(E element) { return *this = *this | element; }
EnumSet& operator&=(E element) { return *this = *this & element; }
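The EnumSet change above only adds constexpr to the read-only operations, so sets built from enumerators can be formed and queried at compile time. A small self-contained sketch of that idea (illustrative only, not V8's EnumSet):

#include <cstdint>

enum class Feature : uint8_t { kA, kB, kC };

class FeatureSet {
 public:
  constexpr FeatureSet() = default;
  constexpr bool contains(Feature f) const { return (bits_ & Mask(f)) != 0; }
  constexpr FeatureSet operator|(Feature f) const {
    return FeatureSet(bits_ | Mask(f));
  }

 private:
  constexpr explicit FeatureSet(uint32_t bits) : bits_(bits) {}
  static constexpr uint32_t Mask(Feature f) {
    return uint32_t{1} << static_cast<unsigned>(f);
  }
  uint32_t bits_ = 0;
};

// Because the operators are constexpr, membership checks can fold at compile time.
static_assert((FeatureSet{} | Feature::kA).contains(Feature::kA),
              "constexpr set operations");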
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 67746173d5..9e3a04579f 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -322,6 +322,10 @@ void* OS::GetRandomMmapAddr() {
// 42 bits of virtual addressing. Truncate to 40 bits to allow kernel chance
// to fulfill request.
raw_addr &= uint64_t{0xFFFFFF0000};
+#elif V8_TARGET_ARCH_RISCV64
+ // TODO(RISCV): We need more information from the kernel to correctly mask
+ // this address for RISC-V. https://github.com/v8-riscv/v8/issues/375
+ raw_addr &= uint64_t{0xFFFFFF0000};
#else
raw_addr &= 0x3FFFF000;
@@ -520,6 +524,8 @@ void OS::DebugBreak() {
#elif V8_HOST_ARCH_S390
// Software breakpoint instruction is 0x0001
asm volatile(".word 0x0001");
+#elif V8_HOST_ARCH_RISCV64
+ asm("ebreak");
#else
#error Unsupported host architecture.
#endif
diff --git a/deps/v8/src/baseline/DEPS b/deps/v8/src/baseline/DEPS
new file mode 100644
index 0000000000..e3cbabdcd2
--- /dev/null
+++ b/deps/v8/src/baseline/DEPS
@@ -0,0 +1,5 @@
+specific_include_rules = {
+ "baseline-compiler\.h": [
+ "+src/interpreter/interpreter-intrinsics.h",
+ ],
+}
diff --git a/deps/v8/src/baseline/OWNERS b/deps/v8/src/baseline/OWNERS
new file mode 100644
index 0000000000..f9e17a90b1
--- /dev/null
+++ b/deps/v8/src/baseline/OWNERS
@@ -0,0 +1,6 @@
+cbruni@chromium.org
+leszeks@chromium.org
+marja@chromium.org
+pthier@chromium.org
+verwaest@chromium.org
+victorgomes@chromium.org
\ No newline at end of file
diff --git a/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h b/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
new file mode 100644
index 0000000000..021df8d9cf
--- /dev/null
+++ b/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
@@ -0,0 +1,544 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_ARM64_BASELINE_ASSEMBLER_ARM64_INL_H_
+#define V8_BASELINE_ARM64_BASELINE_ASSEMBLER_ARM64_INL_H_
+
+#include "src/baseline/baseline-assembler.h"
+#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
+#include "src/codegen/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+class BaselineAssembler::ScratchRegisterScope {
+ public:
+ explicit ScratchRegisterScope(BaselineAssembler* assembler)
+ : assembler_(assembler),
+ prev_scope_(assembler->scratch_register_scope_),
+ wrapped_scope_(assembler->masm()) {
+ if (!assembler_->scratch_register_scope_) {
+ // If we haven't opened a scratch scope yet, for the first one add a
+ // couple of extra registers.
+ wrapped_scope_.Include(x14, x15);
+ }
+ assembler_->scratch_register_scope_ = this;
+ }
+ ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
+
+ Register AcquireScratch() { return wrapped_scope_.AcquireX(); }
+
+ private:
+ BaselineAssembler* assembler_;
+ ScratchRegisterScope* prev_scope_;
+ UseScratchRegisterScope wrapped_scope_;
+};
+
+// TODO(v8:11461): Unify condition names in the MacroAssembler.
+enum class Condition : uint8_t {
+ kEqual = eq,
+ kNotEqual = ne,
+
+ kLessThan = lt,
+ kGreaterThan = gt,
+ kLessThanEqual = le,
+ kGreaterThanEqual = ge,
+
+ kUnsignedLessThan = lo,
+ kUnsignedGreaterThan = hi,
+ kUnsignedLessThanEqual = ls,
+ kUnsignedGreaterThanEqual = hs,
+
+ kOverflow = vs,
+ kNoOverflow = vc,
+
+ kZero = eq,
+ kNotZero = ne,
+};
+
+inline internal::Condition AsMasmCondition(Condition cond) {
+ return static_cast<internal::Condition>(cond);
+}
+
+namespace detail {
+
+#ifdef DEBUG
+inline bool Clobbers(Register target, MemOperand op) {
+ return op.base() == target || op.regoffset() == target;
+}
+#endif
+
+} // namespace detail
+
+#define __ masm_->
+
+MemOperand BaselineAssembler::RegisterFrameOperand(
+ interpreter::Register interpreter_register) {
+ return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
+}
+MemOperand BaselineAssembler::FeedbackVectorOperand() {
+ return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
+}
+
+void BaselineAssembler::Bind(Label* label) {
+ // All baseline compiler binds on arm64 are assumed to be for jump targets.
+ __ BindJumpTarget(label);
+}
+
+void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
+ __ B(target);
+}
+void BaselineAssembler::JumpIf(Condition cc, Label* target, Label::Distance) {
+ __ B(AsMasmCondition(cc), target);
+}
+void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
+ Label* target, Label::Distance) {
+ __ JumpIfRoot(value, index, target);
+}
+void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
+ Label* target, Label::Distance) {
+ __ JumpIfNotRoot(value, index, target);
+}
+void BaselineAssembler::JumpIfSmi(Register value, Label* target,
+ Label::Distance) {
+ __ JumpIfSmi(value, target);
+}
+void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
+ Label::Distance) {
+ __ JumpIfNotSmi(value, target);
+}
+
+void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
+ ScratchRegisterScope temps(this);
+ Register temp = temps.AcquireScratch();
+ __ LoadEntryFromBuiltinIndex(builtin, temp);
+ __ Call(temp);
+}
+
+void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
+  // x17 is used to allow using "Call" (i.e. `bti c`) rather than "Jump" (i.e.
+ // `bti j`) landing pads for the tail-called code.
+ Register temp = x17;
+
+  // Make sure we don't use this register as a temporary.
+ UseScratchRegisterScope temps(masm());
+ temps.Exclude(temp);
+
+ __ LoadEntryFromBuiltinIndex(builtin, temp);
+ __ Jump(temp);
+}
+
+void BaselineAssembler::Test(Register value, int mask) {
+ __ Tst(value, Immediate(mask));
+}
+
+void BaselineAssembler::CmpObjectType(Register object,
+ InstanceType instance_type,
+ Register map) {
+ ScratchRegisterScope temps(this);
+ Register type = temps.AcquireScratch();
+ __ CompareObjectType(object, map, type, instance_type);
+}
+void BaselineAssembler::CmpInstanceType(Register map,
+ InstanceType instance_type) {
+ ScratchRegisterScope temps(this);
+ Register type = temps.AcquireScratch();
+ if (emit_debug_code()) {
+ __ AssertNotSmi(map);
+ __ CompareObjectType(map, type, type, MAP_TYPE);
+ __ Assert(eq, AbortReason::kUnexpectedValue);
+ }
+ __ CompareInstanceType(map, type, instance_type);
+}
+void BaselineAssembler::Cmp(Register value, Smi smi) { __ Cmp(value, smi); }
+void BaselineAssembler::ComparePointer(Register value, MemOperand operand) {
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ Ldr(tmp, operand);
+ __ Cmp(value, tmp);
+}
+void BaselineAssembler::SmiCompare(Register lhs, Register rhs) {
+ __ AssertSmi(lhs);
+ __ AssertSmi(rhs);
+ __ CmpTagged(lhs, rhs);
+}
+void BaselineAssembler::CompareTagged(Register value, MemOperand operand) {
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ Ldr(tmp, operand);
+ __ CmpTagged(value, tmp);
+}
+void BaselineAssembler::CompareTagged(MemOperand operand, Register value) {
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ Ldr(tmp, operand);
+ __ CmpTagged(tmp, value);
+}
+void BaselineAssembler::CompareByte(Register value, int32_t byte) {
+ __ Cmp(value, Immediate(byte));
+}
+
+void BaselineAssembler::Move(interpreter::Register output, Register source) {
+ Move(RegisterFrameOperand(output), source);
+}
+void BaselineAssembler::Move(Register output, TaggedIndex value) {
+ __ Mov(output, Immediate(value.ptr()));
+}
+void BaselineAssembler::Move(MemOperand output, Register source) {
+ __ Str(source, output);
+}
+void BaselineAssembler::Move(Register output, ExternalReference reference) {
+ __ Mov(output, Operand(reference));
+}
+void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
+ __ Mov(output, Operand(value));
+}
+void BaselineAssembler::Move(Register output, int32_t value) {
+ __ Mov(output, Immediate(value));
+}
+void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
+ __ Mov(output, source);
+}
+void BaselineAssembler::MoveSmi(Register output, Register source) {
+ __ Mov(output, source);
+}
+
+namespace detail {
+
+template <typename Arg>
+inline Register ToRegister(BaselineAssembler* basm,
+ BaselineAssembler::ScratchRegisterScope* scope,
+ Arg arg) {
+ Register reg = scope->AcquireScratch();
+ basm->Move(reg, arg);
+ return reg;
+}
+inline Register ToRegister(BaselineAssembler* basm,
+ BaselineAssembler::ScratchRegisterScope* scope,
+ Register reg) {
+ return reg;
+}
+
+template <typename... Args>
+struct CountPushHelper;
+template <>
+struct CountPushHelper<> {
+ static int Count() { return 0; }
+};
+template <typename Arg, typename... Args>
+struct CountPushHelper<Arg, Args...> {
+ static int Count(Arg arg, Args... args) {
+ return 1 + CountPushHelper<Args...>::Count(args...);
+ }
+};
+template <typename... Args>
+struct CountPushHelper<interpreter::RegisterList, Args...> {
+ static int Count(interpreter::RegisterList list, Args... args) {
+ return list.register_count() + CountPushHelper<Args...>::Count(args...);
+ }
+};
+
+template <typename... Args>
+struct PushAllHelper;
+template <typename... Args>
+inline void PushAll(BaselineAssembler* basm, Args... args) {
+ PushAllHelper<Args...>::Push(basm, args...);
+}
+template <typename... Args>
+inline void PushAllReverse(BaselineAssembler* basm, Args... args) {
+ PushAllHelper<Args...>::PushReverse(basm, args...);
+}
+
+template <>
+struct PushAllHelper<> {
+ static void Push(BaselineAssembler* basm) {}
+ static void PushReverse(BaselineAssembler* basm) {}
+};
+template <typename Arg>
+struct PushAllHelper<Arg> {
+ static void Push(BaselineAssembler* basm, Arg) { FATAL("Unaligned push"); }
+ static void PushReverse(BaselineAssembler* basm, Arg arg) {
+    // Push the padding register to round up the number of values pushed.
+ return PushAllReverse(basm, arg, padreg);
+ }
+};
+template <typename Arg1, typename Arg2, typename... Args>
+struct PushAllHelper<Arg1, Arg2, Args...> {
+ static void Push(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
+ Args... args) {
+ {
+ BaselineAssembler::ScratchRegisterScope scope(basm);
+ basm->masm()->Push(ToRegister(basm, &scope, arg1),
+ ToRegister(basm, &scope, arg2));
+ }
+ PushAll(basm, args...);
+ }
+ static void PushReverse(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
+ Args... args) {
+ PushAllReverse(basm, args...);
+ {
+ BaselineAssembler::ScratchRegisterScope scope(basm);
+ basm->masm()->Push(ToRegister(basm, &scope, arg2),
+ ToRegister(basm, &scope, arg1));
+ }
+ }
+};
+// Currently RegisterLists are always the last argument, so we don't
+// specialize for the case where they're not. We do still specialize for the
+// aligned and unaligned cases.
+template <typename Arg>
+struct PushAllHelper<Arg, interpreter::RegisterList> {
+ static void Push(BaselineAssembler* basm, Arg arg,
+ interpreter::RegisterList list) {
+ DCHECK_EQ(list.register_count() % 2, 1);
+ PushAll(basm, arg, list[0], list.PopLeft());
+ }
+ static void PushReverse(BaselineAssembler* basm, Arg arg,
+ interpreter::RegisterList list) {
+ if (list.register_count() == 0) {
+ PushAllReverse(basm, arg);
+ } else {
+ PushAllReverse(basm, arg, list[0], list.PopLeft());
+ }
+ }
+};
+template <>
+struct PushAllHelper<interpreter::RegisterList> {
+ static void Push(BaselineAssembler* basm, interpreter::RegisterList list) {
+ DCHECK_EQ(list.register_count() % 2, 0);
+ for (int reg_index = 0; reg_index < list.register_count(); reg_index += 2) {
+ PushAll(basm, list[reg_index], list[reg_index + 1]);
+ }
+ }
+ static void PushReverse(BaselineAssembler* basm,
+ interpreter::RegisterList list) {
+ int reg_index = list.register_count() - 1;
+ if (reg_index % 2 == 0) {
+      // Push the padding register to round up the number of values pushed.
+ PushAllReverse(basm, list[reg_index], padreg);
+ reg_index--;
+ }
+ for (; reg_index >= 1; reg_index -= 2) {
+ PushAllReverse(basm, list[reg_index - 1], list[reg_index]);
+ }
+ }
+};
+
+template <typename... T>
+struct PopAllHelper;
+template <>
+struct PopAllHelper<> {
+ static void Pop(BaselineAssembler* basm) {}
+};
+template <>
+struct PopAllHelper<Register> {
+ static void Pop(BaselineAssembler* basm, Register reg) {
+ basm->masm()->Pop(reg, padreg);
+ }
+};
+template <typename... T>
+struct PopAllHelper<Register, Register, T...> {
+ static void Pop(BaselineAssembler* basm, Register reg1, Register reg2,
+ T... tail) {
+ basm->masm()->Pop(reg1, reg2);
+ PopAllHelper<T...>::Pop(basm, tail...);
+ }
+};
+
+} // namespace detail
+
+template <typename... T>
+int BaselineAssembler::Push(T... vals) {
+ // We have to count the pushes first, to decide whether to add padding before
+ // the first push.
+ int push_count = detail::CountPushHelper<T...>::Count(vals...);
+ if (push_count % 2 == 0) {
+ detail::PushAll(this, vals...);
+ } else {
+ detail::PushAll(this, padreg, vals...);
+ }
+ return push_count;
+}
+
+template <typename... T>
+void BaselineAssembler::PushReverse(T... vals) {
+ detail::PushAllReverse(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::Pop(T... registers) {
+ detail::PopAllHelper<T...>::Pop(this, registers...);
+}
+
+void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+ int offset) {
+ __ LoadTaggedPointerField(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
+ int offset) {
+ __ LoadTaggedSignedField(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
+ int offset) {
+ __ LoadAnyTaggedField(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadByteField(Register output, Register source,
+ int offset) {
+ __ Ldrb(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
+ Smi value) {
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ Mov(tmp, Operand(value));
+ __ StoreTaggedField(tmp, FieldMemOperand(target, offset));
+}
+void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
+ int offset,
+ Register value) {
+ __ StoreTaggedField(value, FieldMemOperand(target, offset));
+ __ RecordWriteField(target, offset, value, kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+}
+void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
+ int offset,
+ Register value) {
+ __ StoreTaggedField(value, FieldMemOperand(target, offset));
+}
+
+void BaselineAssembler::AddToInterruptBudget(int32_t weight) {
+ ScratchRegisterScope scratch_scope(this);
+ Register feedback_cell = scratch_scope.AcquireScratch();
+ LoadFunction(feedback_cell);
+ LoadTaggedPointerField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
+
+ Register interrupt_budget = scratch_scope.AcquireScratch().W();
+ __ Ldr(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ // Remember to set flags as part of the add!
+ __ Adds(interrupt_budget, interrupt_budget, weight);
+ __ Str(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+}
+
+void BaselineAssembler::AddToInterruptBudget(Register weight) {
+ ScratchRegisterScope scratch_scope(this);
+ Register feedback_cell = scratch_scope.AcquireScratch();
+ LoadFunction(feedback_cell);
+ LoadTaggedPointerField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
+
+ Register interrupt_budget = scratch_scope.AcquireScratch().W();
+ __ Ldr(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ // Remember to set flags as part of the add!
+ __ Adds(interrupt_budget, interrupt_budget, weight.W());
+ __ Str(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+}
+
+void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
+ if (SmiValuesAre31Bits()) {
+ __ Add(lhs.W(), lhs.W(), Immediate(rhs));
+ } else {
+ DCHECK(lhs.IsX());
+ __ Add(lhs, lhs, Immediate(rhs));
+ }
+}
+
+void BaselineAssembler::Switch(Register reg, int case_value_base,
+ Label** labels, int num_labels) {
+ Label fallthrough;
+ if (case_value_base > 0) {
+ __ Sub(reg, reg, Immediate(case_value_base));
+ }
+
+ // Mostly copied from code-generator-arm64.cc
+ ScratchRegisterScope scope(this);
+ Register temp = scope.AcquireScratch();
+ Label table;
+ __ Cmp(reg, num_labels);
+ JumpIf(Condition::kUnsignedGreaterThanEqual, &fallthrough);
+ __ Adr(temp, &table);
+ int entry_size_log2 = 2;
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ ++entry_size_log2; // Account for BTI.
+#endif
+ __ Add(temp, temp, Operand(reg, UXTW, entry_size_log2));
+ __ Br(temp);
+ {
+ TurboAssembler::BlockPoolsScope block_pools(masm_, num_labels * kInstrSize);
+ __ Bind(&table);
+ for (int i = 0; i < num_labels; ++i) {
+ __ JumpTarget();
+ __ B(labels[i]);
+ }
+ __ JumpTarget();
+ __ Bind(&fallthrough);
+ }
+}
+
+#undef __
+#define __ basm.
+
+void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
+ BaselineAssembler basm(masm);
+
+ Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
+ Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();
+
+ __ RecordComment("[ Update Interrupt Budget");
+ __ AddToInterruptBudget(weight);
+
+ // Use compare flags set by add
+ Label skip_interrupt_label;
+ __ JumpIf(Condition::kGreaterThanEqual, &skip_interrupt_label);
+ {
+ __ masm()->SmiTag(params_size);
+ __ masm()->Push(params_size, kInterpreterAccumulatorRegister);
+
+ __ LoadContext(kContextRegister);
+ __ LoadFunction(kJSFunctionRegister);
+ __ masm()->PushArgument(kJSFunctionRegister);
+ __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+
+ __ masm()->Pop(kInterpreterAccumulatorRegister, params_size);
+ __ masm()->SmiUntag(params_size);
+ }
+ __ RecordComment("]");
+
+ __ Bind(&skip_interrupt_label);
+
+ BaselineAssembler::ScratchRegisterScope temps(&basm);
+ Register actual_params_size = temps.AcquireScratch();
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ Move(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
+
+ // If actual is bigger than formal, then we should use it to free up the stack
+ // arguments.
+ Label corrected_args_count;
+ __ masm()->Cmp(params_size, actual_params_size);
+ __ JumpIf(Condition::kGreaterThanEqual, &corrected_args_count);
+ __ masm()->Mov(params_size, actual_params_size);
+ __ Bind(&corrected_args_count);
+
+ // Leave the frame (also dropping the register file).
+ __ masm()->LeaveFrame(StackFrame::BASELINE);
+
+ // Drop receiver + arguments.
+ __ masm()->Add(params_size, params_size, 1); // Include the receiver.
+ __ masm()->DropArguments(params_size);
+ __ masm()->Ret();
+}
+
+#undef __
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_ARM64_BASELINE_ASSEMBLER_ARM64_INL_H_
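
A recurring detail in this file is that arm64 requires sp to stay 16-byte aligned, so Push() counts its arguments first and pushes padreg when the total is odd. A tiny arithmetic sketch of that decision (it ignores the RegisterList case and is not the real helper):

// Each pushed value occupies one 8-byte slot; arm64 pushes must come in pairs
// to keep sp 16-byte aligned, so an odd count gets one padding slot in front.
template <typename... Args>
constexpr int PushedSlots(Args...) {
  return sizeof...(Args) % 2 == 0 ? static_cast<int>(sizeof...(Args))
                                  : static_cast<int>(sizeof...(Args)) + 1;
}

static_assert(PushedSlots(1, 2, 3) == 4, "odd push count is padded to even");
static_assert(PushedSlots(1, 2) == 2, "even push count needs no padding");
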
diff --git a/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h b/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h
new file mode 100644
index 0000000000..2ce652d1a0
--- /dev/null
+++ b/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h
@@ -0,0 +1,116 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_ARM64_BASELINE_COMPILER_ARM64_INL_H_
+#define V8_BASELINE_ARM64_BASELINE_COMPILER_ARM64_INL_H_
+
+#include "src/baseline/baseline-compiler.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+#define __ basm_.
+
+void BaselineCompiler::Prologue() {
+ __ masm()->Mov(kInterpreterBytecodeArrayRegister, Operand(bytecode_));
+ DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
+  // Enter the frame here, since CallBuiltin will overwrite lr.
+ __ masm()->EnterFrame(StackFrame::BASELINE);
+ CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
+ kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ kInterpreterBytecodeArrayRegister,
+ kJavaScriptCallNewTargetRegister);
+
+ __ masm()->AssertSpAligned();
+ PrologueFillFrame();
+ __ masm()->AssertSpAligned();
+}
+
+void BaselineCompiler::PrologueFillFrame() {
+ __ RecordComment("[ Fill frame");
+ // Inlined register frame fill
+ interpreter::Register new_target_or_generator_register =
+ bytecode_->incoming_new_target_or_generator_register();
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ int register_count = bytecode_->register_count();
+ // Magic value
+ const int kLoopUnrollSize = 8;
+ const int new_target_index = new_target_or_generator_register.index();
+ const bool has_new_target = new_target_index != kMaxInt;
+ // BaselineOutOfLinePrologue already pushed one undefined.
+ register_count -= 1;
+ if (has_new_target) {
+ if (new_target_index == 0) {
+ // Oops, need to fix up that undefined that BaselineOutOfLinePrologue
+ // pushed.
+ __ masm()->Poke(kJavaScriptCallNewTargetRegister, Operand(0));
+ } else {
+ DCHECK_LE(new_target_index, register_count);
+ int index = 1;
+ for (; index + 2 <= new_target_index; index += 2) {
+ __ masm()->Push(kInterpreterAccumulatorRegister,
+ kInterpreterAccumulatorRegister);
+ }
+ if (index == new_target_index) {
+ __ masm()->Push(kJavaScriptCallNewTargetRegister,
+ kInterpreterAccumulatorRegister);
+ } else {
+ DCHECK_EQ(index, new_target_index - 1);
+ __ masm()->Push(kInterpreterAccumulatorRegister,
+ kJavaScriptCallNewTargetRegister);
+ }
+ // We pushed "index" registers, minus the one the prologue pushed, plus
+ // the two registers that included new_target.
+ register_count -= (index - 1 + 2);
+ }
+ }
+ if (register_count < 2 * kLoopUnrollSize) {
+ // If the frame is small enough, just unroll the frame fill completely.
+ for (int i = 0; i < register_count; i += 2) {
+ __ masm()->Push(kInterpreterAccumulatorRegister,
+ kInterpreterAccumulatorRegister);
+ }
+ } else {
+ BaselineAssembler::ScratchRegisterScope temps(&basm_);
+ Register scratch = temps.AcquireScratch();
+
+ // Extract the first few registers to round to the unroll size.
+ int first_registers = register_count % kLoopUnrollSize;
+ for (int i = 0; i < first_registers; i += 2) {
+ __ masm()->Push(kInterpreterAccumulatorRegister,
+ kInterpreterAccumulatorRegister);
+ }
+ __ Move(scratch, register_count / kLoopUnrollSize);
+ // We enter the loop unconditionally, so make sure we need to loop at least
+ // once.
+ DCHECK_GT(register_count / kLoopUnrollSize, 0);
+ Label loop;
+ __ Bind(&loop);
+ for (int i = 0; i < kLoopUnrollSize; i += 2) {
+ __ masm()->Push(kInterpreterAccumulatorRegister,
+ kInterpreterAccumulatorRegister);
+ }
+ __ masm()->Subs(scratch, scratch, 1);
+ __ JumpIf(Condition::kGreaterThan, &loop);
+ }
+ __ RecordComment("]");
+}
+
+void BaselineCompiler::VerifyFrameSize() {
+ __ masm()->Add(x15, sp,
+ RoundUp(InterpreterFrameConstants::kFixedFrameSizeFromFp +
+ bytecode_->frame_size(),
+ 2 * kSystemPointerSize));
+ __ masm()->Cmp(x15, fp);
+ __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer);
+}
+
+#undef __
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_ARM64_BASELINE_COMPILER_ARM64_INL_H_
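
PrologueFillFrame above splits the register-frame fill into a straight-line remainder followed by an 8-wide unrolled loop, always pushing registers in pairs. A small sketch of just the split arithmetic (plain C++, no assembler; the 27-register example is made up):

constexpr int kLoopUnrollSize = 8;

struct FillPlan {
  int straight_line_pushes;  // pushes emitted before entering the loop
  int loop_iterations;       // each iteration pushes kLoopUnrollSize registers
};

constexpr FillPlan PlanFill(int register_count) {
  return {register_count % kLoopUnrollSize, register_count / kLoopUnrollSize};
}

// e.g. 27 registers left to fill: 3 pushed up front, then 3 unrolled rounds.
static_assert(PlanFill(27).straight_line_pushes == 3, "27 % 8");
static_assert(PlanFill(27).loop_iterations == 3, "27 / 8");
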
diff --git a/deps/v8/src/baseline/baseline-assembler-inl.h b/deps/v8/src/baseline/baseline-assembler-inl.h
new file mode 100644
index 0000000000..d949425a19
--- /dev/null
+++ b/deps/v8/src/baseline/baseline-assembler-inl.h
@@ -0,0 +1,134 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_BASELINE_ASSEMBLER_INL_H_
+#define V8_BASELINE_BASELINE_ASSEMBLER_INL_H_
+
+// TODO(v8:11421): Remove #if once baseline compiler is ported to other
+// architectures.
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+
+#include <type_traits>
+#include <unordered_map>
+
+#include "src/baseline/baseline-assembler.h"
+#include "src/interpreter/bytecode-register.h"
+#include "src/objects/feedback-cell.h"
+#include "src/objects/js-function.h"
+#include "src/objects/map.h"
+
+#if V8_TARGET_ARCH_X64
+#include "src/baseline/x64/baseline-assembler-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/baseline/arm64/baseline-assembler-arm64-inl.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+#define __ masm_->
+
+void BaselineAssembler::GetCode(Isolate* isolate, CodeDesc* desc) {
+ __ GetCode(isolate, desc);
+}
+int BaselineAssembler::pc_offset() const { return __ pc_offset(); }
+bool BaselineAssembler::emit_debug_code() const { return __ emit_debug_code(); }
+void BaselineAssembler::CodeEntry() const { __ CodeEntry(); }
+void BaselineAssembler::ExceptionHandler() const { __ ExceptionHandler(); }
+void BaselineAssembler::RecordComment(const char* string) {
+ __ RecordComment(string);
+}
+void BaselineAssembler::Trap() { __ Trap(); }
+void BaselineAssembler::DebugBreak() { __ DebugBreak(); }
+void BaselineAssembler::CallRuntime(Runtime::FunctionId function, int nargs) {
+ __ CallRuntime(function, nargs);
+}
+
+MemOperand BaselineAssembler::ContextOperand() {
+ return RegisterFrameOperand(interpreter::Register::current_context());
+}
+MemOperand BaselineAssembler::FunctionOperand() {
+ return RegisterFrameOperand(interpreter::Register::function_closure());
+}
+
+void BaselineAssembler::LoadMap(Register output, Register value) {
+ __ LoadMap(output, value);
+}
+void BaselineAssembler::LoadRoot(Register output, RootIndex index) {
+ __ LoadRoot(output, index);
+}
+void BaselineAssembler::LoadNativeContextSlot(Register output, uint32_t index) {
+ __ LoadNativeContextSlot(index, output);
+}
+
+void BaselineAssembler::Move(Register output, interpreter::Register source) {
+ return __ Move(output, RegisterFrameOperand(source));
+}
+void BaselineAssembler::Move(Register output, RootIndex source) {
+ return __ LoadRoot(output, source);
+}
+void BaselineAssembler::Move(Register output, Register source) {
+ __ Move(output, source);
+}
+void BaselineAssembler::Move(Register output, MemOperand operand) {
+ __ Move(output, operand);
+}
+void BaselineAssembler::Move(Register output, Smi value) {
+ __ Move(output, value);
+}
+
+void BaselineAssembler::SmiUntag(Register reg) { __ SmiUntag(reg); }
+void BaselineAssembler::SmiUntag(Register output, Register value) {
+ __ SmiUntag(output, value);
+}
+
+void BaselineAssembler::LoadFixedArrayElement(Register output, Register array,
+ int32_t index) {
+ LoadTaggedAnyField(output, array,
+ FixedArray::kHeaderSize + index * kTaggedSize);
+}
+
+void BaselineAssembler::LoadPrototype(Register prototype, Register object) {
+ __ LoadMap(prototype, object);
+ LoadTaggedPointerField(prototype, prototype, Map::kPrototypeOffset);
+}
+void BaselineAssembler::LoadContext(Register output) {
+ LoadRegister(output, interpreter::Register::current_context());
+}
+void BaselineAssembler::LoadFunction(Register output) {
+ LoadRegister(output, interpreter::Register::function_closure());
+}
+void BaselineAssembler::StoreContext(Register context) {
+ StoreRegister(interpreter::Register::current_context(), context);
+}
+void BaselineAssembler::LoadRegister(Register output,
+ interpreter::Register source) {
+ Move(output, source);
+}
+void BaselineAssembler::StoreRegister(interpreter::Register output,
+ Register value) {
+ Move(output, value);
+}
+
+SaveAccumulatorScope::SaveAccumulatorScope(BaselineAssembler* assembler)
+ : assembler_(assembler) {
+ assembler_->Push(kInterpreterAccumulatorRegister);
+}
+
+SaveAccumulatorScope::~SaveAccumulatorScope() {
+ assembler_->Pop(kInterpreterAccumulatorRegister);
+}
+
+#undef __
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif
+
+#endif // V8_BASELINE_BASELINE_ASSEMBLER_INL_H_
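
SaveAccumulatorScope above is a small RAII helper: the accumulator is pushed in the constructor and popped in the destructor, so anything inside the scope may clobber it. A hedged usage sketch (the emit function is hypothetical and assumes these baseline headers are included):

void EmitAccumulatorClobberingCall(BaselineAssembler* basm) {
  // The accumulator is live across the runtime call, so spill it around it.
  SaveAccumulatorScope save_accumulator(basm);  // pushes the accumulator
  basm->CallRuntime(Runtime::kStackGuard, 0);   // free to clobber registers
}                                               // popped again on scope exit
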
diff --git a/deps/v8/src/baseline/baseline-assembler.h b/deps/v8/src/baseline/baseline-assembler.h
new file mode 100644
index 0000000000..de6bd23911
--- /dev/null
+++ b/deps/v8/src/baseline/baseline-assembler.h
@@ -0,0 +1,187 @@
+
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_BASELINE_ASSEMBLER_H_
+#define V8_BASELINE_BASELINE_ASSEMBLER_H_
+
+// TODO(v8:11421): Remove #if once baseline compiler is ported to other
+// architectures.
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+
+#include "src/codegen/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+enum class Condition : uint8_t;
+
+class BaselineAssembler {
+ public:
+ class ScratchRegisterScope;
+
+ explicit BaselineAssembler(MacroAssembler* masm) : masm_(masm) {}
+ inline static MemOperand RegisterFrameOperand(
+ interpreter::Register interpreter_register);
+ inline MemOperand ContextOperand();
+ inline MemOperand FunctionOperand();
+ inline MemOperand FeedbackVectorOperand();
+
+ inline void GetCode(Isolate* isolate, CodeDesc* desc);
+ inline int pc_offset() const;
+ inline bool emit_debug_code() const;
+ inline void CodeEntry() const;
+ inline void ExceptionHandler() const;
+ inline void RecordComment(const char* string);
+ inline void Trap();
+ inline void DebugBreak();
+
+ inline void Bind(Label* label);
+ inline void JumpIf(Condition cc, Label* target,
+ Label::Distance distance = Label::kFar);
+ inline void Jump(Label* target, Label::Distance distance = Label::kFar);
+ inline void JumpIfRoot(Register value, RootIndex index, Label* target,
+ Label::Distance distance = Label::kFar);
+ inline void JumpIfNotRoot(Register value, RootIndex index, Label* target,
+                            Label::Distance distance = Label::kFar);
+ inline void JumpIfSmi(Register value, Label* target,
+ Label::Distance distance = Label::kFar);
+ inline void JumpIfNotSmi(Register value, Label* target,
+ Label::Distance distance = Label::kFar);
+
+ inline void Test(Register value, int mask);
+
+ inline void CmpObjectType(Register object, InstanceType instance_type,
+ Register map);
+ inline void CmpInstanceType(Register map, InstanceType instance_type);
+ inline void Cmp(Register value, Smi smi);
+ inline void ComparePointer(Register value, MemOperand operand);
+ inline Condition CheckSmi(Register value);
+ inline void SmiCompare(Register lhs, Register rhs);
+ inline void CompareTagged(Register value, MemOperand operand);
+ inline void CompareTagged(MemOperand operand, Register value);
+ inline void CompareByte(Register value, int32_t byte);
+
+ inline void LoadMap(Register output, Register value);
+ inline void LoadRoot(Register output, RootIndex index);
+ inline void LoadNativeContextSlot(Register output, uint32_t index);
+
+ inline void Move(Register output, Register source);
+ inline void Move(Register output, MemOperand operand);
+ inline void Move(Register output, Smi value);
+ inline void Move(Register output, TaggedIndex value);
+ inline void Move(Register output, interpreter::Register source);
+ inline void Move(interpreter::Register output, Register source);
+ inline void Move(Register output, RootIndex source);
+ inline void Move(MemOperand output, Register source);
+ inline void Move(Register output, ExternalReference reference);
+ inline void Move(Register output, Handle<HeapObject> value);
+ inline void Move(Register output, int32_t immediate);
+ inline void MoveMaybeSmi(Register output, Register source);
+ inline void MoveSmi(Register output, Register source);
+
+ // Push the given values, in the given order. If the stack needs alignment
+ // (looking at you Arm64), the stack is padded from the front (i.e. before the
+ // first value is pushed).
+ //
+ // This supports pushing a RegisterList as the last value -- the list is
+ // iterated and each interpreter Register is pushed.
+ //
+ // The total number of values pushed is returned. Note that this might be
+  // different from sizeof...(T), specifically if there was a RegisterList.
+ template <typename... T>
+ inline int Push(T... vals);
+
+ // Like Push(vals...), but pushes in reverse order, to support our reversed
+ // order argument JS calling convention. Doesn't return the number of
+ // arguments pushed though.
+ //
+ // Note that padding is still inserted before the first pushed value (i.e. the
+ // last value).
+ template <typename... T>
+ inline void PushReverse(T... vals);
+
+ // Pop values off the stack into the given registers.
+ //
+ // Note that this inserts into registers in the given order, i.e. in reverse
+ // order if the registers were pushed. This means that to spill registers,
+ // push and pop have to be in reverse order, e.g.
+ //
+ // Push(r1, r2, ..., rN);
+ // ClobberRegisters();
+ // Pop(rN, ..., r2, r1);
+ //
+ // On stack-alignment architectures, any padding is popped off after the last
+  // register. This matches the behaviour of Push, so the above code still
+ // works even if the number of registers doesn't match stack alignment.
+ template <typename... T>
+ inline void Pop(T... registers);
+
+ inline void CallBuiltin(Builtins::Name builtin);
+ inline void TailCallBuiltin(Builtins::Name builtin);
+ inline void CallRuntime(Runtime::FunctionId function, int nargs);
+
+ inline void LoadTaggedPointerField(Register output, Register source,
+ int offset);
+ inline void LoadTaggedSignedField(Register output, Register source,
+ int offset);
+ inline void LoadTaggedAnyField(Register output, Register source, int offset);
+ inline void LoadByteField(Register output, Register source, int offset);
+ inline void StoreTaggedSignedField(Register target, int offset, Smi value);
+ inline void StoreTaggedFieldWithWriteBarrier(Register target, int offset,
+ Register value);
+ inline void StoreTaggedFieldNoWriteBarrier(Register target, int offset,
+ Register value);
+ inline void LoadFixedArrayElement(Register output, Register array,
+ int32_t index);
+ inline void LoadPrototype(Register prototype, Register object);
+
+ // Loads the feedback cell from the function, and sets flags on add so that
+ // we can compare afterward.
+ inline void AddToInterruptBudget(int32_t weight);
+ inline void AddToInterruptBudget(Register weight);
+
+ inline void AddSmi(Register lhs, Smi rhs);
+ inline void SmiUntag(Register value);
+ inline void SmiUntag(Register output, Register value);
+
+ inline void Switch(Register reg, int case_value_base, Label** labels,
+ int num_labels);
+
+ // Register operands.
+ inline void LoadRegister(Register output, interpreter::Register source);
+ inline void StoreRegister(interpreter::Register output, Register value);
+
+ // Frame values
+ inline void LoadFunction(Register output);
+ inline void LoadContext(Register output);
+ inline void StoreContext(Register context);
+
+ inline static void EmitReturn(MacroAssembler* masm);
+
+ MacroAssembler* masm() { return masm_; }
+
+ private:
+ MacroAssembler* masm_;
+ ScratchRegisterScope* scratch_register_scope_ = nullptr;
+};
+
+class SaveAccumulatorScope final {
+ public:
+ inline explicit SaveAccumulatorScope(BaselineAssembler* assembler);
+
+ inline ~SaveAccumulatorScope();
+
+ private:
+ BaselineAssembler* assembler_;
+};
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif
+
+#endif // V8_BASELINE_BASELINE_ASSEMBLER_H_
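
AddToInterruptBudget, declared above, deliberately leaves the flags from its add instruction set, so the caller can branch on the sign of the new budget without a separate compare; this is the pattern used by EmitReturn and by UpdateInterruptBudgetAndJumpToLabel further down. A sketch of that calling pattern (the weight is made up; assumes baseline-assembler-inl.h is included):

void EmitBudgetCheck(BaselineAssembler* basm, Label* skip_interrupt) {
  // Adding a negative weight decrements FeedbackCell::interrupt_budget and
  // sets the condition flags; a still-non-negative budget means no interrupt.
  basm->AddToInterruptBudget(-42);  // hypothetical bytecode weight
  basm->JumpIf(Condition::kGreaterThanEqual, skip_interrupt);
  // Fall through: the budget went negative, so request the interrupt here.
}
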
diff --git a/deps/v8/src/baseline/baseline-compiler.cc b/deps/v8/src/baseline/baseline-compiler.cc
new file mode 100644
index 0000000000..60be8c8386
--- /dev/null
+++ b/deps/v8/src/baseline/baseline-compiler.cc
@@ -0,0 +1,2180 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// TODO(v8:11421): Remove #if once baseline compiler is ported to other
+// architectures.
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+
+#include "src/baseline/baseline-compiler.h"
+
+#include <type_traits>
+#include <unordered_map>
+
+#include "src/baseline/baseline-assembler-inl.h"
+#include "src/builtins/builtins-constructor.h"
+#include "src/builtins/builtins-descriptors.h"
+#include "src/builtins/builtins.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/compiler.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/machine-type.h"
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/common/globals.h"
+#include "src/execution/frame-constants.h"
+#include "src/interpreter/bytecode-array-accessor.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-flags.h"
+#include "src/objects/code.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/instance-type.h"
+#include "src/objects/shared-function-info-inl.h"
+#include "src/roots/roots.h"
+
+#if V8_TARGET_ARCH_X64
+#include "src/baseline/x64/baseline-compiler-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/baseline/arm64/baseline-compiler-arm64-inl.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+template <typename LocalIsolate>
+Handle<ByteArray> BytecodeOffsetTableBuilder::ToBytecodeOffsetTable(
+ LocalIsolate* isolate) {
+ if (bytes_.empty()) return isolate->factory()->empty_byte_array();
+ Handle<ByteArray> table = isolate->factory()->NewByteArray(
+ static_cast<int>(bytes_.size()), AllocationType::kOld);
+ MemCopy(table->GetDataStartAddress(), bytes_.data(), bytes_.size());
+ return table;
+}
+
+namespace detail {
+
+#ifdef DEBUG
+bool Clobbers(Register target, Register reg) { return target == reg; }
+bool Clobbers(Register target, Handle<Object> handle) { return false; }
+bool Clobbers(Register target, Smi smi) { return false; }
+bool Clobbers(Register target, TaggedIndex index) { return false; }
+bool Clobbers(Register target, int32_t imm) { return false; }
+bool Clobbers(Register target, RootIndex index) { return false; }
+bool Clobbers(Register target, interpreter::Register reg) { return false; }
+
+// We don't know what's inside machine registers or operands, so assume they
+// match.
+bool MachineTypeMatches(MachineType type, Register reg) { return true; }
+bool MachineTypeMatches(MachineType type, MemOperand reg) { return true; }
+bool MachineTypeMatches(MachineType type, Handle<HeapObject> handle) {
+ return type.IsTagged() && !type.IsTaggedSigned();
+}
+bool MachineTypeMatches(MachineType type, Smi handle) {
+ return type.IsTagged() && !type.IsTaggedPointer();
+}
+bool MachineTypeMatches(MachineType type, TaggedIndex handle) {
+ // TaggedIndex doesn't have a separate type, so check for the same type as for
+ // Smis.
+ return type.IsTagged() && !type.IsTaggedPointer();
+}
+bool MachineTypeMatches(MachineType type, int32_t imm) {
+ // 32-bit immediates can be used for 64-bit params -- they'll be
+ // zero-extended.
+ return type.representation() == MachineRepresentation::kWord32 ||
+ type.representation() == MachineRepresentation::kWord64;
+}
+bool MachineTypeMatches(MachineType type, RootIndex index) {
+ return type.IsTagged() && !type.IsTaggedSigned();
+}
+bool MachineTypeMatches(MachineType type, interpreter::Register reg) {
+ return type.IsTagged();
+}
+
+template <typename... Args>
+struct CheckArgsHelper;
+
+template <>
+struct CheckArgsHelper<> {
+ static void Check(BaselineAssembler* masm, CallInterfaceDescriptor descriptor,
+ int i) {
+ if (descriptor.AllowVarArgs()) {
+ CHECK_GE(i, descriptor.GetParameterCount());
+ } else {
+ CHECK_EQ(i, descriptor.GetParameterCount());
+ }
+ }
+};
+
+template <typename Arg, typename... Args>
+struct CheckArgsHelper<Arg, Args...> {
+ static void Check(BaselineAssembler* masm, CallInterfaceDescriptor descriptor,
+ int i, Arg arg, Args... args) {
+ if (i >= descriptor.GetParameterCount()) {
+ CHECK(descriptor.AllowVarArgs());
+ return;
+ }
+ CHECK(MachineTypeMatches(descriptor.GetParameterType(i), arg));
+ CheckArgsHelper<Args...>::Check(masm, descriptor, i + 1, args...);
+ }
+};
+
+template <typename... Args>
+struct CheckArgsHelper<interpreter::RegisterList, Args...> {
+ static void Check(BaselineAssembler* masm, CallInterfaceDescriptor descriptor,
+ int i, interpreter::RegisterList list, Args... args) {
+ for (int reg_index = 0; reg_index < list.register_count();
+ ++reg_index, ++i) {
+ if (i >= descriptor.GetParameterCount()) {
+ CHECK(descriptor.AllowVarArgs());
+ return;
+ }
+ CHECK(
+ MachineTypeMatches(descriptor.GetParameterType(i), list[reg_index]));
+ }
+ CheckArgsHelper<Args...>::Check(masm, descriptor, i, args...);
+ }
+};
+
+template <typename... Args>
+void CheckArgs(BaselineAssembler* masm, CallInterfaceDescriptor descriptor,
+ Args... args) {
+ CheckArgsHelper<Args...>::Check(masm, descriptor, 0, args...);
+}
+
+#else // DEBUG
+
+template <typename... Args>
+void CheckArgs(Args... args) {}
+
+#endif // DEBUG
+
+template <typename... Args>
+struct ArgumentSettingHelper;
+
+template <>
+struct ArgumentSettingHelper<> {
+ static void Set(BaselineAssembler* masm, CallInterfaceDescriptor descriptor,
+ int i) {}
+ static void CheckSettingDoesntClobber(Register target, int arg_index) {}
+};
+
+template <typename Arg, typename... Args>
+struct ArgumentSettingHelper<Arg, Args...> {
+ static void Set(BaselineAssembler* masm, CallInterfaceDescriptor descriptor,
+ int i, Arg arg, Args... args) {
+ if (i < descriptor.GetRegisterParameterCount()) {
+ Register target = descriptor.GetRegisterParameter(i);
+ ArgumentSettingHelper<Args...>::CheckSettingDoesntClobber(target, i + 1,
+ args...);
+ masm->Move(target, arg);
+ ArgumentSettingHelper<Args...>::Set(masm, descriptor, i + 1, args...);
+ } else if (descriptor.GetStackArgumentOrder() ==
+ StackArgumentOrder::kDefault) {
+ masm->Push(arg, args...);
+ } else {
+ masm->PushReverse(arg, args...);
+ }
+ }
+ static void CheckSettingDoesntClobber(Register target, int arg_index, Arg arg,
+ Args... args) {
+ DCHECK(!Clobbers(target, arg));
+ ArgumentSettingHelper<Args...>::CheckSettingDoesntClobber(
+ target, arg_index + 1, args...);
+ }
+};
+
+// Specialization for interpreter::RegisterList which iterates it.
+// RegisterLists are only allowed to be the last argument.
+template <>
+struct ArgumentSettingHelper<interpreter::RegisterList> {
+ static void Set(BaselineAssembler* masm, CallInterfaceDescriptor descriptor,
+ int i, interpreter::RegisterList list) {
+ // Either all the values are in machine registers, or they're all on the
+ // stack.
+ if (i < descriptor.GetRegisterParameterCount()) {
+ for (int reg_index = 0; reg_index < list.register_count();
+ ++reg_index, ++i) {
+ Register target = descriptor.GetRegisterParameter(i);
+ masm->Move(target, masm->RegisterFrameOperand(list[reg_index]));
+ }
+ } else if (descriptor.GetStackArgumentOrder() ==
+ StackArgumentOrder::kDefault) {
+ masm->Push(list);
+ } else {
+ masm->PushReverse(list);
+ }
+ }
+ static void CheckSettingDoesntClobber(Register target, int arg_index,
+ interpreter::RegisterList arg) {}
+};
+
+template <typename... Args>
+void MoveArgumentsForDescriptor(BaselineAssembler* masm,
+ CallInterfaceDescriptor descriptor,
+ Args... args) {
+ CheckArgs(masm, descriptor, args...);
+ ArgumentSettingHelper<Args...>::Set(masm, descriptor, 0, args...);
+}
+
+} // namespace detail
+
+
+BaselineCompiler::BaselineCompiler(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared_function_info,
+ Handle<BytecodeArray> bytecode)
+ : isolate_(isolate),
+ stats_(isolate->counters()->runtime_call_stats()),
+ shared_function_info_(shared_function_info),
+ bytecode_(bytecode),
+ masm_(isolate, CodeObjectRequired::kNo),
+ basm_(&masm_),
+ iterator_(bytecode_),
+ zone_(isolate->allocator(), ZONE_NAME),
+ labels_(zone_.NewArray<BaselineLabels*>(bytecode_->length())),
+ handler_offsets_(&zone_) {
+ MemsetPointer(labels_, nullptr, bytecode_->length());
+}
+
+#define __ basm_.
+
+void BaselineCompiler::GenerateCode() {
+ HandlerTable table(*bytecode_);
+ {
+ RuntimeCallTimerScope runtimeTimer(
+ stats_, RuntimeCallCounterId::kCompileBaselinePrepareHandlerOffsets);
+ for (int i = 0; i < table.NumberOfRangeEntries(); ++i) {
+ int handler_offset = table.GetRangeHandler(i);
+ handler_offsets_.insert(handler_offset);
+ }
+ }
+
+ {
+ RuntimeCallTimerScope runtimeTimer(
+ stats_, RuntimeCallCounterId::kCompileBaselinePreVisit);
+ for (; !iterator_.done(); iterator_.Advance()) {
+ PreVisitSingleBytecode();
+ }
+ iterator_.Reset();
+ }
+
+ // No code generated yet.
+ DCHECK_EQ(__ pc_offset(), 0);
+ __ CodeEntry();
+
+ {
+ RuntimeCallTimerScope runtimeTimer(
+ stats_, RuntimeCallCounterId::kCompileBaselineVisit);
+ Prologue();
+ for (; !iterator_.done(); iterator_.Advance()) {
+ VisitSingleBytecode();
+ }
+ }
+}
+
+Handle<Code> BaselineCompiler::Build(Isolate* isolate) {
+ CodeDesc desc;
+ __ GetCode(isolate, &desc);
+ // Allocate the bytecode offset table.
+ Handle<ByteArray> bytecode_offset_table =
+ bytecode_offset_table_builder_.ToBytecodeOffsetTable(isolate);
+ return Factory::CodeBuilder(isolate, desc, CodeKind::BASELINE)
+ .set_bytecode_offset_table(bytecode_offset_table)
+ .Build();
+}
+
+interpreter::Register BaselineCompiler::RegisterOperand(int operand_index) {
+ return accessor().GetRegisterOperand(operand_index);
+}
+
+void BaselineCompiler::LoadRegister(Register output, int operand_index) {
+ __ LoadRegister(output, RegisterOperand(operand_index));
+}
+
+void BaselineCompiler::StoreRegister(int operand_index, Register value) {
+ __ Move(RegisterOperand(operand_index), value);
+}
+
+void BaselineCompiler::StoreRegisterPair(int operand_index, Register val0,
+ Register val1) {
+ interpreter::Register reg0, reg1;
+ std::tie(reg0, reg1) = accessor().GetRegisterPairOperand(operand_index);
+ __ StoreRegister(reg0, val0);
+ __ StoreRegister(reg1, val1);
+}
+template <typename Type>
+Handle<Type> BaselineCompiler::Constant(int operand_index) {
+ return Handle<Type>::cast(
+ accessor().GetConstantForIndexOperand(operand_index, isolate_));
+}
+Smi BaselineCompiler::ConstantSmi(int operand_index) {
+ return accessor().GetConstantAtIndexAsSmi(operand_index);
+}
+template <typename Type>
+void BaselineCompiler::LoadConstant(Register output, int operand_index) {
+ __ Move(output, Constant<Type>(operand_index));
+}
+uint32_t BaselineCompiler::Uint(int operand_index) {
+ return accessor().GetUnsignedImmediateOperand(operand_index);
+}
+int32_t BaselineCompiler::Int(int operand_index) {
+ return accessor().GetImmediateOperand(operand_index);
+}
+uint32_t BaselineCompiler::Index(int operand_index) {
+ return accessor().GetIndexOperand(operand_index);
+}
+uint32_t BaselineCompiler::Flag(int operand_index) {
+ return accessor().GetFlagOperand(operand_index);
+}
+uint32_t BaselineCompiler::RegisterCount(int operand_index) {
+ return accessor().GetRegisterCountOperand(operand_index);
+}
+TaggedIndex BaselineCompiler::IndexAsTagged(int operand_index) {
+ return TaggedIndex::FromIntptr(Index(operand_index));
+}
+TaggedIndex BaselineCompiler::UintAsTagged(int operand_index) {
+ return TaggedIndex::FromIntptr(Uint(operand_index));
+}
+Smi BaselineCompiler::IndexAsSmi(int operand_index) {
+ return Smi::FromInt(Index(operand_index));
+}
+Smi BaselineCompiler::IntAsSmi(int operand_index) {
+ return Smi::FromInt(Int(operand_index));
+}
+Smi BaselineCompiler::FlagAsSmi(int operand_index) {
+ return Smi::FromInt(Flag(operand_index));
+}
+
+MemOperand BaselineCompiler::FeedbackVector() {
+ return __ FeedbackVectorOperand();
+}
+
+void BaselineCompiler::LoadFeedbackVector(Register output) {
+ __ RecordComment("[ LoadFeedbackVector");
+ __ Move(output, __ FeedbackVectorOperand());
+ __ RecordComment("]");
+}
+
+void BaselineCompiler::LoadClosureFeedbackArray(Register output) {
+ LoadFeedbackVector(output);
+ __ LoadTaggedPointerField(output, output,
+ FeedbackVector::kClosureFeedbackCellArrayOffset);
+}
+
+void BaselineCompiler::SelectBooleanConstant(
+ Register output, std::function<void(Label*, Label::Distance)> jump_func) {
+ Label done, set_true;
+ jump_func(&set_true, Label::kNear);
+ __ LoadRoot(output, RootIndex::kFalseValue);
+ __ Jump(&done, Label::kNear);
+ __ Bind(&set_true);
+ __ LoadRoot(output, RootIndex::kTrueValue);
+ __ Bind(&done);
+}
+
+void BaselineCompiler::AddPosition() {
+ bytecode_offset_table_builder_.AddPosition(__ pc_offset(),
+ accessor().current_offset());
+}
+
+void BaselineCompiler::PreVisitSingleBytecode() {
+ if (accessor().current_bytecode() == interpreter::Bytecode::kJumpLoop) {
+ EnsureLabels(accessor().GetJumpTargetOffset());
+ }
+}
+
+void BaselineCompiler::VisitSingleBytecode() {
+ int offset = accessor().current_offset();
+ if (labels_[offset]) {
+ // Bind labels for this offset that have already been linked to a
+ // jump (i.e. forward jumps, excluding jump tables).
+ for (auto&& label : labels_[offset]->linked) {
+ __ Bind(&label->label);
+ }
+#ifdef DEBUG
+ labels_[offset]->linked.Clear();
+#endif
+ __ Bind(&labels_[offset]->unlinked);
+ }
+
+ // Record positions of exception handlers.
+ if (handler_offsets_.find(accessor().current_offset()) !=
+ handler_offsets_.end()) {
+ AddPosition();
+ __ ExceptionHandler();
+ }
+
+ if (FLAG_code_comments) {
+ std::ostringstream str;
+ str << "[ ";
+ accessor().PrintTo(str);
+ __ RecordComment(str.str().c_str());
+ }
+
+ VerifyFrame();
+
+#ifdef V8_TRACE_UNOPTIMIZED
+ TraceBytecode(Runtime::kTraceUnoptimizedBytecodeEntry);
+#endif
+
+ switch (accessor().current_bytecode()) {
+#define BYTECODE_CASE(name, ...) \
+ case interpreter::Bytecode::k##name: \
+ Visit##name(); \
+ break;
+ BYTECODE_LIST(BYTECODE_CASE)
+#undef BYTECODE_CASE
+ }
+ __ RecordComment("]");
+
+#ifdef V8_TRACE_UNOPTIMIZED
+ TraceBytecode(Runtime::kTraceUnoptimizedBytecodeExit);
+#endif
+}
+
+void BaselineCompiler::VerifyFrame() {
+ if (__ emit_debug_code()) {
+ __ RecordComment("[ Verify frame");
+ __ RecordComment(" -- Verify frame size");
+ VerifyFrameSize();
+
+ __ RecordComment(" -- Verify feedback vector");
+ {
+ BaselineAssembler::ScratchRegisterScope temps(&basm_);
+ Register scratch = temps.AcquireScratch();
+ __ Move(scratch, __ FeedbackVectorOperand());
+ Label is_smi, is_ok;
+ __ JumpIfSmi(scratch, &is_smi);
+ __ CmpObjectType(scratch, FEEDBACK_VECTOR_TYPE, scratch);
+ __ JumpIf(Condition::kEqual, &is_ok);
+ __ Bind(&is_smi);
+ __ masm()->Abort(AbortReason::kExpectedFeedbackVector);
+ __ Bind(&is_ok);
+ }
+
+ // TODO(leszeks): More verification.
+
+ __ RecordComment("]");
+ }
+}
+
+#ifdef V8_TRACE_UNOPTIMIZED
+void BaselineCompiler::TraceBytecode(Runtime::FunctionId function_id) {
+ if (!FLAG_trace_baseline_exec) return;
+
+ __ RecordComment(function_id == Runtime::kTraceUnoptimizedBytecodeEntry
+ ? "[ Trace bytecode entry"
+ : "[ Trace bytecode exit");
+ SaveAccumulatorScope accumulator_scope(&basm_);
+ CallRuntime(function_id, bytecode_,
+ Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ accessor().current_offset()),
+ kInterpreterAccumulatorRegister);
+ __ RecordComment("]");
+}
+#endif
+
+#define DECLARE_VISITOR(name, ...) void Visit##name();
+BYTECODE_LIST(DECLARE_VISITOR)
+#undef DECLARE_VISITOR
+
+#define DECLARE_VISITOR(name, ...) \
+ void VisitIntrinsic##name(interpreter::RegisterList args);
+INTRINSICS_LIST(DECLARE_VISITOR)
+#undef DECLARE_VISITOR
+
+void BaselineCompiler::UpdateInterruptBudgetAndJumpToLabel(
+ int weight, Label* label, Label* skip_interrupt_label) {
+ __ RecordComment("[ Update Interrupt Budget");
+ __ AddToInterruptBudget(weight);
+
+ if (weight < 0) {
+ // Use compare flags set by AddToInterruptBudget
+ __ JumpIf(Condition::kGreaterThanEqual, skip_interrupt_label);
+ SaveAccumulatorScope accumulator_scope(&basm_);
+ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode,
+ __ FunctionOperand());
+ }
+ if (label) __ Jump(label);
+ __ RecordComment("]");
+}
+
+void BaselineCompiler::UpdateInterruptBudgetAndDoInterpreterJump() {
+ int weight = accessor().GetRelativeJumpTargetOffset();
+ UpdateInterruptBudgetAndJumpToLabel(weight, BuildForwardJumpLabel(), nullptr);
+}
+
+void BaselineCompiler::UpdateInterruptBudgetAndDoInterpreterJumpIfRoot(
+ RootIndex root) {
+ Label dont_jump;
+ __ JumpIfNotRoot(kInterpreterAccumulatorRegister, root, &dont_jump,
+ Label::kNear);
+ UpdateInterruptBudgetAndDoInterpreterJump();
+ __ Bind(&dont_jump);
+}
+
+void BaselineCompiler::UpdateInterruptBudgetAndDoInterpreterJumpIfNotRoot(
+ RootIndex root) {
+ Label dont_jump;
+ __ JumpIfRoot(kInterpreterAccumulatorRegister, root, &dont_jump,
+ Label::kNear);
+ UpdateInterruptBudgetAndDoInterpreterJump();
+ __ Bind(&dont_jump);
+}
+
+Label* BaselineCompiler::BuildForwardJumpLabel() {
+ int target_offset = accessor().GetJumpTargetOffset();
+ ThreadedLabel* threaded_label = zone_.New<ThreadedLabel>();
+ EnsureLabels(target_offset)->linked.Add(threaded_label);
+ return &threaded_label->label;
+}
+
+template <typename... Args>
+void BaselineCompiler::CallBuiltin(Builtins::Name builtin, Args... args) {
+ __ RecordComment("[ CallBuiltin");
+ CallInterfaceDescriptor descriptor =
+ Builtins::CallInterfaceDescriptorFor(builtin);
+ detail::MoveArgumentsForDescriptor(&basm_, descriptor, args...);
+ if (descriptor.HasContextParameter()) {
+ __ LoadContext(descriptor.ContextRegister());
+ }
+ __ CallBuiltin(builtin);
+ AddPosition();
+ __ RecordComment("]");
+}
+
+template <typename... Args>
+void BaselineCompiler::TailCallBuiltin(Builtins::Name builtin, Args... args) {
+ CallInterfaceDescriptor descriptor =
+ Builtins::CallInterfaceDescriptorFor(builtin);
+ detail::MoveArgumentsForDescriptor(&basm_, descriptor, args...);
+ if (descriptor.HasContextParameter()) {
+ __ LoadContext(descriptor.ContextRegister());
+ }
+ __ TailCallBuiltin(builtin);
+}
+
+template <typename... Args>
+void BaselineCompiler::CallRuntime(Runtime::FunctionId function, Args... args) {
+ __ LoadContext(kContextRegister);
+ int nargs = __ Push(args...);
+ __ CallRuntime(function, nargs);
+ AddPosition();
+}
+
+// Returns into kInterpreterAccumulatorRegister
+void BaselineCompiler::JumpIfToBoolean(bool do_jump_if_true, Register reg,
+ Label* label, Label::Distance distance) {
+ Label end;
+ Label::Distance end_distance = Label::kNear;
+
+ Label* true_label = do_jump_if_true ? label : &end;
+ Label::Distance true_distance = do_jump_if_true ? distance : end_distance;
+ Label* false_label = do_jump_if_true ? &end : label;
+ Label::Distance false_distance = do_jump_if_true ? end_distance : distance;
+
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+ Register to_boolean = scratch_scope.AcquireScratch();
+ {
+ SaveAccumulatorScope accumulator_scope(&basm_);
+ CallBuiltin(Builtins::kToBoolean, reg);
+ __ Move(to_boolean, kInterpreterAccumulatorRegister);
+ }
+ __ JumpIfRoot(to_boolean, RootIndex::kTrueValue, true_label, true_distance);
+ if (false_label != &end) __ Jump(false_label, false_distance);
+
+ __ Bind(&end);
+}
+
+void BaselineCompiler::VisitLdaZero() {
+ __ Move(kInterpreterAccumulatorRegister, Smi::FromInt(0));
+}
+
+void BaselineCompiler::VisitLdaSmi() {
+ Smi constant = Smi::FromInt(accessor().GetImmediateOperand(0));
+ __ Move(kInterpreterAccumulatorRegister, constant);
+}
+
+void BaselineCompiler::VisitLdaUndefined() {
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+}
+
+void BaselineCompiler::VisitLdaNull() {
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kNullValue);
+}
+
+void BaselineCompiler::VisitLdaTheHole() {
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTheHoleValue);
+}
+
+void BaselineCompiler::VisitLdaTrue() {
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
+}
+
+void BaselineCompiler::VisitLdaFalse() {
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
+}
+
+void BaselineCompiler::VisitLdaConstant() {
+ LoadConstant<HeapObject>(kInterpreterAccumulatorRegister, 0);
+}
+
+void BaselineCompiler::VisitLdaGlobal() {
+ CallBuiltin(Builtins::kLoadGlobalICBaseline,
+ Constant<Name>(0), // name
+ IndexAsTagged(1)); // slot
+}
+
+void BaselineCompiler::VisitLdaGlobalInsideTypeof() {
+ CallBuiltin(Builtins::kLoadGlobalICInsideTypeofBaseline,
+ Constant<Name>(0), // name
+ IndexAsTagged(1)); // slot
+}
+
+void BaselineCompiler::VisitStaGlobal() {
+ CallBuiltin(Builtins::kStoreGlobalICBaseline,
+ Constant<Name>(0), // name
+ kInterpreterAccumulatorRegister, // value
+ IndexAsTagged(1)); // slot
+}
+
+void BaselineCompiler::VisitPushContext() {
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+ Register context = scratch_scope.AcquireScratch();
+ __ LoadContext(context);
+ __ StoreContext(kInterpreterAccumulatorRegister);
+ StoreRegister(0, context);
+}
+
+void BaselineCompiler::VisitPopContext() {
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+ Register context = scratch_scope.AcquireScratch();
+ LoadRegister(context, 0);
+ __ StoreContext(context);
+}
+
+void BaselineCompiler::VisitLdaContextSlot() {
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+ Register context = scratch_scope.AcquireScratch();
+ LoadRegister(context, 0);
+ int depth = Uint(2);
+ for (; depth > 0; --depth) {
+ __ LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ }
+ __ LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+ Context::OffsetOfElementAt(Index(1)));
+}
+
+void BaselineCompiler::VisitLdaImmutableContextSlot() { VisitLdaContextSlot(); }
+
+void BaselineCompiler::VisitLdaCurrentContextSlot() {
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+ Register context = scratch_scope.AcquireScratch();
+ __ LoadContext(context);
+ __ LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+ Context::OffsetOfElementAt(Index(0)));
+}
+
+void BaselineCompiler::VisitLdaImmutableCurrentContextSlot() {
+ VisitLdaCurrentContextSlot();
+}
+
+void BaselineCompiler::VisitStaContextSlot() {
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+ Register context = scratch_scope.AcquireScratch();
+ LoadRegister(context, 0);
+ int depth = Uint(2);
+ for (; depth > 0; --depth) {
+ __ LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+ }
+ Register value = scratch_scope.AcquireScratch();
+ __ Move(value, kInterpreterAccumulatorRegister);
+ __ StoreTaggedFieldWithWriteBarrier(
+ context, Context::OffsetOfElementAt(accessor().GetIndexOperand(1)),
+ value);
+}
+
+void BaselineCompiler::VisitStaCurrentContextSlot() {
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+ Register context = scratch_scope.AcquireScratch();
+ __ LoadContext(context);
+ Register value = scratch_scope.AcquireScratch();
+ __ Move(value, kInterpreterAccumulatorRegister);
+ __ StoreTaggedFieldWithWriteBarrier(
+ context, Context::OffsetOfElementAt(Index(0)), value);
+}
+
+void BaselineCompiler::VisitLdaLookupSlot() {
+ CallRuntime(Runtime::kLoadLookupSlot, Constant<Name>(0));
+}
+
+void BaselineCompiler::VisitLdaLookupContextSlot() {
+ CallBuiltin(Builtins::kLookupContextBaseline, Constant<Name>(0),
+ UintAsTagged(2), IndexAsTagged(1));
+}
+
+void BaselineCompiler::VisitLdaLookupGlobalSlot() {
+ CallBuiltin(Builtins::kLookupGlobalICBaseline, Constant<Name>(0),
+ UintAsTagged(2), IndexAsTagged(1));
+}
+
+void BaselineCompiler::VisitLdaLookupSlotInsideTypeof() {
+ CallRuntime(Runtime::kLoadLookupSlotInsideTypeof, Constant<Name>(0));
+}
+
+void BaselineCompiler::VisitLdaLookupContextSlotInsideTypeof() {
+ CallBuiltin(Builtins::kLookupContextInsideTypeofBaseline, Constant<Name>(0),
+ UintAsTagged(2), IndexAsTagged(1));
+}
+
+void BaselineCompiler::VisitLdaLookupGlobalSlotInsideTypeof() {
+ CallBuiltin(Builtins::kLookupGlobalICInsideTypeofBaseline, Constant<Name>(0),
+ UintAsTagged(2), IndexAsTagged(1));
+}
+
+void BaselineCompiler::VisitStaLookupSlot() {
+ uint32_t flags = Flag(1);
+ Runtime::FunctionId function_id;
+ if (flags & interpreter::StoreLookupSlotFlags::LanguageModeBit::kMask) {
+ function_id = Runtime::kStoreLookupSlot_Strict;
+ } else if (flags &
+ interpreter::StoreLookupSlotFlags::LookupHoistingModeBit::kMask) {
+ function_id = Runtime::kStoreLookupSlot_SloppyHoisting;
+ } else {
+ function_id = Runtime::kStoreLookupSlot_Sloppy;
+ }
+ CallRuntime(function_id, Constant<Name>(0), // name
+ kInterpreterAccumulatorRegister); // value
+}
+
+void BaselineCompiler::VisitLdar() {
+ LoadRegister(kInterpreterAccumulatorRegister, 0);
+}
+
+void BaselineCompiler::VisitStar() {
+ StoreRegister(0, kInterpreterAccumulatorRegister);
+}
+
+#define SHORT_STAR_VISITOR(Name, ...) \
+ void BaselineCompiler::Visit##Name() { \
+ __ StoreRegister( \
+ interpreter::Register::FromShortStar(interpreter::Bytecode::k##Name), \
+ kInterpreterAccumulatorRegister); \
+ }
+SHORT_STAR_BYTECODE_LIST(SHORT_STAR_VISITOR)
+#undef SHORT_STAR_VISITOR
+
+void BaselineCompiler::VisitMov() {
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+ Register scratch = scratch_scope.AcquireScratch();
+ LoadRegister(scratch, 0);
+ StoreRegister(1, scratch);
+}
+
+void BaselineCompiler::VisitLdaNamedProperty() {
+ CallBuiltin(Builtins::kLoadICBaseline,
+ RegisterOperand(0), // object
+ Constant<Name>(1), // name
+ IndexAsTagged(2)); // slot
+}
+
+void BaselineCompiler::VisitLdaNamedPropertyNoFeedback() {
+ CallBuiltin(Builtins::kGetProperty, RegisterOperand(0), Constant<Name>(1));
+}
+
+void BaselineCompiler::VisitLdaNamedPropertyFromSuper() {
+ __ LoadPrototype(
+ LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister(),
+ kInterpreterAccumulatorRegister);
+
+ CallBuiltin(Builtins::kLoadSuperICBaseline,
+ RegisterOperand(0), // object
+ LoadWithReceiverAndVectorDescriptor::
+ LookupStartObjectRegister(), // lookup start
+ Constant<Name>(1), // name
+ IndexAsTagged(2)); // slot
+}
+
+void BaselineCompiler::VisitLdaKeyedProperty() {
+ CallBuiltin(Builtins::kKeyedLoadICBaseline,
+ RegisterOperand(0), // object
+ kInterpreterAccumulatorRegister, // key
+ IndexAsTagged(1)); // slot
+}
+
+void BaselineCompiler::VisitLdaModuleVariable() {
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+ Register scratch = scratch_scope.AcquireScratch();
+ __ LoadContext(scratch);
+ int depth = Uint(1);
+ for (; depth > 0; --depth) {
+ __ LoadTaggedPointerField(scratch, scratch, Context::kPreviousOffset);
+ }
+ __ LoadTaggedPointerField(scratch, scratch, Context::kExtensionOffset);
+ int cell_index = Int(0);
+ if (cell_index > 0) {
+ __ LoadTaggedPointerField(scratch, scratch,
+ SourceTextModule::kRegularExportsOffset);
+ // The actual array index is (cell_index - 1).
+ cell_index -= 1;
+ } else {
+ __ LoadTaggedPointerField(scratch, scratch,
+ SourceTextModule::kRegularImportsOffset);
+ // The actual array index is (-cell_index - 1).
+ cell_index = -cell_index - 1;
+ }
+ __ LoadFixedArrayElement(scratch, scratch, cell_index);
+ __ LoadTaggedAnyField(kInterpreterAccumulatorRegister, scratch,
+ Cell::kValueOffset);
+}
+
+void BaselineCompiler::VisitStaModuleVariable() {
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+ Register scratch = scratch_scope.AcquireScratch();
+ __ LoadContext(scratch);
+ int depth = Uint(1);
+ for (; depth > 0; --depth) {
+ __ LoadTaggedPointerField(scratch, scratch, Context::kPreviousOffset);
+ }
+ __ LoadTaggedPointerField(scratch, scratch, Context::kExtensionOffset);
+ int cell_index = Int(0);
+ if (cell_index > 0) {
+ __ LoadTaggedPointerField(scratch, scratch,
+ SourceTextModule::kRegularExportsOffset);
+ // The actual array index is (cell_index - 1).
+ cell_index -= 1;
+ __ LoadFixedArrayElement(scratch, scratch, cell_index);
+ SaveAccumulatorScope save_accumulator(&basm_);
+ __ StoreTaggedFieldWithWriteBarrier(scratch, Cell::kValueOffset,
+ kInterpreterAccumulatorRegister);
+ } else {
+    // Storing to module imports is not supported (and probably never will be).
+ CallRuntime(Runtime::kAbort,
+ Smi::FromInt(static_cast<int>(
+ AbortReason::kUnsupportedModuleOperation)));
+ __ Trap();
+ }
+}
+
+void BaselineCompiler::VisitStaNamedProperty() {
+ CallBuiltin(Builtins::kStoreICBaseline,
+ RegisterOperand(0), // object
+ Constant<Name>(1), // name
+ kInterpreterAccumulatorRegister, // value
+ IndexAsTagged(2)); // slot
+}
+
+void BaselineCompiler::VisitStaNamedPropertyNoFeedback() {
+ CallRuntime(Runtime::kSetNamedProperty,
+ RegisterOperand(0), // object
+ Constant<Name>(1), // name
+ kInterpreterAccumulatorRegister); // value
+}
+
+void BaselineCompiler::VisitStaNamedOwnProperty() {
+  // TODO(v8:11429,ishell): Currently we use StoreOwnIC only for storing
+  // properties that already exist in the boilerplate, therefore we can use
+  // StoreIC.
+ VisitStaNamedProperty();
+}
+
+void BaselineCompiler::VisitStaKeyedProperty() {
+ CallBuiltin(Builtins::kKeyedStoreICBaseline,
+ RegisterOperand(0), // object
+ RegisterOperand(1), // key
+ kInterpreterAccumulatorRegister, // value
+ IndexAsTagged(2)); // slot
+}
+
+void BaselineCompiler::VisitStaInArrayLiteral() {
+ CallBuiltin(Builtins::kStoreInArrayLiteralICBaseline,
+ RegisterOperand(0), // object
+ RegisterOperand(1), // name
+ kInterpreterAccumulatorRegister, // value
+ IndexAsTagged(2)); // slot
+}
+
+void BaselineCompiler::VisitStaDataPropertyInLiteral() {
+ CallRuntime(Runtime::kDefineDataPropertyInLiteral,
+ RegisterOperand(0), // object
+ RegisterOperand(1), // name
+ kInterpreterAccumulatorRegister, // value
+ FlagAsSmi(2), // flags
+ FeedbackVector(), // feedback vector
+ IndexAsTagged(3)); // slot
+}
+
+void BaselineCompiler::VisitCollectTypeProfile() {
+ CallRuntime(Runtime::kCollectTypeProfile,
+ IntAsSmi(0), // position
+ kInterpreterAccumulatorRegister, // value
+ FeedbackVector()); // feedback vector
+}
+
+void BaselineCompiler::VisitAdd() {
+ CallBuiltin(Builtins::kAdd_Baseline, RegisterOperand(0),
+ kInterpreterAccumulatorRegister, Index(1));
+}
+
+void BaselineCompiler::VisitSub() {
+ CallBuiltin(Builtins::kSubtract_Baseline, RegisterOperand(0),
+ kInterpreterAccumulatorRegister, Index(1));
+}
+
+void BaselineCompiler::VisitMul() {
+ CallBuiltin(Builtins::kMultiply_Baseline, RegisterOperand(0),
+ kInterpreterAccumulatorRegister, Index(1));
+}
+
+void BaselineCompiler::VisitDiv() {
+ CallBuiltin(Builtins::kDivide_Baseline, RegisterOperand(0),
+ kInterpreterAccumulatorRegister, Index(1));
+}
+
+void BaselineCompiler::VisitMod() {
+ CallBuiltin(Builtins::kModulus_Baseline, RegisterOperand(0),
+ kInterpreterAccumulatorRegister, Index(1));
+}
+
+void BaselineCompiler::VisitExp() {
+ CallBuiltin(Builtins::kExponentiate_Baseline, RegisterOperand(0),
+ kInterpreterAccumulatorRegister, Index(1));
+}
+
+void BaselineCompiler::VisitBitwiseOr() {
+ CallBuiltin(Builtins::kBitwiseOr_Baseline, RegisterOperand(0),
+ kInterpreterAccumulatorRegister, Index(1));
+}
+
+void BaselineCompiler::VisitBitwiseXor() {
+ CallBuiltin(Builtins::kBitwiseXor_Baseline, RegisterOperand(0),
+ kInterpreterAccumulatorRegister, Index(1));
+}
+
+void BaselineCompiler::VisitBitwiseAnd() {
+ CallBuiltin(Builtins::kBitwiseAnd_Baseline, RegisterOperand(0),
+ kInterpreterAccumulatorRegister, Index(1));
+}
+
+void BaselineCompiler::VisitShiftLeft() {
+ CallBuiltin(Builtins::kShiftLeft_Baseline, RegisterOperand(0),
+ kInterpreterAccumulatorRegister, Index(1));
+}
+
+void BaselineCompiler::VisitShiftRight() {
+ CallBuiltin(Builtins::kShiftRight_Baseline, RegisterOperand(0),
+ kInterpreterAccumulatorRegister, Index(1));
+}
+
+void BaselineCompiler::VisitShiftRightLogical() {
+ CallBuiltin(Builtins::kShiftRightLogical_Baseline, RegisterOperand(0),
+ kInterpreterAccumulatorRegister, Index(1));
+}
+
+void BaselineCompiler::BuildBinopWithConstant(Builtins::Name builtin_name) {
+ CallBuiltin(builtin_name, kInterpreterAccumulatorRegister, IntAsSmi(0),
+ Index(1));
+}
+
+void BaselineCompiler::VisitAddSmi() {
+ BuildBinopWithConstant(Builtins::kAdd_Baseline);
+}
+
+void BaselineCompiler::VisitSubSmi() {
+ BuildBinopWithConstant(Builtins::kSubtract_Baseline);
+}
+
+void BaselineCompiler::VisitMulSmi() {
+ BuildBinopWithConstant(Builtins::kMultiply_Baseline);
+}
+
+void BaselineCompiler::VisitDivSmi() {
+ BuildBinopWithConstant(Builtins::kDivide_Baseline);
+}
+
+void BaselineCompiler::VisitModSmi() {
+ BuildBinopWithConstant(Builtins::kModulus_Baseline);
+}
+
+void BaselineCompiler::VisitExpSmi() {
+ BuildBinopWithConstant(Builtins::kExponentiate_Baseline);
+}
+
+void BaselineCompiler::VisitBitwiseOrSmi() {
+ BuildBinopWithConstant(Builtins::kBitwiseOr_Baseline);
+}
+
+void BaselineCompiler::VisitBitwiseXorSmi() {
+ BuildBinopWithConstant(Builtins::kBitwiseXor_Baseline);
+}
+
+void BaselineCompiler::VisitBitwiseAndSmi() {
+ BuildBinopWithConstant(Builtins::kBitwiseAnd_Baseline);
+}
+
+void BaselineCompiler::VisitShiftLeftSmi() {
+ BuildBinopWithConstant(Builtins::kShiftLeft_Baseline);
+}
+
+void BaselineCompiler::VisitShiftRightSmi() {
+ BuildBinopWithConstant(Builtins::kShiftRight_Baseline);
+}
+
+void BaselineCompiler::VisitShiftRightLogicalSmi() {
+ BuildBinopWithConstant(Builtins::kShiftRightLogical_Baseline);
+}
+
+void BaselineCompiler::BuildUnop(Builtins::Name builtin_name) {
+ CallBuiltin(builtin_name,
+ kInterpreterAccumulatorRegister, // value
+ Index(0)); // slot
+}
+
+void BaselineCompiler::VisitInc() { BuildUnop(Builtins::kIncrement_Baseline); }
+
+void BaselineCompiler::VisitDec() { BuildUnop(Builtins::kDecrement_Baseline); }
+
+void BaselineCompiler::VisitNegate() { BuildUnop(Builtins::kNegate_Baseline); }
+
+void BaselineCompiler::VisitBitwiseNot() {
+ BuildUnop(Builtins::kBitwiseNot_Baseline);
+}
+
+void BaselineCompiler::VisitToBooleanLogicalNot() {
+ SelectBooleanConstant(kInterpreterAccumulatorRegister,
+ [&](Label* if_true, Label::Distance distance) {
+ JumpIfToBoolean(false,
+ kInterpreterAccumulatorRegister,
+ if_true, distance);
+ });
+}
+
+void BaselineCompiler::VisitLogicalNot() {
+ SelectBooleanConstant(kInterpreterAccumulatorRegister,
+ [&](Label* if_true, Label::Distance distance) {
+ __ JumpIfRoot(kInterpreterAccumulatorRegister,
+ RootIndex::kFalseValue, if_true,
+ distance);
+ });
+}
+
+void BaselineCompiler::VisitTypeOf() {
+ CallBuiltin(Builtins::kTypeof, kInterpreterAccumulatorRegister);
+}
+
+void BaselineCompiler::VisitDeletePropertyStrict() {
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+ Register scratch = scratch_scope.AcquireScratch();
+ __ Move(scratch, kInterpreterAccumulatorRegister);
+ CallBuiltin(Builtins::kDeleteProperty, RegisterOperand(0), scratch,
+ Smi::FromEnum(LanguageMode::kStrict));
+}
+
+void BaselineCompiler::VisitDeletePropertySloppy() {
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+ Register scratch = scratch_scope.AcquireScratch();
+ __ Move(scratch, kInterpreterAccumulatorRegister);
+ CallBuiltin(Builtins::kDeleteProperty, RegisterOperand(0), scratch,
+ Smi::FromEnum(LanguageMode::kSloppy));
+}
+
+void BaselineCompiler::VisitGetSuperConstructor() {
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+ Register prototype = scratch_scope.AcquireScratch();
+ __ LoadPrototype(prototype, kInterpreterAccumulatorRegister);
+ StoreRegister(0, prototype);
+}
+template <typename... Args>
+void BaselineCompiler::BuildCall(ConvertReceiverMode mode, uint32_t slot,
+ uint32_t arg_count, Args... args) {
+ Builtins::Name builtin;
+ switch (mode) {
+ case ConvertReceiverMode::kAny:
+ builtin = Builtins::kCall_ReceiverIsAny_Baseline;
+ break;
+ case ConvertReceiverMode::kNullOrUndefined:
+ builtin = Builtins::kCall_ReceiverIsNullOrUndefined_Baseline;
+ break;
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ builtin = Builtins::kCall_ReceiverIsNotNullOrUndefined_Baseline;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ CallBuiltin(builtin,
+ RegisterOperand(0), // kFunction
+ arg_count, // kActualArgumentsCount
+ slot, // kSlot
+ args...); // Arguments
+}
+
+void BaselineCompiler::VisitCallAnyReceiver() {
+ interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+ uint32_t arg_count = args.register_count() - 1; // Remove receiver.
+ BuildCall(ConvertReceiverMode::kAny, Index(3), arg_count, args);
+}
+
+void BaselineCompiler::VisitCallProperty() {
+ interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+ uint32_t arg_count = args.register_count() - 1; // Remove receiver.
+ BuildCall(ConvertReceiverMode::kNotNullOrUndefined, Index(3), arg_count,
+ args);
+}
+
+void BaselineCompiler::VisitCallProperty0() {
+ BuildCall(ConvertReceiverMode::kNotNullOrUndefined, Index(2), 0,
+ RegisterOperand(1));
+}
+
+void BaselineCompiler::VisitCallProperty1() {
+ BuildCall(ConvertReceiverMode::kNotNullOrUndefined, Index(3), 1,
+ RegisterOperand(1), RegisterOperand(2));
+}
+
+void BaselineCompiler::VisitCallProperty2() {
+ BuildCall(ConvertReceiverMode::kNotNullOrUndefined, Index(4), 2,
+ RegisterOperand(1), RegisterOperand(2), RegisterOperand(3));
+}
+
+void BaselineCompiler::VisitCallUndefinedReceiver() {
+ interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+ uint32_t arg_count = args.register_count();
+ BuildCall(ConvertReceiverMode::kNullOrUndefined, Index(3), arg_count,
+ RootIndex::kUndefinedValue, args);
+}
+
+void BaselineCompiler::VisitCallUndefinedReceiver0() {
+ BuildCall(ConvertReceiverMode::kNullOrUndefined, Index(1), 0,
+ RootIndex::kUndefinedValue);
+}
+
+void BaselineCompiler::VisitCallUndefinedReceiver1() {
+ BuildCall(ConvertReceiverMode::kNullOrUndefined, Index(2), 1,
+ RootIndex::kUndefinedValue, RegisterOperand(1));
+}
+
+void BaselineCompiler::VisitCallUndefinedReceiver2() {
+ BuildCall(ConvertReceiverMode::kNullOrUndefined, Index(3), 2,
+ RootIndex::kUndefinedValue, RegisterOperand(1), RegisterOperand(2));
+}
+
+void BaselineCompiler::VisitCallNoFeedback() {
+ interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+ uint32_t arg_count = args.register_count();
+ CallBuiltin(Builtins::kCall_ReceiverIsAny,
+ RegisterOperand(0), // kFunction
+ arg_count - 1, // kActualArgumentsCount
+ args);
+}
+
+void BaselineCompiler::VisitCallWithSpread() {
+ interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+
+ // Do not push the spread argument
+ interpreter::Register spread_register = args.last_register();
+ args = args.Truncate(args.register_count() - 1);
+
+ uint32_t arg_count = args.register_count() - 1; // Remove receiver.
+
+ CallBuiltin(Builtins::kCallWithSpread_Baseline,
+ RegisterOperand(0), // kFunction
+ arg_count, // kActualArgumentsCount
+ spread_register, // kSpread
+ Index(3), // kSlot
+ args);
+}
+
+void BaselineCompiler::VisitCallRuntime() {
+ CallRuntime(accessor().GetRuntimeIdOperand(0),
+ accessor().GetRegisterListOperand(1));
+}
+
+void BaselineCompiler::VisitCallRuntimeForPair() {
+ CallRuntime(accessor().GetRuntimeIdOperand(0),
+ accessor().GetRegisterListOperand(1));
+ StoreRegisterPair(3, kReturnRegister0, kReturnRegister1);
+}
+
+void BaselineCompiler::VisitCallJSRuntime() {
+ interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+ uint32_t arg_count = args.register_count();
+
+ // Load context for LoadNativeContextSlot.
+ __ LoadContext(kContextRegister);
+ __ LoadNativeContextSlot(kJavaScriptCallTargetRegister,
+ accessor().GetNativeContextIndexOperand(0));
+ CallBuiltin(Builtins::kCall_ReceiverIsNullOrUndefined,
+ kJavaScriptCallTargetRegister, // kFunction
+ arg_count, // kActualArgumentsCount
+ RootIndex::kUndefinedValue, // kReceiver
+ args);
+}
+
+void BaselineCompiler::VisitInvokeIntrinsic() {
+ Runtime::FunctionId intrinsic_id = accessor().GetIntrinsicIdOperand(0);
+ interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+ switch (intrinsic_id) {
+#define CASE(Name, ...) \
+ case Runtime::kInline##Name: \
+ VisitIntrinsic##Name(args); \
+ break;
+ INTRINSICS_LIST(CASE)
+#undef CASE
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+void BaselineCompiler::VisitIntrinsicIsJSReceiver(
+ interpreter::RegisterList args) {
+ SelectBooleanConstant(
+ kInterpreterAccumulatorRegister,
+ [&](Label* is_true, Label::Distance distance) {
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+ __ LoadRegister(kInterpreterAccumulatorRegister, args[0]);
+
+ Label is_smi;
+ __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
+
+ // If we ever added more instance types after LAST_JS_RECEIVER_TYPE,
+ // this would have to become a range check.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(kInterpreterAccumulatorRegister,
+ FIRST_JS_RECEIVER_TYPE,
+ scratch_scope.AcquireScratch());
+ __ JumpIf(Condition::kGreaterThanEqual, is_true, distance);
+
+ __ Bind(&is_smi);
+ });
+}
+
+void BaselineCompiler::VisitIntrinsicIsArray(interpreter::RegisterList args) {
+ SelectBooleanConstant(
+ kInterpreterAccumulatorRegister,
+ [&](Label* is_true, Label::Distance distance) {
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+ __ LoadRegister(kInterpreterAccumulatorRegister, args[0]);
+
+ Label is_smi;
+ __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
+
+ __ CmpObjectType(kInterpreterAccumulatorRegister, JS_ARRAY_TYPE,
+ scratch_scope.AcquireScratch());
+ __ JumpIf(Condition::kEqual, is_true, distance);
+
+ __ Bind(&is_smi);
+ });
+}
+
+void BaselineCompiler::VisitIntrinsicIsSmi(interpreter::RegisterList args) {
+ SelectBooleanConstant(
+ kInterpreterAccumulatorRegister,
+ [&](Label* is_true, Label::Distance distance) {
+ __ LoadRegister(kInterpreterAccumulatorRegister, args[0]);
+ __ JumpIfSmi(kInterpreterAccumulatorRegister, is_true, distance);
+ });
+}
+
+void BaselineCompiler::VisitIntrinsicCopyDataProperties(
+ interpreter::RegisterList args) {
+ CallBuiltin(Builtins::kCopyDataProperties, args);
+}
+
+void BaselineCompiler::VisitIntrinsicCreateIterResultObject(
+ interpreter::RegisterList args) {
+ CallBuiltin(Builtins::kCreateIterResultObject, args);
+}
+
+void BaselineCompiler::VisitIntrinsicHasProperty(
+ interpreter::RegisterList args) {
+ CallBuiltin(Builtins::kHasProperty, args);
+}
+
+void BaselineCompiler::VisitIntrinsicToString(interpreter::RegisterList args) {
+ CallBuiltin(Builtins::kToString, args);
+}
+
+void BaselineCompiler::VisitIntrinsicToLength(interpreter::RegisterList args) {
+ CallBuiltin(Builtins::kToLength, args);
+}
+
+void BaselineCompiler::VisitIntrinsicToObject(interpreter::RegisterList args) {
+ CallBuiltin(Builtins::kToObject, args);
+}
+
+void BaselineCompiler::VisitIntrinsicCall(interpreter::RegisterList args) {
+ // First argument register contains the function target.
+ __ LoadRegister(kJavaScriptCallTargetRegister, args.first_register());
+
+  // The arguments for the target function start from the second runtime call
+  // argument.
+ args = args.PopLeft();
+
+ uint32_t arg_count = args.register_count();
+ CallBuiltin(Builtins::kCall_ReceiverIsAny,
+ kJavaScriptCallTargetRegister, // kFunction
+ arg_count - 1, // kActualArgumentsCount
+ args);
+}
+
+void BaselineCompiler::VisitIntrinsicCreateAsyncFromSyncIterator(
+ interpreter::RegisterList args) {
+ CallBuiltin(Builtins::kCreateAsyncFromSyncIteratorBaseline, args[0]);
+}
+
+void BaselineCompiler::VisitIntrinsicCreateJSGeneratorObject(
+ interpreter::RegisterList args) {
+ CallBuiltin(Builtins::kCreateGeneratorObject, args);
+}
+
+void BaselineCompiler::VisitIntrinsicGeneratorGetResumeMode(
+ interpreter::RegisterList args) {
+ __ LoadRegister(kInterpreterAccumulatorRegister, args[0]);
+ __ LoadTaggedAnyField(kInterpreterAccumulatorRegister,
+ kInterpreterAccumulatorRegister,
+ JSGeneratorObject::kResumeModeOffset);
+}
+
+void BaselineCompiler::VisitIntrinsicGeneratorClose(
+ interpreter::RegisterList args) {
+ __ LoadRegister(kInterpreterAccumulatorRegister, args[0]);
+ __ StoreTaggedSignedField(kInterpreterAccumulatorRegister,
+ JSGeneratorObject::kContinuationOffset,
+ Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+}
+
+void BaselineCompiler::VisitIntrinsicGetImportMetaObject(
+ interpreter::RegisterList args) {
+ CallBuiltin(Builtins::kGetImportMetaObjectBaseline);
+}
+
+void BaselineCompiler::VisitIntrinsicAsyncFunctionAwaitCaught(
+ interpreter::RegisterList args) {
+ CallBuiltin(Builtins::kAsyncFunctionAwaitCaught, args);
+}
+
+void BaselineCompiler::VisitIntrinsicAsyncFunctionAwaitUncaught(
+ interpreter::RegisterList args) {
+ CallBuiltin(Builtins::kAsyncFunctionAwaitUncaught, args);
+}
+
+void BaselineCompiler::VisitIntrinsicAsyncFunctionEnter(
+ interpreter::RegisterList args) {
+ CallBuiltin(Builtins::kAsyncFunctionEnter, args);
+}
+
+void BaselineCompiler::VisitIntrinsicAsyncFunctionReject(
+ interpreter::RegisterList args) {
+ CallBuiltin(Builtins::kAsyncFunctionReject, args);
+}
+
+void BaselineCompiler::VisitIntrinsicAsyncFunctionResolve(
+ interpreter::RegisterList args) {
+ CallBuiltin(Builtins::kAsyncFunctionResolve, args);
+}
+
+void BaselineCompiler::VisitIntrinsicAsyncGeneratorAwaitCaught(
+ interpreter::RegisterList args) {
+ CallBuiltin(Builtins::kAsyncGeneratorAwaitCaught, args);
+}
+
+void BaselineCompiler::VisitIntrinsicAsyncGeneratorAwaitUncaught(
+ interpreter::RegisterList args) {
+ CallBuiltin(Builtins::kAsyncGeneratorAwaitUncaught, args);
+}
+
+void BaselineCompiler::VisitIntrinsicAsyncGeneratorReject(
+ interpreter::RegisterList args) {
+ CallBuiltin(Builtins::kAsyncGeneratorReject, args);
+}
+
+void BaselineCompiler::VisitIntrinsicAsyncGeneratorResolve(
+ interpreter::RegisterList args) {
+ CallBuiltin(Builtins::kAsyncGeneratorResolve, args);
+}
+
+void BaselineCompiler::VisitIntrinsicAsyncGeneratorYield(
+ interpreter::RegisterList args) {
+ CallBuiltin(Builtins::kAsyncGeneratorYield, args);
+}
+
+void BaselineCompiler::VisitConstruct() {
+ interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+ uint32_t arg_count = args.register_count();
+ CallBuiltin(Builtins::kConstruct_Baseline,
+ RegisterOperand(0), // kFunction
+ kInterpreterAccumulatorRegister, // kNewTarget
+ arg_count, // kActualArgumentsCount
+ Index(3), // kSlot
+ RootIndex::kUndefinedValue, // kReceiver
+ args);
+}
+
+void BaselineCompiler::VisitConstructWithSpread() {
+ interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+
+ // Do not push the spread argument
+ interpreter::Register spread_register = args.last_register();
+ args = args.Truncate(args.register_count() - 1);
+
+ uint32_t arg_count = args.register_count();
+
+ Register new_target =
+ Builtins::CallInterfaceDescriptorFor(
+ Builtins::kConstructWithSpread_Baseline)
+ .GetRegisterParameter(
+ ConstructWithSpread_BaselineDescriptor::kNewTarget);
+ __ Move(new_target, kInterpreterAccumulatorRegister);
+
+ CallBuiltin(Builtins::kConstructWithSpread_Baseline,
+ RegisterOperand(0), // kFunction
+ new_target, // kNewTarget
+ arg_count, // kActualArgumentsCount
+ Index(3), // kSlot
+ spread_register, // kSpread
+ RootIndex::kUndefinedValue, // kReceiver
+ args);
+}
+
+void BaselineCompiler::BuildCompare(Builtins::Name builtin_name) {
+ CallBuiltin(builtin_name, RegisterOperand(0), // lhs
+ kInterpreterAccumulatorRegister, // rhs
+ Index(1)); // slot
+}
+
+void BaselineCompiler::VisitTestEqual() {
+ BuildCompare(Builtins::kEqual_Baseline);
+}
+
+void BaselineCompiler::VisitTestEqualStrict() {
+ BuildCompare(Builtins::kStrictEqual_Baseline);
+}
+
+void BaselineCompiler::VisitTestLessThan() {
+ BuildCompare(Builtins::kLessThan_Baseline);
+}
+
+void BaselineCompiler::VisitTestGreaterThan() {
+ BuildCompare(Builtins::kGreaterThan_Baseline);
+}
+
+void BaselineCompiler::VisitTestLessThanOrEqual() {
+ BuildCompare(Builtins::kLessThanOrEqual_Baseline);
+}
+
+void BaselineCompiler::VisitTestGreaterThanOrEqual() {
+ BuildCompare(Builtins::kGreaterThanOrEqual_Baseline);
+}
+
+void BaselineCompiler::VisitTestReferenceEqual() {
+ SelectBooleanConstant(kInterpreterAccumulatorRegister,
+ [&](Label* is_true, Label::Distance distance) {
+ __ CompareTagged(
+ __ RegisterFrameOperand(RegisterOperand(0)),
+ kInterpreterAccumulatorRegister);
+ __ JumpIf(Condition::kEqual, is_true, distance);
+ });
+}
+
+void BaselineCompiler::VisitTestInstanceOf() {
+ Register callable =
+ Builtins::CallInterfaceDescriptorFor(Builtins::kInstanceOf_Baseline)
+ .GetRegisterParameter(Compare_BaselineDescriptor::kRight);
+ __ Move(callable, kInterpreterAccumulatorRegister);
+ CallBuiltin(Builtins::kInstanceOf_Baseline,
+ RegisterOperand(0), // object
+ callable, // callable
+ Index(1)); // slot
+}
+
+void BaselineCompiler::VisitTestIn() {
+ CallBuiltin(Builtins::kKeyedHasICBaseline,
+ kInterpreterAccumulatorRegister, // object
+ RegisterOperand(0), // name
+ IndexAsSmi(1)); // slot
+}
+
+void BaselineCompiler::VisitTestUndetectable() {
+ Label done, set_false;
+ __ JumpIfSmi(kInterpreterAccumulatorRegister, &set_false, Label::kNear);
+
+ Register map_bit_field = kInterpreterAccumulatorRegister;
+ __ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
+ __ LoadByteField(map_bit_field, map_bit_field, Map::kBitFieldOffset);
+ __ Test(map_bit_field, Map::Bits1::IsUndetectableBit::kMask);
+ __ JumpIf(Condition::kZero, &set_false, Label::kNear);
+
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
+ __ Jump(&done, Label::kNear);
+
+ __ Bind(&set_false);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
+ __ Bind(&done);
+}
+
+void BaselineCompiler::VisitTestNull() {
+ SelectBooleanConstant(kInterpreterAccumulatorRegister,
+ [&](Label* is_true, Label::Distance distance) {
+ __ JumpIfRoot(kInterpreterAccumulatorRegister,
+ RootIndex::kNullValue, is_true,
+ distance);
+ });
+}
+
+void BaselineCompiler::VisitTestUndefined() {
+ SelectBooleanConstant(kInterpreterAccumulatorRegister,
+ [&](Label* is_true, Label::Distance distance) {
+ __ JumpIfRoot(kInterpreterAccumulatorRegister,
+ RootIndex::kUndefinedValue, is_true,
+ distance);
+ });
+}
+
+void BaselineCompiler::VisitTestTypeOf() {
+ uint32_t literal_flag = Flag(0);
+ CallBuiltin(Builtins::kTypeof, kInterpreterAccumulatorRegister);
+
+#define TYPEOF_FLAG_VALUE(type_name) \
+ static_cast< \
+ std::underlying_type<interpreter::TestTypeOfFlags::LiteralFlag>::type>( \
+ interpreter::TestTypeOfFlags::LiteralFlag::k##type_name)
+#define TYPEOF_COMPARE(type_name) \
+ SelectBooleanConstant(kInterpreterAccumulatorRegister, \
+ [&](Label* is_true, Label::Distance distance) { \
+ __ JumpIfRoot(kInterpreterAccumulatorRegister, \
+ RootIndex::k##type_name##_string, \
+ is_true, distance); \
+ });
+
+#define TYPEOF_CASE(type_upper, type_lower) \
+ case TYPEOF_FLAG_VALUE(type_upper): \
+ TYPEOF_COMPARE(type_lower); \
+ break;
+
+ switch (literal_flag) {
+ default:
+ __ Trap();
+ break;
+ TYPEOF_LITERAL_LIST(TYPEOF_CASE)
+ }
+
+#undef TYPEOF_COMPARE
+#undef TYPEOF_FLAG_VALUE
+#undef TYPEOF_CASE
+}
+
+void BaselineCompiler::VisitToName() {
+ SaveAccumulatorScope save_accumulator(&basm_);
+ CallBuiltin(Builtins::kToName, kInterpreterAccumulatorRegister);
+ StoreRegister(0, kInterpreterAccumulatorRegister);
+}
+
+void BaselineCompiler::VisitToNumber() {
+ CallBuiltin(Builtins::kToNumber_Baseline, kInterpreterAccumulatorRegister,
+ Index(0));
+}
+
+void BaselineCompiler::VisitToNumeric() {
+ CallBuiltin(Builtins::kToNumeric_Baseline, kInterpreterAccumulatorRegister,
+ Index(0));
+}
+
+void BaselineCompiler::VisitToObject() {
+ SaveAccumulatorScope save_accumulator(&basm_);
+ CallBuiltin(Builtins::kToObject, kInterpreterAccumulatorRegister);
+ StoreRegister(0, kInterpreterAccumulatorRegister);
+}
+
+void BaselineCompiler::VisitToString() {
+ CallBuiltin(Builtins::kToString, kInterpreterAccumulatorRegister);
+}
+
+void BaselineCompiler::VisitCreateRegExpLiteral() {
+ CallBuiltin(Builtins::kCreateRegExpLiteral,
+ FeedbackVector(), // feedback vector
+ IndexAsTagged(1), // slot
+ Constant<HeapObject>(0), // pattern
+ FlagAsSmi(2)); // flags
+}
+
+void BaselineCompiler::VisitCreateArrayLiteral() {
+ uint32_t flags = Flag(2);
+ if (flags &
+ interpreter::CreateArrayLiteralFlags::FastCloneSupportedBit::kMask) {
+ CallBuiltin(Builtins::kCreateShallowArrayLiteral,
+ FeedbackVector(), // feedback vector
+ IndexAsTagged(1), // slot
+ Constant<HeapObject>(0)); // constant elements
+ } else {
+ int32_t flags_raw = static_cast<int32_t>(
+ interpreter::CreateArrayLiteralFlags::FlagsBits::decode(flags));
+ CallRuntime(Runtime::kCreateArrayLiteral,
+ FeedbackVector(), // feedback vector
+ IndexAsTagged(1), // slot
+ Constant<HeapObject>(0), // constant elements
+ Smi::FromInt(flags_raw)); // flags
+ }
+}
+
+void BaselineCompiler::VisitCreateArrayFromIterable() {
+ CallBuiltin(Builtins::kIterableToListWithSymbolLookup,
+ kInterpreterAccumulatorRegister); // iterable
+}
+
+void BaselineCompiler::VisitCreateEmptyArrayLiteral() {
+ CallBuiltin(Builtins::kCreateEmptyArrayLiteral, FeedbackVector(),
+ IndexAsTagged(0));
+}
+
+void BaselineCompiler::VisitCreateObjectLiteral() {
+ uint32_t flags = Flag(2);
+ int32_t flags_raw = static_cast<int32_t>(
+ interpreter::CreateObjectLiteralFlags::FlagsBits::decode(flags));
+ if (flags &
+ interpreter::CreateObjectLiteralFlags::FastCloneSupportedBit::kMask) {
+ CallBuiltin(Builtins::kCreateShallowObjectLiteral,
+ FeedbackVector(), // feedback vector
+ IndexAsTagged(1), // slot
+ Constant<ObjectBoilerplateDescription>(0), // boilerplate
+ Smi::FromInt(flags_raw)); // flags
+ } else {
+ CallRuntime(Runtime::kCreateObjectLiteral,
+ FeedbackVector(), // feedback vector
+ IndexAsTagged(1), // slot
+ Constant<ObjectBoilerplateDescription>(0), // boilerplate
+ Smi::FromInt(flags_raw)); // flags
+ }
+}
+
+void BaselineCompiler::VisitCreateEmptyObjectLiteral() {
+ CallBuiltin(Builtins::kCreateEmptyLiteralObject);
+}
+
+void BaselineCompiler::VisitCloneObject() {
+ uint32_t flags = Flag(1);
+ int32_t raw_flags =
+ interpreter::CreateObjectLiteralFlags::FlagsBits::decode(flags);
+ CallBuiltin(Builtins::kCloneObjectICBaseline,
+ RegisterOperand(0), // source
+ Smi::FromInt(raw_flags), // flags
+ IndexAsTagged(2)); // slot
+}
+
+void BaselineCompiler::VisitGetTemplateObject() {
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+ CallBuiltin(Builtins::kGetTemplateObject,
+ shared_function_info_, // shared function info
+ Constant<HeapObject>(0), // description
+ Index(1), // slot
+ FeedbackVector()); // feedback_vector
+}
+
+void BaselineCompiler::VisitCreateClosure() {
+ Register feedback_cell =
+ Builtins::CallInterfaceDescriptorFor(Builtins::kFastNewClosure)
+ .GetRegisterParameter(FastNewClosureDescriptor::kFeedbackCell);
+ LoadClosureFeedbackArray(feedback_cell);
+ __ LoadFixedArrayElement(feedback_cell, feedback_cell, Index(1));
+
+ uint32_t flags = Flag(2);
+ if (interpreter::CreateClosureFlags::FastNewClosureBit::decode(flags)) {
+ CallBuiltin(Builtins::kFastNewClosure, Constant<SharedFunctionInfo>(0),
+ feedback_cell);
+ } else {
+ Runtime::FunctionId function_id =
+ interpreter::CreateClosureFlags::PretenuredBit::decode(flags)
+ ? Runtime::kNewClosure_Tenured
+ : Runtime::kNewClosure;
+ CallRuntime(function_id, Constant<SharedFunctionInfo>(0), feedback_cell);
+ }
+}
+
+void BaselineCompiler::VisitCreateBlockContext() {
+ CallRuntime(Runtime::kPushBlockContext, Constant<ScopeInfo>(0));
+}
+
+void BaselineCompiler::VisitCreateCatchContext() {
+ CallRuntime(Runtime::kPushCatchContext,
+ RegisterOperand(0), // exception
+ Constant<ScopeInfo>(1));
+}
+
+void BaselineCompiler::VisitCreateFunctionContext() {
+ Handle<ScopeInfo> info = Constant<ScopeInfo>(0);
+ uint32_t slot_count = Uint(1);
+ if (slot_count < static_cast<uint32_t>(
+ ConstructorBuiltins::MaximumFunctionContextSlots())) {
+ DCHECK_EQ(info->scope_type(), ScopeType::FUNCTION_SCOPE);
+ CallBuiltin(Builtins::kFastNewFunctionContextFunction, info, slot_count);
+ } else {
+ CallRuntime(Runtime::kNewFunctionContext, Constant<ScopeInfo>(0));
+ }
+}
+
+void BaselineCompiler::VisitCreateEvalContext() {
+ Handle<ScopeInfo> info = Constant<ScopeInfo>(0);
+ uint32_t slot_count = Uint(1);
+ if (slot_count < static_cast<uint32_t>(
+ ConstructorBuiltins::MaximumFunctionContextSlots())) {
+ DCHECK_EQ(info->scope_type(), ScopeType::EVAL_SCOPE);
+ CallBuiltin(Builtins::kFastNewFunctionContextEval, info, slot_count);
+ } else {
+ CallRuntime(Runtime::kNewFunctionContext, Constant<ScopeInfo>(0));
+ }
+}
+
+void BaselineCompiler::VisitCreateWithContext() {
+ CallRuntime(Runtime::kPushWithContext,
+ RegisterOperand(0), // object
+ Constant<ScopeInfo>(1));
+}
+
+void BaselineCompiler::VisitCreateMappedArguments() {
+ if (shared_function_info_->has_duplicate_parameters()) {
+ CallRuntime(Runtime::kNewSloppyArguments, __ FunctionOperand());
+ } else {
+ CallBuiltin(Builtins::kFastNewSloppyArguments, __ FunctionOperand());
+ }
+}
+
+void BaselineCompiler::VisitCreateUnmappedArguments() {
+ CallBuiltin(Builtins::kFastNewStrictArguments, __ FunctionOperand());
+}
+
+void BaselineCompiler::VisitCreateRestParameter() {
+ CallBuiltin(Builtins::kFastNewRestArguments, __ FunctionOperand());
+}
+
+void BaselineCompiler::VisitJumpLoop() {
+ BaselineAssembler::ScratchRegisterScope scope(&basm_);
+ Register scratch = scope.AcquireScratch();
+ Label osr_not_armed;
+ __ RecordComment("[ OSR Check Armed");
+ Register osr_level = scratch;
+ __ LoadRegister(osr_level, interpreter::Register::bytecode_array());
+ __ LoadByteField(osr_level, osr_level, BytecodeArray::kOsrNestingLevelOffset);
+ int loop_depth = accessor().GetImmediateOperand(1);
+ __ CompareByte(osr_level, loop_depth);
+ __ JumpIf(Condition::kUnsignedLessThanEqual, &osr_not_armed);
+ CallBuiltin(Builtins::kBaselineOnStackReplacement);
+ __ RecordComment("]");
+
+ __ Bind(&osr_not_armed);
+ Label* label = &labels_[accessor().GetJumpTargetOffset()]->unlinked;
+ int weight = accessor().GetRelativeJumpTargetOffset();
+ // We can pass in the same label twice since it's a back edge and thus already
+ // bound.
+ DCHECK(label->is_bound());
+ UpdateInterruptBudgetAndJumpToLabel(weight, label, label);
+}
+
+void BaselineCompiler::VisitJump() {
+ UpdateInterruptBudgetAndDoInterpreterJump();
+}
+
+void BaselineCompiler::VisitJumpConstant() { VisitJump(); }
+
+void BaselineCompiler::VisitJumpIfNullConstant() { VisitJumpIfNull(); }
+
+void BaselineCompiler::VisitJumpIfNotNullConstant() { VisitJumpIfNotNull(); }
+
+void BaselineCompiler::VisitJumpIfUndefinedConstant() {
+ VisitJumpIfUndefined();
+}
+
+void BaselineCompiler::VisitJumpIfNotUndefinedConstant() {
+ VisitJumpIfNotUndefined();
+}
+
+void BaselineCompiler::VisitJumpIfUndefinedOrNullConstant() {
+ VisitJumpIfUndefinedOrNull();
+}
+
+void BaselineCompiler::VisitJumpIfTrueConstant() { VisitJumpIfTrue(); }
+
+void BaselineCompiler::VisitJumpIfFalseConstant() { VisitJumpIfFalse(); }
+
+void BaselineCompiler::VisitJumpIfJSReceiverConstant() {
+ VisitJumpIfJSReceiver();
+}
+
+void BaselineCompiler::VisitJumpIfToBooleanTrueConstant() {
+ VisitJumpIfToBooleanTrue();
+}
+
+void BaselineCompiler::VisitJumpIfToBooleanFalseConstant() {
+ VisitJumpIfToBooleanFalse();
+}
+
+void BaselineCompiler::VisitJumpIfToBooleanTrue() {
+ Label dont_jump;
+ JumpIfToBoolean(false, kInterpreterAccumulatorRegister, &dont_jump,
+ Label::kNear);
+ UpdateInterruptBudgetAndDoInterpreterJump();
+ __ Bind(&dont_jump);
+}
+
+void BaselineCompiler::VisitJumpIfToBooleanFalse() {
+ Label dont_jump;
+ JumpIfToBoolean(true, kInterpreterAccumulatorRegister, &dont_jump,
+ Label::kNear);
+ UpdateInterruptBudgetAndDoInterpreterJump();
+ __ Bind(&dont_jump);
+}
+
+void BaselineCompiler::VisitJumpIfTrue() {
+ UpdateInterruptBudgetAndDoInterpreterJumpIfRoot(RootIndex::kTrueValue);
+}
+
+void BaselineCompiler::VisitJumpIfFalse() {
+ UpdateInterruptBudgetAndDoInterpreterJumpIfRoot(RootIndex::kFalseValue);
+}
+
+void BaselineCompiler::VisitJumpIfNull() {
+ UpdateInterruptBudgetAndDoInterpreterJumpIfRoot(RootIndex::kNullValue);
+}
+
+void BaselineCompiler::VisitJumpIfNotNull() {
+ UpdateInterruptBudgetAndDoInterpreterJumpIfNotRoot(RootIndex::kNullValue);
+}
+
+void BaselineCompiler::VisitJumpIfUndefined() {
+ UpdateInterruptBudgetAndDoInterpreterJumpIfRoot(RootIndex::kUndefinedValue);
+}
+
+void BaselineCompiler::VisitJumpIfNotUndefined() {
+ UpdateInterruptBudgetAndDoInterpreterJumpIfNotRoot(
+ RootIndex::kUndefinedValue);
+}
+
+void BaselineCompiler::VisitJumpIfUndefinedOrNull() {
+ Label do_jump, dont_jump;
+ __ JumpIfRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue,
+ &do_jump);
+ __ JumpIfNotRoot(kInterpreterAccumulatorRegister, RootIndex::kNullValue,
+ &dont_jump, Label::kNear);
+ __ Bind(&do_jump);
+ UpdateInterruptBudgetAndDoInterpreterJump();
+ __ Bind(&dont_jump);
+}
+
+void BaselineCompiler::VisitJumpIfJSReceiver() {
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+
+ Label is_smi, dont_jump;
+ __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
+
+ __ CmpObjectType(kInterpreterAccumulatorRegister, FIRST_JS_RECEIVER_TYPE,
+ scratch_scope.AcquireScratch());
+ __ JumpIf(Condition::kLessThan, &dont_jump);
+ UpdateInterruptBudgetAndDoInterpreterJump();
+
+ __ Bind(&is_smi);
+ __ Bind(&dont_jump);
+}
+
+void BaselineCompiler::VisitSwitchOnSmiNoFeedback() {
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+ interpreter::JumpTableTargetOffsets offsets =
+ accessor().GetJumpTableTargetOffsets();
+
+ if (offsets.size() == 0) return;
+
+ int case_value_base = (*offsets.begin()).case_value;
+
+ std::unique_ptr<Label*[]> labels = std::make_unique<Label*[]>(offsets.size());
+ for (const interpreter::JumpTableTargetOffset& offset : offsets) {
+ labels[offset.case_value - case_value_base] =
+ &EnsureLabels(offset.target_offset)->unlinked;
+ }
+ Register case_value = scratch_scope.AcquireScratch();
+ __ SmiUntag(case_value, kInterpreterAccumulatorRegister);
+ __ Switch(case_value, case_value_base, labels.get(), offsets.size());
+}
+
+void BaselineCompiler::VisitForInEnumerate() {
+ CallBuiltin(Builtins::kForInEnumerate, RegisterOperand(0));
+}
+
+void BaselineCompiler::VisitForInPrepare() {
+ StoreRegister(0, kInterpreterAccumulatorRegister);
+ CallBuiltin(Builtins::kForInPrepare, kInterpreterAccumulatorRegister,
+ IndexAsTagged(1), FeedbackVector());
+ interpreter::Register first = accessor().GetRegisterOperand(0);
+ interpreter::Register second(first.index() + 1);
+ interpreter::Register third(first.index() + 2);
+ __ StoreRegister(second, kReturnRegister0);
+ __ StoreRegister(third, kReturnRegister1);
+}
+
+void BaselineCompiler::VisitForInContinue() {
+ SelectBooleanConstant(kInterpreterAccumulatorRegister,
+ [&](Label* is_true, Label::Distance distance) {
+ LoadRegister(kInterpreterAccumulatorRegister, 0);
+ __ CompareTagged(
+ kInterpreterAccumulatorRegister,
+ __ RegisterFrameOperand(RegisterOperand(1)));
+ __ JumpIf(Condition::kNotEqual, is_true, distance);
+ });
+}
+
+void BaselineCompiler::VisitForInNext() {
+ interpreter::Register cache_type, cache_array;
+ std::tie(cache_type, cache_array) = accessor().GetRegisterPairOperand(2);
+ CallBuiltin(Builtins::kForInNext,
+ Index(3), // vector slot
+ RegisterOperand(0), // object
+ cache_array, // cache array
+ cache_type, // cache type
+ RegisterOperand(1), // index
+ FeedbackVector()); // feedback vector
+}
+
+void BaselineCompiler::VisitForInStep() {
+ LoadRegister(kInterpreterAccumulatorRegister, 0);
+ __ AddSmi(kInterpreterAccumulatorRegister, Smi::FromInt(1));
+}
+
+void BaselineCompiler::VisitSetPendingMessage() {
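+  // Swap the value in the accumulator with the isolate's pending message
+  // object.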
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+ Register pending_message = scratch_scope.AcquireScratch();
+ __ Move(pending_message,
+ ExternalReference::address_of_pending_message_obj(isolate_));
+ Register tmp = scratch_scope.AcquireScratch();
+ __ Move(tmp, kInterpreterAccumulatorRegister);
+ __ Move(kInterpreterAccumulatorRegister, MemOperand(pending_message, 0));
+ __ Move(MemOperand(pending_message, 0), tmp);
+}
+
+void BaselineCompiler::VisitThrow() {
+ CallRuntime(Runtime::kThrow, kInterpreterAccumulatorRegister);
+ __ Trap();
+}
+
+void BaselineCompiler::VisitReThrow() {
+ CallRuntime(Runtime::kReThrow, kInterpreterAccumulatorRegister);
+ __ Trap();
+}
+
+void BaselineCompiler::VisitReturn() {
+ __ RecordComment("[ Return");
+ int profiling_weight = accessor().current_offset();
+ int parameter_count = bytecode_->parameter_count();
+
+ // We must pop all arguments from the stack (including the receiver). This
+ // number of arguments is given by max(1 + argc_reg, parameter_count).
+ int parameter_count_without_receiver =
+ parameter_count - 1; // Exclude the receiver to simplify the
+ // computation. We'll account for it at the end.
+ TailCallBuiltin(Builtins::kBaselineLeaveFrame,
+ parameter_count_without_receiver, -profiling_weight);
+ __ RecordComment("]");
+}
+
+void BaselineCompiler::VisitThrowReferenceErrorIfHole() {
+ Label done;
+ __ JumpIfNotRoot(kInterpreterAccumulatorRegister, RootIndex::kTheHoleValue,
+ &done);
+ CallRuntime(Runtime::kThrowAccessedUninitializedVariable, Constant<Name>(0));
+ // Unreachable.
+ __ Trap();
+ __ Bind(&done);
+}
+
+void BaselineCompiler::VisitThrowSuperNotCalledIfHole() {
+ Label done;
+ __ JumpIfNotRoot(kInterpreterAccumulatorRegister, RootIndex::kTheHoleValue,
+ &done);
+ CallRuntime(Runtime::kThrowSuperNotCalled);
+ // Unreachable.
+ __ Trap();
+ __ Bind(&done);
+}
+
+void BaselineCompiler::VisitThrowSuperAlreadyCalledIfNotHole() {
+ Label done;
+ __ JumpIfRoot(kInterpreterAccumulatorRegister, RootIndex::kTheHoleValue,
+ &done);
+ CallRuntime(Runtime::kThrowSuperAlreadyCalledError);
+ // Unreachable.
+ __ Trap();
+ __ Bind(&done);
+}
+
+void BaselineCompiler::VisitThrowIfNotSuperConstructor() {
+ Label done;
+
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+ Register reg = scratch_scope.AcquireScratch();
+ LoadRegister(reg, 0);
+ Register map_bit_field = scratch_scope.AcquireScratch();
+ __ LoadMap(map_bit_field, reg);
+ __ LoadByteField(map_bit_field, map_bit_field, Map::kBitFieldOffset);
+ __ Test(map_bit_field, Map::Bits1::IsConstructorBit::kMask);
+ __ JumpIf(Condition::kNotZero, &done, Label::kNear);
+
+ CallRuntime(Runtime::kThrowNotSuperConstructor, reg, __ FunctionOperand());
+
+ __ Bind(&done);
+}
+
+void BaselineCompiler::VisitSwitchOnGeneratorState() {
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+
+ Label fallthrough;
+
+ Register generator_object = scratch_scope.AcquireScratch();
+ LoadRegister(generator_object, 0);
+ __ JumpIfRoot(generator_object, RootIndex::kUndefinedValue, &fallthrough);
+
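+  // Mark the generator as executing and dispatch on its previous continuation
+  // to jump to the corresponding resume point.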
+ Register continuation = scratch_scope.AcquireScratch();
+ __ LoadTaggedAnyField(continuation, generator_object,
+ JSGeneratorObject::kContinuationOffset);
+ __ StoreTaggedSignedField(
+ generator_object, JSGeneratorObject::kContinuationOffset,
+ Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+
+ Register context = scratch_scope.AcquireScratch();
+ __ LoadTaggedAnyField(context, generator_object,
+ JSGeneratorObject::kContextOffset);
+ __ StoreContext(context);
+
+ interpreter::JumpTableTargetOffsets offsets =
+ accessor().GetJumpTableTargetOffsets();
+
+ if (0 < offsets.size()) {
+ DCHECK_EQ(0, (*offsets.begin()).case_value);
+
+ std::unique_ptr<Label*[]> labels =
+ std::make_unique<Label*[]>(offsets.size());
+ for (const interpreter::JumpTableTargetOffset& offset : offsets) {
+ labels[offset.case_value] = &EnsureLabels(offset.target_offset)->unlinked;
+ }
+ __ SmiUntag(continuation);
+ __ Switch(continuation, 0, labels.get(), offsets.size());
+ // We should never fall through this switch.
+ // TODO(v8:11429,leszeks): Maybe remove the fallthrough check in the Switch?
+ __ Trap();
+ }
+
+ __ Bind(&fallthrough);
+}
+
+void BaselineCompiler::VisitSuspendGenerator() {
+ DCHECK_EQ(accessor().GetRegisterOperand(1), interpreter::Register(0));
+ int register_count = RegisterCount(2);
+ uint32_t suspend_id = Uint(3);
+
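+  // Spill the parameters and interpreter registers into the generator's
+  // parameters-and-registers array, then record the current context, the
+  // suspend id as the continuation, and the current bytecode position before
+  // returning.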
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+ Register generator_object = scratch_scope.AcquireScratch();
+ Register parameters_and_registers_array = scratch_scope.AcquireScratch();
+ Register value = scratch_scope.AcquireScratch();
+
+ LoadRegister(generator_object, 0);
+ __ LoadTaggedPointerField(parameters_and_registers_array, generator_object,
+ JSGeneratorObject::kParametersAndRegistersOffset);
+
+ int formal_parameter_count =
+ shared_function_info_->internal_formal_parameter_count();
+ for (int i = 0; i < formal_parameter_count; ++i) {
+ __ LoadRegister(value, interpreter::Register::FromParameterIndex(
+ i + 1, bytecode_->parameter_count()));
+ __ StoreTaggedFieldWithWriteBarrier(parameters_and_registers_array,
+ FixedArray::OffsetOfElementAt(i),
+ value);
+ }
+ for (int i = 0; i < register_count; ++i) {
+ __ LoadRegister(value, interpreter::Register(i));
+ __ StoreTaggedFieldWithWriteBarrier(
+ parameters_and_registers_array,
+ FixedArray::OffsetOfElementAt(formal_parameter_count + i), value);
+ }
+
+ __ LoadContext(value);
+ __ StoreTaggedFieldWithWriteBarrier(generator_object,
+ JSGeneratorObject::kContextOffset, value);
+
+ __ StoreTaggedSignedField(generator_object,
+ JSGeneratorObject::kContinuationOffset,
+ Smi::FromInt(suspend_id));
+
+ __ StoreTaggedSignedField(
+ generator_object, JSGeneratorObject::kInputOrDebugPosOffset,
+ Smi::FromInt(BytecodeArray::kHeaderSize + accessor().current_offset()));
+ VisitReturn();
+}
+
+void BaselineCompiler::VisitResumeGenerator() {
+ DCHECK_EQ(accessor().GetRegisterOperand(1), interpreter::Register(0));
+ int register_count = RegisterCount(2);
+
+ BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
+ Register generator_object = scratch_scope.AcquireScratch();
+ Register parameters_and_registers_array = scratch_scope.AcquireScratch();
+ Register value = scratch_scope.AcquireScratch();
+
+ LoadRegister(generator_object, 0);
+ __ LoadTaggedPointerField(parameters_and_registers_array, generator_object,
+ JSGeneratorObject::kParametersAndRegistersOffset);
+
+ int formal_parameter_count =
+ shared_function_info_->internal_formal_parameter_count();
+ for (int i = 0; i < register_count; ++i) {
+ __ LoadTaggedAnyField(
+ value, parameters_and_registers_array,
+ FixedArray::OffsetOfElementAt(formal_parameter_count + i));
+ __ StoreRegister(interpreter::Register(i), value);
+ }
+
+ __ LoadTaggedAnyField(kInterpreterAccumulatorRegister, generator_object,
+ JSGeneratorObject::kInputOrDebugPosOffset);
+}
+
+void BaselineCompiler::VisitGetIterator() {
+ CallBuiltin(Builtins::kGetIteratorBaseline,
+ RegisterOperand(0), // receiver
+ IndexAsTagged(1), // load_slot
+ IndexAsTagged(2)); // call_slot
+}
+
+void BaselineCompiler::VisitDebugger() {
+ CallBuiltin(Builtins::kHandleDebuggerStatement);
+}
+
+void BaselineCompiler::VisitIncBlockCounter() {
+ CallBuiltin(Builtins::kIncBlockCounter, __ FunctionOperand(),
+ IndexAsSmi(0)); // coverage array slot
+}
+
+void BaselineCompiler::VisitAbort() {
+ CallRuntime(Runtime::kAbort, Smi::FromInt(Index(0)));
+ __ Trap();
+}
+
+void BaselineCompiler::VisitWide() {
+ // Consumed by the BytecodeArrayIterator.
+ UNREACHABLE();
+}
+
+void BaselineCompiler::VisitExtraWide() {
+ // Consumed by the BytecodeArrayIterator.
+ UNREACHABLE();
+}
+
+void BaselineCompiler::VisitIllegal() {
+ // Not emitted in valid bytecode.
+ UNREACHABLE();
+}
+#define DEBUG_BREAK(Name, ...) \
+ void BaselineCompiler::Visit##Name() { UNREACHABLE(); }
+DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK)
+#undef DEBUG_BREAK
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif
diff --git a/deps/v8/src/baseline/baseline-compiler.h b/deps/v8/src/baseline/baseline-compiler.h
new file mode 100644
index 0000000000..2ddd8fdb16
--- /dev/null
+++ b/deps/v8/src/baseline/baseline-compiler.h
@@ -0,0 +1,213 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_BASELINE_COMPILER_H_
+#define V8_BASELINE_BASELINE_COMPILER_H_
+
+// TODO(v8:11421): Remove #if once baseline compiler is ported to other
+// architectures.
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+
+#include <unordered_map>
+
+#include "src/base/logging.h"
+#include "src/base/threaded-list.h"
+#include "src/baseline/baseline-assembler.h"
+#include "src/handles/handles.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-register.h"
+#include "src/interpreter/interpreter-intrinsics.h"
+#include "src/logging/counters.h"
+#include "src/objects/map.h"
+#include "src/objects/tagged-index.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class BytecodeArray;
+
+namespace baseline {
+
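+// Builds a table mapping code pc offsets to bytecode offsets. Positions are
+// stored as deltas from the previously added position, encoded as varints.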
+class BytecodeOffsetTableBuilder {
+ public:
+ void AddPosition(size_t pc_offset, size_t bytecode_offset) {
+ WriteUint(pc_offset - previous_pc_);
+ WriteUint(bytecode_offset - previous_bytecode_);
+ previous_pc_ = pc_offset;
+ previous_bytecode_ = bytecode_offset;
+ }
+
+ template <typename LocalIsolate>
+ Handle<ByteArray> ToBytecodeOffsetTable(LocalIsolate* isolate);
+
+ private:
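+  // Encodes |value| as a base-128 varint: each byte holds the next 7 low bits
+  // of the value, with the high bit set if more bytes follow.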
+ void WriteUint(size_t value) {
+ bool has_next;
+ do {
+ uint8_t byte = value & ((1 << 7) - 1);
+ value >>= 7;
+ has_next = value != 0;
+ byte |= (has_next << 7);
+ bytes_.push_back(byte);
+ } while (has_next);
+ }
+
+ size_t previous_pc_ = 0;
+ size_t previous_bytecode_ = 0;
+ std::vector<byte> bytes_;
+};
+
+class BaselineCompiler {
+ public:
+ explicit BaselineCompiler(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared_function_info,
+ Handle<BytecodeArray> bytecode);
+
+ void GenerateCode();
+ Handle<Code> Build(Isolate* isolate);
+
+ private:
+ void Prologue();
+ void PrologueFillFrame();
+ void PrologueHandleOptimizationState(Register feedback_vector);
+
+ void PreVisitSingleBytecode();
+ void VisitSingleBytecode();
+
+ void VerifyFrame();
+ void VerifyFrameSize();
+
+ // Register operands.
+ interpreter::Register RegisterOperand(int operand_index);
+ void LoadRegister(Register output, int operand_index);
+ void StoreRegister(int operand_index, Register value);
+ void StoreRegisterPair(int operand_index, Register val0, Register val1);
+
+ // Constant pool operands.
+ template <typename Type>
+ Handle<Type> Constant(int operand_index);
+ Smi ConstantSmi(int operand_index);
+ template <typename Type>
+ void LoadConstant(Register output, int operand_index);
+
+ // Immediate value operands.
+ uint32_t Uint(int operand_index);
+ int32_t Int(int operand_index);
+ uint32_t Index(int operand_index);
+ uint32_t Flag(int operand_index);
+ uint32_t RegisterCount(int operand_index);
+ TaggedIndex IndexAsTagged(int operand_index);
+ TaggedIndex UintAsTagged(int operand_index);
+ Smi IndexAsSmi(int operand_index);
+ Smi IntAsSmi(int operand_index);
+ Smi FlagAsSmi(int operand_index);
+
+ // Jump helpers.
+ Label* NewLabel();
+ Label* BuildForwardJumpLabel();
+ void UpdateInterruptBudgetAndJumpToLabel(int weight, Label* label,
+ Label* skip_interrupt_label);
+ void UpdateInterruptBudgetAndDoInterpreterJump();
+ void UpdateInterruptBudgetAndDoInterpreterJumpIfRoot(RootIndex root);
+ void UpdateInterruptBudgetAndDoInterpreterJumpIfNotRoot(RootIndex root);
+
+ // Feedback vector.
+ MemOperand FeedbackVector();
+ void LoadFeedbackVector(Register output);
+ void LoadClosureFeedbackArray(Register output);
+
+ // Position mapping.
+ void AddPosition();
+
+ // Misc. helpers.
+
+ // Select the root boolean constant based on the jump in the given
+ // `jump_func` -- the function should jump to the given label if we want to
+ // select "true", otherwise it should fall through.
+ void SelectBooleanConstant(
+ Register output, std::function<void(Label*, Label::Distance)> jump_func);
+
+  // Jumps to |label| if the ToBoolean result of the value in |reg| matches
+  // |do_jump_if_true|.
+ void JumpIfToBoolean(bool do_jump_if_true, Register reg, Label* label,
+ Label::Distance distance = Label::kFar);
+
+ // Call helpers.
+ template <typename... Args>
+ void CallBuiltin(Builtins::Name builtin, Args... args);
+ template <typename... Args>
+ void CallRuntime(Runtime::FunctionId function, Args... args);
+
+ template <typename... Args>
+ void TailCallBuiltin(Builtins::Name builtin, Args... args);
+
+ void BuildBinop(
+ Builtins::Name builtin_name, bool fast_path = false,
+ bool check_overflow = false,
+ std::function<void(Register, Register)> instruction = [](Register,
+ Register) {});
+ void BuildUnop(Builtins::Name builtin_name);
+ void BuildCompare(Builtins::Name builtin_name);
+ void BuildBinopWithConstant(Builtins::Name builtin_name);
+
+ template <typename... Args>
+ void BuildCall(ConvertReceiverMode mode, uint32_t slot, uint32_t arg_count,
+ Args... args);
+
+#ifdef V8_TRACE_UNOPTIMIZED
+ void TraceBytecode(Runtime::FunctionId function_id);
+#endif
+
+ // Single bytecode visitors.
+#define DECLARE_VISITOR(name, ...) void Visit##name();
+ BYTECODE_LIST(DECLARE_VISITOR)
+#undef DECLARE_VISITOR
+
+ // Intrinsic call visitors.
+#define DECLARE_VISITOR(name, ...) \
+ void VisitIntrinsic##name(interpreter::RegisterList args);
+ INTRINSICS_LIST(DECLARE_VISITOR)
+#undef DECLARE_VISITOR
+
+ const interpreter::BytecodeArrayAccessor& accessor() { return iterator_; }
+
+ Isolate* isolate_;
+ RuntimeCallStats* stats_;
+ Handle<SharedFunctionInfo> shared_function_info_;
+ Handle<BytecodeArray> bytecode_;
+ MacroAssembler masm_;
+ BaselineAssembler basm_;
+ interpreter::BytecodeArrayIterator iterator_;
+ BytecodeOffsetTableBuilder bytecode_offset_table_builder_;
+ Zone zone_;
+
+ struct ThreadedLabel {
+ Label label;
+ ThreadedLabel* ptr;
+ ThreadedLabel** next() { return &ptr; }
+ };
+
+ struct BaselineLabels {
+ base::ThreadedList<ThreadedLabel> linked;
+ Label unlinked;
+ };
+
+ BaselineLabels* EnsureLabels(int i) {
+ if (labels_[i] == nullptr) {
+ labels_[i] = zone_.New<BaselineLabels>();
+ }
+ return labels_[i];
+ }
+
+ BaselineLabels** labels_;
+ ZoneSet<int> handler_offsets_;
+};
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif
+
+#endif // V8_BASELINE_BASELINE_COMPILER_H_
diff --git a/deps/v8/src/baseline/baseline.cc b/deps/v8/src/baseline/baseline.cc
new file mode 100644
index 0000000000..3229c134f4
--- /dev/null
+++ b/deps/v8/src/baseline/baseline.cc
@@ -0,0 +1,58 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/baseline/baseline.h"
+
+// TODO(v8:11421): Remove #if once baseline compiler is ported to other
+// architectures.
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+
+#include "src/baseline/baseline-assembler-inl.h"
+#include "src/baseline/baseline-compiler.h"
+#include "src/heap/factory-inl.h"
+#include "src/logging/counters.h"
+#include "src/objects/script-inl.h"
+#include "src/objects/shared-function-info-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<Code> GenerateBaselineCode(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared) {
+ RuntimeCallTimerScope runtimeTimer(isolate,
+ RuntimeCallCounterId::kCompileBaseline);
+ baseline::BaselineCompiler compiler(
+ isolate, shared, handle(shared->GetBytecodeArray(isolate), isolate));
+
+ compiler.GenerateCode();
+ Handle<Code> code = compiler.Build(isolate);
+ if (FLAG_print_code) {
+ code->Print();
+ }
+ return code;
+}
+
+void EmitReturnBaseline(MacroAssembler* masm) {
+ baseline::BaselineAssembler::EmitReturn(masm);
+}
+
+} // namespace internal
+} // namespace v8
+
+#else
+
+namespace v8 {
+namespace internal {
+
+Handle<Code> GenerateBaselineCode(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared) {
+ UNREACHABLE();
+}
+
+void EmitReturnBaseline(MacroAssembler* masm) { UNREACHABLE(); }
+
+} // namespace internal
+} // namespace v8
+
+#endif
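
A minimal sketch of how this entry point could be driven, assuming a Handle<JSFunction> `function` whose SharedFunctionInfo already holds a BytecodeArray; the installation step is illustrative only and not part of this patch:

    Handle<SharedFunctionInfo> shared(function->shared(), isolate);
    Handle<Code> baseline_code = GenerateBaselineCode(isolate, shared);
    // Attaching the generated code to the function, shown only to illustrate
    // the intended flow.
    function->set_code(*baseline_code);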
diff --git a/deps/v8/src/baseline/baseline.h b/deps/v8/src/baseline/baseline.h
new file mode 100644
index 0000000000..071c0bdbfb
--- /dev/null
+++ b/deps/v8/src/baseline/baseline.h
@@ -0,0 +1,25 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_BASELINE_H_
+#define V8_BASELINE_BASELINE_H_
+
+#include "src/handles/handles.h"
+
+namespace v8 {
+namespace internal {
+
+class Code;
+class SharedFunctionInfo;
+class MacroAssembler;
+
+Handle<Code> GenerateBaselineCode(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared);
+
+void EmitReturnBaseline(MacroAssembler* masm);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_BASELINE_H_
diff --git a/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h b/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h
new file mode 100644
index 0000000000..8fd564442e
--- /dev/null
+++ b/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h
@@ -0,0 +1,439 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_X64_BASELINE_ASSEMBLER_X64_INL_H_
+#define V8_BASELINE_X64_BASELINE_ASSEMBLER_X64_INL_H_
+
+#include "src/baseline/baseline-assembler.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/x64/register-x64.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+namespace detail {
+
+// Avoid using kScratchRegister (== r10), since the macro-assembler doesn't use
+// this scope and would conflict with it.
+static constexpr Register kScratchRegisters[] = {r8, r9, r11, r12, r14, r15};
+static constexpr int kNumScratchRegisters = arraysize(kScratchRegisters);
+
+} // namespace detail
+
+// TODO(v8:11429): Move BaselineAssembler to baseline-assembler-<arch>-inl.h
+class BaselineAssembler::ScratchRegisterScope {
+ public:
+ explicit ScratchRegisterScope(BaselineAssembler* assembler)
+ : assembler_(assembler),
+ prev_scope_(assembler->scratch_register_scope_),
+ registers_used_(prev_scope_ == nullptr ? 0
+ : prev_scope_->registers_used_) {
+ assembler_->scratch_register_scope_ = this;
+ }
+ ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
+
+ Register AcquireScratch() {
+ DCHECK_LT(registers_used_, detail::kNumScratchRegisters);
+ return detail::kScratchRegisters[registers_used_++];
+ }
+
+ private:
+ BaselineAssembler* assembler_;
+ ScratchRegisterScope* prev_scope_;
+ int registers_used_;
+};
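
A brief usage sketch of the scope above (hypothetical; `basm` is assumed to be a BaselineAssembler*): nested scopes continue allocating where the enclosing scope stopped, and each scope hands its registers back on destruction.

    {
      BaselineAssembler::ScratchRegisterScope outer(basm);
      Register a = outer.AcquireScratch();    // r8
      {
        BaselineAssembler::ScratchRegisterScope inner(basm);
        Register b = inner.AcquireScratch();  // r9, continues after `a`
      }                                       // inner scope releases r9 here
      Register c = outer.AcquireScratch();    // r9 again
    }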
+
+// TODO(v8:11461): Unify condition names in the MacroAssembler.
+enum class Condition : uint8_t {
+ kEqual = equal,
+ kNotEqual = not_equal,
+
+ kLessThan = less,
+ kGreaterThan = greater,
+ kLessThanEqual = less_equal,
+ kGreaterThanEqual = greater_equal,
+
+ kUnsignedLessThan = below,
+ kUnsignedGreaterThan = above,
+ kUnsignedLessThanEqual = below_equal,
+ kUnsignedGreaterThanEqual = above_equal,
+
+ kOverflow = overflow,
+ kNoOverflow = no_overflow,
+
+ kZero = zero,
+ kNotZero = not_zero,
+};
+
+inline internal::Condition AsMasmCondition(Condition cond) {
+ return static_cast<internal::Condition>(cond);
+}
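
Since the enumerators above are defined directly from the x64 condition codes, the translation really is just this cast; a hypothetical compile-time check of that assumption could read:

    static_assert(static_cast<internal::Condition>(Condition::kEqual) == equal,
                  "baseline conditions must alias the x64 condition codes");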
+
+namespace detail {
+
+#define __ masm_->
+
+#ifdef DEBUG
+inline bool Clobbers(Register target, MemOperand op) {
+ return op.AddressUsesRegister(target);
+}
+#endif
+
+} // namespace detail
+
+MemOperand BaselineAssembler::RegisterFrameOperand(
+ interpreter::Register interpreter_register) {
+ return MemOperand(rbp, interpreter_register.ToOperand() * kSystemPointerSize);
+}
+MemOperand BaselineAssembler::FeedbackVectorOperand() {
+ return MemOperand(rbp, BaselineFrameConstants::kFeedbackVectorFromFp);
+}
+
+void BaselineAssembler::Bind(Label* label) { __ bind(label); }
+
+void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
+ __ jmp(target, distance);
+}
+void BaselineAssembler::JumpIf(Condition cc, Label* target,
+ Label::Distance distance) {
+ __ j(AsMasmCondition(cc), target, distance);
+}
+void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
+ Label* target, Label::Distance distance) {
+ __ JumpIfRoot(value, index, target, distance);
+}
+void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
+ Label* target, Label::Distance distance) {
+ __ JumpIfNotRoot(value, index, target, distance);
+}
+void BaselineAssembler::JumpIfSmi(Register value, Label* target,
+ Label::Distance distance) {
+ __ JumpIfSmi(value, target, distance);
+}
+void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
+ Label::Distance distance) {
+ __ JumpIfNotSmi(value, target, distance);
+}
+
+void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
+ __ RecordCommentForOffHeapTrampoline(builtin);
+ __ Call(__ EntryFromBuiltinIndexAsOperand(builtin));
+ if (FLAG_code_comments) __ RecordComment("]");
+}
+
+void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
+ __ RecordCommentForOffHeapTrampoline(builtin);
+ __ Jump(__ EntryFromBuiltinIndexAsOperand(builtin));
+ if (FLAG_code_comments) __ RecordComment("]");
+}
+
+void BaselineAssembler::Test(Register value, int mask) {
+ if ((mask & 0xff) == mask) {
+ __ testb(value, Immediate(mask));
+ } else {
+ __ testl(value, Immediate(mask));
+ }
+}
+
+void BaselineAssembler::CmpObjectType(Register object,
+ InstanceType instance_type,
+ Register map) {
+ __ AssertNotSmi(object);
+ __ CmpObjectType(object, instance_type, map);
+}
+void BaselineAssembler::CmpInstanceType(Register map,
+ InstanceType instance_type) {
+ if (emit_debug_code()) {
+ __ AssertNotSmi(map);
+ __ CmpObjectType(map, MAP_TYPE, kScratchRegister);
+ __ Assert(equal, AbortReason::kUnexpectedValue);
+ }
+ __ CmpInstanceType(map, instance_type);
+}
+void BaselineAssembler::Cmp(Register value, Smi smi) { __ Cmp(value, smi); }
+void BaselineAssembler::ComparePointer(Register value, MemOperand operand) {
+ __ cmpq(value, operand);
+}
+void BaselineAssembler::SmiCompare(Register lhs, Register rhs) {
+ __ SmiCompare(lhs, rhs);
+}
+// cmp_tagged
+void BaselineAssembler::CompareTagged(Register value, MemOperand operand) {
+ __ cmp_tagged(value, operand);
+}
+void BaselineAssembler::CompareTagged(MemOperand operand, Register value) {
+ __ cmp_tagged(operand, value);
+}
+void BaselineAssembler::CompareByte(Register value, int32_t byte) {
+ __ cmpb(value, Immediate(byte));
+}
+
+void BaselineAssembler::Move(interpreter::Register output, Register source) {
+ return __ movq(RegisterFrameOperand(output), source);
+}
+void BaselineAssembler::Move(Register output, TaggedIndex value) {
+ __ Move(output, value);
+}
+void BaselineAssembler::Move(MemOperand output, Register source) {
+ __ movq(output, source);
+}
+void BaselineAssembler::Move(Register output, ExternalReference reference) {
+ __ Move(output, reference);
+}
+void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
+ __ Move(output, value);
+}
+void BaselineAssembler::Move(Register output, int32_t value) {
+ __ Move(output, Immediate(value));
+}
+void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
+ __ mov_tagged(output, source);
+}
+void BaselineAssembler::MoveSmi(Register output, Register source) {
+ __ mov_tagged(output, source);
+}
+
+namespace detail {
+inline void PushSingle(MacroAssembler* masm, RootIndex source) {
+ masm->PushRoot(source);
+}
+inline void PushSingle(MacroAssembler* masm, Register reg) { masm->Push(reg); }
+inline void PushSingle(MacroAssembler* masm, TaggedIndex value) {
+ masm->Push(value);
+}
+inline void PushSingle(MacroAssembler* masm, Smi value) { masm->Push(value); }
+inline void PushSingle(MacroAssembler* masm, Handle<HeapObject> object) {
+ masm->Push(object);
+}
+inline void PushSingle(MacroAssembler* masm, int32_t immediate) {
+ masm->Push(Immediate(immediate));
+}
+inline void PushSingle(MacroAssembler* masm, MemOperand operand) {
+ masm->Push(operand);
+}
+inline void PushSingle(MacroAssembler* masm, interpreter::Register source) {
+ return PushSingle(masm, BaselineAssembler::RegisterFrameOperand(source));
+}
+
+template <typename Arg>
+struct PushHelper {
+ static int Push(BaselineAssembler* basm, Arg arg) {
+ PushSingle(basm->masm(), arg);
+ return 1;
+ }
+ static int PushReverse(BaselineAssembler* basm, Arg arg) {
+ return Push(basm, arg);
+ }
+};
+
+template <>
+struct PushHelper<interpreter::RegisterList> {
+ static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
+ for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
+ PushSingle(basm->masm(), list[reg_index]);
+ }
+ return list.register_count();
+ }
+ static int PushReverse(BaselineAssembler* basm,
+ interpreter::RegisterList list) {
+ for (int reg_index = list.register_count() - 1; reg_index >= 0;
+ --reg_index) {
+ PushSingle(basm->masm(), list[reg_index]);
+ }
+ return list.register_count();
+ }
+};
+
+template <typename... Args>
+struct PushAllHelper;
+template <>
+struct PushAllHelper<> {
+ static int Push(BaselineAssembler* masm) { return 0; }
+ static int PushReverse(BaselineAssembler* masm) { return 0; }
+};
+template <typename Arg, typename... Args>
+struct PushAllHelper<Arg, Args...> {
+ static int Push(BaselineAssembler* masm, Arg arg, Args... args) {
+ int nargs = PushHelper<Arg>::Push(masm, arg);
+ return nargs + PushAllHelper<Args...>::Push(masm, args...);
+ }
+ static int PushReverse(BaselineAssembler* masm, Arg arg, Args... args) {
+ int nargs = PushAllHelper<Args...>::PushReverse(masm, args...);
+ return nargs + PushHelper<Arg>::PushReverse(masm, arg);
+ }
+};
+
+} // namespace detail
+
+template <typename... T>
+int BaselineAssembler::Push(T... vals) {
+ return detail::PushAllHelper<T...>::Push(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::PushReverse(T... vals) {
+ detail::PushAllHelper<T...>::PushReverse(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::Pop(T... registers) {
+ ITERATE_PACK(__ Pop(registers));
+}
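
To make the variadic helpers above concrete, a hedged sketch of what a single call expands to (`basm` is an assumed BaselineAssembler* and `args` an assumed interpreter::RegisterList):

    int slots = basm->Push(kContextRegister, Smi::FromInt(1), args);
    // conceptually performs:
    //   detail::PushSingle(masm, kContextRegister);   // 1 slot
    //   detail::PushSingle(masm, Smi::FromInt(1));    // 1 slot
    //   for (int i = 0; i < args.register_count(); ++i)
    //     detail::PushSingle(masm, args[i]);          // 1 slot each
    // and returns the total number of slots pushed. PushReverse walks the same
    // operands right-to-left, which is the order the call builtins expect.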
+
+void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+ int offset) {
+ __ LoadTaggedPointerField(output, FieldOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
+ int offset) {
+ __ LoadTaggedSignedField(output, FieldOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
+ int offset) {
+ __ LoadAnyTaggedField(output, FieldOperand(source, offset));
+}
+void BaselineAssembler::LoadByteField(Register output, Register source,
+ int offset) {
+ __ movb(output, FieldOperand(source, offset));
+}
+void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
+ Smi value) {
+ __ StoreTaggedSignedField(FieldOperand(target, offset), value);
+}
+void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
+                                                          int offset,
+                                                          Register value) {
+ BaselineAssembler::ScratchRegisterScope scratch_scope(this);
+ Register scratch = scratch_scope.AcquireScratch();
+ DCHECK_NE(target, scratch);
+ DCHECK_NE(value, scratch);
+ __ StoreTaggedField(FieldOperand(target, offset), value);
+ __ RecordWriteField(target, offset, value, scratch, kDontSaveFPRegs);
+}
+void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
+ int offset,
+ Register value) {
+ __ StoreTaggedField(FieldOperand(target, offset), value);
+}
+
+void BaselineAssembler::AddToInterruptBudget(int32_t weight) {
+ ScratchRegisterScope scratch_scope(this);
+ Register feedback_cell = scratch_scope.AcquireScratch();
+ LoadFunction(feedback_cell);
+ LoadTaggedPointerField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
+ __ addl(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
+ Immediate(weight));
+}
+
+void BaselineAssembler::AddToInterruptBudget(Register weight) {
+ ScratchRegisterScope scratch_scope(this);
+ Register feedback_cell = scratch_scope.AcquireScratch();
+ LoadFunction(feedback_cell);
+ LoadTaggedPointerField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
+ __ addl(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
+ weight);
+}
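
The budget these two overloads adjust is a raw int32 field on the function's FeedbackCell; a rough heap-side analogue of the same update (a sketch under that assumption, not code from this patch):

    // Assuming `function` is a JSFunction and `weight` an int32_t.
    FeedbackCell cell = function.raw_feedback_cell();
    cell.set_interrupt_budget(cell.interrupt_budget() + weight);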
+
+void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
+ if (rhs.value() == 0) return;
+ if (SmiValuesAre31Bits()) {
+ __ addl(lhs, Immediate(rhs));
+ } else {
+ ScratchRegisterScope scratch_scope(this);
+ Register rhs_reg = scratch_scope.AcquireScratch();
+ __ Move(rhs_reg, rhs);
+ __ addq(lhs, rhs_reg);
+ }
+}
+
+void BaselineAssembler::Switch(Register reg, int case_value_base,
+ Label** labels, int num_labels) {
+ ScratchRegisterScope scope(this);
+ Register table = scope.AcquireScratch();
+ Label fallthrough, jump_table;
+ if (case_value_base > 0) {
+ __ subq(reg, Immediate(case_value_base));
+ }
+ __ cmpq(reg, Immediate(num_labels));
+ __ j(above_equal, &fallthrough);
+ __ leaq(table, MemOperand(&jump_table));
+ __ jmp(MemOperand(table, reg, times_8, 0));
+ // Emit the jump table inline, under the assumption that it's not too big.
+ __ Align(kSystemPointerSize);
+ __ bind(&jump_table);
+ for (int i = 0; i < num_labels; ++i) {
+ __ dq(labels[i]);
+ }
+ __ bind(&fallthrough);
+}
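
A hypothetical call illustrating the helper above, assuming `basm` is a BaselineAssembler and `reg` holds the value to dispatch on:

    Label case0, case1, case2;
    Label* cases[] = {&case0, &case1, &case2};
    // Values 0..2 jump to the matching label; anything out of range falls
    // through past the emitted jump table.
    basm.Switch(reg, /*case_value_base=*/0, cases, arraysize(cases));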
+
+#undef __
+#define __ basm.
+
+void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
+ BaselineAssembler basm(masm);
+
+ Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
+ Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();
+
+ __ RecordComment("[ Update Interrupt Budget");
+ __ AddToInterruptBudget(weight);
+
+ // Use compare flags set by AddToInterruptBudget
+ Label skip_interrupt_label;
+ __ JumpIf(Condition::kGreaterThanEqual, &skip_interrupt_label);
+ {
+ __ masm()->SmiTag(params_size);
+ __ Push(params_size, kInterpreterAccumulatorRegister);
+
+ __ LoadContext(kContextRegister);
+ __ Push(MemOperand(rbp, InterpreterFrameConstants::kFunctionOffset));
+ __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+
+ __ Pop(kInterpreterAccumulatorRegister, params_size);
+ __ masm()->SmiUntag(params_size);
+ }
+ __ RecordComment("]");
+
+ __ Bind(&skip_interrupt_label);
+
+ BaselineAssembler::ScratchRegisterScope scope(&basm);
+ Register scratch = scope.AcquireScratch();
+
+ Register actual_params_size = scratch;
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ masm()->movq(actual_params_size,
+ MemOperand(rbp, StandardFrameConstants::kArgCOffset));
+
+ // If actual is bigger than formal, then we should use it to free up the stack
+ // arguments.
+ Label corrected_args_count;
+ __ masm()->cmpq(params_size, actual_params_size);
+ __ JumpIf(Condition::kGreaterThanEqual, &corrected_args_count, Label::kNear);
+ __ masm()->movq(params_size, actual_params_size);
+ __ Bind(&corrected_args_count);
+
+ // Leave the frame (also dropping the register file).
+ __ masm()->LeaveFrame(StackFrame::BASELINE);
+
+ // Drop receiver + arguments.
+ Register return_pc = scratch;
+ __ masm()->PopReturnAddressTo(return_pc);
+ __ masm()->leaq(rsp, MemOperand(rsp, params_size, times_system_pointer_size,
+ kSystemPointerSize));
+ __ masm()->PushReturnAddressFrom(return_pc);
+ __ masm()->Ret();
+}
+
+#undef __
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_X64_BASELINE_ASSEMBLER_X64_INL_H_
diff --git a/deps/v8/src/baseline/x64/baseline-compiler-x64-inl.h b/deps/v8/src/baseline/x64/baseline-compiler-x64-inl.h
new file mode 100644
index 0000000000..e4f123e8e0
--- /dev/null
+++ b/deps/v8/src/baseline/x64/baseline-compiler-x64-inl.h
@@ -0,0 +1,92 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_X64_BASELINE_COMPILER_X64_INL_H_
+#define V8_BASELINE_X64_BASELINE_COMPILER_X64_INL_H_
+
+#include "src/base/macros.h"
+#include "src/baseline/baseline-compiler.h"
+#include "src/codegen/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+#define __ basm_.
+
+void BaselineCompiler::Prologue() {
+ __ Move(kInterpreterBytecodeArrayRegister, bytecode_);
+ DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
+ CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
+ kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ kInterpreterBytecodeArrayRegister,
+ kJavaScriptCallNewTargetRegister);
+
+ PrologueFillFrame();
+}
+
+void BaselineCompiler::PrologueFillFrame() {
+ __ RecordComment("[ Fill frame");
+ // Inlined register frame fill
+ interpreter::Register new_target_or_generator_register =
+ bytecode_->incoming_new_target_or_generator_register();
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ int register_count = bytecode_->register_count();
+  // Magic value: how many register pushes to unroll per loop iteration.
+ const int kLoopUnrollSize = 8;
+ const int new_target_index = new_target_or_generator_register.index();
+ const bool has_new_target = new_target_index != kMaxInt;
+ if (has_new_target) {
+ DCHECK_LE(new_target_index, register_count);
+ for (int i = 0; i < new_target_index; i++) {
+ __ Push(kInterpreterAccumulatorRegister);
+ }
+ // Push new_target_or_generator.
+ __ Push(kJavaScriptCallNewTargetRegister);
+ register_count -= new_target_index + 1;
+ }
+ if (register_count < 2 * kLoopUnrollSize) {
+ // If the frame is small enough, just unroll the frame fill completely.
+ for (int i = 0; i < register_count; ++i) {
+ __ Push(kInterpreterAccumulatorRegister);
+ }
+ } else {
+ // Extract the first few registers to round to the unroll size.
+ int first_registers = register_count % kLoopUnrollSize;
+ for (int i = 0; i < first_registers; ++i) {
+ __ Push(kInterpreterAccumulatorRegister);
+ }
+ BaselineAssembler::ScratchRegisterScope scope(&basm_);
+ Register scratch = scope.AcquireScratch();
+ __ Move(scratch, register_count / kLoopUnrollSize);
+ // We enter the loop unconditionally, so make sure we need to loop at least
+ // once.
+ DCHECK_GT(register_count / kLoopUnrollSize, 0);
+ Label loop;
+ __ Bind(&loop);
+ for (int i = 0; i < kLoopUnrollSize; ++i) {
+ __ Push(kInterpreterAccumulatorRegister);
+ }
+ __ masm()->decl(scratch);
+ __ JumpIf(Condition::kGreaterThan, &loop);
+ }
+ __ RecordComment("]");
+}
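
The fill strategy above, restated as a plain C++ sketch (PushUndefined is a hypothetical stand-in for pushing the accumulator, which already holds undefined): small frames are unrolled completely, larger ones push the remainder first and then loop in chunks of kLoopUnrollSize.

    void PushUndefined();  // hypothetical stand-in for pushing the accumulator
    void FillFrameSketch(int register_count) {
      constexpr int kLoopUnrollSize = 8;
      if (register_count < 2 * kLoopUnrollSize) {
        for (int i = 0; i < register_count; ++i) PushUndefined();
      } else {
        int remainder = register_count % kLoopUnrollSize;
        for (int i = 0; i < remainder; ++i) PushUndefined();
        // The generated code loops on a decremented scratch register; the same
        // work is expressed here as a nested counted loop.
        for (int n = register_count / kLoopUnrollSize; n > 0; --n) {
          for (int i = 0; i < kLoopUnrollSize; ++i) PushUndefined();
        }
      }
    }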
+
+void BaselineCompiler::VerifyFrameSize() {
+ __ Move(kScratchRegister, rsp);
+ __ masm()->addq(kScratchRegister,
+ Immediate(InterpreterFrameConstants::kFixedFrameSizeFromFp +
+ bytecode_->frame_size()));
+ __ masm()->cmpq(kScratchRegister, rbp);
+ __ masm()->Assert(equal, AbortReason::kUnexpectedStackPointer);
+}
+
+#undef __
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_X64_BASELINE_COMPILER_X64_INL_H_
diff --git a/deps/v8/src/builtins/accessors.cc b/deps/v8/src/builtins/accessors.cc
index 197a2de860..cf2a18a34d 100644
--- a/deps/v8/src/builtins/accessors.cc
+++ b/deps/v8/src/builtins/accessors.cc
@@ -469,20 +469,9 @@ Handle<JSObject> GetFrameArguments(Isolate* isolate,
return ArgumentsForInlinedFunction(frame, function_index);
}
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
- const int length = frame->GetActualArgumentCount();
-#else
- // Find the frame that holds the actual arguments passed to the function.
- if (it->frame()->has_adapted_arguments()) {
- it->AdvanceOneFrame();
- DCHECK(it->frame()->is_arguments_adaptor());
- }
- frame = it->frame();
- const int length = frame->ComputeParametersCount();
-#endif
-
// Construct an arguments object mirror for the right frame and the underlying
// function.
+ const int length = frame->GetActualArgumentCount();
Handle<JSFunction> function(frame->function(), isolate);
Handle<JSObject> arguments =
isolate->factory()->NewArgumentsObject(function, length);
@@ -783,8 +772,8 @@ void Accessors::ErrorStackGetter(
Handle<JSObject>::cast(Utils::OpenHandle(*info.Holder()));
// Retrieve the stack trace. It can either be structured data in the form of
- // a FrameArray, an already formatted stack trace (string) or whatever the
- // "prepareStackTrace" callback produced.
+ // a FixedArray of StackFrameInfo objects, an already formatted stack trace
+ // (string) or whatever the "prepareStackTrace" callback produced.
Handle<Object> stack_trace;
Handle<Symbol> stack_trace_symbol = isolate->factory()->stack_trace_symbol();
@@ -853,32 +842,5 @@ Handle<AccessorInfo> Accessors::MakeErrorStackInfo(Isolate* isolate) {
&ErrorStackGetter, &ErrorStackSetter);
}
-//
-// Accessors::RegExpResultIndices
-//
-
-void Accessors::RegExpResultIndicesGetter(
- v8::Local<v8::Name> key, const v8::PropertyCallbackInfo<v8::Value>& info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- HandleScope scope(isolate);
- Handle<JSRegExpResult> regexp_result(
- Handle<JSRegExpResult>::cast(Utils::OpenHandle(*info.Holder())));
- MaybeHandle<JSArray> maybe_indices(
- JSRegExpResult::GetAndCacheIndices(isolate, regexp_result));
- Handle<JSArray> indices;
- if (!maybe_indices.ToHandle(&indices)) {
- isolate->OptionalRescheduleException(false);
- Handle<Object> result = isolate->factory()->undefined_value();
- info.GetReturnValue().Set(Utils::ToLocal(result));
- } else {
- info.GetReturnValue().Set(Utils::ToLocal(indices));
- }
-}
-
-Handle<AccessorInfo> Accessors::MakeRegExpResultIndicesInfo(Isolate* isolate) {
- return MakeAccessor(isolate, isolate->factory()->indices_string(),
- &RegExpResultIndicesGetter, nullptr);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/accessors.h b/deps/v8/src/builtins/accessors.h
index 7bc8075e55..0148b8e3d1 100644
--- a/deps/v8/src/builtins/accessors.h
+++ b/deps/v8/src/builtins/accessors.h
@@ -44,8 +44,6 @@ class JavaScriptFrame;
kHasSideEffectToReceiver) \
V(_, function_prototype, FunctionPrototype, kHasNoSideEffect, \
kHasSideEffectToReceiver) \
- V(_, regexp_result_indices, RegExpResultIndices, kHasSideEffectToReceiver, \
- kHasSideEffectToReceiver) \
V(_, string_length, StringLength, kHasNoSideEffect, kHasSideEffectToReceiver)
#define ACCESSOR_SETTER_LIST(V) \
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index f7630a4753..2762c61bde 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -464,10 +464,10 @@ namespace {
// Total size of the stack space pushed by JSEntryVariant.
// JSEntryTrampoline uses this to access on stack arguments passed to
// JSEntryVariant.
-constexpr int kPushedStackSpace = kNumCalleeSaved * kPointerSize +
- kPointerSize /* LR */ +
+constexpr int kPushedStackSpace = kNumCalleeSaved * kPointerSize -
+ kPointerSize /* FP */ +
kNumDoubleCalleeSaved * kDoubleSize +
- 4 * kPointerSize /* r5, r6, r7, scratch */ +
+ 5 * kPointerSize /* r5, r6, r7, fp, lr */ +
EntryFrameConstants::kCallerFPOffset;
// Assert that the EntryFrameConstants are in sync with the builtin.
@@ -500,6 +500,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// r1: microtask_queue
// Preserve all but r0 and pass them to entry_trampoline.
Label invoke, handler_entry, exit;
+ const RegList kCalleeSavedWithoutFp = kCalleeSaved & ~fp.bit();
// Update |pushed_stack_space| when we manipulate the stack.
int pushed_stack_space = EntryFrameConstants::kCallerFPOffset;
@@ -508,10 +509,10 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Called from C, so do not pop argc and args on exit (preserve sp)
// No need to save register-passed args
- // Save callee-saved registers (incl. cp and fp), sp, and lr
- __ stm(db_w, sp, kCalleeSaved | lr.bit());
+ // Save callee-saved registers (incl. cp), but without fp
+ __ stm(db_w, sp, kCalleeSavedWithoutFp);
pushed_stack_space +=
- kNumCalleeSaved * kPointerSize + kPointerSize /* LR */;
+ kNumCalleeSaved * kPointerSize - kPointerSize /* FP */;
// Save callee-saved vfp registers.
__ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
@@ -529,18 +530,19 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// r0: root_register_value
__ mov(r7, Operand(StackFrame::TypeToMarker(type)));
__ mov(r6, Operand(StackFrame::TypeToMarker(type)));
- __ Move(r5, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ __ Move(r4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
masm->isolate()));
- __ ldr(r5, MemOperand(r5));
- {
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.Acquire();
+ __ ldr(r5, MemOperand(r4));
- // Push a bad frame pointer to fail if it is used.
- __ mov(scratch, Operand(-1));
- __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | scratch.bit());
- pushed_stack_space += 4 * kPointerSize /* r5, r6, r7, scratch */;
- }
+ __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | fp.bit() | lr.bit());
+ pushed_stack_space += 5 * kPointerSize /* r5, r6, r7, fp, lr */;
+
+ // Clear c_entry_fp, now we've pushed its previous value to the stack.
+ // If the c_entry_fp is not already zero and we don't clear it, the
+ // SafeStackFrameIterator will assume we are executing C++ and miss the JS
+ // frames on top.
+ __ mov(r5, Operand::Zero());
+ __ str(r5, MemOperand(r4));
Register scratch = r6;
@@ -628,19 +630,21 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ str(r3, MemOperand(scratch));
// Reset the stack to the callee saved registers.
- __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+ __ add(sp, sp,
+ Operand(-EntryFrameConstants::kCallerFPOffset -
+ kSystemPointerSize /* already popped one */));
- // Restore callee-saved registers and return.
-#ifdef DEBUG
- if (FLAG_debug_code) {
- __ mov(lr, Operand(pc));
- }
-#endif
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
// Restore callee-saved vfp registers.
__ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
- __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
+ __ ldm(ia_w, sp, kCalleeSavedWithoutFp);
+
+ __ mov(pc, lr);
+
+ // Emit constant pool.
+ __ CheckConstPool(true, false);
}
} // namespace
@@ -800,7 +804,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ ldr(params_size,
FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
Register actual_params_size = scratch2;
// Compute the size of the actual parameters + receiver (in bytes).
__ ldr(actual_params_size,
@@ -812,7 +815,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
// arguments.
__ cmp(params_size, actual_params_size);
__ mov(params_size, actual_params_size, LeaveCC, lt);
-#endif
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
@@ -947,11 +949,11 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// Update table to the wide scaled table.
__ add(bytecode_size_table, bytecode_size_table,
- Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
+ Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
// Conditionally update table to the extra wide scaled table. We are taking
// advantage of the fact that the extra wide follows the wide one.
__ add(bytecode_size_table, bytecode_size_table,
- Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount), LeaveCC,
+ Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount), LeaveCC,
ne);
__ bind(&process_bytecode);
@@ -981,12 +983,34 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
- __ ldr(scratch1, MemOperand(bytecode_size_table, bytecode, LSL, 2));
+ __ ldrb(scratch1, MemOperand(bytecode_size_table, bytecode));
__ add(bytecode_offset, bytecode_offset, scratch1);
__ bind(&end);
}
+static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
+ MacroAssembler* masm, Register optimization_state,
+ Register feedback_vector) {
+ Label maybe_has_optimized_code;
+ // Check if optimized code is available
+ __ tst(
+ optimization_state,
+ Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+ __ b(eq, &maybe_has_optimized_code);
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+
+ __ bind(&maybe_has_optimized_code);
+ Register optimized_code_entry = optimization_state;
+ __ ldr(optimization_marker,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, r6);
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
@@ -1190,26 +1214,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
__ bind(&has_optimized_code_or_marker);
- Label maybe_has_optimized_code;
-
- // Check if optimized code is available
- __ tst(
- optimization_state,
- Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
- __ b(eq, &maybe_has_optimized_code);
-
- Register optimization_marker = optimization_state;
- __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
- MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
- // Fall through if there's no runnable optimized code.
- __ jmp(&not_optimized);
-
- __ bind(&maybe_has_optimized_code);
- Register optimized_code_entry = optimization_state;
- __ ldr(optimization_marker,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kMaybeOptimizedCodeOffset));
- TailCallOptimizedCodeSlot(masm, optimized_code_entry, r6);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ feedback_vector);
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
@@ -1767,29 +1773,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ SmiTag(r0);
- __ mov(r4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() |
- fp.bit() | lr.bit());
- __ Push(Smi::zero()); // Padding.
- __ add(fp, sp,
- Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
-}
-
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : result being passed through
- // -----------------------------------
- // Get the number of arguments passed (as a smi), tear down the frame and
- // then tear down the parameters.
- __ ldr(r1, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR);
- __ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1));
- __ add(sp, sp, Operand(kPointerSize)); // adjust for receiver
-}
-
// static
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
@@ -1902,38 +1885,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&new_target_constructor);
}
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
- // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
- // code is erased.
- __ mov(r4, fp);
- __ ldr(r5, MemOperand(fp, StandardFrameConstants::kArgCOffset));
-#else
- // Check if we have an arguments adaptor frame below the function frame.
- Label arguments_adaptor, arguments_done;
- __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(scratch,
- MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmp(scratch,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &arguments_adaptor);
- {
- __ ldr(r5, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ ldr(r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset));
- __ ldrh(r5, FieldMemOperand(
- r5, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(r4, fp);
- }
- __ b(&arguments_done);
- __ bind(&arguments_adaptor);
- {
- // Load the length from the ArgumentsAdaptorFrame.
- __ ldr(r5, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(r5);
- }
- __ bind(&arguments_done);
-#endif
-
Label stack_done, stack_overflow;
+ __ ldr(r5, MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ sub(r5, r5, r2, SetCC);
__ b(le, &stack_done);
{
@@ -1943,7 +1896,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// -- r1 : the target to call (can be any Object)
// -- r2 : start index (to support rest parameters)
// -- r3 : the new.target (for [[Construct]] calls)
- // -- r4 : point to the caller stack frame
+ // -- fp : point to the caller stack frame
// -- r5 : number of arguments to copy, i.e. arguments count - start index
// -----------------------------------
@@ -1952,7 +1905,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Forward the arguments from the caller frame.
// Point to the first argument to copy (skipping the receiver).
- __ add(r4, r4,
+ __ add(r4, fp,
Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
kSystemPointerSize));
__ add(r4, r4, Operand(r2, LSL, kSystemPointerSizeLog2));
@@ -2211,9 +2164,11 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
Label non_callable, non_smi;
__ JumpIfSmi(r1, &non_callable);
__ bind(&non_smi);
- __ CompareObjectType(r1, r4, r5, JS_FUNCTION_TYPE);
+ __ LoadMap(r4, r1);
+ __ CompareInstanceTypeRange(r4, r5, FIRST_JS_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
- RelocInfo::CODE_TARGET, eq);
+ RelocInfo::CODE_TARGET, ls);
__ cmp(r5, Operand(JS_BOUND_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
RelocInfo::CODE_TARGET, eq);
@@ -2319,9 +2274,10 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ b(eq, &non_constructor);
// Dispatch based on instance type.
- __ CompareInstanceType(r4, r5, JS_FUNCTION_TYPE);
+ __ CompareInstanceTypeRange(r4, r5, FIRST_JS_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET, eq);
+ RelocInfo::CODE_TARGET, ls);
// Only dispatch to bound functions after checking whether they are
// constructors.
@@ -2353,146 +2309,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : actual number of arguments
- // -- r1 : function (passed through to callee)
- // -- r2 : expected number of arguments
- // -- r3 : new target (passed through to callee)
- // -----------------------------------
-
- Label dont_adapt_arguments, stack_overflow;
- __ cmp(r2, Operand(kDontAdaptArgumentsSentinel));
- __ b(eq, &dont_adapt_arguments);
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
-
- // -------------------------------------------
- // Adapt arguments.
- // -------------------------------------------
- {
- Label under_application, over_application, invoke;
- __ cmp(r0, r2);
- __ b(lt, &under_application);
-
- // Enough parameters: actual >= expected
- __ bind(&over_application);
- {
- EnterArgumentsAdaptorFrame(masm);
- __ StackOverflowCheck(r2, r5, &stack_overflow);
-
- // Calculate copy start address into r0 and copy end address into r4.
- // r0: actual number of arguments as a smi
- // r1: function
- // r2: expected number of arguments
- // r3: new target (passed through to callee)
- __ add(r0, fp, Operand(r2, LSL, kSystemPointerSizeLog2));
- // adjust for return address and receiver
- __ add(r0, r0, Operand(2 * kSystemPointerSize));
- __ sub(r4, r0, Operand(r2, LSL, kSystemPointerSizeLog2));
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // r0: copy start address
- // r1: function
- // r2: expected number of arguments
- // r3: new target (passed through to callee)
- // r4: copy end address
-
- Label copy;
- __ bind(&copy);
- __ ldr(r5, MemOperand(r0, 0));
- __ push(r5);
- __ cmp(r0, r4); // Compare before moving to next argument.
- __ sub(r0, r0, Operand(kSystemPointerSize));
- __ b(ne, &copy);
-
- __ b(&invoke);
- }
-
- // Too few parameters: Actual < expected
- __ bind(&under_application);
- {
- EnterArgumentsAdaptorFrame(masm);
- __ StackOverflowCheck(r2, r5, &stack_overflow);
-
- // Fill the remaining expected arguments with undefined.
- // r0: actual number of arguments as a smi
- // r1: function
- // r2: expected number of arguments
- // r3: new target (passed through to callee)
- __ LoadRoot(r5, RootIndex::kUndefinedValue);
- __ sub(r6, r2, Operand::SmiUntag(r0));
- __ sub(r4, fp, Operand(r6, LSL, kPointerSizeLog2));
- // Adjust for frame.
- __ sub(r4, r4,
- Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize));
-
- Label fill;
- __ bind(&fill);
- __ push(r5);
- __ cmp(sp, r4);
- __ b(ne, &fill);
-
- // Calculate copy start address into r0 and copy end address is fp.
- // r0: actual number of arguments as a smi
- // r1: function
- // r2: expected number of arguments
- // r3: new target (passed through to callee)
- __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // r0: copy start address
- // r1: function
- // r2: expected number of arguments
- // r3: new target (passed through to callee)
- Label copy;
- __ bind(&copy);
-
- // Adjust load for return address and receiver.
- __ ldr(r5, MemOperand(r0, 2 * kPointerSize));
- __ push(r5);
-
- __ cmp(r0, fp); // Compare before moving to next argument.
- __ sub(r0, r0, Operand(kPointerSize));
- __ b(ne, &copy);
- }
-
- // Call the entry point.
- __ bind(&invoke);
- __ mov(r0, r2);
- // r0 : expected number of arguments
- // r1 : function (passed through to callee)
- // r3 : new target (passed through to callee)
- static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ CallCodeObject(r2);
-
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(
- masm->pc_offset());
-
- // Exit frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ Jump(lr);
- }
-
- // -------------------------------------------
- // Dont adapt arguments.
- // -------------------------------------------
- __ bind(&dont_adapt_arguments);
- static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ JumpCodeObject(r2);
-
- __ bind(&stack_overflow);
- {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ bkpt(0);
- }
-}
-
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in a register by the jump table trampoline.
// Convert to Smi for the runtime call.
@@ -2502,12 +2318,27 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
- // Save all parameter registers (see wasm-linkage.cc). They might be
+ // Save all parameter registers (see wasm-linkage.h). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs = Register::ListOf(r0, r1, r2, r3);
- constexpr DwVfpRegister lowest_fp_reg = d0;
- constexpr DwVfpRegister highest_fp_reg = d7;
+ RegList gp_regs = 0;
+ for (Register gp_param_reg : wasm::kGpParamRegisters) {
+ gp_regs |= gp_param_reg.bit();
+ }
+ DwVfpRegister lowest_fp_reg = std::begin(wasm::kFpParamRegisters)[0];
+ DwVfpRegister highest_fp_reg = std::end(wasm::kFpParamRegisters)[-1];
+ for (DwVfpRegister fp_param_reg : wasm::kFpParamRegisters) {
+ CHECK(fp_param_reg.code() >= lowest_fp_reg.code() &&
+ fp_param_reg.code() <= highest_fp_reg.code());
+ }
+
+ CHECK_EQ(NumRegs(gp_regs), arraysize(wasm::kGpParamRegisters));
+ CHECK_EQ(highest_fp_reg.code() - lowest_fp_reg.code() + 1,
+ arraysize(wasm::kFpParamRegisters));
+ CHECK_EQ(NumRegs(gp_regs),
+ WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs);
+ CHECK_EQ(highest_fp_reg.code() - lowest_fp_reg.code() + 1,
+ WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs);
__ stm(db_w, sp, gp_regs);
__ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg);
@@ -2697,6 +2528,16 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// underlying register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
+ // Clear c_entry_fp, like we do in `LeaveExitFrame`.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ Move(scratch, ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, masm->isolate()));
+ __ mov(r1, Operand::Zero());
+ __ str(r1, MemOperand(scratch));
+ }
+
// Compute the handler entry address and jump to it.
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ Move(r1, pending_handler_entrypoint_address);
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index e4db765ada..f5a3cd9869 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -18,6 +18,7 @@
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
+#include "src/objects/instance-type.h"
#include "src/objects/js-generator.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
@@ -407,11 +408,16 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
- Register sfi_data,
- Register scratch1) {
+// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
+// the more general dispatch.
+static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1,
+ Label* is_baseline) {
Label done;
- __ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
+ __ CompareObjectType(sfi_data, scratch1, scratch1, BASELINE_DATA_TYPE);
+ __ B(eq, is_baseline);
+ __ Cmp(scratch1, INTERPRETER_DATA_TYPE);
__ B(ne, &done);
__ LoadTaggedPointerField(
sfi_data,
@@ -514,13 +520,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
+ Label is_baseline;
__ LoadTaggedPointerField(
x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoBytecode(masm, x3, x0);
+ GetSharedFunctionInfoBytecodeOrBaseline(masm, x3, x0, &is_baseline);
__ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
+ __ bind(&is_baseline);
}
// Resume (Ignition/TurboFan) generator object.
@@ -603,12 +611,11 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
NoRootArrayScope no_root_array(masm);
#if defined(V8_OS_WIN)
- // Windows ARM64 relies on a frame pointer (fp/x29 which are aliases to each
- // other) chain to do stack unwinding, but JSEntry breaks that by setting fp
- // to point to bad_frame_pointer below. To fix unwind information for this
- // case, JSEntry registers the offset (from current fp to the caller's fp
- // saved by PushCalleeSavedRegisters on stack) to xdata_encoder which then
- // emits the offset value as part of result unwind data accordingly.
+ // In order to allow Windows debugging tools to reconstruct a call stack, we
+ // must generate information describing how to recover at least fp, sp, and
+ // pc for the calling frame. Here, JSEntry registers offsets to
+ // xdata_encoder which then emits the offset values as part of the unwind
+ // data accordingly.
win64_unwindinfo::XdataEncoder* xdata_encoder = masm->GetXdataEncoder();
if (xdata_encoder) {
xdata_encoder->onFramePointerAdjustment(
@@ -627,49 +634,58 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ Mov(kRootRegister, x0);
}
+ // Set up fp. It points to the {fp, lr} pair pushed as the last step in
+ // PushCalleeSavedRegisters.
+ STATIC_ASSERT(
+ EntryFrameConstants::kCalleeSavedRegisterBytesPushedAfterFpLrPair == 0);
+ STATIC_ASSERT(EntryFrameConstants::kOffsetToCalleeSavedRegisters == 0);
+ __ Mov(fp, sp);
+
// Build an entry frame (see layout below).
- int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
- __ Mov(x13, bad_frame_pointer);
+
+ // Push frame type markers.
__ Mov(x12, StackFrame::TypeToMarker(type));
+ __ Push(x12, xzr);
+
__ Mov(x11, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
masm->isolate()));
- __ Ldr(x10, MemOperand(x11));
+ __ Ldr(x10, MemOperand(x11)); // x10 = C entry FP.
- // x13 (the bad frame pointer) is the first item pushed.
- STATIC_ASSERT(EntryFrameConstants::kOffsetToCalleeSavedRegisters ==
- 1 * kSystemPointerSize);
+ // Clear c_entry_fp, now we've loaded its value to be pushed on the stack.
+ // If the c_entry_fp is not already zero and we don't clear it, the
+ // SafeStackFrameIterator will assume we are executing C++ and miss the JS
+ // frames on top.
+ __ Str(xzr, MemOperand(x11));
- __ Push(x13, x12, xzr, x10);
- // Set up fp.
- __ Sub(fp, sp, EntryFrameConstants::kCallerFPOffset);
-
- // Push the JS entry frame marker. Also set js_entry_sp if this is the
- // outermost JS call.
+ // Set js_entry_sp if this is the outermost JS call.
Label done;
ExternalReference js_entry_sp = ExternalReference::Create(
IsolateAddressId::kJSEntrySPAddress, masm->isolate());
- __ Mov(x10, js_entry_sp);
- __ Ldr(x11, MemOperand(x10));
+ __ Mov(x12, js_entry_sp);
+ __ Ldr(x11, MemOperand(x12)); // x11 = previous JS entry SP.
// Select between the inner and outermost frame marker, based on the JS entry
// sp. We assert that the inner marker is zero, so we can use xzr to save a
// move instruction.
DCHECK_EQ(StackFrame::INNER_JSENTRY_FRAME, 0);
__ Cmp(x11, 0); // If x11 is zero, this is the outermost frame.
- __ Csel(x12, xzr, StackFrame::OUTERMOST_JSENTRY_FRAME, ne);
+ // x11 = JS entry frame marker.
+ __ Csel(x11, xzr, StackFrame::OUTERMOST_JSENTRY_FRAME, ne);
__ B(ne, &done);
- __ Str(fp, MemOperand(x10));
+ __ Str(fp, MemOperand(x12));
__ Bind(&done);
- __ Push(x12, padreg);
+
+ __ Push(x10, x11);
// The frame set up looks like this:
- // sp[0] : padding.
- // sp[1] : JS entry frame marker.
- // sp[2] : C entry FP.
- // sp[3] : stack frame marker.
- // sp[4] : stack frame marker.
- // sp[5] : bad frame pointer 0xFFF...FF <- fp points here.
+ // sp[0] : JS entry frame marker.
+ // sp[1] : C entry FP.
+ // sp[2] : stack frame marker (0).
+ // sp[3] : stack frame marker (type).
+ // sp[4] : saved fp <- fp points here.
+ // sp[5] : saved lr
+ // sp[6,24) : other saved registers
// Jump to a faked try block that does the invoke, with a faked catch
// block that sets the pending exception.
@@ -690,7 +706,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Caught exception: Store result (exception) in the pending exception
// field in the JSEnv and return a failure sentinel. Coming in here the
- // fp will be invalid because the PushTryHandler below sets it to 0 to
+ // fp will be invalid because UnwindAndFindHandler sets it to 0 to
// signal the existence of the JSEntry frame.
__ Mov(x10,
ExternalReference::Create(IsolateAddressId::kPendingExceptionAddress,
@@ -747,18 +763,19 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// x0 holds the result.
// The stack pointer points to the top of the entry frame pushed on entry from
// C++ (at the beginning of this stub):
- // sp[0] : padding.
- // sp[1] : JS entry frame marker.
- // sp[2] : C entry FP.
- // sp[3] : stack frame marker.
- // sp[4] : stack frame marker.
- // sp[5] : bad frame pointer 0xFFF...FF <- fp points here.
+ // sp[0] : JS entry frame marker.
+ // sp[1] : C entry FP.
+ // sp[2] : stack frame marker (0).
+ // sp[3] : stack frame marker (type).
+ // sp[4] : saved fp <- fp might point here, or might be zero.
+ // sp[5] : saved lr
+ // sp[6,24) : other saved registers
// Check if the current stack frame is marked as the outermost JS frame.
Label non_outermost_js_2;
{
Register c_entry_fp = x11;
- __ PeekPair(x10, c_entry_fp, 1 * kSystemPointerSize);
+ __ PeekPair(x10, c_entry_fp, 0);
__ Cmp(x10, StackFrame::OUTERMOST_JSENTRY_FRAME);
__ B(ne, &non_outermost_js_2);
__ Mov(x12, js_entry_sp);
@@ -950,7 +967,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ Ldr(params_size.W(),
FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
Register actual_params_size = scratch2;
// Compute the size of the actual parameters + receiver (in bytes).
__ Ldr(actual_params_size,
@@ -965,7 +981,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ B(ge, &corrected_args_count);
__ Mov(params_size, actual_params_size);
__ Bind(&corrected_args_count);
-#endif
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
@@ -1111,13 +1126,13 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// Update table to the wide scaled table.
__ Add(bytecode_size_table, bytecode_size_table,
- Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
+ Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
__ B(&process_bytecode);
__ Bind(&extra_wide);
// Update table to the extra wide scaled table.
__ Add(bytecode_size_table, bytecode_size_table,
- Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
+ Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
__ Bind(&process_bytecode);
@@ -1140,12 +1155,196 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
- __ Ldr(scratch1.W(), MemOperand(bytecode_size_table, bytecode, LSL, 2));
+ __ Ldrb(scratch1.W(), MemOperand(bytecode_size_table, bytecode));
__ Add(bytecode_offset, bytecode_offset, scratch1);
__ Bind(&end);
}
+// Read off the optimization state in the feedback vector and check if there
+// is optimized code or an optimization marker that needs to be processed.
+static void LoadOptimizationStateAndJumpIfNeedsProcessing(
+ MacroAssembler* masm, Register optimization_state, Register feedback_vector,
+ Label* has_optimized_code_or_marker) {
+ __ RecordComment("[ Check optimization state");
+
+ __ Ldr(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+ __ TestAndBranchIfAnySet(
+ optimization_state,
+ FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask,
+ has_optimized_code_or_marker);
+
+ __ RecordComment("]");
+}
+
+static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
+ MacroAssembler* masm, Register optimization_state,
+ Register feedback_vector) {
+ Label maybe_has_optimized_code;
+ // Check if optimized code is available
+ __ TestAndBranchIfAllClear(
+ optimization_state,
+ FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
+ &maybe_has_optimized_code);
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+
+ __ bind(&maybe_has_optimized_code);
+ Register optimized_code_entry = x7;
+ __ LoadAnyTaggedField(
+ optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, x4);
+}
+
+// static
+void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
+ UseScratchRegisterScope temps(masm);
+ // Need a few extra registers
+ temps.Include(x14, x15);
+
+ auto descriptor = Builtins::CallInterfaceDescriptorFor(
+ Builtins::kBaselineOutOfLinePrologue);
+ Register closure = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ // Load the feedback vector from the closure.
+ Register feedback_vector = temps.AcquireX();
+ __ LoadTaggedPointerField(
+ feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ if (__ emit_debug_code()) {
+ __ CompareObjectType(feedback_vector, x4, x4, FEEDBACK_VECTOR_TYPE);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector);
+ }
+
+ __ RecordComment("[ Check optimization state");
+
+ // Check for an optimization marker.
+ Label has_optimized_code_or_marker;
+ Register optimization_state = temps.AcquireW();
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector, &has_optimized_code_or_marker);
+
+ // Increment invocation count for the function.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register invocation_count = temps.AcquireW();
+ __ Ldr(invocation_count,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ __ Add(invocation_count, invocation_count, Operand(1));
+ __ Str(invocation_count,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ }
+
+ __ RecordComment("[ Frame Setup");
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ // Normally the first thing we'd do here is Push(lr, fp), but we already
+ // entered the frame in BaselineCompiler::Prologue, as we had to use the
+ // value lr had before the call to this BaselineOutOfLinePrologue builtin.
+
+ Register callee_context = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kCalleeContext);
+ Register callee_js_function = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ __ Push(callee_context, callee_js_function);
+ DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
+ DCHECK_EQ(callee_js_function, kJSFunctionRegister);
+
+ Register argc = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
+ // We'll use the bytecode for both code age/OSR resetting, and pushing onto
+ // the frame, so load it into a register.
+ Register bytecodeArray = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
+
+ // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
+  // are 8-bit fields next to each other, so we could just optimize by writing
+  // a single 16-bit store. These static asserts guard that this assumption is
+  // valid.
+ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ Strh(wzr, FieldMemOperand(bytecodeArray,
+ BytecodeArray::kOsrNestingLevelOffset));
+
+ __ Push(argc, bytecodeArray);
+
+ // Baseline code frames store the feedback vector where interpreter would
+ // store the bytecode offset.
+ if (__ emit_debug_code()) {
+ __ CompareObjectType(feedback_vector, x4, x4, FEEDBACK_VECTOR_TYPE);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector);
+ }
+  // Our stack is currently aligned. We have to push something along with
+ // the feedback vector to keep it that way -- we may as well start
+ // initialising the register frame.
+ // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
+ // `undefined` in the accumulator register, to skip the load in the baseline
+ // code.
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ __ Push(feedback_vector, kInterpreterAccumulatorRegister);
+ __ RecordComment("]");
+
+ __ RecordComment("[ Stack/interrupt check");
+ Label call_stack_guard;
+ {
+ // Stack check. This folds the checks for both the interrupt stack limit
+ // check and the real stack limit into one by just checking for the
+ // interrupt limit. The interrupt limit is either equal to the real stack
+ // limit or tighter. By ensuring we have space until that limit after
+ // building the frame we can quickly precheck both at once.
+ UseScratchRegisterScope temps(masm);
+
+ Register frame_size = temps.AcquireW();
+ __ Ldr(frame_size,
+ FieldMemOperand(bytecodeArray, BytecodeArray::kFrameSizeOffset));
+ Register sp_minus_frame_size = frame_size.X();
+ __ Sub(sp_minus_frame_size, sp, frame_size.X());
+ Register interrupt_limit = temps.AcquireX();
+ __ LoadStackLimit(interrupt_limit, StackLimitKind::kInterruptStackLimit);
+ __ Cmp(sp_minus_frame_size, interrupt_limit);
+ __ B(lo, &call_stack_guard);
+ __ RecordComment("]");
+ }
+
+ // Do "fast" return to the caller pc in lr.
+ // TODO(v8:11429): Document this frame setup better.
+ __ Ret();
+
+ __ bind(&has_optimized_code_or_marker);
+ {
+ __ RecordComment("[ Optimized marker check");
+ // Drop the frame created by the baseline call.
+ __ Pop<TurboAssembler::kAuthLR>(fp, lr);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ feedback_vector);
+ __ Trap();
+ __ RecordComment("]");
+ }
+
+ __ bind(&call_stack_guard);
+ {
+ Register new_target = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kJavaScriptCallNewTarget);
+
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ RecordComment("[ Stack/interrupt call");
+ // Save incoming new target or generator
+ __ Push(padreg, new_target);
+ __ CallRuntime(Runtime::kStackGuard);
+ __ Pop(new_target, padreg);
+ __ RecordComment("]");
+ }
+ __ Ret();
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
@@ -1171,7 +1370,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadTaggedPointerField(
kInterpreterBytecodeArrayRegister,
FieldMemOperand(x4, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, x11);
+
+ Label is_baseline;
+ GetSharedFunctionInfoBytecodeOrBaseline(
+ masm, kInterpreterBytecodeArrayRegister, x11, &is_baseline);
// The bytecode array could have been flushed from the shared function info,
// if so, call into CompileLazy.
@@ -1196,19 +1398,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Cmp(x7, FEEDBACK_VECTOR_TYPE);
__ B(ne, &push_stack_frame);
- // Read off the optimized state in the feedback vector, and if there
- // is optimized code or an optimization marker, call that instead.
- Register optimization_state = w7;
- __ Ldr(optimization_state,
- FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
-
- // Check if there is optimized code or a optimization marker that needes to be
- // processed.
+ // Check for an optimization marker.
Label has_optimized_code_or_marker;
- __ TestAndBranchIfAnySet(
- optimization_state,
- FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask,
- &has_optimized_code_or_marker);
+ Register optimization_state = w7;
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector, &has_optimized_code_or_marker);
Label not_optimized;
__ bind(&not_optimized);
@@ -1364,27 +1558,56 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
__ bind(&has_optimized_code_or_marker);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ feedback_vector);
- Label maybe_has_optimized_code;
- // Check if optimized code is available
- __ TestAndBranchIfAllClear(
- optimization_state,
- FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
- &maybe_has_optimized_code);
+ __ bind(&is_baseline);
+ {
+ // Load the feedback vector from the closure.
+ __ LoadTaggedPointerField(
+ feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
- Register optimization_marker = optimization_state;
- __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
- MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
- // Fall through if there's no runnable optimized code.
- __ jmp(&not_optimized);
+ Label install_baseline_code;
+ // Check if the feedback vector is valid. If not, call into the runtime to
+ // allocate it.
+ __ LoadTaggedPointerField(
+ x7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ Ldrh(x7, FieldMemOperand(x7, Map::kInstanceTypeOffset));
+ __ Cmp(x7, FEEDBACK_VECTOR_TYPE);
+ __ B(ne, &install_baseline_code);
+
+ // Check for an optimization marker.
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector,
+ &has_optimized_code_or_marker);
+
+ // Read off the optimization state in the feedback vector.
+ // TODO(v8:11429): Is this worth doing here? Baseline code will check it
+ // anyway...
+ __ Ldr(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+
+ // Check if there is optimized code or an optimization marker that needs to
+ // be processed.
+ __ TestAndBranchIfAnySet(
+ optimization_state,
+ FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask,
+ &has_optimized_code_or_marker);
+
+ // Load the baseline code into the closure.
+ __ LoadTaggedPointerField(
+ x2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BaselineData::kBaselineCodeOffset));
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ ReplaceClosureCodeWithOptimizedCode(masm, x2, closure);
+ __ JumpCodeObject(x2);
- __ bind(&maybe_has_optimized_code);
- Register optimized_code_entry = x7;
- __ LoadAnyTaggedField(
- optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kMaybeOptimizedCodeOffset));
- TailCallOptimizedCodeSlot(masm, optimized_code_entry, x4);
+ __ bind(&install_baseline_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
+ }
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
@@ -1785,7 +2008,14 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
__ Ret();
}
-void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+void Builtins::Generate_TailCallOptimizedCodeSlot(MacroAssembler* masm) {
+ UseScratchRegisterScope temps(masm);
+ Register optimized_code_entry = kJavaScriptCallCodeStartRegister;
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, temps.AcquireX());
+}
+
+namespace {
+void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
@@ -1798,9 +2028,11 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
__ Bind(&skip);
- // Drop the handler frame that is be sitting on top of the actual
- // JavaScript frame. This is the case then OSR is triggered from bytecode.
- __ LeaveFrame(StackFrame::STUB);
+ if (is_interpreter) {
+ // Drop the handler frame that is sitting on top of the actual
+ // JavaScript frame. This is the case when OSR is triggered from bytecode.
+ __ LeaveFrame(StackFrame::STUB);
+ }
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
@@ -1821,6 +2053,15 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// And "return" to the OSR entry point of the function.
__ Ret();
}
+} // namespace
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+ return OnStackReplacement(masm, true);
+}
+
+void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+ return OnStackReplacement(masm, false);
+}
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@@ -2074,30 +2315,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
namespace {
-void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ Push<TurboAssembler::kSignLR>(lr, fp);
- __ Mov(x11, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
- __ Push(x11, x1); // x1: function
- __ SmiTag(x11, x0); // x0: number of arguments.
- __ Push(x11, padreg);
- __ Add(fp, sp, ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp);
-}
-
-void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : result being passed through
- // -----------------------------------
- // Get the number of arguments passed (as a smi), tear down the frame and
- // then drop the parameters and the receiver.
- __ Ldr(x10, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Mov(sp, fp);
- __ Pop<TurboAssembler::kAuthLR>(fp, lr);
-
- // Drop actual parameters and receiver.
- __ SmiUntag(x10);
- __ DropArguments(x10, TurboAssembler::kCountExcludesReceiver);
-}
-
// Prepares the stack for copying the varargs. First we claim the necessary
// slots, taking care of potential padding. Then we copy the existing arguments
// one slot up or one slot down, as needed.
@@ -2247,49 +2464,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ Bind(&new_target_constructor);
}
- Register args_fp = x5;
Register len = x6;
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
- // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
- // code is erased.
- __ Mov(args_fp, fp);
- __ Ldr(len, MemOperand(fp, StandardFrameConstants::kArgCOffset));
-#else
- // Check if we have an arguments adaptor frame below the function frame.
- // args_fp will point to the frame that contains the actual arguments, which
- // will be the current frame unless we have an arguments adaptor frame, in
- // which case args_fp points to the arguments adaptor frame.
- {
- Label arguments_adaptor, arguments_done;
- Register scratch = x10;
- __ Ldr(args_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(x4, MemOperand(args_fp,
- CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpTagged(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(eq, &arguments_adaptor);
- {
- __ Ldr(scratch, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ LoadTaggedPointerField(
- scratch,
- FieldMemOperand(scratch, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrh(len,
- FieldMemOperand(scratch,
- SharedFunctionInfo::kFormalParameterCountOffset));
- __ Mov(args_fp, fp);
- }
- __ B(&arguments_done);
- __ Bind(&arguments_adaptor);
- {
- // Just load the length from ArgumentsAdaptorFrame.
- __ SmiUntag(
- len,
- MemOperand(args_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- }
- __ Bind(&arguments_done);
- }
-#endif
-
Label stack_done, stack_overflow;
+ __ Ldr(len, MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ Subs(len, len, start_index);
__ B(le, &stack_done);
// Check for stack overflow.
@@ -2299,9 +2476,10 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Push varargs.
{
+ Register args_fp = x5;
Register dst = x13;
// Point to the fist argument to copy from (skipping receiver).
- __ Add(args_fp, args_fp,
+ __ Add(args_fp, fp,
CommonFrameConstants::kFixedFrameSizeAboveFp + kSystemPointerSize);
__ lsl(start_index, start_index, kSystemPointerSizeLog2);
__ Add(args_fp, args_fp, start_index);
@@ -2584,9 +2762,11 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
Label non_callable, non_smi;
__ JumpIfSmi(x1, &non_callable);
__ Bind(&non_smi);
- __ CompareObjectType(x1, x4, x5, JS_FUNCTION_TYPE);
+ __ LoadMap(x4, x1);
+ __ CompareInstanceTypeRange(x4, x5, FIRST_JS_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
- RelocInfo::CODE_TARGET, eq);
+ RelocInfo::CODE_TARGET, ls);
__ Cmp(x5, JS_BOUND_FUNCTION_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
RelocInfo::CODE_TARGET, eq);
@@ -2700,9 +2880,10 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
&non_constructor);
// Dispatch based on instance type.
- __ CompareInstanceType(x4, x5, JS_FUNCTION_TYPE);
+ __ CompareInstanceTypeRange(x4, x5, FIRST_JS_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET, eq);
+ RelocInfo::CODE_TARGET, ls);
// Only dispatch to bound functions after checking whether they are
// constructors.
@@ -2735,188 +2916,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
- // ----------- S t a t e -------------
- // -- x0 : actual number of arguments
- // -- x1 : function (passed through to callee)
- // -- x2 : expected number of arguments
- // -- x3 : new target (passed through to callee)
- // -----------------------------------
-
- // The frame we are about to construct will look like:
- //
- // slot Adaptor frame
- // +-----------------+--------------------------------
- // -n-1 | receiver | ^
- // | (parameter 0) | |
- // |- - - - - - - - -| |
- // -n | | Caller
- // ... | ... | frame slots --> actual args
- // -2 | parameter n-1 | |
- // |- - - - - - - - -| |
- // -1 | parameter n | v
- // -----+-----------------+--------------------------------
- // 0 | return addr | ^
- // |- - - - - - - - -| |
- // 1 | saved frame ptr | <-- frame ptr |
- // |- - - - - - - - -| |
- // 2 |Frame Type Marker| |
- // |- - - - - - - - -| |
- // 3 | function | Callee
- // |- - - - - - - - -| frame slots
- // 4 | num of | |
- // | actual args | |
- // |- - - - - - - - -| |
- // 5 | padding | |
- // |-----------------+---- |
- // [6] | [padding] | ^ |
- // |- - - - - - - - -| | |
- // 6+pad | receiver | | |
- // | (parameter 0) | | |
- // |- - - - - - - - -| | |
- // 7+pad | parameter 1 | | |
- // |- - - - - - - - -| Frame slots ----> expected args
- // 8+pad | parameter 2 | | |
- // |- - - - - - - - -| | |
- // | | | |
- // ... | ... | | |
- // | parameter m | | |
- // |- - - - - - - - -| | |
- // | [undefined] | | |
- // |- - - - - - - - -| | |
- // | | | |
- // | ... | | |
- // | [undefined] | v <-- stack ptr v
- // -----+-----------------+---------------------------------
- //
- // There is an optional slot of padding above the receiver to ensure stack
- // alignment of the arguments.
- // If the number of expected arguments is larger than the number of actual
- // arguments, the remaining expected slots will be filled with undefined.
- // TODO(v8:10201) update comment once reversed arguments order sticks
-
- Register argc_actual = x0; // Excluding the receiver.
- Register argc_expected = x2; // Excluding the receiver.
- Register function = x1;
-
- Label create_adaptor_frame, dont_adapt_arguments, stack_overflow;
-
- __ Cmp(argc_expected, kDontAdaptArgumentsSentinel);
- __ B(eq, &dont_adapt_arguments);
-
- // -------------------------------------------
- // Create an arguments adaptor frame.
- // -------------------------------------------
- __ Bind(&create_adaptor_frame);
- {
- __ RecordComment("-- Adapt arguments --");
- EnterArgumentsAdaptorFrame(masm);
-
- Register copy_from = x10;
- Register copy_to = x12;
- Register copy_end = x11;
- Register argc_to_copy = x13;
- Register scratch1 = x15;
-
- // We need slots for the expected arguments, with one extra slot for the
- // receiver.
- __ RecordComment("-- Stack check --");
- __ Add(scratch1, argc_expected, 1);
- __ StackOverflowCheck(scratch1, &stack_overflow);
-
- // Round up number of slots to be even, to maintain stack alignment.
- __ RecordComment("-- Allocate callee frame slots --");
- __ Add(scratch1, scratch1, 1);
- __ Bic(scratch1, scratch1, 1);
- __ Claim(scratch1, kSystemPointerSize);
-
- // If we don't have enough arguments, fill the remaining expected
- // arguments with undefined, otherwise skip this step.
- Label enough_arguments;
- __ Cmp(argc_actual, argc_expected);
- __ Csel(argc_to_copy, argc_expected, argc_actual, ge);
- __ Add(argc_to_copy, argc_to_copy, 1); // Include receiver.
- __ B(ge, &enough_arguments);
-
- // Fill the remaining expected arguments with undefined.
- __ RecordComment("-- Fill slots with undefined --");
- Label fill;
- // scratch1 still contains the size of the claimed area,
- // which is RoundUp(argc_expected + 1, 2).
- __ SlotAddress(copy_to, scratch1);
- __ SlotAddress(copy_end, argc_to_copy);
- __ LoadRoot(scratch1, RootIndex::kUndefinedValue);
- // Now we can write pairs of undefineds, potentially overwriting one word
- // below copy_end, but that's ok because that slot is still within claimed
- // region. This loop will execute at least once because at this point we
- // know that there's at least one undefined to be pushed and
- // argc_to_copy >= 1.
- __ Bind(&fill);
- __ Stp(scratch1, scratch1,
- MemOperand(copy_to, -2 * kSystemPointerSize, PreIndex));
- __ Cmp(copy_to, copy_end);
- __ B(hi, &fill);
-
- // Enough arguments.
- __ Bind(&enough_arguments);
-
- // Store padding if needed, when expected arguments is even.
- __ RecordComment("-- Store padding --");
- Label skip_padding;
- __ Tbnz(argc_expected, 0, &skip_padding);
- __ SlotAddress(scratch1, argc_expected);
- __ Str(padreg, MemOperand(scratch1, kSystemPointerSize));
- __ bind(&skip_padding);
-
- // Copy arguments.
- __ RecordComment("-- Copy actual arguments --");
- __ Mov(copy_to, sp);
- __ Add(copy_from, fp, 2 * kSystemPointerSize);
- __ CopyDoubleWords(copy_to, copy_from, argc_to_copy);
-
- // Arguments have been adapted. Now call the entry point.
- __ RecordComment("-- Call entry point --");
- __ Mov(argc_actual, argc_expected);
- // x0 : expected number of arguments
- // x1 : function (passed through to callee)
- // x3 : new target (passed through to callee)
- static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
- __ LoadTaggedPointerField(
- x2, FieldMemOperand(function, JSFunction::kCodeOffset));
- __ CallCodeObject(x2);
-
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(
- masm->pc_offset());
-
- // Exit frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ Ret();
- }
-
- // -------------------------------------------
- // Dont adapt arguments.
- // -------------------------------------------
- __ Bind(&dont_adapt_arguments);
- {
- // Call the entry point without adapting the arguments.
- __ RecordComment("-- Call without adapting args --");
- static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
- __ LoadTaggedPointerField(
- x2, FieldMemOperand(function, JSFunction::kCodeOffset));
- __ JumpCodeObject(x2);
- }
-
- __ Bind(&stack_overflow);
- __ RecordComment("-- Stack overflow --");
- {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ Unreachable();
- }
-}
-
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in w8 by the jump table trampoline.
// Sign extend and convert to Smi for the runtime call.
@@ -2930,15 +2929,33 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
- // Save all parameter registers (see wasm-linkage.cc). They might be
+ // Save all parameter registers (see wasm-linkage.h). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs =
- Register::ListOf(x0, x1, x2, x3, x4, x5, x6, x7);
- constexpr RegList fp_regs =
- Register::ListOf(d0, d1, d2, d3, d4, d5, d6, d7);
+ RegList gp_regs = 0;
+ for (Register gp_param_reg : wasm::kGpParamRegisters) {
+ gp_regs |= gp_param_reg.bit();
+ }
+ // Also push x1, because we must push multiples of 16 bytes (see
+ // {TurboAssembler::PushCPURegList}).
+ CHECK_EQ(1, NumRegs(gp_regs) % 2);
+ gp_regs |= x1.bit();
+ CHECK_EQ(0, NumRegs(gp_regs) % 2);
+
+ RegList fp_regs = 0;
+ for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
+ fp_regs |= fp_param_reg.bit();
+ }
+
+ CHECK_EQ(NumRegs(gp_regs), arraysize(wasm::kGpParamRegisters) + 1);
+ CHECK_EQ(NumRegs(fp_regs), arraysize(wasm::kFpParamRegisters));
+ CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
+ NumRegs(gp_regs));
+ CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
+ NumRegs(fp_regs));
+
__ PushXRegList(gp_regs);
- __ PushDRegList(fp_regs);
+ __ PushQRegList(fp_regs);
// Pass instance and function index as explicit arguments to the runtime
// function.
@@ -2955,7 +2972,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ Mov(x17, kReturnRegister0);
// Restore registers.
- __ PopDRegList(fp_regs);
+ __ PopQRegList(fp_regs);
__ PopXRegList(gp_regs);
}
// Finally, jump to the entrypoint.
@@ -3182,6 +3199,15 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// underlying register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
+ {
+ // Clear c_entry_fp, like we do in `LeaveExitFrame`.
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.AcquireX();
+ __ Mov(scratch, ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, masm->isolate()));
+ __ Str(xzr, MemOperand(scratch));
+ }
+
// Compute the handler entry address and jump to it. We use x17 here for the
// jump target, as this jump can occasionally end up at the start of
// InterpreterEnterBytecodeDispatch, which when CFI is enabled starts with
@@ -3621,7 +3647,7 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
// making the call GC safe. The irregexp backend relies on this.
__ Poke<TurboAssembler::kSignLR>(lr, 0); // Store the return address.
- __ Blr(x10); // Call the C++ function.
+ __ Blr(x10); // Call the C++ function.
__ Peek<TurboAssembler::kAuthLR>(lr, 0); // Return to calling code.
__ AssertFPCRState();
__ Ret();
@@ -3818,12 +3844,10 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ Lsr(unwind_limit, unwind_limit, kSystemPointerSizeLog2);
__ Mov(x5, unwind_limit);
__ CopyDoubleWords(x3, x1, x5);
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// Since {unwind_limit} is the frame size up to the parameter count, we might
// end up with a unaligned stack pointer. This is later recovered when
// setting the stack pointer to {caller_frame_top_offset}.
__ Bic(unwind_limit, unwind_limit, 1);
-#endif
__ Drop(unwind_limit);
// Compute the output frame in the deoptimizer.
diff --git a/deps/v8/src/builtins/array-join.tq b/deps/v8/src/builtins/array-join.tq
index 23266c4e5a..3d76ff851d 100644
--- a/deps/v8/src/builtins/array-join.tq
+++ b/deps/v8/src/builtins/array-join.tq
@@ -163,7 +163,7 @@ struct Buffer {
const nofSeparatorsInt: intptr = nofSeparators;
const sepsLen: intptr = separatorLength * nofSeparatorsInt;
// Detect integer overflow
- // TODO(tebbi): Replace with overflow-checked multiplication.
+ // TODO(turbofan): Replace with overflow-checked multiplication.
if (sepsLen / separatorLength != nofSeparatorsInt) deferred {
ThrowInvalidStringLength(context);
}
diff --git a/deps/v8/src/builtins/array-reverse.tq b/deps/v8/src/builtins/array-reverse.tq
index fe9df2b9b5..b154483d06 100644
--- a/deps/v8/src/builtins/array-reverse.tq
+++ b/deps/v8/src/builtins/array-reverse.tq
@@ -32,7 +32,7 @@ macro StoreElement<ElementsAccessor : type extends ElementsKind, T: type>(
StoreElement<array::FastPackedSmiElements, Smi>(implicit context: Context)(
elements: FixedArrayBase, index: Smi, value: Smi) {
const elems: FixedArray = UnsafeCast<FixedArray>(elements);
- StoreFixedArrayElement(elems, index, value, SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(elems, index, value);
}
StoreElement<array::FastPackedObjectElements, JSAny>(implicit context: Context)(
diff --git a/deps/v8/src/builtins/array-slice.tq b/deps/v8/src/builtins/array-slice.tq
index 7b82f2bda3..435431f49d 100644
--- a/deps/v8/src/builtins/array-slice.tq
+++ b/deps/v8/src/builtins/array-slice.tq
@@ -66,7 +66,7 @@ macro HandleFastAliasedSloppyArgumentsSlice(
unmappedElements.objects[current]);
// It is safe to skip the write barrier here because resultElements was
// allocated together with result in a folded allocation.
- // TODO(tebbi): The verification of this fails at the moment due to
+ // TODO(turbofan): The verification of this fails at the moment due to
// missing load elimination.
StoreFixedArrayElement(
resultElements, indexOut++, newElement, UNSAFE_SKIP_WRITE_BARRIER);
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index c8499caf6f..cfdc7cc98d 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -70,9 +70,9 @@ macro WeakToStrong<T: type>(x: Weak<T>): T labels ClearedWeakPointer {
// Doesn't include PrivateSymbol.
type PropertyKey = String|PublicSymbol;
-// TODO(tebbi): PrivateSymbol is only exposed to JavaScript through the debugger
-// API. We should reconsider this and try not to expose it at all. Then JSAny
-// would not need to contain it.
+// TODO(turbofan): PrivateSymbol is only exposed to JavaScript through the
+// debugger API. We should reconsider this and try not to expose it at all. Then
+// JSAny would not need to contain it.
// A JavaScript primitive value as defined in
// https://tc39.es/ecma262/#sec-primitive-value.
@@ -112,6 +112,32 @@ type bool generates 'TNode<BoolT>' constexpr 'bool';
type bint generates 'TNode<BInt>' constexpr 'BInt';
type string constexpr 'const char*';
+// Represents a std::function which produces the generated TNode type of T.
+// Useful for passing values to and from CSA code that uses LazyNode<T>, which
+// is a typedef for std::function<TNode<T>()>. Can be created with %MakeLazy and
+// accessed with RunLazy.
+type Lazy<T: type>;
+
+// Makes a Lazy. The first parameter is the name of a macro, which is looked up
+// in the context where %MakeLazy is called, as a workaround for the fact that
+// macros can't be used as values directly. The other parameters are saved and
+// passed to the macro when somebody runs the resulting Lazy object. Torque
+// syntax doesn't allow for arbitrary-length generic macros, but the internals
+// support any number of parameters, so if you need more parameters, feel free
+// to add additional declarations here.
+intrinsic %MakeLazy<T: type>(getter: constexpr string): Lazy<T>;
+intrinsic %MakeLazy<T: type, A1: type>(
+ getter: constexpr string, arg1: A1): Lazy<T>;
+intrinsic %MakeLazy<T: type, A1: type, A2: type>(
+ getter: constexpr string, arg1: A1, arg2: A2): Lazy<T>;
+intrinsic %MakeLazy<T: type, A1: type, A2: type, A3: type>(
+ getter: constexpr string, arg1: A1, arg2: A2, arg3: A3): Lazy<T>;
+
+// Executes a Lazy and returns the result. The CSA-side definition is a
+// template, but Torque doesn't understand how to use templates for extern
+// macros, so just add whatever overload definitions you need here.
+extern macro RunLazy(Lazy<Smi>): Smi;
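+//
+// A minimal usage sketch (illustrative only; 'GetLazySmi' below is a
+// hypothetical macro, not part of this change):
+//   macro GetLazySmi(): Smi { return 3; }
+//   const lazySmi: Lazy<Smi> = %MakeLazy<Smi>('GetLazySmi');
+//   const three: Smi = RunLazy(lazySmi);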
+
// A Smi value containing a bitfield struct as its integer data.
@useParentTypeChecker type SmiTagged<T : type extends uint31> extends Smi;
@@ -228,6 +254,10 @@ type Callable = JSFunction|JSBoundFunction|CallableJSProxy|CallableApiObject;
type WriteBarrierMode
generates 'TNode<Int32T>' constexpr 'WriteBarrierMode';
+extern enum UpdateFeedbackMode { kOptionalFeedback, kGuaranteedFeedback }
+extern operator '==' macro UpdateFeedbackModeEqual(
+ constexpr UpdateFeedbackMode, constexpr UpdateFeedbackMode): constexpr bool;
+
extern enum UnicodeEncoding { UTF16, UTF32 }
// Promise constants
@@ -237,8 +267,6 @@ extern enum PromiseState extends int31 constexpr 'Promise::PromiseState' {
kRejected
}
-type FrameArray extends FixedArray;
-
const kTaggedSize: constexpr int31 generates 'kTaggedSize';
const kDoubleSize: constexpr int31 generates 'kDoubleSize';
const kVariableSizeSentinel:
@@ -430,6 +458,8 @@ const kNameDictionaryInitialCapacity:
constexpr int32 generates 'NameDictionary::kInitialCapacity';
const kOrderedNameDictionaryInitialCapacity:
constexpr int32 generates 'OrderedNameDictionary::kInitialCapacity';
+const kSwissNameDictionaryGroupWidth:
+ constexpr int32 generates 'SwissNameDictionary::kGroupWidth';
const kWasmArrayHeaderSize:
constexpr int32 generates 'WasmArray::kHeaderSize';
@@ -463,6 +493,7 @@ extern macro NameStringConstant(): String;
extern macro NullConstant(): Null;
extern macro NumberStringConstant(): String;
extern macro ReturnStringConstant(): String;
+extern macro SearchSymbolConstant(): Symbol;
extern macro StringStringConstant(): String;
extern macro TheHoleConstant(): TheHole;
extern macro ToPrimitiveSymbolConstant(): PublicSymbol;
@@ -1301,7 +1332,7 @@ macro CheckIntegerIndexAdditionOverflow(
}
}
-// TODO(tebbi): Define enum here once they appear in Torque.
+// TODO(turbofan): Define an enum here once enums appear in Torque.
//
// The value is a SafeInteger that fits into uintptr range, so no bounds checks
// are necessary.
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 20277acbbc..feaa733031 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -178,7 +178,7 @@ void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody(
BIND(&*it);
Label done(this);
source_elements_kind_ = static_cast<ElementsKind>(elements_kinds[i]);
- // TODO(tebbi): Silently cancelling the loop on buffer detachment is a
+ // TODO(turbofan): Silently cancelling the loop on buffer detachment is a
// spec violation. Should go to &throw_detached and throw a TypeError
// instead.
VisitAllTypedArrayElements(array_buffer, processor, &done, direction,
@@ -242,7 +242,9 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
BIND(&fast);
{
TNode<JSArray> array_receiver = CAST(receiver);
- TNode<IntPtrT> length = SmiUntag(LoadFastJSArrayLength(array_receiver));
+ CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array_receiver)));
+ TNode<IntPtrT> length =
+ LoadAndUntagObjectField(array_receiver, JSArray::kLengthOffset);
Label return_undefined(this), fast_elements(this);
// 2) Ensure that the length is writable.
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index 3270ccbcf8..0c3707cee4 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -113,7 +113,7 @@ inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate,
// Adding elements to the array prototype would break code that makes sure
// it has no elements. Handle that elsewhere.
- if (isolate->IsAnyInitialArrayPrototype(array)) return false;
+ if (isolate->IsAnyInitialArrayPrototype(*array)) return false;
// Need to ensure that the arguments passed in args can be contained in
// the array.
@@ -609,7 +609,7 @@ BUILTIN(ArrayUnshift) {
DCHECK(array->map().is_extensible());
DCHECK(!IsDictionaryElementsKind(array->GetElementsKind()));
DCHECK(IsJSArrayFastElementMovingAllowed(isolate, *array));
- DCHECK(!isolate->IsAnyInitialArrayPrototype(array));
+ DCHECK(!isolate->IsAnyInitialArrayPrototype(*array));
MatchArrayElementsKindToArguments(isolate, array, &args, 1,
args.length() - 1);
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index c847d838b6..374b13dd63 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -242,10 +242,10 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait(bool is_catchable) {
TNode<JSPromise> outer_promise = LoadObjectField<JSPromise>(
request, AsyncGeneratorRequest::kPromiseOffset);
- SetGeneratorAwaiting(async_generator_object);
Await(context, async_generator_object, value, outer_promise,
AsyncGeneratorAwaitResolveSharedFunConstant(),
AsyncGeneratorAwaitRejectSharedFunConstant(), is_catchable);
+ SetGeneratorAwaiting(async_generator_object);
Return(UndefinedConstant());
}
@@ -570,10 +570,10 @@ TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) {
const TNode<JSPromise> outer_promise =
LoadPromiseFromAsyncGeneratorRequest(request);
- SetGeneratorAwaiting(generator);
Await(context, generator, value, outer_promise,
AsyncGeneratorYieldResolveSharedFunConstant(),
AsyncGeneratorAwaitRejectSharedFunConstant(), is_caught);
+ SetGeneratorAwaiting(generator);
Return(UndefinedConstant());
}
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index ffe7aa40e9..664f57aadb 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -64,16 +64,48 @@ void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) {
masm->isolate()->builtins()->CallFunction());
}
+TF_BUILTIN(Call_ReceiverIsNullOrUndefined_Baseline,
+ CallOrConstructBuiltinsAssembler) {
+ auto target = Parameter<Object>(Descriptor::kFunction);
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto context = LoadContextFromBaseline();
+ auto feedback_vector = LoadFeedbackVectorFromBaseline();
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
+ CollectCallFeedback(target, context, feedback_vector, slot);
+ TailCallBuiltin(Builtins::kCall_ReceiverIsNullOrUndefined, context, target,
+ argc);
+}
+
+TF_BUILTIN(Call_ReceiverIsNotNullOrUndefined_Baseline,
+ CallOrConstructBuiltinsAssembler) {
+ auto target = Parameter<Object>(Descriptor::kFunction);
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto context = LoadContextFromBaseline();
+ auto feedback_vector = LoadFeedbackVectorFromBaseline();
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
+ CollectCallFeedback(target, context, feedback_vector, slot);
+ TailCallBuiltin(Builtins::kCall_ReceiverIsNotNullOrUndefined, context, target,
+ argc);
+}
+
+TF_BUILTIN(Call_ReceiverIsAny_Baseline, CallOrConstructBuiltinsAssembler) {
+ auto target = Parameter<Object>(Descriptor::kFunction);
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto context = LoadContextFromBaseline();
+ auto feedback_vector = LoadFeedbackVectorFromBaseline();
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
+ CollectCallFeedback(target, context, feedback_vector, slot);
+ TailCallBuiltin(Builtins::kCall_ReceiverIsAny, context, target, argc);
+}
+
TF_BUILTIN(Call_ReceiverIsNullOrUndefined_WithFeedback,
CallOrConstructBuiltinsAssembler) {
auto target = Parameter<Object>(Descriptor::kFunction);
auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
auto context = Parameter<Context>(Descriptor::kContext);
- auto maybe_feedback_vector =
- Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
- auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
- CollectCallFeedback(target, context, maybe_feedback_vector,
- Unsigned(ChangeInt32ToIntPtr(slot)));
+ auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
+ CollectCallFeedback(target, context, feedback_vector, slot);
TailCallBuiltin(Builtins::kCall_ReceiverIsNullOrUndefined, context, target,
argc);
}
@@ -83,11 +115,9 @@ TF_BUILTIN(Call_ReceiverIsNotNullOrUndefined_WithFeedback,
auto target = Parameter<Object>(Descriptor::kFunction);
auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
auto context = Parameter<Context>(Descriptor::kContext);
- auto maybe_feedback_vector =
- Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
- auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
- CollectCallFeedback(target, context, maybe_feedback_vector,
- Unsigned(ChangeInt32ToIntPtr(slot)));
+ auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
+ CollectCallFeedback(target, context, feedback_vector, slot);
TailCallBuiltin(Builtins::kCall_ReceiverIsNotNullOrUndefined, context, target,
argc);
}
@@ -96,11 +126,9 @@ TF_BUILTIN(Call_ReceiverIsAny_WithFeedback, CallOrConstructBuiltinsAssembler) {
auto target = Parameter<Object>(Descriptor::kFunction);
auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
auto context = Parameter<Context>(Descriptor::kContext);
- auto maybe_feedback_vector =
- Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
- auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
- CollectCallFeedback(target, context, maybe_feedback_vector,
- Unsigned(ChangeInt32ToIntPtr(slot)));
+ auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
+ CollectCallFeedback(target, context, feedback_vector, slot);
TailCallBuiltin(Builtins::kCall_ReceiverIsAny, context, target, argc);
}
@@ -434,11 +462,9 @@ TF_BUILTIN(CallWithArrayLike_WithFeedback, CallOrConstructBuiltinsAssembler) {
base::Optional<TNode<Object>> new_target = base::nullopt;
auto arguments_list = Parameter<Object>(Descriptor::kArgumentsList);
auto context = Parameter<Context>(Descriptor::kContext);
- auto maybe_feedback_vector =
- Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
- auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
- CollectCallFeedback(target, context, maybe_feedback_vector,
- Unsigned(ChangeInt32ToIntPtr(slot)));
+ auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
+ CollectCallFeedback(target, context, feedback_vector, slot);
CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
}
@@ -451,17 +477,27 @@ TF_BUILTIN(CallWithSpread, CallOrConstructBuiltinsAssembler) {
CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
+TF_BUILTIN(CallWithSpread_Baseline, CallOrConstructBuiltinsAssembler) {
+ auto target = Parameter<Object>(Descriptor::kTarget);
+ base::Optional<TNode<Object>> new_target = base::nullopt;
+ auto spread = Parameter<Object>(Descriptor::kSpread);
+ auto args_count = UncheckedParameter<Int32T>(Descriptor::kArgumentsCount);
+ auto context = LoadContextFromBaseline();
+ auto feedback_vector = LoadFeedbackVectorFromBaseline();
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
+ CollectCallFeedback(target, context, feedback_vector, slot);
+ CallOrConstructWithSpread(target, new_target, spread, args_count, context);
+}
+
TF_BUILTIN(CallWithSpread_WithFeedback, CallOrConstructBuiltinsAssembler) {
auto target = Parameter<Object>(Descriptor::kTarget);
base::Optional<TNode<Object>> new_target = base::nullopt;
auto spread = Parameter<Object>(Descriptor::kSpread);
auto args_count = UncheckedParameter<Int32T>(Descriptor::kArgumentsCount);
auto context = Parameter<Context>(Descriptor::kContext);
- auto maybe_feedback_vector =
- Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
- auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
- CollectCallFeedback(target, context, maybe_feedback_vector,
- Unsigned(ChangeInt32ToIntPtr(slot)));
+ auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
+ CollectCallFeedback(target, context, feedback_vector, slot);
CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
@@ -517,8 +553,7 @@ TNode<JSReceiver> CallOrConstructBuiltinsAssembler::GetCompatibleReceiver(
//
var_template = CAST(constructor);
TNode<Uint16T> template_type = LoadInstanceType(var_template.value());
- GotoIf(InstanceTypeEqual(template_type, JS_FUNCTION_TYPE),
- &template_from_closure);
+ GotoIf(IsJSFunctionInstanceType(template_type), &template_from_closure);
Branch(InstanceTypeEqual(template_type, MAP_TYPE), &template_map_loop,
&template_loop);
}
diff --git a/deps/v8/src/builtins/builtins-call-gen.h b/deps/v8/src/builtins/builtins-call-gen.h
index d54e4405e0..c938662d5e 100644
--- a/deps/v8/src/builtins/builtins-call-gen.h
+++ b/deps/v8/src/builtins/builtins-call-gen.h
@@ -40,6 +40,17 @@ class CallOrConstructBuiltinsAssembler : public CodeStubAssembler {
TNode<FunctionTemplateInfo> function_template_info,
TNode<IntPtrT> argc, TNode<Context> context);
+ void BuildConstruct(TNode<Object> target, TNode<Object> new_target,
+ TNode<Int32T> argc, const LazyNode<Context>& context,
+ const LazyNode<HeapObject>& feedback_vector,
+ TNode<UintPtrT> slot, UpdateFeedbackMode mode);
+
+ void BuildConstructWithSpread(TNode<Object> target, TNode<Object> new_target,
+ TNode<Object> spread, TNode<Int32T> argc,
+ const LazyNode<Context>& context,
+ const LazyNode<HeapObject>& feedback_vector,
+ TNode<UintPtrT> slot, UpdateFeedbackMode mode);
+
private:
TNode<JSReceiver> GetCompatibleReceiver(TNode<JSReceiver> receiver,
TNode<HeapObject> signature,
diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc
index 63e4d7a572..5c32e04f32 100644
--- a/deps/v8/src/builtins/builtins-callsite.cc
+++ b/deps/v8/src/builtins/builtins-callsite.cc
@@ -6,227 +6,173 @@
#include "src/builtins/builtins.h"
#include "src/heap/heap-inl.h" // For ToBoolean.
#include "src/logging/counters.h"
-#include "src/objects/frame-array-inl.h"
#include "src/objects/objects-inl.h"
-#include "src/objects/stack-frame-info.h"
+#include "src/objects/stack-frame-info-inl.h"
namespace v8 {
namespace internal {
-#define CHECK_CALLSITE(recv, method) \
- CHECK_RECEIVER(JSObject, recv, method); \
- if (!JSReceiver::HasOwnProperty( \
- recv, isolate->factory()->call_site_frame_array_symbol()) \
- .FromMaybe(false)) { \
+#define CHECK_CALLSITE(frame, method) \
+ CHECK_RECEIVER(JSObject, receiver, method); \
+ LookupIterator it(isolate, receiver, \
+ isolate->factory()->call_site_frame_info_symbol(), \
+ LookupIterator::OWN_SKIP_INTERCEPTOR); \
+ if (it.state() != LookupIterator::DATA) { \
THROW_NEW_ERROR_RETURN_FAILURE( \
isolate, \
NewTypeError(MessageTemplate::kCallSiteMethod, \
isolate->factory()->NewStringFromAsciiChecked(method))); \
- }
-
+ } \
+ Handle<StackFrameInfo> frame = Handle<StackFrameInfo>::cast(it.GetDataValue())
namespace {
Object PositiveNumberOrNull(int value, Isolate* isolate) {
- if (value >= 0) return *isolate->factory()->NewNumberFromInt(value);
+ if (value > 0) return *isolate->factory()->NewNumberFromInt(value);
return ReadOnlyRoots(isolate).null_value();
}
-Handle<FrameArray> GetFrameArray(Isolate* isolate, Handle<JSObject> object) {
- Handle<Object> frame_array_obj = JSObject::GetDataProperty(
- object, isolate->factory()->call_site_frame_array_symbol());
- return Handle<FrameArray>::cast(frame_array_obj);
-}
-
-int GetFrameIndex(Isolate* isolate, Handle<JSObject> object) {
- Handle<Object> frame_index_obj = JSObject::GetDataProperty(
- object, isolate->factory()->call_site_frame_index_symbol());
- return Smi::ToInt(*frame_index_obj);
-}
-
} // namespace
BUILTIN(CallSitePrototypeGetColumnNumber) {
HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "getColumnNumber");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- return PositiveNumberOrNull(it.Frame()->GetColumnNumber(), isolate);
+ CHECK_CALLSITE(frame, "getColumnNumber");
+ return PositiveNumberOrNull(StackFrameInfo::GetColumnNumber(frame), isolate);
}
BUILTIN(CallSitePrototypeGetEnclosingColumnNumber) {
HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "getEnclosingColumnNumber");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- return PositiveNumberOrNull(it.Frame()->GetEnclosingColumnNumber(), isolate);
+ CHECK_CALLSITE(frame, "getEnclosingColumnNumber");
+ return PositiveNumberOrNull(StackFrameInfo::GetEnclosingColumnNumber(frame),
+ isolate);
}
BUILTIN(CallSitePrototypeGetEnclosingLineNumber) {
HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "getEnclosingLineNumber");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- return PositiveNumberOrNull(it.Frame()->GetEnclosingLineNumber(), isolate);
+ CHECK_CALLSITE(frame, "getEnclosingLineNumber");
+ return PositiveNumberOrNull(StackFrameInfo::GetEnclosingLineNumber(frame),
+ isolate);
}
BUILTIN(CallSitePrototypeGetEvalOrigin) {
HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "getEvalOrigin");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- return *it.Frame()->GetEvalOrigin();
+ CHECK_CALLSITE(frame, "getEvalOrigin");
+ return *StackFrameInfo::GetEvalOrigin(frame);
}
BUILTIN(CallSitePrototypeGetFileName) {
HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "getFileName");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- return *it.Frame()->GetFileName();
+ CHECK_CALLSITE(frame, "getFileName");
+ return frame->GetScriptName();
}
BUILTIN(CallSitePrototypeGetFunction) {
HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "getFunction");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
-
- StackFrameBase* frame = it.Frame();
+ CHECK_CALLSITE(frame, "getFunction");
if (frame->IsStrict() ||
- (frame->GetFunction()->IsJSFunction() &&
- JSFunction::cast(*frame->GetFunction()).shared().is_toplevel())) {
+ (frame->function().IsJSFunction() &&
+ JSFunction::cast(frame->function()).shared().is_toplevel())) {
return ReadOnlyRoots(isolate).undefined_value();
}
-
isolate->CountUsage(v8::Isolate::kCallSiteAPIGetFunctionSloppyCall);
-
- return *frame->GetFunction();
+ return frame->function();
}
BUILTIN(CallSitePrototypeGetFunctionName) {
HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "getFunctionName");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- return *it.Frame()->GetFunctionName();
+ CHECK_CALLSITE(frame, "getFunctionName");
+ return *StackFrameInfo::GetFunctionName(frame);
}
BUILTIN(CallSitePrototypeGetLineNumber) {
HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "getLineNumber");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- return PositiveNumberOrNull(it.Frame()->GetLineNumber(), isolate);
+ CHECK_CALLSITE(frame, "getLineNumber");
+ return PositiveNumberOrNull(StackFrameInfo::GetLineNumber(frame), isolate);
}
BUILTIN(CallSitePrototypeGetMethodName) {
HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "getMethodName");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- return *it.Frame()->GetMethodName();
+ CHECK_CALLSITE(frame, "getMethodName");
+ return *StackFrameInfo::GetMethodName(frame);
}
BUILTIN(CallSitePrototypeGetPosition) {
HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "getPosition");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- return Smi::FromInt(it.Frame()->GetPosition());
+ CHECK_CALLSITE(frame, "getPosition");
+ return Smi::FromInt(StackFrameInfo::GetSourcePosition(frame));
}
BUILTIN(CallSitePrototypeGetPromiseIndex) {
HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "getPromiseIndex");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- return PositiveNumberOrNull(it.Frame()->GetPromiseIndex(), isolate);
+ CHECK_CALLSITE(frame, "getPromiseIndex");
+ if (!frame->IsPromiseAll() && !frame->IsPromiseAny()) {
+ return ReadOnlyRoots(isolate).null_value();
+ }
+ return Smi::FromInt(StackFrameInfo::GetSourcePosition(frame));
}
BUILTIN(CallSitePrototypeGetScriptNameOrSourceURL) {
HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "getScriptNameOrSourceUrl");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- return *it.Frame()->GetScriptNameOrSourceUrl();
+ CHECK_CALLSITE(frame, "getScriptNameOrSourceUrl");
+ return frame->GetScriptNameOrSourceURL();
}
BUILTIN(CallSitePrototypeGetThis) {
HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "getThis");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
-
- StackFrameBase* frame = it.Frame();
+ CHECK_CALLSITE(frame, "getThis");
if (frame->IsStrict()) return ReadOnlyRoots(isolate).undefined_value();
-
isolate->CountUsage(v8::Isolate::kCallSiteAPIGetThisSloppyCall);
-
- return *frame->GetReceiver();
+ if (frame->IsAsmJsWasm()) {
+ return frame->GetWasmInstance().native_context().global_proxy();
+ }
+ return frame->receiver_or_instance();
}
BUILTIN(CallSitePrototypeGetTypeName) {
HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "getTypeName");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- return *it.Frame()->GetTypeName();
+ CHECK_CALLSITE(frame, "getTypeName");
+ return *StackFrameInfo::GetTypeName(frame);
}
BUILTIN(CallSitePrototypeIsAsync) {
HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "isAsync");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- return isolate->heap()->ToBoolean(it.Frame()->IsAsync());
+ CHECK_CALLSITE(frame, "isAsync");
+ return isolate->heap()->ToBoolean(frame->IsAsync());
}
BUILTIN(CallSitePrototypeIsConstructor) {
HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "isConstructor");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- return isolate->heap()->ToBoolean(it.Frame()->IsConstructor());
+ CHECK_CALLSITE(frame, "isConstructor");
+ return isolate->heap()->ToBoolean(frame->IsConstructor());
}
BUILTIN(CallSitePrototypeIsEval) {
HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "isEval");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- return isolate->heap()->ToBoolean(it.Frame()->IsEval());
+ CHECK_CALLSITE(frame, "isEval");
+ return isolate->heap()->ToBoolean(frame->IsEval());
}
BUILTIN(CallSitePrototypeIsNative) {
HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "isNative");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- return isolate->heap()->ToBoolean(it.Frame()->IsNative());
+ CHECK_CALLSITE(frame, "isNative");
+ return isolate->heap()->ToBoolean(frame->IsNative());
}
BUILTIN(CallSitePrototypeIsPromiseAll) {
HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "isPromiseAll");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- return isolate->heap()->ToBoolean(it.Frame()->IsPromiseAll());
+ CHECK_CALLSITE(frame, "isPromiseAll");
+ return isolate->heap()->ToBoolean(frame->IsPromiseAll());
}
BUILTIN(CallSitePrototypeIsToplevel) {
HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "isToplevel");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- return isolate->heap()->ToBoolean(it.Frame()->IsToplevel());
+ CHECK_CALLSITE(frame, "isToplevel");
+ return isolate->heap()->ToBoolean(frame->IsToplevel());
}
BUILTIN(CallSitePrototypeToString) {
HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "toString");
- Handle<StackTraceFrame> frame = isolate->factory()->NewStackTraceFrame(
- GetFrameArray(isolate, recv), GetFrameIndex(isolate, recv));
- RETURN_RESULT_OR_FAILURE(isolate, SerializeStackTraceFrame(isolate, frame));
+ CHECK_CALLSITE(frame, "toString");
+ RETURN_RESULT_OR_FAILURE(isolate, SerializeStackFrameInfo(isolate, frame));
}
#undef CHECK_CALLSITE
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index e75514d786..e268ac868b 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -37,28 +37,51 @@ void Builtins::Generate_ConstructFunctionForwardVarargs(MacroAssembler* masm) {
BUILTIN_CODE(masm->isolate(), ConstructFunction));
}
+TF_BUILTIN(Construct_Baseline, CallOrConstructBuiltinsAssembler) {
+ auto target = Parameter<Object>(Descriptor::kTarget);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
+
+ BuildConstruct(
+ target, new_target, argc, [=] { return LoadContextFromBaseline(); },
+ [=] { return LoadFeedbackVectorFromBaseline(); }, slot,
+ UpdateFeedbackMode::kGuaranteedFeedback);
+}
+
TF_BUILTIN(Construct_WithFeedback, CallOrConstructBuiltinsAssembler) {
auto target = Parameter<Object>(Descriptor::kTarget);
auto new_target = Parameter<Object>(Descriptor::kNewTarget);
auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
auto context = Parameter<Context>(Descriptor::kContext);
- auto maybe_feedback_vector =
- Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
- auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
+ auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
+
+ BuildConstruct(
+ target, new_target, argc, [=] { return context; },
+ [=] { return feedback_vector; }, slot,
+ UpdateFeedbackMode::kOptionalFeedback);
+}
+void CallOrConstructBuiltinsAssembler::BuildConstruct(
+ TNode<Object> target, TNode<Object> new_target, TNode<Int32T> argc,
+ const LazyNode<Context>& context,
+ const LazyNode<HeapObject>& feedback_vector, TNode<UintPtrT> slot,
+ UpdateFeedbackMode mode) {
TVARIABLE(AllocationSite, allocation_site);
Label if_construct_generic(this), if_construct_array(this);
- CollectConstructFeedback(context, target, new_target, maybe_feedback_vector,
- Unsigned(ChangeInt32ToIntPtr(slot)),
- &if_construct_generic, &if_construct_array,
- &allocation_site);
+ TNode<Context> eager_context = context();
+ CollectConstructFeedback(eager_context, target, new_target, feedback_vector(),
+ slot, mode, &if_construct_generic,
+ &if_construct_array, &allocation_site);
BIND(&if_construct_generic);
- TailCallBuiltin(Builtins::kConstruct, context, target, new_target, argc);
+ TailCallBuiltin(Builtins::kConstruct, eager_context, target, new_target,
+ argc);
BIND(&if_construct_array);
- TailCallBuiltin(Builtins::kArrayConstructorImpl, context, target, new_target,
- argc, allocation_site.value());
+ TailCallBuiltin(Builtins::kArrayConstructorImpl, eager_context, target,
+ new_target, argc, allocation_site.value());
}
TF_BUILTIN(ConstructWithArrayLike, CallOrConstructBuiltinsAssembler) {
@@ -75,14 +98,13 @@ TF_BUILTIN(ConstructWithArrayLike_WithFeedback,
auto new_target = Parameter<Object>(Descriptor::kNewTarget);
auto arguments_list = Parameter<Object>(Descriptor::kArgumentsList);
auto context = Parameter<Context>(Descriptor::kContext);
- auto maybe_feedback_vector =
- Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
- auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
+ auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
TVARIABLE(AllocationSite, allocation_site);
Label if_construct_generic(this), if_construct_array(this);
- CollectConstructFeedback(context, target, new_target, maybe_feedback_vector,
- Unsigned(ChangeInt32ToIntPtr(slot)),
+ CollectConstructFeedback(context, target, new_target, feedback_vector, slot,
+ UpdateFeedbackMode::kOptionalFeedback,
&if_construct_generic, &if_construct_array,
&allocation_site);
@@ -103,6 +125,20 @@ TF_BUILTIN(ConstructWithSpread, CallOrConstructBuiltinsAssembler) {
CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
+TF_BUILTIN(ConstructWithSpread_Baseline, CallOrConstructBuiltinsAssembler) {
+ auto target = Parameter<Object>(Descriptor::kTarget);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
+ auto spread = Parameter<Object>(Descriptor::kSpread);
+ auto args_count =
+ UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
+ return BuildConstructWithSpread(
+ target, new_target, spread, args_count,
+ [=] { return LoadContextFromBaseline(); },
+ [=] { return LoadFeedbackVectorFromBaseline(); }, slot,
+ UpdateFeedbackMode::kGuaranteedFeedback);
+}
+
TF_BUILTIN(ConstructWithSpread_WithFeedback, CallOrConstructBuiltinsAssembler) {
auto target = Parameter<Object>(Descriptor::kTarget);
auto new_target = Parameter<Object>(Descriptor::kNewTarget);
@@ -110,14 +146,25 @@ TF_BUILTIN(ConstructWithSpread_WithFeedback, CallOrConstructBuiltinsAssembler) {
auto args_count =
UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
auto context = Parameter<Context>(Descriptor::kContext);
- auto maybe_feedback_vector =
- Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
- auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
+ auto feedback_vector = Parameter<HeapObject>(Descriptor::kFeedbackVector);
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
+ return BuildConstructWithSpread(
+ target, new_target, spread, args_count, [=] { return context; },
+ [=] { return feedback_vector; }, slot,
+ UpdateFeedbackMode::kGuaranteedFeedback);
+}
+
+void CallOrConstructBuiltinsAssembler::BuildConstructWithSpread(
+ TNode<Object> target, TNode<Object> new_target, TNode<Object> spread,
+ TNode<Int32T> argc, const LazyNode<Context>& context,
+ const LazyNode<HeapObject>& feedback_vector, TNode<UintPtrT> slot,
+ UpdateFeedbackMode mode) {
TVARIABLE(AllocationSite, allocation_site);
Label if_construct_generic(this), if_construct_array(this);
- CollectConstructFeedback(context, target, new_target, maybe_feedback_vector,
- Unsigned(ChangeInt32ToIntPtr(slot)),
+ TNode<Context> eager_context = context();
+ CollectConstructFeedback(eager_context, target, new_target, feedback_vector(),
+ slot, UpdateFeedbackMode::kGuaranteedFeedback,
&if_construct_generic, &if_construct_array,
&allocation_site);
@@ -125,7 +172,7 @@ TF_BUILTIN(ConstructWithSpread_WithFeedback, CallOrConstructBuiltinsAssembler) {
Goto(&if_construct_generic); // Not implemented.
BIND(&if_construct_generic);
- CallOrConstructWithSpread(target, new_target, spread, args_count, context);
+ CallOrConstructWithSpread(target, new_target, spread, argc, eager_context);
}
using Node = compiler::Node;
@@ -358,15 +405,49 @@ TNode<JSRegExp> ConstructorBuiltinsAssembler::CreateRegExpLiteral(
CAST(LoadFeedbackVectorSlot(feedback_vector, slot));
GotoIfNot(HasBoilerplate(literal_site), &call_runtime);
{
- TNode<JSRegExp> boilerplate = CAST(literal_site);
- int size =
- JSRegExp::kHeaderSize + JSRegExp::kInObjectFieldCount * kTaggedSize;
- TNode<HeapObject> copy = Allocate(size);
- for (int offset = 0; offset < size; offset += kTaggedSize) {
- TNode<Object> value = LoadObjectField(boilerplate, offset);
- StoreObjectFieldNoWriteBarrier(copy, offset, value);
- }
- result = CAST(copy);
+ STATIC_ASSERT(JSRegExp::kDataOffset == JSObject::kHeaderSize);
+ STATIC_ASSERT(JSRegExp::kSourceOffset ==
+ JSRegExp::kDataOffset + kTaggedSize);
+ STATIC_ASSERT(JSRegExp::kFlagsOffset ==
+ JSRegExp::kSourceOffset + kTaggedSize);
+ STATIC_ASSERT(JSRegExp::kHeaderSize ==
+ JSRegExp::kFlagsOffset + kTaggedSize);
+ STATIC_ASSERT(JSRegExp::kLastIndexOffset == JSRegExp::kHeaderSize);
+ DCHECK_EQ(JSRegExp::Size(), JSRegExp::kLastIndexOffset + kTaggedSize);
+
+ TNode<RegExpBoilerplateDescription> boilerplate = CAST(literal_site);
+ TNode<HeapObject> new_object = Allocate(JSRegExp::Size());
+
+ // Initialize Object fields.
+ TNode<JSFunction> regexp_function = CAST(LoadContextElement(
+ LoadNativeContext(context), Context::REGEXP_FUNCTION_INDEX));
+ TNode<Map> initial_map = CAST(LoadObjectField(
+ regexp_function, JSFunction::kPrototypeOrInitialMapOffset));
+ StoreMapNoWriteBarrier(new_object, initial_map);
+ // Initialize JSReceiver fields.
+ StoreObjectFieldRoot(new_object, JSReceiver::kPropertiesOrHashOffset,
+ RootIndex::kEmptyFixedArray);
+ // Initialize JSObject fields.
+ StoreObjectFieldRoot(new_object, JSObject::kElementsOffset,
+ RootIndex::kEmptyFixedArray);
+ // Initialize JSRegExp fields.
+ StoreObjectFieldNoWriteBarrier(
+ new_object, JSRegExp::kDataOffset,
+ LoadObjectField(boilerplate,
+ RegExpBoilerplateDescription::kDataOffset));
+ StoreObjectFieldNoWriteBarrier(
+ new_object, JSRegExp::kSourceOffset,
+ LoadObjectField(boilerplate,
+ RegExpBoilerplateDescription::kSourceOffset));
+ StoreObjectFieldNoWriteBarrier(
+ new_object, JSRegExp::kFlagsOffset,
+ LoadObjectField(boilerplate,
+ RegExpBoilerplateDescription::kFlagsOffset));
+ StoreObjectFieldNoWriteBarrier(
+ new_object, JSRegExp::kLastIndexOffset,
+ SmiConstant(JSRegExp::kInitialLastIndexValue));
+
+ result = CAST(new_object);
Goto(&end);
}
@@ -550,39 +631,26 @@ TNode<HeapObject> ConstructorBuiltinsAssembler::CreateShallowObjectLiteral(
// Copy over in-object properties.
Label continue_with_write_barrier(this), done_init(this);
TVARIABLE(IntPtrT, offset, IntPtrConstant(JSObject::kHeaderSize));
- // Heap numbers are only mutable on 32-bit platforms.
- bool may_use_mutable_heap_numbers = !FLAG_unbox_double_fields;
{
Comment("Copy in-object properties fast");
Label continue_fast(this, &offset);
Branch(IntPtrEqual(offset.value(), instance_size), &done_init,
&continue_fast);
BIND(&continue_fast);
- if (may_use_mutable_heap_numbers) {
- TNode<Object> field = LoadObjectField(boilerplate, offset.value());
- Label store_field(this);
- GotoIf(TaggedIsSmi(field), &store_field);
- // TODO(leszeks): Read the field descriptor to decide if this heap
- // number is mutable or not.
- GotoIf(IsHeapNumber(CAST(field)), &continue_with_write_barrier);
- Goto(&store_field);
- BIND(&store_field);
- StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
- } else {
- // Copy fields as raw data.
- TNode<TaggedT> field =
- LoadObjectField<TaggedT>(boilerplate, offset.value());
- StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
- }
+ TNode<Object> field = LoadObjectField(boilerplate, offset.value());
+ Label store_field(this);
+ GotoIf(TaggedIsSmi(field), &store_field);
+ // TODO(leszeks): Read the field descriptor to decide if this heap
+ // number is mutable or not.
+ GotoIf(IsHeapNumber(CAST(field)), &continue_with_write_barrier);
+ Goto(&store_field);
+ BIND(&store_field);
+ StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
offset = IntPtrAdd(offset.value(), IntPtrConstant(kTaggedSize));
Branch(WordNotEqual(offset.value(), instance_size), &continue_fast,
&done_init);
}
- if (!may_use_mutable_heap_numbers) {
- BIND(&done_init);
- return copy;
- }
// Continue initializing the literal after seeing the first sub-object
// potentially causing allocation. In this case we prepare the new literal
// by copying all pending fields over from the boilerplate and emit full
@@ -626,7 +694,6 @@ void ConstructorBuiltinsAssembler::CopyMutableHeapNumbersInObject(
TNode<IntPtrT> end_offset) {
// Iterate over all object properties of a freshly copied object and
// duplicate mutable heap numbers.
- if (FLAG_unbox_double_fields) return;
Comment("Copy mutable HeapNumber values");
BuildFastLoop<IntPtrT>(
start_offset, end_offset,
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index 35865c70cb..093b5e978a 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -6,6 +6,7 @@
#include "src/builtins/builtins.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/code-stub-assembler.h"
+#include "src/codegen/tnode.h"
#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
@@ -20,6 +21,34 @@ TF_BUILTIN(ToNumber, CodeStubAssembler) {
Return(ToNumber(context, input));
}
+TF_BUILTIN(ToNumber_Baseline, CodeStubAssembler) {
+ auto input = Parameter<Object>(Descriptor::kArgument);
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
+ auto context = [this] { return LoadContextFromBaseline(); };
+
+ TVARIABLE(Smi, var_type_feedback);
+ TNode<Number> result = CAST(ToNumberOrNumeric(
+ context, input, &var_type_feedback, Object::Conversion::kToNumber));
+
+ auto feedback_vector = LoadFeedbackVectorFromBaseline();
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot);
+ Return(result);
+}
+
+TF_BUILTIN(ToNumeric_Baseline, CodeStubAssembler) {
+ auto input = Parameter<Object>(Descriptor::kArgument);
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
+ auto context = [this] { return LoadContextFromBaseline(); };
+
+ TVARIABLE(Smi, var_type_feedback);
+ TNode<Numeric> result = ToNumberOrNumeric(context, input, &var_type_feedback,
+ Object::Conversion::kToNumeric);
+
+ auto feedback_vector = LoadFeedbackVectorFromBaseline();
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot);
+ Return(result);
+}
+
TF_BUILTIN(PlainPrimitiveToNumber, CodeStubAssembler) {
auto input = Parameter<Object>(Descriptor::kArgument);
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 6c97ac96ce..3819c122fe 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -40,7 +40,6 @@ namespace internal {
TFC(AdaptorWithBuiltinExitFrame, CppBuiltinAdaptor) \
\
/* Calls */ \
- ASM(ArgumentsAdaptorTrampoline, ArgumentsAdaptor) \
/* ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) */ \
ASM(CallFunction_ReceiverIsNullOrUndefined, CallTrampoline) \
ASM(CallFunction_ReceiverIsNotNullOrUndefined, CallTrampoline) \
@@ -51,6 +50,9 @@ namespace internal {
ASM(Call_ReceiverIsNullOrUndefined, CallTrampoline) \
ASM(Call_ReceiverIsNotNullOrUndefined, CallTrampoline) \
ASM(Call_ReceiverIsAny, CallTrampoline) \
+ TFC(Call_ReceiverIsNullOrUndefined_Baseline, CallTrampoline_Baseline) \
+ TFC(Call_ReceiverIsNotNullOrUndefined_Baseline, CallTrampoline_Baseline) \
+ TFC(Call_ReceiverIsAny_Baseline, CallTrampoline_Baseline) \
TFC(Call_ReceiverIsNullOrUndefined_WithFeedback, \
CallTrampoline_WithFeedback) \
TFC(Call_ReceiverIsNotNullOrUndefined_WithFeedback, \
@@ -61,6 +63,7 @@ namespace internal {
TFC(CallProxy, CallTrampoline) \
ASM(CallVarargs, CallVarargs) \
TFC(CallWithSpread, CallWithSpread) \
+ TFC(CallWithSpread_Baseline, CallWithSpread_Baseline) \
TFC(CallWithSpread_WithFeedback, CallWithSpread_WithFeedback) \
TFC(CallWithArrayLike, CallWithArrayLike) \
TFC(CallWithArrayLike_WithFeedback, CallWithArrayLike_WithFeedback) \
@@ -83,12 +86,14 @@ namespace internal {
ASM(Construct, JSTrampoline) \
ASM(ConstructVarargs, ConstructVarargs) \
TFC(ConstructWithSpread, ConstructWithSpread) \
+ TFC(ConstructWithSpread_Baseline, ConstructWithSpread_Baseline) \
TFC(ConstructWithSpread_WithFeedback, ConstructWithSpread_WithFeedback) \
TFC(ConstructWithArrayLike, ConstructWithArrayLike) \
TFC(ConstructWithArrayLike_WithFeedback, \
ConstructWithArrayLike_WithFeedback) \
ASM(ConstructForwardVarargs, ConstructForwardVarargs) \
ASM(ConstructFunctionForwardVarargs, ConstructForwardVarargs) \
+ TFC(Construct_Baseline, Construct_Baseline) \
TFC(Construct_WithFeedback, Construct_WithFeedback) \
ASM(JSConstructStubGeneric, Dummy) \
ASM(JSBuiltinsConstructStub, Dummy) \
@@ -111,7 +116,6 @@ namespace internal {
TFC(StringEqual, Compare) \
TFC(StringGreaterThan, Compare) \
TFC(StringGreaterThanOrEqual, Compare) \
- TFS(StringIndexOf, kReceiver, kSearchString, kPosition) \
TFC(StringLessThan, Compare) \
TFC(StringLessThanOrEqual, Compare) \
TFC(StringSubstring, StringSubstring) \
@@ -133,6 +137,11 @@ namespace internal {
ASM(InterpreterEnterBytecodeDispatch, Dummy) \
ASM(InterpreterOnStackReplacement, ContextOnly) \
\
+ /* Baseline Compiler */ \
+ ASM(BaselineOutOfLinePrologue, BaselineOutOfLinePrologue) \
+ ASM(BaselineOnStackReplacement, ContextOnly) \
+ ASM(BaselineLeaveFrame, BaselineLeaveFrame) \
+ \
/* Code life-cycle */ \
TFC(CompileLazy, JSTrampoline) \
TFC(CompileLazyDeoptimizedCode, JSTrampoline) \
@@ -193,6 +202,8 @@ namespace internal {
\
/* Type conversions */ \
TFC(ToNumber, TypeConversion) \
+ TFC(ToNumber_Baseline, TypeConversion_Baseline) \
+ TFC(ToNumeric_Baseline, TypeConversion_Baseline) \
TFC(PlainPrimitiveToNumber, TypeConversionNoContext) \
TFC(ToNumberConvertBigInt, TypeConversion) \
TFC(Typeof, Typeof) \
@@ -202,7 +213,9 @@ namespace internal {
TFC(I32PairToBigInt, I32PairToBigInt) \
\
/* Type conversions continuations */ \
- TFC(ToBooleanLazyDeoptContinuation, TypeConversionStackParameter) \
+ TFC(ToBooleanLazyDeoptContinuation, SingleParameterOnStack) \
+ \
+ ASM(TailCallOptimizedCodeSlot, TailCallOptimizedCodeSlot) \
\
/* Handlers */ \
TFH(KeyedLoadIC_PolymorphicName, LoadWithVector) \
@@ -543,26 +556,41 @@ namespace internal {
TFH(LoadIC_Megamorphic, LoadWithVector) \
TFH(LoadIC_Noninlined, LoadWithVector) \
TFH(LoadICTrampoline, Load) \
+ TFH(LoadICBaseline, LoadBaseline) \
TFH(LoadICTrampoline_Megamorphic, Load) \
TFH(LoadSuperIC, LoadWithReceiverAndVector) \
+ TFH(LoadSuperICBaseline, LoadWithReceiverBaseline) \
TFH(KeyedLoadIC, LoadWithVector) \
TFH(KeyedLoadIC_Megamorphic, LoadWithVector) \
TFH(KeyedLoadICTrampoline, Load) \
+ TFH(KeyedLoadICBaseline, LoadBaseline) \
TFH(KeyedLoadICTrampoline_Megamorphic, Load) \
TFH(StoreGlobalIC, StoreGlobalWithVector) \
TFH(StoreGlobalICTrampoline, StoreGlobal) \
+ TFH(StoreGlobalICBaseline, StoreGlobalBaseline) \
TFH(StoreIC, StoreWithVector) \
TFH(StoreICTrampoline, Store) \
+ TFH(StoreICBaseline, StoreBaseline) \
TFH(KeyedStoreIC, StoreWithVector) \
TFH(KeyedStoreICTrampoline, Store) \
+ TFH(KeyedStoreICBaseline, StoreBaseline) \
TFH(StoreInArrayLiteralIC, StoreWithVector) \
+ TFH(StoreInArrayLiteralICBaseline, StoreBaseline) \
+ TFH(LookupContextBaseline, LookupBaseline) \
+ TFH(LookupContextInsideTypeofBaseline, LookupBaseline) \
TFH(LoadGlobalIC, LoadGlobalWithVector) \
TFH(LoadGlobalICInsideTypeof, LoadGlobalWithVector) \
TFH(LoadGlobalICTrampoline, LoadGlobal) \
+ TFH(LoadGlobalICBaseline, LoadGlobalBaseline) \
TFH(LoadGlobalICInsideTypeofTrampoline, LoadGlobal) \
+ TFH(LoadGlobalICInsideTypeofBaseline, LoadGlobalBaseline) \
+ TFH(LookupGlobalICBaseline, LookupBaseline) \
+ TFH(LookupGlobalICInsideTypeofBaseline, LookupBaseline) \
TFH(CloneObjectIC, CloneObjectWithVector) \
+ TFH(CloneObjectICBaseline, CloneObjectBaseline) \
TFH(CloneObjectIC_Slow, CloneObjectWithVector) \
TFH(KeyedHasIC, LoadWithVector) \
+ TFH(KeyedHasICBaseline, LoadBaseline) \
TFH(KeyedHasIC_Megamorphic, LoadWithVector) \
\
/* IterableToList */ \
@@ -608,6 +636,19 @@ namespace internal {
TFC(SameValueNumbersOnly, Compare) \
\
/* Binary ops with feedback collection */ \
+ TFC(Add_Baseline, BinaryOp_Baseline) \
+ TFC(Subtract_Baseline, BinaryOp_Baseline) \
+ TFC(Multiply_Baseline, BinaryOp_Baseline) \
+ TFC(Divide_Baseline, BinaryOp_Baseline) \
+ TFC(Modulus_Baseline, BinaryOp_Baseline) \
+ TFC(Exponentiate_Baseline, BinaryOp_Baseline) \
+ TFC(BitwiseAnd_Baseline, BinaryOp_Baseline) \
+ TFC(BitwiseOr_Baseline, BinaryOp_Baseline) \
+ TFC(BitwiseXor_Baseline, BinaryOp_Baseline) \
+ TFC(ShiftLeft_Baseline, BinaryOp_Baseline) \
+ TFC(ShiftRight_Baseline, BinaryOp_Baseline) \
+ TFC(ShiftRightLogical_Baseline, BinaryOp_Baseline) \
+ \
TFC(Add_WithFeedback, BinaryOp_WithFeedback) \
TFC(Subtract_WithFeedback, BinaryOp_WithFeedback) \
TFC(Multiply_WithFeedback, BinaryOp_WithFeedback) \
@@ -622,6 +663,13 @@ namespace internal {
TFC(ShiftRightLogical_WithFeedback, BinaryOp_WithFeedback) \
\
/* Compare ops with feedback collection */ \
+ TFC(Equal_Baseline, Compare_Baseline) \
+ TFC(StrictEqual_Baseline, Compare_Baseline) \
+ TFC(LessThan_Baseline, Compare_Baseline) \
+ TFC(GreaterThan_Baseline, Compare_Baseline) \
+ TFC(LessThanOrEqual_Baseline, Compare_Baseline) \
+ TFC(GreaterThanOrEqual_Baseline, Compare_Baseline) \
+ \
TFC(Equal_WithFeedback, Compare_WithFeedback) \
TFC(StrictEqual_WithFeedback, Compare_WithFeedback) \
TFC(LessThan_WithFeedback, Compare_WithFeedback) \
@@ -630,6 +678,10 @@ namespace internal {
TFC(GreaterThanOrEqual_WithFeedback, Compare_WithFeedback) \
\
/* Unary ops with feedback collection */ \
+ TFC(BitwiseNot_Baseline, UnaryOp_Baseline) \
+ TFC(Decrement_Baseline, UnaryOp_Baseline) \
+ TFC(Increment_Baseline, UnaryOp_Baseline) \
+ TFC(Negate_Baseline, UnaryOp_Baseline) \
TFC(BitwiseNot_WithFeedback, UnaryOp_WithFeedback) \
TFC(Decrement_WithFeedback, UnaryOp_WithFeedback) \
TFC(Increment_WithFeedback, UnaryOp_WithFeedback) \
@@ -670,9 +722,11 @@ namespace internal {
TFC(OrdinaryHasInstance, Compare) \
TFC(InstanceOf, Compare) \
TFC(InstanceOf_WithFeedback, Compare_WithFeedback) \
+ TFC(InstanceOf_Baseline, Compare_Baseline) \
\
/* for-in */ \
TFS(ForInEnumerate, kReceiver) \
+ TFC(ForInPrepare, ForInPrepare) \
TFS(ForInFilter, kKey, kObject) \
\
/* Reflect */ \
@@ -751,22 +805,14 @@ namespace internal {
CPP(StringFromCodePoint) \
/* ES6 #sec-string.fromcharcode */ \
TFJ(StringFromCharCode, kDontAdaptArgumentsSentinel) \
- /* ES6 #sec-string.prototype.includes */ \
- TFJ(StringPrototypeIncludes, kDontAdaptArgumentsSentinel) \
- /* ES6 #sec-string.prototype.indexof */ \
- TFJ(StringPrototypeIndexOf, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.lastindexof */ \
CPP(StringPrototypeLastIndexOf) \
- /* ES6 #sec-string.prototype.match */ \
- TFJ(StringPrototypeMatch, 1, kReceiver, kRegexp) \
/* ES #sec-string.prototype.matchAll */ \
TFJ(StringPrototypeMatchAll, 1, kReceiver, kRegexp) \
/* ES6 #sec-string.prototype.localecompare */ \
CPP(StringPrototypeLocaleCompare) \
/* ES6 #sec-string.prototype.replace */ \
TFJ(StringPrototypeReplace, 2, kReceiver, kSearch, kReplace) \
- /* ES6 #sec-string.prototype.search */ \
- TFJ(StringPrototypeSearch, 1, kReceiver, kRegexp) \
/* ES6 #sec-string.prototype.split */ \
TFJ(StringPrototypeSplit, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.raw */ \
@@ -816,7 +862,7 @@ namespace internal {
TFC(WasmFloat64ToNumber, WasmFloat64ToNumber) \
TFC(WasmI32AtomicWait32, WasmI32AtomicWait32) \
TFC(WasmI64AtomicWait32, WasmI64AtomicWait32) \
- TFS(WasmAllocatePair, kValue1, kValue2) \
+ TFC(JSToWasmLazyDeoptContinuation, SingleParameterOnStack) \
\
/* WeakMap */ \
TFJ(WeakMapConstructor, kDontAdaptArgumentsSentinel) \
diff --git a/deps/v8/src/builtins/builtins-ic-gen.cc b/deps/v8/src/builtins/builtins-ic-gen.cc
index fc6b21e0bd..81bf6379ec 100644
--- a/deps/v8/src/builtins/builtins-ic-gen.cc
+++ b/deps/v8/src/builtins/builtins-ic-gen.cc
@@ -27,24 +27,33 @@ IC_BUILTIN(LoadIC_Megamorphic)
IC_BUILTIN(LoadIC_Noninlined)
IC_BUILTIN(LoadIC_NoFeedback)
IC_BUILTIN(LoadICTrampoline)
+IC_BUILTIN(LoadICBaseline)
IC_BUILTIN(LoadICTrampoline_Megamorphic)
IC_BUILTIN(LoadSuperIC)
+IC_BUILTIN(LoadSuperICBaseline)
IC_BUILTIN(KeyedLoadIC)
IC_BUILTIN(KeyedLoadIC_Megamorphic)
IC_BUILTIN(KeyedLoadIC_PolymorphicName)
IC_BUILTIN(KeyedLoadICTrampoline)
+IC_BUILTIN(KeyedLoadICBaseline)
IC_BUILTIN(KeyedLoadICTrampoline_Megamorphic)
IC_BUILTIN(LoadGlobalIC_NoFeedback)
IC_BUILTIN(StoreGlobalIC)
IC_BUILTIN(StoreGlobalICTrampoline)
+IC_BUILTIN(StoreGlobalICBaseline)
IC_BUILTIN(StoreIC)
IC_BUILTIN(StoreICTrampoline)
+IC_BUILTIN(StoreICBaseline)
IC_BUILTIN(KeyedStoreIC)
IC_BUILTIN(KeyedStoreICTrampoline)
+IC_BUILTIN(KeyedStoreICBaseline)
IC_BUILTIN(StoreInArrayLiteralIC)
+IC_BUILTIN(StoreInArrayLiteralICBaseline)
IC_BUILTIN(CloneObjectIC)
+IC_BUILTIN(CloneObjectICBaseline)
IC_BUILTIN(CloneObjectIC_Slow)
IC_BUILTIN(KeyedHasIC)
+IC_BUILTIN(KeyedHasICBaseline)
IC_BUILTIN(KeyedHasIC_Megamorphic)
IC_BUILTIN(KeyedHasIC_PolymorphicName)
@@ -54,6 +63,17 @@ IC_BUILTIN_PARAM(LoadGlobalICTrampoline, LoadGlobalICTrampoline,
NOT_INSIDE_TYPEOF)
IC_BUILTIN_PARAM(LoadGlobalICInsideTypeofTrampoline, LoadGlobalICTrampoline,
INSIDE_TYPEOF)
+IC_BUILTIN_PARAM(LoadGlobalICBaseline, LoadGlobalICBaseline, NOT_INSIDE_TYPEOF)
+IC_BUILTIN_PARAM(LoadGlobalICInsideTypeofBaseline, LoadGlobalICBaseline,
+ INSIDE_TYPEOF)
+IC_BUILTIN_PARAM(LookupGlobalICBaseline, LookupGlobalICBaseline,
+ NOT_INSIDE_TYPEOF)
+IC_BUILTIN_PARAM(LookupGlobalICInsideTypeofBaseline, LookupGlobalICBaseline,
+ INSIDE_TYPEOF)
+IC_BUILTIN_PARAM(LookupContextBaseline, LookupContextBaseline,
+ NOT_INSIDE_TYPEOF)
+IC_BUILTIN_PARAM(LookupContextInsideTypeofBaseline, LookupContextBaseline,
+ INSIDE_TYPEOF)
TF_BUILTIN(DynamicCheckMaps, CodeStubAssembler) {
auto map = Parameter<Map>(Descriptor::kMap);
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index 4e315e5ef5..29cca9d93a 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/api/api.h"
+#include "src/baseline/baseline.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/codegen/code-stub-assembler.h"
@@ -687,6 +688,20 @@ TF_BUILTIN(ForInEnumerate, CodeStubAssembler) {
TailCallRuntime(Runtime::kForInEnumerate, context, receiver);
}
+TF_BUILTIN(ForInPrepare, CodeStubAssembler) {
+ // The {enumerator} is either a Map or a FixedArray.
+ auto enumerator = Parameter<HeapObject>(Descriptor::kEnumerator);
+ auto index = Parameter<TaggedIndex>(Descriptor::kVectorIndex);
+ auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
+ TNode<UintPtrT> vector_index = Unsigned(TaggedIndexToIntPtr(index));
+
+ TNode<FixedArray> cache_array;
+ TNode<Smi> cache_length;
+ ForInPrepare(enumerator, vector_index, feedback_vector, &cache_array,
+ &cache_length, UpdateFeedbackMode::kGuaranteedFeedback);
+ Return(cache_array, cache_length);
+}
+
TF_BUILTIN(ForInFilter, CodeStubAssembler) {
auto key = Parameter<String>(Descriptor::kKey);
auto object = Parameter<HeapObject>(Descriptor::kObject);
@@ -750,7 +765,6 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
TVARIABLE(Int32T, pushed_argc, actual_argc);
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
TNode<SharedFunctionInfo> shared = LoadJSFunctionSharedFunctionInfo(target);
TNode<Int32T> formal_count =
@@ -770,7 +784,6 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
pushed_argc = formal_count;
Goto(&done_argc);
BIND(&done_argc);
-#endif
// Update arguments count for CEntry to contain the number of arguments
// including the receiver and the extra arguments.
@@ -911,6 +924,28 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {
}
#endif // V8_TARGET_ARCH_IA32
+// TODO(v8:11421): Remove #if once baseline compiler is ported to other
+// architectures.
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+void Builtins::Generate_BaselineLeaveFrame(MacroAssembler* masm) {
+ EmitReturnBaseline(masm);
+}
+#else
+// Stub out implementations of arch-specific baseline builtins.
+void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
+ masm->Trap();
+}
+void Builtins::Generate_BaselineLeaveFrame(MacroAssembler* masm) {
+ masm->Trap();
+}
+void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+ masm->Trap();
+}
+void Builtins::Generate_TailCallOptimizedCodeSlot(MacroAssembler* masm) {
+ masm->Trap();
+}
+#endif
+
// ES6 [[Get]] operation.
TF_BUILTIN(GetProperty, CodeStubAssembler) {
auto object = Parameter<Object>(Descriptor::kObject);
@@ -1089,7 +1124,6 @@ TF_BUILTIN(InstantiateAsmJs, CodeStubAssembler) {
Runtime::kInstantiateAsmJs, context, function, stdlib, foreign, heap);
GotoIf(TaggedIsSmi(maybe_result_or_smi_zero), &tailcall_to_function);
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
TNode<SharedFunctionInfo> shared = LoadJSFunctionSharedFunctionInfo(function);
TNode<Int32T> parameter_count =
UncheckedCast<Int32T>(LoadSharedFunctionInfoFormalParameterCount(shared));
@@ -1103,7 +1137,6 @@ TF_BUILTIN(InstantiateAsmJs, CodeStubAssembler) {
PopAndReturn(Int32Add(parameter_count, Int32Constant(1)),
maybe_result_or_smi_zero);
BIND(&argc_ge_param_count);
-#endif
args.PopAndReturn(maybe_result_or_smi_zero);
BIND(&tailcall_to_function);
diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc
index fe0cd9756a..843adf7122 100644
--- a/deps/v8/src/builtins/builtins-intl.cc
+++ b/deps/v8/src/builtins/builtins-intl.cc
@@ -280,16 +280,14 @@ Object LegacyFormatConstructor(BuiltinArguments args, Isolate* isolate,
// 4. Let this be the this value.
if (args.new_target()->IsUndefined(isolate)) {
Handle<Object> receiver = args.receiver();
-
- // 5. If NewTarget is undefined and ? InstanceofOperator(this, %<T>%)
+ // 5. If NewTarget is undefined and ? OrdinaryHasInstance(%<T>%, this)
// is true, then Look up the intrinsic value that has been stored on
// the context.
- Handle<Object> is_instance_of_obj;
+ Handle<Object> ordinary_has_instance_obj;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, is_instance_of_obj,
- Object::InstanceOf(isolate, receiver, constructor));
-
- if (is_instance_of_obj->BooleanValue(isolate)) {
+ isolate, ordinary_has_instance_obj,
+ Object::OrdinaryHasInstance(isolate, constructor, receiver));
+ if (ordinary_has_instance_obj->BooleanValue(isolate)) {
if (!receiver->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.cc b/deps/v8/src/builtins/builtins-lazy-gen.cc
index 30701bdc15..88cb3b88dc 100644
--- a/deps/v8/src/builtins/builtins-lazy-gen.cc
+++ b/deps/v8/src/builtins/builtins-lazy-gen.cc
@@ -143,13 +143,28 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
isolate(), CompileLazy))));
StoreObjectField(function, JSFunction::kCodeOffset, sfi_code);
+ Label tailcall_code(this);
+ Label baseline(this);
+
+ TVARIABLE(Code, code);
+
+ // Check if we have baseline code.
+  // TODO(v8:11429): We already know if we have baseline code in
+  // GetSharedFunctionInfoCode; make that jump here.
+ TNode<Uint32T> code_flags =
+ LoadObjectField<Uint32T>(sfi_code, Code::kFlagsOffset);
+ TNode<Uint32T> code_kind = DecodeWord32<Code::KindField>(code_flags);
+ TNode<BoolT> is_baseline =
+ IsEqualInWord32<Code::KindField>(code_kind, CodeKind::BASELINE);
+ GotoIf(is_baseline, &baseline);
+
// Finally, check for presence of an NCI cached Code object - if an entry
// possibly exists, call into runtime to query the cache.
TNode<Uint8T> flags2 =
LoadObjectField<Uint8T>(shared, SharedFunctionInfo::kFlags2Offset);
TNode<BoolT> may_have_cached_code =
IsSetWord32<SharedFunctionInfo::MayHaveCachedCodeBit>(flags2);
- TNode<Code> code = Select<Code>(
+ code = Select<Code>(
may_have_cached_code,
[=]() {
return CAST(CallRuntime(Runtime::kTryInstallNCICode,
@@ -157,9 +172,21 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
function));
},
[=]() { return sfi_code; });
+ Goto(&tailcall_code);
+ BIND(&baseline);
+ // Ensure we have a feedback vector.
+ code = Select<Code>(
+ IsFeedbackVector(feedback_cell_value), [=]() { return sfi_code; },
+ [=]() {
+ return CAST(CallRuntime(Runtime::kInstallBaselineCode,
+ Parameter<Context>(Descriptor::kContext),
+ function));
+ });
+ Goto(&tailcall_code);
+ BIND(&tailcall_code);
// Jump to the selected code entry.
- GenerateTailCallToJSCode(code, function);
+ GenerateTailCallToJSCode(code.value(), function);
BIND(&compile_function);
GenerateTailCallToReturnedCode(Runtime::kCompileLazy, function);
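The baseline dispatch added to CompileLazy above hinges on decoding the code kind bitfield out of the Code object's flags word and comparing it against BASELINE. A stand-alone sketch of that decode-and-compare step; the 3-bit field at offset 0 and the enum values are assumptions made for the sketch and do not describe V8's actual Code::KindField layout:

    #include <cstdint>
    #include <iostream>

    enum class CodeKind : uint32_t { INTERPRETED = 0, BASELINE = 1, OPTIMIZED = 2 };

    // Assumed layout, for illustration only: 3 bits starting at bit 0.
    constexpr uint32_t kKindShift = 0;
    constexpr uint32_t kKindMask = 0x7;

    CodeKind DecodeKind(uint32_t flags) {
      return static_cast<CodeKind>((flags >> kKindShift) & kKindMask);
    }

    int main() {
      uint32_t flags =
          (static_cast<uint32_t>(CodeKind::BASELINE) << kKindShift) | 0xF0;
      std::cout << (DecodeKind(flags) == CodeKind::BASELINE) << "\n";  // 1
    }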
diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc
index 0e57959aad..390552836d 100644
--- a/deps/v8/src/builtins/builtins-number-gen.cc
+++ b/deps/v8/src/builtins/builtins-number-gen.cc
@@ -14,20 +14,22 @@ namespace internal {
// -----------------------------------------------------------------------------
// ES6 section 20.1 Number Objects
-#define DEF_BINOP(Name, Generator) \
- TF_BUILTIN(Name, CodeStubAssembler) { \
- auto lhs = Parameter<Object>(Descriptor::kLeft); \
- auto rhs = Parameter<Object>(Descriptor::kRight); \
- auto context = Parameter<Context>(Descriptor::kContext); \
- auto maybe_feedback_vector = \
- Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector); \
- auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot); \
- \
- BinaryOpAssembler binop_asm(state()); \
- TNode<Object> result = binop_asm.Generator(context, lhs, rhs, slot, \
- maybe_feedback_vector, false); \
- \
- Return(result); \
+#define DEF_BINOP(Name, Generator) \
+ TF_BUILTIN(Name, CodeStubAssembler) { \
+ auto lhs = Parameter<Object>(Descriptor::kLeft); \
+ auto rhs = Parameter<Object>(Descriptor::kRight); \
+ auto context = Parameter<Context>(Descriptor::kContext); \
+ auto feedback_vector = \
+ Parameter<FeedbackVector>(Descriptor::kFeedbackVector); \
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot); \
+ \
+ BinaryOpAssembler binop_asm(state()); \
+ TNode<Object> result = \
+ binop_asm.Generator([&]() { return context; }, lhs, rhs, slot, \
+ [&]() { return feedback_vector; }, \
+ UpdateFeedbackMode::kGuaranteedFeedback, false); \
+ \
+ Return(result); \
}
DEF_BINOP(Add_WithFeedback, Generate_AddWithFeedback)
DEF_BINOP(Subtract_WithFeedback, Generate_SubtractWithFeedback)
@@ -44,19 +46,48 @@ DEF_BINOP(ShiftRightLogical_WithFeedback,
Generate_ShiftRightLogicalWithFeedback)
#undef DEF_BINOP
-#define DEF_UNOP(Name, Generator) \
- TF_BUILTIN(Name, CodeStubAssembler) { \
- auto value = Parameter<Object>(Descriptor::kValue); \
- auto context = Parameter<Context>(Descriptor::kContext); \
- auto maybe_feedback_vector = \
- Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector); \
- auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot); \
- \
- UnaryOpAssembler a(state()); \
- TNode<Object> result = \
- a.Generator(context, value, slot, maybe_feedback_vector); \
- \
- Return(result); \
+#define DEF_BINOP(Name, Generator) \
+ TF_BUILTIN(Name, CodeStubAssembler) { \
+ auto lhs = Parameter<Object>(Descriptor::kLeft); \
+ auto rhs = Parameter<Object>(Descriptor::kRight); \
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot); \
+ \
+ BinaryOpAssembler binop_asm(state()); \
+ TNode<Object> result = binop_asm.Generator( \
+ [&]() { return LoadContextFromBaseline(); }, lhs, rhs, slot, \
+ [&]() { return LoadFeedbackVectorFromBaseline(); }, \
+ UpdateFeedbackMode::kGuaranteedFeedback, false); \
+ \
+ Return(result); \
+ }
+DEF_BINOP(Add_Baseline, Generate_AddWithFeedback)
+DEF_BINOP(Subtract_Baseline, Generate_SubtractWithFeedback)
+DEF_BINOP(Multiply_Baseline, Generate_MultiplyWithFeedback)
+DEF_BINOP(Divide_Baseline, Generate_DivideWithFeedback)
+DEF_BINOP(Modulus_Baseline, Generate_ModulusWithFeedback)
+DEF_BINOP(Exponentiate_Baseline, Generate_ExponentiateWithFeedback)
+DEF_BINOP(BitwiseOr_Baseline, Generate_BitwiseOrWithFeedback)
+DEF_BINOP(BitwiseXor_Baseline, Generate_BitwiseXorWithFeedback)
+DEF_BINOP(BitwiseAnd_Baseline, Generate_BitwiseAndWithFeedback)
+DEF_BINOP(ShiftLeft_Baseline, Generate_ShiftLeftWithFeedback)
+DEF_BINOP(ShiftRight_Baseline, Generate_ShiftRightWithFeedback)
+DEF_BINOP(ShiftRightLogical_Baseline, Generate_ShiftRightLogicalWithFeedback)
+#undef DEF_BINOP
+
+#define DEF_UNOP(Name, Generator) \
+ TF_BUILTIN(Name, CodeStubAssembler) { \
+ auto value = Parameter<Object>(Descriptor::kValue); \
+ auto context = Parameter<Context>(Descriptor::kContext); \
+ auto feedback_vector = \
+ Parameter<FeedbackVector>(Descriptor::kFeedbackVector); \
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot); \
+ \
+ UnaryOpAssembler a(state()); \
+ TNode<Object> result = \
+ a.Generator(context, value, slot, feedback_vector, \
+ UpdateFeedbackMode::kGuaranteedFeedback); \
+ \
+ Return(result); \
}
DEF_UNOP(BitwiseNot_WithFeedback, Generate_BitwiseNotWithFeedback)
DEF_UNOP(Decrement_WithFeedback, Generate_DecrementWithFeedback)
@@ -64,19 +95,39 @@ DEF_UNOP(Increment_WithFeedback, Generate_IncrementWithFeedback)
DEF_UNOP(Negate_WithFeedback, Generate_NegateWithFeedback)
#undef DEF_UNOP
+#define DEF_UNOP(Name, Generator) \
+ TF_BUILTIN(Name, CodeStubAssembler) { \
+ auto value = Parameter<Object>(Descriptor::kValue); \
+ auto context = LoadContextFromBaseline(); \
+ auto feedback_vector = LoadFeedbackVectorFromBaseline(); \
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot); \
+ \
+ UnaryOpAssembler a(state()); \
+ TNode<Object> result = \
+ a.Generator(context, value, slot, feedback_vector, \
+ UpdateFeedbackMode::kGuaranteedFeedback); \
+ \
+ Return(result); \
+ }
+DEF_UNOP(BitwiseNot_Baseline, Generate_BitwiseNotWithFeedback)
+DEF_UNOP(Decrement_Baseline, Generate_DecrementWithFeedback)
+DEF_UNOP(Increment_Baseline, Generate_IncrementWithFeedback)
+DEF_UNOP(Negate_Baseline, Generate_NegateWithFeedback)
+#undef DEF_UNOP
+
#define DEF_COMPARE(Name) \
TF_BUILTIN(Name##_WithFeedback, CodeStubAssembler) { \
auto lhs = Parameter<Object>(Descriptor::kLeft); \
auto rhs = Parameter<Object>(Descriptor::kRight); \
auto context = Parameter<Context>(Descriptor::kContext); \
- auto maybe_feedback_vector = \
- Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector); \
+ auto feedback_vector = \
+ Parameter<FeedbackVector>(Descriptor::kFeedbackVector); \
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot); \
\
TVARIABLE(Smi, var_type_feedback); \
TNode<Oddball> result = RelationalComparison(Operation::k##Name, lhs, rhs, \
context, &var_type_feedback); \
- UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot); \
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot); \
\
Return(result); \
}
@@ -86,17 +137,38 @@ DEF_COMPARE(GreaterThan)
DEF_COMPARE(GreaterThanOrEqual)
#undef DEF_COMPARE
+#define DEF_COMPARE(Name) \
+ TF_BUILTIN(Name##_Baseline, CodeStubAssembler) { \
+ auto lhs = Parameter<Object>(Descriptor::kLeft); \
+ auto rhs = Parameter<Object>(Descriptor::kRight); \
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot); \
+ \
+ TVARIABLE(Smi, var_type_feedback); \
+ TNode<Oddball> result = RelationalComparison( \
+ Operation::k##Name, lhs, rhs, \
+ [&]() { return LoadContextFromBaseline(); }, &var_type_feedback); \
+ auto feedback_vector = LoadFeedbackVectorFromBaseline(); \
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot); \
+ \
+ Return(result); \
+ }
+DEF_COMPARE(LessThan)
+DEF_COMPARE(LessThanOrEqual)
+DEF_COMPARE(GreaterThan)
+DEF_COMPARE(GreaterThanOrEqual)
+#undef DEF_COMPARE
+
TF_BUILTIN(Equal_WithFeedback, CodeStubAssembler) {
auto lhs = Parameter<Object>(Descriptor::kLeft);
auto rhs = Parameter<Object>(Descriptor::kRight);
auto context = Parameter<Context>(Descriptor::kContext);
- auto maybe_feedback_vector =
- Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
TVARIABLE(Smi, var_type_feedback);
- TNode<Oddball> result = Equal(lhs, rhs, context, &var_type_feedback);
- UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot);
+ TNode<Oddball> result = Equal(
+ lhs, rhs, [&]() { return context; }, &var_type_feedback);
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot);
Return(result);
}
@@ -104,13 +176,40 @@ TF_BUILTIN(Equal_WithFeedback, CodeStubAssembler) {
TF_BUILTIN(StrictEqual_WithFeedback, CodeStubAssembler) {
auto lhs = Parameter<Object>(Descriptor::kLeft);
auto rhs = Parameter<Object>(Descriptor::kRight);
- auto maybe_feedback_vector =
- Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
+
+ TVARIABLE(Smi, var_type_feedback);
+ TNode<Oddball> result = StrictEqual(lhs, rhs, &var_type_feedback);
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot);
+
+ Return(result);
+}
+
+TF_BUILTIN(Equal_Baseline, CodeStubAssembler) {
+ auto lhs = Parameter<Object>(Descriptor::kLeft);
+ auto rhs = Parameter<Object>(Descriptor::kRight);
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
+
+ TVARIABLE(Smi, var_type_feedback);
+ TNode<Oddball> result = Equal(
+ lhs, rhs, [&]() { return LoadContextFromBaseline(); },
+ &var_type_feedback);
+ auto feedback_vector = LoadFeedbackVectorFromBaseline();
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot);
+
+ Return(result);
+}
+
+TF_BUILTIN(StrictEqual_Baseline, CodeStubAssembler) {
+ auto lhs = Parameter<Object>(Descriptor::kLeft);
+ auto rhs = Parameter<Object>(Descriptor::kRight);
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
TVARIABLE(Smi, var_type_feedback);
TNode<Oddball> result = StrictEqual(lhs, rhs, &var_type_feedback);
- UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot);
+ auto feedback_vector = LoadFeedbackVectorFromBaseline();
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot);
Return(result);
}
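The _WithFeedback and _Baseline builtin families in builtins-number-gen.cc above are stamped out of shared DEF_BINOP / DEF_UNOP / DEF_COMPARE macro bodies that differ only in where the context and feedback vector come from. A stand-alone sketch of that stamping pattern, with plain ints standing in for TNode types (Add and Subtract here are illustrative helpers, not the V8 builtins):

    #include <iostream>

    #define DEF_BINOP(Name, op) \
      int Name(int lhs, int rhs) { return lhs op rhs; }

    DEF_BINOP(Add, +)
    DEF_BINOP(Subtract, -)
    #undef DEF_BINOP

    int main() {
      std::cout << Add(2, 3) << " " << Subtract(5, 1) << "\n";  // prints "5 4"
    }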
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index ad1b6d2330..b4d8372a03 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -737,16 +737,15 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
var_holder = receiver_heap_object;
TNode<Uint16T> receiver_instance_type = LoadMapInstanceType(receiver_map);
GotoIf(IsPrimitiveInstanceType(receiver_instance_type), &if_primitive);
+ GotoIf(IsFunctionInstanceType(receiver_instance_type), &if_function);
const struct {
InstanceType value;
Label* label;
} kJumpTable[] = {{JS_OBJECT_TYPE, &if_object},
{JS_ARRAY_TYPE, &if_array},
- {JS_FUNCTION_TYPE, &if_function},
{JS_REG_EXP_TYPE, &if_regexp},
{JS_ARGUMENTS_OBJECT_TYPE, &if_arguments},
{JS_DATE_TYPE, &if_date},
- {JS_BOUND_FUNCTION_TYPE, &if_function},
{JS_API_OBJECT_TYPE, &if_object},
{JS_SPECIAL_API_OBJECT_TYPE, &if_object},
{JS_PROXY_TYPE, &if_proxy},
@@ -1172,11 +1171,21 @@ TF_BUILTIN(InstanceOf_WithFeedback, ObjectBuiltinsAssembler) {
auto object = Parameter<Object>(Descriptor::kLeft);
auto callable = Parameter<Object>(Descriptor::kRight);
auto context = Parameter<Context>(Descriptor::kContext);
- auto maybe_feedback_vector =
- Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto feedback_vector = Parameter<HeapObject>(Descriptor::kFeedbackVector);
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
- CollectInstanceOfFeedback(callable, context, maybe_feedback_vector, slot);
+ CollectInstanceOfFeedback(callable, context, feedback_vector, slot);
+ Return(InstanceOf(object, callable, context));
+}
+
+TF_BUILTIN(InstanceOf_Baseline, ObjectBuiltinsAssembler) {
+ auto object = Parameter<Object>(Descriptor::kLeft);
+ auto callable = Parameter<Object>(Descriptor::kRight);
+ auto context = LoadContextFromBaseline();
+ auto feedback_vector = LoadFeedbackVectorFromBaseline();
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
+
+ CollectInstanceOfFeedback(callable, context, feedback_vector, slot);
Return(InstanceOf(object, callable, context));
}
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index 16f81dc3d0..ee4f03eed8 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -157,8 +157,9 @@ Object ObjectLookupAccessor(Isolate* isolate, Handle<Object> object,
case LookupIterator::ACCESSOR: {
Handle<Object> maybe_pair = it.GetAccessors();
if (maybe_pair->IsAccessorPair()) {
- Handle<NativeContext> native_context =
- it.GetHolder<JSReceiver>()->GetCreationContext();
+ Handle<NativeContext> native_context = it.GetHolder<JSReceiver>()
+ ->GetCreationContext()
+ .ToHandleChecked();
return *AccessorPair::GetComponent(
isolate, native_context, Handle<AccessorPair>::cast(maybe_pair),
component);
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 4cf3ae1437..0debd125e3 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -18,7 +18,6 @@
#include "src/objects/js-regexp-string-iterator.h"
#include "src/objects/js-regexp.h"
#include "src/objects/regexp-match-info.h"
-#include "src/regexp/regexp.h"
namespace v8 {
namespace internal {
@@ -88,48 +87,66 @@ TNode<RawPtrT> RegExpBuiltinsAssembler::LoadCodeObjectEntry(TNode<Code> code) {
TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult(
TNode<Context> context, TNode<Smi> length, TNode<Smi> index,
TNode<String> input, TNode<JSRegExp> regexp, TNode<Number> last_index,
- TNode<FixedArray>* elements_out) {
+ TNode<BoolT> has_indices, TNode<FixedArray>* elements_out) {
CSA_ASSERT(this, SmiLessThanOrEqual(
length, SmiConstant(JSArray::kMaxFastArrayLength)));
CSA_ASSERT(this, SmiGreaterThan(length, SmiConstant(0)));
// Allocate.
+ Label result_has_indices(this), allocated(this);
const ElementsKind elements_kind = PACKED_ELEMENTS;
- TNode<Map> map = CAST(LoadContextElement(LoadNativeContext(context),
- Context::REGEXP_RESULT_MAP_INDEX));
base::Optional<TNode<AllocationSite>> no_gc_site = base::nullopt;
TNode<IntPtrT> length_intptr = SmiUntag(length);
+ // Note: The returned `var_elements` may be in young large object space, but
+ // `var_array` is guaranteed to be in new space so we could skip write
+ // barriers below.
+ TVARIABLE(JSArray, var_array);
+ TVARIABLE(FixedArrayBase, var_elements);
- // Note: The returned `elements` may be in young large object space, but
- // `array` is guaranteed to be in new space so we could skip write barriers
- // below.
- TNode<JSArray> array;
- TNode<FixedArrayBase> elements;
- std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
- elements_kind, map, length, no_gc_site, length_intptr,
- kAllowLargeObjectAllocation, JSRegExpResult::kSize);
+ GotoIf(has_indices, &result_has_indices);
+ {
+ TNode<Map> map = CAST(LoadContextElement(LoadNativeContext(context),
+ Context::REGEXP_RESULT_MAP_INDEX));
+ std::tie(var_array, var_elements) =
+ AllocateUninitializedJSArrayWithElements(
+ elements_kind, map, length, no_gc_site, length_intptr,
+ kAllowLargeObjectAllocation, JSRegExpResult::kSize);
+ Goto(&allocated);
+ }
+
+ BIND(&result_has_indices);
+ {
+ TNode<Map> map =
+ CAST(LoadContextElement(LoadNativeContext(context),
+ Context::REGEXP_RESULT_WITH_INDICES_MAP_INDEX));
+ std::tie(var_array, var_elements) =
+ AllocateUninitializedJSArrayWithElements(
+ elements_kind, map, length, no_gc_site, length_intptr,
+ kAllowLargeObjectAllocation, JSRegExpResultWithIndices::kSize);
+ Goto(&allocated);
+ }
+
+ BIND(&allocated);
// Finish result initialization.
- TNode<JSRegExpResult> result = UncheckedCast<JSRegExpResult>(array);
+ TNode<JSRegExpResult> result =
+ UncheckedCast<JSRegExpResult>(var_array.value());
// Load undefined value once here to avoid multiple LoadRoots.
TNode<Oddball> undefined_value = UncheckedCast<Oddball>(
CodeAssembler::LoadRoot(RootIndex::kUndefinedValue));
StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kIndexOffset, index);
- // TODO(jgruber,tebbi): Could skip barrier but the MemoryOptimizer complains.
+ // TODO(jgruber,turbofan): Could skip barrier but the MemoryOptimizer
+ // complains.
StoreObjectField(result, JSRegExpResult::kInputOffset, input);
StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kGroupsOffset,
undefined_value);
StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kNamesOffset,
undefined_value);
- // Stash regexp in order to re-execute and build JSRegExpResultIndices lazily
- // when the 'indices' property is accessed.
- StoreObjectField(result, JSRegExpResult::kCachedIndicesOrRegexpOffset,
- regexp);
StoreObjectField(result, JSRegExpResult::kRegexpInputOffset, input);
// If non-smi last_index then store an SmiZero instead.
@@ -141,12 +158,25 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult(
last_index_smi);
}
+ Label finish_initialization(this);
+ GotoIfNot(has_indices, &finish_initialization);
+ {
+ static_assert(
+ std::is_base_of<JSRegExpResult, JSRegExpResultWithIndices>::value,
+ "JSRegExpResultWithIndices is a subclass of JSRegExpResult");
+ StoreObjectFieldNoWriteBarrier(
+ result, JSRegExpResultWithIndices::kIndicesOffset, undefined_value);
+ Goto(&finish_initialization);
+ }
+
+ BIND(&finish_initialization);
+
// Finish elements initialization.
- FillFixedArrayWithValue(elements_kind, elements, IntPtrZero(), length_intptr,
- RootIndex::kUndefinedValue);
+ FillFixedArrayWithValue(elements_kind, var_elements.value(), IntPtrZero(),
+ length_intptr, RootIndex::kUndefinedValue);
- if (elements_out) *elements_out = CAST(elements);
+ if (elements_out) *elements_out = CAST(var_elements.value());
return result;
}
@@ -184,7 +214,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
TNode<Context> context, TNode<JSRegExp> regexp,
TNode<RegExpMatchInfo> match_info, TNode<String> string,
TNode<Number> last_index) {
- Label named_captures(this), out(this);
+ Label named_captures(this), maybe_build_indices(this), out(this);
TNode<IntPtrT> num_indices = SmiUntag(CAST(UnsafeLoadFixedArrayElement(
match_info, RegExpMatchInfo::kNumberOfCapturesIndex)));
@@ -200,15 +230,19 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
TNode<String> first =
CAST(CallBuiltin(Builtins::kSubString, context, string, start, end));
+ // Load flags and check if the result object needs to have indices.
+ const TNode<Smi> flags =
+ CAST(LoadObjectField(regexp, JSRegExp::kFlagsOffset));
+ const TNode<BoolT> has_indices = IsSetSmi(flags, JSRegExp::kHasIndices);
TNode<FixedArray> result_elements;
TNode<JSRegExpResult> result =
AllocateRegExpResult(context, num_results, start, string, regexp,
- last_index, &result_elements);
+ last_index, has_indices, &result_elements);
UnsafeStoreFixedArrayElement(result_elements, 0, first);
// If no captures exist we can skip named capture handling as well.
- GotoIf(SmiEqual(num_results, SmiConstant(1)), &out);
+ GotoIf(SmiEqual(num_results, SmiConstant(1)), &maybe_build_indices);
// Store all remaining captures.
TNode<IntPtrT> limit = IntPtrAdd(
@@ -272,7 +306,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
// index at odd indices.
TNode<Object> maybe_names =
LoadFixedArrayElement(data, JSRegExp::kIrregexpCaptureNameMapIndex);
- GotoIf(TaggedEqual(maybe_names, SmiZero()), &out);
+ GotoIf(TaggedEqual(maybe_names, SmiZero()), &maybe_build_indices);
// One or more named captures exist, add a property for each one.
@@ -342,8 +376,8 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
}
var_i = i_plus_2;
- Branch(IntPtrGreaterThanOrEqual(var_i.value(), names_length), &out,
- &loop);
+ Branch(IntPtrGreaterThanOrEqual(var_i.value(), names_length),
+ &maybe_build_indices, &loop);
if (!V8_DICT_MODE_PROTOTYPES_BOOL) {
// TODO(v8:11167) make unconditional once OrderedNameDictionary
@@ -357,6 +391,22 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
}
}
+ // Build indices if needed (i.e. if the /d flag is present) after named
+ // capture groups are processed.
+ BIND(&maybe_build_indices);
+ GotoIfNot(has_indices, &out);
+ {
+ const TNode<Object> maybe_names =
+ LoadObjectField(result, JSRegExpResultWithIndices::kNamesOffset);
+ const TNode<JSRegExpResultIndices> indices =
+ UncheckedCast<JSRegExpResultIndices>(
+ CallRuntime(Runtime::kRegExpBuildIndices, context, regexp,
+ match_info, maybe_names));
+ StoreObjectField(result, JSRegExpResultWithIndices::kIndicesOffset,
+ indices);
+ Goto(&out);
+ }
+
BIND(&out);
return result;
}
@@ -385,7 +435,8 @@ void RegExpBuiltinsAssembler::GetStringPointers(
TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
TNode<Context> context, TNode<JSRegExp> regexp, TNode<String> string,
- TNode<Number> last_index, TNode<RegExpMatchInfo> match_info) {
+ TNode<Number> last_index, TNode<RegExpMatchInfo> match_info,
+ RegExp::ExecQuirks exec_quirks) {
ToDirectStringAssembler to_direct(state(), string);
TVARIABLE(HeapObject, var_result);
@@ -625,6 +676,14 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
BIND(&if_success);
{
+ if (exec_quirks == RegExp::ExecQuirks::kTreatMatchAtEndAsFailure) {
+ static constexpr int kMatchStartOffset = 0;
+ TNode<IntPtrT> value = ChangeInt32ToIntPtr(UncheckedCast<Int32T>(
+ Load(MachineType::Int32(), static_offsets_vector_address,
+ IntPtrConstant(kMatchStartOffset))));
+ GotoIf(UintPtrGreaterThanOrEqual(value, int_string_length), &if_failure);
+ }
+
// Check that the last match info has space for the capture registers and
// the additional information. Ensure no overflow in add.
STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
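The ExecQuirks::kTreatMatchAtEndAsFailure path added above makes the matcher report failure whenever the match would start at or past the end of the subject, which is what lets RegExpPrototypeSplitBody below drop its explicit end-of-string check. A stand-alone sketch of that filtering rule; the helper name and types are illustrative only:

    #include <cstddef>
    #include <iostream>
    #include <optional>

    // Treat a match starting at or beyond the subject end as "no match".
    std::optional<size_t> FilterMatchAtEnd(std::optional<size_t> match_start,
                                           size_t subject_length) {
      if (match_start && *match_start >= subject_length) return std::nullopt;
      return match_start;
    }

    int main() {
      std::cout << FilterMatchAtEnd(3, 3).has_value() << "\n";  // 0: dropped
      std::cout << FilterMatchAtEnd(1, 3).has_value() << "\n";  // 1: kept
    }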
@@ -687,8 +746,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
TNode<ExternalReference> pending_exception_address =
ExternalConstant(ExternalReference::Create(
IsolateAddressId::kPendingExceptionAddress, isolate()));
- CSA_ASSERT(this, IsTheHole(Load(MachineType::AnyTagged(),
- pending_exception_address)));
+ CSA_ASSERT(this, IsTheHole(Load<Object>(pending_exception_address)));
#endif // DEBUG
CallRuntime(Runtime::kThrowStackOverflow, context);
Unreachable();
@@ -696,15 +754,22 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
BIND(&retry_experimental);
{
- var_result =
- CAST(CallRuntime(Runtime::kRegExpExperimentalOneshotExec, context,
- regexp, string, last_index, match_info));
+ auto target_fn =
+ exec_quirks == RegExp::ExecQuirks::kTreatMatchAtEndAsFailure
+ ? Runtime::kRegExpExperimentalOneshotExecTreatMatchAtEndAsFailure
+ : Runtime::kRegExpExperimentalOneshotExec;
+ var_result = CAST(CallRuntime(target_fn, context, regexp, string,
+ last_index, match_info));
Goto(&out);
}
BIND(&runtime);
{
- var_result = CAST(CallRuntime(Runtime::kRegExpExec, context, regexp, string,
+ auto target_fn =
+ exec_quirks == RegExp::ExecQuirks::kTreatMatchAtEndAsFailure
+ ? Runtime::kRegExpExecTreatMatchAtEndAsFailure
+ : Runtime::kRegExpExec;
+ var_result = CAST(CallRuntime(target_fn, context, regexp, string,
last_index, match_info));
Goto(&out);
}
@@ -807,6 +872,29 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExp(
prototype_check_assembler.CheckAndBranch(prototype, if_isunmodified,
if_ismodified);
}
+void RegExpBuiltinsAssembler::BranchIfFastRegExpForSearch(
+ TNode<Context> context, TNode<HeapObject> object, Label* if_isunmodified,
+ Label* if_ismodified) {
+ BranchIfFastRegExp(
+ context, object, LoadMap(object),
+ PrototypeCheckAssembler::kCheckPrototypePropertyConstness,
+ DescriptorIndexNameValue{JSRegExp::kSymbolSearchFunctionDescriptorIndex,
+ RootIndex::ksearch_symbol,
+ Context::REGEXP_SEARCH_FUNCTION_INDEX},
+ if_isunmodified, if_ismodified);
+}
+
+void RegExpBuiltinsAssembler::BranchIfFastRegExpForMatch(
+ TNode<Context> context, TNode<HeapObject> object, Label* if_isunmodified,
+ Label* if_ismodified) {
+ BranchIfFastRegExp(
+ context, object, LoadMap(object),
+ PrototypeCheckAssembler::kCheckPrototypePropertyConstness,
+ DescriptorIndexNameValue{JSRegExp::kSymbolMatchFunctionDescriptorIndex,
+ RootIndex::kmatch_symbol,
+ Context::REGEXP_MATCH_FUNCTION_INDEX},
+ if_isunmodified, if_ismodified);
+}
void RegExpBuiltinsAssembler::BranchIfFastRegExp_Strict(
TNode<Context> context, TNode<HeapObject> object, Label* if_isunmodified,
@@ -835,8 +923,20 @@ void RegExpBuiltinsAssembler::BranchIfRegExpResult(const TNode<Context> context,
const TNode<Object> initial_regexp_result_map =
LoadContextElement(native_context, Context::REGEXP_RESULT_MAP_INDEX);
+ Label maybe_result_with_indices(this);
Branch(TaggedEqual(map, initial_regexp_result_map), if_isunmodified,
- if_ismodified);
+ &maybe_result_with_indices);
+ BIND(&maybe_result_with_indices);
+ {
+ static_assert(
+ std::is_base_of<JSRegExpResult, JSRegExpResultWithIndices>::value,
+ "JSRegExpResultWithIndices is a subclass of JSRegExpResult");
+ const TNode<Object> initial_regexp_result_with_indices_map =
+ LoadContextElement(native_context,
+ Context::REGEXP_RESULT_WITH_INDICES_MAP_INDEX);
+ Branch(TaggedEqual(map, initial_regexp_result_with_indices_map),
+ if_isunmodified, if_ismodified);
+ }
}
// Fast path stub for ATOM regexps. String matching is done by StringIndexOf,
@@ -865,6 +965,14 @@ TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
const TNode<String> needle_string =
CAST(UnsafeLoadFixedArrayElement(data, JSRegExp::kAtomPatternIndex));
+ // ATOM patterns are guaranteed to not be the empty string (these are
+  // intercepted and replaced in JSRegExp::Initialize).
+ //
+ // This is especially relevant for crbug.com/1075514: atom patterns are
+ // non-empty and thus guaranteed not to match at the end of the string.
+ CSA_ASSERT(this, IntPtrGreaterThan(LoadStringLengthAsWord(needle_string),
+ IntPtrConstant(0)));
+
const TNode<Smi> match_from =
CAST(CallBuiltin(Builtins::kStringIndexOf, context, subject_string,
needle_string, last_index));
@@ -944,6 +1052,7 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
BIND(&next); \
} while (false)
+ CASE_FOR_FLAG(JSRegExp::kHasIndices);
CASE_FOR_FLAG(JSRegExp::kGlobal);
CASE_FOR_FLAG(JSRegExp::kIgnoreCase);
CASE_FOR_FLAG(JSRegExp::kLinear);
@@ -980,35 +1089,38 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
CASE_FOR_FLAG("sticky", JSRegExp::kSticky);
#undef CASE_FOR_FLAG
- {
- Label next(this);
-
- // Check the runtime value of FLAG_enable_experimental_regexp_engine
- // first.
- TNode<Word32T> flag_value = UncheckedCast<Word32T>(
- Load(MachineType::Uint8(),
- ExternalConstant(
- ExternalReference::
- address_of_enable_experimental_regexp_engine())));
- GotoIf(Word32Equal(Word32And(flag_value, Int32Constant(0xFF)),
- Int32Constant(0)),
- &next);
+#define CASE_FOR_FLAG(NAME, V8_FLAG_EXTERN_REF, FLAG) \
+ do { \
+ Label next(this); \
+ TNode<Word32T> flag_value = UncheckedCast<Word32T>( \
+ Load(MachineType::Uint8(), ExternalConstant(V8_FLAG_EXTERN_REF))); \
+ GotoIf(Word32Equal(Word32And(flag_value, Int32Constant(0xFF)), \
+ Int32Constant(0)), \
+ &next); \
+ const TNode<Object> flag = GetProperty( \
+ context, regexp, isolate->factory()->InternalizeUtf8String(NAME)); \
+ Label if_isflagset(this); \
+ BranchIfToBooleanIsTrue(flag, &if_isflagset, &next); \
+ BIND(&if_isflagset); \
+ var_length = Uint32Add(var_length.value(), Uint32Constant(1)); \
+ var_flags = Signed(WordOr(var_flags.value(), IntPtrConstant(FLAG))); \
+ Goto(&next); \
+ BIND(&next); \
+ } while (false)
- const TNode<Object> flag = GetProperty(
- context, regexp, isolate->factory()->InternalizeUtf8String("linear"));
- Label if_isflagset(this);
- BranchIfToBooleanIsTrue(flag, &if_isflagset, &next);
- BIND(&if_isflagset);
- var_length = Uint32Add(var_length.value(), Uint32Constant(1));
- var_flags =
- Signed(WordOr(var_flags.value(), IntPtrConstant(JSRegExp::kLinear)));
- Goto(&next);
- BIND(&next);
- }
+ CASE_FOR_FLAG(
+ "hasIndices",
+ ExternalReference::address_of_harmony_regexp_match_indices_flag(),
+ JSRegExp::kHasIndices);
+ CASE_FOR_FLAG(
+ "linear",
+ ExternalReference::address_of_enable_experimental_regexp_engine(),
+ JSRegExp::kLinear);
+#undef CASE_FOR_FLAG
}
- // Allocate a string of the required length and fill it with the corresponding
- // char for each set flag.
+ // Allocate a string of the required length and fill it with the
+ // corresponding char for each set flag.
{
const TNode<String> result = AllocateSeqOneByteString(var_length.value());
@@ -1028,6 +1140,7 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
BIND(&next); \
} while (false)
+ CASE_FOR_FLAG(JSRegExp::kHasIndices, 'd');
CASE_FOR_FLAG(JSRegExp::kGlobal, 'g');
CASE_FOR_FLAG(JSRegExp::kIgnoreCase, 'i');
CASE_FOR_FLAG(JSRegExp::kLinear, 'l');
@@ -1282,6 +1395,9 @@ TNode<BoolT> RegExpBuiltinsAssembler::SlowFlagGetter(TNode<Context> context,
case JSRegExp::kUnicode:
name = isolate()->factory()->unicode_string();
break;
+ case JSRegExp::kHasIndices:
+ name = isolate()->factory()->has_indices_string();
+ break;
case JSRegExp::kLinear:
name = isolate()->factory()->linear_string();
break;
@@ -1515,9 +1631,9 @@ TNode<JSArray> RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(
const TNode<Object> last_match_info = LoadContextElement(
native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
- const TNode<HeapObject> match_indices_ho =
- CAST(CallBuiltin(Builtins::kRegExpExecInternal, context, regexp, string,
- next_search_from, last_match_info));
+ const TNode<HeapObject> match_indices_ho = RegExpExecInternal(
+ context, regexp, string, next_search_from, CAST(last_match_info),
+ RegExp::ExecQuirks::kTreatMatchAtEndAsFailure);
// We're done if no match was found.
{
@@ -1529,16 +1645,9 @@ TNode<JSArray> RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(
TNode<FixedArray> match_indices = CAST(match_indices_ho);
const TNode<Smi> match_from = CAST(UnsafeLoadFixedArrayElement(
match_indices, RegExpMatchInfo::kFirstCaptureIndex));
-
- // We're done if the match starts beyond the string.
- {
- Label next(this);
- Branch(SmiEqual(match_from, string_length), &push_suffix_and_out, &next);
- BIND(&next);
- }
-
const TNode<Smi> match_to = CAST(UnsafeLoadFixedArrayElement(
match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1));
+ CSA_ASSERT(this, SmiNotEqual(match_from, string_length));
// Advance index and continue if the match is empty.
{
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h
index 273e315599..e55af65f81 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.h
+++ b/deps/v8/src/builtins/builtins-regexp-gen.h
@@ -8,6 +8,7 @@
#include "src/base/optional.h"
#include "src/codegen/code-stub-assembler.h"
#include "src/common/message-template.h"
+#include "src/regexp/regexp.h"
namespace v8 {
namespace internal {
@@ -22,13 +23,14 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
TNode<RawPtrT> LoadCodeObjectEntry(TNode<Code> code);
- // Allocate a RegExpResult with the given length (the number of captures,
- // including the match itself), index (the index where the match starts),
- // and input string.
+ // Allocate either a JSRegExpResult or a JSRegExpResultWithIndices (depending
+ // on has_indices) with the given length (the number of captures, including
+ // the match itself), index (the index where the match starts), and input
+ // string.
TNode<JSRegExpResult> AllocateRegExpResult(
TNode<Context> context, TNode<Smi> length, TNode<Smi> index,
TNode<String> input, TNode<JSRegExp> regexp, TNode<Number> last_index,
- TNode<FixedArray>* elements_out = nullptr);
+ TNode<BoolT> has_indices, TNode<FixedArray>* elements_out = nullptr);
TNode<Object> FastLoadLastIndexBeforeSmiCheck(TNode<JSRegExp> regexp);
TNode<Smi> FastLoadLastIndex(TNode<JSRegExp> regexp) {
@@ -50,11 +52,10 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
TVariable<RawPtrT>* var_string_end);
// Low level logic around the actual call into pattern matching code.
- TNode<HeapObject> RegExpExecInternal(TNode<Context> context,
- TNode<JSRegExp> regexp,
- TNode<String> string,
- TNode<Number> last_index,
- TNode<RegExpMatchInfo> match_info);
+ TNode<HeapObject> RegExpExecInternal(
+ TNode<Context> context, TNode<JSRegExp> regexp, TNode<String> string,
+ TNode<Number> last_index, TNode<RegExpMatchInfo> match_info,
+ RegExp::ExecQuirks exec_quirks = RegExp::ExecQuirks::kNone);
TNode<JSRegExpResult> ConstructNewResultFromMatchInfo(
TNode<Context> context, TNode<JSRegExp> regexp,
@@ -97,6 +98,14 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
base::Optional<DescriptorIndexNameValue> additional_property_to_check,
Label* if_isunmodified, Label* if_ismodified);
+ void BranchIfFastRegExpForSearch(TNode<Context> context,
+ TNode<HeapObject> object,
+ Label* if_isunmodified,
+ Label* if_ismodified);
+ void BranchIfFastRegExpForMatch(TNode<Context> context,
+ TNode<HeapObject> object,
+ Label* if_isunmodified, Label* if_ismodified);
+
// Strict: Does not tolerate any changes to the prototype map.
// Permissive: Allows changes to the prototype map except for the exec
// property.
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index e82e051630..2609632688 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -375,7 +375,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
// 2. Let i be ? ValidateAtomicAccess(typedArray, index).
TNode<UintPtrT> index_word = ValidateAtomicAccess(array, index, context);
-#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64
USE(array_buffer);
TNode<Number> index_number = ChangeUintPtrToTagged(index_word);
Return(CallRuntime(Runtime::kAtomicsExchange, context, array, index_number,
@@ -476,7 +476,8 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
// This shouldn't happen, we've already validated the type.
BIND(&other);
Unreachable();
-#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 ||
+ // V8_TARGET_ARCH_RISCV64
BIND(&detached);
{
@@ -505,7 +506,8 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
TNode<UintPtrT> index_word = ValidateAtomicAccess(array, index, context);
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
- V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X || \
+ V8_TARGET_ARCH_RISCV64
USE(array_buffer);
TNode<Number> index_number = ChangeUintPtrToTagged(index_word);
Return(CallRuntime(Runtime::kAtomicsCompareExchange, context, array,
@@ -627,6 +629,7 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
Unreachable();
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
// || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
+ // || V8_TARGET_ARCH_RISCV64
BIND(&detached);
{
@@ -678,7 +681,8 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
TNode<UintPtrT> index_word = ValidateAtomicAccess(array, index, context);
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
- V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X || \
+ V8_TARGET_ARCH_RISCV64
USE(array_buffer);
TNode<Number> index_number = ChangeUintPtrToTagged(index_word);
Return(CallRuntime(runtime_function, context, array, index_number, value));
@@ -771,6 +775,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
Unreachable();
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
// || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
+ // || V8_TARGET_ARCH_RISCV64
BIND(&detached);
ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 0b08f8a743..d46bbacadb 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -48,46 +48,6 @@ TNode<RawPtrT> StringBuiltinsAssembler::DirectStringData(
return var_data.value();
}
-void StringBuiltinsAssembler::DispatchOnStringEncodings(
- TNode<Word32T> const lhs_instance_type,
- TNode<Word32T> const rhs_instance_type, Label* if_one_one,
- Label* if_one_two, Label* if_two_one, Label* if_two_two) {
- STATIC_ASSERT(kStringEncodingMask == 0x8);
- STATIC_ASSERT(kTwoByteStringTag == 0x0);
- STATIC_ASSERT(kOneByteStringTag == 0x8);
-
- // First combine the encodings.
-
- const TNode<Int32T> encoding_mask = Int32Constant(kStringEncodingMask);
- const TNode<Word32T> lhs_encoding =
- Word32And(lhs_instance_type, encoding_mask);
- const TNode<Word32T> rhs_encoding =
- Word32And(rhs_instance_type, encoding_mask);
-
- const TNode<Word32T> combined_encodings =
- Word32Or(lhs_encoding, Word32Shr(rhs_encoding, 1));
-
- // Then dispatch on the combined encoding.
-
- Label unreachable(this, Label::kDeferred);
-
- int32_t values[] = {
- kOneByteStringTag | (kOneByteStringTag >> 1),
- kOneByteStringTag | (kTwoByteStringTag >> 1),
- kTwoByteStringTag | (kOneByteStringTag >> 1),
- kTwoByteStringTag | (kTwoByteStringTag >> 1),
- };
- Label* labels[] = {
- if_one_one, if_one_two, if_two_one, if_two_two,
- };
-
- STATIC_ASSERT(arraysize(values) == arraysize(labels));
- Switch(combined_encodings, &unreachable, values, labels, arraysize(values));
-
- BIND(&unreachable);
- Unreachable();
-}
-
template <typename SubjectChar, typename PatternChar>
TNode<IntPtrT> StringBuiltinsAssembler::CallSearchStringRaw(
const TNode<RawPtrT> subject_ptr, const TNode<IntPtrT> subject_length,
@@ -111,15 +71,57 @@ TNode<IntPtrT> StringBuiltinsAssembler::CallSearchStringRaw(
return result;
}
-
-TNode<RawPtrT> StringBuiltinsAssembler::PointerToStringDataAtIndex(
- TNode<RawPtrT> string_data, TNode<IntPtrT> index,
- String::Encoding encoding) {
- const ElementsKind kind = (encoding == String::ONE_BYTE_ENCODING)
- ? UINT8_ELEMENTS
- : UINT16_ELEMENTS;
- TNode<IntPtrT> offset_in_bytes = ElementOffsetFromIndex(index, kind);
- return RawPtrAdd(string_data, offset_in_bytes);
+TNode<IntPtrT> StringBuiltinsAssembler::SearchOneByteStringInTwoByteString(
+ const TNode<RawPtrT> subject_ptr, const TNode<IntPtrT> subject_length,
+ const TNode<RawPtrT> search_ptr, const TNode<IntPtrT> search_length,
+ const TNode<IntPtrT> start_position) {
+ return CallSearchStringRaw<const uc16, const uint8_t>(
+ subject_ptr, subject_length, search_ptr, search_length, start_position);
+}
+TNode<IntPtrT> StringBuiltinsAssembler::SearchOneByteStringInOneByteString(
+ const TNode<RawPtrT> subject_ptr, const TNode<IntPtrT> subject_length,
+ const TNode<RawPtrT> search_ptr, const TNode<IntPtrT> search_length,
+ const TNode<IntPtrT> start_position) {
+ return CallSearchStringRaw<const uint8_t, const uint8_t>(
+ subject_ptr, subject_length, search_ptr, search_length, start_position);
+}
+TNode<IntPtrT> StringBuiltinsAssembler::SearchTwoByteStringInTwoByteString(
+ const TNode<RawPtrT> subject_ptr, const TNode<IntPtrT> subject_length,
+ const TNode<RawPtrT> search_ptr, const TNode<IntPtrT> search_length,
+ const TNode<IntPtrT> start_position) {
+ return CallSearchStringRaw<const uc16, const uc16>(
+ subject_ptr, subject_length, search_ptr, search_length, start_position);
+}
+TNode<IntPtrT> StringBuiltinsAssembler::SearchTwoByteStringInOneByteString(
+ const TNode<RawPtrT> subject_ptr, const TNode<IntPtrT> subject_length,
+ const TNode<RawPtrT> search_ptr, const TNode<IntPtrT> search_length,
+ const TNode<IntPtrT> start_position) {
+ return CallSearchStringRaw<const uint8_t, const uc16>(
+ subject_ptr, subject_length, search_ptr, search_length, start_position);
+}
+TNode<IntPtrT> StringBuiltinsAssembler::SearchOneByteInOneByteString(
+ const TNode<RawPtrT> subject_ptr, const TNode<IntPtrT> subject_length,
+ const TNode<RawPtrT> search_ptr, const TNode<IntPtrT> start_position) {
+ const TNode<RawPtrT> subject_start_ptr =
+ RawPtrAdd(subject_ptr, start_position);
+ const TNode<IntPtrT> search_byte =
+ ChangeInt32ToIntPtr(Load<Uint8T>(search_ptr));
+ const TNode<UintPtrT> search_length =
+ Unsigned(IntPtrSub(subject_length, start_position));
+ const TNode<ExternalReference> memchr =
+ ExternalConstant(ExternalReference::libc_memchr_function());
+ const TNode<RawPtrT> result_address = UncheckedCast<RawPtrT>(
+ CallCFunction(memchr, MachineType::Pointer(),
+ std::make_pair(MachineType::Pointer(), subject_start_ptr),
+ std::make_pair(MachineType::IntPtr(), search_byte),
+ std::make_pair(MachineType::UintPtr(), search_length)));
+ return Select<IntPtrT>(
+ WordEqual(result_address, IntPtrConstant(0)),
+ [=] { return IntPtrConstant(-1); },
+ [=] {
+ return IntPtrAdd(RawPtrSub(result_address, subject_start_ptr),
+ start_position);
+ });
}
void StringBuiltinsAssembler::GenerateStringEqual(TNode<String> left,
@@ -887,273 +889,6 @@ TF_BUILTIN(StringFromCharCode, StringBuiltinsAssembler) {
}
}
-void StringBuiltinsAssembler::StringIndexOf(
- const TNode<String> subject_string, const TNode<String> search_string,
- const TNode<Smi> position,
- const std::function<void(TNode<Smi>)>& f_return) {
- const TNode<IntPtrT> int_zero = IntPtrConstant(0);
- const TNode<IntPtrT> search_length = LoadStringLengthAsWord(search_string);
- const TNode<IntPtrT> subject_length = LoadStringLengthAsWord(subject_string);
- const TNode<IntPtrT> start_position = IntPtrMax(SmiUntag(position), int_zero);
-
- Label zero_length_needle(this), return_minus_1(this);
- {
- GotoIf(IntPtrEqual(int_zero, search_length), &zero_length_needle);
-
- // Check that the needle fits in the start position.
- GotoIfNot(IntPtrLessThanOrEqual(search_length,
- IntPtrSub(subject_length, start_position)),
- &return_minus_1);
- }
-
- // If the string pointers are identical, we can just return 0. Note that this
- // implies {start_position} == 0 since we've passed the check above.
- Label return_zero(this);
- GotoIf(TaggedEqual(subject_string, search_string), &return_zero);
-
- // Try to unpack subject and search strings. Bail to runtime if either needs
- // to be flattened.
- ToDirectStringAssembler subject_to_direct(state(), subject_string);
- ToDirectStringAssembler search_to_direct(state(), search_string);
-
- Label call_runtime_unchecked(this, Label::kDeferred);
-
- subject_to_direct.TryToDirect(&call_runtime_unchecked);
- search_to_direct.TryToDirect(&call_runtime_unchecked);
-
- // Load pointers to string data.
- const TNode<RawPtrT> subject_ptr =
- subject_to_direct.PointerToData(&call_runtime_unchecked);
- const TNode<RawPtrT> search_ptr =
- search_to_direct.PointerToData(&call_runtime_unchecked);
-
- const TNode<IntPtrT> subject_offset = subject_to_direct.offset();
- const TNode<IntPtrT> search_offset = search_to_direct.offset();
-
- // Like String::IndexOf, the actual matching is done by the optimized
- // SearchString method in string-search.h. Dispatch based on string instance
- // types, then call straight into C++ for matching.
-
- CSA_ASSERT(this, IntPtrGreaterThan(search_length, int_zero));
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(start_position, int_zero));
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(subject_length, start_position));
- CSA_ASSERT(this,
- IntPtrLessThanOrEqual(search_length,
- IntPtrSub(subject_length, start_position)));
-
- Label one_one(this), one_two(this), two_one(this), two_two(this);
- DispatchOnStringEncodings(subject_to_direct.instance_type(),
- search_to_direct.instance_type(), &one_one,
- &one_two, &two_one, &two_two);
-
- using onebyte_t = const uint8_t;
- using twobyte_t = const uc16;
-
- BIND(&one_one);
- {
- const TNode<RawPtrT> adjusted_subject_ptr = PointerToStringDataAtIndex(
- subject_ptr, subject_offset, String::ONE_BYTE_ENCODING);
- const TNode<RawPtrT> adjusted_search_ptr = PointerToStringDataAtIndex(
- search_ptr, search_offset, String::ONE_BYTE_ENCODING);
-
- Label direct_memchr_call(this), generic_fast_path(this);
- Branch(IntPtrEqual(search_length, IntPtrConstant(1)), &direct_memchr_call,
- &generic_fast_path);
-
- // An additional fast path that calls directly into memchr for 1-length
- // search strings.
- BIND(&direct_memchr_call);
- {
- const TNode<RawPtrT> string_addr =
- RawPtrAdd(adjusted_subject_ptr, start_position);
- const TNode<IntPtrT> search_length =
- IntPtrSub(subject_length, start_position);
- const TNode<IntPtrT> search_byte =
- ChangeInt32ToIntPtr(Load<Uint8T>(adjusted_search_ptr));
-
- const TNode<ExternalReference> memchr =
- ExternalConstant(ExternalReference::libc_memchr_function());
- const TNode<RawPtrT> result_address = UncheckedCast<RawPtrT>(
- CallCFunction(memchr, MachineType::Pointer(),
- std::make_pair(MachineType::Pointer(), string_addr),
- std::make_pair(MachineType::IntPtr(), search_byte),
- std::make_pair(MachineType::UintPtr(), search_length)));
- GotoIf(WordEqual(result_address, int_zero), &return_minus_1);
- const TNode<IntPtrT> result_index =
- IntPtrAdd(RawPtrSub(result_address, string_addr), start_position);
- f_return(SmiTag(result_index));
- }
-
- BIND(&generic_fast_path);
- {
- const TNode<IntPtrT> result = CallSearchStringRaw<onebyte_t, onebyte_t>(
- adjusted_subject_ptr, subject_length, adjusted_search_ptr,
- search_length, start_position);
- f_return(SmiTag(result));
- }
- }
-
- BIND(&one_two);
- {
- const TNode<RawPtrT> adjusted_subject_ptr = PointerToStringDataAtIndex(
- subject_ptr, subject_offset, String::ONE_BYTE_ENCODING);
- const TNode<RawPtrT> adjusted_search_ptr = PointerToStringDataAtIndex(
- search_ptr, search_offset, String::TWO_BYTE_ENCODING);
-
- const TNode<IntPtrT> result = CallSearchStringRaw<onebyte_t, twobyte_t>(
- adjusted_subject_ptr, subject_length, adjusted_search_ptr,
- search_length, start_position);
- f_return(SmiTag(result));
- }
-
- BIND(&two_one);
- {
- const TNode<RawPtrT> adjusted_subject_ptr = PointerToStringDataAtIndex(
- subject_ptr, subject_offset, String::TWO_BYTE_ENCODING);
- const TNode<RawPtrT> adjusted_search_ptr = PointerToStringDataAtIndex(
- search_ptr, search_offset, String::ONE_BYTE_ENCODING);
-
- const TNode<IntPtrT> result = CallSearchStringRaw<twobyte_t, onebyte_t>(
- adjusted_subject_ptr, subject_length, adjusted_search_ptr,
- search_length, start_position);
- f_return(SmiTag(result));
- }
-
- BIND(&two_two);
- {
- const TNode<RawPtrT> adjusted_subject_ptr = PointerToStringDataAtIndex(
- subject_ptr, subject_offset, String::TWO_BYTE_ENCODING);
- const TNode<RawPtrT> adjusted_search_ptr = PointerToStringDataAtIndex(
- search_ptr, search_offset, String::TWO_BYTE_ENCODING);
-
- const TNode<IntPtrT> result = CallSearchStringRaw<twobyte_t, twobyte_t>(
- adjusted_subject_ptr, subject_length, adjusted_search_ptr,
- search_length, start_position);
- f_return(SmiTag(result));
- }
-
- BIND(&return_minus_1);
- f_return(SmiConstant(-1));
-
- BIND(&return_zero);
- f_return(SmiConstant(0));
-
- BIND(&zero_length_needle);
- {
- Comment("0-length search_string");
- f_return(SmiTag(IntPtrMin(subject_length, start_position)));
- }
-
- BIND(&call_runtime_unchecked);
- {
- // Simplified version of the runtime call where the types of the arguments
- // are already known due to type checks in this stub.
- Comment("Call Runtime Unchecked");
- TNode<Smi> result =
- CAST(CallRuntime(Runtime::kStringIndexOfUnchecked, NoContextConstant(),
- subject_string, search_string, position));
- f_return(result);
- }
-}
-
-// ES6 String.prototype.indexOf(searchString [, position])
-// #sec-string.prototype.indexof
-// Unchecked helper for builtins lowering.
-TF_BUILTIN(StringIndexOf, StringBuiltinsAssembler) {
- auto receiver = Parameter<String>(Descriptor::kReceiver);
- auto search_string = Parameter<String>(Descriptor::kSearchString);
- auto position = Parameter<Smi>(Descriptor::kPosition);
- StringIndexOf(receiver, search_string, position,
- [this](TNode<Smi> result) { this->Return(result); });
-}
-
-// ES6 String.prototype.includes(searchString [, position])
-// #sec-string.prototype.includes
-TF_BUILTIN(StringPrototypeIncludes, StringIncludesIndexOfAssembler) {
- TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
- auto context = Parameter<Context>(Descriptor::kContext);
- Generate(kIncludes, argc, context);
-}
-
-// ES6 String.prototype.indexOf(searchString [, position])
-// #sec-string.prototype.indexof
-TF_BUILTIN(StringPrototypeIndexOf, StringIncludesIndexOfAssembler) {
- TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
- auto context = Parameter<Context>(Descriptor::kContext);
- Generate(kIndexOf, argc, context);
-}
-
-void StringIncludesIndexOfAssembler::Generate(SearchVariant variant,
- TNode<IntPtrT> argc,
- TNode<Context> context) {
- CodeStubArguments arguments(this, argc);
- const TNode<Object> receiver = arguments.GetReceiver();
-
- TVARIABLE(Object, var_search_string);
- TVARIABLE(Object, var_position);
- Label argc_1(this), argc_2(this), call_runtime(this, Label::kDeferred),
- fast_path(this);
-
- GotoIf(IntPtrEqual(arguments.GetLength(), IntPtrConstant(1)), &argc_1);
- GotoIf(IntPtrGreaterThan(arguments.GetLength(), IntPtrConstant(1)), &argc_2);
- {
- Comment("0 Argument case");
- CSA_ASSERT(this, IntPtrEqual(arguments.GetLength(), IntPtrConstant(0)));
- TNode<Oddball> undefined = UndefinedConstant();
- var_search_string = undefined;
- var_position = undefined;
- Goto(&call_runtime);
- }
- BIND(&argc_1);
- {
- Comment("1 Argument case");
- var_search_string = arguments.AtIndex(0);
- var_position = SmiConstant(0);
- Goto(&fast_path);
- }
- BIND(&argc_2);
- {
- Comment("2 Argument case");
- var_search_string = arguments.AtIndex(0);
- var_position = arguments.AtIndex(1);
- GotoIfNot(TaggedIsSmi(var_position.value()), &call_runtime);
- Goto(&fast_path);
- }
- BIND(&fast_path);
- {
- Comment("Fast Path");
- const TNode<Object> search = var_search_string.value();
- const TNode<Smi> position = CAST(var_position.value());
- GotoIf(TaggedIsSmi(receiver), &call_runtime);
- GotoIf(TaggedIsSmi(search), &call_runtime);
- GotoIfNot(IsString(CAST(receiver)), &call_runtime);
- GotoIfNot(IsString(CAST(search)), &call_runtime);
-
- StringIndexOf(CAST(receiver), CAST(search), position,
- [&](TNode<Smi> result) {
- if (variant == kIndexOf) {
- arguments.PopAndReturn(result);
- } else {
- arguments.PopAndReturn(SelectBooleanConstant(
- SmiGreaterThanOrEqual(result, SmiConstant(0))));
- }
- });
- }
- BIND(&call_runtime);
- {
- Comment("Call Runtime");
- Runtime::FunctionId runtime = variant == kIndexOf
- ? Runtime::kStringIndexOf
- : Runtime::kStringIncludes;
- const TNode<Object> result =
- CallRuntime(runtime, context, receiver, var_search_string.value(),
- var_position.value());
- arguments.PopAndReturn(result);
- }
-}
-
void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
const TNode<Context> context, const TNode<Object> object,
const TNode<Object> maybe_string, Handle<Symbol> symbol,
@@ -1425,87 +1160,6 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
}
}
-class StringMatchSearchAssembler : public StringBuiltinsAssembler {
- public:
- explicit StringMatchSearchAssembler(compiler::CodeAssemblerState* state)
- : StringBuiltinsAssembler(state) {}
-
- protected:
- enum Variant { kMatch, kSearch };
-
- void Generate(Variant variant, const char* method_name,
- TNode<Object> receiver, TNode<Object> maybe_regexp,
- TNode<Context> context) {
- Label call_regexp_match_search(this);
-
- Builtins::Name builtin;
- Handle<Symbol> symbol;
- DescriptorIndexNameValue property_to_check;
- if (variant == kMatch) {
- builtin = Builtins::kRegExpMatchFast;
- symbol = isolate()->factory()->match_symbol();
- property_to_check = DescriptorIndexNameValue{
- JSRegExp::kSymbolMatchFunctionDescriptorIndex,
- RootIndex::kmatch_symbol, Context::REGEXP_MATCH_FUNCTION_INDEX};
- } else {
- builtin = Builtins::kRegExpSearchFast;
- symbol = isolate()->factory()->search_symbol();
- property_to_check = DescriptorIndexNameValue{
- JSRegExp::kSymbolSearchFunctionDescriptorIndex,
- RootIndex::ksearch_symbol, Context::REGEXP_SEARCH_FUNCTION_INDEX};
- }
-
- RequireObjectCoercible(context, receiver, method_name);
-
- MaybeCallFunctionAtSymbol(
- context, maybe_regexp, receiver, symbol, property_to_check,
- [=] { Return(CallBuiltin(builtin, context, maybe_regexp, receiver)); },
- [=](TNode<Object> fn) {
- Return(Call(context, fn, maybe_regexp, receiver));
- });
-
- // maybe_regexp is not a RegExp nor has [@@match / @@search] property.
- {
- RegExpBuiltinsAssembler regexp_asm(state());
-
- TNode<String> receiver_string = ToString_Inline(context, receiver);
- TNode<NativeContext> native_context = LoadNativeContext(context);
- TNode<HeapObject> regexp_function = CAST(
- LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX));
- TNode<Map> initial_map = CAST(LoadObjectField(
- regexp_function, JSFunction::kPrototypeOrInitialMapOffset));
- TNode<Object> regexp = regexp_asm.RegExpCreate(
- context, initial_map, maybe_regexp, EmptyStringConstant());
-
- // TODO(jgruber): Handle slow flag accesses on the fast path and make this
- // permissive.
- Label fast_path(this), slow_path(this);
- regexp_asm.BranchIfFastRegExp(
- context, CAST(regexp), initial_map,
- PrototypeCheckAssembler::kCheckPrototypePropertyConstness,
- property_to_check, &fast_path, &slow_path);
-
- BIND(&fast_path);
- Return(CallBuiltin(builtin, context, regexp, receiver_string));
-
- BIND(&slow_path);
- {
- TNode<Object> maybe_func = GetProperty(context, regexp, symbol);
- Return(Call(context, maybe_func, regexp, receiver_string));
- }
- }
- }
-};
-
-// ES6 #sec-string.prototype.match
-TF_BUILTIN(StringPrototypeMatch, StringMatchSearchAssembler) {
- auto receiver = Parameter<Object>(Descriptor::kReceiver);
- auto maybe_regexp = Parameter<Object>(Descriptor::kRegexp);
- auto context = Parameter<Context>(Descriptor::kContext);
-
- Generate(kMatch, "String.prototype.match", receiver, maybe_regexp, context);
-}
-
// ES #sec-string.prototype.matchAll
TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) {
char const* method_name = "String.prototype.matchAll";
@@ -1605,14 +1259,6 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) {
Return(Call(context, match_all_func, rx, s));
}
-// ES6 #sec-string.prototype.search
-TF_BUILTIN(StringPrototypeSearch, StringMatchSearchAssembler) {
- auto receiver = Parameter<Object>(Descriptor::kReceiver);
- auto maybe_regexp = Parameter<Object>(Descriptor::kRegexp);
- auto context = Parameter<Context>(Descriptor::kContext);
- Generate(kSearch, "String.prototype.search", receiver, maybe_regexp, context);
-}
-
TNode<JSArray> StringBuiltinsAssembler::StringToArray(
TNode<NativeContext> context, TNode<String> subject_string,
TNode<Smi> subject_length, TNode<Number> limit_number) {
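
The new SearchOneByteInOneByteString wrapper replaces the memchr fast path that previously lived inline in the removed StringIndexOf CSA code: it calls libc memchr on the one-byte subject starting at the search position and converts the returned address back into an index, or -1 when there is no hit. A plain C++ sketch of that pointer-to-index conversion (a standalone illustration under those assumptions, not V8 code):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <iostream>

    // Find the first occurrence of search_byte in
    // subject[start_position..subject_length). Returns an index relative to
    // the start of subject, or -1 if there is no hit, mirroring how the CSA
    // helper turns memchr's pointer result back into an index.
    std::ptrdiff_t IndexOfByte(const uint8_t* subject, std::size_t subject_length,
                               uint8_t search_byte, std::size_t start_position) {
      const uint8_t* start = subject + start_position;
      const std::size_t remaining = subject_length - start_position;
      const void* hit = std::memchr(start, search_byte, remaining);
      if (hit == nullptr) return -1;
      // (hit - start) + start_position, i.e. an offset within the whole subject.
      return static_cast<const uint8_t*>(hit) - subject;
    }

    int main() {
      const uint8_t subject[] = {'a', 'b', 'c', 'b'};
      std::cout << IndexOfByte(subject, 4, 'b', 2) << "\n";  // prints 3
      return 0;
    }
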
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
index 5e3ee93f17..bd1390dc24 100644
--- a/deps/v8/src/builtins/builtins-string-gen.h
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -61,6 +61,29 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
String::Encoding from_encoding,
String::Encoding to_encoding);
+ // Torque wrapper methods for CallSearchStringRaw for each combination of
+ // search and subject character widths (char8/char16). This is a workaround
+ // for Torque's current lack of support for extern macros with generics.
+ TNode<IntPtrT> SearchOneByteStringInTwoByteString(
+ const TNode<RawPtrT> subject_ptr, const TNode<IntPtrT> subject_length,
+ const TNode<RawPtrT> search_ptr, const TNode<IntPtrT> search_length,
+ const TNode<IntPtrT> start_position);
+ TNode<IntPtrT> SearchOneByteStringInOneByteString(
+ const TNode<RawPtrT> subject_ptr, const TNode<IntPtrT> subject_length,
+ const TNode<RawPtrT> search_ptr, const TNode<IntPtrT> search_length,
+ const TNode<IntPtrT> start_position);
+ TNode<IntPtrT> SearchTwoByteStringInTwoByteString(
+ const TNode<RawPtrT> subject_ptr, const TNode<IntPtrT> subject_length,
+ const TNode<RawPtrT> search_ptr, const TNode<IntPtrT> search_length,
+ const TNode<IntPtrT> start_position);
+ TNode<IntPtrT> SearchTwoByteStringInOneByteString(
+ const TNode<RawPtrT> subject_ptr, const TNode<IntPtrT> subject_length,
+ const TNode<RawPtrT> search_ptr, const TNode<IntPtrT> search_length,
+ const TNode<IntPtrT> start_position);
+ TNode<IntPtrT> SearchOneByteInOneByteString(
+ const TNode<RawPtrT> subject_ptr, const TNode<IntPtrT> subject_length,
+ const TNode<RawPtrT> search_ptr, const TNode<IntPtrT> start_position);
+
protected:
void StringEqual_Loop(TNode<String> lhs, TNode<Word32T> lhs_instance_type,
MachineType lhs_type, TNode<String> rhs,
@@ -70,11 +93,6 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
TNode<RawPtrT> DirectStringData(TNode<String> string,
TNode<Word32T> string_instance_type);
- void DispatchOnStringEncodings(const TNode<Word32T> lhs_instance_type,
- const TNode<Word32T> rhs_instance_type,
- Label* if_one_one, Label* if_one_two,
- Label* if_two_one, Label* if_two_two);
-
template <typename SubjectChar, typename PatternChar>
TNode<IntPtrT> CallSearchStringRaw(const TNode<RawPtrT> subject_ptr,
const TNode<IntPtrT> subject_length,
@@ -82,10 +100,6 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
const TNode<IntPtrT> search_length,
const TNode<IntPtrT> start_position);
- TNode<RawPtrT> PointerToStringDataAtIndex(TNode<RawPtrT> string_data,
- TNode<IntPtrT> index,
- String::Encoding encoding);
-
void GenerateStringEqual(TNode<String> left, TNode<String> right);
void GenerateStringRelationalComparison(TNode<String> left,
TNode<String> right, Operation op);
@@ -93,11 +107,6 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
using StringAtAccessor = std::function<TNode<Object>(
TNode<String> receiver, TNode<IntPtrT> length, TNode<IntPtrT> index)>;
- void StringIndexOf(const TNode<String> subject_string,
- const TNode<String> search_string,
- const TNode<Smi> position,
- const std::function<void(TNode<Smi>)>& f_return);
-
const TNode<Smi> IndexOfDollarChar(const TNode<Context> context,
const TNode<String> string);
@@ -172,18 +181,6 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
TNode<IntPtrT> character_count);
};
-class StringIncludesIndexOfAssembler : public StringBuiltinsAssembler {
- public:
- explicit StringIncludesIndexOfAssembler(compiler::CodeAssemblerState* state)
- : StringBuiltinsAssembler(state) {}
-
- protected:
- enum SearchVariant { kIncludes, kIndexOf };
-
- void Generate(SearchVariant variant, TNode<IntPtrT> argc,
- TNode<Context> context);
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-string.tq b/deps/v8/src/builtins/builtins-string.tq
index b7f8a671c2..4111155fd2 100644
--- a/deps/v8/src/builtins/builtins-string.tq
+++ b/deps/v8/src/builtins/builtins-string.tq
@@ -116,9 +116,7 @@ IfInBounds(String, uintptr, uintptr), IfOutOfBounds {
const index: uintptr = Unsigned(Convert<intptr>(indexSmi));
// Max string length fits Smi range, so we can do an unsigned bounds
// check.
- const kMaxStringLengthFitsSmi: constexpr bool =
- kStringMaxLengthUintptr < kSmiMaxValue;
- static_assert(kMaxStringLengthFitsSmi);
+ StaticAssertStringLengthFitsSmi();
if (index >= length) goto IfOutOfBounds;
goto IfInBounds(string, index, length);
}
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc
index 9bf6f416c2..0704d8681b 100644
--- a/deps/v8/src/builtins/builtins-wasm-gen.cc
+++ b/deps/v8/src/builtins/builtins-wasm-gen.cc
@@ -107,26 +107,19 @@ TF_BUILTIN(WasmI64AtomicWait32, WasmBuiltinsAssembler) {
Return(Unsigned(SmiToInt32(result_smi)));
}
-TF_BUILTIN(WasmAllocatePair, WasmBuiltinsAssembler) {
- TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
- TNode<HeapObject> value1 = Parameter<HeapObject>(Descriptor::kValue1);
- TNode<HeapObject> value2 = Parameter<HeapObject>(Descriptor::kValue2);
-
- TNode<IntPtrT> roots = LoadObjectField<IntPtrT>(
- instance, WasmInstanceObject::kIsolateRootOffset);
- TNode<Map> map = CAST(Load(
- MachineType::AnyTagged(), roots,
- IntPtrConstant(IsolateData::root_slot_offset(RootIndex::kTuple2Map))));
-
- TNode<IntPtrT> instance_size =
- TimesTaggedSize(LoadMapInstanceSizeInWords(map));
- TNode<Tuple2> result = UncheckedCast<Tuple2>(Allocate(instance_size));
-
- StoreMap(result, map);
- StoreObjectField(result, Tuple2::kValue1Offset, value1);
- StoreObjectField(result, Tuple2::kValue2Offset, value2);
-
- Return(result);
+TF_BUILTIN(JSToWasmLazyDeoptContinuation, WasmBuiltinsAssembler) {
+ // Reset thread_in_wasm_flag.
+ TNode<ExternalReference> thread_in_wasm_flag_address_address =
+ ExternalConstant(
+ ExternalReference::thread_in_wasm_flag_address_address(isolate()));
+ auto thread_in_wasm_flag_address =
+ Load<RawPtrT>(thread_in_wasm_flag_address_address);
+ StoreNoWriteBarrier(MachineRepresentation::kWord32,
+ thread_in_wasm_flag_address, Int32Constant(0));
+
+ // Return the argument.
+ auto value = Parameter<Object>(Descriptor::kArgument);
+ Return(value);
}
} // namespace internal
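
The WasmAllocatePair builtin is replaced by JSToWasmLazyDeoptContinuation, which does only two things: it clears the thread-in-wasm flag, reached through two levels of indirection via an ExternalReference, and returns its single argument unchanged. In plain C++ terms the behaviour amounts to something like the following sketch (the globals stand in for the isolate-provided addresses and are purely illustrative):

    #include <cstdint>
    #include <iostream>

    // Illustrative stand-ins for the isolate-provided indirection: the builtin
    // loads the flag's address from an address-of-address, then stores 0 to it.
    int32_t g_thread_in_wasm_flag = 1;
    int32_t* g_thread_in_wasm_flag_address = &g_thread_in_wasm_flag;
    int32_t** g_thread_in_wasm_flag_address_address = &g_thread_in_wasm_flag_address;

    // Sketch of the continuation: clear the flag, pass the argument through.
    int JSToWasmLazyDeoptContinuationSketch(int argument) {
      int32_t* flag_address = *g_thread_in_wasm_flag_address_address;
      *flag_address = 0;  // the builtin uses a plain word32 store with no write barrier
      return argument;
    }

    int main() {
      std::cout << JSToWasmLazyDeoptContinuationSketch(7) << " "
                << g_thread_in_wasm_flag << "\n";  // prints "7 0"
      return 0;
    }
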
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index 541f9ffac9..1e94dac811 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -88,14 +88,14 @@ const BuiltinMetadata builtin_metadata[] = {BUILTIN_LIST(
} // namespace
-BailoutId Builtins::GetContinuationBailoutId(Name name) {
+BytecodeOffset Builtins::GetContinuationBytecodeOffset(Name name) {
DCHECK(Builtins::KindOf(name) == TFJ || Builtins::KindOf(name) == TFC ||
Builtins::KindOf(name) == TFS);
- return BailoutId(BailoutId::kFirstBuiltinContinuationId + name);
+ return BytecodeOffset(BytecodeOffset::kFirstBuiltinContinuationId + name);
}
-Builtins::Name Builtins::GetBuiltinFromBailoutId(BailoutId id) {
- int builtin_index = id.ToInt() - BailoutId::kFirstBuiltinContinuationId;
+Builtins::Name Builtins::GetBuiltinFromBytecodeOffset(BytecodeOffset id) {
+ int builtin_index = id.ToInt() - BytecodeOffset::kFirstBuiltinContinuationId;
DCHECK(Builtins::KindOf(builtin_index) == TFJ ||
Builtins::KindOf(builtin_index) == TFC ||
Builtins::KindOf(builtin_index) == TFS);
@@ -483,7 +483,6 @@ bool Builtins::CodeObjectIsExecutable(int builtin_index) {
case Builtins::kCall_ReceiverIsNullOrUndefined:
case Builtins::kCall_ReceiverIsNotNullOrUndefined:
case Builtins::kCall_ReceiverIsAny:
- case Builtins::kArgumentsAdaptorTrampoline:
case Builtins::kHandleApiCall:
case Builtins::kInstantiateAsmJs:
case Builtins::kGenericJSToWasmWrapper:
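
The rename from BailoutId to BytecodeOffset in builtins.cc is mechanical, but the mapping it preserves is simple: a builtin continuation is encoded by adding the builtin's index to kFirstBuiltinContinuationId, and decoded by subtracting it back out. A minimal sketch of that round trip (the constant value below is a placeholder, not the real one):

    #include <cassert>

    // Placeholder for BytecodeOffset::kFirstBuiltinContinuationId; the real
    // constant lives in V8 and is not reproduced here.
    constexpr int kFirstBuiltinContinuationId = 1 << 30;

    int GetContinuationBytecodeOffset(int builtin_index) {
      return kFirstBuiltinContinuationId + builtin_index;
    }

    int GetBuiltinFromBytecodeOffset(int offset) {
      return offset - kFirstBuiltinContinuationId;
    }

    int main() {
      // Encoding and decoding are inverses of each other.
      int builtin = 42;
      assert(GetBuiltinFromBytecodeOffset(GetContinuationBytecodeOffset(builtin)) ==
             builtin);
      return 0;
    }
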
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index f16ce84b0d..7bb957bb66 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -20,7 +20,7 @@ class Handle;
class Isolate;
// Forward declarations.
-class BailoutId;
+class BytecodeOffset;
class RootVisitor;
enum class InterpreterPushArgsMode : unsigned;
namespace compiler {
@@ -79,8 +79,8 @@ class Builtins {
// The different builtin kinds are documented in builtins-definitions.h.
enum Kind { CPP, TFJ, TFC, TFS, TFH, BCH, ASM };
- static BailoutId GetContinuationBailoutId(Name name);
- static Name GetBuiltinFromBailoutId(BailoutId);
+ static BytecodeOffset GetContinuationBytecodeOffset(Name name);
+ static Name GetBuiltinFromBytecodeOffset(BytecodeOffset);
// Convenience wrappers.
Handle<Code> CallFunction(ConvertReceiverMode = ConvertReceiverMode::kAny);
diff --git a/deps/v8/src/builtins/cast.tq b/deps/v8/src/builtins/cast.tq
index 056cf8bc74..b490055a19 100644
--- a/deps/v8/src/builtins/cast.tq
+++ b/deps/v8/src/builtins/cast.tq
@@ -258,7 +258,7 @@ Cast<Boolean>(o: HeapObject): Boolean labels CastError {
return Cast<Boolean>(o) otherwise CastError;
}
-// TODO(tebbi): These trivial casts for union types should be generated
+// TODO(turbofan): These trivial casts for union types should be generated
// automatically.
Cast<JSPrimitive>(o: Object): JSPrimitive labels CastError {
diff --git a/deps/v8/src/builtins/constructor.tq b/deps/v8/src/builtins/constructor.tq
index 14304b1d55..53088c627d 100644
--- a/deps/v8/src/builtins/constructor.tq
+++ b/deps/v8/src/builtins/constructor.tq
@@ -6,11 +6,11 @@
namespace runtime {
extern runtime CreateArrayLiteral(
- Context, FeedbackVector, TaggedIndex, ArrayBoilerplateDescription,
- Smi): HeapObject;
+ Context, Undefined | FeedbackVector, TaggedIndex,
+ ArrayBoilerplateDescription, Smi): HeapObject;
extern runtime CreateObjectLiteral(
- Context, FeedbackVector, TaggedIndex, ObjectBoilerplateDescription,
- Smi): HeapObject;
+ Context, Undefined | FeedbackVector, TaggedIndex,
+ ObjectBoilerplateDescription, Smi): HeapObject;
}
namespace constructor {
@@ -22,8 +22,8 @@ extern enum AllocationSiteMode {
TRACK_ALLOCATION_SITE
}
-const kIsShallowAndDisableMementos: constexpr int31
- generates 'AggregateLiteral::Flags::kIsShallowAndDisableMementos';
+const kIsShallow: constexpr int31
+ generates 'AggregateLiteral::Flags::kIsShallow';
const kEvalScope: constexpr ScopeType generates 'ScopeType::EVAL_SCOPE';
const kFunctionScope:
constexpr ScopeType generates 'ScopeType::FUNCTION_SCOPE';
@@ -60,17 +60,18 @@ builtin CreateRegExpLiteral(implicit context: Context)(
}
builtin CreateShallowArrayLiteral(implicit context: Context)(
- feedbackVector: FeedbackVector, slot: TaggedIndex,
+ maybeFeedbackVector: Undefined|FeedbackVector, slot: TaggedIndex,
constantElements: ArrayBoilerplateDescription): HeapObject {
try {
+ const vector = Cast<FeedbackVector>(maybeFeedbackVector)
+ otherwise CallRuntime;
return CreateShallowArrayLiteral(
- feedbackVector, slot, context,
- AllocationSiteMode::DONT_TRACK_ALLOCATION_SITE)
+ vector, slot, context, AllocationSiteMode::TRACK_ALLOCATION_SITE)
otherwise CallRuntime;
} label CallRuntime deferred {
tail runtime::CreateArrayLiteral(
- context, feedbackVector, slot, constantElements,
- SmiConstant(kIsShallowAndDisableMementos));
+ context, maybeFeedbackVector, slot, constantElements,
+ SmiConstant(kIsShallow));
}
}
@@ -80,14 +81,16 @@ builtin CreateEmptyArrayLiteral(implicit context: Context)(
}
builtin CreateShallowObjectLiteral(implicit context: Context)(
- feedbackVector: FeedbackVector, slot: TaggedIndex,
+ maybeFeedbackVector: Undefined|FeedbackVector, slot: TaggedIndex,
desc: ObjectBoilerplateDescription, flags: Smi): HeapObject {
try {
+ const feedbackVector = Cast<FeedbackVector>(maybeFeedbackVector)
+ otherwise CallRuntime;
return CreateShallowObjectLiteral(feedbackVector, slot)
otherwise CallRuntime;
} label CallRuntime deferred {
tail runtime::CreateObjectLiteral(
- context, feedbackVector, slot, desc, flags);
+ context, maybeFeedbackVector, slot, desc, flags);
}
}
diff --git a/deps/v8/src/builtins/frame-arguments.tq b/deps/v8/src/builtins/frame-arguments.tq
index 27f3266e2d..5f25c97dc3 100644
--- a/deps/v8/src/builtins/frame-arguments.tq
+++ b/deps/v8/src/builtins/frame-arguments.tq
@@ -11,8 +11,6 @@ struct Arguments {
extern operator '[]' macro GetArgumentValue(Arguments, intptr): JSAny;
extern macro GetFrameArguments(FrameWithArguments, intptr): Arguments;
-const kNoArgumentsAdaptor:
- constexpr bool generates 'kNoArgumentsAdaptor';
struct ArgumentsIterator {
macro Next(): Object labels NoMore {
@@ -49,30 +47,13 @@ macro GetFrameWithArgumentsInfo(implicit context: Context)():
const shared: SharedFunctionInfo = f.shared_function_info;
const formalParameterCount: bint =
Convert<bint>(Convert<int32>(shared.formal_parameter_count));
- if constexpr (kNoArgumentsAdaptor) {
- // TODO(victorgomes): When removing the v8_disable_arguments_adaptor flag,
- // FrameWithArgumentsInfo can be simplified, since the frame field already
- // contains the argument count.
- const argumentCount: bint = Convert<bint>(frame.argument_count);
- return FrameWithArgumentsInfo{
- frame,
- argument_count: argumentCount,
- formal_parameter_count: formalParameterCount
- };
- } else {
- const argumentCount: bint = formalParameterCount;
-
- const adaptor = Cast<ArgumentsAdaptorFrame>(frame.caller)
- otherwise return FrameWithArgumentsInfo{
- frame,
- argument_count: argumentCount,
- formal_parameter_count: formalParameterCount
- };
-
- return FrameWithArgumentsInfo{
- frame: adaptor,
- argument_count: Convert<bint>(adaptor.length),
- formal_parameter_count: formalParameterCount
- };
- }
+ // TODO(victorgomes): When removing the v8_disable_arguments_adaptor flag,
+ // FrameWithArgumentsInfo can be simplified, since the frame field already
+ // contains the argument count.
+ const argumentCount: bint = Convert<bint>(frame.argument_count);
+ return FrameWithArgumentsInfo{
+ frame,
+ argument_count: argumentCount,
+ formal_parameter_count: formalParameterCount
+ };
}
diff --git a/deps/v8/src/builtins/frames.tq b/deps/v8/src/builtins/frames.tq
index 3b716b7a64..03336bd464 100644
--- a/deps/v8/src/builtins/frames.tq
+++ b/deps/v8/src/builtins/frames.tq
@@ -3,8 +3,6 @@
// found in the LICENSE file.
type FrameType extends Smi constexpr 'StackFrame::Type';
-const ARGUMENTS_ADAPTOR_FRAME: constexpr FrameType
- generates 'StackFrame::ARGUMENTS_ADAPTOR';
const STUB_FRAME: constexpr FrameType
generates 'StackFrame::STUB';
const kFrameTypeCount:
@@ -31,9 +29,8 @@ Cast<FrameType>(o: Object): FrameType
type FrameBase extends RawPtr constexpr 'void*';
type StandardFrame extends FrameBase constexpr 'void*';
-type ArgumentsAdaptorFrame extends FrameBase constexpr 'void*';
type StubFrame extends FrameBase constexpr 'void*';
-type FrameWithArguments = StandardFrame|ArgumentsAdaptorFrame;
+type FrameWithArguments = StandardFrame;
type Frame = FrameWithArguments|StubFrame;
extern macro LoadFramePointer(): Frame;
@@ -46,9 +43,6 @@ macro LoadObjectFromFrame(f: Frame, o: constexpr int32): Object {
macro LoadPointerFromFrame(f: Frame, o: constexpr int32): RawPtr {
return LoadBufferPointer(f, o);
}
-macro LoadSmiFromFrame(f: Frame, o: constexpr int32): Smi {
- return LoadBufferSmi(f, o);
-}
macro LoadIntptrFromFrame(f: Frame, o: constexpr int32): intptr {
return LoadBufferIntptr(f, o);
}
@@ -102,14 +96,6 @@ macro LoadContextOrFrameTypeFromFrame(implicit context: Context)(f: Frame):
LoadObjectFromFrame(f, kStandardFrameContextOrFrameTypeOffset));
}
-const kArgumentsAdaptorFrameLengthOffset: constexpr int31
- generates 'ArgumentsAdaptorFrameConstants::kLengthOffset';
-operator '.length'
-macro LoadLengthFromAdapterFrame(implicit context: Context)(
- f: ArgumentsAdaptorFrame): Smi {
- return LoadSmiFromFrame(f, kArgumentsAdaptorFrameLengthOffset);
-}
-
operator '==' macro FrameTypeEquals(f1: FrameType, f2: FrameType): bool {
return TaggedEqual(f1, f2);
}
@@ -135,16 +121,6 @@ Cast<StandardFrame>(implicit context: Context)(f: Frame):
goto CastError;
}
-Cast<ArgumentsAdaptorFrame>(implicit context: Context)(f: Frame):
- ArgumentsAdaptorFrame labels CastError {
- const t: FrameType =
- Cast<FrameType>(f.context_or_frame_type) otherwise CastError;
- if (t == ARGUMENTS_ADAPTOR_FRAME) {
- return %RawDownCast<ArgumentsAdaptorFrame>(f);
- }
- goto CastError;
-}
-
// Load target function from the current JS frame.
// This is an alternative way of getting the target function in addition to
// Parameter(Descriptor::kJSTarget). The latter should be used near the
diff --git a/deps/v8/src/builtins/generate-bytecodes-builtins-list.cc b/deps/v8/src/builtins/generate-bytecodes-builtins-list.cc
index 7317402fd5..5b3c297360 100644
--- a/deps/v8/src/builtins/generate-bytecodes-builtins-list.cc
+++ b/deps/v8/src/builtins/generate-bytecodes-builtins-list.cc
@@ -19,9 +19,18 @@ void WriteBytecode(std::ofstream& out, Bytecode bytecode,
int table_index) {
DCHECK_NOT_NULL(count);
if (Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
- out << " \\\n V(" << Bytecodes::ToString(bytecode, operand_scale, "")
- << "Handler, interpreter::OperandScale::k" << operand_scale
- << ", interpreter::Bytecode::k" << Bytecodes::ToString(bytecode) << ")";
+ std::string name = Bytecodes::ToString(bytecode, operand_scale, "");
+
+ // The handler for Star0 is used for all short star codes. Rename it to
+ // something more generic.
+ if (bytecode == Bytecode::kStar0) {
+ DCHECK_EQ(operand_scale, OperandScale::kSingle);
+ name = "ShortStar";
+ }
+
+ out << " \\\n V(" << name << "Handler, interpreter::OperandScale::k"
+ << operand_scale << ", interpreter::Bytecode::k"
+ << Bytecodes::ToString(bytecode) << ")";
offset_table[table_index] = *count;
(*count)++;
} else {
@@ -62,7 +71,8 @@ void WriteHeader(const char* header_filename) {
#undef ADD_BYTECODES
int extra_wide_count = count - wide_count - single_count;
CHECK_GT(single_count, wide_count);
- CHECK_EQ(single_count, Bytecodes::kBytecodeCount);
+ CHECK_EQ(single_count,
+ Bytecodes::kBytecodeCount - Bytecodes::kShortStarCount + 1);
CHECK_EQ(wide_count, extra_wide_count);
out << "\n\nconstexpr int kNumberOfBytecodeHandlers = " << single_count
<< ";\n"
@@ -73,9 +83,10 @@ void WriteHeader(const char* header_filename) {
<< "// Mapping from Bytecode to a dense form with all the illegal\n"
<< "// wide Bytecodes removed. Used to index into the builtins table.\n"
<< "constexpr uint8_t kWideBytecodeToBuiltinsMapping["
- << "kNumberOfBytecodeHandlers] = { \n";
+ << Bytecodes::kBytecodeCount << "] = { \n";
- for (int i = single_count; i < 2 * single_count; ++i) {
+ for (int i = Bytecodes::kBytecodeCount; i < 2 * Bytecodes::kBytecodeCount;
+ ++i) {
int offset = offset_table[i];
if (offset == kIllegalBytecodeHandler) {
offset = kIllegalBytecodeHandlerEncoding;
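
The generator changes above account for all short Star bytecodes now sharing a single handler (emitted under the name ShortStar), so the number of single-width handlers becomes kBytecodeCount - kShortStarCount + 1, while the wide-to-builtin mapping table is sized and indexed by the full kBytecodeCount. A toy sketch of that counting (the constants below are invented for illustration):

    #include <cassert>

    // Invented counts for illustration only; the real values come from
    // interpreter::Bytecodes.
    constexpr int kBytecodeCount = 200;
    constexpr int kShortStarCount = 16;  // e.g. Star0..Star15 share one handler.

    int main() {
      // One handler per bytecode, except the short-star group collapses to one.
      int single_width_handlers = kBytecodeCount - kShortStarCount + 1;
      assert(single_width_handlers == 185);
      return 0;
    }
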
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 62c5af2dd6..03bad42d67 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -368,6 +368,12 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
IsolateAddressId::kCEntryFPAddress, masm->isolate());
__ push(__ ExternalReferenceAsOperand(c_entry_fp, edi));
+ // Clear c_entry_fp, now we've pushed its previous value to the stack.
+ // If the c_entry_fp is not already zero and we don't clear it, the
+ // SafeStackFrameIterator will assume we are executing C++ and miss the JS
+ // frames on top.
+ __ mov(__ ExternalReferenceAsOperand(c_entry_fp, edi), Immediate(0));
+
// Store the context address in the previously-reserved slot.
ExternalReference context_address = ExternalReference::Create(
IsolateAddressId::kContextAddress, masm->isolate());
@@ -714,7 +720,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ mov(params_size,
FieldOperand(params_size, BytecodeArray::kParameterSizeOffset));
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
Register actual_params_size = scratch2;
// Compute the size of the actual parameters + receiver (in bytes).
__ mov(actual_params_size, Operand(ebp, StandardFrameConstants::kArgCOffset));
@@ -729,7 +734,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ j(greater_equal, &corrected_args_count, Label::kNear);
__ mov(params_size, actual_params_size);
__ bind(&corrected_args_count);
-#endif
// Leave the frame (also dropping the register file).
__ leave();
@@ -876,13 +880,13 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// Load the next bytecode and update table to the wide scaled table.
__ add(bytecode_size_table,
- Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount));
+ Immediate(kByteSize * interpreter::Bytecodes::kBytecodeCount));
__ jmp(&process_bytecode, Label::kNear);
__ bind(&extra_wide);
// Update table to the extra wide scaled table.
__ add(bytecode_size_table,
- Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
+ Immediate(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
__ bind(&process_bytecode);
@@ -908,12 +912,37 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
- __ add(bytecode_offset,
- Operand(bytecode_size_table, bytecode, times_int_size, 0));
+ __ movzx_b(bytecode_size_table,
+ Operand(bytecode_size_table, bytecode, times_1, 0));
+ __ add(bytecode_offset, bytecode_size_table);
__ bind(&end);
}
+static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
+ MacroAssembler* masm, Register optimization_state,
+ XMMRegister saved_feedback_vector) {
+ Label maybe_has_optimized_code;
+ // Check if optimized code is available
+ __ test(
+ optimization_state,
+ Immediate(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+ __ j(zero, &maybe_has_optimized_code);
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, optimization_marker);
+
+ __ bind(&maybe_has_optimized_code);
+ Register optimized_code_entry = optimization_marker;
+ Register feedback_vector = optimization_marker;
+ __ movd(feedback_vector, saved_feedback_vector); // Restore feedback vector.
+ __ mov(
+ optimized_code_entry,
+ FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry);
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
@@ -1142,29 +1171,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
__ bind(&has_optimized_code_or_marker);
- Label maybe_has_optimized_code;
- // Restore actual argument count.
- __ movd(eax, xmm0);
-
- // Check if optimized code is available
- __ test(
- optimization_state,
- Immediate(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
- __ j(zero, &maybe_has_optimized_code);
-
- Register optimization_marker = optimization_state;
- __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
- MaybeOptimizeCode(masm, optimization_marker);
- // Fall through if there's no runnable optimized code.
- __ jmp(&not_optimized);
-
- __ bind(&maybe_has_optimized_code);
- Register optimized_code_entry = optimization_marker;
- __ movd(optimized_code_entry, xmm1);
- __ mov(
- optimized_code_entry,
- FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
- TailCallOptimizedCodeSlot(masm, optimized_code_entry);
+ {
+ // Restore actual argument count.
+ __ movd(eax, xmm0);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ xmm1);
+ }
__ bind(&compile_lazy);
// Restore actual argument count.
@@ -1379,7 +1391,7 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ Pop(kJavaScriptCallTargetRegister);
__ PushReturnAddressFrom(eax);
- __ AssertFunction(kJavaScriptCallTargetRegister);
+ __ AssertFunction(kJavaScriptCallTargetRegister, eax);
__ AssertUndefinedOrAllocationSite(kJavaScriptCallExtraArg1Register, eax);
__ movd(eax, xmm0); // Reload number of arguments.
@@ -1863,41 +1875,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ push(ebp);
- __ mov(ebp, esp);
-
- // Store the arguments adaptor context sentinel.
- __ push(Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Push the function on the stack.
- __ push(edi);
-
- // Preserve the number of arguments on the stack. Must preserve eax,
- // ebx and ecx because these registers are used when copying the
- // arguments and the receiver.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ lea(edi, Operand(eax, eax, times_1, kSmiTag));
- __ push(edi);
-
- __ Push(Immediate(0)); // Padding.
-}
-
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
- // Retrieve the number of arguments from the stack.
- __ mov(edi, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- // Leave the frame.
- __ leave();
-
- // Remove caller arguments from the stack.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, edi, times_half_system_pointer_size,
- 1 * kSystemPointerSize)); // 1 ~ receiver
- __ PushReturnAddressFrom(ecx);
-}
-
// static
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
@@ -2051,37 +2028,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ movd(xmm1, edx); // Preserve new.target (in case of [[Construct]]).
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
- // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
- // code is erased.
- __ mov(scratch, ebp);
- __ mov(edx, Operand(ebp, StandardFrameConstants::kArgCOffset));
-#else
- // Check if we have an arguments adaptor frame below the function frame.
- Label arguments_adaptor, arguments_done;
- __ mov(scratch, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &arguments_adaptor, Label::kNear);
- {
- __ mov(edx, Operand(ebp, StandardFrameConstants::kFunctionOffset));
- __ mov(edx, FieldOperand(edx, JSFunction::kSharedFunctionInfoOffset));
- __ movzx_w(edx, FieldOperand(
- edx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(scratch, ebp);
- }
- __ jmp(&arguments_done, Label::kNear);
- __ bind(&arguments_adaptor);
- {
- // Just load the length from the ArgumentsAdaptorFrame.
- __ mov(edx,
- Operand(scratch, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(edx);
- }
- __ bind(&arguments_done);
-#endif
-
Label stack_done, stack_overflow;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kArgCOffset));
__ sub(edx, ecx);
__ j(less_equal, &stack_done);
{
@@ -2091,7 +2039,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// -- ecx : start index (to support rest parameters)
// -- edx : number of arguments to copy, i.e. arguments count - start index
// -- edi : the target to call (can be any Object)
- // -- esi : point to the caller stack frame
+ // -- ebp : point to the caller stack frame
// -- xmm0 : context for the Call / Construct builtin
// -- xmm1 : the new target (for [[Construct]] calls)
// -----------------------------------
@@ -2103,17 +2051,11 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Register scratch = ebx;
- // Point to the first argument to copy (skipping receiver).
- __ lea(ecx, Operand(ecx, times_system_pointer_size,
- CommonFrameConstants::kFixedFrameSizeAboveFp +
- kSystemPointerSize));
- __ add(esi, ecx);
-
// Move the arguments already in the stack,
// including the receiver and the return address.
{
Label copy, check;
- Register src = ecx, current = edi;
+ Register src = esi, current = edi;
// Update stack pointer.
__ mov(src, esp);
__ lea(scratch, Operand(edx, times_system_pointer_size, 0));
@@ -2130,18 +2072,24 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&check);
__ cmp(current, eax);
__ j(less, &copy);
- __ lea(ecx, Operand(esp, eax, times_system_pointer_size, 0));
+ __ lea(esi, Operand(esp, eax, times_system_pointer_size, 0));
}
// Update total number of arguments.
__ sub(eax, Immediate(2));
__ add(eax, edx);
+ // Point to the first argument to copy (skipping receiver).
+ __ lea(ecx, Operand(ecx, times_system_pointer_size,
+ CommonFrameConstants::kFixedFrameSizeAboveFp +
+ kSystemPointerSize));
+ __ add(ecx, ebp);
+
// Copy the additional caller arguments onto the stack.
// TODO(victorgomes): Consider using forward order as potentially more cache
// friendly.
{
- Register src = esi, dest = ecx, num = edx;
+ Register src = ecx, dest = esi, num = edx;
Label loop;
__ bind(&loop);
__ dec(num);
@@ -2175,7 +2123,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- edi : the function to call (checked to be a JSFunction)
// -----------------------------------
StackArgumentsAccessor args(eax);
- __ AssertFunction(edi);
+ __ AssertFunction(edi, edx);
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
// Check that the function is not a "classConstructor".
@@ -2390,12 +2338,15 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
non_jsboundfunction;
__ JumpIfSmi(edi, &non_callable);
__ bind(&non_smi);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &non_jsfunction);
+ __ LoadMap(ecx, edi);
+ __ CmpInstanceTypeRange(ecx, ecx, FIRST_JS_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_TYPE);
+ __ j(above, &non_jsfunction);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET);
__ bind(&non_jsfunction);
+ __ LoadMap(ecx, edi);
__ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
__ j(not_equal, &non_jsboundfunction);
__ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
@@ -2440,7 +2391,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// -- edi : the constructor to call (checked to be a JSFunction)
// -----------------------------------
__ AssertConstructor(edi);
- __ AssertFunction(edi);
+ __ AssertFunction(edi, ecx);
Label call_generic_stub;
@@ -2512,14 +2463,16 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ j(zero, &non_constructor);
// Dispatch based on instance type.
- __ CmpInstanceType(ecx, JS_FUNCTION_TYPE);
- __ j(not_equal, &non_jsfunction);
+ __ CmpInstanceTypeRange(ecx, ecx, FIRST_JS_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_TYPE);
+ __ j(above, &non_jsfunction);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
RelocInfo::CODE_TARGET);
// Only dispatch to bound functions after checking whether they are
// constructors.
__ bind(&non_jsfunction);
+ __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
__ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
__ j(not_equal, &non_jsboundfunction);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
@@ -2550,123 +2503,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : actual number of arguments
- // -- ecx : expected number of arguments
- // -- edx : new target (passed through to callee)
- // -- edi : function (passed through to callee)
- // -----------------------------------
-
- const Register kExpectedNumberOfArgumentsRegister = ecx;
-
- Label invoke, dont_adapt_arguments, stack_overflow, enough, too_few;
- __ cmp(kExpectedNumberOfArgumentsRegister, kDontAdaptArgumentsSentinel);
- __ j(equal, &dont_adapt_arguments);
- __ cmp(eax, kExpectedNumberOfArgumentsRegister);
- __ j(less, &too_few);
-
- { // Enough parameters: Actual >= expected.
- __ bind(&enough);
- EnterArgumentsAdaptorFrame(masm);
- // edi is used as a scratch register. It should be restored from the frame
- // when needed.
- __ StackOverflowCheck(kExpectedNumberOfArgumentsRegister, edi,
- &stack_overflow);
-
- // Copy receiver and all expected arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(edi, Operand(ebp, ecx, times_system_pointer_size, offset));
- __ mov(eax, -1); // account for receiver
-
- Label copy;
- __ bind(&copy);
- __ inc(eax);
- __ push(Operand(edi, 0));
- __ sub(edi, Immediate(kSystemPointerSize));
- __ cmp(eax, kExpectedNumberOfArgumentsRegister);
- __ j(less, &copy);
- // eax now contains the expected number of arguments.
- __ jmp(&invoke);
- }
-
- { // Too few parameters: Actual < expected.
- __ bind(&too_few);
- EnterArgumentsAdaptorFrame(masm);
- // edi is used as a scratch register. It should be restored from the frame
- // when needed.
- __ StackOverflowCheck(kExpectedNumberOfArgumentsRegister, edi,
- &stack_overflow);
-
- // Remember expected arguments in xmm0.
- __ movd(xmm0, kExpectedNumberOfArgumentsRegister);
-
- // Remember new target.
- __ movd(xmm1, edx);
-
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ mov(edx, ecx);
- __ sub(edx, eax);
- __ bind(&fill);
- __ Push(Immediate(masm->isolate()->factory()->undefined_value()));
- __ dec(edx);
- __ j(greater, &fill);
-
- // Copy receiver and all actual arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(edi, Operand(ebp, eax, times_system_pointer_size, offset));
- __ mov(edx, Immediate(-1));
-
- Label copy;
- __ bind(&copy);
- __ inc(edx);
- __ push(Operand(edi, 0));
- __ sub(edi, Immediate(kSystemPointerSize));
- __ cmp(edx, eax);
- __ j(less, &copy);
-
- // Restore new.target
- __ movd(edx, xmm1);
-
- // Restore expected arguments.
- __ movd(eax, xmm0);
- }
-
- // Call the entry point.
- __ bind(&invoke);
- // Restore function pointer.
- __ mov(edi, Operand(ebp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
- // eax : expected number of arguments
- // edx : new target (passed through to callee)
- // edi : function (passed through to callee)
- static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
- __ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
- __ CallCodeObject(ecx);
-
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
-
- // Leave frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ ret(0);
-
- // -------------------------------------------
- // Dont adapt arguments.
- // -------------------------------------------
- __ bind(&dont_adapt_arguments);
- static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
- __ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
- __ JumpCodeObject(ecx);
-
- __ bind(&stack_overflow);
- {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ int3();
- }
-}
-
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2712,7 +2548,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
- // Save all parameter registers (see wasm-linkage.cc). They might be
+ // Save all parameter registers (see wasm-linkage.h). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
static_assert(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs ==
@@ -2936,6 +2772,11 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
__ bind(&skip);
+ // Clear c_entry_fp, like we do in `LeaveExitFrame`.
+ ExternalReference c_entry_fp_address = ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, masm->isolate());
+ __ mov(__ ExternalReferenceAsOperand(c_entry_fp_address, esi), Immediate(0));
+
// Compute the handler entry address and jump to it.
__ mov(edi, __ ExternalReferenceAsOperand(pending_handler_entrypoint_address,
edi));
diff --git a/deps/v8/src/builtins/ic-callable.tq b/deps/v8/src/builtins/ic-callable.tq
index 8ee7fab17a..85525c4c68 100644
--- a/deps/v8/src/builtins/ic-callable.tq
+++ b/deps/v8/src/builtins/ic-callable.tq
@@ -141,10 +141,25 @@ macro BothTaggedEqualArrayFunction(implicit context: Context)(
extern macro CreateAllocationSiteInFeedbackVector(
FeedbackVector, uintptr): AllocationSite;
+macro CastFeedbackVector(
+ maybeFeedbackVector: Undefined|FeedbackVector,
+ updateFeedbackMode: constexpr UpdateFeedbackMode):
+ FeedbackVector labels Fallback {
+ if constexpr (updateFeedbackMode == UpdateFeedbackMode::kGuaranteedFeedback) {
+ return UnsafeCast<FeedbackVector>(maybeFeedbackVector);
+ } else if constexpr (
+ updateFeedbackMode == UpdateFeedbackMode::kOptionalFeedback) {
+ return Cast<FeedbackVector>(maybeFeedbackVector) otherwise goto Fallback;
+ } else {
+ unreachable;
+ }
+}
+
macro CollectConstructFeedback(implicit context: Context)(
target: JSAny, newTarget: JSAny,
- maybeFeedbackVector: Undefined|FeedbackVector,
- slotId: uintptr): never labels ConstructGeneric,
+ maybeFeedbackVector: Undefined|FeedbackVector, slotId: uintptr,
+ updateFeedbackMode: constexpr UpdateFeedbackMode):
+ never labels ConstructGeneric,
ConstructArray(AllocationSite) {
// TODO(v8:9891): Remove this assert once all callers are ported to Torque.
// This assert ensures correctness of maybeFeedbackVector's type which can
@@ -152,8 +167,10 @@ macro CollectConstructFeedback(implicit context: Context)(
assert(
IsUndefined(maybeFeedbackVector) ||
Is<FeedbackVector>(maybeFeedbackVector));
- const feedbackVector = Cast<FeedbackVector>(maybeFeedbackVector)
- otherwise goto ConstructGeneric;
+
+ const feedbackVector = CastFeedbackVector(
+ maybeFeedbackVector, updateFeedbackMode) otherwise goto ConstructGeneric;
+
IncrementCallCount(feedbackVector, slotId);
try {
diff --git a/deps/v8/src/builtins/ic.tq b/deps/v8/src/builtins/ic.tq
index 848d7aad58..49d4e78fa5 100644
--- a/deps/v8/src/builtins/ic.tq
+++ b/deps/v8/src/builtins/ic.tq
@@ -25,11 +25,12 @@ macro CollectInstanceOfFeedback(
@export
macro CollectConstructFeedback(implicit context: Context)(
target: JSAny, newTarget: JSAny,
- maybeFeedbackVector: Undefined|FeedbackVector,
- slotId: uintptr): never labels ConstructGeneric,
+ maybeFeedbackVector: Undefined|FeedbackVector, slotId: uintptr,
+ updateFeedbackMode: constexpr UpdateFeedbackMode):
+ never labels ConstructGeneric,
ConstructArray(AllocationSite) {
callable::CollectConstructFeedback(
- target, newTarget, maybeFeedbackVector, slotId)
+ target, newTarget, maybeFeedbackVector, slotId, updateFeedbackMode)
otherwise ConstructGeneric, ConstructArray;
}
diff --git a/deps/v8/src/builtins/internal.tq b/deps/v8/src/builtins/internal.tq
index 7830cffb30..d0863f13a0 100644
--- a/deps/v8/src/builtins/internal.tq
+++ b/deps/v8/src/builtins/internal.tq
@@ -47,28 +47,22 @@ builtin BytecodeBudgetInterruptFromCode(implicit context: Context)(
tail runtime::BytecodeBudgetInterruptFromCode(feedbackCell);
}
-extern transitioning macro ForInPrepareForTorque(
- Map | FixedArray, uintptr, Undefined | FeedbackVector): FixedArray;
-
-transitioning builtin ForInPrepare(implicit _context: Context)(
- enumerator: Map|FixedArray, slot: uintptr,
- maybeFeedbackVector: Undefined|FeedbackVector): FixedArray {
- return ForInPrepareForTorque(enumerator, slot, maybeFeedbackVector);
-}
-
extern transitioning builtin ForInFilter(implicit context: Context)(
JSAny, HeapObject): JSAny;
extern enum ForInFeedback extends uint31 { kAny, ...}
extern macro UpdateFeedback(
- SmiTagged<ForInFeedback>, Undefined | FeedbackVector, uintptr);
+ SmiTagged<ForInFeedback>, Undefined | FeedbackVector, uintptr,
+ constexpr UpdateFeedbackMode);
@export
transitioning macro ForInNextSlow(
context: Context, slot: uintptr, receiver: JSAnyNotSmi, key: JSAny,
- cacheType: Object, maybeFeedbackVector: Undefined|FeedbackVector): JSAny {
+ cacheType: Object, maybeFeedbackVector: Undefined|FeedbackVector,
+ guaranteedFeedback: constexpr UpdateFeedbackMode): JSAny {
assert(receiver.map != cacheType); // Handled on the fast path.
UpdateFeedback(
- SmiTag<ForInFeedback>(ForInFeedback::kAny), maybeFeedbackVector, slot);
+ SmiTag<ForInFeedback>(ForInFeedback::kAny), maybeFeedbackVector, slot,
+ guaranteedFeedback);
return ForInFilter(key, receiver);
}
@@ -77,7 +71,7 @@ transitioning macro ForInNextSlow(
transitioning builtin ForInNext(
context: Context, slot: uintptr, receiver: JSAnyNotSmi,
cacheArray: FixedArray, cacheType: Object, cacheIndex: Smi,
- maybeFeedbackVector: Undefined|FeedbackVector): JSAny {
+ feedbackVector: FeedbackVector): JSAny {
// Load the next key from the enumeration array.
const key = UnsafeCast<JSAny>(cacheArray.objects[cacheIndex]);
@@ -87,7 +81,16 @@ transitioning builtin ForInNext(
}
return ForInNextSlow(
- context, slot, receiver, key, cacheType, maybeFeedbackVector);
+ context, slot, receiver, key, cacheType, feedbackVector,
+ UpdateFeedbackMode::kGuaranteedFeedback);
+}
+
+extern macro GetImportMetaObject(Context): Object;
+extern macro LoadContextFromBaseline(): Context;
+
+builtin GetImportMetaObjectBaseline(): Object {
+ const context: Context = LoadContextFromBaseline();
+ return GetImportMetaObject(context);
}
} // namespace internal
diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq
index 5d65db0957..2d06ebb929 100644
--- a/deps/v8/src/builtins/iterator.tq
+++ b/deps/v8/src/builtins/iterator.tq
@@ -74,6 +74,29 @@ transitioning builtin GetIteratorWithFeedback(
context, receiver, iteratorMethod, callSlotSmi, maybeFeedbackVector);
}
+extern macro LoadContextFromBaseline(): Context;
+extern macro LoadFeedbackVectorFromBaseline(): FeedbackVector;
+
+transitioning builtin GetIteratorBaseline(
+ context: Context, receiver: JSAny, loadSlot: TaggedIndex,
+ callSlot: TaggedIndex): JSAny {
+ const feedback: FeedbackVector = LoadFeedbackVectorFromBaseline();
+ const iteratorMethod: JSAny =
+ LoadIC(context, receiver, IteratorSymbolConstant(), loadSlot, feedback);
+ // TODO(v8:10047): Use TaggedIndex here once TurboFan supports it.
+ const callSlotSmi: Smi = TaggedIndexToSmi(callSlot);
+ return CallIteratorWithFeedback(
+ context, receiver, iteratorMethod, callSlotSmi, feedback);
+}
+
+extern macro CreateAsyncFromSyncIterator(Context, JSAny): JSAny;
+
+transitioning builtin CreateAsyncFromSyncIteratorBaseline(syncIterator: JSAny):
+ JSAny {
+ const context: Context = LoadContextFromBaseline();
+ return CreateAsyncFromSyncIterator(context, syncIterator);
+}
+
transitioning builtin CallIteratorWithFeedback(
context: Context, receiver: JSAny, iteratorMethod: JSAny, callSlot: Smi,
feedback: Undefined|FeedbackVector): JSAny {
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 805e66fe13..670238a45a 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -375,12 +375,18 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
__ li(t2, Operand(StackFrame::TypeToMarker(type)));
__ li(t1, Operand(StackFrame::TypeToMarker(type)));
- __ li(t0, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ __ li(t4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
masm->isolate()));
- __ lw(t0, MemOperand(t0));
+ __ lw(t0, MemOperand(t4));
__ Push(t3, t2, t1, t0);
pushed_stack_space += 4 * kPointerSize;
+ // Clear c_entry_fp, now we've pushed its previous value to the stack.
+ // If the c_entry_fp is not already zero and we don't clear it, the
+ // SafeStackFrameIterator will assume we are executing C++ and miss the JS
+ // frames on top.
+ __ Sw(zero_reg, MemOperand(t4));
+
// Set up frame pointer for the frame to be pushed.
__ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
pushed_stack_space += EntryFrameConstants::kCallerFPOffset;
@@ -766,7 +772,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ lw(params_size,
FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
Register actual_params_size = scratch2;
// Compute the size of the actual parameters + receiver (in bytes).
__ Lw(actual_params_size,
@@ -778,7 +783,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
// arguments.
__ slt(t2, params_size, actual_params_size);
__ movn(params_size, actual_params_size, t2);
-#endif
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
@@ -914,7 +918,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ Addu(scratch2, bytecode_array, bytecode_offset);
__ lbu(bytecode, MemOperand(scratch2));
__ Addu(bytecode_size_table, bytecode_size_table,
- Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
+ Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
__ jmp(&process_bytecode);
__ bind(&extra_wide);
@@ -923,7 +927,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ Addu(scratch2, bytecode_array, bytecode_offset);
__ lbu(bytecode, MemOperand(scratch2));
__ Addu(bytecode_size_table, bytecode_size_table,
- Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
+ Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
__ bind(&process_bytecode);
@@ -946,13 +950,35 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
- __ Lsa(scratch2, bytecode_size_table, bytecode, 2);
- __ lw(scratch2, MemOperand(scratch2));
+ __ Addu(scratch2, bytecode_size_table, bytecode);
+ __ lb(scratch2, MemOperand(scratch2));
__ Addu(bytecode_offset, bytecode_offset, scratch2);
__ bind(&end);
}
+static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
+ MacroAssembler* masm, Register optimization_state,
+ Register feedback_vector) {
+ Label maybe_has_optimized_code;
+ // Check if optimized code marker is available
+ __ andi(t1, optimization_state,
+ FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
+ __ Branch(&maybe_has_optimized_code, eq, t1, Operand(zero_reg));
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+
+ __ bind(&maybe_has_optimized_code);
+ Register optimized_code_entry = optimization_state;
+ __ Lw(optimization_marker,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
+
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, t1, t3);
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
@@ -1156,26 +1182,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
__ bind(&has_optimized_code_or_marker);
-
- Label maybe_has_optimized_code;
- // Check if optimized code marker is available
- __ andi(t1, optimization_state,
- FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
- __ Branch(&maybe_has_optimized_code, eq, t1, Operand(zero_reg));
-
- Register optimization_marker = optimization_state;
- __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
- MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
- // Fall through if there's no runnable optimized code.
- __ jmp(&not_optimized);
-
- __ bind(&maybe_has_optimized_code);
- Register optimized_code_entry = optimization_state;
- __ Lw(optimization_marker,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kMaybeOptimizedCodeOffset));
-
- TailCallOptimizedCodeSlot(masm, optimized_code_entry, t1, t3);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ feedback_vector);
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
@@ -1735,29 +1743,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ sll(a0, a0, kSmiTagSize);
- __ li(t0, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
- __ Push(Smi::zero()); // Padding.
- __ Addu(fp, sp,
- Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
-}
-
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- v0 : result being passed through
- // -----------------------------------
- // Get the number of arguments passed (as a smi), tear down the frame and
- // then tear down the parameters.
- __ lw(a1, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ mov(sp, fp);
- __ MultiPop(fp.bit() | ra.bit());
- __ Lsa(sp, sp, a1, kPointerSizeLog2 - kSmiTagSize);
- // Adjust for the receiver.
- __ Addu(sp, sp, Operand(kPointerSize));
-}
-
// static
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
@@ -1864,37 +1849,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&new_target_constructor);
}
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
- // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
- // code is erased.
- __ mov(t3, fp);
- __ Lw(t2, MemOperand(fp, StandardFrameConstants::kArgCOffset));
-#else
-
- // Check if we have an arguments adaptor frame below the function frame.
- Label arguments_adaptor, arguments_done;
- __ lw(t3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(t2, MemOperand(t3, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Branch(&arguments_adaptor, eq, t2,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- {
- __ lw(t2, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ lw(t2, FieldMemOperand(t2, JSFunction::kSharedFunctionInfoOffset));
- __ lhu(t2, FieldMemOperand(
- t2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(t3, fp);
- }
- __ Branch(&arguments_done);
- __ bind(&arguments_adaptor);
- {
- // Just get the length from the ArgumentsAdaptorFrame.
- __ lw(t2, MemOperand(t3, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(t2);
- }
- __ bind(&arguments_done);
-#endif
-
Label stack_done, stack_overflow;
+ __ Lw(t2, MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ Subu(t2, t2, a2);
__ Branch(&stack_done, le, t2, Operand(zero_reg));
{
@@ -1903,7 +1859,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Forward the arguments from the caller frame.
// Point to the first argument to copy (skipping the receiver).
- __ Addu(t3, t3,
+ __ Addu(t3, fp,
Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
kSystemPointerSize));
__ Lsa(t3, t3, a2, kSystemPointerSizeLog2);
@@ -2138,9 +2094,11 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
Label non_callable, non_smi;
__ JumpIfSmi(a1, &non_callable);
__ bind(&non_smi);
- __ GetObjectType(a1, t1, t2);
+ __ LoadMap(t1, a1);
+ __ GetInstanceTypeRange(t1, t2, FIRST_JS_FUNCTION_TYPE, t8);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
- RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
+ RelocInfo::CODE_TARGET, ls, t8,
+ Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
@@ -2297,9 +2255,10 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ Branch(&non_constructor, eq, t3, Operand(zero_reg));
// Dispatch based on instance type.
- __ lhu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ __ GetInstanceTypeRange(t1, t2, FIRST_JS_FUNCTION_TYPE, t8);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
+ RelocInfo::CODE_TARGET, ls, t8,
+ Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
// Only dispatch to bound functions after checking whether they are
// constructors.
@@ -2329,128 +2288,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- // State setup as expected by MacroAssembler::InvokePrologue.
- // ----------- S t a t e -------------
- // -- a0: actual arguments count
- // -- a1: function (passed through to callee)
- // -- a2: expected arguments count
- // -- a3: new target (passed through to callee)
- // -----------------------------------
-
- Label invoke, dont_adapt_arguments, stack_overflow;
-
- Label enough, too_few;
- __ Branch(&dont_adapt_arguments, eq, a2,
- Operand(kDontAdaptArgumentsSentinel));
- // We use Uless as the number of argument should always be greater than 0.
- __ Branch(&too_few, Uless, a0, Operand(a2));
-
- { // Enough parameters: actual >= expected.
- // a0: actual number of arguments as a smi
- // a1: function
- // a2: expected number of arguments
- // a3: new target (passed through to callee)
- __ bind(&enough);
- EnterArgumentsAdaptorFrame(masm);
- __ StackOverflowCheck(a2, t1, kScratchReg, &stack_overflow);
-
- // Calculate copy start address into a0 and copy end address into t1.
- __ Lsa(a0, fp, a2, kPointerSizeLog2);
- // Adjust for return address and receiver.
- __ Addu(a0, a0, Operand(2 * kPointerSize));
- // Compute copy end address.
- __ sll(t1, a2, kPointerSizeLog2);
- __ subu(t1, a0, t1);
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // a0: copy start address
- // a1: function
- // a2: expected number of arguments
- // a3: new target (passed through to callee)
- // t1: copy end address
-
- Label copy;
- __ bind(&copy);
- __ lw(t0, MemOperand(a0));
- __ push(t0);
- __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(t1));
- __ addiu(a0, a0, -kPointerSize); // In delay slot.
-
- __ jmp(&invoke);
- }
-
- { // Too few parameters: Actual < expected.
- __ bind(&too_few);
- EnterArgumentsAdaptorFrame(masm);
- __ StackOverflowCheck(a2, t1, kScratchReg, &stack_overflow);
-
- // Fill the remaining expected arguments with undefined.
- __ LoadRoot(t0, RootIndex::kUndefinedValue);
- __ SmiUntag(t2, a0);
- __ Subu(t2, a2, Operand(t2));
- __ sll(t1, t2, kSystemPointerSizeLog2);
- __ Subu(t1, fp, t1);
- // Adjust for frame.
- __ Subu(t1, t1,
- Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
- kSystemPointerSize));
-
- Label fill;
- __ bind(&fill);
- __ push(t0);
- __ Branch(&fill, ne, sp, Operand(t1));
-
- // Calculate copy start address into a0 and copy end address is fp.
- __ Lsa(a0, fp, a0, kPointerSizeLog2 - kSmiTagSize);
-
- // Copy the arguments (including the receiver) to the new stack frame.
- Label copy;
- __ bind(&copy);
-
- // Adjust load for return address and receiver.
- __ Lw(t0, MemOperand(a0, 2 * kSystemPointerSize));
- __ push(t0);
-
- __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(fp));
- __ Subu(a0, a0, Operand(kSystemPointerSize));
- }
-
- // Call the entry point.
- __ bind(&invoke);
- __ mov(a0, a2);
- // a0 : expected number of arguments
- // a1 : function (passed through to callee)
- // a3 : new target (passed through to callee)
- static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
- __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
- __ Call(a2);
-
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
-
- // Exit frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ Ret();
-
- // -------------------------------------------
- // Don't adapt arguments.
- // -------------------------------------------
- __ bind(&dont_adapt_arguments);
- static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
- __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(a2);
-
- __ bind(&stack_overflow);
- {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ break_(0xCC);
- }
-}
-
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in t0 by the jump table trampoline.
// Convert to Smi for the runtime call.
@@ -2459,7 +2296,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
- // Save all parameter registers (see wasm-linkage.cc). They might be
+ // Save all parameter registers (see wasm-linkage.h). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
constexpr RegList gp_regs = Register::ListOf(a0, a2, a3);
@@ -2640,6 +2477,15 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// underlying register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
+ // Clear c_entry_fp, like we do in `LeaveExitFrame`.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ li(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ Sw(zero_reg, MemOperand(scratch));
+ }
+
// Compute the handler entry address and jump to it.
__ li(t9, pending_handler_entrypoint_address);
__ lw(t9, MemOperand(t9));
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 4c42a2aa0a..c33f46d1c1 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -524,9 +524,16 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ li(s3, Operand(StackFrame::TypeToMarker(type)));
ExternalReference c_entry_fp = ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, masm->isolate());
- __ li(s4, c_entry_fp);
- __ Ld(s4, MemOperand(s4));
+ __ li(s5, c_entry_fp);
+ __ Ld(s4, MemOperand(s5));
__ Push(s1, s2, s3, s4);
+
+ // Clear c_entry_fp, now we've pushed its previous value to the stack.
+ // If the c_entry_fp is not already zero and we don't clear it, the
+ // SafeStackFrameIterator will assume we are executing C++ and miss the JS
+ // frames on top.
+ __ Sd(zero_reg, MemOperand(s5));
+
// Set up frame pointer for the frame to be pushed.
__ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
@@ -781,7 +788,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ Lw(params_size,
FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
Register actual_params_size = scratch2;
// Compute the size of the actual parameters + receiver (in bytes).
__ Ld(actual_params_size,
@@ -793,7 +799,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
// arguments.
__ slt(t2, params_size, actual_params_size);
__ movn(params_size, actual_params_size, t2);
-#endif
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
@@ -930,7 +935,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ Daddu(scratch2, bytecode_array, bytecode_offset);
__ Lbu(bytecode, MemOperand(scratch2));
__ Daddu(bytecode_size_table, bytecode_size_table,
- Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
+ Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
__ jmp(&process_bytecode);
__ bind(&extra_wide);
@@ -939,7 +944,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ Daddu(scratch2, bytecode_array, bytecode_offset);
__ Lbu(bytecode, MemOperand(scratch2));
__ Daddu(bytecode_size_table, bytecode_size_table,
- Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
+ Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
__ bind(&process_bytecode);
@@ -962,13 +967,35 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
- __ Dlsa(scratch2, bytecode_size_table, bytecode, 2);
- __ Lw(scratch2, MemOperand(scratch2));
+ __ Daddu(scratch2, bytecode_size_table, bytecode);
+ __ Lb(scratch2, MemOperand(scratch2));
__ Daddu(bytecode_offset, bytecode_offset, scratch2);
__ bind(&end);
}
+static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
+ MacroAssembler* masm, Register optimization_state,
+ Register feedback_vector) {
+ Label maybe_has_optimized_code;
+ // Check if optimized code marker is available
+ __ andi(t0, optimization_state,
+ FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
+ __ Branch(&maybe_has_optimized_code, eq, t0, Operand(zero_reg));
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+
+ __ bind(&maybe_has_optimized_code);
+ Register optimized_code_entry = optimization_state;
+ __ Ld(optimization_marker,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
+
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, t3, a5);
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
@@ -1173,25 +1200,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
__ bind(&has_optimized_code_or_marker);
- Label maybe_has_optimized_code;
- // Check if optimized code marker is available
- __ andi(t0, optimization_state,
- FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
- __ Branch(&maybe_has_optimized_code, eq, t0, Operand(zero_reg));
-
- Register optimization_marker = optimization_state;
- __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
- MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
- // Fall through if there's no runnable optimized code.
- __ jmp(&not_optimized);
-
- __ bind(&maybe_has_optimized_code);
- Register optimized_code_entry = optimization_state;
- __ Ld(optimization_marker,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kMaybeOptimizedCodeOffset));
-
- TailCallOptimizedCodeSlot(masm, optimized_code_entry, t3, a5);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ feedback_vector);
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
@@ -1790,30 +1800,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ SmiTag(a0);
- __ li(a4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ MultiPush(a0.bit() | a1.bit() | a4.bit() | fp.bit() | ra.bit());
- __ Push(Smi::zero()); // Padding.
- __ Daddu(fp, sp,
- Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
-}
-
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- v0 : result being passed through
- // -----------------------------------
- // Get the number of arguments passed (as a smi), tear down the frame and
- // then tear down the parameters.
- __ Ld(a1, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ mov(sp, fp);
- __ MultiPop(fp.bit() | ra.bit());
- __ SmiScale(a4, a1, kPointerSizeLog2);
- __ Daddu(sp, sp, a4);
- // Adjust for the receiver.
- __ Daddu(sp, sp, Operand(kPointerSize));
-}
-
// static
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
@@ -1928,37 +1914,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&new_target_constructor);
}
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
- // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
- // code is erased.
- __ mov(a6, fp);
- __ Ld(a7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
-#else
-
- // Check if we have an arguments adaptor frame below the function frame.
- Label arguments_adaptor, arguments_done;
- __ Ld(a6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ld(a7, MemOperand(a6, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Branch(&arguments_adaptor, eq, a7,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- {
- __ Ld(a7, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ Ld(a7, FieldMemOperand(a7, JSFunction::kSharedFunctionInfoOffset));
- __ Lhu(a7, FieldMemOperand(
- a7, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(a6, fp);
- }
- __ Branch(&arguments_done);
- __ bind(&arguments_adaptor);
- {
- // Just get the length from the ArgumentsAdaptorFrame.
- __ SmiUntag(a7,
- MemOperand(a6, ArgumentsAdaptorFrameConstants::kLengthOffset));
- }
- __ bind(&arguments_done);
-#endif
-
Label stack_done, stack_overflow;
+ __ Ld(a7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ Subu(a7, a7, a2);
__ Branch(&stack_done, le, a7, Operand(zero_reg));
{
@@ -1968,7 +1925,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Forward the arguments from the caller frame.
// Point to the first argument to copy (skipping the receiver).
- __ Daddu(a6, a6,
+ __ Daddu(a6, fp,
Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
kSystemPointerSize));
__ Dlsa(a6, a6, a2, kSystemPointerSizeLog2);
@@ -2203,9 +2160,11 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
Label non_callable, non_smi;
__ JumpIfSmi(a1, &non_callable);
__ bind(&non_smi);
- __ GetObjectType(a1, t1, t2);
+ __ LoadMap(t1, a1);
+ __ GetInstanceTypeRange(t1, t2, FIRST_JS_FUNCTION_TYPE, t8);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
- RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
+ RelocInfo::CODE_TARGET, ls, t8,
+ Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
@@ -2360,9 +2319,10 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ Branch(&non_constructor, eq, t3, Operand(zero_reg));
// Dispatch based on instance type.
- __ Lhu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ __ GetInstanceTypeRange(t1, t2, FIRST_JS_FUNCTION_TYPE, t8);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
+ RelocInfo::CODE_TARGET, ls, t8,
+ Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
// Only dispatch to bound functions after checking whether they are
// constructors.
@@ -2392,131 +2352,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- // State setup as expected by MacroAssembler::InvokePrologue.
- // ----------- S t a t e -------------
- // -- a0: actual arguments count
- // -- a1: function (passed through to callee)
- // -- a2: expected arguments count
- // -- a3: new target (passed through to callee)
- // -----------------------------------
-
- Label invoke, dont_adapt_arguments, stack_overflow;
-
- Label enough, too_few;
- __ Branch(&dont_adapt_arguments, eq, a2,
- Operand(kDontAdaptArgumentsSentinel));
- // We use Uless as the number of argument should always be greater than 0.
- __ Branch(&too_few, Uless, a0, Operand(a2));
-
- { // Enough parameters: actual >= expected.
- // a0: actual number of arguments as a smi
- // a1: function
- // a2: expected number of arguments
- // a3: new target (passed through to callee)
- __ bind(&enough);
- EnterArgumentsAdaptorFrame(masm);
- __ StackOverflowCheck(a2, a5, kScratchReg, &stack_overflow);
-
- // Calculate copy start address into a0 and copy end address into a4.
- __ dsll(a0, a2, kPointerSizeLog2);
- __ Daddu(a0, fp, a0);
-
- // Adjust for return address and receiver.
- __ Daddu(a0, a0, Operand(2 * kPointerSize));
- // Compute copy end address.
- __ dsll(a4, a2, kPointerSizeLog2);
- __ dsubu(a4, a0, a4);
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // a0: copy start address
- // a1: function
- // a2: expected number of arguments
- // a3: new target (passed through to callee)
- // a4: copy end address
-
- Label copy;
- __ bind(&copy);
- __ Ld(a5, MemOperand(a0));
- __ push(a5);
- __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a4));
- __ daddiu(a0, a0, -kPointerSize); // In delay slot.
-
- __ jmp(&invoke);
- }
-
- { // Too few parameters: Actual < expected.
- __ bind(&too_few);
- EnterArgumentsAdaptorFrame(masm);
- __ StackOverflowCheck(a2, a5, kScratchReg, &stack_overflow);
-
- // Fill the remaining expected arguments with undefined.
- __ LoadRoot(t0, RootIndex::kUndefinedValue);
- __ SmiUntag(t1, a0);
- __ Dsubu(t2, a2, Operand(t1));
- __ dsll(a4, t2, kSystemPointerSizeLog2);
- __ Dsubu(a4, fp, a4);
- // Adjust for frame.
- __ Dsubu(a4, a4,
- Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
- kSystemPointerSize));
-
- Label fill;
- __ bind(&fill);
- __ push(t0);
- __ Branch(&fill, ne, sp, Operand(a4));
-
- // Calculate copy start address into r0 and copy end address is fp.
- __ SmiScale(a0, a0, kPointerSizeLog2);
- __ Daddu(a0, fp, a0);
-
- // Copy the arguments (including the receiver) to the new stack frame.
- Label copy;
- __ bind(&copy);
-
- // Adjust load for return address and receiver.
- __ Ld(t0, MemOperand(a0, 2 * kSystemPointerSize));
- __ push(t0);
-
- __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(fp));
- __ Dsubu(a0, a0, Operand(kSystemPointerSize));
- }
-
- // Call the entry point.
- __ bind(&invoke);
- __ mov(a0, a2);
- // a0 : expected number of arguments
- // a1 : function (passed through to callee)
- // a3: new target (passed through to callee)
- static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
- __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(a2);
-
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
-
- // Exit frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ Ret();
-
- // -------------------------------------------
- // Don't adapt arguments.
- // -------------------------------------------
- __ bind(&dont_adapt_arguments);
- static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
- __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(a2);
-
- __ bind(&stack_overflow);
- {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ break_(0xCC);
- }
-}
-
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in t0 by the jump table trampoline.
// Convert to Smi for the runtime call
@@ -2525,7 +2360,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
- // Save all parameter registers (see wasm-linkage.cc). They might be
+ // Save all parameter registers (see wasm-linkage.h). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
constexpr RegList gp_regs =
@@ -2539,7 +2374,12 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
STATIC_ASSERT(num_to_push ==
WasmCompileLazyFrameConstants::kNumberOfSavedAllParamRegs);
__ MultiPush(gp_regs);
- __ MultiPushFPU(fp_regs);
+ if (CpuFeatures::IsSupported(MIPS_SIMD)) {
+ __ MultiPushMSA(fp_regs);
+ } else {
+ __ MultiPushFPU(fp_regs);
+ __ Dsubu(sp, sp, base::bits::CountPopulation(fp_regs) * kDoubleSize);
+ }
// Pass instance and function index as an explicit arguments to the runtime
// function.
@@ -2550,7 +2390,12 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ CallRuntime(Runtime::kWasmCompileLazy, 2);
// Restore registers.
- __ MultiPopFPU(fp_regs);
+ if (CpuFeatures::IsSupported(MIPS_SIMD)) {
+ __ MultiPopMSA(fp_regs);
+ } else {
+ __ Daddu(sp, sp, base::bits::CountPopulation(fp_regs) * kDoubleSize);
+ __ MultiPopFPU(fp_regs);
+ }
__ MultiPop(gp_regs);
}
// Finally, jump to the entrypoint.
@@ -2707,6 +2552,15 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// underlying register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
+ // Clear c_entry_fp, like we do in `LeaveExitFrame`.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ li(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ Sd(zero_reg, MemOperand(scratch));
+ }
+
// Compute the handler entry address and jump to it.
__ li(t9, pending_handler_entrypoint_address);
__ Ld(t9, MemOperand(t9));
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 09b076c1c7..8fe4b004b4 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -554,6 +554,13 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ LoadP(r0, MemOperand(r3));
__ push(r0);
+ // Clear c_entry_fp, now we've pushed its previous value to the stack.
+ // If the c_entry_fp is not already zero and we don't clear it, the
+ // SafeStackFrameIterator will assume we are executing C++ and miss the JS
+ // frames on top.
+ __ li(r0, Operand::Zero());
+ __ StoreP(r0, MemOperand(r3));
+
Register scratch = r9;
// Set up frame pointer for the frame to be pushed.
__ addi(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
@@ -803,7 +810,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ lwz(params_size,
FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
Register actual_params_size = scratch2;
// Compute the size of the actual parameters + receiver (in bytes).
__ LoadP(actual_params_size,
@@ -819,7 +825,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ bge(&corrected_args_count);
__ mr(params_size, actual_params_size);
__ bind(&corrected_args_count);
-#endif
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
@@ -955,7 +960,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ addi(bytecode_offset, bytecode_offset, Operand(1));
__ lbzx(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ addi(bytecode_size_table, bytecode_size_table,
- Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
+ Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
__ b(&process_bytecode);
__ bind(&extra_wide);
@@ -963,7 +968,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ addi(bytecode_offset, bytecode_offset, Operand(1));
__ lbzx(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ addi(bytecode_size_table, bytecode_size_table,
- Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
+ Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
// Load the size of the current bytecode.
__ bind(&process_bytecode);
@@ -989,12 +994,35 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
- __ ShiftLeftImm(scratch3, bytecode, Operand(2));
- __ lwzx(scratch3, MemOperand(bytecode_size_table, scratch3));
+ __ lbzx(scratch3, MemOperand(bytecode_size_table, bytecode));
__ add(bytecode_offset, bytecode_offset, scratch3);
__ bind(&end);
}
+
+static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
+ MacroAssembler* masm, Register optimization_state,
+ Register feedback_vector) {
+ Label maybe_has_optimized_code;
+ // Check if optimized code is available
+ __ TestBitMask(optimization_state,
+ FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
+ r0);
+ __ beq(&maybe_has_optimized_code, cr0);
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+
+ __ bind(&maybe_has_optimized_code);
+ Register optimized_code_entry = optimization_state;
+ __ LoadAnyTaggedField(
+ optimization_marker,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, r9);
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
@@ -1220,27 +1248,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
__ bind(&has_optimized_code_or_marker);
- Label maybe_has_optimized_code;
-
- // Check if optimized code is available
- __ TestBitMask(optimization_state,
- FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
- r0);
- __ beq(&maybe_has_optimized_code, cr0);
-
- Register optimization_marker = optimization_state;
- __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
- MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
- // Fall through if there's no runnable optimized code.
- __ jmp(&not_optimized);
-
- __ bind(&maybe_has_optimized_code);
- Register optimized_code_entry = optimization_state;
- __ LoadAnyTaggedField(
- optimization_marker,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kMaybeOptimizedCodeOffset));
- TailCallOptimizedCodeSlot(masm, optimized_code_entry, r9);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ feedback_vector);
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
@@ -1834,34 +1843,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ SmiTag(r3);
- __ mov(r7, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ mflr(r0);
- __ push(r0);
- if (FLAG_enable_embedded_constant_pool) {
- __ Push(fp, kConstantPoolRegister, r7, r4, r3);
- } else {
- __ Push(fp, r7, r4, r3);
- }
- __ Push(Smi::zero()); // Padding.
- __ addi(fp, sp,
- Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
-}
-
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : result being passed through
- // -----------------------------------
- // Get the number of arguments passed (as a smi), tear down the frame and
- // then tear down the parameters.
- __ LoadP(r4, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- int stack_adjustment = kSystemPointerSize; // adjust for receiver
- __ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR, stack_adjustment);
- __ SmiToPtrArrayOffset(r0, r4);
- __ add(sp, sp, r0);
-}
-
// static
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
@@ -1978,40 +1959,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&new_target_constructor);
}
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
- // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
- // code is erased.
- __ mr(r7, fp);
- __ LoadP(r8, MemOperand(fp, StandardFrameConstants::kArgCOffset));
-#else
- // Check if we have an arguments adaptor frame below the function frame.
- Label arguments_adaptor, arguments_done;
- __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(scratch,
- MemOperand(r7, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmpi(scratch,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ beq(&arguments_adaptor);
- {
- __ LoadP(r8, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ LoadTaggedPointerField(
- r8, FieldMemOperand(r8, JSFunction::kSharedFunctionInfoOffset));
- __ LoadHalfWord(
- r8,
- FieldMemOperand(r8, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mr(r7, fp);
- }
- __ b(&arguments_done);
- __ bind(&arguments_adaptor);
- {
- // Load the length from the ArgumentsAdaptorFrame.
- __ LoadP(r8, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(r8);
- }
- __ bind(&arguments_done);
-#endif
-
Label stack_done, stack_overflow;
+ __ LoadP(r8, MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ sub(r8, r8, r5, LeaveOE, SetRC);
__ ble(&stack_done, cr0);
{
@@ -2021,7 +1970,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// -- r4 : the target to call (can be any Object)
// -- r5 : start index (to support rest parameters)
// -- r6 : the new.target (for [[Construct]] calls)
- // -- r7 : point to the caller stack frame
+ // -- fp : point to the caller stack frame
// -- r8 : number of arguments to copy, i.e. arguments count - start index
// -----------------------------------
@@ -2030,7 +1979,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Forward the arguments from the caller frame.
// Point to the first argument to copy (skipping the receiver).
- __ addi(r7, r7,
+ __ addi(r7, fp,
Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
kSystemPointerSize));
__ ShiftLeftImm(scratch, r5, Operand(kSystemPointerSizeLog2));
@@ -2291,9 +2240,11 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
Label non_callable, non_smi;
__ JumpIfSmi(r4, &non_callable);
__ bind(&non_smi);
- __ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE);
+ __ LoadMap(r7, r4);
+ __ CompareInstanceTypeRange(r7, r8, FIRST_JS_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
- RelocInfo::CODE_TARGET, eq);
+ RelocInfo::CODE_TARGET, le);
__ cmpi(r8, Operand(JS_BOUND_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
RelocInfo::CODE_TARGET, eq);
@@ -2405,9 +2356,10 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ beq(&non_constructor, cr0);
// Dispatch based on instance type.
- __ CompareInstanceType(r7, r8, JS_FUNCTION_TYPE);
+ __ CompareInstanceTypeRange(r7, r8, FIRST_JS_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET, eq);
+ RelocInfo::CODE_TARGET, le);
// Only dispatch to bound functions after checking whether they are
// constructors.
@@ -2439,153 +2391,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : actual number of arguments
- // -- r4 : function (passed through to callee)
- // -- r5 : expected number of arguments
- // -- r6 : new target (passed through to callee)
- // -----------------------------------
-
- Label dont_adapt_arguments, stack_overflow;
- __ cmpli(r5, Operand(kDontAdaptArgumentsSentinel));
- __ beq(&dont_adapt_arguments);
- __ LoadTaggedPointerField(
- r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
-
- // -------------------------------------------
- // Adapt arguments.
- // -------------------------------------------
- {
- Label under_application, over_application, invoke;
- __ cmp(r3, r5);
- __ blt(&under_application);
-
- // Enough parameters: actual >= expected
- __ bind(&over_application);
- {
- EnterArgumentsAdaptorFrame(masm);
- __ StackOverflowCheck(r5, r8, &stack_overflow);
-
- // Calculate copy start address into r3 and copy end address into r7.
- // r3: actual number of arguments as a smi
- // r4: function
- // r5: expected number of arguments
- // r6: new target (passed through to callee)
- __ ShiftLeftImm(r3, r5, Operand(kSystemPointerSizeLog2));
- __ add(r3, r3, fp);
- // adjust for return address and receiver
- __ addi(r3, r3, Operand(2 * kSystemPointerSize));
- __ ShiftLeftImm(r7, r5, Operand(kSystemPointerSizeLog2));
- __ sub(r7, r3, r7);
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // r3: copy start address
- // r4: function
- // r5: expected number of arguments
- // r6: new target (passed through to callee)
- // r7: copy end address
-
- Label copy;
- __ bind(&copy);
- __ LoadP(r0, MemOperand(r3, 0));
- __ push(r0);
- __ cmp(r3, r7); // Compare before moving to next argument.
- __ subi(r3, r3, Operand(kSystemPointerSize));
- __ bne(&copy);
-
- __ b(&invoke);
- }
-
- // Too few parameters: Actual < expected
- __ bind(&under_application);
- {
- EnterArgumentsAdaptorFrame(masm);
- __ StackOverflowCheck(r5, r8, &stack_overflow);
-
- // Fill the remaining expected arguments with undefined.
- // r0: actual number of arguments as a smi
- // r1: function
- // r2: expected number of arguments
- // r3: new target (passed through to callee)
- __ LoadRoot(r8, RootIndex::kUndefinedValue);
- __ SmiUntag(r0, r3);
- __ sub(r9, r5, r0);
- __ ShiftLeftImm(r7, r9, Operand(kSystemPointerSizeLog2));
- __ sub(r7, fp, r7);
- // Adjust for frame.
- __ subi(r7, r7,
- Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
- kSystemPointerSize));
-
- Label fill;
- __ bind(&fill);
- __ push(r8);
- __ cmp(sp, r7);
- __ b(ne, &fill);
-
- // Calculate copy start address into r0 and copy end address is fp.
- // r0: actual number of arguments as a smi
- // r1: function
- // r2: expected number of arguments
- // r3: new target (passed through to callee)
- __ SmiToPtrArrayOffset(r3, r3);
- __ add(r3, r3, fp);
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // r0: copy start address
- // r1: function
- // r2: expected number of arguments
- // r3: new target (passed through to callee)
- Label copy;
- __ bind(&copy);
-
- // Adjust load for return address and receiver.
- __ LoadP(r8, MemOperand(r3, 2 * kSystemPointerSize));
- __ push(r8);
-
- __ cmp(r3, fp); // Compare before moving to next argument.
- __ subi(r3, r3, Operand(kSystemPointerSize));
- __ b(ne, &copy);
- }
-
- // Call the entry point.
- __ bind(&invoke);
- __ mr(r3, r5);
- // r3 : expected number of arguments
- // r4 : function (passed through to callee)
- // r6 : new target (passed through to callee)
- static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
- __ LoadTaggedPointerField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
- __ CallCodeObject(r5);
-
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(
- masm->pc_offset());
-
- // Exit frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ blr();
- }
-
- // -------------------------------------------
- // Dont adapt arguments.
- // -------------------------------------------
- __ bind(&dont_adapt_arguments);
- __ RecordComment("-- Call without adapting args --");
- static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
- __ LoadTaggedPointerField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
- __ JumpCodeObject(r5);
-
- __ bind(&stack_overflow);
- {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ bkpt(0);
- }
-}
-
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in a register by the jump table trampoline.
// Convert to Smi for the runtime call.
@@ -2595,15 +2400,55 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
- // Save all parameter registers (see wasm-linkage.cc). They might be
+ // Save all parameter registers (see wasm-linkage.h). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs =
- Register::ListOf(r3, r4, r5, r6, r7, r8, r9, r10);
- constexpr RegList fp_regs =
- DoubleRegister::ListOf(d1, d2, d3, d4, d5, d6, d7, d8);
+ RegList gp_regs = 0;
+ for (Register gp_param_reg : wasm::kGpParamRegisters) {
+ gp_regs |= gp_param_reg.bit();
+ }
+
+ RegList fp_regs = 0;
+ for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
+ fp_regs |= fp_param_reg.bit();
+ }
+
+ // List must match register numbers under kFpParamRegisters.
+ constexpr RegList simd_regs =
+ Simd128Register::ListOf(v1, v2, v3, v4, v5, v6, v7, v8);
+
+ CHECK_EQ(NumRegs(gp_regs), arraysize(wasm::kGpParamRegisters));
+ CHECK_EQ(NumRegs(fp_regs), arraysize(wasm::kFpParamRegisters));
+ CHECK_EQ(NumRegs(simd_regs), arraysize(wasm::kFpParamRegisters));
+ CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
+ NumRegs(gp_regs));
+ CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
+ NumRegs(fp_regs));
+ CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
+ NumRegs(simd_regs));
+
__ MultiPush(gp_regs);
__ MultiPushDoubles(fp_regs);
+ // V8 uses the same set of fp param registers as Simd param registers.
+ // As these registers are two different sets on ppc we must make
+ // sure to also save them when Simd is enabled.
+ // Check the comments under crrev.com/c/2645694 for more details.
+ Label push_empty_simd, simd_pushed;
+ __ Move(ip, ExternalReference::supports_wasm_simd_128_address());
+ __ LoadByte(ip, MemOperand(ip), r0);
+ __ cmpi(ip, Operand::Zero()); // If > 0 then simd is available.
+ __ ble(&push_empty_simd);
+ __ MultiPushV128(simd_regs);
+ __ b(&simd_pushed);
+ __ bind(&push_empty_simd);
+ // kFixedFrameSizeFromFp is hard coded to include space for Simd
+ // registers, so we still need to allocate space on the stack even if we
+ // are not pushing them.
+ __ addi(
+ sp, sp,
+ Operand(-static_cast<int8_t>(base::bits::CountPopulation(simd_regs)) *
+ kSimd128Size));
+ __ bind(&simd_pushed);
// Pass instance and function index as explicit arguments to the runtime
// function.
@@ -2616,6 +2461,19 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ mr(r11, kReturnRegister0);
// Restore registers.
+ Label pop_empty_simd, simd_popped;
+ __ Move(ip, ExternalReference::supports_wasm_simd_128_address());
+ __ LoadByte(ip, MemOperand(ip), r0);
+ __ cmpi(ip, Operand::Zero()); // If > 0 then simd is available.
+ __ ble(&pop_empty_simd);
+ __ MultiPopV128(simd_regs);
+ __ b(&simd_popped);
+ __ bind(&pop_empty_simd);
+ __ addi(
+ sp, sp,
+ Operand(static_cast<int8_t>(base::bits::CountPopulation(simd_regs)) *
+ kSimd128Size));
+ __ bind(&simd_popped);
__ MultiPopDoubles(fp_regs);
__ MultiPop(gp_regs);
}
@@ -2805,6 +2663,15 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// underlying register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
+ // Clear c_entry_fp, like we do in `LeaveExitFrame`.
+ {
+ UseScratchRegisterScope temps(masm);
+ __ Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ mov(r0, Operand::Zero());
+ __ StoreP(r0, MemOperand(ip));
+ }
+
// Compute the handler entry address and jump to it.
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ Move(ip, pending_handler_entrypoint_address);
diff --git a/deps/v8/src/builtins/promise-constructor.tq b/deps/v8/src/builtins/promise-constructor.tq
index 32de19f3b2..3c5a5e560d 100644
--- a/deps/v8/src/builtins/promise-constructor.tq
+++ b/deps/v8/src/builtins/promise-constructor.tq
@@ -101,7 +101,10 @@ PromisePrototypeCatch(
js-implicit context: Context, receiver: JSAny)(onRejected: JSAny): JSAny {
// 1. Let promise be the this value.
// 2. Return ? Invoke(promise, "then", « undefined, onRejected »).
- const nativeContext = LoadNativeContext(context);
+  // This builtin is attached to a JSFunction created by the bootstrapper, so
+  // `context` is the native context.
+ check(Is<NativeContext>(context));
+ const nativeContext = UnsafeCast<NativeContext>(context);
return UnsafeCast<JSAny>(
InvokeThen(nativeContext, receiver, Undefined, onRejected));
}
diff --git a/deps/v8/src/builtins/promise-finally.tq b/deps/v8/src/builtins/promise-finally.tq
index 90a60678dd..f576486850 100644
--- a/deps/v8/src/builtins/promise-finally.tq
+++ b/deps/v8/src/builtins/promise-finally.tq
@@ -169,7 +169,10 @@ PromisePrototypeFinally(
MessageTemplate::kCalledOnNonObject, 'Promise.prototype.finally');
// 3. Let C be ? SpeciesConstructor(promise, %Promise%).
- const nativeContext = LoadNativeContext(context);
+  // This builtin is attached to a JSFunction created by the bootstrapper, so
+  // `context` is the native context.
+ check(Is<NativeContext>(context));
+ const nativeContext = UnsafeCast<NativeContext>(context);
const promiseFun = *NativeContextSlot(ContextSlot::PROMISE_FUNCTION_INDEX);
let constructor: Constructor = UnsafeCast<Constructor>(promiseFun);
diff --git a/deps/v8/src/builtins/promise-race.tq b/deps/v8/src/builtins/promise-race.tq
index 23a7efc00a..973ddd8bac 100644
--- a/deps/v8/src/builtins/promise-race.tq
+++ b/deps/v8/src/builtins/promise-race.tq
@@ -13,7 +13,10 @@ PromiseRace(
const receiver = Cast<JSReceiver>(receiver)
otherwise ThrowTypeError(MessageTemplate::kCalledOnNonObject, 'Promise.race');
- const nativeContext = LoadNativeContext(context);
+  // This builtin is attached to a JSFunction created by the bootstrapper, so
+  // `context` is the native context.
+ check(Is<NativeContext>(context));
+ const nativeContext = UnsafeCast<NativeContext>(context);
// Let promiseCapability be ? NewPromiseCapability(C).
// Don't fire debugEvent so that forwarding the rejection through all does
diff --git a/deps/v8/src/builtins/regexp-match.tq b/deps/v8/src/builtins/regexp-match.tq
index 48fd8a42bf..5fca09893c 100644
--- a/deps/v8/src/builtins/regexp-match.tq
+++ b/deps/v8/src/builtins/regexp-match.tq
@@ -108,9 +108,7 @@ transitioning macro RegExpPrototypeMatchBody(implicit context: Context)(
// On the fast path, we can be certain that lastIndex can never be
// incremented to overflow the Smi range since the maximal string
// length is less than the maximal Smi value.
- const kMaxStringLengthFitsSmi: constexpr bool =
- kStringMaxLengthUintptr < kSmiMaxValue;
- static_assert(kMaxStringLengthFitsSmi);
+ StaticAssertStringLengthFitsSmi();
assert(TaggedIsPositiveSmi(newLastIndex));
}
diff --git a/deps/v8/src/builtins/regexp-source.tq b/deps/v8/src/builtins/regexp-source.tq
index 5f9c6b22c3..0bf42ba11e 100644
--- a/deps/v8/src/builtins/regexp-source.tq
+++ b/deps/v8/src/builtins/regexp-source.tq
@@ -21,7 +21,6 @@ transitioning javascript builtin RegExpPrototypeSourceGetter(
const methodName: constexpr string = 'RegExp.prototype.source';
ThrowTypeError(MessageTemplate::kRegExpNonRegExp, methodName);
}
- IncrementUseCounter(context, SmiConstant(kRegExpPrototypeSourceGetter));
return '(?:)';
}
}
diff --git a/deps/v8/src/builtins/regexp.tq b/deps/v8/src/builtins/regexp.tq
index 71ba53698d..29fad26736 100644
--- a/deps/v8/src/builtins/regexp.tq
+++ b/deps/v8/src/builtins/regexp.tq
@@ -6,6 +6,20 @@
namespace regexp {
+extern macro RegExpBuiltinsAssembler::BranchIfFastRegExpForMatch(
+ implicit context: Context)(HeapObject): never labels IsFast,
+ IsSlow;
+macro IsFastRegExpForMatch(implicit context: Context)(o: HeapObject): bool {
+ BranchIfFastRegExpForMatch(o) otherwise return true, return false;
+}
+
+extern macro RegExpBuiltinsAssembler::BranchIfFastRegExpForSearch(
+ implicit context: Context)(HeapObject): never labels IsFast,
+ IsSlow;
+macro IsFastRegExpForSearch(implicit context: Context)(o: HeapObject): bool {
+ BranchIfFastRegExpForSearch(o) otherwise return true, return false;
+}
+
extern macro RegExpBuiltinsAssembler::BranchIfFastRegExp_Strict(
implicit context: Context)(HeapObject): never labels IsFast,
IsSlow;
@@ -51,8 +65,8 @@ transitioning macro RegExpExec(implicit context: Context)(
}
extern macro RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
- implicit context: Context)(
- JSRegExp, RegExpMatchInfo, String, Number): JSRegExpResult;
+ implicit context: Context)(JSRegExp, RegExpMatchInfo, String, Number):
+ JSRegExpResult|JSRegExpResultWithIndices;
const kGlobalOrSticky: constexpr int31
generates 'JSRegExp::kGlobal | JSRegExp::kSticky';
@@ -185,11 +199,11 @@ extern enum Flag constexpr 'JSRegExp::Flag' {
kSticky,
kUnicode,
kDotAll,
+ kHasIndices,
kLinear
}
-const kRegExpPrototypeOldFlagGetter: constexpr int31
- generates 'v8::Isolate::kRegExpPrototypeOldFlagGetter';
+const kNoCounterFlagGetter: constexpr int31 = -1;
const kRegExpPrototypeStickyGetter: constexpr int31
generates 'v8::Isolate::kRegExpPrototypeStickyGetter';
const kRegExpPrototypeUnicodeGetter: constexpr int31
@@ -223,8 +237,7 @@ macro FlagGetter(implicit context: Context)(
transitioning javascript builtin RegExpPrototypeGlobalGetter(
js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
return FlagGetter(
- receiver, Flag::kGlobal, kRegExpPrototypeOldFlagGetter,
- 'RegExp.prototype.global');
+ receiver, Flag::kGlobal, kNoCounterFlagGetter, 'RegExp.prototype.global');
}
// ES6 21.2.5.5.
@@ -232,7 +245,7 @@ transitioning javascript builtin RegExpPrototypeGlobalGetter(
transitioning javascript builtin RegExpPrototypeIgnoreCaseGetter(
js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
return FlagGetter(
- receiver, Flag::kIgnoreCase, kRegExpPrototypeOldFlagGetter,
+ receiver, Flag::kIgnoreCase, kNoCounterFlagGetter,
'RegExp.prototype.ignoreCase');
}
@@ -241,23 +254,28 @@ transitioning javascript builtin RegExpPrototypeIgnoreCaseGetter(
transitioning javascript builtin RegExpPrototypeMultilineGetter(
js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
return FlagGetter(
- receiver, Flag::kMultiline, kRegExpPrototypeOldFlagGetter,
+ receiver, Flag::kMultiline, kNoCounterFlagGetter,
'RegExp.prototype.multiline');
}
+transitioning javascript builtin RegExpPrototypeHasIndicesGetter(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
+ return FlagGetter(
+ receiver, Flag::kHasIndices, kNoCounterFlagGetter,
+ 'RegExp.prototype.hasIndices');
+}
+
transitioning javascript builtin RegExpPrototypeLinearGetter(
js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
return FlagGetter(
- receiver, Flag::kLinear, kRegExpPrototypeOldFlagGetter,
- 'RegExp.prototype.linear');
+ receiver, Flag::kLinear, kNoCounterFlagGetter, 'RegExp.prototype.linear');
}
// ES #sec-get-regexp.prototype.dotAll
transitioning javascript builtin RegExpPrototypeDotAllGetter(
js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
- const kNoCounter: constexpr int31 = -1;
return FlagGetter(
- receiver, Flag::kDotAll, kNoCounter, 'RegExp.prototype.dotAll');
+ receiver, Flag::kDotAll, kNoCounterFlagGetter, 'RegExp.prototype.dotAll');
}
// ES6 21.2.5.12.
@@ -365,8 +383,6 @@ const kRegExpMatchIsTrueishOnNonJSRegExp: constexpr UseCounterFeature
generates 'v8::Isolate::kRegExpMatchIsTrueishOnNonJSRegExp';
const kRegExpMatchIsFalseishOnJSRegExp: constexpr UseCounterFeature
generates 'v8::Isolate::kRegExpMatchIsFalseishOnJSRegExp';
-const kRegExpPrototypeSourceGetter: constexpr UseCounterFeature
- generates 'v8::Isolate::kRegExpPrototypeSourceGetter';
const kRegExpExecCalledOnSlowRegExp: constexpr UseCounterFeature
generates 'v8::Isolate::kRegExpExecCalledOnSlowRegExp';
diff --git a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
new file mode 100644
index 0000000000..685f575598
--- /dev/null
+++ b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
@@ -0,0 +1,3316 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_RISCV64
+
+#include "src/api/api-arguments.h"
+#include "src/codegen/code-factory.h"
+#include "src/debug/debug.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
+#include "src/logging/counters.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
+#include "src/codegen/riscv64/constants-riscv64.h"
+#include "src/heap/heap-inl.h"
+#include "src/objects/cell.h"
+#include "src/objects/foreign.h"
+#include "src/objects/heap-number.h"
+#include "src/objects/js-generator.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/smi.h"
+#include "src/runtime/runtime.h"
+#include "src/wasm/wasm-linkage.h"
+#include "src/wasm/wasm-objects.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
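+  // Pass the C++ builtin's entry point in the extra-arg register and tail-call
+  // the adaptor that wraps it in a builtin exit frame.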
+ __ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
+ __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
+ RelocInfo::CODE_TARGET);
+}
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- a0 : actual argument count
+ // -- a1 : target function (preserved for callee)
+ // -- a3 : new target (preserved for callee)
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the target function, the new target and the actual
+ // argument count.
+ // Push function as parameter to the runtime call.
+ __ SmiTag(kJavaScriptCallArgCountRegister);
+ __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
+ kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);
+
+ __ CallRuntime(function_id, 1);
+ // Use the return value before restoring a0
+ __ Add64(a2, a0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Restore target function, new target and actual argument count.
+ __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
+ kJavaScriptCallArgCountRegister);
+ __ SmiUntag(kJavaScriptCallArgCountRegister);
+ }
+
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Jump(a2);
+}
+
+namespace {
+
+void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- a3 : new target
+ // -- cp : context
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // Enter a construct frame.
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
+
+ // Preserve the incoming parameters on the stack.
+ __ SmiTag(a0);
+ __ Push(cp, a0);
+ __ SmiUntag(a0);
+
+ // Set up pointer to last argument (skip receiver).
+ __ Add64(
+ t2, fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
+ // Copy arguments and receiver to the expression stack.
+ __ PushArray(t2, a0);
+ // The receiver for the builtin/api call.
+ __ PushRoot(RootIndex::kTheHoleValue);
+
+ // Call the function.
+ // a0: number of arguments (untagged)
+ // a1: constructor function
+ // a3: new target
+ __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
+
+ // Restore context from the frame.
+ __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ // Restore smi-tagged arguments count from the frame.
+ __ Ld(t0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ // Leave construct frame.
+ }
+
+ // Remove caller arguments from the stack and return.
+ __ SmiScale(t0, t0, kPointerSizeLog2);
+ __ Add64(sp, sp, t0);
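+  // Also drop the receiver slot that was pushed below the arguments.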
+ __ Add64(sp, sp, kPointerSize);
+ __ Ret();
+}
+
+} // namespace
+
+// The construct stub for ES5 constructor functions and ES6 class constructors.
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0: number of arguments (untagged)
+ // -- a1: constructor function
+ // -- a3: new target
+ // -- cp: context
+ // -- ra: return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // Enter a construct frame.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ Label post_instantiation_deopt_entry, not_create_implicit_receiver;
+ __ EnterFrame(StackFrame::CONSTRUCT);
+
+ // Preserve the incoming parameters on the stack.
+ __ SmiTag(a0);
+ __ Push(cp, a0, a1);
+ __ PushRoot(RootIndex::kUndefinedValue);
+ __ Push(a3);
+
+ // ----------- S t a t e -------------
+ // -- sp[0*kPointerSize]: new target
+ // -- sp[1*kPointerSize]: padding
+ // -- a1 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
+
+ __ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
+ __ DecodeField<SharedFunctionInfo::FunctionKindBits>(t2);
+ __ JumpIfIsInRange(t2, kDefaultDerivedConstructor, kDerivedConstructor,
+ &not_create_implicit_receiver);
+
+ // If not derived class constructor: Allocate the new receiver object.
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, t2,
+ t4);
+ __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
+ __ Branch(&post_instantiation_deopt_entry);
+
+ // Else: use TheHoleValue as receiver for constructor call
+ __ bind(&not_create_implicit_receiver);
+ __ LoadRoot(a0, RootIndex::kTheHoleValue);
+
+ // ----------- S t a t e -------------
+ // -- a0: receiver
+ // -- Slot 4 / sp[0*kPointerSize]: new target
+ // -- Slot 3 / sp[1*kPointerSize]: padding
+ // -- Slot 2 / sp[2*kPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kPointerSize]: context
+ // -----------------------------------
+ // Deoptimizer enters here.
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
+ __ bind(&post_instantiation_deopt_entry);
+
+ // Restore new target.
+ __ Pop(a3);
+
+ // Push the allocated receiver to the stack.
+ __ Push(a0);
+
+ // We need two copies because we may have to return the original one
+ // and the calling conventions dictate that the called function pops the
+  // receiver. The second copy is pushed after the arguments; we save it in a6
+  // since a0 will hold the return value of the constructor call.
+ __ Move(a6, a0);
+
+ // Set up pointer to last argument.
+ __ Add64(
+ t2, fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
+
+ // ----------- S t a t e -------------
+  //  -- a3: new target
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- sp[2*kPointerSize]: padding
+ // -- sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
+ // -----------------------------------
+
+ // Restore constructor function and argument count.
+ __ Ld(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ Ld(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ SmiUntag(a0);
+
+ Label stack_overflow;
+ __ StackOverflowCheck(a0, t0, t1, &stack_overflow);
+
+ // TODO(victorgomes): When the arguments adaptor is completely removed, we
+ // should get the formal parameter count and copy the arguments in its
+ // correct position (including any undefined), instead of delaying this to
+ // InvokeFunction.
+
+ // Copy arguments and receiver to the expression stack.
+ __ PushArray(t2, a0);
+ // We need two copies because we may have to return the original one
+ // and the calling conventions dictate that the called function pops the
+  // receiver. The second copy is pushed after the arguments.
+ __ Push(a6);
+
+ // Call the function.
+ __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
+
+ // ----------- S t a t e -------------
+ // -- a0: constructor result
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: padding
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
+
+ // Restore the context from the frame.
+ __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, do_throw, leave_and_return, check_receiver;
+
+ // If the result is undefined, we jump out to using the implicit receiver.
+ __ JumpIfNotRoot(a0, RootIndex::kUndefinedValue, &check_receiver);
+
+ // Otherwise we do a smi check and fall through to check if the return value
+ // is a valid receiver.
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ Ld(a0, MemOperand(sp, 0 * kPointerSize));
+ __ JumpIfRoot(a0, RootIndex::kTheHoleValue, &do_throw);
+
+ __ bind(&leave_and_return);
+ // Restore smi-tagged arguments count from the frame.
+ __ Ld(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ // Leave construct frame.
+ __ LeaveFrame(StackFrame::CONSTRUCT);
+
+ // Remove caller arguments from the stack and return.
+ __ SmiScale(a4, a1, kPointerSizeLog2);
+ __ Add64(sp, sp, a4);
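+  // Also drop the receiver slot that was pushed below the arguments.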
+ __ Add64(sp, sp, kPointerSize);
+ __ Ret();
+
+ __ bind(&check_receiver);
+ __ JumpIfSmi(a0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ GetObjectType(a0, t2, t2);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ Branch(&leave_and_return, greater_equal, t2,
+ Operand(FIRST_JS_RECEIVER_TYPE));
+ __ Branch(&use_receiver);
+
+ __ bind(&do_throw);
+ // Restore the context from the frame.
+ __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+ __ break_(0xCC);
+
+ __ bind(&stack_overflow);
+ // Restore the context from the frame.
+ __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ break_(0xCC);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Generate_JSBuiltinsConstructStubHelper(masm);
+}
+
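+// If the SharedFunctionInfo's function data is an InterpreterData, unwrap the
+// BytecodeArray stored inside it; otherwise sfi_data already holds the bytecode.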
+static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1) {
+ Label done;
+
+ __ GetObjectType(sfi_data, scratch1, scratch1);
+ __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
+ __ Ld(sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+
+ __ bind(&done);
+}
+
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the value to pass to the generator
+ // -- a1 : the JSGeneratorObject to resume
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertGeneratorObject(a1);
+
+ // Store input value into generator object.
+ __ Sd(a0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
+ __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, a0, a3,
+ kRAHasNotBeenSaved, kDontSaveFPRegs);
+
+ // Load suspended function and context.
+ __ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ Ld(cp, FieldMemOperand(a4, JSFunction::kContextOffset));
+
+ // Flood function if we are stepping.
+ Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+ Label stepping_prepared;
+ ExternalReference debug_hook =
+ ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+ __ li(a5, debug_hook);
+ __ Lb(a5, MemOperand(a5));
+ __ Branch(&prepare_step_in_if_stepping, ne, a5, Operand(zero_reg));
+
+ // Flood function if we need to continue stepping in the suspended generator.
+ ExternalReference debug_suspended_generator =
+ ExternalReference::debug_suspended_generator_address(masm->isolate());
+ __ li(a5, debug_suspended_generator);
+ __ Ld(a5, MemOperand(a5));
+ __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a5));
+ __ bind(&stepping_prepared);
+
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label stack_overflow;
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
+ __ Branch(&stack_overflow, Uless, sp, Operand(kScratchReg));
+
+ // ----------- S t a t e -------------
+ // -- a1 : the JSGeneratorObject to resume
+ // -- a4 : generator function
+ // -- cp : generator context
+ // -- ra : return address
+ // -----------------------------------
+
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
+ __ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ Lhu(a3,
+ FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Ld(t1,
+ FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
+ {
+ Label done_loop, loop;
+ __ bind(&loop);
+ __ Sub64(a3, a3, Operand(1));
+ __ Branch(&done_loop, lt, a3, Operand(zero_reg));
+ __ CalcScaledAddress(kScratchReg, t1, a3, kPointerSizeLog2);
+ __ Ld(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
+ __ Push(kScratchReg);
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ // Push receiver.
+ __ Ld(kScratchReg, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
+ __ Push(kScratchReg);
+ }
+
+ // Underlying function needs to have bytecode available.
+ if (FLAG_debug_code) {
+ __ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, a3, a0);
+ __ GetObjectType(a3, a3, a3);
+ __ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
+ Operand(BYTECODE_ARRAY_TYPE));
+ }
+
+ // Resume (Ignition/TurboFan) generator object.
+ {
+ __ Ld(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ Lhu(a0, FieldMemOperand(
+ a0, SharedFunctionInfo::kFormalParameterCountOffset));
+ // We abuse new.target both to indicate that this is a resume call and to
+ // pass in the generator object. In ordinary calls, new.target is always
+ // undefined because generator functions are non-constructable.
+ __ Move(a3, a1);
+ __ Move(a1, a4);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ JumpCodeObject(a2);
+ }
+
+ __ bind(&prepare_step_in_if_stepping);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a4);
+ // Push hole as receiver since we do not use it for stepping.
+ __ PushRoot(RootIndex::kTheHoleValue);
+ __ CallRuntime(Runtime::kDebugOnFunctionCall);
+ __ Pop(a1);
+ }
+ __ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ Branch(&stepping_prepared);
+
+ __ bind(&prepare_step_in_suspended_generator);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+ __ Pop(a1);
+ }
+ __ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ Branch(&stepping_prepared);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ break_(0xCC); // This should be unreachable.
+ }
+}
+
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
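+  // Pass the called target (a1) to the runtime, which throws the error.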
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
+}
+
+// Clobbers scratch1 and scratch2; preserves all other registers.
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
+ Register scratch1, Register scratch2) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadStackLimit(scratch1, MacroAssembler::StackLimitKind::kRealStackLimit);
+  // Make scratch1 the space we have left. The stack might already be
+  // overflowed here, which will cause scratch1 to become negative.
+ __ Sub64(scratch1, sp, scratch1);
+ // Check if the arguments will overflow the stack.
+ __ Sll64(scratch2, argc, kPointerSizeLog2);
+ __ Branch(&okay, gt, scratch1, Operand(scratch2)); // Signed comparison.
+
+ // Out of stack space.
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+
+ __ bind(&okay);
+}
+
+namespace {
+
+// Called with the native C calling convention. The corresponding function
+// signature is either:
+//
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, Address new_target, Address target,
+// Address receiver, intptr_t argc, Address** args)>;
+// or
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, MicrotaskQueue* microtask_queue)>;
+void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
+ Builtins::Name entry_trampoline) {
+ Label invoke, handler_entry, exit;
+
+ {
+ NoRootArrayScope no_root_array(masm);
+
+ // TODO(plind): unify the ABI description here.
+ // Registers:
+ // either
+ // a0: root register value
+ // a1: entry address
+ // a2: function
+ // a3: receiver
+ // a4: argc
+ // a5: argv
+ // or
+ // a0: root register value
+ // a1: microtask_queue
+
+ // Save callee saved registers on the stack.
+ __ MultiPush(kCalleeSaved | ra.bit());
+
+ // Save callee-saved FPU registers.
+ __ MultiPushFPU(kCalleeSavedFPU);
+ // Set up the reserved register for 0.0.
+ __ LoadFPRImmediate(kDoubleRegZero, 0.0);
+
+ // Initialize the root register.
+ // C calling convention. The first argument is passed in a0.
+ __ Move(kRootRegister, a0);
+ }
+
+ // a1: entry address
+ // a2: function
+ // a3: receiver
+ // a4: argc
+ // a5: argv
+
+ // We build an EntryFrame.
+ __ li(s1, Operand(-1)); // Push a bad frame pointer to fail if it is used.
+ __ li(s2, Operand(StackFrame::TypeToMarker(type)));
+ __ li(s3, Operand(StackFrame::TypeToMarker(type)));
+ ExternalReference c_entry_fp = ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, masm->isolate());
+ __ li(s4, c_entry_fp);
+ __ Ld(s4, MemOperand(s4));
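+  // s4 now holds the previous C entry FP; it is saved in the entry frame and
+  // restored when this frame is torn down.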
+ __ Push(s1, s2, s3, s4);
+ // Set up frame pointer for the frame to be pushed.
+ __ Add64(fp, sp, -EntryFrameConstants::kCallerFPOffset);
+ // Registers:
+ // either
+ // a1: entry address
+ // a2: function
+ // a3: receiver
+ // a4: argc
+ // a5: argv
+ // or
+ // a1: microtask_queue
+ //
+ // Stack:
+ // caller fp |
+ // function slot | entry frame
+ // context slot |
+ // bad fp (0xFF...F) |
+ // callee saved registers + ra
+ // [ O32: 4 args slots]
+ // args
+
+ // If this is the outermost JS call, set js_entry_sp value.
+ Label non_outermost_js;
+ ExternalReference js_entry_sp = ExternalReference::Create(
+ IsolateAddressId::kJSEntrySPAddress, masm->isolate());
+ __ li(s1, js_entry_sp);
+ __ Ld(s2, MemOperand(s1));
+ __ Branch(&non_outermost_js, ne, s2, Operand(zero_reg));
+ __ Sd(fp, MemOperand(s1));
+ __ li(s3, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ Label cont;
+ __ Branch(&cont);
+ __ bind(&non_outermost_js);
+ __ li(s3, Operand(StackFrame::INNER_JSENTRY_FRAME));
+ __ bind(&cont);
+ __ push(s3);
+
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ Branch(&invoke);
+ __ bind(&handler_entry);
+
+ // Store the current pc as the handler offset. It's used later to create the
+ // handler table.
+ masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
+
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushStackHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
+ __ li(s1, ExternalReference::Create(
+ IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
+ __ Sd(a0, MemOperand(s1)); // We come back from 'invoke'. result is in a0.
+ __ LoadRoot(a0, RootIndex::kException);
+ __ Branch(&exit);
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushStackHandler();
+ // If an exception not caught by another handler occurs, this handler
+  // returns control to the code after the Branch(&invoke) above, which
+ // restores all kCalleeSaved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+ //
+ // Registers:
+ // either
+ // a0: root register value
+ // a1: entry address
+ // a2: function
+ // a3: receiver
+ // a4: argc
+ // a5: argv
+ // or
+ // a0: root register value
+ // a1: microtask_queue
+ //
+ // Stack:
+ // handler frame
+ // entry frame
+ // callee saved registers + ra
+ // [ O32: 4 args slots]
+ // args
+ //
+ // Invoke the function by calling through JS entry trampoline builtin and
+ // pop the faked function when we return.
+
+ Handle<Code> trampoline_code =
+ masm->isolate()->builtins()->builtin_handle(entry_trampoline);
+ __ Call(trampoline_code, RelocInfo::CODE_TARGET);
+
+ // Unlink this frame from the handler chain.
+ __ PopStackHandler();
+
+ __ bind(&exit); // a0 holds result
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ __ pop(a5);
+ __ Branch(&non_outermost_js_2, ne, a5,
+ Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ li(a5, js_entry_sp);
+ __ Sd(zero_reg, MemOperand(a5));
+ __ bind(&non_outermost_js_2);
+
+ // Restore the top frame descriptors from the stack.
+ __ pop(a5);
+ __ li(a4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ Sd(a5, MemOperand(a4));
+
+ // Reset the stack to the callee saved registers.
+ __ Add64(sp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+ // Restore callee-saved fpu registers.
+ __ MultiPopFPU(kCalleeSavedFPU);
+
+ // Restore callee saved registers from the stack.
+ __ MultiPop(kCalleeSaved | ra.bit());
+ // Return.
+ __ Jump(ra);
+}
+
+} // namespace
+
+void Builtins::Generate_JSEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY,
+ Builtins::kJSEntryTrampoline);
+}
+
+void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
+ Builtins::kJSConstructEntryTrampoline);
+}
+
+void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY,
+ Builtins::kRunMicrotasksTrampoline);
+}
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+ bool is_construct) {
+ // ----------- S t a t e -------------
+ // -- a1: new.target
+ // -- a2: function
+ // -- a3: receiver_pointer
+ // -- a4: argc
+ // -- a5: argv
+ // -----------------------------------
+
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Setup the context (we need to use the caller context from the isolate).
+ ExternalReference context_address = ExternalReference::Create(
+ IsolateAddressId::kContextAddress, masm->isolate());
+ __ li(cp, context_address);
+ __ Ld(cp, MemOperand(cp));
+
+ // Push the function onto the stack.
+ __ Push(a2);
+
+ // Check if we have enough stack space to push all arguments.
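+    // a6 = argc + 1 to account for the receiver that is pushed as well.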
+ __ Add64(a6, a4, 1);
+ Generate_CheckStackOverflow(masm, a6, a0, s2);
+
+ // Copy arguments to the stack in a loop.
+ // a4: argc
+ // a5: argv, i.e. points to first arg
+ Label loop, entry;
+ __ CalcScaledAddress(s1, a5, a4, kPointerSizeLog2);
+ __ Branch(&entry);
+ // s1 points past last arg.
+ __ bind(&loop);
+ __ Add64(s1, s1, -kPointerSize);
+ __ Ld(s2, MemOperand(s1)); // Read next parameter.
+ __ Ld(s2, MemOperand(s2)); // Dereference handle.
+ __ push(s2); // Push parameter.
+ __ bind(&entry);
+ __ Branch(&loop, ne, a5, Operand(s1));
+
+    // Push the receiver.
+ __ Push(a3);
+
+ // a0: argc
+ // a1: function
+ // a3: new.target
+ __ Move(a3, a1);
+ __ Move(a1, a2);
+ __ Move(a0, a4);
+
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ __ LoadRoot(a4, RootIndex::kUndefinedValue);
+ __ Move(a5, a4);
+ __ Move(s1, a4);
+ __ Move(s2, a4);
+ __ Move(s3, a4);
+ __ Move(s4, a4);
+ __ Move(s5, a4);
+ // s6 holds the root address. Do not clobber.
+ // s7 is cp. Do not init.
+
+ // Invoke the code.
+ Handle<Code> builtin = is_construct
+ ? BUILTIN_CODE(masm->isolate(), Construct)
+ : masm->isolate()->builtins()->Call();
+ __ Call(builtin, RelocInfo::CODE_TARGET);
+
+ // Leave internal frame.
+ }
+ __ Jump(ra);
+}
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
+ // a1: microtask_queue
+ __ Move(RunMicrotasksDescriptor::MicrotaskQueueRegister(), a1);
+ __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
+}
+
+static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
+ Register optimized_code,
+ Register closure,
+ Register scratch1,
+ Register scratch2) {
+ // Store code entry in the closure.
+ __ Sd(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
+ __ Move(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
+ __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
+ kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
+ Register scratch2) {
+ Register params_size = scratch1;
+
+ // Get the size of the formal parameters + receiver (in bytes).
+ __ Ld(params_size,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Lw(params_size,
+ FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
+
+ Register actual_params_size = scratch2;
+ Label L1;
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ Ld(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ __ Sll64(actual_params_size, actual_params_size, kPointerSizeLog2);
+ __ Add64(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+
+ // If actual is bigger than formal, then we should use it to free up the stack
+ // arguments.
+ __ Branch(&L1, le, actual_params_size, Operand(params_size));
+ __ Move(params_size, actual_params_size);
+ __ bind(&L1);
+
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::INTERPRETED);
+
+ // Drop receiver + arguments.
+ __ Add64(sp, sp, params_size);
+}
+
+// Tail-call |function_id| if |actual_marker| == |expected_marker|
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+ Register actual_marker,
+ OptimizationMarker expected_marker,
+ Runtime::FunctionId function_id) {
+ Label no_match;
+ __ Branch(&no_match, ne, actual_marker, Operand(expected_marker));
+ GenerateTailCallToReturnedCode(masm, function_id);
+ __ bind(&no_match);
+}
+
+static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register optimized_code_entry,
+ Register scratch1, Register scratch2) {
+ // ----------- S t a t e -------------
+ // -- a0 : actual argument count
+ // -- a3 : new target (preserved for callee if needed, and caller)
+ // -- a1 : target function (preserved for callee if needed, and caller)
+ // -----------------------------------
+ DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2));
+
+ Register closure = a1;
+ Label heal_optimized_code_slot;
+
+ // If the optimized code is cleared, go to runtime to update the optimization
+ // marker field.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
+ &heal_optimized_code_slot);
+
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ __ Ld(a5,
+ FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
+ __ Lw(a5, FieldMemOperand(a5, CodeDataContainer::kKindSpecificFlagsOffset));
+ __ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ Branch(&heal_optimized_code_slot, ne, a5, Operand(zero_reg));
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ // The feedback vector is no longer used, so re-use it as a scratch
+ // register.
+ ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
+ scratch1, scratch2);
+
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ LoadCodeObjectEntry(a2, optimized_code_entry);
+ __ Jump(a2);
+
+ // Optimized code slot contains deoptimized code or code is cleared and
+ // optimized code marker isn't updated. Evict the code, update the marker
+ // and re-enter the closure's code.
+ __ bind(&heal_optimized_code_slot);
+ GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
+}
+
+static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
+ Register optimization_marker) {
+ // ----------- S t a t e -------------
+ // -- a0 : actual argument count
+ // -- a3 : new target (preserved for callee if needed, and caller)
+ // -- a1 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+  //  -- optimization_marker : an int32 containing a non-zero optimization
+ // marker.
+ // -----------------------------------
+ DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
+
+ // TODO(v8:8394): The logging of first execution will break if
+ // feedback vectors are not allocated. We need to find a different way of
+ // logging these events if required.
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ // Marker should be one of LogFirstExecution / CompileOptimized /
+ // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
+ // here.
+ if (FLAG_debug_code) {
+ __ stop();
+ }
+}
+
+// Advance the current bytecode offset. This simulates what all bytecode
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode. Will not advance
+// the bytecode offset if the current bytecode is a JumpLoop, instead just
+// re-executing the JumpLoop to jump to the correct bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Register scratch2, Register scratch3,
+ Label* if_return) {
+ Register bytecode_size_table = scratch1;
+
+ // The bytecode offset value will be increased by one in wide and extra wide
+ // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
+ // will restore the original bytecode. In order to simplify the code, we have
+ // a backup of it.
+ Register original_bytecode_offset = scratch3;
+ DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
+ bytecode_size_table, original_bytecode_offset));
+ __ Move(original_bytecode_offset, bytecode_offset);
+ __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address());
+
+ // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
+ Label process_bytecode, extra_wide;
+ STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
+ STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
+ STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
+ STATIC_ASSERT(3 ==
+ static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
+ __ Branch(&process_bytecode, Ugreater, bytecode, Operand(3));
+ __ And(scratch2, bytecode, Operand(1));
+ __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg));
+
+ // Load the next bytecode and update table to the wide scaled table.
+ __ Add64(bytecode_offset, bytecode_offset, Operand(1));
+ __ Add64(scratch2, bytecode_array, bytecode_offset);
+ __ Lbu(bytecode, MemOperand(scratch2));
+ __ Add64(bytecode_size_table, bytecode_size_table,
+ Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
+ __ Branch(&process_bytecode);
+
+ __ bind(&extra_wide);
+ // Load the next bytecode and update table to the extra wide scaled table.
+ __ Add64(bytecode_offset, bytecode_offset, Operand(1));
+ __ Add64(scratch2, bytecode_array, bytecode_offset);
+ __ Lbu(bytecode, MemOperand(scratch2));
+ __ Add64(bytecode_size_table, bytecode_size_table,
+ Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
+
+ __ bind(&process_bytecode);
+
+// Bailout to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ Branch(if_return, eq, bytecode, \
+ Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // If this is a JumpLoop, re-execute it to perform the jump to the beginning
+ // of the loop.
+ Label end, not_jump_loop;
+ __ Branch(&not_jump_loop, ne, bytecode,
+ Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
+ // We need to restore the original bytecode_offset since we might have
+ // increased it to skip the wide / extra-wide prefix bytecode.
+ __ Move(bytecode_offset, original_bytecode_offset);
+ __ Branch(&end);
+
+ __ bind(&not_jump_loop);
+ // Otherwise, load the size of the current bytecode and advance the offset.
+ __ Add64(scratch2, bytecode_size_table, bytecode);
+ __ Lb(scratch2, MemOperand(scratch2));
+ __ Add64(bytecode_offset, bytecode_offset, scratch2);
+
+ __ bind(&end);
+}
+
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right.
+//
+// The live registers are:
+// o a0 : actual argument count (not including the receiver)
+// o a1: the JS function object being called.
+// o a3: the incoming new target or generator object
+// o cp: our context
+// o fp: the caller's frame pointer
+// o sp: stack pointer
+// o ra: return address
+//
+// The function builds an interpreter frame. See InterpreterFrameConstants in
+// frames.h for its layout.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ Register closure = a1;
+ Register feedback_vector = a2;
+
+ // Get the bytecode array from the function object and load it into
+ // kInterpreterBytecodeArrayRegister.
+ __ Ld(kScratchReg,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister,
+ kScratchReg);
+
+ // The bytecode array could have been flushed from the shared function info,
+ // if so, call into CompileLazy.
+ Label compile_lazy;
+ __ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg);
+ __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));
+
+ // Load the feedback vector from the closure.
+ __ Ld(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label push_stack_frame;
+ // Check if feedback vector is valid. If valid, check for optimized code
+ // and update invocation count. Otherwise, setup the stack frame.
+ __ Ld(a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
+ __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
+
+ // Read off the optimization state in the feedback vector, and if there
+ // is optimized code or an optimization marker, call that instead.
+ Register optimization_state = a4;
+ __ Lw(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+
+  // Check if the optimized code slot is not empty or has an optimization marker.
+ Label has_optimized_code_or_marker;
+
+ __ And(t0, optimization_state,
+ FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask);
+ __ Branch(&has_optimized_code_or_marker, ne, t0, Operand(zero_reg));
+
+ Label not_optimized;
+ __ bind(&not_optimized);
+
+ // Increment invocation count for the function.
+ __ Lw(a4, FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ __ Add32(a4, a4, Operand(1));
+ __ Sw(a4, FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ __ bind(&push_stack_frame);
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ PushStandardFrame(closure);
+
+ // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
+  // 8-bit fields next to each other, so we could just optimize by writing a
+  // 16-bit store. These static asserts guard that our assumption is valid.
+ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOsrNestingLevelOffset));
+
+ // Load initial bytecode offset.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ // Push bytecode array and Smi tagged bytecode array offset.
+ __ SmiTag(a4, kInterpreterBytecodeOffsetRegister);
+ __ Push(kInterpreterBytecodeArrayRegister, a4);
+
+ // Allocate the local and temporary register file on the stack.
+ Label stack_overflow;
+ {
+ // Load frame size (word) from the BytecodeArray object.
+ __ Lw(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kFrameSizeOffset));
+
+ // Do a stack check to ensure we don't go over the limit.
+ __ Sub64(a5, sp, Operand(a4));
+ __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kRealStackLimit);
+ __ Branch(&stack_overflow, Uless, a5, Operand(a2));
+
+ // If ok, push undefined as the initial value for all register file entries.
+ Label loop_header;
+ Label loop_check;
+ __ LoadRoot(a5, RootIndex::kUndefinedValue);
+ __ Branch(&loop_check);
+ __ bind(&loop_header);
+ // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+ __ push(a5);
+ // Continue loop if not done.
+ __ bind(&loop_check);
+ __ Sub64(a4, a4, Operand(kPointerSize));
+ __ Branch(&loop_header, ge, a4, Operand(zero_reg));
+ }
+
+ // If the bytecode array has a valid incoming new target or generator object
+  // register, initialize it with the incoming value which was passed in a3.
+ Label no_incoming_new_target_or_generator_register;
+ __ Lw(a5, FieldMemOperand(
+ kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
+ __ Branch(&no_incoming_new_target_or_generator_register, eq, a5,
+ Operand(zero_reg));
+ __ CalcScaledAddress(a5, fp, a5, kPointerSizeLog2);
+ __ Sd(a3, MemOperand(a5));
+ __ bind(&no_incoming_new_target_or_generator_register);
+
+ // Perform interrupt stack check.
+ // TODO(solanes): Merge with the real stack limit check above.
+ Label stack_check_interrupt, after_stack_check_interrupt;
+ __ LoadStackLimit(a5, MacroAssembler::StackLimitKind::kInterruptStackLimit);
+ __ Branch(&stack_check_interrupt, Uless, sp, Operand(a5));
+ __ bind(&after_stack_check_interrupt);
+
+ // Load accumulator as undefined.
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+ // Load the dispatch table into a register and dispatch to the bytecode
+ // handler at the current bytecode offset.
+ Label do_dispatch;
+ __ bind(&do_dispatch);
+ __ li(kInterpreterDispatchTableRegister,
+ ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
+ __ Add64(a1, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ Lbu(a7, MemOperand(a1));
+ __ CalcScaledAddress(kScratchReg, kInterpreterDispatchTableRegister, a7,
+ kPointerSizeLog2);
+ __ Ld(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg));
+ __ Call(kJavaScriptCallCodeStartRegister);
+ masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
+
+ // Any returns to the entry trampoline are either due to the return bytecode
+ // or the interpreter tail calling a builtin and then a dispatch.
+
+ // Get bytecode array and bytecode offset from the stack frame.
+ __ Ld(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Ld(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ // Either return, or advance to the next bytecode and dispatch.
+ Label do_return;
+ __ Add64(a1, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ Lbu(a1, MemOperand(a1));
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+ a4, &do_return);
+ __ Branch(&do_dispatch);
+
+ __ bind(&do_return);
+ // The return value is in a0.
+ LeaveInterpreterFrame(masm, t0, t1);
+ __ Jump(ra);
+
+ __ bind(&stack_check_interrupt);
+ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
+ // for the call to the StackGuard.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset)));
+ __ Sd(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ CallRuntime(Runtime::kStackGuard);
+
+ // After the call, restore the bytecode array, bytecode offset and accumulator
+ // registers again. Also, restore the bytecode offset in the stack to its
+ // previous value.
+ __ Ld(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+ __ SmiTag(a5, kInterpreterBytecodeOffsetRegister);
+ __ Sd(a5, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ __ Branch(&after_stack_check_interrupt);
+
+ __ bind(&has_optimized_code_or_marker);
+ Label maybe_has_optimized_code;
+  // Check if there is optimized code available (non-zero optimization tier).
+ __ And(t0, optimization_state, FeedbackVector::OptimizationTierBits::kMask);
+ __ Branch(&maybe_has_optimized_code, ne, t0, Operand(zero_reg));
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+ // Fall through if there's no runnable optimized code.
+ __ Branch(&not_optimized);
+
+ __ bind(&maybe_has_optimized_code);
+ Register optimized_code_entry = optimization_state;
+ __ Ld(optimization_marker,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
+
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, t4, a5);
+
+ __ bind(&compile_lazy);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+ // Unreachable code.
+ __ break_(0xCC);
+
+ __ bind(&stack_overflow);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ break_(0xCC);
+}
+
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+ Register num_args,
+ Register start_address,
+ Register scratch) {
+ // Find the address of the last argument.
+ __ Sub64(scratch, num_args, Operand(1));
+ __ Sll64(scratch, scratch, kPointerSizeLog2);
+ __ Sub64(start_address, start_address, scratch);
+
+ // Push the arguments.
+ __ PushArray(start_address, num_args,
+ TurboAssembler::PushArrayOrder::kReverse);
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsThenCallImpl(
+ MacroAssembler* masm, ConvertReceiverMode receiver_mode,
+ InterpreterPushArgsMode mode) {
+ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a2 : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- a1 : the target to call (can be any Object).
+ // -----------------------------------
+ Label stack_overflow;
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // The spread argument should not be pushed.
+ __ Sub64(a0, a0, Operand(1));
+ }
+
+ __ Add64(a3, a0, Operand(1)); // Add one for receiver.
+
+ __ StackOverflowCheck(a3, a4, t0, &stack_overflow);
+
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ // Don't copy receiver.
+ __ Move(a3, a0);
+ }
+
+ // This function modifies a2 and a4.
+ Generate_InterpreterPushArgs(masm, a3, a2, a4);
+
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ __ PushRoot(RootIndex::kUndefinedValue);
+ }
+
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Pass the spread in the register a2.
+    // a2 already points to the penultimate argument, the spread
+ // is below that.
+ __ Ld(a2, MemOperand(a2, -kSystemPointerSize));
+ }
+
+ // Call the target.
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
+ RelocInfo::CODE_TARGET);
+ } else {
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
+ RelocInfo::CODE_TARGET);
+ }
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ break_(0xCC);
+ }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
+ MacroAssembler* masm, InterpreterPushArgsMode mode) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (not including receiver)
+ // -- a3 : new target
+ // -- a1 : constructor to call
+ // -- a2 : allocation site feedback if available, undefined otherwise.
+ // -- a4 : address of the first argument
+ // -----------------------------------
+ Label stack_overflow;
+ __ Add64(a6, a0, 1);
+ __ StackOverflowCheck(a6, a5, t0, &stack_overflow);
+
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // The spread argument should not be pushed.
+ __ Sub64(a0, a0, Operand(1));
+ }
+
+  // Push the arguments. This function modifies a4 and a5.
+ Generate_InterpreterPushArgs(masm, a0, a4, a5);
+
+ // Push a slot for the receiver.
+ __ push(zero_reg);
+
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Pass the spread in the register a2.
+ // a4 already points to the penultimate argument, the spread
+ // lies in the next interpreter register.
+ __ Ld(a2, MemOperand(a4, -kSystemPointerSize));
+ } else {
+ __ AssertUndefinedOrAllocationSite(a2, t0);
+ }
+
+ if (mode == InterpreterPushArgsMode::kArrayFunction) {
+ __ AssertFunction(a1);
+
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
+ RelocInfo::CODE_TARGET);
+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Call the constructor with a0, a1, and a3 unmodified.
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
+ RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
+ // Call the constructor with a0, a1, and a3 unmodified.
+ __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
+ }
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ break_(0xCC);
+ }
+}
+
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
+ // Set the return address to the correct point in the interpreter entry
+ // trampoline.
+ Label builtin_trampoline, trampoline_loaded;
+ Smi interpreter_entry_return_pc_offset(
+ masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
+
+ // If the SFI function_data is an InterpreterData, the function will have a
+ // custom copy of the interpreter entry trampoline for profiling. If so,
+ // get the custom trampoline, otherwise grab the entry address of the global
+ // trampoline.
+ __ Ld(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ Ld(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
+ __ GetObjectType(t0, kInterpreterDispatchTableRegister,
+ kInterpreterDispatchTableRegister);
+ __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
+ Operand(INTERPRETER_DATA_TYPE));
+
+ __ Ld(t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
+ __ Add64(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Branch(&trampoline_loaded);
+
+ __ bind(&builtin_trampoline);
+ __ li(t0, ExternalReference::
+ address_of_interpreter_entry_trampoline_instruction_start(
+ masm->isolate()));
+ __ Ld(t0, MemOperand(t0));
+
+ __ bind(&trampoline_loaded);
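+  // Set ra so that "returning" from a bytecode handler resumes execution
+  // inside the interpreter entry trampoline at the dispatch point.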
+ __ Add64(ra, t0, Operand(interpreter_entry_return_pc_offset.value()));
+
+ // Initialize the dispatch table register.
+ __ li(kInterpreterDispatchTableRegister,
+ ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
+
+ // Get the bytecode array pointer from the frame.
+ __ Ld(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+
+ if (FLAG_debug_code) {
+    // Check that the function data field is actually a BytecodeArray object.
+ __ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg);
+ __ Assert(ne,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ kScratchReg, Operand(zero_reg));
+ __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
+ __ Assert(eq,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ a1, Operand(BYTECODE_ARRAY_TYPE));
+ }
+
+ // Get the target bytecode offset from the frame.
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ if (FLAG_debug_code) {
+ Label okay;
+ __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ // Unreachable code.
+ __ break_(0xCC);
+ __ bind(&okay);
+ }
+
+ // Dispatch to the target bytecode.
+ __ Add64(a1, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ Lbu(a7, MemOperand(a1));
+ __ CalcScaledAddress(a1, kInterpreterDispatchTableRegister, a7,
+ kPointerSizeLog2);
+ __ Ld(kJavaScriptCallCodeStartRegister, MemOperand(a1));
+ __ Jump(kJavaScriptCallCodeStartRegister);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+ // Advance the current bytecode offset stored within the given interpreter
+ // stack frame. This simulates what all bytecode handlers do upon completion
+ // of the underlying operation.
+ __ Ld(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Ld(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ Label enter_bytecode, function_entry_bytecode;
+ __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+
+ // Load the current bytecode.
+ __ Add64(a1, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ Lbu(a1, MemOperand(a1));
+
+ // Advance to the next bytecode.
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+ a4, &if_return);
+
+ __ bind(&enter_bytecode);
+  // Convert the new bytecode offset to a Smi and save it in the stack frame.
+ __ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
+ __ Sd(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ Generate_InterpreterEnterBytecode(masm);
+
+ __ bind(&function_entry_bytecode);
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset. Detect this case and advance to the first
+ // actual bytecode.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ Branch(&enter_bytecode);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ Generate_InterpreterEnterBytecode(masm);
+}
+
+namespace {
+void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
+ bool java_script_builtin,
+ bool with_result) {
+ const RegisterConfiguration* config(RegisterConfiguration::Default());
+ int allocatable_register_count = config->num_allocatable_general_registers();
+ UseScratchRegisterScope temp(masm);
+ Register scratch = temp.Acquire();
+ if (with_result) {
+ if (java_script_builtin) {
+ __ Move(scratch, a0);
+ } else {
+ // Overwrite the hole inserted by the deoptimizer with the return value
+ // from the LAZY deopt point.
+ __ Sd(a0,
+ MemOperand(
+ sp, config->num_allocatable_general_registers() * kPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize));
+ }
+ }
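+  // Pop all allocatable general registers that were saved in the builtin
+  // continuation frame, in reverse allocation order; the JavaScript argument
+  // count register was stored as a Smi and is untagged after being popped.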
+ for (int i = allocatable_register_count - 1; i >= 0; --i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ __ Pop(Register::from_code(code));
+ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
+ __ SmiUntag(Register::from_code(code));
+ }
+ }
+
+ if (with_result && java_script_builtin) {
+    // Overwrite the hole inserted by the deoptimizer with the return value
+    // from the LAZY deopt point. a0 contains the arguments count; the return
+    // value from LAZY is always the last argument.
+ __ Add64(a0, a0,
+ Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
+ __ CalcScaledAddress(t0, sp, a0, kSystemPointerSizeLog2);
+ __ Sd(scratch, MemOperand(t0));
+ // Recover arguments count.
+ __ Sub64(a0, a0,
+ Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
+ }
+
+ __ Ld(fp, MemOperand(
+ sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ // Load builtin index (stored as a Smi) and use it to get the builtin start
+ // address from the builtins table.
+ __ Pop(t0);
+ __ Add64(sp, sp,
+ Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(ra);
+ __ LoadEntryFromBuiltinIndex(t0);
+ __ Jump(t0);
+}
+} // namespace
+
+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, false);
+}
+
+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, true);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, false);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, true);
+}
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+ }
+
+ DCHECK_EQ(kInterpreterAccumulatorRegister.code(), a0.code());
+ __ Ld(a0, MemOperand(sp, 0 * kPointerSize));
+ __ Add64(sp, sp, Operand(1 * kPointerSize)); // Remove state.
+ __ Ret();
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+ }
+
+ // If the code object is null, just return to the caller.
+ __ Ret(eq, a0, Operand(Smi::zero()));
+
+  // Drop the handler frame that is sitting on top of the actual
+  // JavaScript frame. This is the case when OSR is triggered from bytecode.
+ __ LeaveFrame(StackFrame::STUB);
+
+ // Load deoptimization data from the code object.
+ // <deopt_data> = <code>[#deoptimization_data_offset]
+ __ Ld(a1, MemOperand(a0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ SmiUntag(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
+ DeoptimizationData::kOsrPcOffsetIndex) -
+ kHeapObjectTag));
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ Add64(a0, a0, a1);
+ __ Add64(ra, a0, Code::kHeaderSize - kHeapObjectTag);
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : receiver
+ // -- sp[4] : thisArg
+ // -- sp[8] : argArray
+ // -----------------------------------
+
+ Register argc = a0;
+ Register arg_array = a2;
+ Register receiver = a1;
+ Register this_arg = a5;
+ Register undefined_value = a3;
+
+ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
+
+ // 1. Load receiver into a1, argArray into a2 (if present), remove all
+ // arguments from the stack (including the receiver), and push thisArg (if
+ // present) instead.
+ {
+    // Claim (2 - argc) dummy arguments from the stack, to put the stack in a
+    // consistent state for a simple pop operation.
+
+ __ Ld(this_arg, MemOperand(sp, kPointerSize));
+ __ Ld(arg_array, MemOperand(sp, 2 * kPointerSize));
+
+ Label done0, done1;
+ __ Branch(&done0, ne, argc, Operand(zero_reg));
+ __ Move(arg_array, undefined_value); // if argc == 0
+ __ Move(this_arg, undefined_value); // if argc == 0
+ __ bind(&done0); // else (i.e., argc > 0)
+
+ __ Branch(&done1, ne, argc, Operand(1));
+ __ Move(arg_array, undefined_value); // if argc == 1
+ __ bind(&done1); // else (i.e., argc > 1)
+
+ __ Ld(receiver, MemOperand(sp));
+ __ CalcScaledAddress(sp, sp, argc, kSystemPointerSizeLog2);
+ __ Sd(this_arg, MemOperand(sp));
+ }
+
+ // ----------- S t a t e -------------
+ // -- a2 : argArray
+ // -- a1 : receiver
+ // -- a3 : undefined root value
+ // -- sp[0] : thisArg
+ // -----------------------------------
+
+ // 2. We don't need to check explicitly for callable receiver here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
+
+ // 3. Tail call with no arguments if argArray is null or undefined.
+ Label no_arguments;
+ __ JumpIfRoot(arg_array, RootIndex::kNullValue, &no_arguments);
+ __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value));
+
+ // 4a. Apply the receiver to the given argArray.
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
+ RelocInfo::CODE_TARGET);
+
+ // 4b. The argArray is either null or undefined, so we tail call without any
+ // arguments to the receiver.
+ __ bind(&no_arguments);
+ {
+ __ Move(a0, zero_reg);
+ DCHECK(receiver == a1);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ }
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
+ // 1. Get the callable to call (passed as receiver) from the stack.
+ { __ Pop(a1); }
+
+ // 2. Make sure we have at least one argument.
+ // a0: actual number of arguments
+ {
+ Label done;
+ __ Branch(&done, ne, a0, Operand(zero_reg));
+ __ PushRoot(RootIndex::kUndefinedValue);
+ __ Add64(a0, a0, Operand(1));
+ __ bind(&done);
+ }
+
+ // 3. Adjust the actual number of arguments.
+ __ Add64(a0, a0, -1);
+
+ // 4. Call the callable.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : receiver
+ // -- sp[8] : target (if argc >= 1)
+ // -- sp[16] : thisArgument (if argc >= 2)
+ // -- sp[24] : argumentsList (if argc == 3)
+ // -----------------------------------
+
+ Register argc = a0;
+ Register arguments_list = a2;
+ Register target = a1;
+ Register this_argument = a5;
+ Register undefined_value = a3;
+
+ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
+
+ // 1. Load target into a1 (if present), argumentsList into a2 (if present),
+ // remove all arguments from the stack (including the receiver), and push
+ // thisArgument (if present) instead.
+ {
+    // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
+    // consistent state for a simple pop operation.
+
+ __ Ld(target, MemOperand(sp, kPointerSize));
+ __ Ld(this_argument, MemOperand(sp, 2 * kPointerSize));
+ __ Ld(arguments_list, MemOperand(sp, 3 * kPointerSize));
+
+ Label done0, done1, done2;
+ __ Branch(&done0, ne, argc, Operand(zero_reg));
+ __ Move(arguments_list, undefined_value); // if argc == 0
+ __ Move(this_argument, undefined_value); // if argc == 0
+ __ Move(target, undefined_value); // if argc == 0
+ __ bind(&done0); // argc != 0
+
+ __ Branch(&done1, ne, argc, Operand(1));
+ __ Move(arguments_list, undefined_value); // if argc == 1
+ __ Move(this_argument, undefined_value); // if argc == 1
+ __ bind(&done1); // argc > 1
+
+ __ Branch(&done2, ne, argc, Operand(2));
+ __ Move(arguments_list, undefined_value); // if argc == 2
+ __ bind(&done2); // argc > 2
+
+ __ CalcScaledAddress(sp, sp, argc, kSystemPointerSizeLog2);
+ __ Sd(this_argument, MemOperand(sp, 0)); // Overwrite receiver
+ }
+
+ // ----------- S t a t e -------------
+ // -- a2 : argumentsList
+ // -- a1 : target
+ // -- a3 : undefined root value
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // 2. We don't need to check explicitly for callable target here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
+
+ // 3. Apply the target to the given argumentsList.
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
+ RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : receiver
+ // -- sp[8] : target
+ // -- sp[16] : argumentsList
+ // -- sp[24] : new.target (optional)
+ // -----------------------------------
+ Register argc = a0;
+ Register arguments_list = a2;
+ Register target = a1;
+ Register new_target = a3;
+ Register undefined_value = a4;
+
+ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
+
+ // 1. Load target into a1 (if present), argumentsList into a2 (if present),
+ // new.target into a3 (if present, otherwise use target), remove all
+ // arguments from the stack (including the receiver), and push thisArgument
+ // (if present) instead.
+ {
+    // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
+    // consistent state for a simple pop operation.
+ __ Ld(target, MemOperand(sp, kPointerSize));
+ __ Ld(arguments_list, MemOperand(sp, 2 * kPointerSize));
+ __ Ld(new_target, MemOperand(sp, 3 * kPointerSize));
+
+ Label done0, done1, done2;
+ __ Branch(&done0, ne, argc, Operand(zero_reg));
+ __ Move(arguments_list, undefined_value); // if argc == 0
+ __ Move(new_target, undefined_value); // if argc == 0
+ __ Move(target, undefined_value); // if argc == 0
+ __ bind(&done0);
+
+ __ Branch(&done1, ne, argc, Operand(1));
+ __ Move(arguments_list, undefined_value); // if argc == 1
+ __ Move(new_target, target); // if argc == 1
+ __ bind(&done1);
+
+ __ Branch(&done2, ne, argc, Operand(2));
+ __ Move(new_target, target); // if argc == 2
+ __ bind(&done2);
+
+ __ CalcScaledAddress(sp, sp, argc, kSystemPointerSizeLog2);
+ __ Sd(undefined_value, MemOperand(sp, 0)); // Overwrite receiver
+ }
+
+ // ----------- S t a t e -------------
+ // -- a2 : argumentsList
+ // -- a1 : target
+ // -- a3 : new.target
+ // -- sp[0] : receiver (undefined)
+ // -----------------------------------
+
+ // 2. We don't need to check explicitly for constructor target here,
+ // since that's the first thing the Construct/ConstructWithArrayLike
+ // builtins will do.
+
+ // 3. We don't need to check explicitly for constructor new.target here,
+ // since that's the second thing the Construct/ConstructWithArrayLike
+ // builtins will do.
+
+ // 4. Construct the target with the given new.target and argumentsList.
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
+ RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
+ // ----------- S t a t e -------------
+ // -- a1 : target
+ // -- a0 : number of parameters on the stack (not including the receiver)
+ // -- a2 : arguments list (a FixedArray)
+ // -- a4 : len (number of elements to push from args)
+ // -- a3 : new.target (for [[Construct]])
+ // -----------------------------------
+ if (masm->emit_debug_code()) {
+ // Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0.
+ Label ok, fail;
+ __ AssertNotSmi(a2);
+ __ GetObjectType(a2, kScratchReg, kScratchReg);
+ __ Branch(&ok, eq, kScratchReg, Operand(FIXED_ARRAY_TYPE));
+ __ Branch(&fail, ne, kScratchReg, Operand(FIXED_DOUBLE_ARRAY_TYPE));
+ __ Branch(&ok, eq, a4, Operand(zero_reg));
+ // Fall through.
+ __ bind(&fail);
+ __ Abort(AbortReason::kOperandIsNotAFixedArray);
+
+ __ bind(&ok);
+ }
+
+ Register args = a2;
+ Register len = a4;
+
+ // Check for stack overflow.
+ Label stack_overflow;
+ __ StackOverflowCheck(len, kScratchReg, a5, &stack_overflow);
+
+  // Move the arguments already on the stack,
+  // including the receiver.
+ {
+ Label copy;
+ Register src = a6, dest = a7;
+ __ Move(src, sp);
+ __ Sll64(t0, a4, kSystemPointerSizeLog2);
+ __ Sub64(sp, sp, Operand(t0));
+ // Update stack pointer.
+ __ Move(dest, sp);
+ __ Add64(t0, a0, Operand(zero_reg));
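+    // t0 holds the argument count; the loop below copies a0 + 1 words (the
+    // existing arguments plus the receiver) down into the space just
+    // reserved, so the arguments from the list can be stored in the vacated
+    // slots.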
+
+ __ bind(&copy);
+ __ Ld(t1, MemOperand(src, 0));
+ __ Sd(t1, MemOperand(dest, 0));
+ __ Sub64(t0, t0, Operand(1));
+ __ Add64(src, src, Operand(kSystemPointerSize));
+ __ Add64(dest, dest, Operand(kSystemPointerSize));
+ __ Branch(&copy, ge, t0, Operand(zero_reg));
+ }
+
+ // Push arguments onto the stack (thisArgument is already on the stack).
+ {
+ Label done, push, loop;
+ Register src = a6;
+ Register scratch = len;
+ __ Add64(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add64(a0, a0, len); // The 'len' argument for Call() or Construct().
+ __ Branch(&done, eq, len, Operand(zero_reg));
+ __ Sll64(scratch, len, kPointerSizeLog2);
+ __ Sub64(scratch, sp, Operand(scratch));
+ __ LoadRoot(t1, RootIndex::kTheHoleValue);
+ __ bind(&loop);
+ __ Ld(a5, MemOperand(src));
+ __ Add64(src, src, kPointerSize);
+ __ Branch(&push, ne, a5, Operand(t1));
+ __ LoadRoot(a5, RootIndex::kUndefinedValue);
+ __ bind(&push);
+ __ Sd(a5, MemOperand(a7, 0));
+ __ Add64(a7, a7, Operand(kSystemPointerSize));
+ __ Add64(scratch, scratch, Operand(kSystemPointerSize));
+ __ Branch(&loop, ne, scratch, Operand(sp));
+ __ bind(&done);
+ }
+
+ // Tail-call to the actual Call or Construct builtin.
+ __ Jump(code, RelocInfo::CODE_TARGET);
+
+ __ bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+}
+
+// static
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ CallOrConstructMode mode,
+ Handle<Code> code) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a3 : the new.target (for [[Construct]] calls)
+ // -- a1 : the target to call (can be any Object)
+ // -- a2 : start index (to support rest parameters)
+ // -----------------------------------
+
+ // Check if new.target has a [[Construct]] internal method.
+ if (mode == CallOrConstructMode::kConstruct) {
+ Label new_target_constructor, new_target_not_constructor;
+ __ JumpIfSmi(a3, &new_target_not_constructor);
+ __ Ld(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ Lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t1, t1, Operand(Map::Bits1::IsConstructorBit::kMask));
+ __ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
+ __ bind(&new_target_not_constructor);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ Push(a3);
+ __ CallRuntime(Runtime::kThrowNotConstructor);
+ }
+ __ bind(&new_target_constructor);
+ }
+
+ // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
+ // code is erased.
+ __ Move(a6, fp);
+ __ Ld(a7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+
+ Label stack_done, stack_overflow;
+ __ Sub32(a7, a7, a2);
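+  // a7 now holds the number of arguments to forward: the caller's argument
+  // count minus the start index. Nothing needs to be copied if it is not
+  // positive.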
+ __ Branch(&stack_done, le, a7, Operand(zero_reg));
+ {
+ // Check for stack overflow.
+ __ StackOverflowCheck(a7, a4, a5, &stack_overflow);
+
+ // Forward the arguments from the caller frame.
+
+ // Point to the first argument to copy (skipping the receiver).
+ __ Add64(a6, a6,
+ Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
+ kSystemPointerSize));
+ __ CalcScaledAddress(a6, a6, a2, kSystemPointerSizeLog2);
+
+    // Move the arguments already on the stack,
+    // including the receiver.
+ {
+ Label copy;
+ Register src = t0, dest = a2;
+ __ Move(src, sp);
+ // Update stack pointer.
+ __ Sll64(t1, a7, kSystemPointerSizeLog2);
+ __ Sub64(sp, sp, Operand(t1));
+ __ Move(dest, sp);
+ __ Add64(t2, a0, Operand(zero_reg));
+
+ __ bind(&copy);
+ __ Ld(t1, MemOperand(src, 0));
+ __ Sd(t1, MemOperand(dest, 0));
+ __ Sub64(t2, t2, Operand(1));
+ __ Add64(src, src, Operand(kSystemPointerSize));
+ __ Add64(dest, dest, Operand(kSystemPointerSize));
+ __ Branch(&copy, ge, t2, Operand(zero_reg));
+ }
+
+ // Copy arguments from the caller frame.
+ // TODO(victorgomes): Consider using forward order as potentially more cache
+ // friendly.
+ {
+ Label loop;
+ __ Add64(a0, a0, a7);
+ __ bind(&loop);
+ {
+ __ Sub32(a7, a7, Operand(1));
+ __ CalcScaledAddress(t0, a6, a7, kPointerSizeLog2);
+ __ Ld(kScratchReg, MemOperand(t0));
+ __ CalcScaledAddress(t0, a2, a7, kPointerSizeLog2);
+ __ Sd(kScratchReg, MemOperand(t0));
+ __ Branch(&loop, ne, a7, Operand(zero_reg));
+ }
+ }
+ }
+ __ Branch(&stack_done);
+ __ bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&stack_done);
+
+ // Tail-call to the {code} handler.
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSFunction)
+ // -----------------------------------
+ __ AssertFunction(a1);
+
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that function is not a "classConstructor".
+ Label class_constructor;
+ __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
+ __ And(kScratchReg, a3,
+ Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
+ __ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg));
+
+ // Enter the context of the function; ToObject has to run in the function
+ // context, and we also need to take the global proxy from the function
+ // context in case of conversion.
+ __ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // We need to convert the receiver for non-native sloppy mode functions.
+ Label done_convert;
+ __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
+ __ And(kScratchReg, a3,
+ Operand(SharedFunctionInfo::IsNativeBit::kMask |
+ SharedFunctionInfo::IsStrictBit::kMask));
+ __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg));
+ {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSFunction)
+ // -- a2 : the shared function info.
+ // -- cp : the function context.
+ // -----------------------------------
+
+ if (mode == ConvertReceiverMode::kNullOrUndefined) {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(a3);
+ } else {
+ Label convert_to_object, convert_receiver;
+ __ LoadReceiver(a3, a0);
+ __ JumpIfSmi(a3, &convert_to_object);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ GetObjectType(a3, a4, a4);
+ __ Branch(&done_convert, Ugreater_equal, a4,
+ Operand(FIRST_JS_RECEIVER_TYPE));
+ if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+ Label convert_global_proxy;
+ __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy);
+ __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(a3);
+ }
+ __ Branch(&convert_receiver);
+ }
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(a0);
+ __ Push(a0, a1);
+ __ Move(a0, a3);
+ __ Push(cp);
+ __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
+ RelocInfo::CODE_TARGET);
+ __ Pop(cp);
+ __ Move(a3, a0);
+ __ Pop(a0, a1);
+ __ SmiUntag(a0);
+ }
+ __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
+ }
+ __ StoreReceiver(a3, a0, kScratchReg);
+ }
+ __ bind(&done_convert);
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSFunction)
+ // -- a2 : the shared function info.
+ // -- cp : the function context.
+ // -----------------------------------
+
+ __ Lhu(a2,
+ FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ InvokeFunctionCode(a1, no_reg, a2, a0, JUMP_FUNCTION);
+
+  // The function is a "classConstructor"; we need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ }
+}
+
+// static
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(a1);
+
+ // Patch the receiver to [[BoundThis]].
+ {
+ __ Ld(t0, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
+ __ StoreReceiver(t0, a0, kScratchReg);
+ }
+
+ // Load [[BoundArguments]] into a2 and length of that into a4.
+ __ Ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- a4 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ Sll64(a5, a4, kPointerSizeLog2);
+ __ Sub64(t0, sp, Operand(a5));
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
+ __ Branch(&done, Ugreater_equal, t0, Operand(kScratchReg));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Pop receiver.
+ __ Pop(t0);
+
+ // Push [[BoundArguments]].
+ {
+ Label loop, done_loop;
+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ Add64(a0, a0, Operand(a4));
+ __ Add64(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ __ Sub64(a4, a4, Operand(1));
+ __ Branch(&done_loop, lt, a4, Operand(zero_reg));
+ __ CalcScaledAddress(a5, a2, a4, kPointerSizeLog2);
+ __ Ld(kScratchReg, MemOperand(a5));
+ __ Push(kScratchReg);
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Push receiver.
+ __ Push(t0);
+
+ // Call the [[BoundTargetFunction]] via the Call builtin.
+ __ Ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
+ RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the target to call (can be any Object).
+ // -----------------------------------
+
+ Label non_callable, non_smi;
+ __ JumpIfSmi(a1, &non_callable);
+ __ bind(&non_smi);
+ __ LoadMap(t1, a1);
+ __ GetInstanceTypeRange(t1, t2, FIRST_JS_FUNCTION_TYPE, t4);
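+  // t2 holds the raw instance type and t4 the type relative to
+  // FIRST_JS_FUNCTION_TYPE; dispatch straight to CallFunction for any kind of
+  // JSFunction.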
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ RelocInfo::CODE_TARGET, Uless_equal, t4,
+ Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+
+ // Check if target has a [[Call]] internal method.
+ __ Lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t1, t1, Operand(Map::Bits1::IsCallableBit::kMask));
+ __ Branch(&non_callable, eq, t1, Operand(zero_reg));
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq,
+ t2, Operand(JS_PROXY_TYPE));
+
+ // 2. Call to something else, which might have a [[Call]] internal method (if
+ // not we raise an exception).
+ // Overwrite the original receiver with the (original) target.
+ __ StoreReceiver(a1, a0, kScratchReg);
+ // Let the "call_as_function_delegate" take care of the rest.
+ __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
+ __ Jump(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
+
+ // 3. Call to something that is not callable.
+ __ bind(&non_callable);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
+ }
+}
+
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the constructor to call (checked to be a JSFunction)
+ // -- a3 : the new target (checked to be a constructor)
+ // -----------------------------------
+ __ AssertConstructor(a1);
+ __ AssertFunction(a1);
+
+  // The calling convention for function-specific ConstructStubs requires
+  // a2 to contain either an AllocationSite or undefined.
+ __ LoadRoot(a2, RootIndex::kUndefinedValue);
+
+ Label call_generic_stub;
+
+ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
+ __ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Lwu(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
+ __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
+ __ Branch(&call_generic_stub, eq, a4, Operand(zero_reg));
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&call_generic_stub);
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
+ RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a3 : the new target (checked to be a constructor)
+ // -----------------------------------
+ __ AssertConstructor(a1);
+ __ AssertBoundFunction(a1);
+
+ // Load [[BoundArguments]] into a2 and length of that into a4.
+ __ Ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- a3 : the new target (checked to be a constructor)
+ // -- a4 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ Sll64(a5, a4, kPointerSizeLog2);
+ __ Sub64(t0, sp, Operand(a5));
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
+ __ Branch(&done, Ugreater_equal, t0, Operand(kScratchReg));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Pop receiver.
+ __ Pop(t0);
+
+ // Push [[BoundArguments]].
+ {
+ Label loop, done_loop;
+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ Add64(a0, a0, Operand(a4));
+ __ Add64(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ __ Sub64(a4, a4, Operand(1));
+ __ Branch(&done_loop, lt, a4, Operand(zero_reg));
+ __ CalcScaledAddress(a5, a2, a4, kPointerSizeLog2);
+ __ Ld(kScratchReg, MemOperand(a5));
+ __ Push(kScratchReg);
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Push receiver.
+ __ Push(t0);
+
+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+ {
+ Label skip_load;
+ __ Branch(&skip_load, ne, a1, Operand(a3));
+ __ Ld(a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ bind(&skip_load);
+ }
+
+ // Construct the [[BoundTargetFunction]] via the Construct builtin.
+ __ Ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the constructor to call (can be any Object)
+ // -- a3 : the new target (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ // Check if target is a Smi.
+ Label non_constructor, non_proxy;
+ __ JumpIfSmi(a1, &non_constructor);
+
+ // Check if target has a [[Construct]] internal method.
+ __ Ld(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Lbu(t4, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t4, t4, Operand(Map::Bits1::IsConstructorBit::kMask));
+ __ Branch(&non_constructor, eq, t4, Operand(zero_reg));
+
+ // Dispatch based on instance type.
+ __ GetInstanceTypeRange(t1, t2, FIRST_JS_FUNCTION_TYPE, t0);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
+ RelocInfo::CODE_TARGET, Uless_equal, t0,
+ Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
+
+ // Only dispatch to bound functions after checking whether they are
+ // constructors.
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+
+ // Only dispatch to proxies after checking whether they are constructors.
+ __ Branch(&non_proxy, ne, t2, Operand(JS_PROXY_TYPE));
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
+ RelocInfo::CODE_TARGET);
+
+ // Called Construct on an exotic Object with a [[Construct]] internal method.
+ __ bind(&non_proxy);
+ {
+ // Overwrite the original receiver with the (original) target.
+ __ StoreReceiver(a1, a0, kScratchReg);
+ // Let the "call_as_constructor_delegate" take care of the rest.
+ __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
+ __ Jump(masm->isolate()->builtins()->CallFunction(),
+ RelocInfo::CODE_TARGET);
+ }
+
+ // Called Construct on an Object that doesn't have a [[Construct]] internal
+ // method.
+ __ bind(&non_constructor);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
+ RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+ // The function index was put in t0 by the jump table trampoline.
+ // Convert to Smi for the runtime call
+ __ SmiTag(kWasmCompileLazyFuncIndexRegister);
+ {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
+
+ // Save all parameter registers (see kGpParamRegisters in wasm-linkage.cc).
+ // They might be overwritten in the runtime call below. We don't have any
+ // callee-saved registers in wasm, so no need to store anything else.
+ RegList gp_regs = 0;
+ for (Register gp_param_reg : wasm::kGpParamRegisters) {
+ gp_regs |= gp_param_reg.bit();
+ }
+    // The set of GP parameter registers must contain an even number of
+    // entries so that the pushed block is a multiple of 16 bytes (see
+    // {TurboAssembler::PushCPURegList}).
+ CHECK_EQ(0, NumRegs(gp_regs) % 2);
+
+ RegList fp_regs = 0;
+ for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
+ fp_regs |= fp_param_reg.bit();
+ }
+
+ CHECK_EQ(NumRegs(gp_regs), arraysize(wasm::kGpParamRegisters));
+ CHECK_EQ(NumRegs(fp_regs), arraysize(wasm::kFpParamRegisters));
+ CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
+ NumRegs(gp_regs));
+ CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
+ NumRegs(fp_regs));
+ __ MultiPush(gp_regs);
+ __ MultiPushFPU(fp_regs);
+
+    // Pass the instance and the function index as explicit arguments to the
+    // runtime function.
+ __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ Move(kContextRegister, Smi::zero());
+ __ CallRuntime(Runtime::kWasmCompileLazy, 2);
+
+ __ Move(s1, a0); // move return value to s1 since a0 will be restored to
+ // the value before the call
+
+ // Restore registers.
+ __ MultiPopFPU(fp_regs);
+ __ MultiPop(gp_regs);
+ }
+ // Finally, jump to the entrypoint.
+ __ Jump(s1);
+}
+
+void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ {
+ FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
+
+ // Save all parameter registers. They might hold live values, we restore
+ // them after the runtime call.
+ __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
+
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ Move(cp, Smi::zero());
+ __ CallRuntime(Runtime::kWasmDebugBreak, 0);
+
+ // Restore registers.
+ __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
+ __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ }
+ __ Ret();
+}
+
+void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
+ SaveFPRegsMode save_doubles, ArgvMode argv_mode,
+ bool builtin_exit_frame) {
+ // Called from JavaScript; parameters are on stack as if calling JS function
+ // a0: number of arguments including receiver
+ // a1: pointer to builtin function
+ // fp: frame pointer (restored after C call)
+ // sp: stack pointer (restored as callee's sp after C call)
+ // cp: current context (C callee-saved)
+ //
+ // If argv_mode == kArgvInRegister:
+ // a2: pointer to the first argument
+
+ if (argv_mode == kArgvInRegister) {
+ // Move argv into the correct register.
+ __ Move(s1, a2);
+ } else {
+ // Compute the argv pointer in a callee-saved register.
+ __ CalcScaledAddress(s1, sp, a0, kPointerSizeLog2);
+ __ Sub64(s1, s1, kPointerSize);
+ }
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(
+ save_doubles == kSaveFPRegs, 0,
+ builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
+
+ // s3: number of arguments including receiver (C callee-saved)
+ // s1: pointer to first argument (C callee-saved)
+ // s2: pointer to builtin function (C callee-saved)
+
+ // Prepare arguments for C routine.
+ // a0 = argc
+ __ Move(s3, a0);
+ __ Move(s2, a1);
+
+  // We are calling compiled C/C++ code. a0 and a1 hold our two arguments; no
+  // extra C argument slots need to be reserved on the stack here.
+
+ __ AssertStackIsAligned();
+
+ // a0 = argc, a1 = argv, a2 = isolate
+ __ li(a2, ExternalReference::isolate_address(masm->isolate()));
+ __ Move(a1, s1);
+
+ __ StoreReturnAddressAndCall(s2);
+
+ // Result returned in a0 or a1:a0 - do not destroy these registers!
+
+ // Check result for exception sentinel.
+ Label exception_returned;
+ __ LoadRoot(a4, RootIndex::kException);
+ __ Branch(&exception_returned, eq, a4, Operand(a0));
+
+ // Check that there is no pending exception, otherwise we
+ // should have returned the exception sentinel.
+ if (FLAG_debug_code) {
+ Label okay;
+ ExternalReference pending_exception_address = ExternalReference::Create(
+ IsolateAddressId::kPendingExceptionAddress, masm->isolate());
+ __ li(a2, pending_exception_address);
+ __ Ld(a2, MemOperand(a2));
+ __ LoadRoot(a4, RootIndex::kTheHoleValue);
+    // Cannot use Check here, as it attempts to generate a call into the
+    // runtime.
+ __ Branch(&okay, eq, a4, Operand(a2));
+ __ stop();
+ __ bind(&okay);
+ }
+
+ // Exit C frame and return.
+ // a0:a1: result
+ // sp: stack pointer
+ // fp: frame pointer
+ Register argc = argv_mode == kArgvInRegister
+ // We don't want to pop arguments so set argc to no_reg.
+ ? no_reg
+ // s3: still holds argc (callee-saved).
+ : s3;
+ __ LeaveExitFrame(save_doubles == kSaveFPRegs, argc, EMIT_RETURN);
+
+ // Handling of exception.
+ __ bind(&exception_returned);
+
+ ExternalReference pending_handler_context_address = ExternalReference::Create(
+ IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
+ ExternalReference pending_handler_entrypoint_address =
+ ExternalReference::Create(
+ IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
+ ExternalReference pending_handler_fp_address = ExternalReference::Create(
+ IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
+ ExternalReference pending_handler_sp_address = ExternalReference::Create(
+ IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
+
+ // Ask the runtime for help to determine the handler. This will set a0 to
+ // contain the current pending exception, don't clobber it.
+ ExternalReference find_handler =
+  // contain the current pending exception; don't clobber it.
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(3, 0, a0);
+ __ Move(a0, zero_reg);
+ __ Move(a1, zero_reg);
+ __ li(a2, ExternalReference::isolate_address(masm->isolate()));
+ __ CallCFunction(find_handler, 3);
+ }
+
+ // Retrieve the handler context, SP and FP.
+ __ li(cp, pending_handler_context_address);
+ __ Ld(cp, MemOperand(cp));
+ __ li(sp, pending_handler_sp_address);
+ __ Ld(sp, MemOperand(sp));
+ __ li(fp, pending_handler_fp_address);
+ __ Ld(fp, MemOperand(fp));
+
+ // If the handler is a JS frame, restore the context to the frame. Note that
+ // the context will be set to (cp == 0) for non-JS frames.
+ Label zero;
+ __ Branch(&zero, eq, cp, Operand(zero_reg));
+ __ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ bind(&zero);
+
+ // Reset the masking register. This is done independent of the underlying
+ // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
+ // with both configurations. It is safe to always do this, because the
+ // underlying register is caller-saved and can be arbitrarily clobbered.
+ __ ResetSpeculationPoisonRegister();
+
+ // Compute the handler entry address and jump to it.
+ UseScratchRegisterScope temp(masm);
+ Register scratch = temp.Acquire();
+ __ li(scratch, pending_handler_entrypoint_address);
+ __ Ld(scratch, MemOperand(scratch));
+ __ Jump(scratch);
+}
+
+void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
+ Label done;
+ Register result_reg = t0;
+
+ Register scratch = GetRegisterThatIsNotOneOf(result_reg);
+ Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
+ Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
+ DoubleRegister double_scratch = kScratchDoubleReg;
+
+ // Account for saved regs.
+ const int kArgumentOffset = 4 * kPointerSize;
+
+ __ Push(result_reg);
+ __ Push(scratch, scratch2, scratch3);
+
+ // Load double input.
+ __ LoadDouble(double_scratch, MemOperand(sp, kArgumentOffset));
+
+  // Try a conversion to a signed integer; if an exception occurs, scratch is
+  // set to 0.
+ __ Trunc_w_d(scratch3, double_scratch, scratch);
+
+ // If we had no exceptions then set result_reg and we are done.
+ Label error;
+ __ Branch(&error, eq, scratch, Operand(zero_reg));
+ __ Move(result_reg, scratch3);
+ __ Branch(&done);
+ __ bind(&error);
+
+ // Load the double value and perform a manual truncation.
+ Register input_high = scratch2;
+ Register input_low = scratch3;
+
+ __ Lw(input_low, MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
+ __ Lw(input_high,
+ MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
+
+ Label normal_exponent;
+ // Extract the biased exponent in result.
+ __ ExtractBits(result_reg, input_high, HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+
+ // Check for Infinity and NaNs, which should return 0.
+ __ Sub32(scratch, result_reg, HeapNumber::kExponentMask);
+ __ LoadZeroIfConditionZero(
+ result_reg,
+ scratch); // result_reg = scratch == 0 ? 0 : result_reg
+ __ Branch(&done, eq, scratch, Operand(zero_reg));
+
+ // Express exponent as delta to (number of mantissa bits + 31).
+ __ Sub32(result_reg, result_reg,
+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
+
+ // If the delta is strictly positive, all bits would be shifted away,
+ // which means that we can return 0.
+ __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
+ __ Move(result_reg, zero_reg);
+ __ Branch(&done);
+
+ __ bind(&normal_exponent);
+ const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ // Calculate shift.
+ __ Add32(scratch, result_reg,
+ Operand(kShiftBase + HeapNumber::kMantissaBits));
+
+ // Save the sign.
+ Register sign = result_reg;
+ result_reg = no_reg;
+ __ And(sign, input_high, Operand(HeapNumber::kSignMask));
+
+ // We must specially handle shifts greater than 31.
+ Label high_shift_needed, high_shift_done;
+ __ Branch(&high_shift_needed, lt, scratch, Operand(32));
+ __ Move(input_high, zero_reg);
+ __ Branch(&high_shift_done);
+ __ bind(&high_shift_needed);
+
+ // Set the implicit 1 before the mantissa part in input_high.
+ __ Or(input_high, input_high,
+ Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+ // Shift the mantissa bits to the correct position.
+ // We don't need to clear non-mantissa bits as they will be shifted away.
+ // If they weren't, it would mean that the answer is in the 32bit range.
+ __ Sll32(input_high, input_high, scratch);
+
+ __ bind(&high_shift_done);
+
+ // Replace the shifted bits with bits from the lower mantissa word.
+ Label pos_shift, shift_done, sign_negative;
+ __ li(kScratchReg, 32);
+ __ subw(scratch, kScratchReg, scratch);
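+  // scratch now holds 32 - shift: if it is non-negative, the low mantissa
+  // word is shifted right by that amount; otherwise it is shifted left by the
+  // negated value.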
+ __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
+
+ // Negate scratch.
+ __ Sub32(scratch, zero_reg, scratch);
+ __ Sll32(input_low, input_low, scratch);
+ __ Branch(&shift_done);
+
+ __ bind(&pos_shift);
+ __ srlw(input_low, input_low, scratch);
+
+ __ bind(&shift_done);
+ __ Or(input_high, input_high, Operand(input_low));
+ // Restore sign if necessary.
+ __ Move(scratch, sign);
+ result_reg = sign;
+ sign = no_reg;
+ __ Sub32(result_reg, zero_reg, input_high);
+ __ Branch(&sign_negative, ne, scratch, Operand(zero_reg));
+ __ Move(result_reg, input_high);
+ __ bind(&sign_negative);
+
+ __ bind(&done);
+
+ __ Sd(result_reg, MemOperand(sp, kArgumentOffset));
+ __ Pop(scratch, scratch2, scratch3);
+ __ Pop(result_reg);
+ __ Ret();
+}
+
+void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
+ // TODO(v8:10701): Implement for this platform.
+ __ Trap();
+}
+
+namespace {
+
+int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ int64_t offset = (ref0.address() - ref1.address());
+ DCHECK(static_cast<int>(offset) == offset);
+ return static_cast<int>(offset);
+}
+
+// Calls an API function. Allocates a HandleScope, extracts the returned value
+// from the handle, and propagates exceptions. Restores the context.
+// stack_space - space to be unwound on exit (includes the call JS arguments
+// space and the additional space allocated for the fast call).
+void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
+ ExternalReference thunk_ref, int stack_space,
+ MemOperand* stack_space_operand,
+ MemOperand return_value_operand) {
+ Isolate* isolate = masm->isolate();
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate), next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate), next_address);
+
+ DCHECK(function_address == a1 || function_address == a2);
+
+ Label profiler_enabled, end_profiler_check;
+ {
+ UseScratchRegisterScope temp(masm);
+ Register scratch = temp.Acquire();
+ __ li(scratch, ExternalReference::is_profiling_address(isolate));
+ __ Lb(scratch, MemOperand(scratch, 0));
+ __ Branch(&profiler_enabled, ne, scratch, Operand(zero_reg));
+ __ li(scratch, ExternalReference::address_of_runtime_stats_flag());
+ __ Lw(scratch, MemOperand(scratch, 0));
+ __ Branch(&profiler_enabled, ne, scratch, Operand(zero_reg));
+ {
+ // Call the api function directly.
+ __ Move(scratch, function_address);
+ __ Branch(&end_profiler_check);
+ }
+
+ __ bind(&profiler_enabled);
+ {
+ // Additional parameter is the address of the actual callback.
+ __ li(scratch, thunk_ref);
+ }
+ __ bind(&end_profiler_check);
+
+ // Allocate HandleScope in callee-save registers.
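+    // s5 points at handle_scope.next; s3 caches next, s1 caches the limit,
+    // and s2 caches the nesting level, which is incremented before the call
+    // below.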
+ __ li(s5, next_address);
+ __ Ld(s3, MemOperand(s5, kNextOffset));
+ __ Ld(s1, MemOperand(s5, kLimitOffset));
+ __ Lw(s2, MemOperand(s5, kLevelOffset));
+ __ Add32(s2, s2, Operand(1));
+ __ Sw(s2, MemOperand(s5, kLevelOffset));
+
+ __ StoreReturnAddressAndCall(scratch);
+ }
+
+ Label promote_scheduled_exception;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+ // Load value from ReturnValue.
+ __ Ld(a0, return_value_operand);
+ __ bind(&return_value_loaded);
+
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ Sd(s3, MemOperand(s5, kNextOffset));
+ if (__ emit_debug_code()) {
+ __ Lw(a1, MemOperand(s5, kLevelOffset));
+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
+ Operand(s2));
+ }
+ __ Sub32(s2, s2, Operand(1));
+ __ Sw(s2, MemOperand(s5, kLevelOffset));
+ __ Ld(kScratchReg, MemOperand(s5, kLimitOffset));
+ __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
+
+ // Leave the API exit frame.
+ __ bind(&leave_exit_frame);
+
+ if (stack_space_operand == nullptr) {
+ DCHECK_NE(stack_space, 0);
+ __ li(s3, Operand(stack_space));
+ } else {
+ DCHECK_EQ(stack_space, 0);
+ STATIC_ASSERT(kCArgSlotCount == 0);
+ __ Ld(s3, *stack_space_operand);
+ }
+
+ static constexpr bool kDontSaveDoubles = false;
+ static constexpr bool kRegisterContainsSlotCount = false;
+ __ LeaveExitFrame(kDontSaveDoubles, s3, NO_EMIT_RETURN,
+ kRegisterContainsSlotCount);
+
+ // Check if the function scheduled an exception.
+ __ LoadRoot(a4, RootIndex::kTheHoleValue);
+ __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
+ __ Ld(a5, MemOperand(kScratchReg));
+ __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
+
+ __ Ret();
+
+ // Re-throw by promoting a scheduled exception.
+ __ bind(&promote_scheduled_exception);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ __ bind(&delete_allocated_handles);
+ __ Sd(s1, MemOperand(s5, kLimitOffset));
+ __ Move(s3, a0);
+ __ PrepareCallCFunction(1, s1);
+ __ li(a0, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
+ __ Move(a0, s3);
+ __ Branch(&leave_exit_frame);
+}
+
+} // namespace
+
+void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- a1 : api function address
+ // -- a2 : arguments count (not including the receiver)
+ // -- a3 : call data
+ // -- a0 : holder
+ // --
+ // -- sp[0] : receiver
+ // -- sp[8] : first argument
+ // -- ...
+ // -- sp[(argc) * 8] : last argument
+ // -----------------------------------
+
+ Register api_function_address = a1;
+ Register argc = a2;
+ Register call_data = a3;
+ Register holder = a0;
+ Register scratch = t0;
+ Register base = t1; // For addressing MemOperands on the stack.
+
+ DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch,
+ base));
+
+ using FCA = FunctionCallbackArguments;
+
+ STATIC_ASSERT(FCA::kArgsLength == 6);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+
+ // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
+ //
+ // Target state:
+ // sp[0 * kPointerSize]: kHolder
+ // sp[1 * kPointerSize]: kIsolate
+ // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
+ // sp[3 * kPointerSize]: undefined (kReturnValue)
+ // sp[4 * kPointerSize]: kData
+ // sp[5 * kPointerSize]: undefined (kNewTarget)
+
+ // Set up the base register for addressing through MemOperands. It will point
+ // at the receiver (located at sp + argc * kPointerSize).
+ __ CalcScaledAddress(base, sp, argc, kPointerSizeLog2);
+
+ // Reserve space on the stack.
+ __ Sub64(sp, sp, Operand(FCA::kArgsLength * kPointerSize));
+
+ // kHolder.
+ __ Sd(holder, MemOperand(sp, 0 * kPointerSize));
+
+ // kIsolate.
+ __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
+ __ Sd(scratch, MemOperand(sp, 1 * kPointerSize));
+
+ // kReturnValueDefaultValue and kReturnValue.
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ __ Sd(scratch, MemOperand(sp, 2 * kPointerSize));
+ __ Sd(scratch, MemOperand(sp, 3 * kPointerSize));
+
+ // kData.
+ __ Sd(call_data, MemOperand(sp, 4 * kPointerSize));
+
+ // kNewTarget.
+ __ Sd(scratch, MemOperand(sp, 5 * kPointerSize));
+
+ // Keep a pointer to kHolder (= implicit_args) in a scratch register.
+ // We use it below to set up the FunctionCallbackInfo object.
+ __ Move(scratch, sp);
+
+  // Allocate the v8::FunctionCallbackInfo structure in the arguments' space,
+  // since it is not controlled by the GC.
+ static constexpr int kApiStackSpace = 4;
+ static constexpr bool kDontSaveDoubles = false;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
+
+ // EnterExitFrame may align the sp.
+
+ // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
+ // Arguments are after the return address (pushed by EnterExitFrame()).
+ __ Sd(scratch, MemOperand(sp, 1 * kPointerSize));
+
+ // FunctionCallbackInfo::values_ (points at the first varargs argument passed
+ // on the stack).
+ __ Add64(scratch, scratch,
+ Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
+ __ Sd(scratch, MemOperand(sp, 2 * kPointerSize));
+
+ // FunctionCallbackInfo::length_.
+  // Stored as an int field; 32-bit integers within a struct on the stack are
+  // always left-justified by the n64 ABI.
+ __ Sw(argc, MemOperand(sp, 3 * kPointerSize));
+
+  // We also store the number of stack slots to drop after returning from the
+  // API function here.
+  // Note: Unlike on other architectures, this is a slot count, not a byte
+  // count.
+ __ Add64(scratch, argc, Operand(FCA::kArgsLength + 1 /* receiver */));
+ __ Sd(scratch, MemOperand(sp, 4 * kPointerSize));
+
+ // v8::InvocationCallback's argument.
+ DCHECK(!AreAliased(api_function_address, scratch, a0));
+ __ Add64(a0, sp, Operand(1 * kPointerSize));
+
+ ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
+
+ // There are two stack slots above the arguments we constructed on the stack.
+ // TODO(jgruber): Document what these arguments are.
+ static constexpr int kStackSlotsAboveFCA = 2;
+ MemOperand return_value_operand(
+ fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
+
+ static constexpr int kUseStackSpaceOperand = 0;
+ MemOperand stack_space_operand(sp, 4 * kPointerSize);
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kUseStackSpaceOperand, &stack_space_operand,
+ return_value_operand);
+}
+
+void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
+ Register scratch = a4;
+ DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+ Register api_function_address = a2;
+
+ // Here and below +1 is for name() pushed after the args_ array.
+ using PCA = PropertyCallbackArguments;
+ __ Sub64(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
+ __ Sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
+ __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ Sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
+ __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
+ kPointerSize));
+ __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
+ __ Sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
+ __ Sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
+ // should_throw_on_error -> false
+ DCHECK_EQ(0, Smi::zero().ptr());
+ __ Sd(zero_reg,
+ MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
+ __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ Sd(scratch, MemOperand(sp, 0 * kPointerSize));
+
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+  // Load the address of v8::PropertyCallbackInfo::args_ and the name handle.
+ __ Move(a0, sp); // a0 = Handle<Name>
+ __ Add64(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_
+
+ const int kApiStackSpace = 1;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+  // Create the v8::PropertyCallbackInfo object on the stack and initialize
+  // its args_ field.
+ __ Sd(a1, MemOperand(sp, 1 * kPointerSize));
+ __ Add64(a1, sp, Operand(1 * kPointerSize));
+ // a1 = v8::PropertyCallbackInfo&
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback();
+
+ __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ Ld(api_function_address,
+ FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+
+ // +3 is to skip prolog, return address and name handle.
+ MemOperand return_value_operand(
+ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
+ MemOperand* const kUseStackSpaceConstant = nullptr;
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kStackUnwindSpace, kUseStackSpaceConstant,
+ return_value_operand);
+}
+
+void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
+ // The sole purpose of DirectCEntry is for movable callers (e.g. any general
+ // purpose Code object) to be able to call into C functions that may trigger
+ // GC and thus move the caller.
+ //
+ // DirectCEntry places the return address on the stack (updated by the GC),
+ // making the call GC safe. The irregexp backend relies on this.
+
+  // Make room for arguments to fit the C calling convention. Callers use
+ // EnterExitFrame/LeaveExitFrame so they handle stack restoring and we don't
+ // have to do that here. Any caller must drop kCArgsSlotsSize stack space
+ // after the call.
+ __ Add64(sp, sp, -kCArgsSlotsSize);
+
+ __ Sd(ra, MemOperand(sp, kCArgsSlotsSize)); // Store the return address.
+ __ Call(t6); // Call the C++ function.
+ __ Ld(t6, MemOperand(sp, kCArgsSlotsSize)); // Return to calling code.
+
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ // In case of an error the return address may point to a memory area
+ // filled with kZapValue by the GC. Dereference the address and check for
+ // this.
+ __ Uld(a4, MemOperand(t6));
+ __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, a4,
+ Operand(reinterpret_cast<uint64_t>(kZapValue)));
+ }
+
+ __ Jump(t6);
+}
+
+namespace {
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
+void Generate_DeoptimizationEntry(MacroAssembler* masm,
+ DeoptimizeKind deopt_kind) {
+ Isolate* isolate = masm->isolate();
+
+ // Unlike on ARM we don't save all the registers, just the useful ones.
+ // For the rest, there are gaps on the stack, so the offsets remain the same.
+ const int kNumberOfRegisters = Register::kNumRegisters;
+
+ RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+ RegList saved_regs = restored_regs | sp.bit() | ra.bit();
+
+ const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
+
+ // Save all double FPU registers before messing with them.
+ __ Sub64(sp, sp, Operand(kDoubleRegsSize));
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int offset = code * kDoubleSize;
+ __ StoreDouble(fpu_reg, MemOperand(sp, offset));
+ }
+
+ // Push saved_regs (needed to populate FrameDescription::registers_).
+ // Leave gaps for other registers.
+ __ Sub64(sp, sp, kNumberOfRegisters * kPointerSize);
+ for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
+ if ((saved_regs & (1 << i)) != 0) {
+ __ Sd(ToRegister(i), MemOperand(sp, kPointerSize * i));
+ }
+ }
+
+ __ li(a2,
+ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
+ __ Sd(fp, MemOperand(a2));
+
+ const int kSavedRegistersAreaSize =
+ (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+
+ __ li(a2, Operand(Deoptimizer::kFixedExitSizeMarker));
+ // Get the address of the location in the code object (a3) (return
+ // address for lazy deoptimization) and compute the fp-to-sp delta in
+ // register a4.
+ __ Move(a3, ra);
+ __ Add64(a4, sp, Operand(kSavedRegistersAreaSize));
+
+ __ Sub64(a4, fp, a4);
+
+ // Allocate a new deoptimizer object.
+ __ PrepareCallCFunction(6, a5);
+ // Pass six arguments, according to n64 ABI.
+ __ Move(a0, zero_reg);
+ Label context_check;
+ __ Ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(a1, &context_check);
+ __ Ld(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
+ __ li(a1, Operand(static_cast<int>(deopt_kind)));
+ // a2: bailout id already loaded.
+ // a3: code address or 0 already loaded.
+ // a4: already has fp-to-sp delta.
+ __ li(a5, ExternalReference::isolate_address(isolate));
+
+ // Call Deoptimizer::New().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ }
+
+  // Preserve the "deoptimizer" object in register a0 and load the input
+  // frame descriptor pointer into a1 (deoptimizer->input_).
+ __ Ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
+
+ // Copy core registers into FrameDescription::registers_[kNumRegisters].
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((saved_regs & (1 << i)) != 0) {
+ __ Ld(a2, MemOperand(sp, i * kPointerSize));
+ __ Sd(a2, MemOperand(a1, offset));
+ } else if (FLAG_debug_code) {
+ __ li(a2, kDebugZapValue);
+ __ Sd(a2, MemOperand(a1, offset));
+ }
+ }
+
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ // Copy FPU registers to
+ // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ __ LoadDouble(ft0, MemOperand(sp, src_offset));
+ __ StoreDouble(ft0, MemOperand(a1, dst_offset));
+ }
+
+ // Remove the saved registers from the stack.
+ __ Add64(sp, sp, Operand(kSavedRegistersAreaSize));
+
+ // Compute a pointer to the unwinding limit in register a2; that is
+ // the first stack slot not part of the input frame.
+ __ Ld(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
+ __ Add64(a2, a2, sp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ Add64(a3, a1, Operand(FrameDescription::frame_content_offset()));
+ Label pop_loop;
+ Label pop_loop_header;
+ __ BranchShort(&pop_loop_header);
+ __ bind(&pop_loop);
+ __ pop(a4);
+ __ Sd(a4, MemOperand(a3, 0));
+ __ Add64(a3, a3, sizeof(uint64_t));
+ __ bind(&pop_loop_header);
+ __ BranchShort(&pop_loop, ne, a2, Operand(sp));
+ // Compute the output frame in the deoptimizer.
+ __ push(a0); // Preserve deoptimizer object across call.
+ // a0: deoptimizer object; a1: scratch.
+ __ PrepareCallCFunction(1, a1);
+ // Call Deoptimizer::ComputeOutputFrames().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ }
+ __ pop(a0); // Restore deoptimizer object (class Deoptimizer).
+
+ __ Ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
+ // Outer loop state: a4 = current "FrameDescription** output_",
+ // a1 = one past the last FrameDescription**.
+ __ Lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
+ __ Ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
+ __ CalcScaledAddress(a1, a4, a1, kPointerSizeLog2);
+ __ BranchShort(&outer_loop_header);
+ __ bind(&outer_push_loop);
+ // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
+ __ Ld(a2, MemOperand(a4, 0)); // output_[ix]
+ __ Ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
+ __ BranchShort(&inner_loop_header);
+ __ bind(&inner_push_loop);
+ __ Sub64(a3, a3, Operand(sizeof(uint64_t)));
+ __ Add64(a6, a2, Operand(a3));
+ __ Ld(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
+ __ push(a7);
+ __ bind(&inner_loop_header);
+ __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
+
+ __ Add64(a4, a4, Operand(kPointerSize));
+ __ bind(&outer_loop_header);
+ __ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
+
+ __ Ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
+ __ LoadDouble(fpu_reg, MemOperand(a1, src_offset));
+ }
+
+ // Push pc and continuation from the last output frame.
+ __ Ld(a6, MemOperand(a2, FrameDescription::pc_offset()));
+ __ push(a6);
+ __ Ld(a6, MemOperand(a2, FrameDescription::continuation_offset()));
+ __ push(a6);
+
+  // Technically, restoring 't3' should work unless zero_reg is also restored,
+  // but it's safer to check for this.
+ DCHECK(!(t3.bit() & restored_regs));
+ // Restore the registers from the last output frame.
+ __ Move(t3, a2);
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((restored_regs & (1 << i)) != 0) {
+ __ Ld(ToRegister(i), MemOperand(t3, offset));
+ }
+ }
+
+ __ pop(t3); // Get continuation, leave pc on stack.
+ __ pop(ra);
+ __ Jump(t3);
+ __ stop();
+}
+
+} // namespace
+
+void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
+}
+
+void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+
+  // Only save the registers that the DynamicCheckMaps builtin can clobber.
+ DynamicCheckMapsDescriptor descriptor;
+ RegList registers = descriptor.allocatable_registers();
+  // If FLAG_debug_code is enabled, CSA checks will call a C function, so we
+  // need to save all CallerSaved registers too.
+ if (FLAG_debug_code) registers |= kJSCallerSaved;
+ __ SaveRegisters(registers);
+
+ // Load the immediate arguments from the deopt exit to pass to the builtin.
+ Register slot_arg =
+ descriptor.GetRegisterParameter(DynamicCheckMapsDescriptor::kSlot);
+ Register handler_arg =
+ descriptor.GetRegisterParameter(DynamicCheckMapsDescriptor::kHandler);
+ __ Ld(handler_arg, MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
+ __ Uld(slot_arg, MemOperand(handler_arg,
+ Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
+ __ Uld(
+ handler_arg,
+ MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
+ __ Call(BUILTIN_CODE(masm->isolate(), DynamicCheckMaps),
+ RelocInfo::CODE_TARGET);
+
+ Label deopt, bailout;
+ __ Branch(&deopt, ne, a0,
+ Operand(static_cast<int>(DynamicCheckMapsStatus::kSuccess)));
+
+ __ RestoreRegisters(registers);
+ __ LeaveFrame(StackFrame::INTERNAL);
+ __ Ret();
+
+ __ bind(&deopt);
+ __ Branch(&bailout, eq, a0,
+ Operand(static_cast<int>(DynamicCheckMapsStatus::kBailout)));
+
+ if (FLAG_debug_code) {
+ __ Assert(eq, AbortReason::kUnexpectedDynamicCheckMapsStatus, a0,
+ Operand(static_cast<int>(DynamicCheckMapsStatus::kDeopt)));
+ }
+ __ RestoreRegisters(registers);
+ __ LeaveFrame(StackFrame::INTERNAL);
+ Handle<Code> deopt_eager = masm->isolate()->builtins()->builtin_handle(
+ Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
+ __ Jump(deopt_eager, RelocInfo::CODE_TARGET);
+
+ __ bind(&bailout);
+ __ RestoreRegisters(registers);
+ __ LeaveFrame(StackFrame::INTERNAL);
+ Handle<Code> deopt_bailout = masm->isolate()->builtins()->builtin_handle(
+ Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
+ __ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_RISCV64
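
For readers less familiar with the HandleScope bookkeeping that the riscv64 CallApiFunctionAndReturn epilogue above encodes in assembly, here is a minimal C++ sketch of the same control flow. Everything in it (HandleScopeData, DeleteExtensions, PromoteScheduledException) is a hypothetical stand-in for illustration, not V8's actual implementation.

#include <cstdio>

// Hypothetical bookkeeping block; the real per-isolate data is reached
// through s5 in the assembly above.
struct HandleScopeData {
  void** next = nullptr;
  void** limit = nullptr;
  int level = 0;
};

// Stand-ins for the two runtime paths the builtin branches to.
static void DeleteExtensions(HandleScopeData*) { std::puts("free extra handle blocks"); }
static void PromoteScheduledException() { std::puts("re-throw scheduled exception"); }

static void* CallApiAndReturn(void* (*api_fn)(), HandleScopeData* scope,
                              void** scheduled_exception_slot, void* the_hole) {
  void** prev_next = scope->next;    // saved before the call
  void** prev_limit = scope->limit;
  scope->level++;

  void* result = api_fn();           // the actual C++ callback

  scope->level--;                    // restore the HandleScope state
  scope->next = prev_next;
  if (scope->limit != prev_limit) {  // the callback grew the scope
    scope->limit = prev_limit;
    DeleteExtensions(scope);         // the "delete_allocated_handles" path
  }
  if (*scheduled_exception_slot != the_hole) {
    PromoteScheduledException();     // the "promote_scheduled_exception" path
  }
  return result;
}

static void* DummyCallback() { return nullptr; }

int main() {
  HandleScopeData scope;
  void* hole = &scope;       // any sentinel works for the sketch
  void* scheduled = hole;    // nothing scheduled
  CallApiAndReturn(&DummyCallback, &scope, &scheduled, hole);
}
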
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index d7390f00ed..95dbb9a9b6 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -548,7 +548,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
}
// save r6 to r1
- __ mov(r1, r6);
+ __ mov(r0, r6);
// Push a frame with special values setup to mark it as an entry frame.
// Bad FP (-1)
@@ -565,11 +565,18 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ mov(r8, Operand(StackFrame::TypeToMarker(type)));
__ mov(r7, Operand(StackFrame::TypeToMarker(type)));
// Save copies of the top frame descriptor on the stack.
- __ Move(r6, ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, masm->isolate()));
- __ LoadU64(r6, MemOperand(r6));
+ __ Move(r1, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ LoadU64(r6, MemOperand(r1));
__ StoreMultipleP(r6, r9, MemOperand(sp, kSystemPointerSize));
+  // Clear c_entry_fp, now that we've pushed its previous value to the stack.
+ // If the c_entry_fp is not already zero and we don't clear it, the
+ // SafeStackFrameIterator will assume we are executing C++ and miss the JS
+ // frames on top.
+ __ mov(r6, Operand::Zero());
+ __ StoreU64(r6, MemOperand(r1));
+
Register scrach = r8;
// Set up frame pointer for the frame to be pushed.
@@ -581,7 +588,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
EntryFrameConstants::kCallerFPOffset - kSystemPointerSize;
// restore r6
- __ mov(r6, r1);
+ __ mov(r6, r0);
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
@@ -860,7 +867,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ LoadU32(params_size,
FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
Register actual_params_size = scratch2;
// Compute the size of the actual parameters + receiver (in bytes).
__ LoadU64(actual_params_size,
@@ -877,7 +883,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ bge(&corrected_args_count);
__ mov(params_size, actual_params_size);
__ bind(&corrected_args_count);
-#endif
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
@@ -1013,7 +1018,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ AddS64(bytecode_offset, bytecode_offset, Operand(1));
__ LoadU8(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ AddS64(bytecode_size_table, bytecode_size_table,
- Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
+ Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
__ b(&process_bytecode);
__ bind(&extra_wide);
@@ -1021,7 +1026,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ AddS64(bytecode_offset, bytecode_offset, Operand(1));
__ LoadU8(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ AddS64(bytecode_size_table, bytecode_size_table,
- Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
+ Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
// Load the size of the current bytecode.
__ bind(&process_bytecode);
@@ -1047,13 +1052,35 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
- __ ShiftLeftU64(scratch3, bytecode, Operand(2));
- __ LoadU32(scratch3, MemOperand(bytecode_size_table, scratch3));
+ __ LoadU8(scratch3, MemOperand(bytecode_size_table, bytecode));
__ AddS64(bytecode_offset, bytecode_offset, scratch3);
__ bind(&end);
}
+static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
+ MacroAssembler* masm, Register optimization_state,
+ Register feedback_vector) {
+ Label maybe_has_optimized_code;
+ // Check if optimized code is available
+ __ TestBitMask(optimization_state,
+ FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
+ r0);
+ __ beq(&maybe_has_optimized_code);
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+
+ __ bind(&maybe_has_optimized_code);
+ Register optimized_code_entry = optimization_state;
+ __ LoadAnyTaggedField(
+      optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, r8);
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
@@ -1274,27 +1301,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
__ bind(&has_optimized_code_or_marker);
- Label maybe_has_optimized_code;
-
- // Check if optimized code is available
- __ TestBitMask(optimization_state,
- FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
- r0);
- __ beq(&maybe_has_optimized_code);
-
- Register optimization_marker = optimization_state;
- __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
- MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
- // Fall through if there's no runnable optimized code.
- __ jmp(&not_optimized);
-
- __ bind(&maybe_has_optimized_code);
- Register optimized_code_entry = optimization_state;
- __ LoadAnyTaggedField(
- optimization_marker,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kMaybeOptimizedCodeOffset));
- TailCallOptimizedCodeSlot(masm, optimized_code_entry, r8);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ feedback_vector);
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
@@ -1878,44 +1886,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ SmiTag(r2);
- __ mov(r6, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- // Stack updated as such:
- // old SP --->
- // R14 Return Addr
- // Old FP <--- New FP
- // Argument Adapter SMI
- // Function
- // ArgC as SMI
- // Padding <--- New SP
- __ lay(sp, MemOperand(sp, -5 * kSystemPointerSize));
-
- // Cleanse the top nibble of 31-bit pointers.
- __ CleanseP(r14);
- __ StoreU64(r14, MemOperand(sp, 4 * kSystemPointerSize));
- __ StoreU64(fp, MemOperand(sp, 3 * kSystemPointerSize));
- __ StoreU64(r6, MemOperand(sp, 2 * kSystemPointerSize));
- __ StoreU64(r3, MemOperand(sp, 1 * kSystemPointerSize));
- __ StoreU64(r2, MemOperand(sp, 0 * kSystemPointerSize));
- __ Push(Smi::zero()); // Padding.
- __ la(fp,
- MemOperand(sp, ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
-}
-
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : result being passed through
- // -----------------------------------
- // Get the number of arguments passed (as a smi), tear down the frame and
- // then tear down the parameters.
- __ LoadU64(r3, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- int stack_adjustment = kSystemPointerSize; // adjust for receiver
- __ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR, stack_adjustment);
- __ SmiToPtrArrayOffset(r3, r3);
- __ lay(sp, MemOperand(sp, r3));
-}
-
// static
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
@@ -2036,41 +2006,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&new_target_constructor);
}
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
- // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
- // code is erased.
- __ mov(r6, fp);
- __ LoadU64(r7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
-#else
- // Check if we have an arguments adaptor frame below the function frame.
- Label arguments_adaptor, arguments_done;
- __ LoadU64(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadU64(scratch,
- MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpS64(scratch,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ beq(&arguments_adaptor);
- {
- __ LoadU64(r7, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ LoadTaggedPointerField(
- r7, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
- __ LoadU16(
- r7,
- FieldMemOperand(r7, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(r6, fp);
- }
- __ b(&arguments_done);
- __ bind(&arguments_adaptor);
- {
- // Load the length from the ArgumentsAdaptorFrame.
- __ LoadU64(r7,
- MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(r7);
- }
- __ bind(&arguments_done);
-#endif
-
Label stack_done, stack_overflow;
+ __ LoadU64(r7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ SubS64(r7, r7, r4);
__ ble(&stack_done);
{
@@ -2090,7 +2027,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Forward the arguments from the caller frame.
__ mov(r5, r5);
// Point to the first argument to copy (skipping the receiver).
- __ AddS64(r6, r6,
+ __ AddS64(r6, fp,
Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
kSystemPointerSize));
__ ShiftLeftU64(scratch, r4, Operand(kSystemPointerSizeLog2));
@@ -2353,9 +2290,11 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
Label non_callable, non_smi;
__ JumpIfSmi(r3, &non_callable);
__ bind(&non_smi);
- __ CompareObjectType(r3, r6, r7, JS_FUNCTION_TYPE);
+ __ LoadMap(r6, r3);
+ __ CompareInstanceTypeRange(r6, r7, FIRST_JS_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
- RelocInfo::CODE_TARGET, eq);
+ RelocInfo::CODE_TARGET, le);
__ CmpS64(r7, Operand(JS_BOUND_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
RelocInfo::CODE_TARGET, eq);
@@ -2466,9 +2405,10 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ beq(&non_constructor);
// Dispatch based on instance type.
- __ CompareInstanceType(r6, r7, JS_FUNCTION_TYPE);
+ __ CompareInstanceTypeRange(r6, r7, FIRST_JS_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET, eq);
+ RelocInfo::CODE_TARGET, le);
// Only dispatch to bound functions after checking whether they are
// constructors.
@@ -2500,153 +2440,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : actual number of arguments
- // -- r3 : function (passed through to callee)
- // -- r4 : expected number of arguments
- // -- r5 : new target (passed through to callee)
- // -----------------------------------
-
- Label dont_adapt_arguments, stack_overflow;
- __ tmll(r4, Operand(kDontAdaptArgumentsSentinel));
- __ b(Condition(1), &dont_adapt_arguments);
- __ LoadTaggedPointerField(
- r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadU32(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
-
- // -------------------------------------------
- // Adapt arguments.
- // -------------------------------------------
- {
- Label under_application, over_application, invoke;
- __ CmpS64(r2, r4);
- __ blt(&under_application);
-
- // Enough parameters: actual >= expected
- __ bind(&over_application);
- {
- EnterArgumentsAdaptorFrame(masm);
- __ StackOverflowCheck(r4, r7, &stack_overflow);
-
- // Calculate copy start address into r2 and copy end address into r6.
- // r2: actual number of arguments as a smi
- // r3: function
- // r4: expected number of arguments
- // r5: new target (passed through to callee)
- __ ShiftLeftU64(r2, r4, Operand(kSystemPointerSizeLog2));
- __ AddS64(r2, fp);
- // adjust for return address and receiver
- __ AddS64(r2, r2, Operand(2 * kSystemPointerSize));
- __ ShiftLeftU64(r6, r4, Operand(kSystemPointerSizeLog2));
- __ SubS64(r6, r2, r6);
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // r2: copy start address
- // r3: function
- // r4: expected number of arguments
- // r5: new target (passed through to callee)
- // r6: copy end address
-
- Label copy;
- __ bind(&copy);
- __ LoadU64(r0, MemOperand(r2, 0));
- __ push(r0);
- __ CmpS64(r2, r6); // Compare before moving to next argument.
- __ lay(r2, MemOperand(r2, -kSystemPointerSize));
- __ bne(&copy);
-
- __ b(&invoke);
- }
-
- // Too few parameters: Actual < expected
- __ bind(&under_application);
- {
- EnterArgumentsAdaptorFrame(masm);
- __ StackOverflowCheck(r4, r7, &stack_overflow);
-
- // Fill the remaining expected arguments with undefined.
- // r0: actual number of arguments as a smi
- // r1: function
- // r2: expected number of arguments
- // r3: new target (passed through to callee)
- __ LoadRoot(r7, RootIndex::kUndefinedValue);
- __ SmiUntag(r1, r2);
- __ SubS64(r8, r4, r1);
- __ ShiftLeftU64(r1, r8, Operand(kSystemPointerSizeLog2));
- __ SubS64(r6, fp, r1);
- // Adjust for frame.
- __ SubS64(r6, r6,
- Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
- kSystemPointerSize));
-
- Label fill;
- __ bind(&fill);
- __ push(r7);
- __ CmpS64(sp, r6);
- __ b(ne, &fill);
-
- // Calculate copy start address into r0 and copy end address is fp.
- // r0: actual number of arguments as a smi
- // r1: function
- // r2: expected number of arguments
- // r3: new target (passed through to callee)
- __ SmiToPtrArrayOffset(r2, r2);
- __ lay(r2, MemOperand(r2, fp));
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // r0: copy start address
- // r1: function
- // r2: expected number of arguments
- // r3: new target (passed through to callee)
- Label copy;
- __ bind(&copy);
-
- // Adjust load for return address and receiver.
- __ LoadU64(r7, MemOperand(r2, 2 * kSystemPointerSize));
- __ push(r7);
-
- __ CmpS64(r2, fp); // Compare before moving to next argument.
- __ lay(r2, MemOperand(r2, -kSystemPointerSize));
- __ b(ne, &copy);
- }
-
- // Call the entry point.
- __ bind(&invoke);
- __ mov(r2, r4);
- // r2 : expected number of arguments
- // r3 : function (passed through to callee)
- // r5 : new target (passed through to callee)
- static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
- __ LoadTaggedPointerField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
- __ CallCodeObject(r4);
-
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(
- masm->pc_offset());
-
- // Exit frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ Ret();
- }
-
- // -------------------------------------------
- // Dont adapt arguments.
- // -------------------------------------------
- __ bind(&dont_adapt_arguments);
- __ RecordComment("-- Call without adapting args --");
- static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
- __ LoadTaggedPointerField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
- __ JumpCodeObject(r4);
-
- __ bind(&stack_overflow);
- {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ bkpt(0);
- }
-}
-
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in a register by the jump table trampoline.
// Convert to Smi for the runtime call.
@@ -2656,17 +2449,45 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
- // Save all parameter registers (see wasm-linkage.cc). They might be
+ // Save all parameter registers (see wasm-linkage.h). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs = Register::ListOf(r2, r3, r4, r5, r6);
-#if V8_TARGET_ARCH_S390X
- constexpr RegList fp_regs = DoubleRegister::ListOf(d0, d2, d4, d6);
-#else
- constexpr RegList fp_regs = DoubleRegister::ListOf(d0, d2);
-#endif
+ RegList gp_regs = 0;
+ for (Register gp_param_reg : wasm::kGpParamRegisters) {
+ gp_regs |= gp_param_reg.bit();
+ }
+
+ RegList fp_regs = 0;
+ for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
+ fp_regs |= fp_param_reg.bit();
+ }
+
+ CHECK_EQ(NumRegs(gp_regs), arraysize(wasm::kGpParamRegisters));
+ CHECK_EQ(NumRegs(fp_regs), arraysize(wasm::kFpParamRegisters));
+ CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
+ NumRegs(gp_regs));
+ CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
+ NumRegs(fp_regs));
+
__ MultiPush(gp_regs);
+  // Check if the machine has SIMD enabled; if so, push vector registers. If
+  // not, only push double registers.
+ Label push_doubles, simd_pushed;
+ __ Move(r1, ExternalReference::supports_wasm_simd_128_address());
+ __ LoadU8(r1, MemOperand(r1));
+ __ LoadAndTestP(r1, r1); // If > 0 then simd is available.
+ __ ble(&push_doubles, Label::kNear);
+ // Save vector registers, don't save double registers anymore.
+ __ MultiPushV128(fp_regs);
+ __ b(&simd_pushed);
+ __ bind(&push_doubles);
+ // Simd not supported, only save double registers.
__ MultiPushDoubles(fp_regs);
+  // kFixedFrameSizeFromFp is hard-coded to include space for SIMD
+ // registers, so we still need to allocate extra (unused) space on the stack
+ // as if they were saved.
+ __ lay(sp, MemOperand(sp, -(NumRegs(fp_regs) * kDoubleSize)));
+ __ bind(&simd_pushed);
// Pass instance and function index as explicit arguments to the runtime
// function.
@@ -2679,7 +2500,19 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ mov(ip, r2);
// Restore registers.
+ __ Move(r1, ExternalReference::supports_wasm_simd_128_address());
+ __ LoadU8(r1, MemOperand(r1));
+ Label pop_doubles, simd_popped;
+ __ LoadAndTestP(r1, r1); // If > 0 then simd is available.
+ __ ble(&pop_doubles, Label::kNear);
+ // Pop vector registers, don't pop double registers anymore.
+ __ MultiPopV128(fp_regs);
+ __ b(&simd_popped);
+ __ bind(&pop_doubles);
+ // Simd not supported, only pop double registers.
+ __ lay(sp, MemOperand(sp, NumRegs(fp_regs) * kDoubleSize));
__ MultiPopDoubles(fp_regs);
+ __ bind(&simd_popped);
__ MultiPop(gp_regs);
}
// Finally, jump to the entrypoint.
@@ -2870,6 +2703,15 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// underlying register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
+ // Clear c_entry_fp, like we do in `LeaveExitFrame`.
+ {
+ UseScratchRegisterScope temps(masm);
+ __ Move(r1, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ mov(r0, Operand::Zero());
+ __ StoreU64(r0, MemOperand(r1));
+ }
+
// Compute the handler entry address and jump to it.
__ Move(r3, pending_handler_entrypoint_address);
__ LoadU64(r3, MemOperand(r3));
diff --git a/deps/v8/src/builtins/string-endswith.tq b/deps/v8/src/builtins/string-endswith.tq
index 390ed2a63a..42e3f6fe4b 100644
--- a/deps/v8/src/builtins/string-endswith.tq
+++ b/deps/v8/src/builtins/string-endswith.tq
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(tebbi): This could be replaced with a fast C-call to
+// TODO(turbofan): This could be replaced with a fast C-call to
// CompareCharsUnsigned.
macro IsSubstringAt<A: type, B: type>(
string: ConstSlice<A>, searchStr: ConstSlice<B>, start: intptr): bool {
diff --git a/deps/v8/src/builtins/string-includes.tq b/deps/v8/src/builtins/string-includes.tq
new file mode 100644
index 0000000000..7da7aa7e6d
--- /dev/null
+++ b/deps/v8/src/builtins/string-includes.tq
@@ -0,0 +1,49 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-string-gen.h'
+
+namespace string {
+
+// https://tc39.es/ecma262/#sec-string.prototype.includes
+transitioning javascript builtin
+StringPrototypeIncludes(js-implicit context: NativeContext, receiver: JSAny)(
+ ...arguments): Boolean {
+ const methodName: constexpr string = 'String.prototype.includes';
+ const searchString: JSAny = arguments[0];
+ const position: JSAny = arguments[1];
+
+ // 1. Let O be ? RequireObjectCoercible(this value).
+ // 2. Let S be ? ToString(O).
+ const s = ToThisString(receiver, methodName);
+
+ // 3. Let isRegExp be ? IsRegExp(searchString).
+ // 4. If isRegExp is true, throw a TypeError exception.
+ if (regexp::IsRegExp(searchString)) {
+ ThrowTypeError(MessageTemplate::kFirstArgumentNotRegExp, methodName);
+ }
+
+ // 5. Let searchStr be ? ToString(searchString).
+ const searchStr = ToString_Inline(searchString);
+
+ // 6. Let pos be ? ToIntegerOrInfinity(position).
+ // 7. Assert: If position is undefined, then pos is 0.
+ let start: Smi = 0;
+ if (position != Undefined) {
+ // 8. Let len be the length of S.
+ const len = s.length_uintptr;
+
+ // 9. Let start be the result of clamping pos between 0 and len.
+ StaticAssertStringLengthFitsSmi();
+ start = Convert<Smi>(Signed(ClampToIndexRange(position, len)));
+ }
+
+ // 10. Let index be ! StringIndexOf(S, searchStr, start).
+ const index = StringIndexOf(s, searchStr, start);
+
+ // 11. If index is not -1, return true.
+ // 12. Return false.
+ return index != -1 ? True : False;
+}
+}
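
As an aside, the observable behaviour the new StringPrototypeIncludes builtin implements (clamp the position argument into [0, length], then search from that index) can be sketched in plain C++. This is only an illustration of the spec steps quoted in the comments above, not V8 code; it omits the RegExp check and the ToString coercions.

#include <algorithm>
#include <cstdio>
#include <string>

// Hypothetical sketch: clamp the requested start index, then do a plain
// substring search from that position.
static bool StringIncludes(const std::string& s, const std::string& search,
                           long long position = 0) {
  long long len = static_cast<long long>(s.size());
  long long start = std::clamp(position, 0LL, len);  // clamp pos between 0 and len
  return s.find(search, static_cast<size_t>(start)) != std::string::npos;
}

int main() {
  std::printf("%d\n", StringIncludes("hello world", "world"));     // 1
  std::printf("%d\n", StringIncludes("hello world", "world", 7));  // 0
  std::printf("%d\n", StringIncludes("hello world", "", 100));     // 1 (pos clamps to len)
}
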
diff --git a/deps/v8/src/builtins/string-indexof.tq b/deps/v8/src/builtins/string-indexof.tq
new file mode 100644
index 0000000000..f649d1d15e
--- /dev/null
+++ b/deps/v8/src/builtins/string-indexof.tq
@@ -0,0 +1,39 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-string-gen.h'
+
+namespace string {
+
+// https://tc39.es/ecma262/#sec-string.prototype.indexof
+transitioning javascript builtin
+StringPrototypeIndexOf(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): Smi {
+ const methodName: constexpr string = 'String.prototype.indexOf';
+ const searchString: JSAny = arguments[0];
+ const position: JSAny = arguments[1];
+
+ // 1. Let O be ? RequireObjectCoercible(this value).
+ // 2. Let S be ? ToString(O).
+ const s = ToThisString(receiver, methodName);
+
+ // 3. Let searchStr be ? ToString(searchString).
+ const searchStr = ToString_Inline(searchString);
+
+ // 4. Let pos be ? ToIntegerOrInfinity(position).
+ // 5. Assert: If position is undefined, then pos is 0.
+ let start: Smi = 0;
+ if (position != Undefined) {
+ // 6. Let len be the length of S.
+ const len = s.length_uintptr;
+
+ // 7. Let start be the result of clamping pos between 0 and len.
+ StaticAssertStringLengthFitsSmi();
+ start = Convert<Smi>(Signed(ClampToIndexRange(position, len)));
+ }
+
+ // 8. Let index be ! StringIndexOf(S, searchStr, start).
+ return StringIndexOf(s, searchStr, start);
+}
+}
diff --git a/deps/v8/src/builtins/string-match-search.tq b/deps/v8/src/builtins/string-match-search.tq
new file mode 100644
index 0000000000..18dcf1b812
--- /dev/null
+++ b/deps/v8/src/builtins/string-match-search.tq
@@ -0,0 +1,86 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace string {
+
+struct StringMatchFunctor {
+ macro FnSymbol(): Symbol {
+ return MatchSymbolConstant();
+ }
+ macro CanCallFast(implicit context: Context)(maybeRegExp: HeapObject): bool {
+ return regexp::IsFastRegExpForMatch(maybeRegExp);
+ }
+ transitioning macro CallFast(implicit context: Context)(
+ regexp: FastJSRegExp, string: String): JSAny {
+ return regexp::RegExpMatchFast(regexp, string);
+ }
+}
+
+struct StringSearchFunctor {
+ macro FnSymbol(): Symbol {
+ return SearchSymbolConstant();
+ }
+ macro CanCallFast(implicit context: Context)(maybeRegExp: HeapObject): bool {
+ return regexp::IsFastRegExpForSearch(maybeRegExp);
+ }
+ transitioning macro CallFast(implicit context: Context)(
+ regexp: FastJSRegExp, string: String): JSAny {
+ return regexp::RegExpSearchFast(regexp, string);
+ }
+}
+
+transitioning macro StringMatchSearch<F: type>(
+ implicit context: NativeContext, receiver: JSAny)(
+ regexp: JSAny, functor: F, methodName: constexpr string): JSAny {
+ // 1. Let O be ? RequireObjectCoercible(this value).
+ RequireObjectCoercible(receiver, methodName);
+
+ try {
+ // 3. Let string be ? ToString(O).
+ const string = Cast<String>(receiver) otherwise Slow;
+ const heapRegexp = Cast<HeapObject>(regexp) otherwise Slow;
+ if (!functor.CanCallFast(heapRegexp)) goto Slow;
+
+ return functor.CallFast(UnsafeCast<FastJSRegExp>(heapRegexp), string);
+ } label Slow deferred {
+ // 2. If regexp is neither undefined nor null, then
+ if (regexp != Undefined && regexp != Null) {
+ try {
+ // a. Let fn be ? GetMethod(regexp, @@match/@@search).
+ // b. If fn is not undefined, then
+ const fn = GetMethod(regexp, functor.FnSymbol())
+ otherwise FnSymbolIsNullOrUndefined;
+ // i. Return ? Call(fn, regexp, « O »).
+ return Call(context, fn, regexp, receiver);
+ } label FnSymbolIsNullOrUndefined {}
+ }
+
+ // 3. Let string be ? ToString(O).
+ const string = ToString_Inline(receiver);
+
+ // 4. Let rx be ? RegExpCreate(regexp, undefined).
+ const rx = regexp::RegExpCreate(context, regexp, kEmptyString);
+
+ // 5. Return ? Invoke(rx, @@match/@@search, « string »).
+ const fn = GetProperty(rx, functor.FnSymbol());
+ return Call(context, fn, rx, string);
+ }
+}
+
+// https://tc39.es/ecma262/#sec-string.prototype.match
+transitioning javascript builtin
+StringPrototypeMatch(
+ js-implicit context: NativeContext, receiver: JSAny)(regexp: JSAny): JSAny {
+ return StringMatchSearch(
+ regexp, StringMatchFunctor{}, 'String.prototype.match');
+}
+
+// https://tc39.es/ecma262/#sec-string.prototype.search
+transitioning javascript builtin
+StringPrototypeSearch(
+ js-implicit context: NativeContext, receiver: JSAny)(regexp: JSAny): JSAny {
+ return StringMatchSearch(
+ regexp, StringSearchFunctor{}, 'String.prototype.search');
+}
+}
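
The StringMatchFunctor/StringSearchFunctor structs above exist so that one generic StringMatchSearch<F> macro can serve both @@match and @@search. A rough C++ analogue of that pattern, with invented names and trivial fast paths purely for illustration, looks like this:

#include <cstdio>
#include <string>

// Hypothetical functors: each supplies the pieces that differ between the two
// builtins (which symbol to dispatch on, and the fast path to run).
struct MatchFunctor {
  const char* SymbolName() const { return "@@match"; }
  bool CanCallFast(const std::string& s) const { return !s.empty(); }
  int CallFast(const std::string& s) const { return static_cast<int>(s.size()); }
};

struct SearchFunctor {
  const char* SymbolName() const { return "@@search"; }
  bool CanCallFast(const std::string& s) const { return !s.empty(); }
  int CallFast(const std::string& s) const { return s[0] == 'a' ? 0 : -1; }
};

// One generic driver, instantiated per functor, mirrors StringMatchSearch<F>.
template <typename F>
int MatchSearchDriver(const std::string& subject, F functor) {
  if (functor.CanCallFast(subject)) {
    return functor.CallFast(subject);      // fast path
  }
  std::printf("slow path via %s lookup\n", functor.SymbolName());
  return -1;                               // stand-in for the generic lookup
}

int main() {
  std::printf("%d\n", MatchSearchDriver("abc", MatchFunctor{}));   // 3
  std::printf("%d\n", MatchSearchDriver("abc", SearchFunctor{}));  // 0
  std::printf("%d\n", MatchSearchDriver("", MatchFunctor{}));      // slow path, -1
}
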
diff --git a/deps/v8/src/builtins/string-replaceall.tq b/deps/v8/src/builtins/string-replaceall.tq
index 30fa7745ab..cd670208ad 100644
--- a/deps/v8/src/builtins/string-replaceall.tq
+++ b/deps/v8/src/builtins/string-replaceall.tq
@@ -9,52 +9,6 @@ extern macro ReplaceSymbolConstant(): Symbol;
extern macro StringBuiltinsAssembler::GetSubstitution(
implicit context: Context)(String, Smi, Smi, String): String;
-extern builtin
-StringIndexOf(implicit context: Context)(String, String, Smi): Smi;
-
-// TODO(tebbi): This could be replaced with a fast C-call to StringSearchRaw.
-macro AbstractStringIndexOf<A: type, B: type>(
- string: ConstSlice<A>, searchString: ConstSlice<B>, fromIndex: Smi): Smi {
- for (let i: intptr = SmiUntag(fromIndex);
- i <= string.length - searchString.length; i++) {
- if (IsSubstringAt(string, searchString, i)) {
- return SmiTag(i);
- }
- }
- return -1;
-}
-
-struct AbstractStringIndexOfFunctor {
- fromIndex: Smi;
-}
-// Ideally, this would be a method of AbstractStringIndexOfFunctor, but
-// currently methods don't support templates.
-macro Call<A: type, B: type>(
- self: AbstractStringIndexOfFunctor, string: ConstSlice<A>,
- searchStr: ConstSlice<B>): Smi {
- return AbstractStringIndexOf(string, searchStr, self.fromIndex);
-}
-
-macro AbstractStringIndexOf(implicit context: Context)(
- string: String, searchString: String, fromIndex: Smi): Smi {
- // Special case the empty string.
- const searchStringLength = searchString.length_intptr;
- const stringLength = string.length_intptr;
- if (searchStringLength == 0 && SmiUntag(fromIndex) <= stringLength) {
- return fromIndex;
- }
-
- // Don't bother to search if the searchString would go past the end
- // of the string. This is actually necessary because of runtime
- // checks.
- if (SmiUntag(fromIndex) + searchStringLength > stringLength) {
- return -1;
- }
-
- return TwoStringsToSlices<Smi>(
- string, searchString, AbstractStringIndexOfFunctor{fromIndex: fromIndex});
-}
-
transitioning macro
ThrowIfNotGlobal(implicit context: Context)(searchValue: JSAny): void {
let shouldThrow: bool;
diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq
index b258293a96..970fbd0862 100644
--- a/deps/v8/src/builtins/wasm.tq
+++ b/deps/v8/src/builtins/wasm.tq
@@ -16,8 +16,8 @@ extern runtime WasmFunctionTableGet(
extern runtime WasmFunctionTableSet(
Context, WasmInstanceObject, Smi, Smi, Object): JSAny;
extern runtime ThrowWasmError(Context, Smi): JSAny;
-extern runtime Throw(Context, Object): JSAny;
-extern runtime ReThrow(Context, Object): JSAny;
+extern runtime WasmThrow(Context, Object, FixedArray): JSAny;
+extern runtime WasmReThrow(Context, Object): JSAny;
extern runtime WasmTriggerTierUp(Context, WasmInstanceObject): JSAny;
extern runtime WasmStackGuard(Context): JSAny;
extern runtime ThrowWasmStackOverflow(Context): JSAny;
@@ -192,13 +192,19 @@ builtin WasmRefFunc(index: uint32): Object {
}
}
-builtin WasmThrow(exception: Object): JSAny {
- tail runtime::Throw(LoadContextFromFrame(), exception);
+builtin WasmAllocateFixedArray(size: intptr): FixedArray {
+ if (size == 0) return kEmptyFixedArray;
+ return UnsafeCast<FixedArray>(AllocateFixedArray(
+ ElementsKind::PACKED_ELEMENTS, size, AllocationFlag::kNone));
+}
+
+builtin WasmThrow(tag: Object, values: FixedArray): JSAny {
+ tail runtime::WasmThrow(LoadContextFromFrame(), tag, values);
}
builtin WasmRethrow(exception: Object): JSAny {
if (exception == Null) tail ThrowWasmTrapRethrowNull();
- tail runtime::ReThrow(LoadContextFromFrame(), exception);
+ tail runtime::WasmReThrow(LoadContextFromFrame(), exception);
}
builtin WasmTriggerTierUp(): JSAny {
@@ -231,6 +237,11 @@ builtin WasmAllocateJSArray(implicit context: Context)(size: Smi): JSArray {
return AllocateJSArray(ElementsKind::PACKED_ELEMENTS, map, size, size);
}
+builtin WasmAllocatePair(first: Object, second: Object): Tuple2 {
+ const tuple2Map: Map = %GetClassMapConstant<Tuple2>();
+ return new Tuple2{map: tuple2Map, value1: first, value2: second};
+}
+
builtin WasmAllocateRtt(typeIndex: intptr, parent: Map): Map {
tail runtime::WasmAllocateRtt(
LoadContextFromFrame(), SmiTag(typeIndex), parent);
@@ -273,6 +284,23 @@ transitioning builtin WasmAllocateObjectWrapper(implicit context: Context)(
return wrapper;
}
+builtin WasmSubtypeCheck(objectSupertypes: FixedArray, rtt: Map): int32 {
+ const rttSupertypeLength: Smi =
+ %RawDownCast<WasmTypeInfo>(
+ rtt.constructor_or_back_pointer_or_native_context)
+ .supertypes.length;
+
+ if (objectSupertypes.length <= rttSupertypeLength) {
+ return 0;
+ }
+
+ const supertype: Map = %RawDownCast<Map>(
+ LoadFixedArrayElement(objectSupertypes, rttSupertypeLength));
+
+ if (supertype == rtt) return 1;
+ return 0;
+}
+
builtin WasmInt32ToNumber(value: int32): Number {
return ChangeInt32ToTagged(value);
}
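
WasmSubtypeCheck above turns the subtype question into one length comparison plus one array load by indexing the object's supertype array at the rtt's own supertype-array length. A small, hypothetical C++ sketch of that indexing (not V8's data layout) for illustration:

#include <cstdio>
#include <vector>

// Each type carries an array of its supertypes ordered from the root down, so
// the check is constant time regardless of hierarchy depth. Whether the array
// includes the type itself is a V8 detail not shown here; this sketch only
// mirrors the indexing in the Torque above.
struct TypeInfo {
  std::vector<const TypeInfo*> supertypes;  // root-first chain
};

static bool IsSubtypeOf(const TypeInfo& object_type, const TypeInfo& rtt) {
  size_t rtt_depth = rtt.supertypes.size();
  if (object_type.supertypes.size() <= rtt_depth) return false;  // chain too short
  return object_type.supertypes[rtt_depth] == &rtt;              // exact slot must match
}

int main() {
  TypeInfo root, middle, leaf;
  middle.supertypes = {&root};
  leaf.supertypes = {&root, &middle};
  std::printf("%d %d\n", IsSubtypeOf(leaf, middle), IsSubtypeOf(middle, leaf));  // 1 0
}
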
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 2ee03f0026..58a897821d 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -8,6 +8,8 @@
#include "src/base/bits-iterator.h"
#include "src/base/iterator.h"
#include "src/codegen/code-factory.h"
+#include "src/common/globals.h"
+#include "src/objects/code.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
@@ -42,8 +44,9 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
RelocInfo::CODE_TARGET);
}
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
- Runtime::FunctionId function_id) {
+static void GenerateTailCallToReturnedCode(
+ MacroAssembler* masm, Runtime::FunctionId function_id,
+ JumpMode jump_mode = JumpMode::kJump) {
// ----------- S t a t e -------------
// -- rax : actual argument count
// -- rdx : new target (preserved for callee)
@@ -70,7 +73,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Pop(kJavaScriptCallTargetRegister);
}
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
- __ JumpCodeObject(rcx);
+ __ JumpCodeObject(rcx, jump_mode);
}
namespace {
@@ -382,6 +385,12 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
{
Operand c_entry_fp_operand = masm->ExternalReferenceAsOperand(c_entry_fp);
__ Push(c_entry_fp_operand);
+
+    // Clear c_entry_fp, now that we've pushed its previous value to the stack.
+ // If the c_entry_fp is not already zero and we don't clear it, the
+ // SafeStackFrameIterator will assume we are executing C++ and miss the JS
+ // frames on top.
+ __ movq(c_entry_fp_operand, Immediate(0));
}
// Store the context address in the previously-reserved slot.
@@ -636,12 +645,18 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
__ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}
-static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
- Register sfi_data,
- Register scratch1) {
+static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1,
+ Label* is_baseline) {
Label done;
- __ CmpObjectType(sfi_data, INTERPRETER_DATA_TYPE, scratch1);
+ __ LoadMap(scratch1, sfi_data);
+
+ __ CmpInstanceType(scratch1, BASELINE_DATA_TYPE);
+ __ j(equal, is_baseline);
+
+ __ CmpInstanceType(scratch1, INTERPRETER_DATA_TYPE);
__ j(not_equal, &done, Label::kNear);
__ LoadTaggedPointerField(
@@ -739,13 +754,22 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
+ Label is_baseline, ok;
__ LoadTaggedPointerField(
rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoBytecode(masm, rcx, kScratchRegister);
+ GetSharedFunctionInfoBytecodeOrBaseline(masm, rcx, kScratchRegister,
+ &is_baseline);
__ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
__ Assert(equal, AbortReason::kMissingBytecodeArray);
+ __ jmp(&ok);
+
+ __ bind(&is_baseline);
+ __ CmpObjectType(rcx, BASELINE_DATA_TYPE, rcx);
+ __ Assert(equal, AbortReason::kMissingBytecodeArray);
+
+ __ bind(&ok);
}
// Resume (Ignition/TurboFan) generator object.
@@ -820,7 +844,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ movl(params_size,
FieldOperand(params_size, BytecodeArray::kParameterSizeOffset));
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
Register actual_params_size = scratch2;
// Compute the size of the actual parameters + receiver (in bytes).
__ movq(actual_params_size,
@@ -836,7 +859,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ j(greater_equal, &corrected_args_count, Label::kNear);
__ movq(params_size, actual_params_size);
__ bind(&corrected_args_count);
-#endif
// Leave the frame (also dropping the register file).
__ leave();
@@ -895,7 +917,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
Register optimized_code_entry,
- Register scratch1, Register scratch2) {
+ Register scratch1, Register scratch2,
+ JumpMode jump_mode) {
// ----------- S t a t e -------------
// -- rax : actual argument count
// -- rdx : new target (preserved for callee if needed, and caller)
@@ -925,13 +948,14 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
scratch1, scratch2);
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ Move(rcx, optimized_code_entry);
- __ JumpCodeObject(rcx);
+ __ JumpCodeObject(rcx, jump_mode);
// Optimized code slot contains deoptimized code or code is cleared and
// optimized code marker isn't updated. Evict the code, update the marker
// and re-enter the closure's code.
__ bind(&heal_optimized_code_slot);
- GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
+ GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot,
+ jump_mode);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -978,13 +1002,13 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// Update table to the wide scaled table.
__ addq(bytecode_size_table,
- Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount));
+ Immediate(kByteSize * interpreter::Bytecodes::kBytecodeCount));
__ jmp(&process_bytecode, Label::kNear);
__ bind(&extra_wide);
// Update table to the extra wide scaled table.
__ addq(bytecode_size_table,
- Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
+ Immediate(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
__ bind(&process_bytecode);
@@ -1009,12 +1033,51 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
- __ addl(bytecode_offset,
- Operand(bytecode_size_table, bytecode, times_int_size, 0));
+ __ movzxbl(kScratchRegister,
+ Operand(bytecode_size_table, bytecode, times_1, 0));
+ __ addl(bytecode_offset, kScratchRegister);
__ bind(&end);
}
+// Read off the optimization state in the feedback vector and check if there
+// is optimized code or an optimization marker that needs to be processed.
+static void LoadOptimizationStateAndJumpIfNeedsProcessing(
+ MacroAssembler* masm, Register optimization_state, Register feedback_vector,
+ Label* has_optimized_code_or_marker) {
+ __ RecordComment("[ Check optimization state");
+
+ __ movl(optimization_state,
+ FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+ __ testl(
+ optimization_state,
+ Immediate(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
+ __ j(not_zero, has_optimized_code_or_marker);
+
+ __ RecordComment("]");
+}
+
+static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
+ MacroAssembler* masm, Register optimization_state, Register feedback_vector,
+ JumpMode jump_mode = JumpMode::kJump) {
+ Label maybe_has_optimized_code;
+ __ testl(
+ optimization_state,
+ Immediate(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+ __ j(zero, &maybe_has_optimized_code);
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+
+ __ bind(&maybe_has_optimized_code);
+ Register optimized_code_entry = optimization_state;
+ __ LoadAnyTaggedField(
+ optimized_code_entry,
+ FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, r11, r15, jump_mode);
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
@@ -1041,8 +1104,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadTaggedPointerField(
kInterpreterBytecodeArrayRegister,
FieldOperand(kScratchRegister, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister,
- kScratchRegister);
+
+ Label is_baseline;
+ GetSharedFunctionInfoBytecodeOrBaseline(
+ masm, kInterpreterBytecodeArrayRegister, kScratchRegister, &is_baseline);
// The bytecode array could have been flushed from the shared function info,
// if so, call into CompileLazy.
@@ -1060,23 +1125,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, setup the stack frame.
- __ LoadTaggedPointerField(
- rcx, FieldOperand(feedback_vector, HeapObject::kMapOffset));
+ __ LoadMap(rcx, feedback_vector);
__ CmpInstanceType(rcx, FEEDBACK_VECTOR_TYPE);
__ j(not_equal, &push_stack_frame);
- // Read off the optimization state in the feedback vector.
- Register optimization_state = rcx;
- __ movl(optimization_state,
- FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
-
- // Check if there is optimized code or a optimization marker that needs to be
- // processed.
+ // Check for an optimization marker.
Label has_optimized_code_or_marker;
- __ testl(
- optimization_state,
- Immediate(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
- __ j(not_zero, &has_optimized_code_or_marker);
+ Register optimization_state = rcx;
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector, &has_optimized_code_or_marker);
Label not_optimized;
__ bind(&not_optimized);
@@ -1228,25 +1285,43 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ int3(); // Should not return.
__ bind(&has_optimized_code_or_marker);
- Label maybe_has_optimized_code;
-
- __ testl(
- optimization_state,
- Immediate(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
- __ j(zero, &maybe_has_optimized_code);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ feedback_vector);
- Register optimization_marker = optimization_state;
- __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
- MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
- // Fall through if there's no runnable optimized code.
- __ jmp(&not_optimized);
+ __ bind(&is_baseline);
+ {
+ // Load the feedback vector from the closure.
+ __ LoadTaggedPointerField(
+ feedback_vector,
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+
+ Label install_baseline_code;
+ // Check if feedback vector is valid. If not, call prepare for baseline to
+ // allocate it.
+ __ LoadMap(rcx, feedback_vector);
+ __ CmpInstanceType(rcx, FEEDBACK_VECTOR_TYPE);
+ __ j(not_equal, &install_baseline_code);
+
+ // Check for an optimization marker.
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector,
+ &has_optimized_code_or_marker);
+
+ // Load the baseline code into the closure.
+ __ LoadTaggedPointerField(rcx,
+ FieldOperand(kInterpreterBytecodeArrayRegister,
+ BaselineData::kBaselineCodeOffset));
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
+ ReplaceClosureCodeWithOptimizedCode(masm, rcx, closure,
+ kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ JumpCodeObject(rcx);
- __ bind(&maybe_has_optimized_code);
- Register optimized_code_entry = optimization_state;
- __ LoadAnyTaggedField(
- optimized_code_entry,
- FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
- TailCallOptimizedCodeSlot(masm, optimized_code_entry, r11, r15);
+ __ bind(&install_baseline_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
+ }
__ bind(&stack_overflow);
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -1531,6 +1606,141 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
+// static
+void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
+ auto descriptor = Builtins::CallInterfaceDescriptorFor(
+ Builtins::kBaselineOutOfLinePrologue);
+ Register closure = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ // Load the feedback vector from the closure.
+ Register feedback_vector = rbx;
+ __ LoadTaggedPointerField(
+ feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedPointerField(feedback_vector,
+ FieldOperand(feedback_vector, Cell::kValueOffset));
+ if (__ emit_debug_code()) {
+ __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, kScratchRegister);
+ __ Assert(equal, AbortReason::kExpectedFeedbackVector);
+ }
+
+ // Check for an optimization marker.
+ Register optimization_state = rcx;
+ Label has_optimized_code_or_marker;
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector, &has_optimized_code_or_marker);
+
+ // Increment invocation count for the function.
+ __ incl(
+ FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
+
+ Register return_address = r12;
+
+ __ RecordComment("[ Frame Setup");
+ // Save the return address, so that we can push it to the end of the newly
+ // set-up frame once we're done setting it up.
+ __ PopReturnAddressTo(return_address);
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::BASELINE);
+
+ __ Push(descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kCalleeContext)); // Callee's
+ // context.
+ Register callee_js_function = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
+ DCHECK_EQ(callee_js_function, kJSFunctionRegister);
+ __ Push(callee_js_function); // Callee's JS function.
+ __ Push(descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::
+ kJavaScriptCallArgCount)); // Actual argument
+ // count.
+
+ // We'll use the bytecode for both code age/OSR resetting, and pushing onto
+ // the frame, so load it into a register.
+ Register bytecode_array = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
+
+ // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
+  // are 8-bit fields next to each other, so we can optimize by writing a
+  // single 16-bit value. These static asserts guard that our assumption is
+  // valid.
+ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ movw(FieldOperand(bytecode_array, BytecodeArray::kOsrNestingLevelOffset),
+ Immediate(0));
+ __ Push(bytecode_array);
+
+  // Baseline code frames store the feedback vector where the interpreter
+  // would store the bytecode offset.
+ __ Push(feedback_vector);
+
+ __ RecordComment("]");
+
+ Register new_target = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kJavaScriptCallNewTarget);
+
+ __ RecordComment("[ Stack/interrupt check");
+ Label call_stack_guard;
+ {
+ // Stack check. This folds the checks for both the interrupt stack limit
+ // check and the real stack limit into one by just checking for the
+ // interrupt limit. The interrupt limit is either equal to the real stack
+ // limit or tighter. By ensuring we have space until that limit after
+ // building the frame we can quickly precheck both at once.
+ //
+ // TODO(v8:11429): Backport this folded check to the
+ // InterpreterEntryTrampoline.
+ Register frame_size = r11;
+ __ movzxwl(frame_size,
+ FieldOperand(bytecode_array, BytecodeArray::kFrameSizeOffset));
+ __ Move(kScratchRegister, rsp);
+ DCHECK_NE(frame_size, new_target);
+ __ subq(kScratchRegister, frame_size);
+ __ cmpq(kScratchRegister,
+ __ StackLimitAsOperand(StackLimitKind::kInterruptStackLimit));
+ __ j(below, &call_stack_guard);
+ __ RecordComment("]");
+ }
+
+ // Push the return address back onto the stack for return.
+ __ PushReturnAddressFrom(return_address);
+ // Return to caller pushed pc, without any frame teardown.
+ __ Ret();
+
+ __ bind(&has_optimized_code_or_marker);
+ {
+ __ RecordComment("[ Optimized marker check");
+ // Drop the return address, rebalancing the return stack buffer by using
+ // JumpMode::kPushAndReturn. We can't leave the slot and overwrite it on
+ // return since we may do a runtime call along the way that requires the
+ // stack to only contain valid frames.
+ __ Drop(1);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, rcx, feedback_vector,
+ JumpMode::kPushAndReturn);
+ __ Trap();
+ __ RecordComment("]");
+ }
+
+ __ bind(&call_stack_guard);
+ {
+ __ RecordComment("[ Stack/interrupt call");
+ {
+ // Push the baseline code return address now, as if it had been pushed by
+ // the call to this builtin.
+ __ PushReturnAddressFrom(return_address);
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Save incoming new target or generator
+ __ Push(new_target);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ __ Pop(new_target);
+ }
+
+ // Return to caller pushed pc, without any frame teardown.
+ __ Ret();
+ __ RecordComment("]");
+ }
+}
+
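
The folded stack check in this prologue can be summarized by the following sketch, assuming a downward-growing stack; it is an illustration of the comment above, not V8 code:

#include <cstdint>

// The interrupt limit is at or above the real stack limit, so one comparison
// covers both: if sp minus the frame size is still at or above the interrupt
// limit, the frame fits and no interrupt is pending; otherwise the code falls
// back to the Runtime::kStackGuard call.
bool FrameFitsAndNoInterrupt(uintptr_t sp, uintptr_t frame_size,
                             uintptr_t interrupt_stack_limit) {
  return sp - frame_size >= interrupt_stack_limit;
}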
namespace {
void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
bool java_script_builtin,
@@ -1618,6 +1828,12 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
__ ret(1 * kSystemPointerSize); // Remove rax.
}
+void Builtins::Generate_TailCallOptimizedCodeSlot(MacroAssembler* masm) {
+ Register optimized_code_entry = kJavaScriptCallCodeStartRegister;
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, r11, r15,
+ JumpMode::kJump);
+}
+
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -1835,147 +2051,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ pushq(rbp);
- __ movq(rbp, rsp);
-
- // Store the arguments adaptor context sentinel.
- __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Push the function on the stack.
- __ Push(rdi);
-
- // Preserve the number of arguments on the stack. Must preserve rax,
- // rbx and rcx because these registers are used when copying the
- // arguments and the receiver.
- __ SmiTag(r8, rax);
- __ Push(r8);
-
- __ Push(Immediate(0)); // Padding.
-}
-
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
- // Retrieve the number of arguments from the stack. Number is a Smi.
- __ movq(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- // Leave the frame.
- __ movq(rsp, rbp);
- __ popq(rbp);
-
- // Remove caller arguments from the stack.
- __ PopReturnAddressTo(rcx);
- SmiIndex index = masm->SmiToIndex(rbx, rbx, kSystemPointerSizeLog2);
- __ leaq(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize));
- __ PushReturnAddressFrom(rcx);
-}
-
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : actual number of arguments
- // -- rbx : expected number of arguments
- // -- rdx : new target (passed through to callee)
- // -- rdi : function (passed through to callee)
- // -----------------------------------
-
- Label dont_adapt_arguments, stack_overflow;
- __ cmpq(rbx, Immediate(kDontAdaptArgumentsSentinel));
- __ j(equal, &dont_adapt_arguments);
- __ LoadTaggedPointerField(
- rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-
- // -------------------------------------------
- // Adapt arguments.
- // -------------------------------------------
- {
- EnterArgumentsAdaptorFrame(masm);
- __ StackOverflowCheck(rbx, rcx, &stack_overflow);
-
- Label under_application, over_application, invoke;
- __ cmpq(rax, rbx);
- __ j(less, &under_application, Label::kNear);
-
- // Enough parameters: Actual >= expected.
- __ bind(&over_application);
- {
- // Copy receiver and all expected arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ leaq(r8, Operand(rbp, rbx, times_system_pointer_size, offset));
- __ Set(rax, -1); // account for receiver
-
- Label copy;
- __ bind(&copy);
- __ incq(rax);
- __ Push(Operand(r8, 0));
- __ subq(r8, Immediate(kSystemPointerSize));
- __ cmpq(rax, rbx);
- __ j(less, &copy);
- __ jmp(&invoke, Label::kNear);
- }
-
- // Too few parameters: Actual < expected.
- __ bind(&under_application);
- {
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
- __ movq(r8, rbx);
- __ subq(r8, rax);
- __ bind(&fill);
- __ Push(kScratchRegister);
- __ decq(r8);
- __ j(greater, &fill);
-
- // Copy receiver and all actual arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ leaq(r9, Operand(rbp, rax, times_system_pointer_size, offset));
- __ Set(r8, -1); // account for receiver
-
- Label copy;
- __ bind(&copy);
- __ incq(r8);
- __ Push(Operand(r9, 0));
- __ subq(r9, Immediate(kSystemPointerSize));
- __ cmpq(r8, rax);
- __ j(less, &copy);
-
- // Update actual number of arguments.
- __ movq(rax, rbx);
- }
-
- // Call the entry point.
- __ bind(&invoke);
- // rax : expected number of arguments
- // rdx : new target (passed through to callee)
- // rdi : function (passed through to callee)
- static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
- __ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
- __ CallCodeObject(rcx);
-
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(
- masm->pc_offset());
-
- // Leave frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ ret(0);
- }
-
- // -------------------------------------------
- // Don't adapt arguments.
- // -------------------------------------------
- __ bind(&dont_adapt_arguments);
- static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
- __ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
- __ JumpCodeObject(rcx);
-
- __ bind(&stack_overflow);
- {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ int3();
- }
-}
-
// static
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
@@ -2095,43 +2170,15 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&new_target_constructor);
}
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
- // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
- // code is erased.
- __ movq(rbx, rbp);
- __ movq(r8, Operand(rbp, StandardFrameConstants::kArgCOffset));
-#else
- // Check if we have an arguments adaptor frame below the function frame.
- Label arguments_adaptor, arguments_done;
- __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ cmpq(Operand(rbx, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &arguments_adaptor, Label::kNear);
- {
- __ movq(r8, Operand(rbp, StandardFrameConstants::kFunctionOffset));
- __ LoadTaggedPointerField(
- r8, FieldOperand(r8, JSFunction::kSharedFunctionInfoOffset));
- __ movzxwq(
- r8, FieldOperand(r8, SharedFunctionInfo::kFormalParameterCountOffset));
- __ movq(rbx, rbp);
- }
- __ jmp(&arguments_done, Label::kNear);
- __ bind(&arguments_adaptor);
- {
- __ SmiUntag(r8,
- Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- }
- __ bind(&arguments_done);
-#endif
-
Label stack_done, stack_overflow;
+ __ movq(r8, Operand(rbp, StandardFrameConstants::kArgCOffset));
__ subl(r8, rcx);
__ j(less_equal, &stack_done);
{
// ----------- S t a t e -------------
// -- rax : the number of arguments already in the stack (not including the
// receiver)
- // -- rbx : point to the caller stack frame
+ // -- rbp : point to the caller stack frame
// -- rcx : start index (to support rest parameters)
// -- rdx : the new target (for [[Construct]] calls)
// -- rdi : the target to call (can be any Object)
@@ -2172,13 +2219,13 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ leaq(rcx, Operand(rcx, times_system_pointer_size,
CommonFrameConstants::kFixedFrameSizeAboveFp +
kSystemPointerSize));
- __ addq(rbx, rcx);
+ __ addq(rcx, rbp);
// Copy the additional caller arguments onto the stack.
// TODO(victorgomes): Consider using forward order as potentially more cache
// friendly.
{
- Register src = rbx, dest = r9, num = r8;
+ Register src = rcx, dest = r9, num = r8;
Label loop;
__ bind(&loop);
__ decq(num);
@@ -2426,9 +2473,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
Label non_callable;
__ JumpIfSmi(rdi, &non_callable);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ LoadMap(rcx, rdi);
+ __ CmpInstanceTypeRange(rcx, FIRST_JS_FUNCTION_TYPE, LAST_JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
- RelocInfo::CODE_TARGET, equal);
+ RelocInfo::CODE_TARGET, below_equal);
__ CmpInstanceType(rcx, JS_BOUND_FUNCTION_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
@@ -2540,9 +2588,9 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ j(zero, &non_constructor);
// Dispatch based on instance type.
- __ CmpInstanceType(rcx, JS_FUNCTION_TYPE);
+ __ CmpInstanceTypeRange(rcx, FIRST_JS_FUNCTION_TYPE, LAST_JS_FUNCTION_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET, equal);
+ RelocInfo::CODE_TARGET, below_equal);
// Only dispatch to bound functions after checking whether they are
// constructors.
@@ -2572,7 +2620,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+namespace {
+void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
@@ -2586,9 +2635,11 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
__ bind(&skip);
- // Drop the handler frame that is be sitting on top of the actual
- // JavaScript frame. This is the case then OSR is triggered from bytecode.
- __ leave();
+ if (is_interpreter) {
+    // Drop the handler frame that is sitting on top of the actual
+    // JavaScript frame. This is the case when OSR is triggered from bytecode.
+ __ leave();
+ }
// Load deoptimization data from the code object.
__ LoadTaggedPointerField(rbx,
@@ -2608,6 +2659,15 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// And "return" to the OSR entry point of the function.
__ ret(0);
}
+} // namespace
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+ return OnStackReplacement(masm, true);
+}
+
+void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+ return OnStackReplacement(masm, false);
+}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was pushed to the stack by the caller as int32.
@@ -2618,7 +2678,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
- // Save all parameter registers (see wasm-linkage.cc). They might be
+ // Save all parameter registers (see wasm-linkage.h). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
static_assert(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs ==
@@ -2866,6 +2926,13 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// underlying register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
+ // Clear c_entry_fp, like we do in `LeaveExitFrame`.
+ ExternalReference c_entry_fp_address = ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, masm->isolate());
+ Operand c_entry_fp_operand =
+ masm->ExternalReferenceAsOperand(c_entry_fp_address);
+ __ movq(c_entry_fp_operand, Immediate(0));
+
// Compute the handler entry address and jump to it.
__ movq(rdi,
masm->ExternalReferenceAsOperand(pending_handler_entrypoint_address));
diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS
index 332c1705b5..641ed9c403 100644
--- a/deps/v8/src/codegen/OWNERS
+++ b/deps/v8/src/codegen/OWNERS
@@ -10,10 +10,9 @@ jkummerow@chromium.org
leszeks@chromium.org
mslekova@chromium.org
mvstanton@chromium.org
+mythria@chromium.org
neis@chromium.org
rmcilroy@chromium.org
sigurds@chromium.org
solanes@chromium.org
-tebbi@chromium.org
-titzer@chromium.org
-mythria@chromium.org
+zhin@chromium.org
diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index ef8a4669c5..6af924fa47 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -43,6 +43,7 @@
#include "src/base/overflowing-math.h"
#include "src/codegen/arm/assembler-arm-inl.h"
#include "src/codegen/assembler-inl.h"
+#include "src/codegen/machine-type.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/string-constants.h"
#include "src/deoptimizer/deoptimizer.h"
@@ -247,6 +248,12 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
DCHECK_IMPLIES(IsSupported(ARMv7_SUDIV), IsSupported(ARMv7));
DCHECK_IMPLIES(IsSupported(ARMv8), IsSupported(ARMv7_SUDIV));
+
+ // Set a static value on whether Simd is supported.
+ // This variable is only used for certain archs to query SupportWasmSimd128()
+ // at runtime in builtins using an extern ref. Other callers should use
+ // CpuFeatures::SupportWasmSimd128().
+ CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128();
}
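
The caching pattern introduced here amounts to the simplified sketch below; the class and detection function are stand-ins, the point being that builtins can later read the flag directly (for example through an external reference) without calling into C++:

class CpuFeaturesModel {
 public:
  static void Probe() {
    // ... detect CPU features ...
    supports_wasm_simd_128_ = DetectWasmSimd128();  // cached once at probe time
  }
  static bool supports_wasm_simd_128_;  // read wherever a runtime check is needed

 private:
  static bool DetectWasmSimd128() { return false; }  // placeholder detection
};

bool CpuFeaturesModel::supports_wasm_simd_128_ = false;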
void CpuFeatures::PrintTarget() {
@@ -3994,6 +4001,7 @@ enum UnaryOp {
VRSQRTE,
VPADDL_S,
VPADDL_U,
+ VCEQ0,
VCLT0,
VCNT
};
@@ -4070,6 +4078,10 @@ static Instr EncodeNeonUnaryOp(UnaryOp op, NeonRegType reg_type, NeonSize size,
case VPADDL_U:
op_encoding = 0x5 * B7;
break;
+ case VCEQ0:
+ // Only support integers.
+ op_encoding = 0x1 * B16 | 0x2 * B7;
+ break;
case VCLT0:
// Only support signed integers.
op_encoding = 0x1 * B16 | 0x4 * B7;
@@ -4803,6 +4815,15 @@ void Assembler::vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
emit(EncodeNeonBinOp(VCEQ, size, dst, src1, src2));
}
+void Assembler::vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
+ int value) {
+ DCHECK(IsEnabled(NEON));
+ DCHECK_EQ(0, value);
+ // Qd = vceq(Qn, Qm, #0) Vector Compare Equal to Zero.
+ // Instruction details available in ARM DDI 0406C.d, A8-847.
+ emit(EncodeNeonUnaryOp(VCEQ0, NEON_Q, size, dst.code(), src1.code()));
+}
+
void Assembler::vcge(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
@@ -5397,6 +5418,21 @@ Register UseScratchRegisterScope::Acquire() {
return reg;
}
+LoadStoreLaneParams::LoadStoreLaneParams(MachineRepresentation rep,
+ uint8_t laneidx) {
+ if (rep == MachineRepresentation::kWord8) {
+ *this = LoadStoreLaneParams(laneidx, Neon8, 8);
+ } else if (rep == MachineRepresentation::kWord16) {
+ *this = LoadStoreLaneParams(laneidx, Neon16, 4);
+ } else if (rep == MachineRepresentation::kWord32) {
+ *this = LoadStoreLaneParams(laneidx, Neon32, 2);
+ } else if (rep == MachineRepresentation::kWord64) {
+ *this = LoadStoreLaneParams(laneidx, Neon64, 1);
+ } else {
+ UNREACHABLE();
+ }
+}
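
A hypothetical standalone version of this lane remapping is sketched below; since a Q register aliases two D registers, lane i of a Q register is lane i % lanes_per_d of either the low or the high D register:

#include <cstdint>

struct DLane {
  bool low_d_reg;  // true: low D register, false: high D register
  uint8_t lane;    // lane index within that D register
};

DLane MapQLaneToD(uint8_t q_lane, uint8_t lanes_per_d) {
  return {q_lane < lanes_per_d, static_cast<uint8_t>(q_lane % lanes_per_d)};
}

// For 32-bit lanes lanes_per_d is 2, so Q lane 3 maps to lane 1 of the high
// D register: MapQLaneToD(3, 2) yields {false, 1}.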
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h
index eecb40b027..456ac03f92 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.h
+++ b/deps/v8/src/codegen/arm/assembler-arm.h
@@ -41,6 +41,7 @@
#define V8_CODEGEN_ARM_ASSEMBLER_ARM_H_
#include <stdio.h>
+
#include <memory>
#include <vector>
@@ -48,6 +49,7 @@
#include "src/codegen/arm/register-arm.h"
#include "src/codegen/assembler.h"
#include "src/codegen/constant-pool.h"
+#include "src/codegen/machine-type.h"
#include "src/numbers/double.h"
#include "src/utils/boxed-float.h"
@@ -949,6 +951,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vceq(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2);
+ void vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src, int value);
void vcge(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vcge(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2);
@@ -1394,6 +1397,25 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
VfpRegList old_available_vfp_;
};
+// Helper struct for load lane and store lane to indicate which opcode to use
+// and what memory size to be encoded in the opcode, and the new lane index.
+class LoadStoreLaneParams {
+ public:
+ bool low_op;
+ NeonSize sz;
+ uint8_t laneidx;
+  // The register mapping on ARM (1 Q to 2 D) means that loading/storing high
+ // lanes of a Q register is equivalent to loading/storing the high D reg,
+ // modulo number of lanes in a D reg. This constructor decides, based on the
+ // laneidx and load/store size, whether the low or high D reg is accessed, and
+ // what the new lane index is.
+ LoadStoreLaneParams(MachineRepresentation rep, uint8_t laneidx);
+
+ private:
+ LoadStoreLaneParams(uint8_t laneidx, NeonSize sz, int lanes)
+ : low_op(laneidx < lanes), sz(sz), laneidx(laneidx % lanes) {}
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/arm/interface-descriptors-arm.cc b/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
index 4363a63552..25063b2a32 100644
--- a/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
@@ -86,6 +86,15 @@ const Register ApiGetterDescriptor::CallbackRegister() { return r3; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
+const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ UNREACHABLE();
+}
+const Register BaselineLeaveFrameDescriptor::WeightRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ UNREACHABLE();
+}
+
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r0; }
@@ -209,21 +218,22 @@ void CompareDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void Compare_BaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:11421): Implement on this platform.
+ InitializePlatformUnimplemented(data, kParameterCount);
+}
+
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
+void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r1, // JSFunction
- r3, // the new target
- r0, // actual number of arguments
- r2, // expected number of arguments
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ // TODO(v8:11421): Implement on this platform.
+ InitializePlatformUnimplemented(data, kParameterCount);
}
void ApiCallbackDescriptor::InitializePlatformSpecific(
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index d2f206e738..f1831aaea3 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -366,7 +366,8 @@ void TurboAssembler::CallCodeObject(Register code_object) {
Call(code_object);
}
-void TurboAssembler::JumpCodeObject(Register code_object) {
+void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+ DCHECK_EQ(JumpMode::kJump, jump_mode);
LoadCodeObjectEntry(code_object, code_object);
Jump(code_object);
}
@@ -1609,7 +1610,6 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
DCHECK_EQ(actual_parameter_count, r0);
DCHECK_EQ(expected_parameter_count, r2);
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
cmp(expected_parameter_count, Operand(kDontAdaptArgumentsSentinel));
@@ -1663,20 +1663,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
CallRuntime(Runtime::kThrowStackOverflow);
bkpt(0);
}
-#else
- // Check whether the expected and actual arguments count match. If not,
- // setup registers according to contract with ArgumentsAdaptorTrampoline.
- cmp(expected_parameter_count, actual_parameter_count);
- b(eq, &regular_invoke);
- Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
- if (flag == CALL_FUNCTION) {
- Call(adaptor);
- b(done);
- } else {
- Jump(adaptor, RelocInfo::CODE_TARGET);
- }
-#endif
bind(&regular_invoke);
}
@@ -1855,6 +1842,17 @@ void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
cmp(type_reg, Operand(type));
}
+void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
+ InstanceType lower_limit,
+ InstanceType higher_limit) {
+ DCHECK_LT(lower_limit, higher_limit);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ sub(scratch, type_reg, Operand(lower_limit));
+ cmp(scratch, Operand(higher_limit - lower_limit));
+}
+
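
CompareInstanceTypeRange relies on the standard unsigned range-check trick, which is why callers can branch on a single "ls" / below_equal condition; a one-line C++ sketch:

#include <cstdint>

// With unsigned arithmetic, (type - lower) <= (higher - lower) holds exactly
// when lower <= type <= higher, so one sub + cmp replaces two comparisons.
bool InstanceTypeInRange(uint32_t type, uint32_t lower, uint32_t higher) {
  return (type - lower) <= (higher - lower);
}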
void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -2155,9 +2153,11 @@ void MacroAssembler::AssertFunction(Register object) {
tst(object, Operand(kSmiTagMask));
Check(ne, AbortReason::kOperandIsASmiAndNotAFunction);
push(object);
- CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
+ LoadMap(object, object);
+ CompareInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_TYPE);
pop(object);
- Check(eq, AbortReason::kOperandIsNotAFunction);
+ Check(ls, AbortReason::kOperandIsNotAFunction);
}
}
@@ -2630,6 +2630,62 @@ void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
void TurboAssembler::Trap() { stop(); }
void TurboAssembler::DebugBreak() { stop(); }
+void TurboAssembler::I64x2BitMask(Register dst, QwNeonRegister src) {
+ UseScratchRegisterScope temps(this);
+ QwNeonRegister tmp1 = temps.AcquireQ();
+ Register tmp = temps.Acquire();
+
+ vshr(NeonU64, tmp1, src, 63);
+ vmov(NeonU32, dst, tmp1.low(), 0);
+ vmov(NeonU32, tmp, tmp1.high(), 0);
+ add(dst, dst, Operand(tmp, LSL, 1));
+}
+
+void TurboAssembler::I64x2Eq(QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
+ UseScratchRegisterScope temps(this);
+ Simd128Register scratch = temps.AcquireQ();
+ vceq(Neon32, dst, src1, src2);
+ vrev64(Neon32, scratch, dst);
+ vand(dst, dst, scratch);
+}
+
+void TurboAssembler::I64x2GtS(QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
+ vqsub(NeonS64, dst, src2, src1);
+ vshr(NeonS64, dst, dst, 63);
+}
+
+void TurboAssembler::I64x2GeS(QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
+ vqsub(NeonS64, dst, src1, src2);
+ vshr(NeonS64, dst, dst, 63);
+ vmvn(dst, dst);
+}
+
+void TurboAssembler::V64x2AllTrue(Register dst, QwNeonRegister src) {
+ UseScratchRegisterScope temps(this);
+ QwNeonRegister tmp = temps.AcquireQ();
+ // src = | a | b | c | d |
+ // tmp = | max(a,b) | max(c,d) | ...
+ vpmax(NeonU32, tmp.low(), src.low(), src.high());
+ // tmp = | max(a,b) == 0 | max(c,d) == 0 | ...
+ vceq(Neon32, tmp, tmp, 0);
+ // tmp = | max(a,b) == 0 or max(c,d) == 0 | ...
+ vpmax(NeonU32, tmp.low(), tmp.low(), tmp.low());
+ // dst = (max(a,b) == 0 || max(c,d) == 0)
+ // dst will either be -1 or 0.
+ vmov(NeonS32, dst, tmp.low(), 0);
+ // dst = !dst (-1 -> 0, 0 -> 1)
+ add(dst, dst, Operand(1));
+ // This works because:
+ // !dst
+ // = !(max(a,b) == 0 || max(c,d) == 0)
+ // = max(a,b) != 0 && max(c,d) != 0
+ // = (a != 0 || b != 0) && (c != 0 || d != 0)
+  // = definition of i64x2.all_true.
+}
+
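
Scalar per-lane reference semantics for these helpers are sketched below; for instance, the saturating subtract plus arithmetic shift in I64x2GtS keeps only the sign of src2 - src1, which is exactly the greater-than mask. This is an illustration of the intended results, not the NEON sequences themselves:

#include <cstdint>

int64_t I64x2EqLane(int64_t a, int64_t b) { return a == b ? -1 : 0; }
int64_t I64x2GtSLane(int64_t a, int64_t b) { return a > b ? -1 : 0; }
int64_t I64x2GeSLane(int64_t a, int64_t b) { return a >= b ? -1 : 0; }

// Bit i of the result is the sign bit of lane i.
uint32_t I64x2BitMaskLanes(int64_t lane0, int64_t lane1) {
  return (lane0 < 0 ? 1u : 0u) | (lane1 < 0 ? 2u : 0u);
}

// True when every lane is non-zero.
bool V64x2AllTrueLanes(int64_t lane0, int64_t lane1) {
  return lane0 != 0 && lane1 != 0;
}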
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index 55a7baba7a..54c3e6c941 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -317,7 +317,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
- void JumpCodeObject(Register code_object) override;
+ void JumpCodeObject(Register code_object,
+ JumpMode jump_mode = JumpMode::kJump) override;
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
@@ -565,6 +566,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Define an exception handler and bind a label.
void BindExceptionHandler(Label* label) { bind(label); }
+ // Wasm SIMD helpers. These instructions don't have direct lowering to native
+ // instructions. These helpers allow us to define the optimal code sequence,
+ // and be used in both TurboFan and Liftoff.
+ void I64x2BitMask(Register dst, QwNeonRegister src);
+ void I64x2Eq(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+ void I64x2GtS(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+ void I64x2GeS(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+ void V64x2AllTrue(Register dst, QwNeonRegister src);
+
private:
// Compare single values and then load the fpscr flags to a register.
void VFPCompareAndLoadFlags(const SwVfpRegister src1,
@@ -713,6 +723,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// sets the flags and leaves the object type in the type_reg register.
void CompareInstanceType(Register map, Register type_reg, InstanceType type);
+ // Compare instance type ranges for a map (lower_limit and higher_limit
+ // inclusive).
+ //
+ // Always use unsigned comparisons: ls for a positive result.
+ void CompareInstanceTypeRange(Register map, Register type_reg,
+ InstanceType lower_limit,
+ InstanceType higher_limit);
+
// Compare the object in a register to a value from the root list.
// Acquires a scratch register.
void CompareRoot(Register obj, RootIndex index);
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc
index 335419ebf5..441f299a17 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc
@@ -44,7 +44,7 @@ namespace internal {
namespace {
#ifdef USE_SIMULATOR
-static unsigned SimulatorFeaturesFromCommandLine() {
+unsigned SimulatorFeaturesFromCommandLine() {
if (strcmp(FLAG_sim_arm64_optional_features, "none") == 0) {
return 0;
}
@@ -62,7 +62,7 @@ static unsigned SimulatorFeaturesFromCommandLine() {
}
#endif // USE_SIMULATOR
-static constexpr unsigned CpuFeaturesFromCompiler() {
+constexpr unsigned CpuFeaturesFromCompiler() {
unsigned features = 0;
#if defined(__ARM_FEATURE_JCVT)
features |= 1u << JSCVT;
@@ -70,6 +70,14 @@ static constexpr unsigned CpuFeaturesFromCompiler() {
return features;
}
+constexpr unsigned CpuFeaturesFromTargetOS() {
+ unsigned features = 0;
+#if defined(V8_TARGET_OS_MACOSX)
+ features |= 1u << JSCVT;
+#endif
+ return features;
+}
+
} // namespace
// -----------------------------------------------------------------------------
@@ -79,6 +87,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) {
supported_ |= CpuFeaturesFromCompiler();
+ supported_ |= CpuFeaturesFromTargetOS();
return;
}
@@ -101,6 +110,12 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= CpuFeaturesFromCompiler();
supported_ |= runtime;
#endif // USE_SIMULATOR
+
+ // Set a static value on whether Simd is supported.
+ // This variable is only used for certain archs to query SupportWasmSimd128()
+ // at runtime in builtins using an extern ref. Other callers should use
+ // CpuFeatures::SupportWasmSimd128().
+ CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128();
}
void CpuFeatures::PrintTarget() {}
@@ -565,8 +580,7 @@ void Assembler::bind(Label* label) {
// Internal references do not get patched to an instruction but directly
// to an address.
internal_reference_positions_.push_back(linkoffset);
- PatchingAssembler patcher(options(), reinterpret_cast<byte*>(link), 2);
- patcher.dc64(reinterpret_cast<uintptr_t>(pc_));
+ base::Memcpy(link, &pc_, kSystemPointerSize);
} else {
link->SetImmPCOffsetTarget(options(),
reinterpret_cast<Instruction*>(pc_));
diff --git a/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc b/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc
index fa23770199..246d6fc961 100644
--- a/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc
@@ -86,6 +86,9 @@ const Register ApiGetterDescriptor::CallbackRegister() { return x3; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
+const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() { return x3; }
+const Register BaselineLeaveFrameDescriptor::WeightRegister() { return x4; }
+
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return x0; }
@@ -211,6 +214,15 @@ void CompareDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void Compare_BaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // x1: left operand
+ // x0: right operand
+ // x2: feedback slot
+ Register registers[] = {x1, x0, x2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: left operand
@@ -219,14 +231,12 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
+void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {
- x1, // JSFunction
- x3, // the new target
- x0, // actual number of arguments
- x2, // expected number of arguments
- };
+ // x1: left operand
+ // x0: right operand
+ // x2: feedback slot
+ Register registers[] = {x1, x0, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
index 56be64693d..963f862f92 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
@@ -1277,23 +1277,6 @@ void TurboAssembler::PopCPURegList(CPURegList registers) {
#endif
}
-void TurboAssembler::Push(Handle<HeapObject> handle) {
- UseScratchRegisterScope temps(this);
- Register tmp = temps.AcquireX();
- Mov(tmp, Operand(handle));
- // This is only used in test-heap.cc, for generating code that is not
- // executed. Push a padding slot together with the handle here, to
- // satisfy the alignment requirement.
- Push(padreg, tmp);
-}
-
-void TurboAssembler::Push(Smi smi) {
- UseScratchRegisterScope temps(this);
- Register tmp = temps.AcquireX();
- Mov(tmp, Operand(smi));
- Push(tmp);
-}
-
void TurboAssembler::Claim(int64_t count, uint64_t unit_size) {
DCHECK_GE(count, 0);
uint64_t size = count * unit_size;
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index 69db6d7c0c..7cd6027932 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -1199,10 +1199,6 @@ void MacroAssembler::PeekPair(const CPURegister& dst1, const CPURegister& dst2,
}
void MacroAssembler::PushCalleeSavedRegisters() {
-#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
- Pacibsp();
-#endif
-
{
// Ensure that the macro-assembler doesn't use any scratch registers.
InstructionAccurateScope scope(this);
@@ -1214,20 +1210,26 @@ void MacroAssembler::PushCalleeSavedRegisters() {
stp(d10, d11, tos);
stp(d8, d9, tos);
- STATIC_ASSERT(
- EntryFrameConstants::kCalleeSavedRegisterBytesPushedBeforeFpLrPair ==
- 8 * kSystemPointerSize);
- stp(x29, x30, tos); // fp, lr
-
- STATIC_ASSERT(
- EntryFrameConstants::kCalleeSavedRegisterBytesPushedAfterFpLrPair ==
- 10 * kSystemPointerSize);
-
stp(x27, x28, tos);
stp(x25, x26, tos);
stp(x23, x24, tos);
stp(x21, x22, tos);
stp(x19, x20, tos);
+
+ STATIC_ASSERT(
+ EntryFrameConstants::kCalleeSavedRegisterBytesPushedBeforeFpLrPair ==
+ 18 * kSystemPointerSize);
+
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ // Use the stack pointer's value immediately before pushing the LR as the
+ // context for signing it. This is what the StackFrameIterator expects.
+ pacibsp();
+#endif
+
+ stp(x29, x30, tos); // fp, lr
+
+ STATIC_ASSERT(
+ EntryFrameConstants::kCalleeSavedRegisterBytesPushedAfterFpLrPair == 0);
}
}
@@ -1238,22 +1240,25 @@ void MacroAssembler::PopCalleeSavedRegisters() {
MemOperand tos(sp, 2 * kXRegSize, PostIndex);
+ ldp(x29, x30, tos); // fp, lr
+
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ // The context (stack pointer value) for authenticating the LR here must
+ // match the one used for signing it (see `PushCalleeSavedRegisters`).
+ autibsp();
+#endif
+
ldp(x19, x20, tos);
ldp(x21, x22, tos);
ldp(x23, x24, tos);
ldp(x25, x26, tos);
ldp(x27, x28, tos);
- ldp(x29, x30, tos);
ldp(d8, d9, tos);
ldp(d10, d11, tos);
ldp(d12, d13, tos);
ldp(d14, d15, tos);
}
-
-#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
- Autibsp();
-#endif
}
void TurboAssembler::AssertSpAligned() {
@@ -1405,7 +1410,19 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
+void TurboAssembler::PushRoot(RootIndex index) {
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ LoadRoot(tmp, index);
+ Push(tmp);
+}
+
void TurboAssembler::Move(Register dst, Smi src) { Mov(dst, src); }
+void TurboAssembler::Move(Register dst, MemOperand src) { Ldr(dst, src); }
+void TurboAssembler::Move(Register dst, Register src) {
+ if (dst == src) return;
+ Mov(dst, src);
+}
void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1,
Register src1) {
@@ -1488,9 +1505,10 @@ void MacroAssembler::AssertFunction(Register object) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
-
- CompareObjectType(object, temp, temp, JS_FUNCTION_TYPE);
- Check(eq, AbortReason::kOperandIsNotAFunction);
+ LoadMap(temp, object);
+ CompareInstanceTypeRange(temp, temp, FIRST_JS_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_TYPE);
+ Check(ls, AbortReason::kOperandIsNotAFunction);
}
}
@@ -1883,9 +1901,14 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
void TurboAssembler::LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
Register destination) {
- Ldr(destination,
- MemOperand(kRootRegister,
- IsolateData::builtin_entry_slot_offset(builtin_index)));
+ Ldr(destination, EntryFromBuiltinIndexAsOperand(builtin_index));
+}
+
+MemOperand TurboAssembler::EntryFromBuiltinIndexAsOperand(
+ Builtins::Name builtin_index) {
+ DCHECK(root_array_available());
+ return MemOperand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(builtin_index));
}
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
@@ -1959,7 +1982,8 @@ void TurboAssembler::CallCodeObject(Register code_object) {
Call(code_object);
}
-void TurboAssembler::JumpCodeObject(Register code_object) {
+void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+ DCHECK_EQ(JumpMode::kJump, jump_mode);
LoadCodeObjectEntry(code_object, code_object);
UseScratchRegisterScope temps(this);
@@ -2128,7 +2152,6 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count,
DCHECK_EQ(actual_argument_count, x0);
DCHECK_EQ(formal_parameter_count, x2);
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// If the formal parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
Cmp(formal_parameter_count, Operand(kDontAdaptArgumentsSentinel));
@@ -2222,24 +2245,6 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count,
CallRuntime(Runtime::kThrowStackOverflow);
Unreachable();
}
-#else
- // Check whether the expected and actual arguments count match. The registers
- // are set up according to contract with ArgumentsAdaptorTrampoline.ct.
- // If actual == expected perform a regular invocation.
- Cmp(formal_parameter_count, actual_argument_count);
- B(eq, &regular_invoke);
-
- // The argument counts mismatch, generate a call to the argument adaptor.
- Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
- if (flag == CALL_FUNCTION) {
- Call(adaptor);
- // If the arg counts don't match, no extra code is emitted by
- // MAsm::InvokeFunctionCode and we can just fall through.
- B(done);
- } else {
- Jump(adaptor, RelocInfo::CODE_TARGET);
- }
-#endif
Bind(&regular_invoke);
}
@@ -2475,8 +2480,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// sp[2] : fp
// sp[1] : type
// sp[0] : for alignment
- } else {
- DCHECK_EQ(type, StackFrame::CONSTRUCT);
+ } else if (type == StackFrame::CONSTRUCT) {
Register type_reg = temps.AcquireX();
Mov(type_reg, StackFrame::TypeToMarker(type));
@@ -2492,6 +2496,14 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// sp[2] : fp
// sp[1] : type
// sp[0] : cp
+ } else {
+ DCHECK(StackFrame::IsJavaScript(type));
+ // Just push a minimal "machine frame", saving the frame pointer and return
+ // address, without any markers.
+ Push<TurboAssembler::kSignLR>(lr, fp);
+ Mov(fp, sp);
+ // sp[1] : lr
+ // sp[0] : fp
}
}
@@ -2680,6 +2692,18 @@ void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
Cmp(type_reg, type);
}
+// Sets condition flags based on comparison, and returns type in type_reg.
+void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
+ InstanceType lower_limit,
+ InstanceType higher_limit) {
+ DCHECK_LT(lower_limit, higher_limit);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
+ Ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ Sub(scratch, type_reg, Operand(lower_limit));
+ Cmp(scratch, Operand(higher_limit - lower_limit));
+}
+
void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
// Load the map's "bit field 2".
Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
@@ -2741,6 +2765,15 @@ void TurboAssembler::LoadAnyTaggedField(const Register& destination,
}
}
+void TurboAssembler::LoadTaggedSignedField(const Register& destination,
+ const MemOperand& field_operand) {
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressTaggedSigned(destination, field_operand);
+ } else {
+ Ldr(destination, field_operand);
+ }
+}
+
void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) {
SmiUntag(dst, src);
}
@@ -3408,6 +3441,25 @@ void TurboAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) {
Str(x17, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
}
+void TurboAssembler::I64x2BitMask(Register dst, VRegister src) {
+ UseScratchRegisterScope scope(this);
+ VRegister tmp1 = scope.AcquireV(kFormat2D);
+ Register tmp2 = scope.AcquireX();
+ Ushr(tmp1.V2D(), src.V2D(), 63);
+ Mov(dst.X(), tmp1.D(), 0);
+ Mov(tmp2.X(), tmp1.D(), 1);
+ Add(dst.W(), dst.W(), Operand(tmp2.W(), LSL, 1));
+}
+
+void TurboAssembler::V64x2AllTrue(Register dst, VRegister src) {
+ UseScratchRegisterScope scope(this);
+ VRegister tmp = scope.AcquireV(kFormat2D);
+ Cmeq(tmp.V2D(), src.V2D(), 0);
+ Addp(tmp.D(), tmp);
+ Fcmp(tmp.D(), tmp.D());
+ Cset(dst, eq);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index 88e1502ef9..ef7bc15166 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -15,6 +15,7 @@
#include "src/codegen/arm64/assembler-arm64.h"
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"
+#include "src/objects/tagged-index.h"
// Simulator specific helpers.
#if USE_SIMULATOR
@@ -200,9 +201,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
mov(rd, vn, vn_index);
}
- // This is required for compatibility with architecture independent code.
+ // These are required for compatibility with architecture independent code.
// Remove if not needed.
void Move(Register dst, Smi src);
+ void Move(Register dst, MemOperand src);
+ void Move(Register dst, Register src);
// Move src0 to dst0 and src1 to dst1, handling possible overlaps.
void MovePair(Register dst0, Register src0, Register dst1, Register src1);
@@ -261,8 +264,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
V(faddp, Faddp) \
V(fcvtas, Fcvtas) \
V(fcvtau, Fcvtau) \
+ V(fcvtl, Fcvtl) \
V(fcvtms, Fcvtms) \
V(fcvtmu, Fcvtmu) \
+ V(fcvtn, Fcvtn) \
V(fcvtns, Fcvtns) \
V(fcvtnu, Fcvtnu) \
V(fcvtps, Fcvtps) \
@@ -832,14 +837,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
template <StoreLRMode lr_mode = kDontStoreLR>
void Push(const Register& src0, const VRegister& src1);
- // This is a convenience method for pushing a single Handle<Object>.
- inline void Push(Handle<HeapObject> object);
- inline void Push(Smi smi);
-
- // Aliases of Push and Pop, required for V8 compatibility.
- inline void push(Register src) { Push(src); }
- inline void pop(Register dst) { Pop(dst); }
-
void SaveRegisters(RegList registers);
void RestoreRegisters(RegList registers);
@@ -976,12 +973,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadEntryFromBuiltinIndex(Register builtin_index);
void LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
Register destination);
+ MemOperand EntryFromBuiltinIndexAsOperand(Builtins::Name builtin_index);
void CallBuiltinByIndex(Register builtin_index) override;
void CallBuiltin(int builtin_index);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
- void JumpCodeObject(Register code_object) override;
+ void JumpCodeObject(Register code_object,
+ JumpMode jump_mode = JumpMode::kJump) override;
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
@@ -1269,6 +1268,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Load an object from the root table.
void LoadRoot(Register destination, RootIndex index) override;
+ void PushRoot(RootIndex index);
inline void Ret(const Register& xn = lr);
@@ -1349,6 +1349,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadAnyTaggedField(const Register& destination,
const MemOperand& field_operand);
+ // Loads a field containing a tagged signed value and decompresses it if
+ // necessary.
+ void LoadTaggedSignedField(const Register& destination,
+ const MemOperand& field_operand);
+
// Loads a field containing smi value and untags it.
void SmiUntagField(Register dst, const MemOperand& src);
@@ -1371,6 +1376,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void StoreReturnAddressInWasmExitFrame(Label* return_location);
+ // Wasm SIMD helpers. These instructions don't have direct lowering to native
+ // instructions. These helpers allow us to define the optimal code sequence,
+ // and be used in both TurboFan and Liftoff.
+ void I64x2BitMask(Register dst, VRegister src);
+ void V64x2AllTrue(Register dst, VRegister src);
+
protected:
// The actual Push and Pop implementations. These don't generate any code
// other than that required for the push or pop. This allows
@@ -1908,6 +1919,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// sets the flags and leaves the object type in the type_reg register.
void CompareInstanceType(Register map, Register type_reg, InstanceType type);
+ // Compare instance type ranges for a map (lower_limit and higher_limit
+ // inclusive).
+ //
+ // Always use unsigned comparisons: ls for a positive result.
+ void CompareInstanceTypeRange(Register map, Register type_reg,
+ InstanceType lower_limit,
+ InstanceType higher_limit);
+
// Load the elements kind field from a map, and return it in the result
// register.
void LoadElementsKindFromMap(Register result, Register map);
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index 31620ae965..fbbb0a18da 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -524,8 +524,6 @@ using Simd128Register = VRegister;
// Lists of registers.
class V8_EXPORT_PRIVATE CPURegList {
public:
- CPURegList() = default;
-
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
: list_(CPURegister::ListOf(reg0, regs...)),
diff --git a/deps/v8/src/codegen/assembler-arch.h b/deps/v8/src/codegen/assembler-arch.h
index d56b372504..3569644e52 100644
--- a/deps/v8/src/codegen/assembler-arch.h
+++ b/deps/v8/src/codegen/assembler-arch.h
@@ -23,6 +23,8 @@
#include "src/codegen/mips64/assembler-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/assembler-s390.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/codegen/riscv64/assembler-riscv64.h"
#else
#error Unknown architecture.
#endif
diff --git a/deps/v8/src/codegen/assembler-inl.h b/deps/v8/src/codegen/assembler-inl.h
index 8c81315d50..c04b6d9687 100644
--- a/deps/v8/src/codegen/assembler-inl.h
+++ b/deps/v8/src/codegen/assembler-inl.h
@@ -23,6 +23,8 @@
#include "src/codegen/mips64/assembler-mips64-inl.h"
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/assembler-s390-inl.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/codegen/riscv64/assembler-riscv64-inl.h"
#else
#error Unknown architecture.
#endif
diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc
index f23dccb53e..0b71701d31 100644
--- a/deps/v8/src/codegen/assembler.cc
+++ b/deps/v8/src/codegen/assembler.cc
@@ -122,11 +122,38 @@ class ExternalAssemblerBufferImpl : public AssemblerBuffer {
FATAL("Cannot grow external assembler buffer");
}
+ void* operator new(std::size_t count);
+ void operator delete(void* ptr) noexcept;
+
private:
byte* const start_;
const int size_;
};
+static thread_local std::aligned_storage_t<sizeof(ExternalAssemblerBufferImpl),
+ alignof(ExternalAssemblerBufferImpl)>
+ tls_singleton_storage;
+
+static thread_local bool tls_singleton_taken{false};
+
+void* ExternalAssemblerBufferImpl::operator new(std::size_t count) {
+ DCHECK_EQ(count, sizeof(ExternalAssemblerBufferImpl));
+ if (V8_LIKELY(!tls_singleton_taken)) {
+ tls_singleton_taken = true;
+ return &tls_singleton_storage;
+ }
+ return ::operator new(count);
+}
+
+void ExternalAssemblerBufferImpl::operator delete(void* ptr) noexcept {
+ if (V8_LIKELY(ptr == &tls_singleton_storage)) {
+ DCHECK(tls_singleton_taken);
+ tls_singleton_taken = false;
+ return;
+ }
+ ::operator delete(ptr);
+}
+
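
Generalized, the allocation scheme above looks roughly like the sketch below, assuming (as the change does) that the common case is at most one live buffer per thread; any additional allocation simply falls back to the global heap:

#include <cstddef>
#include <new>
#include <type_traits>

template <typename T>
class ThreadLocalSingletonStorage {
 public:
  static void* Allocate(std::size_t count) {
    if (!taken_) {
      taken_ = true;
      return &storage_;  // fast path: hand out the per-thread slot
    }
    return ::operator new(count);  // rare path: heap allocation
  }

  static void Deallocate(void* ptr) noexcept {
    if (ptr == &storage_) {
      taken_ = false;  // release the per-thread slot
      return;
    }
    ::operator delete(ptr);
  }

 private:
  static thread_local std::aligned_storage_t<sizeof(T), alignof(T)> storage_;
  static thread_local bool taken_;
};

template <typename T>
thread_local std::aligned_storage_t<sizeof(T), alignof(T)>
    ThreadLocalSingletonStorage<T>::storage_;
template <typename T>
thread_local bool ThreadLocalSingletonStorage<T>::taken_ = false;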
} // namespace
std::unique_ptr<AssemblerBuffer> ExternalAssemblerBuffer(void* start,
@@ -181,6 +208,7 @@ CpuFeatureScope::~CpuFeatureScope() {
#endif
bool CpuFeatures::initialized_ = false;
+bool CpuFeatures::supports_wasm_simd_128_ = false;
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::icache_line_size_ = 0;
unsigned CpuFeatures::dcache_line_size_ = 0;
diff --git a/deps/v8/src/codegen/bailout-reason.h b/deps/v8/src/codegen/bailout-reason.h
index c26ae026f5..57bbbca723 100644
--- a/deps/v8/src/codegen/bailout-reason.h
+++ b/deps/v8/src/codegen/bailout-reason.h
@@ -21,6 +21,7 @@ namespace internal {
V(kExpectedOptimizationSentinel, \
"Expected optimized code cell or optimization sentinel") \
V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \
+ V(kExpectedFeedbackVector, "Expected feedback vector") \
V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, \
"The function_data field should be a BytecodeArray on interpreter entry") \
V(kInputStringTooLong, "Input string too long") \
diff --git a/deps/v8/src/codegen/code-comments.cc b/deps/v8/src/codegen/code-comments.cc
index b0271a00fb..746df1a570 100644
--- a/deps/v8/src/codegen/code-comments.cc
+++ b/deps/v8/src/codegen/code-comments.cc
@@ -87,20 +87,5 @@ uint32_t CodeCommentsWriter::section_size() const {
return kOffsetToFirstCommentEntry + static_cast<uint32_t>(byte_count_);
}
-void PrintCodeCommentsSection(std::ostream& out, Address code_comments_start,
- uint32_t code_comments_size) {
- CodeCommentsIterator it(code_comments_start, code_comments_size);
- out << "CodeComments (size = " << it.size() << ")\n";
- if (it.HasCurrent()) {
- out << std::setw(6) << "pc" << std::setw(6) << "len"
- << " comment\n";
- }
- for (; it.HasCurrent(); it.Next()) {
- out << std::hex << std::setw(6) << it.GetPCOffset() << std::dec
- << std::setw(6) << it.GetCommentSize() << " (" << it.GetComment()
- << ")\n";
- }
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/code-comments.h b/deps/v8/src/codegen/code-comments.h
index 5866296051..1c5189aa9a 100644
--- a/deps/v8/src/codegen/code-comments.h
+++ b/deps/v8/src/codegen/code-comments.h
@@ -62,9 +62,6 @@ class V8_EXPORT_PRIVATE CodeCommentsIterator {
Address current_entry_;
};
-void PrintCodeCommentsSection(std::ostream& out, Address code_comments_start,
- uint32_t code_comments_size);
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/code-factory.cc b/deps/v8/src/codegen/code-factory.cc
index 006b6bee16..ceabbac807 100644
--- a/deps/v8/src/codegen/code-factory.cc
+++ b/deps/v8/src/codegen/code-factory.cc
@@ -258,11 +258,6 @@ Callable CodeFactory::FastNewFunctionContext(Isolate* isolate,
}
// static
-Callable CodeFactory::ArgumentAdaptor(Isolate* isolate) {
- return Builtins::CallableFor(isolate, Builtins::kArgumentsAdaptorTrampoline);
-}
-
-// static
Callable CodeFactory::Call(Isolate* isolate, ConvertReceiverMode mode) {
return Callable(isolate->builtins()->Call(mode), CallTrampolineDescriptor{});
}
diff --git a/deps/v8/src/codegen/code-factory.h b/deps/v8/src/codegen/code-factory.h
index 02fc7e4b23..b98c576b9e 100644
--- a/deps/v8/src/codegen/code-factory.h
+++ b/deps/v8/src/codegen/code-factory.h
@@ -68,7 +68,6 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable FastNewFunctionContext(Isolate* isolate,
ScopeType scope_type);
- static Callable ArgumentAdaptor(Isolate* isolate);
static Callable Call(Isolate* isolate,
ConvertReceiverMode mode = ConvertReceiverMode::kAny);
static Callable Call_WithFeedback(Isolate* isolate, ConvertReceiverMode mode);
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index 8957ca3952..76ee8c2d06 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -249,12 +249,12 @@ HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR)
#undef HEAP_CONSTANT_ACCESSOR
-#define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \
- TNode<BoolT> CodeStubAssembler::Is##name(SloppyTNode<Object> value) { \
- return TaggedEqual(value, name##Constant()); \
- } \
- TNode<BoolT> CodeStubAssembler::IsNot##name(SloppyTNode<Object> value) { \
- return TaggedNotEqual(value, name##Constant()); \
+#define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \
+ TNode<BoolT> CodeStubAssembler::Is##name(TNode<Object> value) { \
+ return TaggedEqual(value, name##Constant()); \
+ } \
+ TNode<BoolT> CodeStubAssembler::IsNot##name(TNode<Object> value) { \
+ return TaggedNotEqual(value, name##Constant()); \
}
HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST)
#undef HEAP_CONSTANT_TEST
@@ -320,7 +320,7 @@ TNode<IntPtrT> CodeStubAssembler::IntPtrRoundUpToPowerOfTwo32(
return Signed(IntPtrAdd(value, IntPtrConstant(1)));
}
-TNode<BoolT> CodeStubAssembler::WordIsPowerOfTwo(SloppyTNode<IntPtrT> value) {
+TNode<BoolT> CodeStubAssembler::WordIsPowerOfTwo(TNode<IntPtrT> value) {
intptr_t constant;
if (TryToIntPtrConstant(value, &constant)) {
return BoolConstant(base::bits::IsPowerOfTwo(constant));
@@ -334,7 +334,7 @@ TNode<BoolT> CodeStubAssembler::WordIsPowerOfTwo(SloppyTNode<IntPtrT> value) {
IntPtrConstant(0));
}
-TNode<Float64T> CodeStubAssembler::Float64Round(SloppyTNode<Float64T> x) {
+TNode<Float64T> CodeStubAssembler::Float64Round(TNode<Float64T> x) {
TNode<Float64T> one = Float64Constant(1.0);
TNode<Float64T> one_half = Float64Constant(0.5);
@@ -352,7 +352,7 @@ TNode<Float64T> CodeStubAssembler::Float64Round(SloppyTNode<Float64T> x) {
return TNode<Float64T>::UncheckedCast(var_x.value());
}
-TNode<Float64T> CodeStubAssembler::Float64Ceil(SloppyTNode<Float64T> x) {
+TNode<Float64T> CodeStubAssembler::Float64Ceil(TNode<Float64T> x) {
if (IsFloat64RoundUpSupported()) {
return Float64RoundUp(x);
}
@@ -404,7 +404,7 @@ TNode<Float64T> CodeStubAssembler::Float64Ceil(SloppyTNode<Float64T> x) {
return TNode<Float64T>::UncheckedCast(var_x.value());
}
-TNode<Float64T> CodeStubAssembler::Float64Floor(SloppyTNode<Float64T> x) {
+TNode<Float64T> CodeStubAssembler::Float64Floor(TNode<Float64T> x) {
if (IsFloat64RoundDownSupported()) {
return Float64RoundDown(x);
}
@@ -456,7 +456,7 @@ TNode<Float64T> CodeStubAssembler::Float64Floor(SloppyTNode<Float64T> x) {
return TNode<Float64T>::UncheckedCast(var_x.value());
}
-TNode<Float64T> CodeStubAssembler::Float64RoundToEven(SloppyTNode<Float64T> x) {
+TNode<Float64T> CodeStubAssembler::Float64RoundToEven(TNode<Float64T> x) {
if (IsFloat64RoundTiesEvenSupported()) {
return Float64RoundTiesEven(x);
}
@@ -487,7 +487,7 @@ TNode<Float64T> CodeStubAssembler::Float64RoundToEven(SloppyTNode<Float64T> x) {
return TNode<Float64T>::UncheckedCast(var_result.value());
}
-TNode<Float64T> CodeStubAssembler::Float64Trunc(SloppyTNode<Float64T> x) {
+TNode<Float64T> CodeStubAssembler::Float64Trunc(TNode<Float64T> x) {
if (IsFloat64RoundTruncateSupported()) {
return Float64RoundTruncate(x);
}
@@ -638,7 +638,7 @@ TNode<BoolT> CodeStubAssembler::IsValidPositiveSmi(TNode<IntPtrT> value) {
return UintPtrLessThanOrEqual(value, IntPtrConstant(Smi::kMaxValue));
}
-TNode<Smi> CodeStubAssembler::SmiTag(SloppyTNode<IntPtrT> value) {
+TNode<Smi> CodeStubAssembler::SmiTag(TNode<IntPtrT> value) {
int32_t constant_value;
if (TryToInt32Constant(value, &constant_value) &&
Smi::IsValid(constant_value)) {
@@ -652,7 +652,7 @@ TNode<Smi> CodeStubAssembler::SmiTag(SloppyTNode<IntPtrT> value) {
return smi;
}
-TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) {
+TNode<IntPtrT> CodeStubAssembler::SmiUntag(TNode<Smi> value) {
intptr_t constant_value;
if (TryToIntPtrConstant(value, &constant_value)) {
return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize));
@@ -665,7 +665,7 @@ TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) {
return Signed(WordSarShiftOutZeros(raw_bits, SmiShiftBitsConstant()));
}
-TNode<Int32T> CodeStubAssembler::SmiToInt32(SloppyTNode<Smi> value) {
+TNode<Int32T> CodeStubAssembler::SmiToInt32(TNode<Smi> value) {
if (COMPRESS_POINTERS_BOOL) {
return Signed(Word32SarShiftOutZeros(
TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(value)),
@@ -675,7 +675,7 @@ TNode<Int32T> CodeStubAssembler::SmiToInt32(SloppyTNode<Smi> value) {
return TruncateIntPtrToInt32(result);
}
-TNode<Float64T> CodeStubAssembler::SmiToFloat64(SloppyTNode<Smi> value) {
+TNode<Float64T> CodeStubAssembler::SmiToFloat64(TNode<Smi> value) {
return ChangeInt32ToFloat64(SmiToInt32(value));
}
@@ -992,15 +992,14 @@ TNode<Smi> CodeStubAssembler::SmiLexicographicCompare(TNode<Smi> x,
std::make_pair(MachineType::AnyTagged(), y)));
}
-TNode<Int32T> CodeStubAssembler::TruncateWordToInt32(SloppyTNode<WordT> value) {
+TNode<Int32T> CodeStubAssembler::TruncateWordToInt32(TNode<WordT> value) {
if (Is64()) {
return TruncateInt64ToInt32(ReinterpretCast<Int64T>(value));
}
return ReinterpretCast<Int32T>(value);
}
-TNode<Int32T> CodeStubAssembler::TruncateIntPtrToInt32(
- SloppyTNode<IntPtrT> value) {
+TNode<Int32T> CodeStubAssembler::TruncateIntPtrToInt32(TNode<IntPtrT> value) {
if (Is64()) {
return TruncateInt64ToInt32(ReinterpretCast<Int64T>(value));
}
@@ -1019,7 +1018,7 @@ TNode<BoolT> CodeStubAssembler::TaggedIsNotSmi(TNode<MaybeObject> a) {
return Word32BinaryNot(TaggedIsSmi(a));
}
-TNode<BoolT> CodeStubAssembler::TaggedIsPositiveSmi(SloppyTNode<Object> a) {
+TNode<BoolT> CodeStubAssembler::TaggedIsPositiveSmi(TNode<Object> a) {
#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
return Word32Equal(
Word32And(
@@ -1033,7 +1032,7 @@ TNode<BoolT> CodeStubAssembler::TaggedIsPositiveSmi(SloppyTNode<Object> a) {
#endif
}
-TNode<BoolT> CodeStubAssembler::WordIsAligned(SloppyTNode<WordT> word,
+TNode<BoolT> CodeStubAssembler::WordIsAligned(TNode<WordT> word,
size_t alignment) {
DCHECK(base::bits::IsPowerOfTwo(alignment));
DCHECK_LE(alignment, kMaxUInt32);
@@ -1056,8 +1055,8 @@ TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck(
return LoadFixedDoubleArrayElement(array, index, if_hole);
}
-void CodeStubAssembler::BranchIfJSReceiver(SloppyTNode<Object> object,
- Label* if_true, Label* if_false) {
+void CodeStubAssembler::BranchIfJSReceiver(TNode<Object> object, Label* if_true,
+ Label* if_false) {
GotoIf(TaggedIsSmi(object), if_false);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
Branch(IsJSReceiver(CAST(object)), if_true, if_false);
@@ -1314,7 +1313,7 @@ TNode<BoolT> CodeStubAssembler::IsRegularHeapObjectSize(TNode<IntPtrT> size) {
IntPtrConstant(kMaxRegularHeapObjectSize));
}
-void CodeStubAssembler::BranchIfToBooleanIsTrue(SloppyTNode<Object> value,
+void CodeStubAssembler::BranchIfToBooleanIsTrue(TNode<Object> value,
Label* if_true,
Label* if_false) {
Label if_smi(this), if_notsmi(this), if_heapnumber(this, Label::kDeferred),
@@ -1776,7 +1775,7 @@ TNode<Uint32T> CodeStubAssembler::EnsureOnlyHasSimpleProperties(
}
TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash(
- SloppyTNode<Object> receiver, Label* if_no_hash) {
+ TNode<Object> receiver, Label* if_no_hash) {
TVARIABLE(IntPtrT, var_hash);
Label done(this), if_smi(this), if_property_array(this),
if_ordered_property_dictionary(this), if_property_dictionary(this),
@@ -2110,7 +2109,7 @@ void CodeStubAssembler::FixedArrayBoundsCheck(TNode<FixedArrayBase> array,
}
TNode<Object> CodeStubAssembler::LoadPropertyArrayElement(
- TNode<PropertyArray> object, SloppyTNode<IntPtrT> index) {
+ TNode<PropertyArray> object, TNode<IntPtrT> index) {
int additional_offset = 0;
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe;
return CAST(LoadArrayElement(object, PropertyArray::kHeaderSize, index,
@@ -2148,7 +2147,7 @@ TNode<RawPtrT> CodeStubAssembler::LoadJSTypedArrayDataPtr(
}
TNode<BigInt> CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged(
- SloppyTNode<RawPtrT> data_pointer, SloppyTNode<IntPtrT> offset) {
+ TNode<RawPtrT> data_pointer, TNode<IntPtrT> offset) {
if (Is64()) {
TNode<IntPtrT> value = Load<IntPtrT>(data_pointer, offset);
return BigIntFromInt64(value);
@@ -2273,7 +2272,7 @@ TNode<BigInt> CodeStubAssembler::BigIntFromInt64(TNode<IntPtrT> value) {
}
TNode<BigInt> CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged(
- SloppyTNode<RawPtrT> data_pointer, SloppyTNode<IntPtrT> offset) {
+ TNode<RawPtrT> data_pointer, TNode<IntPtrT> offset) {
Label if_zero(this), done(this);
if (Is64()) {
TNode<UintPtrT> value = Load<UintPtrT>(data_pointer, offset);
@@ -2452,7 +2451,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement(
if (SmiValuesAre32Bits()) {
return Load<Int32T>(object, offset);
} else {
- return SmiToInt32(Load(MachineType::TaggedSigned(), object, offset));
+ return SmiToInt32(Load<Smi>(object, offset));
}
}
@@ -2587,7 +2586,7 @@ TNode<BoolT> CodeStubAssembler::LoadScopeInfoHasExtensionField(
}
void CodeStubAssembler::StoreContextElementNoWriteBarrier(
- TNode<Context> context, int slot_index, SloppyTNode<Object> value) {
+ TNode<Context> context, int slot_index, TNode<Object> value) {
int offset = Context::SlotOffset(slot_index);
StoreNoWriteBarrier(MachineRepresentation::kTagged, context,
IntPtrConstant(offset), value);
@@ -2628,6 +2627,25 @@ TNode<Context> CodeStubAssembler::LoadModuleContext(TNode<Context> context) {
return UncheckedCast<Context>(cur_context.value());
}
+TNode<Object> CodeStubAssembler::GetImportMetaObject(TNode<Context> context) {
+ const TNode<Context> module_context = LoadModuleContext(context);
+ const TNode<HeapObject> module =
+ CAST(LoadContextElement(module_context, Context::EXTENSION_INDEX));
+ const TNode<Object> import_meta =
+ LoadObjectField(module, SourceTextModule::kImportMetaOffset);
+
+ TVARIABLE(Object, return_value, import_meta);
+
+ Label end(this);
+ GotoIfNot(IsTheHole(import_meta), &end);
+
+ return_value = CallRuntime(Runtime::kGetImportMetaObject, context);
+ Goto(&end);
+
+ BIND(&end);
+ return return_value.value();
+}
+
TNode<Map> CodeStubAssembler::LoadObjectFunctionInitialMap(
TNode<NativeContext> native_context) {
TNode<JSFunction> object_function =
@@ -2643,7 +2661,7 @@ TNode<Map> CodeStubAssembler::LoadSlowObjectWithNullPrototypeMap(
}
TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap(
- SloppyTNode<Int32T> kind, TNode<NativeContext> native_context) {
+ TNode<Int32T> kind, TNode<NativeContext> native_context) {
CSA_ASSERT(this, IsFastElementsKind(kind));
TNode<IntPtrT> offset =
IntPtrAdd(IntPtrConstant(Context::FIRST_JS_ARRAY_MAP_SLOT),
@@ -2730,11 +2748,22 @@ TNode<BytecodeArray> CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray(
shared, SharedFunctionInfo::kFunctionDataOffset);
TVARIABLE(HeapObject, var_result, function_data);
+
+ Label check_for_interpreter_data(this, &var_result);
Label done(this, &var_result);
- GotoIfNot(HasInstanceType(function_data, INTERPRETER_DATA_TYPE), &done);
+ GotoIfNot(HasInstanceType(var_result.value(), BASELINE_DATA_TYPE),
+ &check_for_interpreter_data);
+ TNode<HeapObject> baseline_data = LoadObjectField<HeapObject>(
+ var_result.value(), BaselineData::kDataOffset);
+ var_result = baseline_data;
+ Goto(&check_for_interpreter_data);
+
+ BIND(&check_for_interpreter_data);
+
+ GotoIfNot(HasInstanceType(var_result.value(), INTERPRETER_DATA_TYPE), &done);
TNode<BytecodeArray> bytecode_array = LoadObjectField<BytecodeArray>(
- function_data, InterpreterData::kBytecodeArrayOffset);
+ var_result.value(), InterpreterData::kBytecodeArrayOffset);
var_result = bytecode_array;
Goto(&done);
@@ -2749,8 +2778,8 @@ void CodeStubAssembler::StoreObjectByteNoWriteBarrier(TNode<HeapObject> object,
IntPtrConstant(offset - kHeapObjectTag), value);
}
-void CodeStubAssembler::StoreHeapNumberValue(SloppyTNode<HeapNumber> object,
- SloppyTNode<Float64T> value) {
+void CodeStubAssembler::StoreHeapNumberValue(TNode<HeapNumber> object,
+ TNode<Float64T> value) {
StoreObjectFieldNoWriteBarrier(object, HeapNumber::kValueOffset, value);
}
@@ -2912,7 +2941,8 @@ void CodeStubAssembler::StoreFeedbackVectorSlot(
// Check that slot <= feedback_vector.length.
CSA_ASSERT(this,
IsOffsetInBounds(offset, LoadFeedbackVectorLength(feedback_vector),
- FeedbackVector::kHeaderSize));
+ FeedbackVector::kHeaderSize),
+ SmiFromIntPtr(offset), feedback_vector);
if (barrier_mode == SKIP_WRITE_BARRIER) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, feedback_vector, offset,
value);
@@ -3242,7 +3272,7 @@ TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
return CAST(result);
}
-TNode<BoolT> CodeStubAssembler::IsZeroOrContext(SloppyTNode<Object> object) {
+TNode<BoolT> CodeStubAssembler::IsZeroOrContext(TNode<Object> object) {
return Select<BoolT>(
TaggedEqual(object, SmiConstant(0)), [=] { return Int32TrueConstant(); },
[=] { return IsContext(CAST(object)); });
@@ -3575,8 +3605,8 @@ void CodeStubAssembler::InitializeJSObjectFromMap(
}
void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking(
- TNode<HeapObject> object, TNode<Map> map,
- SloppyTNode<IntPtrT> instance_size, int start_offset) {
+ TNode<HeapObject> object, TNode<Map> map, TNode<IntPtrT> instance_size,
+ int start_offset) {
STATIC_ASSERT(Map::kNoSlackTracking == 0);
CSA_ASSERT(this, IsClearWord32<Map::Bits3::ConstructionCounterBits>(
LoadMapBitField3(map)));
@@ -3585,8 +3615,7 @@ void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking(
}
void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
- TNode<HeapObject> object, TNode<Map> map,
- SloppyTNode<IntPtrT> instance_size) {
+ TNode<HeapObject> object, TNode<Map> map, TNode<IntPtrT> instance_size) {
Comment("InitializeJSObjectBodyNoSlackTracking");
// Perform in-object slack tracking if requested.
@@ -4846,19 +4875,24 @@ void CodeStubAssembler::CopyFixedArrayElements(
if_hole = nullptr;
}
- Node* value = LoadElementAndPrepareForStore(
- from_array, var_from_offset.value(), from_kind, to_kind, if_hole);
-
- if (needs_write_barrier) {
- CHECK_EQ(to_array, to_array_adjusted);
- Store(to_array_adjusted, to_offset, value);
- } else if (to_double_elements) {
+ if (to_double_elements) {
+ DCHECK(!needs_write_barrier);
+ TNode<Float64T> value = LoadElementAndPrepareForStore<Float64T>(
+ from_array, var_from_offset.value(), from_kind, to_kind, if_hole);
StoreNoWriteBarrier(MachineRepresentation::kFloat64, to_array_adjusted,
to_offset, value);
} else {
- UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged,
- to_array_adjusted, to_offset, value);
+ TNode<Object> value = LoadElementAndPrepareForStore<Object>(
+ from_array, var_from_offset.value(), from_kind, to_kind, if_hole);
+ if (needs_write_barrier) {
+ CHECK_EQ(to_array, to_array_adjusted);
+ Store(to_array_adjusted, to_offset, value);
+ } else {
+ UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged,
+ to_array_adjusted, to_offset, value);
+ }
}
+
Goto(&next_iter);
if (if_hole == &store_double_hole) {
@@ -4970,30 +5004,43 @@ TNode<FixedArrayBase> CodeStubAssembler::CloneFixedArray(
base::Optional<TNode<BInt>>(base::nullopt), flags);
}
-Node* CodeStubAssembler::LoadElementAndPrepareForStore(
+template <>
+TNode<Object> CodeStubAssembler::LoadElementAndPrepareForStore(
TNode<FixedArrayBase> array, TNode<IntPtrT> offset, ElementsKind from_kind,
ElementsKind to_kind, Label* if_hole) {
CSA_ASSERT(this, IsFixedArrayWithKind(array, from_kind));
+ DCHECK(!IsDoubleElementsKind(to_kind));
if (IsDoubleElementsKind(from_kind)) {
TNode<Float64T> value =
LoadDoubleWithHoleCheck(array, offset, if_hole, MachineType::Float64());
- if (!IsDoubleElementsKind(to_kind)) {
- return AllocateHeapNumberWithValue(value);
+ return AllocateHeapNumberWithValue(value);
+ } else {
+ TNode<Object> value = Load<Object>(array, offset);
+ if (if_hole) {
+ GotoIf(TaggedEqual(value, TheHoleConstant()), if_hole);
}
return value;
+ }
+}
+template <>
+TNode<Float64T> CodeStubAssembler::LoadElementAndPrepareForStore(
+ TNode<FixedArrayBase> array, TNode<IntPtrT> offset, ElementsKind from_kind,
+ ElementsKind to_kind, Label* if_hole) {
+ CSA_ASSERT(this, IsFixedArrayWithKind(array, from_kind));
+ DCHECK(IsDoubleElementsKind(to_kind));
+ if (IsDoubleElementsKind(from_kind)) {
+ return LoadDoubleWithHoleCheck(array, offset, if_hole,
+ MachineType::Float64());
} else {
TNode<Object> value = Load<Object>(array, offset);
if (if_hole) {
GotoIf(TaggedEqual(value, TheHoleConstant()), if_hole);
}
- if (IsDoubleElementsKind(to_kind)) {
- if (IsSmiElementsKind(from_kind)) {
- return SmiToFloat64(CAST(value));
- }
- return LoadHeapNumberValue(CAST(value));
+ if (IsSmiElementsKind(from_kind)) {
+ return SmiToFloat64(CAST(value));
}
- return value;
+ return LoadHeapNumberValue(CAST(value));
}
}
@@ -5114,7 +5161,7 @@ TNode<Float64T> CodeStubAssembler::TryTaggedToFloat64(
}
TNode<Float64T> CodeStubAssembler::TruncateTaggedToFloat64(
- TNode<Context> context, SloppyTNode<Object> value) {
+ TNode<Context> context, TNode<Object> value) {
// We might need to loop once due to ToNumber conversion.
TVARIABLE(Object, var_value, value);
TVARIABLE(Float64T, var_result);
@@ -5143,8 +5190,8 @@ TNode<Float64T> CodeStubAssembler::TruncateTaggedToFloat64(
return var_result.value();
}
-TNode<Word32T> CodeStubAssembler::TruncateTaggedToWord32(
- TNode<Context> context, SloppyTNode<Object> value) {
+TNode<Word32T> CodeStubAssembler::TruncateTaggedToWord32(TNode<Context> context,
+ TNode<Object> value) {
TVARIABLE(Word32T, var_result);
Label done(this);
TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumber>(context, value,
@@ -5358,8 +5405,7 @@ TNode<Number> CodeStubAssembler::ChangeFloat32ToTagged(TNode<Float32T> value) {
return var_result.value();
}
-TNode<Number> CodeStubAssembler::ChangeFloat64ToTagged(
- SloppyTNode<Float64T> value) {
+TNode<Number> CodeStubAssembler::ChangeFloat64ToTagged(TNode<Float64T> value) {
Label if_smi(this), done(this);
TVARIABLE(Smi, var_smi_result);
TVARIABLE(Number, var_result);
@@ -5578,16 +5624,15 @@ TNode<Float64T> CodeStubAssembler::ChangeTaggedToFloat64(TNode<Context> context,
return var_result.value();
}
-TNode<WordT> CodeStubAssembler::TimesSystemPointerSize(
- SloppyTNode<WordT> value) {
+TNode<WordT> CodeStubAssembler::TimesSystemPointerSize(TNode<WordT> value) {
return WordShl(value, kSystemPointerSizeLog2);
}
-TNode<WordT> CodeStubAssembler::TimesTaggedSize(SloppyTNode<WordT> value) {
+TNode<WordT> CodeStubAssembler::TimesTaggedSize(TNode<WordT> value) {
return WordShl(value, kTaggedSizeLog2);
}
-TNode<WordT> CodeStubAssembler::TimesDoubleSize(SloppyTNode<WordT> value) {
+TNode<WordT> CodeStubAssembler::TimesDoubleSize(TNode<WordT> value) {
return WordShl(value, kDoubleSizeLog2);
}
@@ -5785,8 +5830,8 @@ void CodeStubAssembler::ThrowTypeError(TNode<Context> context,
Unreachable();
}
-TNode<BoolT> CodeStubAssembler::InstanceTypeEqual(
- SloppyTNode<Int32T> instance_type, int type) {
+TNode<BoolT> CodeStubAssembler::InstanceTypeEqual(TNode<Int32T> instance_type,
+ int type) {
return Word32Equal(instance_type, Int32Constant(type));
}
@@ -5965,7 +6010,7 @@ TNode<BoolT> CodeStubAssembler::IsCustomElementsReceiverInstanceType(
}
TNode<BoolT> CodeStubAssembler::IsStringInstanceType(
- SloppyTNode<Int32T> instance_type) {
+ TNode<Int32T> instance_type) {
STATIC_ASSERT(INTERNALIZED_STRING_TYPE == FIRST_TYPE);
return Int32LessThan(instance_type, Int32Constant(FIRST_NONSTRING_TYPE));
}
@@ -5979,7 +6024,7 @@ TNode<BoolT> CodeStubAssembler::IsOneByteStringInstanceType(
}
TNode<BoolT> CodeStubAssembler::IsSequentialStringInstanceType(
- SloppyTNode<Int32T> instance_type) {
+ TNode<Int32T> instance_type) {
CSA_ASSERT(this, IsStringInstanceType(instance_type));
return Word32Equal(
Word32And(instance_type, Int32Constant(kStringRepresentationMask)),
@@ -5996,7 +6041,7 @@ TNode<BoolT> CodeStubAssembler::IsSeqOneByteStringInstanceType(
}
TNode<BoolT> CodeStubAssembler::IsConsStringInstanceType(
- SloppyTNode<Int32T> instance_type) {
+ TNode<Int32T> instance_type) {
CSA_ASSERT(this, IsStringInstanceType(instance_type));
return Word32Equal(
Word32And(instance_type, Int32Constant(kStringRepresentationMask)),
@@ -6004,7 +6049,7 @@ TNode<BoolT> CodeStubAssembler::IsConsStringInstanceType(
}
TNode<BoolT> CodeStubAssembler::IsIndirectStringInstanceType(
- SloppyTNode<Int32T> instance_type) {
+ TNode<Int32T> instance_type) {
CSA_ASSERT(this, IsStringInstanceType(instance_type));
STATIC_ASSERT(kIsIndirectStringMask == 0x1);
STATIC_ASSERT(kIsIndirectStringTag == 0x1);
@@ -6013,7 +6058,7 @@ TNode<BoolT> CodeStubAssembler::IsIndirectStringInstanceType(
}
TNode<BoolT> CodeStubAssembler::IsExternalStringInstanceType(
- SloppyTNode<Int32T> instance_type) {
+ TNode<Int32T> instance_type) {
CSA_ASSERT(this, IsStringInstanceType(instance_type));
return Word32Equal(
Word32And(instance_type, Int32Constant(kStringRepresentationMask)),
@@ -6021,14 +6066,14 @@ TNode<BoolT> CodeStubAssembler::IsExternalStringInstanceType(
}
TNode<BoolT> CodeStubAssembler::IsUncachedExternalStringInstanceType(
- SloppyTNode<Int32T> instance_type) {
+ TNode<Int32T> instance_type) {
CSA_ASSERT(this, IsStringInstanceType(instance_type));
STATIC_ASSERT(kUncachedExternalStringTag != 0);
return IsSetWord32(instance_type, kUncachedExternalStringMask);
}
TNode<BoolT> CodeStubAssembler::IsJSReceiverInstanceType(
- SloppyTNode<Int32T> instance_type) {
+ TNode<Int32T> instance_type) {
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
return Int32GreaterThanOrEqual(instance_type,
Int32Constant(FIRST_JS_RECEIVER_TYPE));
@@ -6046,12 +6091,12 @@ TNode<BoolT> CodeStubAssembler::IsNullOrJSReceiver(TNode<HeapObject> object) {
return UncheckedCast<BoolT>(Word32Or(IsJSReceiver(object), IsNull(object)));
}
-TNode<BoolT> CodeStubAssembler::IsNullOrUndefined(SloppyTNode<Object> value) {
+TNode<BoolT> CodeStubAssembler::IsNullOrUndefined(TNode<Object> value) {
return UncheckedCast<BoolT>(Word32Or(IsUndefined(value), IsNull(value)));
}
TNode<BoolT> CodeStubAssembler::IsJSGlobalProxyInstanceType(
- SloppyTNode<Int32T> instance_type) {
+ TNode<Int32T> instance_type) {
return InstanceTypeEqual(instance_type, JS_GLOBAL_PROXY_TYPE);
}
@@ -6068,7 +6113,7 @@ TNode<BoolT> CodeStubAssembler::IsJSGeneratorMap(TNode<Map> map) {
}
TNode<BoolT> CodeStubAssembler::IsJSObjectInstanceType(
- SloppyTNode<Int32T> instance_type) {
+ TNode<Int32T> instance_type) {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
return Int32GreaterThanOrEqual(instance_type,
Int32Constant(FIRST_JS_OBJECT_TYPE));
@@ -6118,7 +6163,7 @@ TNode<BoolT> CodeStubAssembler::IsMap(TNode<HeapObject> map) {
}
TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapperInstanceType(
- SloppyTNode<Int32T> instance_type) {
+ TNode<Int32T> instance_type) {
return InstanceTypeEqual(instance_type, JS_PRIMITIVE_WRAPPER_TYPE);
}
@@ -6131,7 +6176,7 @@ TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapperMap(TNode<Map> map) {
}
TNode<BoolT> CodeStubAssembler::IsJSArrayInstanceType(
- SloppyTNode<Int32T> instance_type) {
+ TNode<Int32T> instance_type) {
return InstanceTypeEqual(instance_type, JS_ARRAY_TYPE);
}
@@ -6231,7 +6276,7 @@ TNode<BoolT> CodeStubAssembler::IsPropertyCell(TNode<HeapObject> object) {
}
TNode<BoolT> CodeStubAssembler::IsHeapNumberInstanceType(
- SloppyTNode<Int32T> instance_type) {
+ TNode<Int32T> instance_type) {
return InstanceTypeEqual(instance_type, HEAP_NUMBER_TYPE);
}
@@ -6240,7 +6285,7 @@ TNode<BoolT> CodeStubAssembler::IsOddball(TNode<HeapObject> object) {
}
TNode<BoolT> CodeStubAssembler::IsOddballInstanceType(
- SloppyTNode<Int32T> instance_type) {
+ TNode<Int32T> instance_type) {
return InstanceTypeEqual(instance_type, ODDBALL_TYPE);
}
@@ -6249,7 +6294,7 @@ TNode<BoolT> CodeStubAssembler::IsName(TNode<HeapObject> object) {
}
TNode<BoolT> CodeStubAssembler::IsNameInstanceType(
- SloppyTNode<Int32T> instance_type) {
+ TNode<Int32T> instance_type) {
return Int32LessThanOrEqual(instance_type, Int32Constant(LAST_NAME_TYPE));
}
@@ -6262,7 +6307,7 @@ TNode<BoolT> CodeStubAssembler::IsSeqOneByteString(TNode<HeapObject> object) {
}
TNode<BoolT> CodeStubAssembler::IsSymbolInstanceType(
- SloppyTNode<Int32T> instance_type) {
+ TNode<Int32T> instance_type) {
return InstanceTypeEqual(instance_type, SYMBOL_TYPE);
}
@@ -6317,7 +6362,7 @@ TNode<BoolT> CodeStubAssembler::IsUniqueNameNoCachedIndex(
}
TNode<BoolT> CodeStubAssembler::IsBigIntInstanceType(
- SloppyTNode<Int32T> instance_type) {
+ TNode<Int32T> instance_type) {
return InstanceTypeEqual(instance_type, BIGINT_TYPE);
}
@@ -6326,12 +6371,12 @@ TNode<BoolT> CodeStubAssembler::IsBigInt(TNode<HeapObject> object) {
}
TNode<BoolT> CodeStubAssembler::IsPrimitiveInstanceType(
- SloppyTNode<Int32T> instance_type) {
+ TNode<Int32T> instance_type) {
return Int32LessThanOrEqual(instance_type,
Int32Constant(LAST_PRIMITIVE_HEAP_OBJECT_TYPE));
}
-TNode<BoolT> CodeStubAssembler::IsPrivateName(SloppyTNode<Symbol> symbol) {
+TNode<BoolT> CodeStubAssembler::IsPrivateName(TNode<Symbol> symbol) {
TNode<Uint32T> flags = LoadObjectField<Uint32T>(symbol, Symbol::kFlagsOffset);
return IsSetWord32<Symbol::IsPrivateNameBit>(flags);
}
@@ -6369,9 +6414,15 @@ TNode<BoolT> CodeStubAssembler::IsJSGeneratorObject(TNode<HeapObject> object) {
return HasInstanceType(object, JS_GENERATOR_OBJECT_TYPE);
}
+TNode<BoolT> CodeStubAssembler::IsFunctionInstanceType(
+ TNode<Int32T> instance_type) {
+ return IsInRange(instance_type, FIRST_JS_FUNCTION_OR_BOUND_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_OR_BOUND_FUNCTION_TYPE);
+}
TNode<BoolT> CodeStubAssembler::IsJSFunctionInstanceType(
- SloppyTNode<Int32T> instance_type) {
- return InstanceTypeEqual(instance_type, JS_FUNCTION_TYPE);
+ TNode<Int32T> instance_type) {
+ return IsInRange(instance_type, FIRST_JS_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_TYPE);
}
TNode<BoolT> CodeStubAssembler::IsJSFunction(TNode<HeapObject> object) {
@@ -6387,7 +6438,7 @@ TNode<BoolT> CodeStubAssembler::IsJSFunctionMap(TNode<Map> map) {
}
TNode<BoolT> CodeStubAssembler::IsJSTypedArrayInstanceType(
- SloppyTNode<Int32T> instance_type) {
+ TNode<Int32T> instance_type) {
return InstanceTypeEqual(instance_type, JS_TYPED_ARRAY_TYPE);
}
@@ -6411,7 +6462,7 @@ TNode<BoolT> CodeStubAssembler::IsJSRegExp(TNode<HeapObject> object) {
return HasInstanceType(object, JS_REG_EXP_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsNumeric(SloppyTNode<Object> object) {
+TNode<BoolT> CodeStubAssembler::IsNumeric(TNode<Object> object) {
return Select<BoolT>(
TaggedIsSmi(object), [=] { return Int32TrueConstant(); },
[=] {
@@ -7086,8 +7137,16 @@ TNode<Numeric> CodeStubAssembler::NonNumberToNumeric(TNode<Context> context,
Object::Conversion::kToNumeric);
}
+TNode<Number> CodeStubAssembler::ToNumber(TNode<Context> context,
+ TNode<Object> input,
+ BigIntHandling bigint_handling) {
+ return CAST(ToNumberOrNumeric([context] { return context; }, input, nullptr,
+ Object::Conversion::kToNumber,
+ bigint_handling));
+}
+
TNode<Number> CodeStubAssembler::ToNumber_Inline(TNode<Context> context,
- SloppyTNode<Object> input) {
+ TNode<Object> input) {
TVARIABLE(Number, var_result);
Label end(this), not_smi(this, Label::kDeferred);
@@ -7110,16 +7169,20 @@ TNode<Number> CodeStubAssembler::ToNumber_Inline(TNode<Context> context,
return var_result.value();
}
-TNode<Number> CodeStubAssembler::ToNumber(TNode<Context> context,
- SloppyTNode<Object> input,
- BigIntHandling bigint_handling) {
- TVARIABLE(Number, var_result);
+TNode<Numeric> CodeStubAssembler::ToNumberOrNumeric(
+ LazyNode<Context> context, TNode<Object> input,
+ TVariable<Smi>* var_type_feedback, Object::Conversion mode,
+ BigIntHandling bigint_handling) {
+ TVARIABLE(Numeric, var_result);
Label end(this);
Label not_smi(this, Label::kDeferred);
GotoIfNot(TaggedIsSmi(input), &not_smi);
TNode<Smi> input_smi = CAST(input);
var_result = input_smi;
+ if (var_type_feedback) {
+ *var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
+ }
Goto(&end);
BIND(&not_smi);
@@ -7130,11 +7193,29 @@ TNode<Number> CodeStubAssembler::ToNumber(TNode<Context> context,
TNode<HeapNumber> input_hn = CAST(input_ho);
var_result = input_hn;
+ if (var_type_feedback) {
+ *var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
+ }
Goto(&end);
BIND(&not_heap_number);
{
- var_result = NonNumberToNumber(context, input_ho, bigint_handling);
+ if (mode == Object::Conversion::kToNumeric) {
+ // Special case for collecting BigInt feedback.
+ Label not_bigint(this);
+ GotoIfNot(IsBigInt(input_ho), &not_bigint);
+ {
+ var_result = CAST(input_ho);
+ *var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
+ Goto(&end);
+ }
+ BIND(&not_bigint);
+ }
+ var_result = NonNumberToNumberOrNumeric(context(), input_ho, mode,
+ bigint_handling);
+ if (var_type_feedback) {
+ *var_type_feedback = SmiConstant(BinaryOperationFeedback::kAny);
+ }
Goto(&end);
}
}
@@ -7256,7 +7337,7 @@ void CodeStubAssembler::TaggedToNumeric(TNode<Context> context,
// ES#sec-touint32
TNode<Number> CodeStubAssembler::ToUint32(TNode<Context> context,
- SloppyTNode<Object> input) {
+ TNode<Object> input) {
const TNode<Float64T> float_zero = Float64Constant(0.0);
const TNode<Float64T> float_two_32 =
Float64Constant(static_cast<double>(1ULL << 32));
@@ -7359,7 +7440,7 @@ TNode<Number> CodeStubAssembler::ToUint32(TNode<Context> context,
}
TNode<String> CodeStubAssembler::ToString_Inline(TNode<Context> context,
- SloppyTNode<Object> input) {
+ TNode<Object> input) {
TVARIABLE(Object, var_result, input);
Label stub_call(this, Label::kDeferred), out(this);
@@ -7375,7 +7456,7 @@ TNode<String> CodeStubAssembler::ToString_Inline(TNode<Context> context,
}
TNode<JSReceiver> CodeStubAssembler::ToObject(TNode<Context> context,
- SloppyTNode<Object> input) {
+ TNode<Object> input) {
return CAST(CallBuiltin(Builtins::kToObject, context, input));
}
@@ -7404,7 +7485,7 @@ TNode<JSReceiver> CodeStubAssembler::ToObject_Inline(TNode<Context> context,
}
TNode<Number> CodeStubAssembler::ToLength_Inline(TNode<Context> context,
- SloppyTNode<Object> input) {
+ TNode<Object> input) {
TNode<Smi> smi_zero = SmiConstant(0);
return Select<Number>(
TaggedIsSmi(input), [=] { return SmiMax(CAST(input), smi_zero); },
@@ -7424,8 +7505,8 @@ TNode<Uint32T> CodeStubAssembler::DecodeWord32(TNode<Word32T> word32,
Int32Constant(mask >> shift)));
}
-TNode<UintPtrT> CodeStubAssembler::DecodeWord(SloppyTNode<WordT> word,
- uint32_t shift, uintptr_t mask) {
+TNode<UintPtrT> CodeStubAssembler::DecodeWord(TNode<WordT> word, uint32_t shift,
+ uintptr_t mask) {
DCHECK_EQ((mask >> shift) << shift, mask);
return Unsigned(WordAnd(WordShr(word, static_cast<int>(shift)),
IntPtrConstant(mask >> shift)));
@@ -7524,7 +7605,7 @@ void CodeStubAssembler::Use(Label* label) {
GotoIf(Word32Equal(Int32Constant(0), Int32Constant(1)), label);
}
-void CodeStubAssembler::TryToName(SloppyTNode<Object> key, Label* if_keyisindex,
+void CodeStubAssembler::TryToName(TNode<Object> key, Label* if_keyisindex,
TVariable<IntPtrT>* var_index,
Label* if_keyisunique,
TVariable<Name>* var_unique,
@@ -7792,8 +7873,8 @@ TNode<IntPtrT> CodeStubAssembler::HashTableComputeCapacity(
return IntPtrMax(capacity, IntPtrConstant(HashTableBase::kMinCapacity));
}
-TNode<IntPtrT> CodeStubAssembler::IntPtrMax(SloppyTNode<IntPtrT> left,
- SloppyTNode<IntPtrT> right) {
+TNode<IntPtrT> CodeStubAssembler::IntPtrMax(TNode<IntPtrT> left,
+ TNode<IntPtrT> right) {
intptr_t left_constant;
intptr_t right_constant;
if (TryToIntPtrConstant(left, &left_constant) &&
@@ -7804,8 +7885,8 @@ TNode<IntPtrT> CodeStubAssembler::IntPtrMax(SloppyTNode<IntPtrT> left,
right);
}
-TNode<IntPtrT> CodeStubAssembler::IntPtrMin(SloppyTNode<IntPtrT> left,
- SloppyTNode<IntPtrT> right) {
+TNode<IntPtrT> CodeStubAssembler::IntPtrMin(TNode<IntPtrT> left,
+ TNode<IntPtrT> right) {
intptr_t left_constant;
intptr_t right_constant;
if (TryToIntPtrConstant(left, &left_constant) &&
@@ -8037,12 +8118,21 @@ template <>
void CodeStubAssembler::InsertEntry<NameDictionary>(
TNode<NameDictionary> dictionary, TNode<Name> name, TNode<Object> value,
TNode<IntPtrT> index, TNode<Smi> enum_index) {
+ // This should only be used for adding, not updating existing mappings.
+ CSA_ASSERT(this,
+ Word32Or(TaggedEqual(LoadFixedArrayElement(dictionary, index),
+ UndefinedConstant()),
+ TaggedEqual(LoadFixedArrayElement(dictionary, index),
+ TheHoleConstant())));
+
// Store name and value.
StoreFixedArrayElement(dictionary, index, name);
StoreValueByKeyIndex<NameDictionary>(dictionary, index, value);
// Prepare details of the new property.
- PropertyDetails d(kData, NONE, PropertyCellType::kNoCell);
+ PropertyDetails d(kData, NONE,
+ PropertyDetails::kConstIfDictConstnessTracking);
+
enum_index =
SmiShl(enum_index, PropertyDetails::DictionaryStorageField::kShift);
// We OR over the actual index below, so we expect the initial value to be 0.
@@ -8657,7 +8747,7 @@ void CodeStubAssembler::TryLookupPropertyInSimpleObject(
}
void CodeStubAssembler::TryLookupProperty(
- TNode<HeapObject> object, TNode<Map> map, SloppyTNode<Int32T> instance_type,
+ TNode<HeapObject> object, TNode<Map> map, TNode<Int32T> instance_type,
TNode<Name> unique_name, Label* if_found_fast, Label* if_found_dict,
Label* if_found_global, TVariable<HeapObject>* var_meta_storage,
TVariable<IntPtrT>* var_name_index, Label* if_not_found,
@@ -8736,6 +8826,43 @@ TNode<Object> CodeStubAssembler::GetIteratorMethod(
if_iteratorundefined);
}
+TNode<Object> CodeStubAssembler::CreateAsyncFromSyncIterator(
+ TNode<Context> context, TNode<Object> sync_iterator) {
+ Label not_receiver(this, Label::kDeferred);
+ Label done(this);
+ TVARIABLE(Object, return_value);
+
+ GotoIf(TaggedIsSmi(sync_iterator), &not_receiver);
+ GotoIfNot(IsJSReceiver(CAST(sync_iterator)), &not_receiver);
+
+ const TNode<Object> next =
+ GetProperty(context, sync_iterator, factory()->next_string());
+
+ const TNode<NativeContext> native_context = LoadNativeContext(context);
+ const TNode<Map> map = CAST(LoadContextElement(
+ native_context, Context::ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX));
+ const TNode<JSObject> iterator = AllocateJSObjectFromMap(map);
+
+ StoreObjectFieldNoWriteBarrier(
+ iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset, sync_iterator);
+ StoreObjectFieldNoWriteBarrier(iterator, JSAsyncFromSyncIterator::kNextOffset,
+ next);
+
+ return_value = iterator;
+ Goto(&done);
+
+ BIND(&not_receiver);
+ {
+ return_value = CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context);
+
+ // Unreachable due to the Throw in runtime call.
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return return_value.value();
+}
+
void CodeStubAssembler::LoadPropertyFromFastObject(
TNode<HeapObject> object, TNode<Map> map,
TNode<DescriptorArray> descriptors, TNode<IntPtrT> name_index,
@@ -8791,13 +8918,9 @@ void CodeStubAssembler::LoadPropertyFromFastObject(
}
BIND(&if_double);
{
- if (FLAG_unbox_double_fields) {
- var_double_value = LoadObjectField<Float64T>(object, field_offset);
- } else {
- TNode<HeapNumber> heap_number =
- CAST(LoadObjectField(object, field_offset));
- var_double_value = LoadHeapNumberValue(heap_number);
- }
+ TNode<HeapNumber> heap_number =
+ CAST(LoadObjectField(object, field_offset));
+ var_double_value = LoadHeapNumberValue(heap_number);
Goto(&rebox_double);
}
}
@@ -9066,8 +9189,8 @@ void CodeStubAssembler::TryGetOwnProperty(
}
void CodeStubAssembler::TryLookupElement(
- TNode<HeapObject> object, TNode<Map> map, SloppyTNode<Int32T> instance_type,
- SloppyTNode<IntPtrT> intptr_index, Label* if_found, Label* if_absent,
+ TNode<HeapObject> object, TNode<Map> map, TNode<Int32T> instance_type,
+ TNode<IntPtrT> intptr_index, Label* if_found, Label* if_absent,
Label* if_not_found, Label* if_bailout) {
// Handle special objects in runtime.
GotoIf(IsSpecialReceiverInstanceType(instance_type), if_bailout);
@@ -9243,7 +9366,7 @@ void CodeStubAssembler::TryPrototypeChainLookup(
TNode<Object> receiver, TNode<Object> object_arg, TNode<Object> key,
const LookupPropertyInHolder& lookup_property_in_holder,
const LookupElementInHolder& lookup_element_in_holder, Label* if_end,
- Label* if_bailout, Label* if_proxy) {
+ Label* if_bailout, Label* if_proxy, bool handle_private_names) {
// Ensure receiver is JSReceiver, otherwise bailout.
GotoIf(TaggedIsSmi(receiver), if_bailout);
TNode<HeapObject> object = CAST(object_arg);
@@ -9298,6 +9421,11 @@ void CodeStubAssembler::TryPrototypeChainLookup(
BIND(&next_proto);
+ if (handle_private_names) {
+ // Private name lookup doesn't walk the prototype chain.
+ GotoIf(IsPrivateSymbol(CAST(key)), if_end);
+ }
+
TNode<HeapObject> proto = LoadMapPrototype(holder_map);
GotoIf(IsNull(proto), if_end);
@@ -9429,7 +9557,7 @@ TNode<Oddball> CodeStubAssembler::OrdinaryHasInstance(
// Goto runtime if {callable} is not a JSFunction.
TNode<Uint16T> callable_instance_type = LoadMapInstanceType(callable_map);
- GotoIfNot(InstanceTypeEqual(callable_instance_type, JS_FUNCTION_TYPE),
+ GotoIfNot(IsJSFunctionInstanceType(callable_instance_type),
&return_runtime);
GotoIfPrototypeRequiresRuntimeLookup(CAST(callable), callable_map,
@@ -9541,8 +9669,8 @@ CodeStubAssembler::ElementOffsetFromIndex<IntPtrT>(TNode<IntPtrT> index_node,
ElementsKind kind,
int base_size);
-TNode<BoolT> CodeStubAssembler::IsOffsetInBounds(SloppyTNode<IntPtrT> offset,
- SloppyTNode<IntPtrT> length,
+TNode<BoolT> CodeStubAssembler::IsOffsetInBounds(TNode<IntPtrT> offset,
+ TNode<IntPtrT> length,
int header_size,
ElementsKind kind) {
// Make sure we point to the last field.
@@ -9604,6 +9732,15 @@ TNode<FeedbackVector> CodeStubAssembler::LoadFeedbackVectorForStub() {
return CAST(LoadFeedbackVector(function));
}
+TNode<FeedbackVector> CodeStubAssembler::LoadFeedbackVectorFromBaseline() {
+ return CAST(
+ LoadFromParentFrame(InterpreterFrameConstants::kBytecodeOffsetFromFp));
+}
+
+TNode<Context> CodeStubAssembler::LoadContextFromBaseline() {
+ return CAST(LoadFromParentFrame(InterpreterFrameConstants::kContextOffset));
+}
+
TNode<FeedbackVector>
CodeStubAssembler::LoadFeedbackVectorForStubWithTrampoline() {
TNode<RawPtrT> frame_pointer = LoadParentFramePointer();
@@ -9615,16 +9752,40 @@ CodeStubAssembler::LoadFeedbackVectorForStubWithTrampoline() {
}
void CodeStubAssembler::UpdateFeedback(TNode<Smi> feedback,
- TNode<HeapObject> maybe_vector,
- TNode<UintPtrT> slot_id) {
+ TNode<HeapObject> maybe_feedback_vector,
+ TNode<UintPtrT> slot_id,
+ UpdateFeedbackMode mode) {
+ switch (mode) {
+ case UpdateFeedbackMode::kOptionalFeedback:
+ MaybeUpdateFeedback(feedback, maybe_feedback_vector, slot_id);
+ break;
+ case UpdateFeedbackMode::kGuaranteedFeedback:
+ CSA_ASSERT(this, IsFeedbackVector(maybe_feedback_vector));
+ UpdateFeedback(feedback, CAST(maybe_feedback_vector), slot_id);
+ break;
+ }
+}
+
+void CodeStubAssembler::MaybeUpdateFeedback(TNode<Smi> feedback,
+ TNode<HeapObject> maybe_vector,
+ TNode<UintPtrT> slot_id) {
Label end(this);
- // If feedback_vector is not valid, then nothing to do.
GotoIf(IsUndefined(maybe_vector), &end);
+ {
+ UpdateFeedback(feedback, CAST(maybe_vector), slot_id);
+ Goto(&end);
+ }
+ BIND(&end);
+}
+
+void CodeStubAssembler::UpdateFeedback(TNode<Smi> feedback,
+ TNode<FeedbackVector> feedback_vector,
+ TNode<UintPtrT> slot_id) {
+ Label end(this);
// This method is used for binary op and compare feedback. These
// vector nodes are initialized with a smi 0, so we can simply OR
// our new feedback in place.
- TNode<FeedbackVector> feedback_vector = CAST(maybe_vector);
TNode<MaybeObject> feedback_element =
LoadFeedbackVectorSlot(feedback_vector, slot_id);
TNode<Smi> previous_feedback = CAST(feedback_element);
@@ -9650,7 +9811,7 @@ void CodeStubAssembler::ReportFeedbackUpdate(
#ifdef V8_TRACE_FEEDBACK_UPDATES
// Trace the update.
- CallRuntime(Runtime::kInterpreterTraceUpdateFeedback, NoContextConstant(),
+ CallRuntime(Runtime::kTraceUpdateFeedback, NoContextConstant(),
LoadFromParentFrame(StandardFrameConstants::kFunctionOffset),
SmiTag(Signed(slot_id)), StringConstant(reason));
#endif // V8_TRACE_FEEDBACK_UPDATES
@@ -9688,14 +9849,14 @@ void CodeStubAssembler::CheckForAssociatedProtector(TNode<Name> name,
// Fall through if no case matched.
}
-TNode<Map> CodeStubAssembler::LoadReceiverMap(SloppyTNode<Object> receiver) {
+TNode<Map> CodeStubAssembler::LoadReceiverMap(TNode<Object> receiver) {
return Select<Map>(
TaggedIsSmi(receiver), [=] { return HeapNumberMapConstant(); },
[=] { return LoadMap(UncheckedCast<HeapObject>(receiver)); });
}
TNode<IntPtrT> CodeStubAssembler::TryToIntptr(
- SloppyTNode<Object> key, Label* if_not_intptr,
+ TNode<Object> key, Label* if_not_intptr,
TVariable<Int32T>* var_instance_type) {
TVARIABLE(IntPtrT, var_intptr_key);
Label done(this, &var_intptr_key), key_is_smi(this), key_is_heapnumber(this);
@@ -9780,10 +9941,10 @@ MachineRepresentation ElementsKindToMachineRepresentation(ElementsKind kind) {
} // namespace
template <typename TArray, typename TIndex>
-void CodeStubAssembler::StoreElementBigIntOrTypedArray(TNode<TArray> elements,
- ElementsKind kind,
- TNode<TIndex> index,
- Node* value) {
+void CodeStubAssembler::StoreElementTypedArray(TNode<TArray> elements,
+ ElementsKind kind,
+ TNode<TIndex> index,
+ Node* value) {
// TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants?
static_assert(std::is_same<TIndex, Smi>::value ||
std::is_same<TIndex, UintPtrT>::value ||
@@ -9792,6 +9953,7 @@ void CodeStubAssembler::StoreElementBigIntOrTypedArray(TNode<TArray> elements,
static_assert(std::is_same<TArray, RawPtrT>::value ||
std::is_same<TArray, FixedArrayBase>::value,
"Only RawPtrT or FixedArrayBase elements are allowed");
+ DCHECK(IsTypedArrayElementsKind(kind));
if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS) {
TNode<IntPtrT> offset = ElementOffsetFromIndex(index, kind, 0);
TVARIABLE(UintPtrT, var_low);
@@ -9818,7 +9980,6 @@ void CodeStubAssembler::StoreElementBigIntOrTypedArray(TNode<TArray> elements,
}
#endif
} else {
- DCHECK(IsTypedArrayElementsKind(kind));
if (kind == UINT8_CLAMPED_ELEMENTS) {
CSA_ASSERT(this, Word32Equal(UncheckedCast<Word32T>(value),
Word32And(Int32Constant(0xFF), value)));
@@ -9833,27 +9994,37 @@ void CodeStubAssembler::StoreElementBigIntOrTypedArray(TNode<TArray> elements,
template <typename TIndex>
void CodeStubAssembler::StoreElement(TNode<FixedArrayBase> elements,
ElementsKind kind, TNode<TIndex> index,
- Node* value) {
- if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS ||
- IsTypedArrayElementsKind(kind)) {
- StoreElementBigIntOrTypedArray(elements, kind, index, value);
- } else if (IsDoubleElementsKind(kind)) {
- TNode<Float64T> value_float64 = UncheckedCast<Float64T>(value);
- StoreFixedDoubleArrayElement(CAST(elements), index, value_float64);
+ TNode<Object> value) {
+ static_assert(
+ std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
+ "Only Smi or IntPtrT indices are allowed");
+ DCHECK(!IsDoubleElementsKind(kind));
+ if (IsTypedArrayElementsKind(kind)) {
+ StoreElementTypedArray(elements, kind, index, value);
+ } else if (IsSmiElementsKind(kind)) {
+ TNode<Smi> smi_value = CAST(value);
+ StoreFixedArrayElement(CAST(elements), index, smi_value);
} else {
- WriteBarrierMode barrier_mode = IsSmiElementsKind(kind)
- ? UNSAFE_SKIP_WRITE_BARRIER
- : UPDATE_WRITE_BARRIER;
- StoreFixedArrayElement(CAST(elements), index, value, barrier_mode, 0);
+ StoreFixedArrayElement(CAST(elements), index, value);
}
}
template <typename TIndex>
+void CodeStubAssembler::StoreElement(TNode<FixedArrayBase> elements,
+ ElementsKind kind, TNode<TIndex> index,
+ TNode<Float64T> value) {
+ static_assert(
+ std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
+ "Only Smi or IntPtrT indices are allowed");
+ DCHECK(IsDoubleElementsKind(kind));
+ StoreFixedDoubleArrayElement(CAST(elements), index, value);
+}
+
+template <typename TIndex>
void CodeStubAssembler::StoreElement(TNode<RawPtrT> elements, ElementsKind kind,
TNode<TIndex> index, Node* value) {
- DCHECK(kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS ||
- IsTypedArrayElementsKind(kind));
- StoreElementBigIntOrTypedArray(elements, kind, index, value);
+ DCHECK(IsTypedArrayElementsKind(kind));
+ StoreElementTypedArray(elements, kind, index, value);
}
template V8_EXPORT_PRIVATE void CodeStubAssembler::StoreElement<UintPtrT>(
TNode<RawPtrT>, ElementsKind, TNode<UintPtrT>, Node*);
@@ -10167,13 +10338,12 @@ void CodeStubAssembler::EmitElementStore(
  // TODO(rmcilroy): TNodify the converted value once this function and
  // StoreElement are templated based on the elements_kind type.
- Node* converted_value = value;
if (IsTypedArrayElementsKind(elements_kind)) {
Label done(this), update_value_and_bailout(this, Label::kDeferred);
// IntegerIndexedElementSet converts value to a Number/BigInt prior to the
// bounds check.
- converted_value =
+ Node* converted_value =
PrepareValueForWriteToTypedArray(value, elements_kind, context);
TNode<JSTypedArray> typed_array = CAST(object);
@@ -10283,10 +10453,11 @@ void CodeStubAssembler::EmitElementStore(
  // In case the value is stored into a fast smi array, ensure that the value is
// a smi before manipulating the backing store. Otherwise the backing store
// may be left in an invalid state.
+ base::Optional<TNode<Float64T>> float_value;
if (IsSmiElementsKind(elements_kind)) {
GotoIfNot(TaggedIsSmi(value), bailout);
} else if (IsDoubleElementsKind(elements_kind)) {
- converted_value = TryTaggedToFloat64(value, bailout);
+ float_value = TryTaggedToFloat64(value, bailout);
}
TNode<Smi> smi_length = Select<Smi>(
@@ -10327,7 +10498,11 @@ void CodeStubAssembler::EmitElementStore(
}
CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
- StoreElement(elements, elements_kind, intptr_key, converted_value);
+ if (float_value) {
+ StoreElement(elements, elements_kind, intptr_key, float_value.value());
+ } else {
+ StoreElement(elements, elements_kind, intptr_key, value);
+ }
}
TNode<FixedArrayBase> CodeStubAssembler::CheckForCapacityGrow(
@@ -10867,9 +11042,48 @@ Operation Reverse(Operation op) {
}
} // anonymous namespace
+TNode<Context> CodeStubAssembler::GotoIfHasContextExtensionUpToDepth(
+ TNode<Context> context, TNode<Uint32T> depth, Label* target) {
+ TVARIABLE(Context, cur_context, context);
+ TVARIABLE(Uint32T, cur_depth, depth);
+
+ Label context_search(this, {&cur_depth, &cur_context});
+ Label exit_loop(this);
+ Label no_extension(this);
+
+ // Loop until the depth is 0.
+ CSA_ASSERT(this, Word32NotEqual(cur_depth.value(), Int32Constant(0)));
+ Goto(&context_search);
+ BIND(&context_search);
+ {
+ // Check if context has an extension slot.
+ TNode<BoolT> has_extension =
+ LoadScopeInfoHasExtensionField(LoadScopeInfo(cur_context.value()));
+ GotoIfNot(has_extension, &no_extension);
+
+ // Jump to the target if the extension slot is not an undefined value.
+ TNode<Object> extension_slot =
+ LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX);
+ Branch(TaggedNotEqual(extension_slot, UndefinedConstant()), target,
+ &no_extension);
+
+ BIND(&no_extension);
+ {
+ cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1)));
+ cur_context = CAST(
+ LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
+
+ Branch(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
+ &context_search, &exit_loop);
+ }
+ }
+ BIND(&exit_loop);
+ return cur_context.value();
+}
+
TNode<Oddball> CodeStubAssembler::RelationalComparison(
Operation op, TNode<Object> left, TNode<Object> right,
- TNode<Context> context, TVariable<Smi>* var_type_feedback) {
+ const LazyNode<Context>& context, TVariable<Smi>* var_type_feedback) {
Label return_true(this), return_false(this), do_float_comparison(this),
end(this);
TVARIABLE(Oddball, var_result); // Actually only "true" or "false".
@@ -10961,7 +11175,8 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
// dedicated ToPrimitive(right, hint Number) operation, as the
// ToNumeric(right) will by itself already invoke ToPrimitive with
// a Number hint.
- var_right = CallBuiltin(Builtins::kNonNumberToNumeric, context, right);
+ var_right =
+ CallBuiltin(Builtins::kNonNumberToNumeric, context(), right);
Goto(&loop);
}
}
@@ -11006,7 +11221,8 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
// dedicated ToPrimitive(left, hint Number) operation, as the
// ToNumeric(left) will by itself already invoke ToPrimitive with
// a Number hint.
- var_left = CallBuiltin(Builtins::kNonNumberToNumeric, context, left);
+ var_left =
+ CallBuiltin(Builtins::kNonNumberToNumeric, context(), left);
Goto(&loop);
}
}
@@ -11062,7 +11278,7 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
// ToNumeric(right) will by itself already invoke ToPrimitive with
// a Number hint.
var_right =
- CallBuiltin(Builtins::kNonNumberToNumeric, context, right);
+ CallBuiltin(Builtins::kNonNumberToNumeric, context(), right);
Goto(&loop);
}
}
@@ -11117,7 +11333,7 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
// ToNumeric(right) will by itself already invoke ToPrimitive with
// a Number hint.
var_right =
- CallBuiltin(Builtins::kNonNumberToNumeric, context, right);
+ CallBuiltin(Builtins::kNonNumberToNumeric, context(), right);
Goto(&loop);
}
}
@@ -11149,7 +11365,7 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
default:
UNREACHABLE();
}
- var_result = CAST(CallBuiltin(builtin, context, left, right));
+ var_result = CAST(CallBuiltin(builtin, context(), left, right));
Goto(&end);
BIND(&if_right_not_string);
@@ -11168,8 +11384,8 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
&if_right_receiver);
var_left =
- CallBuiltin(Builtins::kNonNumberToNumeric, context, left);
- var_right = CallBuiltin(Builtins::kToNumeric, context, right);
+ CallBuiltin(Builtins::kNonNumberToNumeric, context(), left);
+ var_right = CallBuiltin(Builtins::kToNumeric, context(), right);
Goto(&loop);
BIND(&if_right_bigint);
@@ -11184,7 +11400,7 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
{
Callable callable = CodeFactory::NonPrimitiveToPrimitive(
isolate(), ToPrimitiveHint::kNumber);
- var_right = CallStub(callable, context, right);
+ var_right = CallStub(callable, context(), right);
Goto(&loop);
}
}
@@ -11232,15 +11448,16 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
GotoIf(IsJSReceiverInstanceType(left_instance_type),
&if_left_receiver);
- var_right = CallBuiltin(Builtins::kToNumeric, context, right);
- var_left = CallBuiltin(Builtins::kNonNumberToNumeric, context, left);
+ var_right = CallBuiltin(Builtins::kToNumeric, context(), right);
+ var_left =
+ CallBuiltin(Builtins::kNonNumberToNumeric, context(), left);
Goto(&loop);
BIND(&if_left_receiver);
{
Callable callable = CodeFactory::NonPrimitiveToPrimitive(
isolate(), ToPrimitiveHint::kNumber);
- var_left = CallStub(callable, context, left);
+ var_left = CallStub(callable, context(), left);
Goto(&loop);
}
}
@@ -11292,7 +11509,7 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
}
TNode<Smi> CodeStubAssembler::CollectFeedbackForString(
- SloppyTNode<Int32T> instance_type) {
+ TNode<Int32T> instance_type) {
TNode<Smi> feedback = SelectSmiConstant(
Word32Equal(
Word32And(instance_type, Int32Constant(kIsNotInternalizedMask)),
@@ -11302,8 +11519,8 @@ TNode<Smi> CodeStubAssembler::CollectFeedbackForString(
return feedback;
}
-void CodeStubAssembler::GenerateEqual_Same(SloppyTNode<Object> value,
- Label* if_equal, Label* if_notequal,
+void CodeStubAssembler::GenerateEqual_Same(TNode<Object> value, Label* if_equal,
+ Label* if_notequal,
TVariable<Smi>* var_type_feedback) {
// In case of abstract or strict equality checks, we need additional checks
// for NaN values because they are not considered equal, even if both the
@@ -11395,9 +11612,8 @@ void CodeStubAssembler::GenerateEqual_Same(SloppyTNode<Object> value,
}
// ES6 section 7.2.12 Abstract Equality Comparison
-TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
- SloppyTNode<Object> right,
- TNode<Context> context,
+TNode<Oddball> CodeStubAssembler::Equal(TNode<Object> left, TNode<Object> right,
+ const LazyNode<Context>& context,
TVariable<Smi>* var_type_feedback) {
// This is a slightly optimized version of Object::Equals. Whenever you
// change something functionality wise in here, remember to update the
@@ -11512,7 +11728,7 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
CombineFeedback(var_type_feedback,
CompareOperationFeedback::kReceiver);
Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
- var_right = CallStub(callable, context, right);
+ var_right = CallStub(callable, context(), right);
Goto(&loop);
}
}
@@ -11543,7 +11759,7 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
{
GotoIfNot(IsStringInstanceType(right_type), &use_symmetry);
result =
- CAST(CallBuiltin(Builtins::kStringEqual, context, left, right));
+ CAST(CallBuiltin(Builtins::kStringEqual, context(), left, right));
CombineFeedback(var_type_feedback,
SmiOr(CollectFeedbackForString(left_type),
CollectFeedbackForString(right_type)));
@@ -11788,7 +12004,7 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
// convert {left} to Primitive too.
CombineFeedback(var_type_feedback, CompareOperationFeedback::kAny);
Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
- var_left = CallStub(callable, context, left);
+ var_left = CallStub(callable, context(), left);
Goto(&loop);
}
}
@@ -11803,7 +12019,7 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
CombineFeedback(var_type_feedback,
CollectFeedbackForString(right_type));
}
- var_right = CallBuiltin(Builtins::kStringToNumber, context, right);
+ var_right = CallBuiltin(Builtins::kStringToNumber, context(), right);
Goto(&loop);
}
@@ -11838,8 +12054,7 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
}
TNode<Oddball> CodeStubAssembler::StrictEqual(
- SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
- TVariable<Smi>* var_type_feedback) {
+ TNode<Object> lhs, TNode<Object> rhs, TVariable<Smi>* var_type_feedback) {
// Pseudo-code for the algorithm below:
//
// if (lhs == rhs) {
@@ -12214,8 +12429,7 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(
// ECMA#sec-samevalue
// This algorithm differs from the Strict Equality Comparison Algorithm in its
// treatment of signed zeroes and NaNs.
-void CodeStubAssembler::BranchIfSameValue(SloppyTNode<Object> lhs,
- SloppyTNode<Object> rhs,
+void CodeStubAssembler::BranchIfSameValue(TNode<Object> lhs, TNode<Object> rhs,
Label* if_true, Label* if_false,
SameValueMode mode) {
TVARIABLE(Float64T, var_lhs_value);
@@ -12339,8 +12553,8 @@ void CodeStubAssembler::BranchIfSameNumberValue(TNode<Float64T> lhs_value,
}
TNode<Oddball> CodeStubAssembler::HasProperty(TNode<Context> context,
- SloppyTNode<Object> object,
- SloppyTNode<Object> key,
+ TNode<Object> object,
+ TNode<Object> key,
HasPropertyLookupMode mode) {
Label call_runtime(this, Label::kDeferred), return_true(this),
return_false(this), end(this), if_proxy(this, Label::kDeferred);
@@ -12368,9 +12582,10 @@ TNode<Oddball> CodeStubAssembler::HasProperty(TNode<Context> context,
&return_true, &return_false, next_holder, if_bailout);
};
+ const bool kHandlePrivateNames = mode == HasPropertyLookupMode::kHasProperty;
TryPrototypeChainLookup(object, object, key, lookup_property_in_holder,
lookup_element_in_holder, &return_false,
- &call_runtime, &if_proxy);
+ &call_runtime, &if_proxy, kHandlePrivateNames);
TVARIABLE(Oddball, result);
@@ -12429,7 +12644,8 @@ void CodeStubAssembler::ForInPrepare(TNode<HeapObject> enumerator,
TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector,
TNode<FixedArray>* cache_array_out,
- TNode<Smi>* cache_length_out) {
+ TNode<Smi>* cache_length_out,
+ UpdateFeedbackMode update_feedback_mode) {
// Check if we're using an enum cache.
TVARIABLE(FixedArray, cache_array);
TVARIABLE(Smi, cache_length);
@@ -12458,7 +12674,7 @@ void CodeStubAssembler::ForInPrepare(TNode<HeapObject> enumerator,
IntPtrLessThanOrEqual(enum_length, enum_indices_length),
static_cast<int>(ForInFeedback::kEnumCacheKeysAndIndices),
static_cast<int>(ForInFeedback::kEnumCacheKeys));
- UpdateFeedback(feedback, maybe_feedback_vector, slot);
+ UpdateFeedback(feedback, maybe_feedback_vector, slot, update_feedback_mode);
cache_array = enum_keys;
cache_length = SmiTag(Signed(enum_length));
@@ -12472,7 +12688,7 @@ void CodeStubAssembler::ForInPrepare(TNode<HeapObject> enumerator,
// Record the fact that we hit the for-in slow-path.
UpdateFeedback(SmiConstant(ForInFeedback::kAny), maybe_feedback_vector,
- slot);
+ slot, update_feedback_mode);
cache_array = array_enumerator;
cache_length = LoadFixedArrayBaseLength(array_enumerator);
@@ -12484,22 +12700,7 @@ void CodeStubAssembler::ForInPrepare(TNode<HeapObject> enumerator,
*cache_length_out = cache_length.value();
}
-TNode<FixedArray> CodeStubAssembler::ForInPrepareForTorque(
- TNode<HeapObject> enumerator, TNode<UintPtrT> slot,
- TNode<HeapObject> maybe_feedback_vector) {
- TNode<FixedArray> cache_array;
- TNode<Smi> cache_length;
- ForInPrepare(enumerator, slot, maybe_feedback_vector, &cache_array,
- &cache_length);
-
- TNode<FixedArray> result = AllocateUninitializedFixedArray(2);
- StoreFixedArrayElement(result, 0, cache_array);
- StoreFixedArrayElement(result, 1, cache_length);
-
- return result;
-}
-
-TNode<String> CodeStubAssembler::Typeof(SloppyTNode<Object> value) {
+TNode<String> CodeStubAssembler::Typeof(TNode<Object> value) {
TVARIABLE(String, result_var);
Label return_number(this, Label::kDeferred), if_oddball(this),
@@ -12594,7 +12795,7 @@ TNode<HeapObject> CodeStubAssembler::GetSuperConstructor(
}
TNode<JSReceiver> CodeStubAssembler::SpeciesConstructor(
- TNode<Context> context, SloppyTNode<Object> object,
+ TNode<Context> context, TNode<Object> object,
TNode<JSReceiver> default_constructor) {
Isolate* isolate = this->isolate();
TVARIABLE(JSReceiver, var_result, default_constructor);
@@ -12882,8 +13083,7 @@ TNode<Number> CodeStubAssembler::BitwiseOp(TNode<Word32T> left32,
}
TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResult(
- TNode<Context> context, SloppyTNode<Object> value,
- SloppyTNode<Oddball> done) {
+ TNode<Context> context, TNode<Object> value, TNode<Oddball> done) {
CSA_ASSERT(this, IsBoolean(done));
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Map> map = CAST(
@@ -12900,7 +13100,7 @@ TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResult(
}
TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResultForEntry(
- TNode<Context> context, TNode<Object> key, SloppyTNode<Object> value) {
+ TNode<Context> context, TNode<Object> key, TNode<Object> value) {
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Smi> length = SmiConstant(2);
int const elements_size = FixedArray::SizeFor(2);
@@ -13144,7 +13344,7 @@ TNode<BoolT> CodeStubAssembler::IsFastSmiOrTaggedElementsKind(
}
TNode<BoolT> CodeStubAssembler::IsFastSmiElementsKind(
- SloppyTNode<Int32T> elements_kind) {
+ TNode<Int32T> elements_kind) {
return Uint32LessThanOrEqual(elements_kind,
Int32Constant(HOLEY_SMI_ELEMENTS));
}
@@ -13258,6 +13458,7 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
TNode<Uint16T> data_type = LoadInstanceType(CAST(sfi_data));
int32_t case_values[] = {BYTECODE_ARRAY_TYPE,
+ BASELINE_DATA_TYPE,
WASM_EXPORTED_FUNCTION_DATA_TYPE,
ASM_WASM_DATA_TYPE,
UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE,
@@ -13266,6 +13467,7 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
WASM_JS_FUNCTION_DATA_TYPE,
WASM_CAPI_FUNCTION_DATA_TYPE};
Label check_is_bytecode_array(this);
+ Label check_is_baseline_data(this);
Label check_is_exported_function_data(this);
Label check_is_asm_wasm_data(this);
Label check_is_uncompiled_data_without_preparse_data(this);
@@ -13275,6 +13477,7 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
Label check_is_wasm_js_function_data(this);
Label check_is_wasm_capi_function_data(this);
Label* case_labels[] = {&check_is_bytecode_array,
+ &check_is_baseline_data,
&check_is_exported_function_data,
&check_is_asm_wasm_data,
&check_is_uncompiled_data_without_preparse_data,
@@ -13291,6 +13494,14 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
sfi_code = HeapConstant(BUILTIN_CODE(isolate(), InterpreterEntryTrampoline));
Goto(&done);
+ // IsBaselineData: Execute baseline code
+ BIND(&check_is_baseline_data);
+ TNode<BaselineData> baseline_data = CAST(sfi_data);
+ TNode<Code> baseline_code =
+ CAST(LoadObjectField(baseline_data, BaselineData::kBaselineCodeOffset));
+ sfi_code = baseline_code;
+ Goto(&done);
+
// IsWasmExportedFunctionData: Use the wrapper code
BIND(&check_is_exported_function_data);
sfi_code = CAST(LoadObjectField(
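Nearly every signature change in this file (and in the header below) replaces SloppyTNode<T> with TNode<T>. As a rough mental model, sketched here with made-up miniature types rather than the real V8 templates, a SloppyTNode parameter accepts an untyped compiler node implicitly, while a TNode parameter forces the caller to spell out the type with an explicit cast, so the tightening moves each type assertion from the callee to its call sites.
// Hypothetical miniature of the typed/untyped node split; not the actual V8 classes.
struct Node {};
template <typename T>
struct TNode {
  explicit TNode(Node* n) : node(n) {}   // explicit: the caller must cast
  Node* node;
};
template <typename T>
struct SloppyTNode : TNode<T> {
  SloppyTNode(Node* n) : TNode<T>(n) {}  // implicit: untyped nodes accepted silently
};
void TakesSloppy(SloppyTNode<int> v) { (void)v; }
void TakesStrict(TNode<int> v) { (void)v; }
int main() {
  Node raw;
  TakesSloppy(&raw);                // compiles: Node* converts implicitly
  // TakesStrict(&raw);             // would not compile without an explicit cast
  TakesStrict(TNode<int>(&raw));    // the cast is now visible at the call site
  return 0;
}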
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index e1c423cfab..03af2cc5e2 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -21,6 +21,7 @@
#include "src/objects/promise.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/smi.h"
+#include "src/objects/swiss-name-dictionary.h"
#include "src/objects/tagged-index.h"
#include "src/roots/roots.h"
#include "torque-generated/exported-macros-assembler.h"
@@ -188,6 +189,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(regexp_to_string, regexp_to_string, RegexpToString) \
V(resolve_string, resolve_string, ResolveString) \
V(return_string, return_string, ReturnString) \
+ V(search_symbol, search_symbol, SearchSymbol) \
V(species_symbol, species_symbol, SpeciesSymbol) \
V(StaleRegister, stale_register, StaleRegister) \
V(StoreHandler0Map, store_handler0_map, StoreHandler0Map) \
@@ -294,13 +296,6 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
#define CSA_SLOW_ASSERT(csa, ...) ((void)0)
#endif
-// Provides a constexpr boolean to be used inside Torque.
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
-constexpr bool kNoArgumentsAdaptor = true;
-#else
-constexpr bool kNoArgumentsAdaptor = false;
-#endif
-
// Provides JavaScript-specific "macro-assembler" functionality on top of the
// CodeAssembler. By factoring the JavaScript-isms out of the CodeAssembler,
// it's possible to add JavaScript-specific useful CodeAssembler "macros"
@@ -449,6 +444,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return CAST(heap_object);
}
+ template <typename T>
+ TNode<T> RunLazy(LazyNode<T> lazy) {
+ return lazy();
+ }
+
#define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \
TNode<Smi> OpName(TNode<Smi> a, TNode<Smi> b) { return SmiOpName(a, b); } \
TNode<IntPtrT> OpName(TNode<IntPtrT> a, TNode<IntPtrT> b) { \
@@ -525,8 +525,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
#undef HEAP_CONSTANT_ACCESSOR
#define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \
- TNode<BoolT> Is##name(SloppyTNode<Object> value); \
- TNode<BoolT> IsNot##name(SloppyTNode<Object> value);
+ TNode<BoolT> Is##name(TNode<Object> value); \
+ TNode<BoolT> IsNot##name(TNode<Object> value);
HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST)
#undef HEAP_CONSTANT_TEST
@@ -542,19 +542,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Round the 32-bit payload of the provided word up to the next power of two.
TNode<IntPtrT> IntPtrRoundUpToPowerOfTwo32(TNode<IntPtrT> value);
// Select the maximum of the two provided IntPtr values.
- TNode<IntPtrT> IntPtrMax(SloppyTNode<IntPtrT> left,
- SloppyTNode<IntPtrT> right);
+ TNode<IntPtrT> IntPtrMax(TNode<IntPtrT> left, TNode<IntPtrT> right);
// Select the minimum of the two provided IntPtr values.
- TNode<IntPtrT> IntPtrMin(SloppyTNode<IntPtrT> left,
- SloppyTNode<IntPtrT> right);
+ TNode<IntPtrT> IntPtrMin(TNode<IntPtrT> left, TNode<IntPtrT> right);
TNode<UintPtrT> UintPtrMin(TNode<UintPtrT> left, TNode<UintPtrT> right);
// Float64 operations.
- TNode<Float64T> Float64Ceil(SloppyTNode<Float64T> x);
- TNode<Float64T> Float64Floor(SloppyTNode<Float64T> x);
- TNode<Float64T> Float64Round(SloppyTNode<Float64T> x);
- TNode<Float64T> Float64RoundToEven(SloppyTNode<Float64T> x);
- TNode<Float64T> Float64Trunc(SloppyTNode<Float64T> x);
+ TNode<Float64T> Float64Ceil(TNode<Float64T> x);
+ TNode<Float64T> Float64Floor(TNode<Float64T> x);
+ TNode<Float64T> Float64Round(TNode<Float64T> x);
+ TNode<Float64T> Float64RoundToEven(TNode<Float64T> x);
+ TNode<Float64T> Float64Trunc(TNode<Float64T> x);
// Select the maximum of the two provided Number values.
TNode<Number> NumberMax(TNode<Number> left, TNode<Number> right);
// Select the minimum of the two provided Number values.
@@ -564,17 +562,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsValidPositiveSmi(TNode<IntPtrT> value);
// Tag an IntPtr as a Smi value.
- TNode<Smi> SmiTag(SloppyTNode<IntPtrT> value);
+ TNode<Smi> SmiTag(TNode<IntPtrT> value);
// Untag a Smi value as an IntPtr.
- TNode<IntPtrT> SmiUntag(SloppyTNode<Smi> value);
+ TNode<IntPtrT> SmiUntag(TNode<Smi> value);
// Smi conversions.
- TNode<Float64T> SmiToFloat64(SloppyTNode<Smi> value);
- TNode<Smi> SmiFromIntPtr(SloppyTNode<IntPtrT> value) { return SmiTag(value); }
+ TNode<Float64T> SmiToFloat64(TNode<Smi> value);
+ TNode<Smi> SmiFromIntPtr(TNode<IntPtrT> value) { return SmiTag(value); }
TNode<Smi> SmiFromInt32(SloppyTNode<Int32T> value);
TNode<Smi> SmiFromUint32(TNode<Uint32T> value);
- TNode<IntPtrT> SmiToIntPtr(SloppyTNode<Smi> value) { return SmiUntag(value); }
- TNode<Int32T> SmiToInt32(SloppyTNode<Smi> value);
+ TNode<IntPtrT> SmiToIntPtr(TNode<Smi> value) { return SmiUntag(value); }
+ TNode<Int32T> SmiToInt32(TNode<Smi> value);
// Smi operations.
#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName) \
@@ -887,18 +885,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
single_char[0]));
}
- TNode<Int32T> TruncateWordToInt32(SloppyTNode<WordT> value);
- TNode<Int32T> TruncateIntPtrToInt32(SloppyTNode<IntPtrT> value);
+ TNode<Int32T> TruncateWordToInt32(TNode<WordT> value);
+ TNode<Int32T> TruncateIntPtrToInt32(TNode<IntPtrT> value);
// Check a value for smi-ness
TNode<BoolT> TaggedIsSmi(TNode<MaybeObject> a);
TNode<BoolT> TaggedIsNotSmi(TNode<MaybeObject> a);
// Check that the value is a non-negative smi.
- TNode<BoolT> TaggedIsPositiveSmi(SloppyTNode<Object> a);
+ TNode<BoolT> TaggedIsPositiveSmi(TNode<Object> a);
// Check that a word has a word-aligned address.
- TNode<BoolT> WordIsAligned(SloppyTNode<WordT> word, size_t alignment);
- TNode<BoolT> WordIsPowerOfTwo(SloppyTNode<IntPtrT> value);
+ TNode<BoolT> WordIsAligned(TNode<WordT> word, size_t alignment);
+ TNode<BoolT> WordIsPowerOfTwo(TNode<IntPtrT> value);
// Check if lower_limit <= value <= higher_limit.
template <typename U>
@@ -949,17 +947,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Branches to {if_true} if ToBoolean applied to {value} yields true,
// otherwise goes to {if_false}.
- void BranchIfToBooleanIsTrue(SloppyTNode<Object> value, Label* if_true,
+ void BranchIfToBooleanIsTrue(TNode<Object> value, Label* if_true,
Label* if_false);
// Branches to {if_false} if ToBoolean applied to {value} yields false,
// otherwise goes to {if_true}.
- void BranchIfToBooleanIsFalse(SloppyTNode<Object> value, Label* if_false,
+ void BranchIfToBooleanIsFalse(TNode<Object> value, Label* if_false,
Label* if_true) {
BranchIfToBooleanIsTrue(value, if_true, if_false);
}
- void BranchIfJSReceiver(SloppyTNode<Object> object, Label* if_true,
+ void BranchIfJSReceiver(TNode<Object> object, Label* if_true,
Label* if_false);
// Branches to {if_true} when --force-slow-path flag has been passed.
@@ -1274,7 +1272,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Int32T> instance_type,
Label* bailout);
// Load the identity hash of a JSReceiver.
- TNode<IntPtrT> LoadJSReceiverIdentityHash(SloppyTNode<Object> receiver,
+ TNode<IntPtrT> LoadJSReceiverIdentityHash(TNode<Object> receiver,
Label* if_no_hash = nullptr);
// This is only used on a newly allocated PropertyArray which
@@ -1397,7 +1395,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
TNode<Object> LoadPropertyArrayElement(TNode<PropertyArray> object,
- SloppyTNode<IntPtrT> index);
+ TNode<IntPtrT> index);
TNode<IntPtrT> LoadPropertyArrayLength(TNode<PropertyArray> object);
// Load an element from an array, untag it, and return it as Word32.
@@ -1461,9 +1459,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Int32T> elements_kind);
// Parts of the above, factored out for readability:
TNode<BigInt> LoadFixedBigInt64ArrayElementAsTagged(
- SloppyTNode<RawPtrT> data_pointer, SloppyTNode<IntPtrT> offset);
+ TNode<RawPtrT> data_pointer, TNode<IntPtrT> offset);
TNode<BigInt> LoadFixedBigUint64ArrayElementAsTagged(
- SloppyTNode<RawPtrT> data_pointer, SloppyTNode<IntPtrT> offset);
+ TNode<RawPtrT> data_pointer, TNode<IntPtrT> offset);
// 64-bit platforms only:
TNode<BigInt> BigIntFromInt64(TNode<IntPtrT> value);
TNode<BigInt> BigIntFromUint64(TNode<UintPtrT> value);
@@ -1477,12 +1475,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Context manipulation:
void StoreContextElementNoWriteBarrier(TNode<Context> context, int slot_index,
- SloppyTNode<Object> value);
+ TNode<Object> value);
TNode<NativeContext> LoadNativeContext(TNode<Context> context);
// Calling this is only valid if there's a module context in the chain.
TNode<Context> LoadModuleContext(TNode<Context> context);
- void GotoIfContextElementEqual(SloppyTNode<Object> value,
+ TNode<Object> GetImportMetaObject(TNode<Context> context);
+
+ void GotoIfContextElementEqual(TNode<Object> value,
TNode<NativeContext> native_context,
int slot_index, Label* if_equal) {
GotoIf(TaggedEqual(value, LoadContextElement(native_context, slot_index)),
@@ -1496,7 +1496,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Map> LoadJSArrayElementsMap(ElementsKind kind,
TNode<NativeContext> native_context);
- TNode<Map> LoadJSArrayElementsMap(SloppyTNode<Int32T> kind,
+ TNode<Map> LoadJSArrayElementsMap(TNode<Int32T> kind,
TNode<NativeContext> native_context);
TNode<BoolT> IsJSFunctionWithPrototypeSlot(TNode<HeapObject> object);
@@ -1517,8 +1517,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Word32T> value);
// Store the floating point value of a HeapNumber.
- void StoreHeapNumberValue(SloppyTNode<HeapNumber> object,
- SloppyTNode<Float64T> value);
+ void StoreHeapNumberValue(TNode<HeapNumber> object, TNode<Float64T> value);
+
// Store a field to an object on the heap.
void StoreObjectField(TNode<HeapObject> object, int offset,
TNode<Object> value);
@@ -1526,8 +1526,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> value);
template <class T>
void StoreObjectFieldNoWriteBarrier(TNode<HeapObject> object,
- SloppyTNode<IntPtrT> offset,
- TNode<T> value) {
+ TNode<IntPtrT> offset, TNode<T> value) {
int const_offset;
if (TryToInt32Constant(offset, &const_offset)) {
return StoreObjectFieldNoWriteBarrier<T>(object, const_offset, value);
@@ -1558,37 +1557,27 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void StoreMapNoWriteBarrier(TNode<HeapObject> object, TNode<Map> map);
void StoreObjectFieldRoot(TNode<HeapObject> object, int offset,
RootIndex root);
+
// Store an array element to a FixedArray.
void StoreFixedArrayElement(
- TNode<FixedArray> object, int index, SloppyTNode<Object> value,
+ TNode<FixedArray> object, int index, TNode<Object> value,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
CheckBounds check_bounds = CheckBounds::kAlways) {
return StoreFixedArrayElement(object, IntPtrConstant(index), value,
barrier_mode, 0, check_bounds);
}
- // This doesn't emit a bounds-check. As part of the security-performance
- // tradeoff, only use it if it is performance critical.
- void UnsafeStoreFixedArrayElement(
- TNode<FixedArray> object, int index, TNode<Object> value,
- WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) {
- return StoreFixedArrayElement(object, index, value, barrier_mode,
- CheckBounds::kDebugOnly);
- }
- void UnsafeStoreFixedArrayElement(TNode<FixedArray> object, int index,
- TNode<Smi> value) {
- return StoreFixedArrayElement(object, index, value,
- UNSAFE_SKIP_WRITE_BARRIER,
- CheckBounds::kDebugOnly);
- }
+
void StoreFixedArrayElement(TNode<FixedArray> object, int index,
TNode<Smi> value,
CheckBounds check_bounds = CheckBounds::kAlways) {
- return StoreFixedArrayElement(object, IntPtrConstant(index), value,
+ return StoreFixedArrayElement(object, IntPtrConstant(index),
+ TNode<Object>{value},
UNSAFE_SKIP_WRITE_BARRIER, 0, check_bounds);
}
+
template <typename TIndex>
void StoreFixedArrayElement(
- TNode<FixedArray> array, TNode<TIndex> index, SloppyTNode<Object> value,
+ TNode<FixedArray> array, TNode<TIndex> index, TNode<Object> value,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
int additional_offset = 0,
CheckBounds check_bounds = CheckBounds::kAlways) {
@@ -1603,9 +1592,34 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
StoreFixedArrayOrPropertyArrayElement(array, index, value, barrier_mode,
additional_offset);
}
- // This doesn't emit a bounds-check. As part of the security-performance
+
+ template <typename TIndex>
+ void StoreFixedArrayElement(TNode<FixedArray> array, TNode<TIndex> index,
+ TNode<Smi> value, int additional_offset = 0) {
+ static_assert(std::is_same<TIndex, Smi>::value ||
+ std::is_same<TIndex, IntPtrT>::value,
+ "Only Smi or IntPtrT indeces is allowed");
+ StoreFixedArrayElement(array, index, TNode<Object>{value},
+ UNSAFE_SKIP_WRITE_BARRIER, additional_offset);
+ }
+
+ // These don't emit a bounds-check. As part of the security-performance
+ // tradeoff, only use them if performance is critical.
void UnsafeStoreFixedArrayElement(
+ TNode<FixedArray> object, int index, TNode<Object> value,
+ WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) {
+ return StoreFixedArrayElement(object, IntPtrConstant(index), value,
+ barrier_mode, 0, CheckBounds::kDebugOnly);
+ }
+
+ void UnsafeStoreFixedArrayElement(TNode<FixedArray> object, int index,
+ TNode<Smi> value) {
+ return StoreFixedArrayElement(object, IntPtrConstant(index), value,
+ UNSAFE_SKIP_WRITE_BARRIER, 0,
+ CheckBounds::kDebugOnly);
+ }
+
+ void UnsafeStoreFixedArrayElement(
TNode<FixedArray> array, TNode<IntPtrT> index, TNode<Object> value,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
int additional_offset = 0) {
@@ -1627,28 +1641,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
UPDATE_WRITE_BARRIER);
}
- void StoreFixedArrayElement(
- TNode<FixedArray> array, TNode<Smi> index, TNode<Object> value,
- WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) {
- StoreFixedArrayElement(array, index, value, barrier_mode, 0);
- }
- void StoreFixedArrayElement(
- TNode<FixedArray> array, TNode<IntPtrT> index, TNode<Smi> value,
- WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER,
- int additional_offset = 0) {
- DCHECK_EQ(SKIP_WRITE_BARRIER, barrier_mode);
- StoreFixedArrayElement(array, index, TNode<Object>{value},
- UNSAFE_SKIP_WRITE_BARRIER, additional_offset);
- }
- void StoreFixedArrayElement(
- TNode<FixedArray> array, TNode<Smi> index, TNode<Smi> value,
- WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER,
- int additional_offset = 0) {
- DCHECK_EQ(SKIP_WRITE_BARRIER, barrier_mode);
- StoreFixedArrayElement(array, index, TNode<Object>{value},
- UNSAFE_SKIP_WRITE_BARRIER, additional_offset);
- }
-
template <typename TIndex>
void StoreFixedDoubleArrayElement(
TNode<FixedDoubleArray> object, TNode<TIndex> index,
@@ -1781,12 +1773,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
base::Optional<TNode<FixedArray>> elements = base::nullopt,
SlackTrackingMode slack_tracking_mode = kNoSlackTracking);
- void InitializeJSObjectBodyWithSlackTracking(
- TNode<HeapObject> object, TNode<Map> map,
- SloppyTNode<IntPtrT> instance_size);
+ void InitializeJSObjectBodyWithSlackTracking(TNode<HeapObject> object,
+ TNode<Map> map,
+ TNode<IntPtrT> instance_size);
void InitializeJSObjectBodyNoSlackTracking(
- TNode<HeapObject> object, TNode<Map> map,
- SloppyTNode<IntPtrT> instance_size,
+ TNode<HeapObject> object, TNode<Map> map, TNode<IntPtrT> instance_size,
int start_offset = JSObject::kHeaderSize);
TNode<BoolT> IsValidFastJSArrayCapacity(TNode<IntPtrT> capacity);
@@ -1907,13 +1898,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// TODO(v8:9722): Return type should be JSIteratorResult
TNode<JSObject> AllocateJSIteratorResult(TNode<Context> context,
- SloppyTNode<Object> value,
- SloppyTNode<Oddball> done);
+ TNode<Object> value,
+ TNode<Oddball> done);
// TODO(v8:9722): Return type should be JSIteratorResult
TNode<JSObject> AllocateJSIteratorResultForEntry(TNode<Context> context,
TNode<Object> key,
- SloppyTNode<Object> value);
+ TNode<Object> value);
TNode<JSReceiver> ArraySpeciesCreate(TNode<Context> context,
TNode<Object> originalArray,
@@ -2173,10 +2164,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// (NOTE: not index!), does a hole check if |if_hole| is provided and
// converts the value so that it becomes ready for storing to array of
// |to_kind| elements.
- Node* LoadElementAndPrepareForStore(TNode<FixedArrayBase> array,
- TNode<IntPtrT> offset,
- ElementsKind from_kind,
- ElementsKind to_kind, Label* if_hole);
+ template <typename TResult>
+ TNode<TResult> LoadElementAndPrepareForStore(TNode<FixedArrayBase> array,
+ TNode<IntPtrT> offset,
+ ElementsKind from_kind,
+ ElementsKind to_kind,
+ Label* if_hole);
template <typename TIndex>
TNode<TIndex> CalculateNewElementsCapacity(TNode<TIndex> old_capacity);
@@ -2222,9 +2215,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Float64T> TryTaggedToFloat64(TNode<Object> value,
Label* if_valueisnotnumber);
TNode<Float64T> TruncateTaggedToFloat64(TNode<Context> context,
- SloppyTNode<Object> value);
+ TNode<Object> value);
TNode<Word32T> TruncateTaggedToWord32(TNode<Context> context,
- SloppyTNode<Object> value);
+ TNode<Object> value);
void TaggedToWord32OrBigInt(TNode<Context> context, TNode<Object> value,
Label* if_number, TVariable<Word32T>* var_word32,
Label* if_bigint,
@@ -2248,7 +2241,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void TryFloat64ToSmi(TNode<Float64T> number, TVariable<Smi>* output,
Label* if_smi);
TNode<Number> ChangeFloat32ToTagged(TNode<Float32T> value);
- TNode<Number> ChangeFloat64ToTagged(SloppyTNode<Float64T> value);
+ TNode<Number> ChangeFloat64ToTagged(TNode<Float64T> value);
TNode<Number> ChangeInt32ToTagged(SloppyTNode<Int32T> value);
TNode<Number> ChangeUint32ToTagged(SloppyTNode<Uint32T> value);
TNode<Number> ChangeUintPtrToTagged(TNode<UintPtrT> value);
@@ -2266,7 +2259,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TVariable<Numeric>* var_numeric,
TVariable<Smi>* var_feedback);
- TNode<WordT> TimesSystemPointerSize(SloppyTNode<WordT> value);
+ TNode<WordT> TimesSystemPointerSize(TNode<WordT> value);
TNode<IntPtrT> TimesSystemPointerSize(TNode<IntPtrT> value) {
return Signed(TimesSystemPointerSize(implicit_cast<TNode<WordT>>(value)));
}
@@ -2274,7 +2267,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return Unsigned(TimesSystemPointerSize(implicit_cast<TNode<WordT>>(value)));
}
- TNode<WordT> TimesTaggedSize(SloppyTNode<WordT> value);
+ TNode<WordT> TimesTaggedSize(TNode<WordT> value);
TNode<IntPtrT> TimesTaggedSize(TNode<IntPtrT> value) {
return Signed(TimesTaggedSize(implicit_cast<TNode<WordT>>(value)));
}
@@ -2282,7 +2275,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return Unsigned(TimesTaggedSize(implicit_cast<TNode<WordT>>(value)));
}
- TNode<WordT> TimesDoubleSize(SloppyTNode<WordT> value);
+ TNode<WordT> TimesDoubleSize(TNode<WordT> value);
TNode<UintPtrT> TimesDoubleSize(TNode<UintPtrT> value) {
return Unsigned(TimesDoubleSize(implicit_cast<TNode<WordT>>(value)));
}
@@ -2333,16 +2326,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Type checks.
// Check whether the map is for an object with special properties, such as a
// JSProxy or an object with interceptors.
- TNode<BoolT> InstanceTypeEqual(SloppyTNode<Int32T> instance_type, int type);
+ TNode<BoolT> InstanceTypeEqual(TNode<Int32T> instance_type, int type);
TNode<BoolT> IsNoElementsProtectorCellInvalid();
TNode<BoolT> IsArrayIteratorProtectorCellInvalid();
- TNode<BoolT> IsBigIntInstanceType(SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsBigIntInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsBigInt(TNode<HeapObject> object);
TNode<BoolT> IsBoolean(TNode<HeapObject> object);
TNode<BoolT> IsCallableMap(TNode<Map> map);
TNode<BoolT> IsCallable(TNode<HeapObject> object);
TNode<BoolT> TaggedIsCallable(TNode<Object> object);
- TNode<BoolT> IsConsStringInstanceType(SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsConsStringInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsConstructorMap(TNode<Map> map);
TNode<BoolT> IsConstructor(TNode<HeapObject> object);
TNode<BoolT> IsDeprecatedMap(TNode<Map> map);
@@ -2351,7 +2344,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsGlobalDictionary(TNode<HeapObject> object);
TNode<BoolT> IsExtensibleMap(TNode<Map> map);
TNode<BoolT> IsExtensibleNonPrototypeMap(TNode<Map> map);
- TNode<BoolT> IsExternalStringInstanceType(SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsExternalStringInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsFixedArray(TNode<HeapObject> object);
TNode<BoolT> IsFixedArraySubclass(TNode<HeapObject> object);
TNode<BoolT> IsFixedArrayWithKind(TNode<HeapObject> object,
@@ -2361,26 +2354,27 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsFunctionWithPrototypeSlotMap(TNode<Map> map);
TNode<BoolT> IsHashTable(TNode<HeapObject> object);
TNode<BoolT> IsEphemeronHashTable(TNode<HeapObject> object);
- TNode<BoolT> IsHeapNumberInstanceType(SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsHeapNumberInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsOddball(TNode<HeapObject> object);
- TNode<BoolT> IsOddballInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsIndirectStringInstanceType(SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsOddballInstanceType(TNode<Int32T> instance_type);
+ TNode<BoolT> IsIndirectStringInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsJSArrayBuffer(TNode<HeapObject> object);
TNode<BoolT> IsJSDataView(TNode<HeapObject> object);
- TNode<BoolT> IsJSArrayInstanceType(SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsJSArrayInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsJSArrayMap(TNode<Map> map);
TNode<BoolT> IsJSArray(TNode<HeapObject> object);
TNode<BoolT> IsJSArrayIterator(TNode<HeapObject> object);
TNode<BoolT> IsJSAsyncGeneratorObject(TNode<HeapObject> object);
- TNode<BoolT> IsJSFunctionInstanceType(SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsFunctionInstanceType(TNode<Int32T> instance_type);
+ TNode<BoolT> IsJSFunctionInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsJSFunctionMap(TNode<Map> map);
TNode<BoolT> IsJSFunction(TNode<HeapObject> object);
TNode<BoolT> IsJSBoundFunction(TNode<HeapObject> object);
TNode<BoolT> IsJSGeneratorObject(TNode<HeapObject> object);
- TNode<BoolT> IsJSGlobalProxyInstanceType(SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsJSGlobalProxyInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsJSGlobalProxyMap(TNode<Map> map);
TNode<BoolT> IsJSGlobalProxy(TNode<HeapObject> object);
- TNode<BoolT> IsJSObjectInstanceType(SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsJSObjectInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsJSObjectMap(TNode<Map> map);
TNode<BoolT> IsJSObject(TNode<HeapObject> object);
TNode<BoolT> IsJSFinalizationRegistryMap(TNode<Map> map);
@@ -2390,28 +2384,27 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsJSProxy(TNode<HeapObject> object);
TNode<BoolT> IsJSStringIterator(TNode<HeapObject> object);
TNode<BoolT> IsJSRegExpStringIterator(TNode<HeapObject> object);
- TNode<BoolT> IsJSReceiverInstanceType(SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsJSReceiverInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsJSReceiverMap(TNode<Map> map);
TNode<BoolT> IsJSReceiver(TNode<HeapObject> object);
TNode<BoolT> IsJSRegExp(TNode<HeapObject> object);
- TNode<BoolT> IsJSTypedArrayInstanceType(SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsJSTypedArrayInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsJSTypedArrayMap(TNode<Map> map);
TNode<BoolT> IsJSTypedArray(TNode<HeapObject> object);
TNode<BoolT> IsJSGeneratorMap(TNode<Map> map);
- TNode<BoolT> IsJSPrimitiveWrapperInstanceType(
- SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsJSPrimitiveWrapperInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsJSPrimitiveWrapperMap(TNode<Map> map);
TNode<BoolT> IsJSPrimitiveWrapper(TNode<HeapObject> object);
TNode<BoolT> IsMap(TNode<HeapObject> object);
TNode<BoolT> IsName(TNode<HeapObject> object);
- TNode<BoolT> IsNameInstanceType(SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsNameInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsNullOrJSReceiver(TNode<HeapObject> object);
- TNode<BoolT> IsNullOrUndefined(SloppyTNode<Object> object);
+ TNode<BoolT> IsNullOrUndefined(TNode<Object> object);
TNode<BoolT> IsNumberDictionary(TNode<HeapObject> object);
TNode<BoolT> IsOneByteStringInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsSeqOneByteStringInstanceType(TNode<Int32T> instance_type);
- TNode<BoolT> IsPrimitiveInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsPrivateName(SloppyTNode<Symbol> symbol);
+ TNode<BoolT> IsPrimitiveInstanceType(TNode<Int32T> instance_type);
+ TNode<BoolT> IsPrivateName(TNode<Symbol> symbol);
TNode<BoolT> IsPropertyArray(TNode<HeapObject> object);
TNode<BoolT> IsPropertyCell(TNode<HeapObject> object);
TNode<BoolT> IsPromiseReactionJobTask(TNode<HeapObject> object);
@@ -2427,26 +2420,25 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsSloppyArgumentsMap(TNode<Context> context, TNode<Map> map);
TNode<BoolT> IsStrictArgumentsMap(TNode<Context> context, TNode<Map> map);
- TNode<BoolT> IsSequentialStringInstanceType(
- SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsSequentialStringInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsUncachedExternalStringInstanceType(
- SloppyTNode<Int32T> instance_type);
+ TNode<Int32T> instance_type);
TNode<BoolT> IsSpecialReceiverInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsCustomElementsReceiverInstanceType(
TNode<Int32T> instance_type);
TNode<BoolT> IsSpecialReceiverMap(TNode<Map> map);
- TNode<BoolT> IsStringInstanceType(SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsStringInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsString(TNode<HeapObject> object);
TNode<BoolT> IsSeqOneByteString(TNode<HeapObject> object);
- TNode<BoolT> IsSymbolInstanceType(SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsSymbolInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsInternalizedStringInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsUniqueName(TNode<HeapObject> object);
TNode<BoolT> IsUniqueNameNoIndex(TNode<HeapObject> object);
TNode<BoolT> IsUniqueNameNoCachedIndex(TNode<HeapObject> object);
TNode<BoolT> IsUndetectableMap(TNode<Map> map);
TNode<BoolT> IsNotWeakFixedArraySubclass(TNode<HeapObject> object);
- TNode<BoolT> IsZeroOrContext(SloppyTNode<Object> object);
+ TNode<BoolT> IsZeroOrContext(TNode<Object> object);
TNode<BoolT> IsPromiseResolveProtectorCellInvalid();
TNode<BoolT> IsPromiseThenProtectorCellInvalid();
@@ -2473,7 +2465,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
// True iff |object| is a Smi or a HeapNumber or a BigInt.
- TNode<BoolT> IsNumeric(SloppyTNode<Object> object);
+ TNode<BoolT> IsNumeric(TNode<Object> object);
// True iff |number| is either a Smi, or a HeapNumber whose value is not
// within Smi range.
@@ -2525,7 +2517,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return v8::internal::IsDoubleElementsKind(kind);
}
TNode<BoolT> IsFastSmiOrTaggedElementsKind(TNode<Int32T> elements_kind);
- TNode<BoolT> IsFastSmiElementsKind(SloppyTNode<Int32T> elements_kind);
+ TNode<BoolT> IsFastSmiElementsKind(TNode<Int32T> elements_kind);
TNode<BoolT> IsHoleyFastElementsKind(TNode<Int32T> elements_kind);
TNode<BoolT> IsHoleyFastElementsKindForRead(TNode<Int32T> elements_kind);
TNode<BoolT> IsElementsKindGreaterThan(TNode<Int32T> target_kind,
@@ -2565,10 +2557,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// With {bigint_handling} == kConvertToNumber, matches behavior of
// tc39.github.io/proposal-bigint/#sec-number-constructor-number-value.
TNode<Number> ToNumber(
- TNode<Context> context, SloppyTNode<Object> input,
+ TNode<Context> context, TNode<Object> input,
+ BigIntHandling bigint_handling = BigIntHandling::kThrow);
+ TNode<Number> ToNumber_Inline(TNode<Context> context, TNode<Object> input);
+ TNode<Numeric> ToNumberOrNumeric(
+ LazyNode<Context> context, TNode<Object> input,
+ TVariable<Smi>* var_type_feedback, Object::Conversion mode,
BigIntHandling bigint_handling = BigIntHandling::kThrow);
- TNode<Number> ToNumber_Inline(TNode<Context> context,
- SloppyTNode<Object> input);
// Convert any plain primitive to a Number. No need to handle BigInts since
// they are not plain primitives.
TNode<Number> PlainPrimitiveToNumber(TNode<Object> input);
@@ -2580,13 +2575,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Converts |input| to one of 2^32 integer values in the range 0 through
// 2^32-1, inclusive.
// ES#sec-touint32
- TNode<Number> ToUint32(TNode<Context> context, SloppyTNode<Object> input);
+ TNode<Number> ToUint32(TNode<Context> context, TNode<Object> input);
// Convert any object to a String.
- TNode<String> ToString_Inline(TNode<Context> context,
- SloppyTNode<Object> input);
+ TNode<String> ToString_Inline(TNode<Context> context, TNode<Object> input);
- TNode<JSReceiver> ToObject(TNode<Context> context, SloppyTNode<Object> input);
+ TNode<JSReceiver> ToObject(TNode<Context> context, TNode<Object> input);
// Same as ToObject but avoids the Builtin call if |input| is already a
// JSReceiver.
@@ -2594,8 +2588,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> input);
// ES6 7.1.15 ToLength, but with inlined fast path.
- TNode<Number> ToLength_Inline(TNode<Context> context,
- SloppyTNode<Object> input);
+ TNode<Number> ToLength_Inline(TNode<Context> context, TNode<Object> input);
TNode<Object> OrdinaryToPrimitive(TNode<Context> context, TNode<Object> input,
OrdinaryToPrimitiveHint hint);
@@ -2610,7 +2603,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |BitField| in |word|. Returns result as a word-size node.
template <typename BitField>
- TNode<UintPtrT> DecodeWord(SloppyTNode<WordT> word) {
+ TNode<UintPtrT> DecodeWord(TNode<WordT> word) {
return DecodeWord(word, BitField::kShift, BitField::kMask);
}
@@ -2624,7 +2617,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |BitField| in |word|. Returns result as an uint32 node.
template <typename BitField>
- TNode<Uint32T> DecodeWord32FromWord(SloppyTNode<WordT> word) {
+ TNode<Uint32T> DecodeWord32FromWord(TNode<WordT> word) {
return UncheckedCast<Uint32T>(
TruncateIntPtrToInt32(Signed(DecodeWord<BitField>(word))));
}
@@ -2634,8 +2627,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
uint32_t mask);
// Decodes an unsigned (!) value from |word| to a word-size node.
- TNode<UintPtrT> DecodeWord(SloppyTNode<WordT> word, uint32_t shift,
- uintptr_t mask);
+ TNode<UintPtrT> DecodeWord(TNode<WordT> word, uint32_t shift, uintptr_t mask);
// Returns a node that contains the updated values of a |BitField|.
template <typename BitField>
@@ -2718,18 +2710,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Returns true if any of the |T|'s bits in given |word| are set.
template <typename T>
- TNode<BoolT> IsSetWord(SloppyTNode<WordT> word) {
+ TNode<BoolT> IsSetWord(TNode<WordT> word) {
return IsSetWord(word, T::kMask);
}
// Returns true if any of the mask's bits in given |word| are set.
- TNode<BoolT> IsSetWord(SloppyTNode<WordT> word, uint32_t mask) {
+ TNode<BoolT> IsSetWord(TNode<WordT> word, uint32_t mask) {
return WordNotEqual(WordAnd(word, IntPtrConstant(mask)), IntPtrConstant(0));
}
// Returns true if any of the mask's bits are set in the given Smi.
// Smi-encoding of the mask is performed implicitly!
- TNode<BoolT> IsSetSmi(SloppyTNode<Smi> smi, int untagged_mask) {
+ TNode<BoolT> IsSetSmi(TNode<Smi> smi, int untagged_mask) {
intptr_t mask_word = bit_cast<intptr_t>(Smi::FromInt(untagged_mask));
return WordNotEqual(WordAnd(BitcastTaggedToWordForTagAndSmiBits(smi),
IntPtrConstant(mask_word)),
@@ -2750,12 +2742,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Returns true if all of the |T|'s bits in given |word| are clear.
template <typename T>
- TNode<BoolT> IsClearWord(SloppyTNode<WordT> word) {
+ TNode<BoolT> IsClearWord(TNode<WordT> word) {
return IsClearWord(word, T::kMask);
}
// Returns true if all of the mask's bits in given |word| are clear.
- TNode<BoolT> IsClearWord(SloppyTNode<WordT> word, uint32_t mask) {
+ TNode<BoolT> IsClearWord(TNode<WordT> word, uint32_t mask) {
return IntPtrEqual(WordAnd(word, IntPtrConstant(mask)), IntPtrConstant(0));
}
@@ -2782,7 +2774,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Note: If |key| does not yet have a hash, |if_notinternalized| will be taken
// even if |key| is an array index. |if_keyisunique| will never
// be taken for array indices.
- void TryToName(SloppyTNode<Object> key, Label* if_keyisindex,
+ void TryToName(TNode<Object> key, Label* if_keyisindex,
TVariable<IntPtrT>* var_index, Label* if_keyisunique,
TVariable<Name>* var_unique, Label* if_bailout,
Label* if_notinternalized = nullptr);
@@ -2858,8 +2850,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
const int kKeyToDetailsOffset =
(ContainerType::kEntryDetailsIndex - ContainerType::kEntryKeyIndex) *
kTaggedSize;
- StoreFixedArrayElement(container, key_index, details, SKIP_WRITE_BARRIER,
- kKeyToDetailsOffset);
+ StoreFixedArrayElement(container, key_index, details, kKeyToDetailsOffset);
}
// Stores the value for the entry with the given key_index.
@@ -2992,14 +2983,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TVariable<Object>* var_raw_value, Label* if_not_found,
Label* if_bailout, GetOwnPropertyMode mode);
- TNode<Object> GetProperty(TNode<Context> context,
- SloppyTNode<Object> receiver, Handle<Name> name) {
+ TNode<Object> GetProperty(TNode<Context> context, TNode<Object> receiver,
+ Handle<Name> name) {
return GetProperty(context, receiver, HeapConstant(name));
}
- TNode<Object> GetProperty(TNode<Context> context,
- SloppyTNode<Object> receiver,
- SloppyTNode<Object> name) {
+ TNode<Object> GetProperty(TNode<Context> context, TNode<Object> receiver,
+ TNode<Object> name) {
return CallBuiltin(Builtins::kGetProperty, context, receiver, name);
}
@@ -3023,15 +3013,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<HeapObject> heap_obj,
Label* if_iteratorundefined);
+ TNode<Object> CreateAsyncFromSyncIterator(TNode<Context> context,
+ TNode<Object> sync_iterator);
+
template <class... TArgs>
- TNode<Object> CallBuiltin(Builtins::Name id, SloppyTNode<Object> context,
+ TNode<Object> CallBuiltin(Builtins::Name id, TNode<Object> context,
TArgs... args) {
return CallStub<Object>(Builtins::CallableFor(isolate(), id), context,
args...);
}
template <class... TArgs>
- void TailCallBuiltin(Builtins::Name id, SloppyTNode<Object> context,
+ void TailCallBuiltin(Builtins::Name id, TNode<Object> context,
TArgs... args) {
return TailCallStub(Builtins::CallableFor(isolate(), id), context, args...);
}
@@ -3070,9 +3063,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Note: this code does not check if the global dictionary points to deleted
// entry! This has to be done by the caller.
void TryLookupProperty(TNode<HeapObject> object, TNode<Map> map,
- SloppyTNode<Int32T> instance_type,
- TNode<Name> unique_name, Label* if_found_fast,
- Label* if_found_dict, Label* if_found_global,
+ TNode<Int32T> instance_type, TNode<Name> unique_name,
+ Label* if_found_fast, Label* if_found_dict,
+ Label* if_found_global,
TVariable<HeapObject>* var_meta_storage,
TVariable<IntPtrT>* var_name_index,
Label* if_not_found, Label* if_bailout);
@@ -3093,8 +3086,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// if_absent if it's known to not exist. To if_not_found if the prototype
// chain needs to be checked. And if_bailout if the lookup is unsupported.
void TryLookupElement(TNode<HeapObject> object, TNode<Map> map,
- SloppyTNode<Int32T> instance_type,
- SloppyTNode<IntPtrT> intptr_index, Label* if_found,
+ TNode<Int32T> instance_type,
+ TNode<IntPtrT> intptr_index, Label* if_found,
Label* if_absent, Label* if_not_found,
Label* if_bailout);
@@ -3132,7 +3125,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> receiver, TNode<Object> object, TNode<Object> key,
const LookupPropertyInHolder& lookup_property_in_holder,
const LookupElementInHolder& lookup_element_in_holder, Label* if_end,
- Label* if_bailout, Label* if_proxy);
+ Label* if_bailout, Label* if_proxy, bool handle_private_names = false);
// Instanceof helpers.
// Returns true if {object} has {prototype} somewhere in its prototype
@@ -3148,6 +3141,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Load type feedback vector from the stub caller's frame.
TNode<FeedbackVector> LoadFeedbackVectorForStub();
+ TNode<FeedbackVector> LoadFeedbackVectorFromBaseline();
+ TNode<Context> LoadContextFromBaseline();
// Load type feedback vector from the stub caller's frame, skipping an
// intermediate trampoline frame.
TNode<FeedbackVector> LoadFeedbackVectorForStubWithTrampoline();
@@ -3168,9 +3163,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<JSFunction> closure);
// Update the type feedback vector.
+ bool UpdateFeedbackModeEqual(UpdateFeedbackMode a, UpdateFeedbackMode b) {
+ return a == b;
+ }
void UpdateFeedback(TNode<Smi> feedback,
TNode<HeapObject> maybe_feedback_vector,
+ TNode<UintPtrT> slot_id, UpdateFeedbackMode mode);
+ void UpdateFeedback(TNode<Smi> feedback,
+ TNode<FeedbackVector> feedback_vector,
TNode<UintPtrT> slot_id);
+ void MaybeUpdateFeedback(TNode<Smi> feedback,
+ TNode<HeapObject> maybe_feedback_vector,
+ TNode<UintPtrT> slot_id);
// Report that there was a feedback update, performing any tasks that should
// be done after a feedback update.
@@ -3190,7 +3194,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// used for a property store or deletion.
void CheckForAssociatedProtector(TNode<Name> name, Label* if_protector);
- TNode<Map> LoadReceiverMap(SloppyTNode<Object> receiver);
+ TNode<Map> LoadReceiverMap(TNode<Object> receiver);
// Loads script context from the script context table.
TNode<Context> LoadScriptContext(TNode<Context> context,
@@ -3319,9 +3323,25 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<IntPtrT> start_offset,
TNode<IntPtrT> end_offset, RootIndex root);
+ // Goto the given |target| if the context chain starting at |context| has any
+ // extensions up to the given |depth|. Returns the Context with the
+ // extensions if there was one, otherwise returns the Context at the given
+ // |depth|.
+ TNode<Context> GotoIfHasContextExtensionUpToDepth(TNode<Context> context,
+ TNode<Uint32T> depth,
+ Label* target);
+
TNode<Oddball> RelationalComparison(
Operation op, TNode<Object> left, TNode<Object> right,
- TNode<Context> context, TVariable<Smi>* var_type_feedback = nullptr);
+ TNode<Context> context, TVariable<Smi>* var_type_feedback = nullptr) {
+ return RelationalComparison(
+ op, left, right, [=]() { return context; }, var_type_feedback);
+ }
+
+ TNode<Oddball> RelationalComparison(
+ Operation op, TNode<Object> left, TNode<Object> right,
+ const LazyNode<Context>& context,
+ TVariable<Smi>* var_type_feedback = nullptr);
void BranchIfNumberRelationalComparison(Operation op, TNode<Number> left,
TNode<Number> right, Label* if_true,
@@ -3371,19 +3391,25 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void GotoIfNumberGreaterThanOrEqual(TNode<Number> left, TNode<Number> right,
Label* if_false);
- TNode<Oddball> Equal(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
+ TNode<Oddball> Equal(TNode<Object> lhs, TNode<Object> rhs,
TNode<Context> context,
+ TVariable<Smi>* var_type_feedback = nullptr) {
+ return Equal(
+ lhs, rhs, [=]() { return context; }, var_type_feedback);
+ }
+ TNode<Oddball> Equal(TNode<Object> lhs, TNode<Object> rhs,
+ const LazyNode<Context>& context,
TVariable<Smi>* var_type_feedback = nullptr);
- TNode<Oddball> StrictEqual(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
+ TNode<Oddball> StrictEqual(TNode<Object> lhs, TNode<Object> rhs,
TVariable<Smi>* var_type_feedback = nullptr);
// ECMA#sec-samevalue
// Similar to StrictEqual except that NaNs are treated as equal and minus zero
// differs from positive zero.
enum class SameValueMode { kNumbersOnly, kFull };
- void BranchIfSameValue(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
- Label* if_true, Label* if_false,
+ void BranchIfSameValue(TNode<Object> lhs, TNode<Object> rhs, Label* if_true,
+ Label* if_false,
SameValueMode mode = SameValueMode::kFull);
// A part of BranchIfSameValue() that handles two double values.
// Treats NaN == NaN and +0 != -0.
@@ -3393,9 +3419,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
enum HasPropertyLookupMode { kHasProperty, kForInHasProperty };
- TNode<Oddball> HasProperty(TNode<Context> context, SloppyTNode<Object> object,
- SloppyTNode<Object> key,
- HasPropertyLookupMode mode);
+ TNode<Oddball> HasProperty(TNode<Context> context, TNode<Object> object,
+ TNode<Object> key, HasPropertyLookupMode mode);
// Due to a naming conflict with the builtin function namespace.
TNode<Oddball> HasProperty_Inline(TNode<Context> context,
@@ -3408,21 +3433,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void ForInPrepare(TNode<HeapObject> enumerator, TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector,
TNode<FixedArray>* cache_array_out,
- TNode<Smi>* cache_length_out);
- // Returns {cache_array} and {cache_length} in a fixed array of length 2.
- // TODO(jgruber): Tuple2 would be a slightly better fit as the return type,
- // but FixedArray has better support and there are no effective drawbacks to
- // using it instead of Tuple2 in practice.
- TNode<FixedArray> ForInPrepareForTorque(
- TNode<HeapObject> enumerator, TNode<UintPtrT> slot,
- TNode<HeapObject> maybe_feedback_vector);
+ TNode<Smi>* cache_length_out,
+ UpdateFeedbackMode update_feedback_mode);
- TNode<String> Typeof(SloppyTNode<Object> value);
+ TNode<String> Typeof(TNode<Object> value);
TNode<HeapObject> GetSuperConstructor(TNode<JSFunction> active_function);
TNode<JSReceiver> SpeciesConstructor(TNode<Context> context,
- SloppyTNode<Object> object,
+ TNode<Object> object,
TNode<JSReceiver> default_constructor);
TNode<Oddball> InstanceOf(TNode<Object> object, TNode<Object> callable,
@@ -3460,8 +3479,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
int base_size = 0);
// Check that a field offset is within the bounds of an object.
- TNode<BoolT> IsOffsetInBounds(SloppyTNode<IntPtrT> offset,
- SloppyTNode<IntPtrT> length, int header_size,
+ TNode<BoolT> IsOffsetInBounds(TNode<IntPtrT> offset, TNode<IntPtrT> length,
+ int header_size,
ElementsKind kind = HOLEY_ELEMENTS);
// Load a builtin's code from the builtin array in the isolate.
@@ -3659,7 +3678,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> receiver, Label* if_bailout,
GetOwnPropertyMode mode = kCallJSGetter);
- TNode<IntPtrT> TryToIntptr(SloppyTNode<Object> key, Label* if_not_intptr,
+ TNode<IntPtrT> TryToIntptr(TNode<Object> key, Label* if_not_intptr,
TVariable<Int32T>* var_instance_type = nullptr);
TNode<JSArray> ArrayCreate(TNode<Context> context, TNode<Number> length);
@@ -3736,8 +3755,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Uint32T> GetSortedKeyIndex(TNode<Array> descriptors,
TNode<Uint32T> entry_index);
- TNode<Smi> CollectFeedbackForString(SloppyTNode<Int32T> instance_type);
- void GenerateEqual_Same(SloppyTNode<Object> value, Label* if_equal,
+ TNode<Smi> CollectFeedbackForString(TNode<Int32T> instance_type);
+ void GenerateEqual_Same(TNode<Object> value, Label* if_equal,
Label* if_notequal,
TVariable<Smi>* var_type_feedback = nullptr);
@@ -3787,12 +3806,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// TODO(solanes): This method can go away and simplify into only one version
// of StoreElement once we have "if constexpr" available to use.
template <typename TArray, typename TIndex>
- void StoreElementBigIntOrTypedArray(TNode<TArray> elements, ElementsKind kind,
- TNode<TIndex> index, Node* value);
+ void StoreElementTypedArray(TNode<TArray> elements, ElementsKind kind,
+ TNode<TIndex> index, Node* value);
template <typename TIndex>
void StoreElement(TNode<FixedArrayBase> elements, ElementsKind kind,
- TNode<TIndex> index, Node* value);
+ TNode<TIndex> index, TNode<Object> value);
+
+ template <typename TIndex>
+ void StoreElement(TNode<FixedArrayBase> elements, ElementsKind kind,
+ TNode<TIndex> index, TNode<Float64T> value);
// Converts {input} to a number if {input} is a plain primitive (i.e. String or
// Oddball) and stores the result in {var_result}. Otherwise, it bails out to
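The other recurring change in this header is that Equal and RelationalComparison now take the context as a LazyNode<Context>, a callback that produces the context only when invoked, while the old TNode<Context> signatures survive as thin overloads that wrap the existing context in a lambda. A much-simplified sketch of that shape, using hypothetical stand-in types rather than the real CSA ones:
#include <functional>
#include <iostream>
struct Context { int id; };                    // stand-in for TNode<Context>
using LazyContext = std::function<Context()>;  // stand-in for LazyNode<Context>
// Lazy variant: the callback is only invoked on the path that actually needs it.
bool LooselyEqual(int lhs, int rhs, const LazyContext& context) {
  if (lhs == rhs) return true;                 // fast path, no context materialized
  Context ctx = context();                     // slow path, e.g. a conversion builtin
  std::cout << "conversion in context " << ctx.id << "\n";
  return false;
}
// Eager overload, mirroring the wrappers added above: an already-available
// context is simply captured in a lambda.
bool LooselyEqual(int lhs, int rhs, Context context) {
  return LooselyEqual(lhs, rhs, [=]() { return context; });
}
int main() {
  Context ctx{7};
  std::cout << LooselyEqual(1, 1, ctx) << "\n";  // fast path
  std::cout << LooselyEqual(1, 2, ctx) << "\n";  // slow path touches the context
  return 0;
}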
diff --git a/deps/v8/src/codegen/compilation-cache.cc b/deps/v8/src/codegen/compilation-cache.cc
index 01af6df644..826b53293a 100644
--- a/deps/v8/src/codegen/compilation-cache.cc
+++ b/deps/v8/src/codegen/compilation-cache.cc
@@ -78,10 +78,8 @@ void CompilationCacheScript::Age() {
void CompilationCacheEval::Age() { AgeCustom(this); }
void CompilationCacheRegExp::Age() { AgeByGeneration(this); }
void CompilationCacheCode::Age() {
- if (FLAG_turbo_nci_cache_ageing) {
- if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceAgeing();
- AgeByGeneration(this);
- }
+ if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceAgeing();
+ AgeByGeneration(this);
}
void CompilationSubCache::Iterate(RootVisitor* v) {
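The compilation-cache hunk above removes the FLAG_turbo_nci_cache_ageing guard, so CompilationCacheCode::Age() now always ages by generation. Assuming the usual meaning of generational ageing (each Age() call demotes entries by one generation and evicts the oldest), a toy model of the idea, not V8's actual CompilationSubCache, might look like:
#include <array>
#include <iostream>
#include <optional>
#include <string>
// Toy generational cache: Put() inserts into the youngest generation and Age()
// shifts everything one generation older, dropping whatever falls off the end.
template <int kGenerations>
class GenerationalCache {
 public:
  void Put(const std::string& key) { generations_[0] = key; }
  bool Has(const std::string& key) const {
    for (const auto& g : generations_)
      if (g && *g == key) return true;
    return false;
  }
  void Age() {
    for (int i = kGenerations - 1; i > 0; --i) generations_[i] = generations_[i - 1];
    generations_[0].reset();
  }
 private:
  std::array<std::optional<std::string>, kGenerations> generations_;
};
int main() {
  GenerationalCache<2> cache;
  cache.Put("script");
  cache.Age();
  std::cout << cache.Has("script") << "\n";  // 1: survives in the older generation
  cache.Age();
  std::cout << cache.Has("script") << "\n";  // 0: aged out of the cache
  return 0;
}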
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index b8d9c25677..66336ca32c 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -13,6 +13,8 @@
#include "src/ast/scopes.h"
#include "src/base/logging.h"
#include "src/base/optional.h"
+#include "src/base/platform/time.h"
+#include "src/baseline/baseline.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/compilation-cache.h"
#include "src/codegen/optimized-compilation-info.h"
@@ -30,6 +32,7 @@
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/isolate.h"
+#include "src/execution/local-isolate.h"
#include "src/execution/runtime-profiler.h"
#include "src/execution/vm-state-inl.h"
#include "src/handles/maybe-handles.h"
@@ -61,55 +64,8 @@ namespace internal {
namespace {
-bool IsForNativeContextIndependentCachingOnly(CodeKind kind) {
- // NCI code is only cached (and not installed on the JSFunction upon
- // successful compilation), unless the testing-only
- // FLAG_turbo_nci_as_midtier is enabled.
- return CodeKindIsNativeContextIndependentJSFunction(kind) &&
- !FLAG_turbo_nci_as_midtier;
-}
-
-// This predicate is currently needed only because the nci-as-midtier testing
-// configuration is special. A quick summary of compilation configurations:
-//
-// - Turbofan (and currently Turboprop) uses both the optimization marker and
-// the optimized code cache (underneath, the marker and the cache share the same
-// slot on the feedback vector).
-// - Native context independent (NCI) code uses neither the marker nor the
-// cache.
-// - The NCI-as-midtier testing configuration uses the marker, but not the
-// cache.
-//
-// This predicate supports that last case. In the near future, this last case is
-// expected to change s.t. code kinds use the marker iff they use the optimized
-// code cache (details still TBD). In that case, the existing
-// CodeKindIsStoredInOptimizedCodeCache is sufficient and this extra predicate
-// can be removed.
-// TODO(jgruber,rmcilroy,v8:8888): Remove this predicate once that has happened.
-bool UsesOptimizationMarker(CodeKind kind) {
- return !IsForNativeContextIndependentCachingOnly(kind);
-}
-
class CompilerTracer : public AllStatic {
public:
- static void PrintTracePrefix(const CodeTracer::Scope& scope,
- const char* header,
- OptimizedCompilationInfo* info) {
- PrintTracePrefix(scope, header, info->closure(), info->code_kind());
- }
-
- static void PrintTracePrefix(const CodeTracer::Scope& scope,
- const char* header, Handle<JSFunction> function,
- CodeKind code_kind) {
- PrintF(scope.file(), "[%s ", header);
- function->ShortPrint(scope.file());
- PrintF(scope.file(), " (target %s)", CodeKindToString(code_kind));
- }
-
- static void PrintTraceSuffix(const CodeTracer::Scope& scope) {
- PrintF(scope.file(), "]\n");
- }
-
static void TracePrepareJob(Isolate* isolate, OptimizedCompilationInfo* info,
const char* compiler_name) {
if (!FLAG_trace_opt || !info->IsOptimizing()) return;
@@ -120,6 +76,15 @@ class CompilerTracer : public AllStatic {
PrintTraceSuffix(scope);
}
+ static void TraceStartBaselineCompile(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared) {
+ if (!FLAG_trace_baseline) return;
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintTracePrefix(scope, "compiling method", shared, CodeKind::BASELINE);
+ PrintF(scope.file(), " using Sparkplug");
+ PrintTraceSuffix(scope);
+ }
+
static void TraceCompilationStats(Isolate* isolate,
OptimizedCompilationInfo* info,
double ms_creategraph, double ms_optimize,
@@ -132,6 +97,16 @@ class CompilerTracer : public AllStatic {
PrintTraceSuffix(scope);
}
+ static void TraceFinishBaselineCompile(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared,
+ double ms_timetaken) {
+ if (!FLAG_trace_baseline) return;
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintTracePrefix(scope, "compiling", shared, CodeKind::BASELINE);
+ PrintF(scope.file(), " - took %0.3f ms", ms_timetaken);
+ PrintTraceSuffix(scope);
+ }
+
static void TraceCompletedJob(Isolate* isolate,
OptimizedCompilationInfo* info) {
if (!FLAG_trace_opt) return;
@@ -152,13 +127,13 @@ class CompilerTracer : public AllStatic {
static void TraceOptimizedCodeCacheHit(Isolate* isolate,
Handle<JSFunction> function,
- BailoutId osr_offset,
+ BytecodeOffset osr_offset,
CodeKind code_kind) {
if (!FLAG_trace_opt) return;
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintTracePrefix(scope, "found optimized code for", function, code_kind);
if (!osr_offset.IsNone()) {
- PrintF(scope.file(), " at OSR AST id %d", osr_offset.ToInt());
+ PrintF(scope.file(), " at OSR bytecode offset %d", osr_offset.ToInt());
}
PrintTraceSuffix(scope);
}
@@ -182,6 +157,34 @@ class CompilerTracer : public AllStatic {
PrintF(scope.file(), " for optimized recompilation because --always-opt");
PrintF(scope.file(), "]\n");
}
+
+ private:
+ static void PrintTracePrefix(const CodeTracer::Scope& scope,
+ const char* header,
+ OptimizedCompilationInfo* info) {
+ PrintTracePrefix(scope, header, info->closure(), info->code_kind());
+ }
+
+ static void PrintTracePrefix(const CodeTracer::Scope& scope,
+ const char* header, Handle<JSFunction> function,
+ CodeKind code_kind) {
+ PrintF(scope.file(), "[%s ", header);
+ function->ShortPrint(scope.file());
+ PrintF(scope.file(), " (target %s)", CodeKindToString(code_kind));
+ }
+
+ static void PrintTracePrefix(const CodeTracer::Scope& scope,
+ const char* header,
+ Handle<SharedFunctionInfo> shared,
+ CodeKind code_kind) {
+ PrintF(scope.file(), "[%s ", header);
+ shared->ShortPrint(scope.file());
+ PrintF(scope.file(), " (target %s)", CodeKindToString(code_kind));
+ }
+
+ static void PrintTraceSuffix(const CodeTracer::Scope& scope) {
+ PrintF(scope.file(), "]\n");
+ }
};
} // namespace
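With the tracing helpers above, running with --trace-baseline should emit lines roughly of the following shape (the function description comes from SharedFunctionInfo::ShortPrint, so the exact name/address text will vary; the 0.312 ms figure is purely illustrative):

  [compiling method <shared function info> (target BASELINE) using Sparkplug]
  [compiling <shared function info> (target BASELINE) - took 0.312 ms]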
@@ -199,13 +202,13 @@ struct ScopedTimer {
base::TimeDelta* location_;
};
-namespace {
-
-void LogFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
- Handle<SharedFunctionInfo> shared,
- Handle<Script> script,
- Handle<AbstractCode> abstract_code, bool optimizing,
- double time_taken_ms, Isolate* isolate) {
+// static
+void Compiler::LogFunctionCompilation(Isolate* isolate,
+ CodeEventListener::LogEventsAndTags tag,
+ Handle<SharedFunctionInfo> shared,
+ Handle<Script> script,
+ Handle<AbstractCode> abstract_code,
+ CodeKind kind, double time_taken_ms) {
DCHECK(!abstract_code.is_null());
DCHECK(!abstract_code.is_identical_to(BUILTIN_CODE(isolate, CompileLazy)));
@@ -230,7 +233,23 @@ void LogFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
line_num, column_num));
if (!FLAG_log_function_events) return;
- std::string name = optimizing ? "optimize" : "compile";
+ std::string name;
+ switch (kind) {
+ case CodeKind::INTERPRETED_FUNCTION:
+ name = "interpreter";
+ break;
+ case CodeKind::BASELINE:
+ name = "baseline";
+ break;
+ case CodeKind::TURBOPROP:
+ name = "turboprop";
+ break;
+ case CodeKind::TURBOFAN:
+ name = "optimize";
+ break;
+ default:
+ UNREACHABLE();
+ }
switch (tag) {
case CodeEventListener::EVAL_TAG:
name += "-eval";
@@ -253,6 +272,8 @@ void LogFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
*debug_name));
}
+namespace {
+
ScriptOriginOptions OriginOptionsForEval(Object script) {
if (!script.IsScript()) return ScriptOriginOptions();
@@ -332,8 +353,9 @@ void RecordUnoptimizedFunctionCompilation(
time_taken_to_finalize.InMillisecondsF();
Handle<Script> script(Script::cast(shared->script()), isolate);
- LogFunctionCompilation(tag, shared, script, abstract_code, false,
- time_taken_ms, isolate);
+ Compiler::LogFunctionCompilation(isolate, tag, shared, script, abstract_code,
+ CodeKind::INTERPRETED_FUNCTION,
+ time_taken_ms);
}
} // namespace
@@ -467,8 +489,9 @@ void OptimizedCompilationJob::RecordFunctionCompilation(
Handle<Script> script(
Script::cast(compilation_info()->shared_info()->script()), isolate);
- LogFunctionCompilation(tag, compilation_info()->shared_info(), script,
- abstract_code, true, time_taken_ms, isolate);
+ Compiler::LogFunctionCompilation(
+ isolate, tag, compilation_info()->shared_info(), script, abstract_code,
+ compilation_info()->code_kind(), time_taken_ms);
}
// ----------------------------------------------------------------------------
@@ -476,6 +499,7 @@ void OptimizedCompilationJob::RecordFunctionCompilation(
namespace {
+#if V8_ENABLE_WEBASSEMBLY
bool UseAsmWasm(FunctionLiteral* literal, bool asm_wasm_broken) {
// Check whether asm.js validation is enabled.
if (!FLAG_validate_asm) return false;
@@ -490,6 +514,7 @@ bool UseAsmWasm(FunctionLiteral* literal, bool asm_wasm_broken) {
// In general, we respect the "use asm" directive.
return literal->scope()->IsAsmModule();
}
+#endif
void InstallInterpreterTrampolineCopy(Isolate* isolate,
Handle<SharedFunctionInfo> shared_info) {
@@ -613,6 +638,73 @@ void UpdateSharedFunctionFlagsAfterCompilation(FunctionLiteral* literal,
shared_info.SetScopeInfo(*literal->scope()->scope_info());
}
+bool CanCompileWithBaseline(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared) {
+ // Check if we actually have bytecode.
+ if (!shared->HasBytecodeArray()) return false;
+
+ // Do not optimize when debugger needs to hook into every call.
+ if (isolate->debug()->needs_check_on_function_call()) return false;
+
+ // Functions with breakpoints have to stay interpreted.
+ if (shared->HasBreakInfo()) return false;
+
+ // Do not baseline compile if sparkplug is disabled or function doesn't pass
+ // sparkplug_filter.
+ if (!FLAG_sparkplug || !shared->PassesFilter(FLAG_sparkplug_filter)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool CompileSharedWithBaseline(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared,
+ Compiler::ClearExceptionFlag flag,
+ IsCompiledScope* is_compiled_scope) {
+ // We shouldn't be passing uncompiled functions into this function.
+ DCHECK(is_compiled_scope->is_compiled());
+
+ // Early return for already baseline-compiled functions.
+ if (shared->HasBaselineData()) return true;
+
+ // Check if we actually can compile with baseline.
+ if (!CanCompileWithBaseline(isolate, shared)) return false;
+
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) {
+ if (flag == Compiler::KEEP_EXCEPTION) {
+ isolate->StackOverflow();
+ }
+ return false;
+ }
+
+ CompilerTracer::TraceStartBaselineCompile(isolate, shared);
+ Handle<Code> code;
+ base::TimeDelta time_taken;
+ {
+ ScopedTimer timer(&time_taken);
+ code = GenerateBaselineCode(isolate, shared);
+
+ Handle<HeapObject> function_data =
+ handle(HeapObject::cast(shared->function_data(kAcquireLoad)), isolate);
+ Handle<BaselineData> baseline_data =
+ isolate->factory()->NewBaselineData(code, function_data);
+ shared->set_baseline_data(*baseline_data);
+ }
+ double time_taken_ms = time_taken.InMillisecondsF();
+
+ CompilerTracer::TraceFinishBaselineCompile(isolate, shared, time_taken_ms);
+
+ if (shared->script().IsScript()) {
+ Compiler::LogFunctionCompilation(
+ isolate, CodeEventListener::FUNCTION_TAG, shared,
+ handle(Script::cast(shared->script()), isolate),
+ Handle<AbstractCode>::cast(code), CodeKind::BASELINE, time_taken_ms);
+ }
+ return true;
+}
+
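The gating in CanCompileWithBaseline is driven entirely by flags that appear in this patch (FLAG_sparkplug, FLAG_sparkplug_filter, FLAG_always_sparkplug, FLAG_trace_baseline). Assuming d8's usual FLAG_foo_bar to --foo-bar mapping, and with test.js standing in for any script, the new path can be exercised with something like:

  d8 --sparkplug --trace-baseline test.js            # lazy baseline tier-up with tracing
  d8 --always-sparkplug --trace-baseline test.js     # baseline-compile eagerly after bytecode finalization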
// Finalize a single compilation job. This function can return
// RETRY_ON_MAIN_THREAD if the job cannot be finalized off-thread, in which case
// it should be safe to call it again on the main thread with the same job.
@@ -648,7 +740,9 @@ std::unique_ptr<UnoptimizedCompilationJob>
ExecuteSingleUnoptimizedCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
- std::vector<FunctionLiteral*>* eager_inner_literals) {
+ std::vector<FunctionLiteral*>* eager_inner_literals,
+ LocalIsolate* local_isolate) {
+#if V8_ENABLE_WEBASSEMBLY
if (UseAsmWasm(literal, parse_info->flags().is_asm_wasm_broken())) {
std::unique_ptr<UnoptimizedCompilationJob> asm_job(
AsmJs::NewCompilationJob(parse_info, literal, allocator));
@@ -661,9 +755,10 @@ ExecuteSingleUnoptimizedCompilationJob(
// with a validation error or another error that could be solve by falling
// through to standard unoptimized compile.
}
+#endif
std::unique_ptr<UnoptimizedCompilationJob> job(
interpreter::Interpreter::NewCompilationJob(
- parse_info, literal, allocator, eager_inner_literals));
+ parse_info, literal, allocator, eager_inner_literals, local_isolate));
if (job->ExecuteJob() != CompilationJob::SUCCEEDED) {
// Compilation failed, return null.
@@ -678,9 +773,13 @@ bool RecursivelyExecuteUnoptimizedCompilationJobs(
AccountingAllocator* allocator,
UnoptimizedCompilationJobList* function_jobs) {
std::vector<FunctionLiteral*> eager_inner_literals;
+
+ // We need to pass nullptr here because we are on the background
+ // thread but don't have a LocalIsolate.
+ DCHECK_NULL(LocalHeap::Current());
std::unique_ptr<UnoptimizedCompilationJob> job =
ExecuteSingleUnoptimizedCompilationJob(parse_info, literal, allocator,
- &eager_inner_literals);
+ &eager_inner_literals, nullptr);
if (!job) return false;
@@ -719,7 +818,9 @@ bool IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
std::unique_ptr<UnoptimizedCompilationJob> job =
ExecuteSingleUnoptimizedCompilationJob(parse_info, literal, allocator,
- &functions_to_compile);
+ &functions_to_compile,
+ isolate->AsLocalIsolate());
+
if (!job) return false;
UpdateSharedFunctionFlagsAfterCompilation(literal, *shared_info);
@@ -745,8 +846,8 @@ bool IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
DCHECK((!std::is_same<LocalIsolate, Isolate>::value));
DCHECK_NOT_NULL(jobs_to_retry_finalization_on_main_thread);
- // Clear the literal and ParseInfo to prevent further attempts to access
- // them.
+ // Clear the literal and ParseInfo to prevent further attempts to
+ // access them.
job->compilation_info()->ClearLiteral();
job->ClearParseInfo();
jobs_to_retry_finalization_on_main_thread->emplace_back(
@@ -834,7 +935,8 @@ bool FinalizeDeferredUnoptimizedCompilationJobs(
}
V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
- Handle<JSFunction> function, BailoutId osr_offset, CodeKind code_kind) {
+ Handle<JSFunction> function, BytecodeOffset osr_offset,
+ CodeKind code_kind) {
RuntimeCallTimerScope runtimeTimer(
function->GetIsolate(),
RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap);
@@ -845,7 +947,8 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
if (osr_offset.IsNone() && function->has_feedback_vector()) {
FeedbackVector feedback_vector = function->feedback_vector();
feedback_vector.EvictOptimizedCodeMarkedForDeoptimization(
- function->shared(), "GetCodeFromOptimizedCodeCache");
+ function->raw_feedback_cell(), function->shared(),
+ "GetCodeFromOptimizedCodeCache");
code = feedback_vector.optimized_code();
} else if (!osr_offset.IsNone()) {
code = function->context()
@@ -866,7 +969,8 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
}
void ClearOptimizedCodeCache(OptimizedCompilationInfo* compilation_info) {
- DCHECK(UsesOptimizationMarker(compilation_info->code_kind()));
+ DCHECK(!CodeKindIsNativeContextIndependentJSFunction(
+ compilation_info->code_kind()));
Handle<JSFunction> function = compilation_info->closure();
if (compilation_info->osr_offset().IsNone()) {
Handle<FeedbackVector> vector =
@@ -878,12 +982,7 @@ void ClearOptimizedCodeCache(OptimizedCompilationInfo* compilation_info) {
void InsertCodeIntoOptimizedCodeCache(
OptimizedCompilationInfo* compilation_info) {
const CodeKind kind = compilation_info->code_kind();
- if (!CodeKindIsStoredInOptimizedCodeCache(kind)) {
- if (UsesOptimizationMarker(kind)) {
- ClearOptimizedCodeCache(compilation_info);
- }
- return;
- }
+ if (!CodeKindIsStoredInOptimizedCodeCache(kind)) return;
if (compilation_info->function_context_specializing()) {
// Function context specialization folds-in the function context, so no
@@ -901,7 +1000,8 @@ void InsertCodeIntoOptimizedCodeCache(
if (compilation_info->osr_offset().IsNone()) {
Handle<FeedbackVector> vector =
handle(function->feedback_vector(), function->GetIsolate());
- FeedbackVector::SetOptimizedCode(vector, code);
+ FeedbackVector::SetOptimizedCode(vector, code,
+ function->raw_feedback_cell());
} else {
DCHECK(CodeKindCanOSR(kind));
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
@@ -1048,8 +1148,8 @@ Handle<Code> ContinuationForConcurrentOptimization(
// Tiering up to Turbofan and cached optimized code exists. Continue
// execution there until TF optimization has finished.
return cached_code;
- } else if (FLAG_turboprop_as_midtier &&
- function->HasAvailableOptimizedCode()) {
+ } else if (FLAG_turboprop && function->HasAvailableOptimizedCode()) {
+ DCHECK(!FLAG_turboprop_as_toptier);
DCHECK(function->NextTier() == CodeKind::TURBOFAN);
// It is possible that we have marked a closure for TurboFan optimization
// but the marker is processed by another closure that doesn't have
@@ -1057,17 +1157,26 @@ Handle<Code> ContinuationForConcurrentOptimization(
// code.
if (!function->HasAttachedOptimizedCode()) {
DCHECK(function->feedback_vector().has_optimized_code());
+      // A release store isn't required here because the store into the
+      // feedback vector was already done with release semantics.
+ STATIC_ASSERT(
+ FeedbackVector::kFeedbackVectorMaybeOptimizedCodeIsStoreRelease);
function->set_code(function->feedback_vector().optimized_code());
}
return handle(function->code(), isolate);
+ } else if (function->shared().HasBaselineData()) {
+ Code baseline_code = function->shared().baseline_data().baseline_code();
+ function->set_code(baseline_code);
+ return handle(baseline_code, isolate);
}
+ DCHECK(function->ActiveTierIsIgnition());
return BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
}
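With the baseline fallback added above, the code chosen while a concurrent TurboFan job is still in flight follows roughly this ladder (code kinds as named in this patch; Turboprop only takes part as a mid-tier when --turboprop is on and --turboprop-as-toptier is off):

  Ignition (INTERPRETED_FUNCTION) -> Sparkplug (BASELINE) -> Turboprop (TURBOPROP) -> TurboFan (TURBOFAN)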
-MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
- ConcurrencyMode mode, CodeKind code_kind,
- BailoutId osr_offset = BailoutId::None(),
- JavaScriptFrame* osr_frame = nullptr) {
+MaybeHandle<Code> GetOptimizedCode(
+ Handle<JSFunction> function, ConcurrencyMode mode, CodeKind code_kind,
+ BytecodeOffset osr_offset = BytecodeOffset::None(),
+ JavaScriptFrame* osr_frame = nullptr) {
DCHECK(CodeKindIsOptimizedJSFunction(code_kind));
Isolate* isolate = function->GetIsolate();
@@ -1075,10 +1184,10 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
// Make sure we clear the optimization marker on the function so that we
// don't try to re-optimize.
- // If compiling for NCI caching only (which does not use the optimization
- // marker), don't touch the marker to avoid interfering with Turbofan
- // compilation.
- if (UsesOptimizationMarker(code_kind) && function->HasOptimizationMarker()) {
+ // If compiling for NCI (which does not use the optimization marker), don't
+ // touch the marker to avoid interfering with Turbofan compilation.
+ if (!CodeKindIsNativeContextIndependentJSFunction(code_kind) &&
+ function->HasOptimizationMarker()) {
function->ClearOptimizationMarker();
}
@@ -1122,8 +1231,6 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
// contexts).
if (CodeKindIsNativeContextIndependentJSFunction(code_kind)) {
DCHECK(osr_offset.IsNone());
- DCHECK(FLAG_turbo_nci_as_midtier || !FLAG_turbo_nci_delayed_codegen ||
- shared->has_optimized_at_least_once());
Handle<Code> cached_code;
if (GetCodeFromCompilationCache(isolate, shared).ToHandle(&cached_code)) {
@@ -1237,6 +1344,10 @@ void FinalizeUnoptimizedCompilation(
if (FLAG_interpreted_frames_native_stack) {
InstallInterpreterTrampolineCopy(isolate, shared_info);
}
+ if (FLAG_always_sparkplug) {
+ CompileSharedWithBaseline(isolate, shared_info, Compiler::KEEP_EXCEPTION,
+ &is_compiled_scope);
+ }
Handle<CoverageInfo> coverage_info;
if (finalize_data.coverage_info().ToHandle(&coverage_info)) {
isolate->debug()->InstallCoverageInfo(shared_info, coverage_info);
@@ -1417,14 +1528,6 @@ void CompileOnBackgroundThread(ParseInfo* parse_info,
// Character stream shouldn't be used again.
parse_info->ResetCharacterStream();
}
-
-MaybeHandle<SharedFunctionInfo> CompileToplevel(
- ParseInfo* parse_info, Handle<Script> script, Isolate* isolate,
- IsCompiledScope* is_compiled_scope) {
- return CompileToplevel(parse_info, script, kNullMaybeHandle, isolate,
- is_compiled_scope);
-}
-
} // namespace
CompilationHandleScope::~CompilationHandleScope() {
@@ -1587,37 +1690,39 @@ void BackgroundCompileTask::Run() {
} else {
DCHECK(info_->flags().is_toplevel());
- LocalIsolate isolate(isolate_for_local_isolate_, ThreadKind::kBackground);
- UnparkedScope unparked_scope(isolate.heap());
- LocalHandleScope handle_scope(&isolate);
-
- info_->ast_value_factory()->Internalize(&isolate);
-
- // We don't have the script source, origin, or details yet, so use default
- // values for them. These will be fixed up during the main-thread merge.
- Handle<Script> script = info_->CreateScript(
- &isolate, isolate.factory()->empty_string(), kNullMaybeHandle,
- ScriptOriginOptions(false, false, false, info_->flags().is_module()));
+ {
+ LocalIsolate isolate(isolate_for_local_isolate_, ThreadKind::kBackground);
+ UnparkedScope unparked_scope(&isolate);
+ LocalHandleScope handle_scope(&isolate);
+
+ info_->ast_value_factory()->Internalize(&isolate);
+
+ // We don't have the script source, origin, or details yet, so use default
+ // values for them. These will be fixed up during the main-thread merge.
+ Handle<Script> script = info_->CreateScript(
+ &isolate, isolate.factory()->empty_string(), kNullMaybeHandle,
+ ScriptOriginOptions(false, false, false, info_->flags().is_module()));
+
+ parser_->HandleSourceURLComments(&isolate, script);
+
+ MaybeHandle<SharedFunctionInfo> maybe_result;
+ if (info_->literal() != nullptr) {
+ maybe_result = CompileAndFinalizeOnBackgroundThread(
+ info_.get(), compile_state_.allocator(), script, &isolate,
+ &finalize_unoptimized_compilation_data_,
+ &jobs_to_retry_finalization_on_main_thread_, &is_compiled_scope_);
+ } else {
+ DCHECK(compile_state_.pending_error_handler()->has_pending_error());
+ PreparePendingException(&isolate, info_.get());
+ }
- parser_->HandleSourceURLComments(&isolate, script);
+ outer_function_sfi_ =
+ isolate.heap()->NewPersistentMaybeHandle(maybe_result);
+ script_ = isolate.heap()->NewPersistentHandle(script);
- MaybeHandle<SharedFunctionInfo> maybe_result;
- if (info_->literal() != nullptr) {
- maybe_result = CompileAndFinalizeOnBackgroundThread(
- info_.get(), compile_state_.allocator(), script, &isolate,
- &finalize_unoptimized_compilation_data_,
- &jobs_to_retry_finalization_on_main_thread_, &is_compiled_scope_);
- } else {
- DCHECK(compile_state_.pending_error_handler()->has_pending_error());
- PreparePendingException(&isolate, info_.get());
+ persistent_handles_ = isolate.heap()->DetachPersistentHandles();
}
- outer_function_sfi_ =
- isolate.heap()->NewPersistentMaybeHandle(maybe_result);
- script_ = isolate.heap()->NewPersistentHandle(script);
-
- persistent_handles_ = isolate.heap()->DetachPersistentHandles();
-
{
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.FinalizeCodeBackground.ReleaseParser");
@@ -1717,7 +1822,8 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
std::unique_ptr<UnoptimizedCompilationJob> job;
{
job = interpreter::Interpreter::NewSourcePositionCollectionJob(
- &parse_info, parse_info.literal(), bytecode, isolate->allocator());
+ &parse_info, parse_info.literal(), bytecode, isolate->allocator(),
+ isolate->main_thread_local_isolate());
if (!job || job->ExecuteJob() != CompilationJob::SUCCEEDED ||
job->FinalizeJob(shared_info, isolate) != CompilationJob::SUCCEEDED) {
@@ -1745,18 +1851,17 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
}
// static
-bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
+bool Compiler::Compile(Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
ClearExceptionFlag flag,
IsCompiledScope* is_compiled_scope) {
// We should never reach here if the function is already compiled.
DCHECK(!shared_info->is_compiled());
DCHECK(!is_compiled_scope->is_compiled());
-
- Isolate* isolate = shared_info->GetIsolate();
DCHECK(AllowCompilation::IsAllowed(isolate));
DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
DCHECK(!isolate->has_pending_exception());
DCHECK(!shared_info->HasBytecodeArray());
+
VMState<BYTECODE_COMPILER> state(isolate);
PostponeInterruptsScope postpone(isolate);
TimerEventScope<TimerEventCompileCode> compile_timer(isolate);
@@ -1820,7 +1925,8 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
}
// static
-bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
+bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
+ ClearExceptionFlag flag,
IsCompiledScope* is_compiled_scope) {
// We should never reach here if the function is already compiled or
// optimized.
@@ -1832,20 +1938,30 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
// flushed.
function->ResetIfBytecodeFlushed();
- Isolate* isolate = function->GetIsolate();
Handle<SharedFunctionInfo> shared_info = handle(function->shared(), isolate);
// Ensure shared function info is compiled.
*is_compiled_scope = shared_info->is_compiled_scope(isolate);
if (!is_compiled_scope->is_compiled() &&
- !Compile(shared_info, flag, is_compiled_scope)) {
+ !Compile(isolate, shared_info, flag, is_compiled_scope)) {
return false;
}
+
DCHECK(is_compiled_scope->is_compiled());
Handle<Code> code = handle(shared_info->GetCode(), isolate);
- // Initialize the feedback cell for this JSFunction.
- JSFunction::InitializeFeedbackCell(function, is_compiled_scope);
+ // Initialize the feedback cell for this JSFunction and reset the interrupt
+ // budget for feedback vector allocation even if there is a closure feedback
+ // cell array. We are re-compiling when we have a closure feedback cell array
+ // which means we are compiling after a bytecode flush.
+ // TODO(verwaest/mythria): Investigate if allocating feedback vector
+ // immediately after a flush would be better.
+ JSFunction::InitializeFeedbackCell(function, is_compiled_scope, true);
+
+ // If --always-sparkplug is enabled, make sure we have baseline code.
+ if (FLAG_always_sparkplug && CanCompileWithBaseline(isolate, shared_info)) {
+ DCHECK(shared_info->HasBaselineData());
+ }
// Optimize now if --always-opt is enabled.
if (FLAG_always_opt && !function->shared().HasAsmWasmData()) {
@@ -1861,7 +1977,12 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
}
// Install code on closure.
- function->set_code(*code);
+ function->set_code(*code, kReleaseStore);
+
+ // Install a feedback vector if necessary.
+ if (code->kind() == CodeKind::BASELINE) {
+ JSFunction::EnsureFeedbackVector(function, is_compiled_scope);
+ }
// Check postconditions on success.
DCHECK(!isolate->has_pending_exception());
@@ -1871,6 +1992,33 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
}
// static
+bool Compiler::CompileBaseline(Isolate* isolate, Handle<JSFunction> function,
+ ClearExceptionFlag flag,
+ IsCompiledScope* is_compiled_scope) {
+ Handle<SharedFunctionInfo> shared(function->shared(isolate), isolate);
+ if (!CompileSharedWithBaseline(isolate, shared, flag, is_compiled_scope)) {
+ return false;
+ }
+
+ // Baseline code needs a feedback vector.
+ JSFunction::EnsureFeedbackVector(function, is_compiled_scope);
+
+ Code baseline_code = shared->baseline_data().baseline_code(isolate);
+ DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
+ function->set_code(baseline_code);
+
+ return true;
+}
+
+// static
+MaybeHandle<SharedFunctionInfo> Compiler::CompileToplevel(
+ ParseInfo* parse_info, Handle<Script> script, Isolate* isolate,
+ IsCompiledScope* is_compiled_scope) {
+ return v8::internal::CompileToplevel(parse_info, script, kNullMaybeHandle,
+ isolate, is_compiled_scope);
+}
+
+// static
bool Compiler::FinalizeBackgroundCompileTask(
BackgroundCompileTask* task, Handle<SharedFunctionInfo> shared_info,
Isolate* isolate, ClearExceptionFlag flag) {
@@ -1914,33 +2062,32 @@ bool Compiler::FinalizeBackgroundCompileTask(
}
// static
-bool Compiler::CompileOptimized(Handle<JSFunction> function,
+bool Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
ConcurrencyMode mode, CodeKind code_kind) {
DCHECK(CodeKindIsOptimizedJSFunction(code_kind));
-
- Isolate* isolate = function->GetIsolate();
DCHECK(AllowCompilation::IsAllowed(isolate));
Handle<Code> code;
if (!GetOptimizedCode(function, mode, code_kind).ToHandle(&code)) {
- // Optimization failed, get unoptimized code. Unoptimized code must exist
- // already if we are optimizing.
+ // Optimization failed, get the existing code. We could have optimized code
+ // from a lower tier here. Unoptimized code must exist already if we are
+ // optimizing.
DCHECK(!isolate->has_pending_exception());
DCHECK(function->shared().is_compiled());
DCHECK(function->shared().IsInterpreted());
- code = BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
+ code = ContinuationForConcurrentOptimization(isolate, function);
}
- if (!IsForNativeContextIndependentCachingOnly(code_kind)) {
- function->set_code(*code);
+ if (!CodeKindIsNativeContextIndependentJSFunction(code_kind)) {
+ function->set_code(*code, kReleaseStore);
}
// Check postconditions on success.
DCHECK(!isolate->has_pending_exception());
DCHECK(function->shared().is_compiled());
- DCHECK(IsForNativeContextIndependentCachingOnly(code_kind) ||
+ DCHECK(CodeKindIsNativeContextIndependentJSFunction(code_kind) ||
function->is_compiled());
- if (UsesOptimizationMarker(code_kind)) {
+ if (!CodeKindIsNativeContextIndependentJSFunction(code_kind)) {
DCHECK_IMPLIES(function->HasOptimizationMarker(),
function->IsInOptimizationQueue());
DCHECK_IMPLIES(function->HasOptimizationMarker(),
@@ -1955,7 +2102,8 @@ bool Compiler::CompileOptimized(Handle<JSFunction> function,
MaybeHandle<SharedFunctionInfo> Compiler::CompileForLiveEdit(
ParseInfo* parse_info, Handle<Script> script, Isolate* isolate) {
IsCompiledScope is_compiled_scope;
- return CompileToplevel(parse_info, script, isolate, &is_compiled_scope);
+ return Compiler::CompileToplevel(parse_info, script, isolate,
+ &is_compiled_scope);
}
// static
@@ -2037,8 +2185,9 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
}
script->set_eval_from_position(eval_position);
- if (!CompileToplevel(&parse_info, script, maybe_outer_scope_info, isolate,
- &is_compiled_scope)
+ if (!v8::internal::CompileToplevel(&parse_info, script,
+ maybe_outer_scope_info, isolate,
+ &is_compiled_scope)
.ToHandle(&shared_info)) {
return MaybeHandle<JSFunction>();
}
@@ -2059,7 +2208,9 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
result = Factory::JSFunctionBuilder{isolate, shared_info, context}
.set_allocation_type(AllocationType::kYoung)
.Build();
- JSFunction::InitializeFeedbackCell(result, &is_compiled_scope);
+ // TODO(mythria): I don't think we need this here. PostInstantiation
+ // already initializes feedback cell.
+ JSFunction::InitializeFeedbackCell(result, &is_compiled_scope, true);
if (allow_eval_cache) {
// Make sure to cache this result.
Handle<FeedbackCell> new_feedback_cell(result->raw_feedback_cell(),
@@ -2072,7 +2223,9 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
result = Factory::JSFunctionBuilder{isolate, shared_info, context}
.set_allocation_type(AllocationType::kYoung)
.Build();
- JSFunction::InitializeFeedbackCell(result, &is_compiled_scope);
+ // TODO(mythria): I don't think we need this here. PostInstantiation
+ // already initializes feedback cell.
+ JSFunction::InitializeFeedbackCell(result, &is_compiled_scope, true);
if (allow_eval_cache) {
// Add the SharedFunctionInfo and the LiteralsArray to the eval cache if
// we didn't retrieve from there.
@@ -2506,7 +2659,8 @@ MaybeHandle<SharedFunctionInfo> CompileScriptOnMainThread(
script->IsUserJavaScript());
DCHECK_EQ(parse_info.flags().is_repl_mode(), script->is_repl_mode());
- return CompileToplevel(&parse_info, script, isolate, is_compiled_scope);
+ return Compiler::CompileToplevel(&parse_info, script, isolate,
+ is_compiled_scope);
}
class StressBackgroundCompileThread : public base::Thread {
@@ -2821,8 +2975,9 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
origin_options, NOT_NATIVES_CODE, arguments);
Handle<SharedFunctionInfo> top_level;
- maybe_result = CompileToplevel(&parse_info, script, maybe_outer_scope_info,
- isolate, &is_compiled_scope);
+ maybe_result = v8::internal::CompileToplevel(&parse_info, script,
+ maybe_outer_scope_info,
+ isolate, &is_compiled_scope);
if (maybe_result.is_null()) isolate->ReportPendingMessages();
ASSIGN_RETURN_ON_EXCEPTION(isolate, top_level, maybe_result, JSFunction);
@@ -3029,12 +3184,12 @@ template Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
// static
MaybeHandle<Code> Compiler::GetOptimizedCodeForOSR(Handle<JSFunction> function,
- BailoutId osr_offset,
+ BytecodeOffset osr_offset,
JavaScriptFrame* osr_frame) {
DCHECK(!osr_offset.IsNone());
DCHECK_NOT_NULL(osr_frame);
return GetOptimizedCode(function, ConcurrencyMode::kNotConcurrent,
- CodeKindForTopTier(), osr_offset, osr_frame);
+ CodeKindForOSR(), osr_offset, osr_frame);
}
// static
@@ -3055,7 +3210,7 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
CodeKind code_kind = compilation_info->code_kind();
const bool should_install_code_on_function =
- !IsForNativeContextIndependentCachingOnly(code_kind);
+ !CodeKindIsNativeContextIndependentJSFunction(code_kind);
if (should_install_code_on_function) {
// Reset profiler ticks, function is no longer considered hot.
compilation_info->closure()->feedback_vector().set_profiler_ticks(0);
@@ -3080,7 +3235,8 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
InsertCodeIntoCompilationCache(isolate, compilation_info);
CompilerTracer::TraceCompletedJob(isolate, compilation_info);
if (should_install_code_on_function) {
- compilation_info->closure()->set_code(*compilation_info->code());
+ compilation_info->closure()->set_code(*compilation_info->code(),
+ kReleaseStore);
}
return CompilationJob::SUCCEEDED;
}
@@ -3088,9 +3244,9 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
DCHECK_EQ(job->state(), CompilationJob::State::kFailed);
CompilerTracer::TraceAbortedJob(isolate, compilation_info);
- compilation_info->closure()->set_code(shared->GetCode());
+ compilation_info->closure()->set_code(shared->GetCode(), kReleaseStore);
// Clear the InOptimizationQueue marker, if it exists.
- if (UsesOptimizationMarker(code_kind) &&
+ if (!CodeKindIsNativeContextIndependentJSFunction(code_kind) &&
compilation_info->closure()->IsInOptimizationQueue()) {
compilation_info->closure()->ClearOptimizationMarker();
}
@@ -3106,7 +3262,9 @@ void Compiler::PostInstantiation(Handle<JSFunction> function) {
// If code is compiled to bytecode (i.e., isn't asm.js), then allocate a
// feedback and check for optimized code.
if (is_compiled_scope.is_compiled() && shared->HasBytecodeArray()) {
- JSFunction::InitializeFeedbackCell(function, &is_compiled_scope);
+ // Don't reset budget if there is a closure feedback cell array already. We
+ // are just creating a new closure that shares the same feedback cell.
+ JSFunction::InitializeFeedbackCell(function, &is_compiled_scope, false);
if (function->has_feedback_vector()) {
// Evict any deoptimized code on feedback vector. We need to do this after
@@ -3114,12 +3272,18 @@ void Compiler::PostInstantiation(Handle<JSFunction> function) {
// deoptimized the code on the feedback vector. So check for any
      // deoptimized code just before installing it on the function.
function->feedback_vector().EvictOptimizedCodeMarkedForDeoptimization(
- *shared, "new function from shared function info");
+ function->raw_feedback_cell(), *shared,
+ "new function from shared function info");
Code code = function->feedback_vector().optimized_code();
if (!code.is_null()) {
// Caching of optimized code enabled and optimized code found.
DCHECK(!code.marked_for_deoptimization());
DCHECK(function->shared().is_compiled());
+
+      // We don't need a release store because the optimized code was
+      // stored into the vector with release semantics.
+ STATIC_ASSERT(
+ FeedbackVector::kFeedbackVectorMaybeOptimizedCodeIsStoreRelease);
function->set_code(code);
}
}
diff --git a/deps/v8/src/codegen/compiler.h b/deps/v8/src/codegen/compiler.h
index cc06e36eff..7ff1b5eecd 100644
--- a/deps/v8/src/codegen/compiler.h
+++ b/deps/v8/src/codegen/compiler.h
@@ -65,14 +65,27 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// whereas successful compilation ensures the {is_compiled} predicate on the
// given function holds (except for live-edit, which compiles the world).
- static bool Compile(Handle<SharedFunctionInfo> shared,
+ static bool Compile(Isolate* isolate, Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag,
IsCompiledScope* is_compiled_scope);
- static bool Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
+ static bool Compile(Isolate* isolate, Handle<JSFunction> function,
+ ClearExceptionFlag flag,
IsCompiledScope* is_compiled_scope);
- static bool CompileOptimized(Handle<JSFunction> function,
+ static bool CompileBaseline(Isolate* isolate, Handle<JSFunction> function,
+ ClearExceptionFlag flag,
+ IsCompiledScope* is_compiled_scope);
+ static bool CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
ConcurrencyMode mode, CodeKind code_kind);
-
+ static MaybeHandle<SharedFunctionInfo> CompileToplevel(
+ ParseInfo* parse_info, Handle<Script> script, Isolate* isolate,
+ IsCompiledScope* is_compiled_scope);
+
+ static void LogFunctionCompilation(Isolate* isolate,
+ CodeEventListener::LogEventsAndTags tag,
+ Handle<SharedFunctionInfo> shared,
+ Handle<Script> script,
+ Handle<AbstractCode> abstract_code,
+ CodeKind kind, double time_taken_ms);
// Collect source positions for a function that has already been compiled to
// bytecode, but for which source positions were not collected (e.g. because
// they were not immediately needed).
@@ -192,7 +205,7 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// Generate and return optimized code for OSR, or empty handle on failure.
V8_WARN_UNUSED_RESULT static MaybeHandle<Code> GetOptimizedCodeForOSR(
- Handle<JSFunction> function, BailoutId osr_offset,
+ Handle<JSFunction> function, BytecodeOffset osr_offset,
JavaScriptFrame* osr_frame);
};
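The declarations above are enough to sketch how a runtime caller might drive the new baseline entry point. Only the Compiler::* signatures come from this header; the handles in scope and the failure handling are illustrative assumptions:

  // Assumes an Isolate* isolate and a Handle<JSFunction> function in scope.
  IsCompiledScope is_compiled_scope(function->shared(), isolate);
  if (!is_compiled_scope.is_compiled() &&
      !Compiler::Compile(isolate, function, Compiler::KEEP_EXCEPTION,
                         &is_compiled_scope)) {
    return ReadOnlyRoots(isolate).exception();  // illustrative failure path
  }
  if (!Compiler::CompileBaseline(isolate, function, Compiler::KEEP_EXCEPTION,
                                 &is_compiled_scope)) {
    // Baseline compilation is best-effort; on failure the function simply
    // keeps running in the interpreter.
  }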
diff --git a/deps/v8/src/codegen/constant-pool.cc b/deps/v8/src/codegen/constant-pool.cc
index 1a676787df..4f8a6286a4 100644
--- a/deps/v8/src/codegen/constant-pool.cc
+++ b/deps/v8/src/codegen/constant-pool.cc
@@ -459,5 +459,254 @@ void ConstantPool::MaybeCheck() {
#endif // defined(V8_TARGET_ARCH_ARM64)
+#if defined(V8_TARGET_ARCH_RISCV64)
+
+// Constant Pool.
+
+ConstantPool::ConstantPool(Assembler* assm) : assm_(assm) {}
+ConstantPool::~ConstantPool() { DCHECK_EQ(blocked_nesting_, 0); }
+
+RelocInfoStatus ConstantPool::RecordEntry(uint32_t data,
+ RelocInfo::Mode rmode) {
+ ConstantPoolKey key(data, rmode);
+ CHECK(key.is_value32());
+ return RecordKey(std::move(key), assm_->pc_offset());
+}
+
+RelocInfoStatus ConstantPool::RecordEntry(uint64_t data,
+ RelocInfo::Mode rmode) {
+ ConstantPoolKey key(data, rmode);
+ CHECK(!key.is_value32());
+ return RecordKey(std::move(key), assm_->pc_offset());
+}
+
+RelocInfoStatus ConstantPool::RecordKey(ConstantPoolKey key, int offset) {
+ RelocInfoStatus write_reloc_info = GetRelocInfoStatusFor(key);
+ if (write_reloc_info == RelocInfoStatus::kMustRecord) {
+ if (key.is_value32()) {
+ if (entry32_count_ == 0) first_use_32_ = offset;
+ ++entry32_count_;
+ } else {
+ if (entry64_count_ == 0) first_use_64_ = offset;
+ ++entry64_count_;
+ }
+ }
+ entries_.insert(std::make_pair(key, offset));
+
+ if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
+ // Request constant pool emission after the next instruction.
+ SetNextCheckIn(1);
+ }
+
+ return write_reloc_info;
+}
+
+RelocInfoStatus ConstantPool::GetRelocInfoStatusFor(
+ const ConstantPoolKey& key) {
+ if (key.AllowsDeduplication()) {
+ auto existing = entries_.find(key);
+ if (existing != entries_.end()) {
+ return RelocInfoStatus::kMustOmitForDuplicate;
+ }
+ }
+ return RelocInfoStatus::kMustRecord;
+}
+
+void ConstantPool::EmitAndClear(Jump require_jump) {
+ DCHECK(!IsBlocked());
+ // Prevent recursive pool emission.
+ Assembler::BlockPoolsScope block_pools(assm_, PoolEmissionCheck::kSkip);
+ Alignment require_alignment =
+ IsAlignmentRequiredIfEmittedAt(require_jump, assm_->pc_offset());
+ int size = ComputeSize(require_jump, require_alignment);
+ Label size_check;
+ assm_->bind(&size_check);
+ assm_->RecordConstPool(size);
+
+ // Emit the constant pool. It is preceded by an optional branch if
+ // {require_jump} and a header which will:
+ // 1) Encode the size of the constant pool, for use by the disassembler.
+ // 2) Terminate the program, to try to prevent execution from accidentally
+ // flowing into the constant pool.
+  //  3) Align the 64-bit pool entries to a 64-bit boundary.
+ // TODO(all): Make the alignment part less fragile. Currently code is
+ // allocated as a byte array so there are no guarantees the alignment will
+ // be preserved on compaction. Currently it works as allocation seems to be
+ // 64-bit aligned.
+ DEBUG_PRINTF("\tConstant Pool start\n")
+ Label after_pool;
+ if (require_jump == Jump::kRequired) assm_->b(&after_pool);
+
+ assm_->RecordComment("[ Constant Pool");
+
+ EmitPrologue(require_alignment);
+ if (require_alignment == Alignment::kRequired) assm_->DataAlign(kInt64Size);
+ EmitEntries();
+ assm_->RecordComment("]");
+ assm_->bind(&after_pool);
+ DEBUG_PRINTF("\tConstant Pool end\n")
+
+ DCHECK_LE(assm_->SizeOfCodeGeneratedSince(&size_check) - size, 3);
+ Clear();
+}
+
+void ConstantPool::Clear() {
+ entries_.clear();
+ first_use_32_ = -1;
+ first_use_64_ = -1;
+ entry32_count_ = 0;
+ entry64_count_ = 0;
+ next_check_ = 0;
+}
+
+void ConstantPool::StartBlock() {
+ if (blocked_nesting_ == 0) {
+ // Prevent constant pool checks from happening by setting the next check to
+ // the biggest possible offset.
+ next_check_ = kMaxInt;
+ }
+ ++blocked_nesting_;
+}
+
+void ConstantPool::EndBlock() {
+ --blocked_nesting_;
+ if (blocked_nesting_ == 0) {
+ DCHECK(IsInImmRangeIfEmittedAt(assm_->pc_offset()));
+ // Make sure a check happens quickly after getting unblocked.
+ next_check_ = 0;
+ }
+}
+
+bool ConstantPool::IsBlocked() const { return blocked_nesting_ > 0; }
+
+void ConstantPool::SetNextCheckIn(size_t instructions) {
+ next_check_ =
+ assm_->pc_offset() + static_cast<int>(instructions * kInstrSize);
+}
+
+void ConstantPool::EmitEntries() {
+ for (auto iter = entries_.begin(); iter != entries_.end();) {
+ DCHECK(iter->first.is_value32() || IsAligned(assm_->pc_offset(), 8));
+ auto range = entries_.equal_range(iter->first);
+ bool shared = iter->first.AllowsDeduplication();
+ for (auto it = range.first; it != range.second; ++it) {
+ SetLoadOffsetToConstPoolEntry(it->second, assm_->pc(), it->first);
+ if (!shared) Emit(it->first);
+ }
+ if (shared) Emit(iter->first);
+ iter = range.second;
+ }
+}
+
+void ConstantPool::Emit(const ConstantPoolKey& key) {
+ if (key.is_value32()) {
+ assm_->dd(key.value32());
+ } else {
+ assm_->dq(key.value64());
+ }
+}
+
+bool ConstantPool::ShouldEmitNow(Jump require_jump, size_t margin) const {
+ if (IsEmpty()) return false;
+ if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
+ return true;
+ }
+ // We compute {dist32/64}, i.e. the distance from the first instruction
+ // accessing a 32bit/64bit entry in the constant pool to any of the
+ // 32bit/64bit constant pool entries, respectively. This is required because
+ // we do not guarantee that entries are emitted in order of reference, i.e. it
+ // is possible that the entry with the earliest reference is emitted last.
+ // The constant pool should be emitted if either of the following is true:
+ // (A) {dist32/64} will be out of range at the next check in.
+ // (B) Emission can be done behind an unconditional branch and {dist32/64}
+ // exceeds {kOpportunityDist*}.
+ // (C) {dist32/64} exceeds the desired approximate distance to the pool.
+ int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
+ size_t pool_end_32 = assm_->pc_offset() + margin + worst_case_size;
+ size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
+ if (Entry64Count() != 0) {
+ // The 64-bit constants are always emitted before the 32-bit constants, so
+ // we subtract the size of the 32-bit constants from {size}.
+ size_t dist64 = pool_end_64 - first_use_64_;
+ bool next_check_too_late = dist64 + 2 * kCheckInterval >= kMaxDistToPool64;
+ bool opportune_emission_without_jump =
+ require_jump == Jump::kOmitted && (dist64 >= kOpportunityDistToPool64);
+ bool approximate_distance_exceeded = dist64 >= kApproxDistToPool64;
+ if (next_check_too_late || opportune_emission_without_jump ||
+ approximate_distance_exceeded) {
+ return true;
+ }
+ }
+ if (Entry32Count() != 0) {
+ size_t dist32 = pool_end_32 - first_use_32_;
+ bool next_check_too_late = dist32 + 2 * kCheckInterval >= kMaxDistToPool32;
+ bool opportune_emission_without_jump =
+ require_jump == Jump::kOmitted && (dist32 >= kOpportunityDistToPool32);
+ bool approximate_distance_exceeded = dist32 >= kApproxDistToPool32;
+ if (next_check_too_late || opportune_emission_without_jump ||
+ approximate_distance_exceeded) {
+ return true;
+ }
+ }
+ return false;
+}
+
+int ConstantPool::ComputeSize(Jump require_jump,
+ Alignment require_alignment) const {
+ int size_up_to_marker = PrologueSize(require_jump);
+ int alignment = require_alignment == Alignment::kRequired ? kInstrSize : 0;
+ size_t size_after_marker =
+ Entry32Count() * kInt32Size + alignment + Entry64Count() * kInt64Size;
+ return size_up_to_marker + static_cast<int>(size_after_marker);
+}
+
+Alignment ConstantPool::IsAlignmentRequiredIfEmittedAt(Jump require_jump,
+ int pc_offset) const {
+ int size_up_to_marker = PrologueSize(require_jump);
+ if (Entry64Count() != 0 &&
+ !IsAligned(pc_offset + size_up_to_marker, kInt64Size)) {
+ return Alignment::kRequired;
+ }
+ return Alignment::kOmitted;
+}
+
+bool ConstantPool::IsInImmRangeIfEmittedAt(int pc_offset) {
+ // Check that all entries are in range if the pool is emitted at {pc_offset}.
+ // This ignores kPcLoadDelta (conservatively, since all offsets are positive),
+ // and over-estimates the last entry's address with the pool's end.
+ Alignment require_alignment =
+ IsAlignmentRequiredIfEmittedAt(Jump::kRequired, pc_offset);
+ size_t pool_end_32 =
+ pc_offset + ComputeSize(Jump::kRequired, require_alignment);
+ size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
+ bool entries_in_range_32 =
+ Entry32Count() == 0 || (pool_end_32 < first_use_32_ + kMaxDistToPool32);
+ bool entries_in_range_64 =
+ Entry64Count() == 0 || (pool_end_64 < first_use_64_ + kMaxDistToPool64);
+ return entries_in_range_32 && entries_in_range_64;
+}
+
+ConstantPool::BlockScope::BlockScope(Assembler* assm, size_t margin)
+ : pool_(&assm->constpool_) {
+ pool_->assm_->EmitConstPoolWithJumpIfNeeded(margin);
+ pool_->StartBlock();
+}
+
+ConstantPool::BlockScope::BlockScope(Assembler* assm, PoolEmissionCheck check)
+ : pool_(&assm->constpool_) {
+ DCHECK_EQ(check, PoolEmissionCheck::kSkip);
+ pool_->StartBlock();
+}
+
+ConstantPool::BlockScope::~BlockScope() { pool_->EndBlock(); }
+
+void ConstantPool::MaybeCheck() {
+ if (assm_->pc_offset() >= next_check_) {
+ Check(Emission::kIfNeeded, Jump::kRequired);
+ }
+}
+
+#endif // defined(V8_TARGET_ARCH_RISCV64)
+
} // namespace internal
} // namespace v8
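The size bookkeeping in ComputeSize above is plain arithmetic: the pool occupies the prologue, all 32-bit entries, an optional instruction-sized alignment pad, and all 64-bit entries. A standalone sketch of the same computation, using the usual kInt32Size/kInt64Size values of 4 and 8 and writing the pad as 4 bytes (one RISC-V instruction); the prologue size is left as a parameter since it depends on whether a jump is emitted:

  // prologue + 32-bit entries + optional alignment pad + 64-bit entries
  int ComputePoolSizeSketch(int prologue_size, int n32, int n64, bool align64) {
    int alignment = align64 ? 4 : 0;
    return prologue_size + n32 * 4 + alignment + n64 * 8;
  }
  // e.g. an 8-byte prologue, three 32-bit and two 64-bit entries, with
  // alignment required: 8 + 3*4 + 4 + 2*8 = 40 bytes.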
diff --git a/deps/v8/src/codegen/constant-pool.h b/deps/v8/src/codegen/constant-pool.h
index 122561bbd5..fb4147507c 100644
--- a/deps/v8/src/codegen/constant-pool.h
+++ b/deps/v8/src/codegen/constant-pool.h
@@ -163,7 +163,7 @@ class ConstantPoolBuilder {
#endif // defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64)
-#if defined(V8_TARGET_ARCH_ARM64)
+#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_RISCV64)
class ConstantPoolKey {
public:
diff --git a/deps/v8/src/codegen/constants-arch.h b/deps/v8/src/codegen/constants-arch.h
index 7a222c960f..cea8dc068f 100644
--- a/deps/v8/src/codegen/constants-arch.h
+++ b/deps/v8/src/codegen/constants-arch.h
@@ -21,6 +21,8 @@
#include "src/codegen/s390/constants-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X64
#include "src/codegen/x64/constants-x64.h" // NOLINT
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/codegen/riscv64/constants-riscv64.h" // NOLINT
#else
#error Unsupported target architecture.
#endif
diff --git a/deps/v8/src/codegen/cpu-features.h b/deps/v8/src/codegen/cpu-features.h
index 7e0951ba92..7cb45c7785 100644
--- a/deps/v8/src/codegen/cpu-features.h
+++ b/deps/v8/src/codegen/cpu-features.h
@@ -68,6 +68,11 @@ enum CpuFeature {
VECTOR_ENHANCE_FACILITY_1,
VECTOR_ENHANCE_FACILITY_2,
MISC_INSTR_EXT2,
+
+#elif V8_TARGET_ARCH_RISCV64
+ FPU,
+ FP64FPU,
+ RISCV_SIMD,
#endif
NUMBER_OF_CPU_FEATURES
@@ -133,6 +138,10 @@ class V8_EXPORT_PRIVATE CpuFeatures : public AllStatic {
static unsigned icache_line_size_;
static unsigned dcache_line_size_;
static bool initialized_;
+  // This variable is only used for certain archs to query SupportsWasmSimd128()
+  // at runtime in builtins using an extern ref. Other callers should use
+  // CpuFeatures::SupportsWasmSimd128().
+ static bool supports_wasm_simd_128_;
};
} // namespace internal
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index 1c2d227aa2..bf75ff3d12 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -75,6 +75,72 @@ constexpr struct alignas(16) {
} double_negate_constant = {uint64_t{0x8000000000000000},
uint64_t{0x8000000000000000}};
+constexpr struct alignas(16) {
+ uint64_t a;
+ uint64_t b;
+} wasm_i8x16_swizzle_mask = {uint64_t{0x70707070'70707070},
+ uint64_t{0x70707070'70707070}};
+
+constexpr struct alignas(16) {
+ uint64_t a;
+ uint64_t b;
+} wasm_i8x16_popcnt_mask = {uint64_t{0x03020201'02010100},
+ uint64_t{0x04030302'03020201}};
+
+constexpr struct alignas(16) {
+ uint64_t a;
+ uint64_t b;
+} wasm_i8x16_splat_0x01 = {uint64_t{0x01010101'01010101},
+ uint64_t{0x01010101'01010101}};
+
+constexpr struct alignas(16) {
+ uint64_t a;
+ uint64_t b;
+} wasm_i8x16_splat_0x0f = {uint64_t{0x0F0F0F0F'0F0F0F0F},
+ uint64_t{0x0F0F0F0F'0F0F0F0F}};
+
+constexpr struct alignas(16) {
+ uint64_t a;
+ uint64_t b;
+} wasm_i8x16_splat_0x33 = {uint64_t{0x33333333'33333333},
+ uint64_t{0x33333333'33333333}};
+
+constexpr struct alignas(16) {
+ uint64_t a;
+ uint64_t b;
+} wasm_i8x16_splat_0x55 = {uint64_t{0x55555555'55555555},
+ uint64_t{0x55555555'55555555}};
+
+constexpr struct alignas(16) {
+ uint64_t a;
+ uint64_t b;
+} wasm_i16x8_splat_0x0001 = {uint64_t{0x00010001'00010001},
+ uint64_t{0x00010001'00010001}};
+
+constexpr struct alignas(16) {
+ uint64_t a;
+ uint64_t b;
+} wasm_f64x2_convert_low_i32x4_u_int_mask = {uint64_t{0x4330000043300000},
+ uint64_t{0x4330000043300000}};
+
+constexpr struct alignas(16) {
+ uint64_t a;
+ uint64_t b;
+} wasm_double_2_power_52 = {uint64_t{0x4330000000000000},
+ uint64_t{0x4330000000000000}};
+
+constexpr struct alignas(16) {
+ uint64_t a;
+ uint64_t b;
+} wasm_int32_max_as_double = {uint64_t{0x41dfffffffc00000},
+ uint64_t{0x41dfffffffc00000}};
+
+constexpr struct alignas(16) {
+ uint64_t a;
+ uint64_t b;
+} wasm_uint32_max_as_double = {uint64_t{0x41efffffffe00000},
+ uint64_t{0x41efffffffe00000}};
+
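A few of the new bit patterns are easier to read as doubles: 0x4330000000000000 is 2^52 (the bias used in the unsigned-to-double conversion trick), 0x41dfffffffc00000 is 2147483647.0 (INT32_MAX as a double) and 0x41efffffffe00000 is 4294967295.0 (UINT32_MAX as a double). A quick standalone check, plain C++ and not part of the patch:

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  int main() {
    const uint64_t patterns[] = {0x4330000000000000ULL, 0x41dfffffffc00000ULL,
                                 0x41efffffffe00000ULL};
    for (uint64_t bits : patterns) {
      double d;
      std::memcpy(&d, &bits, sizeof(d));  // reinterpret the raw bit pattern
      std::printf("0x%016llx = %.1f\n",
                  static_cast<unsigned long long>(bits), d);
    }
  }
  // prints 4503599627370496.0, 2147483647.0 and 4294967295.0 respectively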
// Implementation of ExternalReference
static ExternalReference::Type BuiltinCallTypeForResultSize(int result_size) {
@@ -432,6 +498,11 @@ ExternalReference ExternalReference::address_of_builtin_subclassing_flag() {
return ExternalReference(&FLAG_builtin_subclassing);
}
+ExternalReference
+ExternalReference::address_of_harmony_regexp_match_indices_flag() {
+ return ExternalReference(&FLAG_harmony_regexp_match_indices);
+}
+
ExternalReference ExternalReference::address_of_runtime_stats_flag() {
return ExternalReference(&TracingFlags::runtime_stats);
}
@@ -480,11 +551,69 @@ ExternalReference ExternalReference::address_of_double_neg_constant() {
return ExternalReference(reinterpret_cast<Address>(&double_negate_constant));
}
+ExternalReference ExternalReference::address_of_wasm_i8x16_swizzle_mask() {
+ return ExternalReference(reinterpret_cast<Address>(&wasm_i8x16_swizzle_mask));
+}
+
+ExternalReference ExternalReference::address_of_wasm_i8x16_popcnt_mask() {
+ return ExternalReference(reinterpret_cast<Address>(&wasm_i8x16_popcnt_mask));
+}
+
+ExternalReference ExternalReference::address_of_wasm_i8x16_splat_0x01() {
+ return ExternalReference(reinterpret_cast<Address>(&wasm_i8x16_splat_0x01));
+}
+
+ExternalReference ExternalReference::address_of_wasm_i8x16_splat_0x0f() {
+ return ExternalReference(reinterpret_cast<Address>(&wasm_i8x16_splat_0x0f));
+}
+
+ExternalReference ExternalReference::address_of_wasm_i8x16_splat_0x33() {
+ return ExternalReference(reinterpret_cast<Address>(&wasm_i8x16_splat_0x33));
+}
+
+ExternalReference ExternalReference::address_of_wasm_i8x16_splat_0x55() {
+ return ExternalReference(reinterpret_cast<Address>(&wasm_i8x16_splat_0x55));
+}
+
+ExternalReference ExternalReference::address_of_wasm_i16x8_splat_0x0001() {
+ return ExternalReference(reinterpret_cast<Address>(&wasm_i16x8_splat_0x0001));
+}
+
+ExternalReference
+ExternalReference::address_of_wasm_f64x2_convert_low_i32x4_u_int_mask() {
+ return ExternalReference(
+ reinterpret_cast<Address>(&wasm_f64x2_convert_low_i32x4_u_int_mask));
+}
+
+ExternalReference ExternalReference::supports_wasm_simd_128_address() {
+ return ExternalReference(
+ reinterpret_cast<Address>(&CpuFeatures::supports_wasm_simd_128_));
+}
+
+ExternalReference ExternalReference::address_of_wasm_double_2_power_52() {
+ return ExternalReference(reinterpret_cast<Address>(&wasm_double_2_power_52));
+}
+
+ExternalReference ExternalReference::address_of_wasm_int32_max_as_double() {
+ return ExternalReference(
+ reinterpret_cast<Address>(&wasm_int32_max_as_double));
+}
+
+ExternalReference ExternalReference::address_of_wasm_uint32_max_as_double() {
+ return ExternalReference(
+ reinterpret_cast<Address>(&wasm_uint32_max_as_double));
+}
+
ExternalReference
ExternalReference::address_of_enable_experimental_regexp_engine() {
return ExternalReference(&FLAG_enable_experimental_regexp_engine);
}
+ExternalReference ExternalReference::thread_in_wasm_flag_address_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->thread_in_wasm_flag_address_address());
+}
+
ExternalReference ExternalReference::is_profiling_address(Isolate* isolate) {
return ExternalReference(isolate->is_profiling_address());
}
@@ -519,6 +648,8 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback() {
#define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
#elif V8_TARGET_ARCH_S390
#define re_stack_check_func RegExpMacroAssemblerS390::CheckStackGuardState
+#elif V8_TARGET_ARCH_RISCV64
+#define re_stack_check_func RegExpMacroAssemblerRISCV::CheckStackGuardState
#else
UNREACHABLE();
#endif
diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h
index cef36a5d27..d44b8e801c 100644
--- a/deps/v8/src/codegen/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -78,6 +78,8 @@ class StatsCounter;
V(address_of_regexp_stack_memory_top_address, \
"RegExpStack::memory_top_address_address()") \
V(address_of_static_offsets_vector, "OffsetsVector::static_offsets_vector") \
+ V(thread_in_wasm_flag_address_address, \
+ "Isolate::thread_in_wasm_flag_address_address") \
V(re_case_insensitive_compare_unicode, \
"NativeRegExpMacroAssembler::CaseInsensitiveCompareUnicode()") \
V(re_case_insensitive_compare_non_unicode, \
@@ -99,20 +101,29 @@ class StatsCounter;
#define EXTERNAL_REFERENCE_LIST(V) \
V(abort_with_reason, "abort_with_reason") \
+ V(address_of_builtin_subclassing_flag, "FLAG_builtin_subclassing") \
V(address_of_double_abs_constant, "double_absolute_constant") \
V(address_of_double_neg_constant, "double_negate_constant") \
V(address_of_enable_experimental_regexp_engine, \
"address_of_enable_experimental_regexp_engine") \
V(address_of_float_abs_constant, "float_absolute_constant") \
V(address_of_float_neg_constant, "float_negate_constant") \
+ V(address_of_harmony_regexp_match_indices_flag, \
+ "FLAG_harmony_regexp_match_indices") \
V(address_of_min_int, "LDoubleConstant::min_int") \
V(address_of_mock_arraybuffer_allocator_flag, \
"FLAG_mock_arraybuffer_allocator") \
- V(address_of_builtin_subclassing_flag, "FLAG_builtin_subclassing") \
V(address_of_one_half, "LDoubleConstant::one_half") \
V(address_of_runtime_stats_flag, "TracingFlags::runtime_stats") \
V(address_of_the_hole_nan, "the_hole_nan") \
V(address_of_uint32_bias, "uint32_bias") \
+ V(address_of_wasm_i8x16_swizzle_mask, "wasm_i8x16_swizzle_mask") \
+ V(address_of_wasm_i8x16_popcnt_mask, "wasm_i8x16_popcnt_mask") \
+ V(address_of_wasm_i8x16_splat_0x01, "wasm_i8x16_splat_0x01") \
+ V(address_of_wasm_i8x16_splat_0x0f, "wasm_i8x16_splat_0x0f") \
+ V(address_of_wasm_i8x16_splat_0x33, "wasm_i8x16_splat_0x33") \
+ V(address_of_wasm_i8x16_splat_0x55, "wasm_i8x16_splat_0x55") \
+ V(address_of_wasm_i16x8_splat_0x0001, "wasm_16x8_splat_0x0001") \
V(bytecode_size_table_address, "Bytecodes::bytecode_size_table_address") \
V(check_object_type, "check_object_type") \
V(compute_integer_hash, "ComputeSeededHash") \
@@ -233,6 +244,12 @@ class StatsCounter;
V(wasm_memory_init, "wasm::memory_init") \
V(wasm_memory_copy, "wasm::memory_copy") \
V(wasm_memory_fill, "wasm::memory_fill") \
+ V(address_of_wasm_f64x2_convert_low_i32x4_u_int_mask, \
+ "wasm_f64x2_convert_low_i32x4_u_int_mask") \
+ V(supports_wasm_simd_128_address, "wasm::supports_wasm_simd_128_address") \
+ V(address_of_wasm_double_2_power_52, "wasm_double_2_power_52") \
+ V(address_of_wasm_int32_max_as_double, "wasm_int32_max_as_double") \
+ V(address_of_wasm_uint32_max_as_double, "wasm_uint32_max_as_double") \
V(write_barrier_marking_from_code_function, "WriteBarrier::MarkingFromCode") \
V(call_enqueue_microtask_function, "MicrotaskQueue::CallEnqueueMicrotask") \
V(call_enter_context_function, "call_enter_context_function") \
@@ -253,7 +270,6 @@ class StatsCounter;
"ExperimentalRegExp::MatchForCallFromJs") \
EXTERNAL_REFERENCE_LIST_INTL(V) \
EXTERNAL_REFERENCE_LIST_HEAP_SANDBOX(V)
-
#ifdef V8_INTL_SUPPORT
#define EXTERNAL_REFERENCE_LIST_INTL(V) \
V(intl_convert_one_byte_to_lower, "intl_convert_one_byte_to_lower") \
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32-inl.h b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
index 74c3168d49..25d2d486ce 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
@@ -49,7 +49,11 @@ namespace internal {
bool CpuFeatures::SupportsOptimizer() { return true; }
-bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(SSE4_1); }
+bool CpuFeatures::SupportsWasmSimd128() {
+ if (IsSupported(SSE4_1)) return true;
+ if (FLAG_wasm_simd_ssse3_codegen && IsSupported(SSSE3)) return true;
+ return false;
+}
// The modes possibly affected by apply must be in kApplyMask.
void RelocInfo::apply(intptr_t delta) {
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc
index 7135bf01d1..3f9d7ddfa2 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc
@@ -130,6 +130,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
+ if (cpu.has_sse42() && FLAG_enable_sse4_2) supported_ |= 1u << SSE4_2;
if (cpu.has_sse41() && FLAG_enable_sse4_1) supported_ |= 1u << SSE4_1;
if (cpu.has_ssse3() && FLAG_enable_ssse3) supported_ |= 1u << SSSE3;
if (cpu.has_sse3() && FLAG_enable_sse3) supported_ |= 1u << SSE3;
@@ -153,6 +154,12 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
} else if (strcmp(FLAG_mcpu, "atom") == 0) {
supported_ |= 1u << ATOM;
}
+
+  // Set a static value on whether SIMD is supported.
+  // This variable is only used for certain archs to query SupportsWasmSimd128()
+  // at runtime in builtins using an extern ref. Other callers should use
+  // CpuFeatures::SupportsWasmSimd128().
+ CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128();
}
void CpuFeatures::PrintTarget() {}
@@ -2171,6 +2178,29 @@ void Assembler::cvtdq2ps(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
+void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF3);
+ EMIT(0x0F);
+ EMIT(0xE6);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::cvtps2pd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x5A);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::cvtpd2ps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x5A);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::cvttps2dq(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
@@ -2179,6 +2209,14 @@ void Assembler::cvttps2dq(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
+void Assembler::cvttpd2dq(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xE6);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::addsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
@@ -2479,6 +2517,14 @@ void Assembler::movdqa(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
+void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x6F);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::movdqu(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
@@ -2585,6 +2631,16 @@ void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
EMIT(imm8);
}
+void Assembler::pcmpgtq(XMMRegister dst, XMMRegister src) {
+ DCHECK(IsEnabled(SSE4_2));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x38);
+ EMIT(0x37);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::psllw(XMMRegister reg, uint8_t shift) {
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -3113,6 +3169,10 @@ void Assembler::vextractps(Operand dst, XMMRegister src, byte imm8) {
EMIT(imm8);
}
+void Assembler::vpcmpgtq(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vinstr(0x37, dst, src1, src2, k66, k0F38, VexW::kWIG);
+}
+
void Assembler::bmi1(byte op, Register reg, Register vreg, Operand rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h
index 874bbf7d87..3914c35544 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.h
@@ -942,10 +942,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
cvtdq2ps(dst, Operand(src));
}
void cvtdq2ps(XMMRegister dst, Operand src);
+ void cvtdq2pd(XMMRegister dst, XMMRegister src);
+ void cvtps2pd(XMMRegister dst, XMMRegister src);
+ void cvtpd2ps(XMMRegister dst, XMMRegister src);
void cvttps2dq(XMMRegister dst, XMMRegister src) {
cvttps2dq(dst, Operand(src));
}
void cvttps2dq(XMMRegister dst, Operand src);
+ void cvttpd2dq(XMMRegister dst, XMMRegister src);
void addsd(XMMRegister dst, XMMRegister src) { addsd(dst, Operand(src)); }
void addsd(XMMRegister dst, Operand src);
@@ -986,6 +990,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movdqa(XMMRegister dst, Operand src);
void movdqa(Operand dst, XMMRegister src);
+ void movdqa(XMMRegister dst, XMMRegister src);
void movdqu(XMMRegister dst, Operand src);
void movdqu(Operand dst, XMMRegister src);
void movdqu(XMMRegister dst, XMMRegister src);
@@ -1012,6 +1017,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void extractps(Operand dst, XMMRegister src, byte imm8);
void extractps(Register dst, XMMRegister src, byte imm8);
+ void pcmpgtq(XMMRegister dst, XMMRegister src);
+
void psllw(XMMRegister reg, uint8_t shift);
void pslld(XMMRegister reg, uint8_t shift);
void psrlw(XMMRegister reg, uint8_t shift);
@@ -1365,6 +1372,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vextractps(Operand dst, XMMRegister src, byte imm8);
+ void vpcmpgtq(XMMRegister dst, XMMRegister src1, XMMRegister src2);
+
void vmovaps(XMMRegister dst, XMMRegister src) { vmovaps(dst, Operand(src)); }
void vmovaps(XMMRegister dst, Operand src) { vps(0x28, dst, xmm0, src); }
void vmovapd(XMMRegister dst, XMMRegister src) { vmovapd(dst, Operand(src)); }
@@ -1473,12 +1482,24 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vcvtdq2ps(XMMRegister dst, Operand src) {
vinstr(0x5B, dst, xmm0, src, kNone, k0F, kWIG);
}
+ void vcvtdq2pd(XMMRegister dst, XMMRegister src) {
+ vinstr(0xE6, dst, xmm0, src, kF3, k0F, kWIG);
+ }
+ void vcvtps2pd(XMMRegister dst, XMMRegister src) {
+ vinstr(0x5A, dst, xmm0, src, kNone, k0F, kWIG);
+ }
+ void vcvtpd2ps(XMMRegister dst, XMMRegister src) {
+ vinstr(0x5A, dst, xmm0, src, k66, k0F, kWIG);
+ }
void vcvttps2dq(XMMRegister dst, XMMRegister src) {
vcvttps2dq(dst, Operand(src));
}
void vcvttps2dq(XMMRegister dst, Operand src) {
vinstr(0x5B, dst, xmm0, src, kF3, k0F, kWIG);
}
+ void vcvttpd2dq(XMMRegister dst, XMMRegister src) {
+ vinstr(0xE6, dst, xmm0, src, k66, k0F, kWIG);
+ }
void vmovddup(XMMRegister dst, Operand src) {
vinstr(0x12, dst, xmm0, src, kF2, k0F, kWIG);
@@ -1492,6 +1513,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vbroadcastss(XMMRegister dst, Operand src) {
vinstr(0x18, dst, xmm0, src, k66, k0F38, kW0);
}
+ void vmovdqa(XMMRegister dst, Operand src) {
+ vinstr(0x6F, dst, xmm0, src, k66, k0F, kWIG);
+ }
void vmovdqu(XMMRegister dst, Operand src) {
vinstr(0x6F, dst, xmm0, src, kF3, k0F, kWIG);
}
@@ -1593,6 +1617,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void pd(byte op, XMMRegister dst, Operand src);
#define PACKED_OP_LIST(V) \
+ V(unpckl, 0x14) \
V(and, 0x54) \
V(andn, 0x55) \
V(or, 0x56) \
diff --git a/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc b/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc
index 24f4f5df08..d732fa2fbb 100644
--- a/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc
@@ -89,6 +89,15 @@ const Register ApiGetterDescriptor::CallbackRegister() { return eax; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return ecx; }
+const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ UNREACHABLE();
+}
+const Register BaselineLeaveFrameDescriptor::WeightRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ UNREACHABLE();
+}
+
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return eax; }
@@ -213,21 +222,22 @@ void CompareDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void Compare_BaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:11421): Implement on this platform.
+ InitializePlatformUnimplemented(data, kParameterCount);
+}
+
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edx, eax};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
+void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {
- edi, // JSFunction
- edx, // the new target
- eax, // actual number of arguments
- ecx, // expected number of arguments
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ // TODO(v8:11421): Implement on this platform.
+ InitializePlatformUnimplemented(data, kParameterCount);
}
void ApiCallbackDescriptor::InitializePlatformSpecific(
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index 36a5a6888d..7a99d6c701 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -650,6 +650,20 @@ void TurboAssembler::Roundpd(XMMRegister dst, XMMRegister src,
}
}
+void TurboAssembler::Pmulhrsw(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpmulhrsw(dst, src1, src2);
+ } else {
+ if (dst != src1) {
+ movdqu(dst, src1);
+ }
+ CpuFeatureScope sse_scope(this, SSSE3);
+ pmulhrsw(dst, src2);
+ }
+}
+
// 1. Unpack src0, src1 into even-number elements of scratch.
// 2. Unpack src1, src0 into even-number elements of dst.
// 3. Multiply 1. with 2.
@@ -738,6 +752,448 @@ void TurboAssembler::S128Select(XMMRegister dst, XMMRegister mask,
}
}
+void TurboAssembler::I64x2SConvertI32x4High(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpunpckhqdq(dst, src, src);
+ vpmovsxdq(dst, dst);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pshufd(dst, src, 0xEE);
+ pmovsxdq(dst, dst);
+ }
+}
+
+void TurboAssembler::I64x2UConvertI32x4High(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpxor(scratch, scratch, scratch);
+ vpunpckhdq(dst, src, scratch);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pshufd(dst, src, 0xEE);
+ pmovzxdq(dst, dst);
+ }
+}
+
+void TurboAssembler::I32x4SConvertI16x8High(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // src = |a|b|c|d|e|f|g|h| (high)
+ // dst = |e|e|f|f|g|g|h|h|
+ vpunpckhwd(dst, src, src);
+ vpsrad(dst, dst, 16);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ if (dst == src) {
+ // 2 bytes shorter than pshufd, but has a dependency on dst.
+ movhlps(dst, src);
+ pmovsxwd(dst, dst);
+ } else {
+ // No dependency on dst.
+ pshufd(dst, src, 0xEE);
+ pmovsxwd(dst, dst);
+ }
+ }
+}
+
+void TurboAssembler::I32x4UConvertI16x8High(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // scratch = |0|0|0|0|0|0|0|0|
+ // src = |a|b|c|d|e|f|g|h|
+ // dst = |0|a|0|b|0|c|0|d|
+ XMMRegister tmp = dst == src ? scratch : dst;
+ vpxor(tmp, tmp, tmp);
+ vpunpckhwd(dst, src, tmp);
+ } else {
+ if (dst == src) {
+ // xorps can be executed on more ports than pshufd.
+ xorps(scratch, scratch);
+ punpckhwd(dst, scratch);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ // No dependency on dst.
+ pshufd(dst, src, 0xEE);
+ pmovzxwd(dst, dst);
+ }
+ }
+}
+
+void TurboAssembler::I16x8SConvertI8x16High(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // src = |a|b|c|d|e|f|g|h|i|j|k|l|m|n|o|p| (high)
+ // dst = |i|i|j|j|k|k|l|l|m|m|n|n|o|o|p|p|
+ vpunpckhbw(dst, src, src);
+ vpsraw(dst, dst, 8);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ if (dst == src) {
+ // 2 bytes shorter than pshufd, but has a dependency on dst.
+ movhlps(dst, src);
+ pmovsxbw(dst, dst);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ // No dependency on dst.
+ pshufd(dst, src, 0xEE);
+ pmovsxbw(dst, dst);
+ }
+ }
+}
+
+void TurboAssembler::I16x8UConvertI8x16High(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // tmp = |0|0|0|0|0|0|0|0 | 0|0|0|0|0|0|0|0|
+ // src = |a|b|c|d|e|f|g|h | i|j|k|l|m|n|o|p|
+ // dst = |0|a|0|b|0|c|0|d | 0|e|0|f|0|g|0|h|
+ XMMRegister tmp = dst == src ? scratch : dst;
+ vpxor(tmp, tmp, tmp);
+ vpunpckhbw(dst, src, tmp);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ if (dst == src) {
+ // xorps can be executed on more ports than pshufd.
+ xorps(scratch, scratch);
+ punpckhbw(dst, scratch);
+ } else {
+ // No dependency on dst.
+ pshufd(dst, src, 0xEE);
+ pmovzxbw(dst, dst);
+ }
+ }
+}
+
+void TurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister scratch) {
+ // k = i16x8.splat(0x8000)
+ Pcmpeqd(scratch, scratch);
+ Psllw(scratch, scratch, byte{15});
+
+ Pmulhrsw(dst, src1, src2);
+ Pcmpeqw(scratch, dst);
+ Pxor(dst, scratch);
+}
+
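For reference, the Q15 rounding multiply with saturation that I16x8Q15MulRSatS emulates, written out in scalar form (an illustrative sketch; the function name is not part of the patch):

#include <cstdint>

int16_t Q15MulRoundSat(int16_t a, int16_t b) {
  // pmulhrsw computes (a * b * 2 + 0x8000) >> 16, i.e. (a * b + 0x4000) >> 15.
  int32_t product = (int32_t{a} * int32_t{b} + 0x4000) >> 15;
  // The only input pair that overflows int16 is -32768 * -32768; saturate it.
  if (product > INT16_MAX) product = INT16_MAX;
  return static_cast<int16_t>(product);
}

The SIMD sequence reaches the same result by comparing each pmulhrsw lane against 0x8000 (the sole overflow value) with Pcmpeqw and flipping those lanes to 0x7FFF with Pxor.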
+void TurboAssembler::S128Store32Lane(Operand dst, XMMRegister src,
+ uint8_t laneidx) {
+ if (laneidx == 0) {
+ Movss(dst, src);
+ } else {
+ DCHECK_GE(3, laneidx);
+ Extractps(dst, src, laneidx);
+ }
+}
+
+void TurboAssembler::I8x16Popcnt(XMMRegister dst, XMMRegister src,
+ XMMRegister tmp1, XMMRegister tmp2,
+ Register scratch) {
+ DCHECK_NE(dst, tmp1);
+ DCHECK_NE(src, tmp1);
+ DCHECK_NE(dst, tmp2);
+ DCHECK_NE(src, tmp2);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vmovdqa(tmp1, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x0f(),
+ scratch));
+ vpandn(tmp2, tmp1, src);
+ vpand(dst, tmp1, src);
+ vmovdqa(tmp1, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_popcnt_mask(),
+ scratch));
+ vpsrlw(tmp2, tmp2, 4);
+ vpshufb(dst, tmp1, dst);
+ vpshufb(tmp2, tmp1, tmp2);
+ vpaddb(dst, dst, tmp2);
+ } else if (CpuFeatures::IsSupported(ATOM)) {
+ // Pre-Goldmont low-power Intel microarchitectures have a very slow
+ // PSHUFB instruction, so use a PSHUFB-free divide-and-conquer
+ // algorithm on these processors. The ATOM CPU feature captures exactly
+ // the right set of processors.
+ xorps(tmp1, tmp1);
+ pavgb(tmp1, src);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ andps(tmp1,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x55(), scratch));
+ psubb(dst, tmp1);
+ Operand splat_0x33 = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x33(), scratch);
+ movaps(tmp1, dst);
+ andps(dst, splat_0x33);
+ psrlw(tmp1, 2);
+ andps(tmp1, splat_0x33);
+ paddb(dst, tmp1);
+ movaps(tmp1, dst);
+ psrlw(dst, 4);
+ paddb(dst, tmp1);
+ andps(dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x0f(), scratch));
+ } else {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ movaps(tmp1,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x0f(), scratch));
+ Operand mask = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_popcnt_mask(), scratch);
+ if (tmp2 != tmp1) {
+ movaps(tmp2, tmp1);
+ }
+ andps(tmp1, src);
+ andnps(tmp2, src);
+ psrlw(tmp2, 4);
+ movaps(dst, mask);
+ pshufb(dst, tmp1);
+ movaps(tmp1, mask);
+ pshufb(tmp1, tmp2);
+ paddb(dst, tmp1);
+ }
+}
+
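The PSHUFB path above is a nibble-lookup population count; a scalar sketch of the per-byte computation it performs (illustrative only, not part of the patch):

#include <cstdint>

uint8_t BytePopcount(uint8_t v) {
  // Popcount of every 4-bit value 0..15; the SIMD code keeps this table in an
  // XMM register loaded from the wasm_i8x16_popcnt_mask external reference.
  static constexpr uint8_t kNibblePopcount[16] = {0, 1, 1, 2, 1, 2, 2, 3,
                                                  1, 2, 2, 3, 2, 3, 3, 4};
  return kNibblePopcount[v & 0x0F] + kNibblePopcount[v >> 4];
}

The vector code isolates the low and high nibbles of each byte (pand / pandn plus psrlw against the 0x0F splat), looks both up in parallel with pshufb, and sums the two lookups with paddb; the ATOM branch instead uses the classic bit-twiddling reduction because pshufb is slow on those cores.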
+void TurboAssembler::F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src,
+ Register tmp) {
+ // dst = [ src_low, 0x43300000, src_high, 0x43300000 ];
+ // 0x43300000'00000000 is a special double where the significand bits
+ // precisely represent all uint32 numbers.
+ Unpcklps(dst, src,
+ ExternalReferenceAsOperand(
+ ExternalReference::
+ address_of_wasm_f64x2_convert_low_i32x4_u_int_mask(),
+ tmp));
+ Subpd(dst, dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_double_2_power_52(), tmp));
+}
+
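The 0x43300000 constant encodes the usual 2^52 trick for converting uint32 to double; the same bit manipulation in scalar form (an illustrative sketch, not V8 code):

#include <cstdint>
#include <cstring>

double Uint32ToDouble(uint32_t u) {
  // 2^52 has high word 0x43300000 and a unit in the last place of exactly 1.0,
  // so placing u in the low 32 bits of the significand yields 2^52 + u exactly.
  uint64_t bits = (uint64_t{0x43300000} << 32) | u;
  double biased;
  std::memcpy(&biased, &bits, sizeof(biased));
  // Subtracting 2^52 leaves u, now represented as a double.
  return biased - 4503599627370496.0;  // 2^52
}

Unpcklps builds that biased bit pattern for both lanes at once, and Subpd removes the 2^52 bias.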
+void TurboAssembler::I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch,
+ Register tmp) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ XMMRegister original_dst = dst;
+ // Make sure we don't overwrite src.
+ if (dst == src) {
+ DCHECK_NE(scratch, src);
+ dst = scratch;
+ }
+ // dst = 0 if src == NaN, else all ones.
+ vcmpeqpd(dst, src, src);
+ // dst = 0 if src == NaN, else INT32_MAX as double.
+ vandpd(dst, dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_int32_max_as_double(), tmp));
+ // dst = 0 if src == NaN, src is saturated to INT32_MAX as double.
+ vminpd(dst, src, dst);
+ // Values > INT32_MAX are already saturated; values < INT32_MIN raise an
+ // exception, which is masked and returns 0x80000000.
+ vcvttpd2dq(dst, dst);
+
+ if (original_dst != dst) {
+ vmovaps(original_dst, dst);
+ }
+ } else {
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ movaps(scratch, dst);
+ cmpeqpd(scratch, dst);
+ andps(scratch,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_int32_max_as_double(), tmp));
+ minpd(dst, scratch);
+ cvttpd2dq(dst, dst);
+ }
+}
+
+void TurboAssembler::I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch,
+ Register tmp) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vxorpd(scratch, scratch, scratch);
+ // Saturate to 0.
+ vmaxpd(dst, src, scratch);
+ // Saturate to UINT32_MAX.
+ vminpd(dst, dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_uint32_max_as_double(), tmp));
+ // Truncate.
+ vroundpd(dst, dst, kRoundToZero);
+ // Add 2^52 so that the low significand bits hold the uint32 value.
+ vaddpd(dst, dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_double_2_power_52(), tmp));
+ // Extract low 32 bits of each double's significand, zero top lanes.
+ // dst = [dst[0], dst[2], 0, 0]
+ vshufps(dst, dst, scratch, 0x88);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+
+ xorps(scratch, scratch);
+ maxpd(dst, scratch);
+ minpd(dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_uint32_max_as_double(), tmp));
+ roundpd(dst, dst, kRoundToZero);
+ addpd(dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_double_2_power_52(), tmp));
+ shufps(dst, scratch, 0x88);
+ }
+}
+
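The unsigned truncation runs the same 2^52 trick in reverse after clamping; a scalar sketch of the per-lane computation (illustrative only):

#include <cmath>
#include <cstdint>
#include <cstring>

uint32_t SatTruncDoubleToUint32(double d) {
  if (!(d > 0.0)) return 0;                     // NaN and negatives go to 0.
  if (d >= 4294967295.0) return 4294967295u;    // Saturate to UINT32_MAX.
  // Adding 2^52 to the truncated value parks it in the low significand bits.
  double biased = std::trunc(d) + 4503599627370496.0;
  uint64_t bits;
  std::memcpy(&bits, &biased, sizeof(bits));
  return static_cast<uint32_t>(bits);
}

In the SIMD version, the final vshufps/shufps with 0x88 keeps only those low dwords and zeroes the top two lanes.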
+void TurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ XMMRegister tmp = dst == src ? scratch : dst;
+ vpxor(tmp, tmp, tmp);
+ vpsubq(tmp, tmp, src);
+ vblendvpd(dst, src, tmp, src);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE3);
+ movshdup(scratch, src);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ psrad(scratch, 31);
+ xorps(dst, scratch);
+ psubq(dst, scratch);
+ }
+}
+
+void TurboAssembler::I64x2GtS(XMMRegister dst, XMMRegister src0,
+ XMMRegister src1, XMMRegister scratch) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpcmpgtq(dst, src0, src1);
+ } else if (CpuFeatures::IsSupported(SSE4_2)) {
+ CpuFeatureScope sse_scope(this, SSE4_2);
+ DCHECK_EQ(dst, src0);
+ pcmpgtq(dst, src1);
+ } else {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ DCHECK_NE(dst, src0);
+ DCHECK_NE(dst, src1);
+ movaps(dst, src1);
+ movaps(scratch, src0);
+ psubq(dst, src0);
+ pcmpeqd(scratch, src1);
+ andps(dst, scratch);
+ movaps(scratch, src0);
+ pcmpgtd(scratch, src1);
+ orps(dst, scratch);
+ movshdup(dst, dst);
+ }
+}
+
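The SSSE3 fallback assembles a signed 64-bit greater-than out of 32-bit compares; the scalar logic it implements is roughly the following (an illustrative sketch, not part of the patch):

#include <cstdint>

bool Int64GreaterThan(int64_t a, int64_t b) {
  int32_t a_hi = static_cast<int32_t>(static_cast<uint64_t>(a) >> 32);
  int32_t b_hi = static_cast<int32_t>(static_cast<uint64_t>(b) >> 32);
  uint32_t a_lo = static_cast<uint32_t>(a);
  uint32_t b_lo = static_cast<uint32_t>(b);
  // The signed high halves decide unless they are equal, in which case the
  // unsigned low halves break the tie.
  return a_hi > b_hi || (a_hi == b_hi && a_lo > b_lo);
}

In the vector code, psubq supplies the low-half tie-breaker (the sign of the high dword of src1 - src0 when the high dwords are equal), pcmpgtd supplies the high-half compare, and movshdup broadcasts the deciding dword across each 64-bit lane. I64x2GeS below reuses the same sequence with the operands swapped and the result inverted.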
+void TurboAssembler::I64x2GeS(XMMRegister dst, XMMRegister src0,
+ XMMRegister src1, XMMRegister scratch) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpcmpgtq(dst, src1, src0);
+ vpcmpeqd(scratch, scratch, scratch);
+ vpxor(dst, dst, scratch);
+ } else if (CpuFeatures::IsSupported(SSE4_2)) {
+ CpuFeatureScope sse_scope(this, SSE4_2);
+ DCHECK_NE(dst, src0);
+ if (dst != src1) {
+ movaps(dst, src1);
+ }
+ pcmpgtq(dst, src0);
+ pcmpeqd(scratch, scratch);
+ xorps(dst, scratch);
+ } else {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ DCHECK_NE(dst, src0);
+ DCHECK_NE(dst, src1);
+ movaps(dst, src0);
+ movaps(scratch, src1);
+ psubq(dst, src1);
+ pcmpeqd(scratch, src0);
+ andps(dst, scratch);
+ movaps(scratch, src1);
+ pcmpgtd(scratch, src0);
+ orps(dst, scratch);
+ movshdup(dst, dst);
+ pcmpeqd(scratch, scratch);
+ xorps(dst, scratch);
+ }
+}
+
+void TurboAssembler::I16x8ExtAddPairwiseI8x16S(XMMRegister dst, XMMRegister src,
+ XMMRegister tmp,
+ Register scratch) {
+ // pmaddubsw treats the first operand as unsigned, so pass the external
+ // reference as the first operand.
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x01(), scratch);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vmovdqa(tmp, op);
+ vpmaddubsw(dst, tmp, src);
+ } else {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ if (dst == src) {
+ movaps(tmp, op);
+ pmaddubsw(tmp, src);
+ movaps(dst, tmp);
+ } else {
+ movaps(dst, op);
+ pmaddubsw(dst, src);
+ }
+ }
+}
+
+void TurboAssembler::I16x8ExtAddPairwiseI8x16U(XMMRegister dst, XMMRegister src,
+ Register scratch) {
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x01(), scratch);
+ if (!CpuFeatures::IsSupported(AVX) && dst != src) {
+ movaps(dst, src);
+ }
+ Pmaddubsw(dst, src, op);
+}
+
+void TurboAssembler::I32x4ExtAddPairwiseI16x8S(XMMRegister dst, XMMRegister src,
+ Register scratch) {
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i16x8_splat_0x0001(), scratch);
+ if (!CpuFeatures::IsSupported(AVX) && dst != src) {
+ movaps(dst, src);
+ }
+ // pmaddwd multiplies signed words in src and op, producing
+ // signed doublewords, then adds pairwise.
+ // src = |a|b|c|d|e|f|g|h|
+ // dst = | a*1 + b*1 | c*1 + d*1 | e*1 + f*1 | g*1 + h*1 |
+ Pmaddwd(dst, src, op);
+}
+
+void TurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src,
+ XMMRegister tmp) {
+ // src = |a|b|c|d|e|f|g|h|
+ // tmp = i32x4.splat(0x0000FFFF)
+ Pcmpeqd(tmp, tmp);
+ Psrld(tmp, tmp, byte{16});
+ // tmp =|0|b|0|d|0|f|0|h|
+ Pand(tmp, src);
+ // dst = |0|a|0|c|0|e|0|g|
+ Psrld(dst, src, byte{16});
+ // dst = |a+b|c+d|e+f|g+h|
+ Paddd(dst, dst, tmp);
+}
+
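The extended pairwise additions above lean on the multiply-add instructions with a splatted constant 1; in scalar terms each output lane is simply the following (a sketch, not part of the patch):

#include <cstdint>

// Signed i8 -> i16 pairwise add, as pmaddubsw(ones, src) computes when the
// all-ones-byte vector is the (unsigned) first operand.
int16_t PairwiseAddI8(int8_t lo, int8_t hi) {
  return static_cast<int16_t>(lo) * 1 + static_cast<int16_t>(hi) * 1;
}

// Signed i16 -> i32 pairwise add, as pmaddwd(src, ones) computes.
int32_t PairwiseAddI16(int16_t lo, int16_t hi) {
  return static_cast<int32_t>(lo) * 1 + static_cast<int32_t>(hi) * 1;
}

There is no matching multiply-add for the unsigned i16 -> i32 case, so I32x4ExtAddPairwiseI16x8U instead masks the even lanes with a 0x0000FFFF splat, shifts the odd lanes down with Psrld, and adds the two halves with Paddd.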
void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) {
DCHECK_GE(63, shift);
if (shift >= 32) {
@@ -821,6 +1277,15 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}
+void MacroAssembler::CmpInstanceTypeRange(Register map, Register scratch,
+ InstanceType lower_limit,
+ InstanceType higher_limit) {
+ DCHECK_LT(lower_limit, higher_limit);
+ movzx_w(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
+ lea(scratch, Operand(scratch, 0u - lower_limit));
+ cmp(scratch, Immediate(higher_limit - lower_limit));
+}
+
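CmpInstanceTypeRange uses the standard unsigned-subtraction range check so that a single comparison covers both bounds; in scalar form (an illustrative sketch):

#include <cstdint>

bool InstanceTypeInRange(uint32_t type, uint32_t lower_limit,
                         uint32_t higher_limit) {
  // If type < lower_limit the subtraction wraps around to a huge unsigned
  // value, so one unsigned "<=" rejects values on either side of the range.
  // Callers then branch on below_equal, as AssertFunction does below.
  return (type - lower_limit) <= (higher_limit - lower_limit);
}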
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
@@ -841,14 +1306,16 @@ void MacroAssembler::AssertConstructor(Register object) {
}
}
-void MacroAssembler::AssertFunction(Register object) {
+void MacroAssembler::AssertFunction(Register object, Register scratch) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
Push(object);
- CmpObjectType(object, JS_FUNCTION_TYPE, object);
+ LoadMap(object, object);
+ CmpInstanceTypeRange(object, scratch, FIRST_JS_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_TYPE);
Pop(object);
- Check(equal, AbortReason::kOperandIsNotAFunction);
+ Check(below_equal, AbortReason::kOperandIsNotAFunction);
}
}
@@ -967,11 +1434,13 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
}
void TurboAssembler::AllocateStackSpace(int bytes) {
+ DCHECK_GE(bytes, 0);
while (bytes > kStackPageSize) {
sub(esp, Immediate(kStackPageSize));
mov(Operand(esp, 0), Immediate(0));
bytes -= kStackPageSize;
}
+ if (bytes == 0) return;
sub(esp, Immediate(bytes));
}
#endif
@@ -1286,7 +1755,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
DCHECK_EQ(actual_parameter_count, eax);
DCHECK_EQ(expected_parameter_count, ecx);
Label regular_invoke;
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
+
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
cmp(expected_parameter_count, Immediate(kDontAdaptArgumentsSentinel));
@@ -1358,17 +1827,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
CallRuntime(Runtime::kThrowStackOverflow);
int3(); // This should be unreachable.
}
-#else
- cmp(expected_parameter_count, actual_parameter_count);
- j(equal, &regular_invoke);
- Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
- if (flag == CALL_FUNCTION) {
- Call(adaptor, RelocInfo::CODE_TARGET);
- jmp(done, Label::kNear);
- } else {
- Jump(adaptor, RelocInfo::CODE_TARGET);
- }
-#endif
+
bind(&regular_invoke);
}
}
@@ -1749,8 +2208,22 @@ void TurboAssembler::Haddps(XMMRegister dst, XMMRegister src1, Operand src2) {
}
}
+void TurboAssembler::Pcmpeqq(XMMRegister dst, Operand src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpcmpeqq(dst, dst, src);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ pcmpeqq(dst, src);
+ }
+}
+
void TurboAssembler::Pcmpeqq(XMMRegister dst, XMMRegister src1,
XMMRegister src2) {
+ Pcmpeqq(dst, src1, Operand(src2));
+}
+
+void TurboAssembler::Pcmpeqq(XMMRegister dst, XMMRegister src1, Operand src2) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vpcmpeqq(dst, src1, src2);
@@ -1885,28 +2358,40 @@ void TurboAssembler::Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
}
void TurboAssembler::Pinsrb(XMMRegister dst, Operand src, int8_t imm8) {
+ Pinsrb(dst, dst, src, imm8);
+}
+
+void TurboAssembler::Pinsrb(XMMRegister dst, XMMRegister src1, Operand src2,
+ int8_t imm8) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
- vpinsrb(dst, dst, src, imm8);
+ vpinsrb(dst, src1, src2, imm8);
return;
}
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
- pinsrb(dst, src, imm8);
+ if (dst != src1) {
+ movdqu(dst, src1);
+ }
+ pinsrb(dst, src2, imm8);
return;
}
FATAL("no AVX or SSE4.1 support");
}
-void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, uint8_t imm8) {
+void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
+ uint8_t imm8) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
- vpinsrd(dst, dst, src, imm8);
+ vpinsrd(dst, src1, src2, imm8);
return;
}
+ if (dst != src1) {
+ movdqu(dst, src1);
+ }
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
- pinsrd(dst, src, imm8);
+ pinsrd(dst, src2, imm8);
return;
}
// Without AVX or SSE, we can only have 64-bit values in xmm registers.
@@ -1917,10 +2402,10 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, uint8_t imm8) {
// Write original content of {dst} to the stack.
movsd(Operand(esp, 0), dst);
// Overwrite the portion specified in {imm8}.
- if (src.is_reg_only()) {
- mov(Operand(esp, imm8 * kUInt32Size), src.reg());
+ if (src2.is_reg_only()) {
+ mov(Operand(esp, imm8 * kUInt32Size), src2.reg());
} else {
- movss(dst, src);
+ movss(dst, src2);
movss(Operand(esp, imm8 * kUInt32Size), dst);
}
// Load back the full value into {dst}.
@@ -1928,13 +2413,25 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, uint8_t imm8) {
add(esp, Immediate(kDoubleSize));
}
+void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, uint8_t imm8) {
+ Pinsrd(dst, dst, src, imm8);
+}
+
void TurboAssembler::Pinsrw(XMMRegister dst, Operand src, int8_t imm8) {
+ Pinsrw(dst, dst, src, imm8);
+}
+
+void TurboAssembler::Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2,
+ int8_t imm8) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
- vpinsrw(dst, dst, src, imm8);
+ vpinsrw(dst, src1, src2, imm8);
return;
} else {
- pinsrw(dst, src, imm8);
+ if (dst != src1) {
+ movdqu(dst, src1);
+ }
+ pinsrw(dst, src2, imm8);
return;
}
}
@@ -2283,9 +2780,17 @@ void TurboAssembler::CallCodeObject(Register code_object) {
call(code_object);
}
-void TurboAssembler::JumpCodeObject(Register code_object) {
+void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
LoadCodeObjectEntry(code_object, code_object);
- jmp(code_object);
+ switch (jump_mode) {
+ case JumpMode::kJump:
+ jmp(code_object);
+ return;
+ case JumpMode::kPushAndReturn:
+ push(code_object);
+ ret(0);
+ return;
+ }
}
void TurboAssembler::Jump(const ExternalReference& reference) {
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index fde150458e..29bb8ca2a0 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -96,7 +96,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void AllocateStackSpace(int bytes);
#else
void AllocateStackSpace(Register bytes) { sub(esp, bytes); }
- void AllocateStackSpace(int bytes) { sub(esp, Immediate(bytes)); }
+ void AllocateStackSpace(int bytes) {
+ DCHECK_GE(bytes, 0);
+ if (bytes == 0) return;
+ sub(esp, Immediate(bytes));
+ }
#endif
// Print a message to stdout and abort execution.
@@ -141,7 +145,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
- void JumpCodeObject(Register code_object) override;
+ void JumpCodeObject(Register code_object,
+ JumpMode jump_mode = JumpMode::kJump) override;
void Jump(const ExternalReference& reference) override;
void RetpolineCall(Register reg);
@@ -324,6 +329,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP2_WITH_TYPE(Movd, movd, Operand, XMMRegister)
AVX_OP2_WITH_TYPE(Cvtdq2ps, cvtdq2ps, XMMRegister, Operand)
AVX_OP2_WITH_TYPE(Cvtdq2ps, cvtdq2ps, XMMRegister, XMMRegister)
+ AVX_OP2_WITH_TYPE(Cvtdq2pd, cvtdq2pd, XMMRegister, XMMRegister)
+ AVX_OP2_WITH_TYPE(Cvtps2pd, cvtps2pd, XMMRegister, XMMRegister)
+ AVX_OP2_WITH_TYPE(Cvtpd2ps, cvtpd2ps, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Cvttps2dq, cvttps2dq, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Sqrtps, sqrtps, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Sqrtpd, sqrtpd, XMMRegister, XMMRegister)
@@ -406,6 +414,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, XMMRegister) \
AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, Operand)
+ AVX_PACKED_OP3(Unpcklps, unpcklps)
AVX_PACKED_OP3(Addps, addps)
AVX_PACKED_OP3(Addpd, addpd)
AVX_PACKED_OP3(Subps, subps)
@@ -442,6 +451,17 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_PACKED_OP3(Pavgb, pavgb)
AVX_PACKED_OP3(Pavgw, pavgw)
AVX_PACKED_OP3(Pand, pand)
+ AVX_PACKED_OP3(Pminub, pminub)
+ AVX_PACKED_OP3(Pmaxub, pmaxub)
+ AVX_PACKED_OP3(Paddusb, paddusb)
+ AVX_PACKED_OP3(Psubusb, psubusb)
+ AVX_PACKED_OP3(Pcmpgtb, pcmpgtb)
+ AVX_PACKED_OP3(Pcmpeqb, pcmpeqb)
+ AVX_PACKED_OP3(Paddb, paddb)
+ AVX_PACKED_OP3(Paddsb, paddsb)
+ AVX_PACKED_OP3(Psubb, psubb)
+ AVX_PACKED_OP3(Psubsb, psubsb)
+
#undef AVX_PACKED_OP3
AVX_PACKED_OP3_WITH_TYPE(Psllw, psllw, XMMRegister, uint8_t)
@@ -453,10 +473,26 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_PACKED_OP3_WITH_TYPE(Psraw, psraw, XMMRegister, uint8_t)
AVX_PACKED_OP3_WITH_TYPE(Psrad, psrad, XMMRegister, uint8_t)
- AVX_PACKED_OP3_WITH_TYPE(Movlps, movlps, XMMRegister, Operand)
- AVX_PACKED_OP3_WITH_TYPE(Movhps, movhps, XMMRegister, Operand)
#undef AVX_PACKED_OP3_WITH_TYPE
+// Macro for instructions that take two source operands in the AVX version and
+// one source operand in the SSE version. Moves src1 to dst if dst != src1.
+#define AVX_OP3_WITH_MOVE(macro_name, name, dst_type, src_type) \
+ void macro_name(dst_type dst, dst_type src1, src_type src2) { \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope scope(this, AVX); \
+ v##name(dst, src1, src2); \
+ } else { \
+ if (dst != src1) { \
+ movaps(dst, src1); \
+ } \
+ name(dst, src2); \
+ } \
+ }
+ AVX_OP3_WITH_MOVE(Movlps, movlps, XMMRegister, Operand)
+ AVX_OP3_WITH_MOVE(Movhps, movhps, XMMRegister, Operand)
+#undef AVX_OP3_WITH_MOVE
+
// Non-SSE2 instructions.
#define AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, dst_type, src_type, \
sse_scope) \
@@ -526,13 +562,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP3_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, Operand, SSE4_1)
AVX_OP3_XO_SSE4(Pmaxsd, pmaxsd)
+ AVX_OP3_XO_SSE4(Pminsb, pminsb)
+ AVX_OP3_XO_SSE4(Pmaxsb, pmaxsb)
AVX_OP3_WITH_TYPE_SCOPE(Pmaddubsw, pmaddubsw, XMMRegister, XMMRegister, SSSE3)
- AVX_OP3_WITH_TYPE_SCOPE(Pmulhrsw, pmulhrsw, XMMRegister, XMMRegister, SSSE3)
+ AVX_OP3_WITH_TYPE_SCOPE(Pmaddubsw, pmaddubsw, XMMRegister, Operand, SSSE3)
#undef AVX_OP3_XO_SSE4
#undef AVX_OP3_WITH_TYPE_SCOPE
void Haddps(XMMRegister dst, XMMRegister src1, Operand src2);
+ void Pcmpeqq(XMMRegister dst, Operand src);
+ void Pcmpeqq(XMMRegister dst, XMMRegister src) { Pcmpeqq(dst, Operand(src)); }
+ void Pcmpeqq(XMMRegister dst, XMMRegister src1, Operand src2);
void Pcmpeqq(XMMRegister dst, XMMRegister src1, XMMRegister src2);
void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, dst, src); }
void Pshufb(XMMRegister dst, Operand src) { Pshufb(dst, dst, src); }
@@ -568,14 +609,20 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Pinsrb(dst, Operand(src), imm8);
}
void Pinsrb(XMMRegister dst, Operand src, int8_t imm8);
+ // Moves src1 to dst if AVX is not supported.
+ void Pinsrb(XMMRegister dst, XMMRegister src1, Operand src2, int8_t imm8);
void Pinsrd(XMMRegister dst, Register src, uint8_t imm8) {
Pinsrd(dst, Operand(src), imm8);
}
void Pinsrd(XMMRegister dst, Operand src, uint8_t imm8);
+ // Moves src1 to dst if AVX is not supported.
+ void Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
void Pinsrw(XMMRegister dst, Register src, int8_t imm8) {
Pinsrw(dst, Operand(src), imm8);
}
void Pinsrw(XMMRegister dst, Operand src, int8_t imm8);
+ // Moves src1 to dst if AVX is not supported.
+ void Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2, int8_t imm8);
void Vbroadcastss(XMMRegister dst, Operand src);
void Extractps(Operand dst, XMMRegister src, uint8_t imm8);
@@ -612,6 +659,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Roundps(XMMRegister dst, XMMRegister src, RoundingMode mode);
void Roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ // Handles SSE and AVX. On SSE, moves src1 to dst if they are not equal.
+ void Pmulhrsw(XMMRegister dst, XMMRegister src1, XMMRegister src2);
+
// These Wasm SIMD ops do not have direct lowerings on IA32. These
// helpers are optimized to produce the fastest and smallest codegen.
// Defined here to allow usage on both TurboFan and Liftoff.
@@ -625,6 +675,38 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Requires dst == mask when AVX is not supported.
void S128Select(XMMRegister dst, XMMRegister mask, XMMRegister src1,
XMMRegister src2, XMMRegister scratch);
+ void I64x2SConvertI32x4High(XMMRegister dst, XMMRegister src);
+ void I64x2UConvertI32x4High(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch);
+ void I32x4SConvertI16x8High(XMMRegister dst, XMMRegister src);
+ void I32x4UConvertI16x8High(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch);
+ void I16x8SConvertI8x16High(XMMRegister dst, XMMRegister src);
+ void I16x8UConvertI8x16High(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch);
+ void I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister scratch);
+ void S128Store32Lane(Operand dst, XMMRegister src, uint8_t laneidx);
+ void I8x16Popcnt(XMMRegister dst, XMMRegister src, XMMRegister tmp1,
+ XMMRegister tmp2, Register scratch);
+ void F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src, Register tmp);
+ void I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch, Register tmp);
+ void I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch, Register tmp);
+ void I64x2Abs(XMMRegister dst, XMMRegister src, XMMRegister scratch);
+ void I64x2GtS(XMMRegister dst, XMMRegister src0, XMMRegister src1,
+ XMMRegister scratch);
+ void I64x2GeS(XMMRegister dst, XMMRegister src0, XMMRegister src1,
+ XMMRegister scratch);
+ void I16x8ExtAddPairwiseI8x16S(XMMRegister dst, XMMRegister src,
+ XMMRegister tmp, Register scratch);
+ void I16x8ExtAddPairwiseI8x16U(XMMRegister dst, XMMRegister src,
+ Register scratch);
+ void I32x4ExtAddPairwiseI16x8S(XMMRegister dst, XMMRegister src,
+ Register scratch);
+ void I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src,
+ XMMRegister tmp);
void Push(Register src) { push(src); }
void Push(Operand src) { push(src); }
@@ -803,6 +885,15 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Compare instance type for map.
void CmpInstanceType(Register map, InstanceType type);
+ // Compare instance type ranges for a map (lower_limit and higher_limit
+ // inclusive).
+ //
+ // Always use unsigned comparisons: below_equal for a positive
+ // result.
+ void CmpInstanceTypeRange(Register map, Register scratch,
+ InstanceType lower_limit,
+ InstanceType higher_limit);
+
// Smi tagging support.
void SmiTag(Register reg) {
STATIC_ASSERT(kSmiTag == 0);
@@ -840,7 +931,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void AssertNotSmi(Register object);
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
- void AssertFunction(Register object);
+ void AssertFunction(Register object, Register scratch);
// Abort execution if argument is not a Constructor, enabled via --debug-code.
void AssertConstructor(Register object);
diff --git a/deps/v8/src/codegen/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc
index 79dad84077..cb686a1bfa 100644
--- a/deps/v8/src/codegen/interface-descriptors.cc
+++ b/deps/v8/src/codegen/interface-descriptors.cc
@@ -23,7 +23,13 @@ void CallInterfaceDescriptorData::InitializePlatformSpecific(
for (int i = 0; i < register_parameter_count; i++) {
// The value of the root register must be reserved, thus any uses
// within the calling convention are disallowed.
- DCHECK_NE(registers[i], kRootRegister);
+#ifdef DEBUG
+ CHECK_NE(registers[i], kRootRegister);
+ // Check for duplicated registers.
+ for (int j = i + 1; j < register_parameter_count; j++) {
+ CHECK_NE(registers[i], registers[j]);
+ }
+#endif
register_params_[i] = registers[i];
}
}
@@ -191,12 +197,26 @@ const Register FastNewObjectDescriptor::NewTargetRegister() {
return kJavaScriptCallNewTargetRegister;
}
+void TailCallOptimizedCodeSlotDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {kJavaScriptCallCodeStartRegister};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void LoadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void LoadBaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {LoadDescriptor::ReceiverRegister(),
+ LoadDescriptor::NameRegister(),
+ LoadDescriptor::SlotRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void LoadNoFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister(), NameRegister(), ICKindRegister()};
@@ -209,6 +229,18 @@ void LoadGlobalDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void LoadGlobalBaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {LoadGlobalDescriptor::NameRegister(),
+ LoadGlobalDescriptor::SlotRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void LookupBaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
void LoadGlobalNoFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {NameRegister(), ICKindRegister()};
@@ -231,6 +263,16 @@ void LoadWithReceiverAndVectorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(len, registers);
}
+void LoadWithReceiverBaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ LoadWithReceiverAndVectorDescriptor::ReceiverRegister(),
+ LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister(),
+ LoadWithReceiverAndVectorDescriptor::NameRegister(),
+ LoadWithReceiverAndVectorDescriptor::SlotRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void StoreGlobalDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {NameRegister(), ValueRegister(), SlotRegister()};
@@ -239,6 +281,16 @@ void StoreGlobalDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(len, registers);
}
+void StoreGlobalBaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {StoreGlobalDescriptor::NameRegister(),
+ StoreGlobalDescriptor::ValueRegister(),
+ StoreGlobalDescriptor::SlotRegister()};
+
+ int len = arraysize(registers) - kStackArgumentsCount;
+ data->InitializePlatformSpecific(len, registers);
+}
+
void StoreGlobalWithVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {NameRegister(), ValueRegister(), SlotRegister(),
@@ -256,6 +308,16 @@ void StoreDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(len, registers);
}
+void StoreBaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(), StoreDescriptor::SlotRegister()};
+
+ int len = arraysize(registers) - kStackArgumentsCount;
+ data->InitializePlatformSpecific(len, registers);
+}
+
void StoreTransitionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@@ -266,6 +328,30 @@ void StoreTransitionDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(len, registers);
}
+void BaselineOutOfLinePrologueDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:11421): Implement on other platforms.
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+ Register registers[] = {
+ kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ kInterpreterBytecodeArrayRegister, kJavaScriptCallNewTargetRegister};
+ data->InitializePlatformSpecific(kParameterCount, registers);
+#else
+ InitializePlatformUnimplemented(data, kParameterCount);
+#endif
+}
+
+void BaselineLeaveFrameDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:11421): Implement on other platforms.
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+ Register registers[] = {ParamsSizeRegister(), WeightRegister()};
+ data->InitializePlatformSpecific(kParameterCount, registers);
+#else
+ InitializePlatformUnimplemented(data, kParameterCount);
+#endif
+}
+
void StringAtDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
@@ -293,7 +379,12 @@ void TypeConversionNoContextDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void TypeConversionStackParameterDescriptor::InitializePlatformSpecific(
+void TypeConversion_BaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
+void SingleParameterOnStackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
data->InitializePlatformSpecific(0, nullptr);
}
@@ -397,7 +488,8 @@ void WasmFloat64ToNumberDescriptor::InitializePlatformSpecific(
}
#endif // !V8_TARGET_ARCH_IA32
-#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64)
+#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) && \
+ !defined(V8_TARGET_ARCH_RISCV64)
void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
@@ -415,6 +507,11 @@ void CloneObjectWithVectorDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, kParameterCount);
}
+void CloneObjectBaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
// static
Register RunMicrotasksDescriptor::MicrotaskQueueRegister() {
return CallDescriptors::call_descriptor_data(CallDescriptors::RunMicrotasks)
@@ -451,6 +548,11 @@ void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallTrampoline_BaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, 4);
@@ -461,6 +563,11 @@ void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallWithSpread_BaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, 4);
@@ -471,6 +578,12 @@ void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void ConstructWithSpread_BaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data,
+ kParameterCount - kStackArgumentsCount);
+}
+
void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, 4);
@@ -486,5 +599,15 @@ void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 3);
}
+void UnaryOp_BaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, 2);
+}
+
+void ForInPrepareDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h
index ae93513021..d9ae65f5c6 100644
--- a/deps/v8/src/codegen/interface-descriptors.h
+++ b/deps/v8/src/codegen/interface-descriptors.h
@@ -25,7 +25,6 @@ namespace internal {
V(Allocate) \
V(ApiCallback) \
V(ApiGetter) \
- V(ArgumentsAdaptor) \
V(ArrayConstructor) \
V(ArrayNArgumentsConstructor) \
V(ArrayNoArgumentConstructor) \
@@ -34,19 +33,24 @@ namespace internal {
V(BigIntToI32Pair) \
V(BigIntToI64) \
V(BinaryOp) \
+ V(BinaryOp_Baseline) \
V(BinaryOp_WithFeedback) \
V(CallForwardVarargs) \
V(CallFunctionTemplate) \
V(CallTrampoline) \
+ V(CallTrampoline_Baseline) \
V(CallTrampoline_WithFeedback) \
V(CallVarargs) \
V(CallWithArrayLike) \
V(CallWithArrayLike_WithFeedback) \
V(CallWithSpread) \
+ V(CallWithSpread_Baseline) \
V(CallWithSpread_WithFeedback) \
V(CEntry1ArgvOnStack) \
+ V(CloneObjectBaseline) \
V(CloneObjectWithVector) \
V(Compare) \
+ V(Compare_Baseline) \
V(Compare_WithFeedback) \
V(ConstructForwardVarargs) \
V(ConstructStub) \
@@ -54,13 +58,16 @@ namespace internal {
V(ConstructWithArrayLike) \
V(ConstructWithArrayLike_WithFeedback) \
V(Construct_WithFeedback) \
+ V(Construct_Baseline) \
V(ConstructWithSpread) \
+ V(ConstructWithSpread_Baseline) \
V(ConstructWithSpread_WithFeedback) \
V(ContextOnly) \
V(CppBuiltinAdaptor) \
V(DynamicCheckMaps) \
V(EphemeronKeyBarrier) \
V(FastNewObject) \
+ V(ForInPrepare) \
V(FrameDropperTrampoline) \
V(GetIteratorStackParameter) \
V(GetProperty) \
@@ -70,23 +77,33 @@ namespace internal {
V(InterpreterCEntry1) \
V(InterpreterCEntry2) \
V(InterpreterDispatch) \
+ V(TailCallOptimizedCodeSlot) \
V(InterpreterPushArgsThenCall) \
V(InterpreterPushArgsThenConstruct) \
V(JSTrampoline) \
+ V(BaselineOutOfLinePrologue) \
+ V(BaselineLeaveFrame) \
V(Load) \
+ V(LoadBaseline) \
V(LoadGlobal) \
+ V(LoadGlobalBaseline) \
V(LoadGlobalNoFeedback) \
V(LoadGlobalWithVector) \
V(LoadNoFeedback) \
V(LoadWithVector) \
V(LoadWithReceiverAndVector) \
+ V(LoadWithReceiverBaseline) \
+ V(LookupBaseline) \
V(NoContext) \
V(RecordWrite) \
V(ResumeGenerator) \
V(RunMicrotasks) \
V(RunMicrotasksEntry) \
+ V(SingleParameterOnStack) \
V(Store) \
+ V(StoreBaseline) \
V(StoreGlobal) \
+ V(StoreGlobalBaseline) \
V(StoreGlobalWithVector) \
V(StoreTransition) \
V(StoreWithVector) \
@@ -95,8 +112,9 @@ namespace internal {
V(StringSubstring) \
V(TypeConversion) \
V(TypeConversionNoContext) \
- V(TypeConversionStackParameter) \
+ V(TypeConversion_Baseline) \
V(Typeof) \
+ V(UnaryOp_Baseline) \
V(UnaryOp_WithFeedback) \
V(Void) \
V(WasmFloat32ToNumber) \
@@ -328,6 +346,14 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
UNREACHABLE();
}
+ // Initializes |data| to an unspecified state, for platforms that haven't
+ // implemented a given builtin.
+ static void InitializePlatformUnimplemented(CallInterfaceDescriptorData* data,
+ int register_parameter_count) {
+ DefaultInitializePlatformSpecific(data,
+ std::min(register_parameter_count, 4));
+ }
+
virtual void InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// Default descriptor configuration: one result, all parameters are passed
@@ -521,11 +547,25 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
kNewTarget, \
kActualArgumentsCount, \
##__VA_ARGS__, \
- \
kParameterCount, \
kContext = kParameterCount /* implicit parameter */ \
};
+#define DEFINE_JS_PARAMETERS_NO_CONTEXT(...) \
+ static constexpr int kDescriptorFlags = \
+ CallInterfaceDescriptorData::kAllowVarArgs | \
+ CallInterfaceDescriptorData::kNoContext; \
+ static constexpr int kReturnCount = 1; \
+ static constexpr StackArgumentOrder kStackArgumentOrder = \
+ StackArgumentOrder::kJS; \
+ enum ParameterIndices { \
+ kTarget, \
+ kNewTarget, \
+ kActualArgumentsCount, \
+ ##__VA_ARGS__, \
+ kParameterCount, \
+ };
+
#define DEFINE_JS_PARAMETER_TYPES(...) \
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), /* kTarget */ \
MachineType::AnyTagged(), /* kNewTarget */ \
@@ -548,7 +588,7 @@ class V8_EXPORT_PRIVATE VoidDescriptor : public CallInterfaceDescriptor {
};
// This class is subclassed by Torque-generated call interface descriptors.
-template <int parameter_count, bool has_context_parameter>
+template <int return_count, int parameter_count, bool has_context_parameter>
class TorqueInterfaceDescriptor : public CallInterfaceDescriptor {
public:
static constexpr int kDescriptorFlags =
@@ -561,7 +601,7 @@ class TorqueInterfaceDescriptor : public CallInterfaceDescriptor {
STATIC_ASSERT(0 <= i && i < kParameterCount);
return static_cast<ParameterIndices>(i);
}
- static constexpr int kReturnCount = 1;
+ static constexpr int kReturnCount = return_count;
using CallInterfaceDescriptor::CallInterfaceDescriptor;
@@ -571,14 +611,15 @@ class TorqueInterfaceDescriptor : public CallInterfaceDescriptor {
? kMaxTFSBuiltinRegisterParams
: kParameterCount;
static const int kStackParams = kParameterCount - kRegisterParams;
- virtual MachineType ReturnType() = 0;
+ virtual std::vector<MachineType> ReturnType() = 0;
virtual std::array<MachineType, kParameterCount> ParameterTypes() = 0;
void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override {
DefaultInitializePlatformSpecific(data, kRegisterParams);
}
void InitializePlatformIndependent(
CallInterfaceDescriptorData* data) override {
- std::vector<MachineType> machine_types = {ReturnType()};
+ std::vector<MachineType> machine_types = ReturnType();
+ DCHECK_EQ(kReturnCount, machine_types.size());
auto parameter_types = ParameterTypes();
machine_types.insert(machine_types.end(), parameter_types.begin(),
parameter_types.end());
@@ -652,6 +693,17 @@ class LoadDescriptor : public CallInterfaceDescriptor {
static const Register SlotRegister();
};
+// LoadBaselineDescriptor is a load descriptor that does not take a context as
+// input.
+class LoadBaselineDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kReceiver, kName, kSlot)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
+ MachineType::AnyTagged(), // kName
+ MachineType::TaggedSigned()) // kSlot
+ DECLARE_DESCRIPTOR(LoadBaselineDescriptor, CallInterfaceDescriptor)
+};
+
class LoadGlobalNoFeedbackDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kName, kICKind)
@@ -705,6 +757,23 @@ class LoadGlobalDescriptor : public CallInterfaceDescriptor {
}
};
+class LoadGlobalBaselineDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kName, kSlot)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
+ MachineType::TaggedSigned()) // kSlot
+ DECLARE_DESCRIPTOR(LoadGlobalBaselineDescriptor, CallInterfaceDescriptor)
+};
+
+class LookupBaselineDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kName, kDepth, kSlot)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
+ MachineType::AnyTagged(), // kDepth
+ MachineType::AnyTagged()) // kSlot
+ DECLARE_DESCRIPTOR(LookupBaselineDescriptor, CallInterfaceDescriptor)
+};
+
class StoreDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kReceiver, kName, kValue, kSlot)
@@ -729,6 +798,25 @@ class StoreDescriptor : public CallInterfaceDescriptor {
static const int kStackArgumentsCount = kPassLastArgsOnStack ? 2 : 0;
};
+class StoreBaselineDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kReceiver, kName, kValue, kSlot)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
+ MachineType::AnyTagged(), // kName
+ MachineType::AnyTagged(), // kValue
+ MachineType::TaggedSigned()) // kSlot
+ DECLARE_DESCRIPTOR(StoreBaselineDescriptor, CallInterfaceDescriptor)
+
+#if V8_TARGET_ARCH_IA32
+ static const bool kPassLastArgsOnStack = true;
+#else
+ static const bool kPassLastArgsOnStack = false;
+#endif
+
+ // Pass value and slot through the stack.
+ static const int kStackArgumentsCount = kPassLastArgsOnStack ? 2 : 0;
+};
+
class StoreTransitionDescriptor : public StoreDescriptor {
public:
DEFINE_PARAMETERS(kReceiver, kName, kMap, kValue, kSlot, kVector)
@@ -790,6 +878,20 @@ class StoreGlobalDescriptor : public CallInterfaceDescriptor {
}
};
+class StoreGlobalBaselineDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kName, kValue, kSlot)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
+ MachineType::AnyTagged(), // kValue
+ MachineType::TaggedSigned()) // kSlot
+ DECLARE_DESCRIPTOR(StoreGlobalBaselineDescriptor, CallInterfaceDescriptor)
+
+ static const bool kPassLastArgsOnStack =
+ StoreDescriptor::kPassLastArgsOnStack;
+ // Pass value and slot through the stack.
+ static const int kStackArgumentsCount = kPassLastArgsOnStack ? 2 : 0;
+};
+
class StoreGlobalWithVectorDescriptor : public StoreGlobalDescriptor {
public:
DEFINE_PARAMETERS(kName, kValue, kSlot, kVector)
@@ -858,6 +960,18 @@ class LoadWithReceiverAndVectorDescriptor : public LoadWithVectorDescriptor {
static const int kStackArgumentsCount = kPassLastArgsOnStack ? 1 : 0;
};
+class LoadWithReceiverBaselineDescriptor : public LoadBaselineDescriptor {
+ public:
+ // TODO(v8:9497): Revert the machine type for kSlot to TaggedSigned once
+ // Torque can emit better call descriptors.
+ DEFINE_PARAMETERS_NO_CONTEXT(kReceiver, kLookupStartObject, kName, kSlot)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
+ MachineType::AnyTagged(), // kLookupStartObject
+ MachineType::AnyTagged(), // kName
+ MachineType::AnyTagged()) // kSlot
+ DECLARE_DESCRIPTOR(LoadWithReceiverBaselineDescriptor, LoadBaselineDescriptor)
+};
+
class LoadGlobalWithVectorDescriptor : public LoadGlobalDescriptor {
public:
DEFINE_PARAMETERS(kName, kSlot, kVector)
@@ -935,13 +1049,18 @@ class TypeConversionNoContextDescriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(TypeConversionNoContextDescriptor, CallInterfaceDescriptor)
};
-class TypeConversionStackParameterDescriptor final
- : public CallInterfaceDescriptor {
+class TypeConversion_BaselineDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kArgument, kSlot)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), MachineType::UintPtr())
+ DECLARE_DESCRIPTOR(TypeConversion_BaselineDescriptor, CallInterfaceDescriptor)
+};
+
+class SingleParameterOnStackDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kArgument)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged())
- DECLARE_DESCRIPTOR(TypeConversionStackParameterDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(SingleParameterOnStackDescriptor, CallInterfaceDescriptor)
};
class AsyncFunctionStackParameterDescriptor final
@@ -1021,16 +1140,25 @@ class CallWithSpreadDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(CallWithSpreadDescriptor, CallInterfaceDescriptor)
};
-// TODO(jgruber): Pass the slot as UintPtr.
+class CallWithSpread_BaselineDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_VARARGS(kTarget, kArgumentsCount, kSpread, kSlot)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
+ MachineType::Int32(), // kArgumentsCount
+ MachineType::AnyTagged(), // kSpread
+ MachineType::UintPtr()) // kSlot
+ DECLARE_DESCRIPTOR(CallWithSpread_BaselineDescriptor, CallInterfaceDescriptor)
+};
+
class CallWithSpread_WithFeedbackDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_VARARGS(kTarget, kArgumentsCount, kSpread, kSlot,
- kMaybeFeedbackVector)
+ kFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
MachineType::Int32(), // kArgumentsCount
MachineType::AnyTagged(), // kSpread
- MachineType::Int32(), // kSlot
- MachineType::AnyTagged()) // kMaybeFeedbackVector
+ MachineType::UintPtr(), // kSlot
+ MachineType::AnyTagged()) // kFeedbackVector
DECLARE_DESCRIPTOR(CallWithSpread_WithFeedbackDescriptor,
CallInterfaceDescriptor)
};
@@ -1043,15 +1171,14 @@ class CallWithArrayLikeDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(CallWithArrayLikeDescriptor, CallInterfaceDescriptor)
};
-// TODO(jgruber): Pass the slot as UintPtr.
class CallWithArrayLike_WithFeedbackDescriptor
: public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kTarget, kArgumentsList, kSlot, kMaybeFeedbackVector)
+ DEFINE_PARAMETERS(kTarget, kArgumentsList, kSlot, kFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
MachineType::AnyTagged(), // kArgumentsList
- MachineType::Int32(), // kSlot
- MachineType::AnyTagged()) // kMaybeFeedbackVector
+ MachineType::UintPtr(), // kSlot
+ MachineType::AnyTagged()) // kFeedbackVector
DECLARE_DESCRIPTOR(CallWithArrayLike_WithFeedbackDescriptor,
CallInterfaceDescriptor)
};
@@ -1079,16 +1206,35 @@ class ConstructWithSpreadDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(ConstructWithSpreadDescriptor, CallInterfaceDescriptor)
};
-// TODO(jgruber): Pass the slot as UintPtr.
+class ConstructWithSpread_BaselineDescriptor : public CallInterfaceDescriptor {
+ public:
+ // Note: kSlot comes before kSpread since as an untagged value it must be
+ // passed in a register.
+ DEFINE_JS_PARAMETERS(kSlot, kSpread)
+ DEFINE_JS_PARAMETER_TYPES(MachineType::UintPtr(), // kSlot
+ MachineType::AnyTagged()) // kSpread
+ DECLARE_DESCRIPTOR(ConstructWithSpread_BaselineDescriptor,
+ CallInterfaceDescriptor)
+
+#if V8_TARGET_ARCH_IA32
+ static const bool kPassLastArgsOnStack = true;
+#else
+ static const bool kPassLastArgsOnStack = false;
+#endif
+
+ // Pass spread through the stack.
+ static const int kStackArgumentsCount = kPassLastArgsOnStack ? 1 : 0;
+};
+
class ConstructWithSpread_WithFeedbackDescriptor
: public CallInterfaceDescriptor {
public:
// Note: kSlot comes before kSpread since as an untagged value it must be
// passed in a register.
- DEFINE_JS_PARAMETERS(kSlot, kSpread, kMaybeFeedbackVector)
- DEFINE_JS_PARAMETER_TYPES(MachineType::Int32(), // kSlot
+ DEFINE_JS_PARAMETERS(kSlot, kSpread, kFeedbackVector)
+ DEFINE_JS_PARAMETER_TYPES(MachineType::UintPtr(), // kSlot
MachineType::AnyTagged(), // kSpread
- MachineType::AnyTagged()) // kMaybeFeedbackVector
+ MachineType::AnyTagged()) // kFeedbackVector
DECLARE_DESCRIPTOR(ConstructWithSpread_WithFeedbackDescriptor,
CallInterfaceDescriptor)
};
@@ -1102,17 +1248,15 @@ class ConstructWithArrayLikeDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(ConstructWithArrayLikeDescriptor, CallInterfaceDescriptor)
};
-// TODO(jgruber): Pass the slot as UintPtr.
class ConstructWithArrayLike_WithFeedbackDescriptor
: public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kTarget, kNewTarget, kArgumentsList, kSlot,
- kMaybeFeedbackVector)
+ DEFINE_PARAMETERS(kTarget, kNewTarget, kArgumentsList, kSlot, kFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
MachineType::AnyTagged(), // kNewTarget
MachineType::AnyTagged(), // kArgumentsList
- MachineType::Int32(), // kSlot
- MachineType::AnyTagged()) // kMaybeFeedbackVector
+ MachineType::UintPtr(), // kSlot
+ MachineType::AnyTagged()) // kFeedbackVector
DECLARE_DESCRIPTOR(ConstructWithArrayLike_WithFeedbackDescriptor,
CallInterfaceDescriptor)
};
@@ -1204,6 +1348,15 @@ class BinaryOpDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(BinaryOpDescriptor, CallInterfaceDescriptor)
};
+class BinaryOp_BaselineDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kLeft, kRight, kSlot)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft
+ MachineType::AnyTagged(), // kRight
+ MachineType::UintPtr()) // kSlot
+ DECLARE_DESCRIPTOR(BinaryOp_BaselineDescriptor, CallInterfaceDescriptor)
+};
+
// This descriptor is shared among String.p.charAt/charCodeAt/codePointAt
// as they all have the same interface.
class StringAtDescriptor final : public CallInterfaceDescriptor {
@@ -1238,13 +1391,6 @@ class StringSubstringDescriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(StringSubstringDescriptor, CallInterfaceDescriptor)
};
-class ArgumentsAdaptorDescriptor : public CallInterfaceDescriptor {
- public:
- DEFINE_JS_PARAMETERS(kExpectedArgumentsCount)
- DEFINE_JS_PARAMETER_TYPES(MachineType::Int32())
- DECLARE_DESCRIPTOR(ArgumentsAdaptorDescriptor, CallInterfaceDescriptor)
-};
-
class CppBuiltinAdaptorDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_JS_PARAMETERS(kCFunction)
@@ -1308,6 +1454,41 @@ class GrowArrayElementsDescriptor : public CallInterfaceDescriptor {
static const Register KeyRegister();
};
+class V8_EXPORT_PRIVATE TailCallOptimizedCodeSlotDescriptor
+ : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kOptimizedCodeEntry)
+  DEFINE_PARAMETER_TYPES(MachineType::AnyTagged())  // kOptimizedCodeEntry
+ DECLARE_DESCRIPTOR(TailCallOptimizedCodeSlotDescriptor,
+ CallInterfaceDescriptor)
+};
+
+class BaselineOutOfLinePrologueDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kCalleeContext, kClosure,
+ kJavaScriptCallArgCount,
+ kInterpreterBytecodeArray,
+ kJavaScriptCallNewTarget)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kCalleeContext
+ MachineType::AnyTagged(), // kClosure
+ MachineType::Int32(), // kJavaScriptCallArgCount
+ MachineType::AnyTagged(), // kInterpreterBytecodeArray
+ MachineType::AnyTagged()) // kJavaScriptCallNewTarget
+ DECLARE_DESCRIPTOR(BaselineOutOfLinePrologueDescriptor,
+ CallInterfaceDescriptor)
+};
+
+class BaselineLeaveFrameDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kParamsSize, kWeight)
+ DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kParamsSize
+ MachineType::Int32()) // kWeight
+ DECLARE_DESCRIPTOR(BaselineLeaveFrameDescriptor, CallInterfaceDescriptor)
+
+ static const Register ParamsSizeRegister();
+ static const Register WeightRegister();
+};
+
class V8_EXPORT_PRIVATE InterpreterDispatchDescriptor
: public CallInterfaceDescriptor {
public:
@@ -1376,6 +1557,18 @@ class InterpreterCEntry2Descriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(InterpreterCEntry2Descriptor, CallInterfaceDescriptor)
};
+class ForInPrepareDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_RESULT_AND_PARAMETERS(2, kEnumerator, kVectorIndex, kFeedbackVector)
+ DEFINE_RESULT_AND_PARAMETER_TYPES(
+ MachineType::AnyTagged(), // result 1 (cache array)
+ MachineType::AnyTagged(), // result 2 (cache length)
+ MachineType::AnyTagged(), // kEnumerator
+ MachineType::TaggedSigned(), // kVectorIndex
+ MachineType::AnyTagged()) // kFeedbackVector
+ DECLARE_DESCRIPTOR(ForInPrepareDescriptor, CallInterfaceDescriptor)
+};
+
class ResumeGeneratorDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kValue, kGenerator)
@@ -1510,59 +1703,100 @@ class CloneObjectWithVectorDescriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(CloneObjectWithVectorDescriptor, CallInterfaceDescriptor)
};
+class CloneObjectBaselineDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kSource, kFlags, kSlot)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kSource
+ MachineType::TaggedSigned(), // kFlags
+ MachineType::TaggedSigned()) // kSlot
+ DECLARE_DESCRIPTOR(CloneObjectBaselineDescriptor, CallInterfaceDescriptor)
+};
+
class BinaryOp_WithFeedbackDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kLeft, kRight, kSlot, kMaybeFeedbackVector)
+ DEFINE_PARAMETERS(kLeft, kRight, kSlot, kFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft
MachineType::AnyTagged(), // kRight
MachineType::UintPtr(), // kSlot
- MachineType::AnyTagged()) // kMaybeFeedbackVector
+ MachineType::AnyTagged()) // kFeedbackVector
DECLARE_DESCRIPTOR(BinaryOp_WithFeedbackDescriptor, CallInterfaceDescriptor)
};
-// TODO(jgruber): Pass the slot as UintPtr.
+class CallTrampoline_BaselineDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_VARARGS(kFunction, kActualArgumentsCount, kSlot)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunction
+ MachineType::Int32(), // kActualArgumentsCount
+ MachineType::UintPtr()) // kSlot
+ DECLARE_DESCRIPTOR(CallTrampoline_BaselineDescriptor, CallInterfaceDescriptor)
+};
+
class CallTrampoline_WithFeedbackDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_VARARGS(kFunction, kActualArgumentsCount, kSlot,
- kMaybeFeedbackVector)
+ kFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunction
MachineType::Int32(), // kActualArgumentsCount
- MachineType::Int32(), // kSlot
- MachineType::AnyTagged()) // kMaybeFeedbackVector
+ MachineType::UintPtr(), // kSlot
+ MachineType::AnyTagged()) // kFeedbackVector
DECLARE_DESCRIPTOR(CallTrampoline_WithFeedbackDescriptor,
CallInterfaceDescriptor)
};
class Compare_WithFeedbackDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kLeft, kRight, kSlot, kMaybeFeedbackVector)
+ DEFINE_PARAMETERS(kLeft, kRight, kSlot, kFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft
MachineType::AnyTagged(), // kRight
MachineType::UintPtr(), // kSlot
- MachineType::AnyTagged()) // kMaybeFeedbackVector
+ MachineType::AnyTagged()) // kFeedbackVector
DECLARE_DESCRIPTOR(Compare_WithFeedbackDescriptor, CallInterfaceDescriptor)
};
-// TODO(jgruber): Pass the slot as UintPtr.
+class Compare_BaselineDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kLeft, kRight, kSlot)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft
+ MachineType::AnyTagged(), // kRight
+ MachineType::UintPtr()) // kSlot
+ DECLARE_DESCRIPTOR(Compare_BaselineDescriptor, CallInterfaceDescriptor)
+};
+
+class Construct_BaselineDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_JS_PARAMETERS_NO_CONTEXT(kSlot)
+ DEFINE_JS_PARAMETER_TYPES(MachineType::UintPtr()) // kSlot
+ DECLARE_JS_COMPATIBLE_DESCRIPTOR(Construct_BaselineDescriptor,
+ CallInterfaceDescriptor, 1)
+};
+
class Construct_WithFeedbackDescriptor : public CallInterfaceDescriptor {
public:
- // kSlot is passed in a register, kMaybeFeedbackVector on the stack.
- DEFINE_JS_PARAMETERS(kSlot, kMaybeFeedbackVector)
- DEFINE_JS_PARAMETER_TYPES(MachineType::Int32(), // kSlot
- MachineType::AnyTagged()) // kMaybeFeedbackVector
+ // kSlot is passed in a register, kFeedbackVector on the stack.
+ DEFINE_JS_PARAMETERS(kSlot, kFeedbackVector)
+ DEFINE_JS_PARAMETER_TYPES(MachineType::UintPtr(), // kSlot
+ MachineType::AnyTagged()) // kFeedbackVector
DECLARE_JS_COMPATIBLE_DESCRIPTOR(Construct_WithFeedbackDescriptor,
CallInterfaceDescriptor, 1)
};
class UnaryOp_WithFeedbackDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kValue, kSlot, kMaybeFeedbackVector)
+ DEFINE_PARAMETERS(kValue, kSlot, kFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kValue
MachineType::UintPtr(), // kSlot
- MachineType::AnyTagged()) // kMaybeFeedbackVector
+ MachineType::AnyTagged()) // kFeedbackVector
DECLARE_DESCRIPTOR(UnaryOp_WithFeedbackDescriptor, CallInterfaceDescriptor)
};
+class UnaryOp_BaselineDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kValue, kSlot)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kValue
+ MachineType::UintPtr()) // kSlot
+ DECLARE_DESCRIPTOR(UnaryOp_BaselineDescriptor, CallInterfaceDescriptor)
+};
+
#define DEFINE_TFS_BUILTIN_DESCRIPTOR(Name, ...) \
class Name##Descriptor : public CallInterfaceDescriptor { \
public: \
diff --git a/deps/v8/src/codegen/machine-type.h b/deps/v8/src/codegen/machine-type.h
index e7e10208d7..cd46fd1aef 100644
--- a/deps/v8/src/codegen/machine-type.h
+++ b/deps/v8/src/codegen/machine-type.h
@@ -324,6 +324,12 @@ V8_EXPORT_PRIVATE inline constexpr int ElementSizeInBytes(
return 1 << ElementSizeLog2Of(rep);
}
+V8_EXPORT_PRIVATE inline constexpr int ElementSizeInPointers(
+ MachineRepresentation rep) {
+ return (ElementSizeInBytes(rep) + kSystemPointerSize - 1) /
+ kSystemPointerSize;
+}
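A quick sketch of the rounding the new ElementSizeInPointers helper performs, assuming a 64-bit target where kSystemPointerSize is 8 (illustrative numbers only):

    // Ceiling division: byte sizes are rounded up to whole pointer slots.
    static_assert((4 + 8 - 1) / 8 == 1, "a 4-byte kWord32 element fits in one slot");
    static_assert((16 + 8 - 1) / 8 == 2, "a 16-byte kSimd128 element needs two slots");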
+
// Converts representation to bit for representation masks.
V8_EXPORT_PRIVATE inline constexpr int RepresentationBit(
MachineRepresentation rep) {
diff --git a/deps/v8/src/codegen/macro-assembler.h b/deps/v8/src/codegen/macro-assembler.h
index 1769dc834a..ce3ccbf332 100644
--- a/deps/v8/src/codegen/macro-assembler.h
+++ b/deps/v8/src/codegen/macro-assembler.h
@@ -52,6 +52,9 @@ enum AllocationFlags {
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/constants-s390.h"
#include "src/codegen/s390/macro-assembler-s390.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/codegen/riscv64/constants-riscv64.h"
+#include "src/codegen/riscv64/macro-assembler-riscv64.h"
#else
#error Unsupported target architecture.
#endif
diff --git a/deps/v8/src/codegen/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc
index 476362685e..9864c0f253 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/assembler-mips.cc
@@ -115,6 +115,12 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
}
#endif
#endif
+
+  // Set a static value indicating whether SIMD is supported.
+  // This variable is only used for certain archs to query SupportsWasmSimd128()
+  // at runtime in builtins via an external reference. Other callers should use
+  // CpuFeatures::SupportsWasmSimd128().
+ CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128();
}
void CpuFeatures::PrintTarget() {}
diff --git a/deps/v8/src/codegen/mips/interface-descriptors-mips.cc b/deps/v8/src/codegen/mips/interface-descriptors-mips.cc
index 7affb8fad2..f41a0e14ca 100644
--- a/deps/v8/src/codegen/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/codegen/mips/interface-descriptors-mips.cc
@@ -112,6 +112,15 @@ const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
+const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ UNREACHABLE();
+}
+const Register BaselineLeaveFrameDescriptor::WeightRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ UNREACHABLE();
+}
+
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
@@ -235,21 +244,22 @@ void CompareDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void Compare_BaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:11421): Implement on this platform.
+ InitializePlatformUnimplemented(data, kParameterCount);
+}
+
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
+void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a1, // JSFunction
- a3, // the new target
- a0, // actual number of arguments
- a2, // expected number of arguments
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ // TODO(v8:11421): Implement on this platform.
+ InitializePlatformUnimplemented(data, kParameterCount);
}
void ApiCallbackDescriptor::InitializePlatformSpecific(
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index 7e82bb0e21..060a4b748a 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -4375,7 +4375,6 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
DCHECK_EQ(actual_parameter_count, a0);
DCHECK_EQ(expected_parameter_count, a2);
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
Branch(&regular_invoke, eq, expected_parameter_count,
@@ -4429,20 +4428,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
CallRuntime(Runtime::kThrowStackOverflow);
break_(0xCC);
}
-#else
- // Check whether the expected and actual arguments count match. The registers
- // are set up according to contract with ArgumentsAdaptorTrampoline:
- Branch(&regular_invoke, eq, expected_parameter_count,
- Operand(actual_parameter_count));
- Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
- if (flag == CALL_FUNCTION) {
- Call(adaptor);
- Branch(done);
- } else {
- Jump(adaptor, RelocInfo::CODE_TARGET);
- }
-#endif
bind(&regular_invoke);
}
@@ -4573,6 +4559,13 @@ void MacroAssembler::GetObjectType(Register object, Register map,
lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
}
+void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg,
+ InstanceType lower_limit,
+ Register range) {
+ lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ Subu(range, type_reg, Operand(lower_limit));
+}
+
// -----------------------------------------------------------------------------
// Runtime calls.
@@ -5072,9 +5065,12 @@ void MacroAssembler::AssertFunction(Register object) {
SmiTst(object, t8);
Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
Operand(zero_reg));
- GetObjectType(object, t8, t8);
- Check(eq, AbortReason::kOperandIsNotAFunction, t8,
- Operand(JS_FUNCTION_TYPE));
+ push(object);
+ LoadMap(object, object);
+ GetInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE, t8);
+ Check(ls, AbortReason::kOperandIsNotAFunction, t8,
+ Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
+ pop(object);
}
}
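The rewritten AssertFunction relies on the standard unsigned range-check trick: after GetInstanceTypeRange subtracts the lower bound, a single unsigned lower-or-same comparison against (LAST - FIRST) covers the whole JS function range. A minimal standalone sketch of the idea, with made-up type values:

    // Hypothetical instance-type values, for illustration only.
    constexpr unsigned kFirst = 100, kLast = 103;
    bool IsInFunctionRange(unsigned type) {
      // Out-of-range types wrap to a huge unsigned value, so one unsigned
      // comparison replaces a pair of signed bound checks.
      return (type - kFirst) <= (kLast - kFirst);
    }
    // IsInFunctionRange(101) -> true; IsInFunctionRange(99) -> false.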
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h
index 46429caaca..1fe4c451f9 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.h
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h
@@ -227,7 +227,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// TODO(mips): Implement.
UNIMPLEMENTED();
}
- void JumpCodeObject(Register code_object) override {
+ void JumpCodeObject(Register code_object,
+ JumpMode jump_mode = JumpMode::kJump) override {
// TODO(mips): Implement.
UNIMPLEMENTED();
}
@@ -1041,6 +1042,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void GetObjectType(Register function, Register map, Register type_reg);
+ void GetInstanceTypeRange(Register map, Register type_reg,
+ InstanceType lower_limit, Register range);
+
// -------------------------------------------------------------------------
// Runtime calls.
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc
index 8b522eb173..11dd818922 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc
@@ -37,6 +37,7 @@
#if V8_TARGET_ARCH_MIPS64
#include "src/base/cpu.h"
+#include "src/codegen/machine-type.h"
#include "src/codegen/mips64/assembler-mips64-inl.h"
#include "src/codegen/safepoint-table.h"
#include "src/codegen/string-constants.h"
@@ -90,6 +91,12 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (cpu.has_msa()) supported_ |= 1u << MIPS_SIMD;
#endif
#endif
+
+  // Set a static value indicating whether SIMD is supported.
+  // This variable is only used for certain archs to query SupportsWasmSimd128()
+  // at runtime in builtins via an external reference. Other callers should use
+  // CpuFeatures::SupportsWasmSimd128().
+ CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128();
}
void CpuFeatures::PrintTarget() {}
@@ -3989,6 +3996,26 @@ Register UseScratchRegisterScope::Acquire() {
bool UseScratchRegisterScope::hasAvailable() const { return *available_ != 0; }
+LoadStoreLaneParams::LoadStoreLaneParams(MachineRepresentation rep,
+ uint8_t laneidx) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ *this = LoadStoreLaneParams(laneidx, MSA_B, 16);
+ break;
+ case MachineRepresentation::kWord16:
+ *this = LoadStoreLaneParams(laneidx, MSA_H, 8);
+ break;
+ case MachineRepresentation::kWord32:
+ *this = LoadStoreLaneParams(laneidx, MSA_W, 4);
+ break;
+ case MachineRepresentation::kWord64:
+ *this = LoadStoreLaneParams(laneidx, MSA_D, 2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.h b/deps/v8/src/codegen/mips64/assembler-mips64.h
index 26205ccefd..41ebea8e5b 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.h
@@ -36,12 +36,14 @@
#define V8_CODEGEN_MIPS64_ASSEMBLER_MIPS64_H_
#include <stdio.h>
+
#include <memory>
#include <set>
#include "src/codegen/assembler.h"
#include "src/codegen/external-reference.h"
#include "src/codegen/label.h"
+#include "src/codegen/machine-type.h"
#include "src/codegen/mips64/constants-mips64.h"
#include "src/codegen/mips64/register-mips64.h"
#include "src/objects/contexts.h"
@@ -1951,6 +1953,20 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
RegList old_available_;
};
+// Helper class for load lane and store lane: it indicates what memory size
+// is to be encoded in the opcode, and the new lane index.
+class LoadStoreLaneParams {
+ public:
+ MSASize sz;
+ uint8_t laneidx;
+
+ LoadStoreLaneParams(MachineRepresentation rep, uint8_t laneidx);
+
+ private:
+ LoadStoreLaneParams(uint8_t laneidx, MSASize sz, int lanes)
+ : sz(sz), laneidx(laneidx % lanes) {}
+};
+
} // namespace internal
} // namespace v8
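A rough usage sketch for the new helper; the call site is hypothetical, and only the constructor behaviour shown here comes from the class above:

    // A 64-bit lane access on lane 1 of a 128-bit vector.
    LoadStoreLaneParams p(MachineRepresentation::kWord64, 1);
    // p.sz == MSA_D (doubleword elements), p.laneidx == 1 % 2 == 1; the lane
    // index is reduced modulo the lane count (16 for B, 8 for H, 4 for W, 2 for D).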
diff --git a/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc b/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc
index 38ba0c3509..f34d16e15b 100644
--- a/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc
@@ -112,6 +112,15 @@ const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
+const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ UNREACHABLE();
+}
+const Register BaselineLeaveFrameDescriptor::WeightRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ UNREACHABLE();
+}
+
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
@@ -235,21 +244,22 @@ void CompareDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void Compare_BaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:11421): Implement on this platform.
+ InitializePlatformUnimplemented(data, kParameterCount);
+}
+
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
+void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a1, // JSFunction
- a3, // the new target
- a0, // actual number of arguments
- a2, // expected number of arguments
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ // TODO(v8:11421): Implement on this platform.
+ InitializePlatformUnimplemented(data, kParameterCount);
}
void ApiCallbackDescriptor::InitializePlatformSpecific(
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index 7d6d125aeb..ec19fa5db5 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -2011,6 +2011,31 @@ void TurboAssembler::MultiPopFPU(RegList regs) {
daddiu(sp, sp, stack_offset);
}
+void TurboAssembler::MultiPushMSA(RegList regs) {
+ int16_t num_to_push = base::bits::CountPopulation(regs);
+ int16_t stack_offset = num_to_push * kSimd128Size;
+
+ Dsubu(sp, sp, Operand(stack_offset));
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs & (1 << i)) != 0) {
+ stack_offset -= kSimd128Size;
+ st_d(MSARegister::from_code(i), MemOperand(sp, stack_offset));
+ }
+ }
+}
+
+void TurboAssembler::MultiPopMSA(RegList regs) {
+ int16_t stack_offset = 0;
+
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs & (1 << i)) != 0) {
+ ld_d(MSARegister::from_code(i), MemOperand(sp, stack_offset));
+ stack_offset += kSimd128Size;
+ }
+ }
+ daddiu(sp, sp, stack_offset);
+}
+
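To make the stack layout that MultiPushMSA and MultiPopMSA agree on concrete, here is an illustrative walk-through with made-up registers, assuming kSimd128Size is 16:

    // Illustrative only: a RegList with bits 2 and 5 set (w2 and w5).
    RegList regs = (1 << 2) | (1 << 5);
    int num_to_push = base::bits::CountPopulation(regs);  // 2
    int reserved = num_to_push * kSimd128Size;             // 32 bytes below sp
    // Push stores w5 at sp+16 and w2 at sp+0 (higher register numbers end up
    // at higher addresses); pop reads them back in the opposite order and
    // then re-increments sp by the same 32 bytes.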
void TurboAssembler::Ext(Register rt, Register rs, uint16_t pos,
uint16_t size) {
DCHECK_LT(pos, 32);
@@ -2633,22 +2658,24 @@ void TurboAssembler::Round_s_s(FPURegister dst, FPURegister src) {
void TurboAssembler::LoadLane(MSASize sz, MSARegister dst, uint8_t laneidx,
MemOperand src) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
switch (sz) {
case MSA_B:
- Lbu(kScratchReg, src);
- insert_b(dst, laneidx, kScratchReg);
+ Lbu(scratch, src);
+ insert_b(dst, laneidx, scratch);
break;
case MSA_H:
- Lhu(kScratchReg, src);
- insert_h(dst, laneidx, kScratchReg);
+ Lhu(scratch, src);
+ insert_h(dst, laneidx, scratch);
break;
case MSA_W:
- Lwu(kScratchReg, src);
- insert_w(dst, laneidx, kScratchReg);
+ Lwu(scratch, src);
+ insert_w(dst, laneidx, scratch);
break;
case MSA_D:
- Ld(kScratchReg, src);
- insert_d(dst, laneidx, kScratchReg);
+ Ld(scratch, src);
+ insert_d(dst, laneidx, scratch);
break;
default:
UNREACHABLE();
@@ -2657,22 +2684,24 @@ void TurboAssembler::LoadLane(MSASize sz, MSARegister dst, uint8_t laneidx,
void TurboAssembler::StoreLane(MSASize sz, MSARegister src, uint8_t laneidx,
MemOperand dst) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
switch (sz) {
case MSA_B:
- copy_u_b(kScratchReg, src, laneidx);
- Sb(kScratchReg, dst);
+ copy_u_b(scratch, src, laneidx);
+ Sb(scratch, dst);
break;
case MSA_H:
- copy_u_h(kScratchReg, src, laneidx);
- Sh(kScratchReg, dst);
+ copy_u_h(scratch, src, laneidx);
+ Sh(scratch, dst);
break;
case MSA_W:
if (laneidx == 0) {
FPURegister src_reg = FPURegister::from_code(src.code());
Swc1(src_reg, dst);
} else {
- copy_u_w(kScratchReg, src, laneidx);
- Sw(kScratchReg, dst);
+ copy_u_w(scratch, src, laneidx);
+ Sw(scratch, dst);
}
break;
case MSA_D:
@@ -2680,8 +2709,8 @@ void TurboAssembler::StoreLane(MSASize sz, MSARegister src, uint8_t laneidx,
FPURegister src_reg = FPURegister::from_code(src.code());
Sdc1(src_reg, dst);
} else {
- copy_s_d(kScratchReg, src, laneidx);
- Sd(kScratchReg, dst);
+ copy_s_d(scratch, src, laneidx);
+ Sd(scratch, dst);
}
break;
default:
@@ -2726,6 +2755,51 @@ void TurboAssembler::ExtMulHigh(MSADataType type, MSARegister dst,
}
#undef EXT_MUL_BINOP
+void TurboAssembler::LoadSplat(MSASize sz, MSARegister dst, MemOperand src) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ switch (sz) {
+ case MSA_B:
+ Lb(scratch, src);
+ fill_b(dst, scratch);
+ break;
+ case MSA_H:
+ Lh(scratch, src);
+ fill_h(dst, scratch);
+ break;
+ case MSA_W:
+ Lw(scratch, src);
+ fill_w(dst, scratch);
+ break;
+ case MSA_D:
+ Ld(scratch, src);
+ fill_d(dst, scratch);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void TurboAssembler::ExtAddPairwise(MSADataType type, MSARegister dst,
+ MSARegister src) {
+ switch (type) {
+ case MSAS8:
+ hadd_s_h(dst, src, src);
+ break;
+ case MSAU8:
+ hadd_u_h(dst, src, src);
+ break;
+ case MSAS16:
+ hadd_s_w(dst, src, src);
+ break;
+ case MSAU16:
+ hadd_u_w(dst, src, src);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
void TurboAssembler::MSARoundW(MSARegister dst, MSARegister src,
FPURoundingMode mode) {
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -4815,7 +4889,6 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
DCHECK_EQ(actual_parameter_count, a0);
DCHECK_EQ(expected_parameter_count, a2);
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
Branch(&regular_invoke, eq, expected_parameter_count,
@@ -4869,21 +4942,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
CallRuntime(Runtime::kThrowStackOverflow);
break_(0xCC);
}
-#else
- // Check whether the expected and actual arguments count match. The registers
- // are set up according to contract with ArgumentsAdaptorTrampoline:
- Branch(&regular_invoke, eq, expected_parameter_count,
- Operand(actual_parameter_count));
-
- Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
- if (flag == CALL_FUNCTION) {
- Call(adaptor);
- Branch(done);
- } else {
- Jump(adaptor, RelocInfo::CODE_TARGET);
- }
-#endif
bind(&regular_invoke);
}
@@ -5015,6 +5074,13 @@ void MacroAssembler::GetObjectType(Register object, Register map,
Lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
}
+void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg,
+ InstanceType lower_limit,
+ Register range) {
+ Lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ Dsubu(range, type_reg, Operand(lower_limit));
+}
+
// -----------------------------------------------------------------------------
// Runtime calls.
@@ -5533,9 +5599,12 @@ void MacroAssembler::AssertFunction(Register object) {
SmiTst(object, t8);
Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
Operand(zero_reg));
- GetObjectType(object, t8, t8);
- Check(eq, AbortReason::kOperandIsNotAFunction, t8,
- Operand(JS_FUNCTION_TYPE));
+ push(object);
+ LoadMap(object, object);
+ GetInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE, t8);
+ Check(ls, AbortReason::kOperandIsNotAFunction, t8,
+ Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
+ pop(object);
}
}
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index af5dc103e1..721326ae96 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -250,7 +250,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// TODO(mips): Implement.
UNIMPLEMENTED();
}
- void JumpCodeObject(Register code_object) override {
+ void JumpCodeObject(Register code_object,
+ JumpMode jump_mode = JumpMode::kJump) override {
// TODO(mips): Implement.
UNIMPLEMENTED();
}
@@ -355,6 +356,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// saved in higher memory addresses.
void MultiPush(RegList regs);
void MultiPushFPU(RegList regs);
+ void MultiPushMSA(RegList regs);
// Calculate how much stack space (in bytes) is required to store caller
// registers excluding those specified in the arguments.
@@ -402,6 +404,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// registers specified in regs. Pop order is the opposite as in MultiPush.
void MultiPop(RegList regs);
void MultiPopFPU(RegList regs);
+ void MultiPopMSA(RegList regs);
#define DEFINE_INSTRUCTION(instr) \
void instr(Register rd, Register rs, const Operand& rt); \
@@ -799,6 +802,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
MSARegister src2);
void ExtMulHigh(MSADataType type, MSARegister dst, MSARegister src1,
MSARegister src2);
+ void LoadSplat(MSASize sz, MSARegister dst, MemOperand src);
+ void ExtAddPairwise(MSADataType type, MSARegister dst, MSARegister src);
void MSARoundW(MSARegister dst, MSARegister src, FPURoundingMode mode);
void MSARoundD(MSARegister dst, MSARegister src, FPURoundingMode mode);
@@ -1091,6 +1096,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void GetObjectType(Register function, Register map, Register type_reg);
+ void GetInstanceTypeRange(Register map, Register type_reg,
+ InstanceType lower_limit, Register range);
+
// -------------------------------------------------------------------------
// Runtime calls.
diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc
index 6e8ce93aaa..2dc30fb55d 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.cc
+++ b/deps/v8/src/codegen/optimized-compilation-info.cc
@@ -39,6 +39,10 @@ OptimizedCompilationInfo::OptimizedCompilationInfo(
SetTracingFlags(shared->PassesFilter(FLAG_trace_turbo_filter));
ConfigureFlags();
+
+ if (isolate->node_observer()) {
+ SetNodeObserver(isolate->node_observer());
+ }
}
OptimizedCompilationInfo::OptimizedCompilationInfo(
diff --git a/deps/v8/src/codegen/optimized-compilation-info.h b/deps/v8/src/codegen/optimized-compilation-info.h
index 42dc87f392..b72526ab61 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.h
+++ b/deps/v8/src/codegen/optimized-compilation-info.h
@@ -34,6 +34,10 @@ class JavaScriptFrame;
class JSGlobalObject;
class Zone;
+namespace compiler {
+class NodeObserver;
+}
+
namespace wasm {
struct WasmCompilationResult;
} // namespace wasm
@@ -119,8 +123,13 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
CodeKind code_kind() const { return code_kind_; }
int32_t builtin_index() const { return builtin_index_; }
void set_builtin_index(int32_t index) { builtin_index_ = index; }
- BailoutId osr_offset() const { return osr_offset_; }
+ BytecodeOffset osr_offset() const { return osr_offset_; }
JavaScriptFrame* osr_frame() const { return osr_frame_; }
+ void SetNodeObserver(compiler::NodeObserver* observer) {
+ DCHECK_NULL(node_observer_);
+ node_observer_ = observer;
+ }
+ compiler::NodeObserver* node_observer() const { return node_observer_; }
void SetPoisoningMitigationLevel(PoisoningMitigationLevel poisoning_level) {
poisoning_level_ = poisoning_level;
@@ -155,7 +164,8 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
bool IsTurboprop() const { return code_kind() == CodeKind::TURBOPROP; }
bool IsWasm() const { return code_kind() == CodeKind::WASM_FUNCTION; }
- void SetOptimizingForOsr(BailoutId osr_offset, JavaScriptFrame* osr_frame) {
+ void SetOptimizingForOsr(BytecodeOffset osr_offset,
+ JavaScriptFrame* osr_frame) {
DCHECK(IsOptimizing());
osr_offset_ = osr_offset;
osr_frame_ = osr_frame;
@@ -276,13 +286,15 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// The WebAssembly compilation result, not published in the NativeModule yet.
std::unique_ptr<wasm::WasmCompilationResult> wasm_compilation_result_;
- // Entry point when compiling for OSR, {BailoutId::None} otherwise.
- BailoutId osr_offset_ = BailoutId::None();
+ // Entry point when compiling for OSR, {BytecodeOffset::None} otherwise.
+ BytecodeOffset osr_offset_ = BytecodeOffset::None();
// The zone from which the compilation pipeline working on this
// OptimizedCompilationInfo allocates.
Zone* const zone_;
+ compiler::NodeObserver* node_observer_ = nullptr;
+
BailoutReason bailout_reason_ = BailoutReason::kNoReason;
InlinedFunctionList inlined_functions_;
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index ff1835f435..02e50e5fa3 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -110,6 +110,12 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= (1u << FPR_GPR_MOV);
#endif
#endif
+
+  // Set a static value indicating whether SIMD is supported.
+  // This variable is only used for certain archs to query SupportsWasmSimd128()
+  // at runtime in builtins via an external reference. Other callers should use
+  // CpuFeatures::SupportsWasmSimd128().
+ CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128();
}
void CpuFeatures::PrintTarget() {
diff --git a/deps/v8/src/codegen/ppc/constants-ppc.h b/deps/v8/src/codegen/ppc/constants-ppc.h
index d26b686861..31bbb48044 100644
--- a/deps/v8/src/codegen/ppc/constants-ppc.h
+++ b/deps/v8/src/codegen/ppc/constants-ppc.h
@@ -2393,6 +2393,8 @@ using Instr = uint32_t;
V(vbpermq, VBPERMQ, 0x1000054C)
#define PPC_VX_OPCODE_C_FORM_LIST(V) \
+ /* Vector Unpack Low Signed Word */ \
+ V(vupklsw, VUPKLSW, 0x100006CE) \
/* Vector Unpack High Signed Word */ \
V(vupkhsw, VUPKHSW, 0x1000064E) \
/* Vector Unpack Low Signed Halfword */ \
@@ -2559,8 +2561,6 @@ using Instr = uint32_t;
V(vupkhpx, VUPKHPX, 0x1000034E) \
/* Vector Unpack Low Pixel */ \
V(vupklpx, VUPKLPX, 0x100003CE) \
- /* Vector Unpack Low Signed Word */ \
- V(vupklsw, VUPKLSW, 0x100006CE) \
/* Vector AES Cipher */ \
V(vcipher, VCIPHER, 0x10000508) \
/* Vector AES Cipher Last */ \
diff --git a/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc b/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc
index ba8da6e0a8..ed304e80fc 100644
--- a/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc
@@ -86,6 +86,15 @@ const Register ApiGetterDescriptor::CallbackRegister() { return r6; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r3; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
+const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ UNREACHABLE();
+}
+const Register BaselineLeaveFrameDescriptor::WeightRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ UNREACHABLE();
+}
+
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r3; }
@@ -209,21 +218,22 @@ void CompareDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void Compare_BaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:11421): Implement on this platform.
+ InitializePlatformUnimplemented(data, kParameterCount);
+}
+
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
+void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r4, // JSFunction
- r6, // the new target
- r3, // actual number of arguments
- r5, // expected number of arguments
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ // TODO(v8:11421): Implement on this platform.
+ InitializePlatformUnimplemented(data, kParameterCount);
}
void ApiCallbackDescriptor::InitializePlatformSpecific(
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index c5d52e0444..e78130ee42 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -429,6 +429,21 @@ void TurboAssembler::MultiPushDoubles(RegList dregs, Register location) {
}
}
+void TurboAssembler::MultiPushV128(RegList dregs, Register location) {
+ int16_t num_to_push = base::bits::CountPopulation(dregs);
+ int16_t stack_offset = num_to_push * kSimd128Size;
+
+ subi(location, location, Operand(stack_offset));
+ for (int16_t i = Simd128Register::kNumRegisters - 1; i >= 0; i--) {
+ if ((dregs & (1 << i)) != 0) {
+ Simd128Register dreg = Simd128Register::from_code(i);
+ stack_offset -= kSimd128Size;
+ li(ip, Operand(stack_offset));
+ StoreSimd128(dreg, MemOperand(location, ip), r0, kScratchSimd128Reg);
+ }
+ }
+}
+
void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
int16_t stack_offset = 0;
@@ -442,6 +457,20 @@ void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
addi(location, location, Operand(stack_offset));
}
+void TurboAssembler::MultiPopV128(RegList dregs, Register location) {
+ int16_t stack_offset = 0;
+
+ for (int16_t i = 0; i < Simd128Register::kNumRegisters; i++) {
+ if ((dregs & (1 << i)) != 0) {
+ Simd128Register dreg = Simd128Register::from_code(i);
+ li(ip, Operand(stack_offset));
+ LoadSimd128(dreg, MemOperand(location, ip), r0, kScratchSimd128Reg);
+ stack_offset += kSimd128Size;
+ }
+ }
+ addi(location, location, Operand(stack_offset));
+}
+
void TurboAssembler::LoadRoot(Register destination, RootIndex index,
Condition cond) {
DCHECK(cond == al);
@@ -1391,7 +1420,6 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
DCHECK_EQ(actual_parameter_count, r3);
DCHECK_EQ(expected_parameter_count, r5);
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
mov(r0, Operand(kDontAdaptArgumentsSentinel));
@@ -1441,24 +1469,12 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this, StackFrame::MANUAL);
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
bkpt(0);
}
-#else
- // Check whether the expected and actual arguments count match. If not,
- // setup registers according to contract with ArgumentsAdaptorTrampoline.
- cmp(expected_parameter_count, actual_parameter_count);
- beq(&regular_invoke);
- Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
- if (flag == CALL_FUNCTION) {
- Call(adaptor);
- b(done);
- } else {
- Jump(adaptor, RelocInfo::CODE_TARGET);
- }
-#endif
bind(&regular_invoke);
}
@@ -1641,6 +1657,18 @@ void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
cmpi(type_reg, Operand(type));
}
+void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
+ InstanceType lower_limit,
+ InstanceType higher_limit) {
+ DCHECK_LT(lower_limit, higher_limit);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadHalfWord(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ mov(scratch, Operand(lower_limit));
+ sub(scratch, type_reg, scratch);
+ cmpli(scratch, Operand(higher_limit - lower_limit));
+}
+
void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
DCHECK(obj != r0);
LoadRoot(r0, index);
@@ -1983,9 +2011,11 @@ void MacroAssembler::AssertFunction(Register object) {
TestIfSmi(object, r0);
Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
push(object);
- CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
+ LoadMap(object, object);
+ CompareInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_TYPE);
pop(object);
- Check(eq, AbortReason::kOperandIsNotAFunction);
+ Check(le, AbortReason::kOperandIsNotAFunction);
}
}
@@ -3307,7 +3337,8 @@ void TurboAssembler::CallCodeObject(Register code_object) {
Call(code_object);
}
-void TurboAssembler::JumpCodeObject(Register code_object) {
+void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+ DCHECK_EQ(JumpMode::kJump, jump_mode);
LoadCodeObjectEntry(code_object, code_object);
Jump(code_object);
}
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index 1b97c23128..5da219ba84 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -298,6 +298,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MultiPushDoubles(RegList dregs, Register location = sp);
void MultiPopDoubles(RegList dregs, Register location = sp);
+ void MultiPushV128(RegList dregs, Register location = sp);
+ void MultiPopV128(RegList dregs, Register location = sp);
+
// Calculate how much stack space (in bytes) is required to store caller
// registers excluding those specified in the arguments.
int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
@@ -440,7 +443,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadEntryFromBuiltinIndex(Register builtin_index);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
- void JumpCodeObject(Register code_object) override;
+ void JumpCodeObject(Register code_object,
+ JumpMode jump_mode = JumpMode::kJump) override;
void CallBuiltinByIndex(Register builtin_index) override;
void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
@@ -873,6 +877,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// sets the flags and leaves the object type in the type_reg register.
void CompareInstanceType(Register map, Register type_reg, InstanceType type);
+ // Compare instance type ranges for a map (lower_limit and higher_limit
+ // inclusive).
+ //
+ // Always use unsigned comparisons: ls for a positive result.
+ void CompareInstanceTypeRange(Register map, Register type_reg,
+ InstanceType lower_limit,
+ InstanceType higher_limit);
+
// Compare the object in a register to a value from the root list.
// Uses the ip register as scratch.
void CompareRoot(Register obj, RootIndex index);
diff --git a/deps/v8/src/codegen/register-arch.h b/deps/v8/src/codegen/register-arch.h
index 21a7233016..3a72daae27 100644
--- a/deps/v8/src/codegen/register-arch.h
+++ b/deps/v8/src/codegen/register-arch.h
@@ -24,6 +24,8 @@
#include "src/codegen/mips64/register-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/register-s390.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/codegen/riscv64/register-riscv64.h"
#else
#error Unknown architecture.
#endif
diff --git a/deps/v8/src/codegen/register-configuration.cc b/deps/v8/src/codegen/register-configuration.cc
index 1c48303294..aca5295c11 100644
--- a/deps/v8/src/codegen/register-configuration.cc
+++ b/deps/v8/src/codegen/register-configuration.cc
@@ -66,6 +66,8 @@ static int get_num_allocatable_double_registers() {
kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_S390
kMaxAllocatableDoubleRegisterCount;
+#elif V8_TARGET_ARCH_RISCV64
+ kMaxAllocatableDoubleRegisterCount;
#else
#error Unsupported target architecture.
#endif
diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc
index eb7750f941..25b3daef8e 100644
--- a/deps/v8/src/codegen/reloc-info.cc
+++ b/deps/v8/src/codegen/reloc-info.cc
@@ -330,7 +330,8 @@ bool RelocInfo::OffHeapTargetIsCodedSpecially() {
return false;
#elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS) || \
defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) || \
- defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390)
+ defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390) || \
+ defined(V8_TARGET_ARCH_RISCV64)
return true;
#endif
}
diff --git a/deps/v8/src/codegen/reloc-info.h b/deps/v8/src/codegen/reloc-info.h
index 3230e5f022..f478de86a1 100644
--- a/deps/v8/src/codegen/reloc-info.h
+++ b/deps/v8/src/codegen/reloc-info.h
@@ -70,7 +70,7 @@ class RelocInfo {
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
- // Encoded internal reference, used only on MIPS, MIPS64 and PPC.
+ // Encoded internal reference, used only on RISCV64, MIPS, MIPS64 and PPC.
INTERNAL_REFERENCE_ENCODED,
// An off-heap instruction stream target. See http://goo.gl/Z2HUiM.
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h b/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h
new file mode 100644
index 0000000000..b99262cb36
--- /dev/null
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h
@@ -0,0 +1,261 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2021 the V8 project authors. All rights reserved.
+
+#ifndef V8_CODEGEN_RISCV64_ASSEMBLER_RISCV64_INL_H_
+#define V8_CODEGEN_RISCV64_ASSEMBLER_RISCV64_INL_H_
+
+#include "src/codegen/assembler.h"
+#include "src/codegen/riscv64/assembler-riscv64.h"
+#include "src/debug/debug.h"
+#include "src/objects/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); }
+
+bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(RISCV_SIMD); }
+
+// -----------------------------------------------------------------------------
+// Operand and MemOperand.
+
+bool Operand::is_reg() const { return rm_.is_valid(); }
+
+int64_t Operand::immediate() const {
+ DCHECK(!is_reg());
+ DCHECK(!IsHeapObjectRequest());
+ return value_.immediate;
+}
+
+// -----------------------------------------------------------------------------
+// RelocInfo.
+
+void RelocInfo::apply(intptr_t delta) {
+ if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) {
+ // Absolute code pointer inside code object moves with the code object.
+ Assembler::RelocateInternalReference(rmode_, pc_, delta);
+ }
+}
+
+Address RelocInfo::target_address() {
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+Address RelocInfo::target_address_address() {
+ DCHECK(HasTargetAddressAddress());
+ // Read the address of the word containing the target_address in an
+ // instruction stream.
+ // The only architecture-independent user of this function is the serializer.
+ // The serializer uses it to find out how many raw bytes of instruction to
+ // output before the next target.
+ // For an instruction like LUI/ORI where the target bits are mixed into the
+ // instruction bits, the size of the target will be zero, indicating that the
+ // serializer should not step forward in memory after a target is resolved
+ // and written. In this case the target_address_address function should
+ // return the end of the instructions to be patched, allowing the
+ // deserializer to deserialize the instructions as raw bytes and put them in
+ // place, ready to be patched with the target. After jump optimization,
+ // that is the address of the instruction that follows J/JAL/JR/JALR
+  // that is the address of the instruction that follows the J/JAL/JR/JALR
+ return pc_ + Assembler::kInstructionsFor64BitConstant * kInstrSize;
+}
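For intuition about the returned address, with deliberately hypothetical constants that only illustrate the arithmetic:

    // If kInstructionsFor64BitConstant were 8 and kInstrSize 4, the function
    // would return pc_ + 32: the serializer then copies those 32 bytes of
    // instructions verbatim before resolving the next embedded target.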
+
+Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
+
+int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
+
+void Assembler::deserialization_set_special_target_at(
+ Address instruction_payload, Code code, Address target) {
+ set_target_address_at(instruction_payload,
+ !code.is_null() ? code.constant_pool() : kNullAddress,
+ target);
+}
+
+int Assembler::deserialization_special_target_size(
+ Address instruction_payload) {
+ return kSpecialTargetSize;
+}
+
+void Assembler::set_target_internal_reference_encoded_at(Address pc,
+ Address target) {
+ set_target_value_at(pc, static_cast<uint64_t>(target));
+}
+
+void Assembler::deserialization_set_target_internal_reference_at(
+ Address pc, Address target, RelocInfo::Mode mode) {
+ if (RelocInfo::IsInternalReferenceEncoded(mode)) {
+ DCHECK(IsLui(instr_at(pc)));
+ set_target_internal_reference_encoded_at(pc, target);
+ } else {
+ DCHECK(RelocInfo::IsInternalReference(mode));
+ Memory<Address>(pc) = target;
+ }
+}
+
+HeapObject RelocInfo::target_object() {
+ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
+ return HeapObject::cast(
+ Object(Assembler::target_address_at(pc_, constant_pool_)));
+}
+
+HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+ return target_object();
+}
+
+Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
+ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
+ return Handle<HeapObject>(reinterpret_cast<Address*>(
+ Assembler::target_address_at(pc_, constant_pool_)));
+}
+
+void RelocInfo::set_target_object(Heap* heap, HeapObject target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
+ Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
+ icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+ !FLAG_disable_write_barriers) {
+ WriteBarrierForCode(host(), this, target);
+ }
+}
+
+Address RelocInfo::target_external_reference() {
+ DCHECK(rmode_ == EXTERNAL_REFERENCE);
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+void RelocInfo::set_target_external_reference(
+ Address target, ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+ Assembler::set_target_address_at(pc_, constant_pool_, target,
+ icache_flush_mode);
+}
+
+Address RelocInfo::target_internal_reference() {
+ if (rmode_ == INTERNAL_REFERENCE) {
+ return Memory<Address>(pc_);
+ } else {
+ // Encoded internal references are j/jal instructions.
+ DCHECK(rmode_ == INTERNAL_REFERENCE_ENCODED);
+ DCHECK(Assembler::IsLui(Assembler::instr_at(pc_ + 0 * kInstrSize)));
+ Address address = Assembler::target_address_at(pc_);
+ return address;
+ }
+}
+
+Address RelocInfo::target_internal_reference_address() {
+ DCHECK(rmode_ == INTERNAL_REFERENCE || rmode_ == INTERNAL_REFERENCE_ENCODED);
+ return pc_;
+}
+
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ return target_address();
+}
+
+void RelocInfo::set_target_runtime_entry(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ if (target_address() != target)
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
+}
+
+Address RelocInfo::target_off_heap_target() {
+ DCHECK(IsOffHeapTarget(rmode_));
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+void RelocInfo::WipeOut() {
+ DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
+ IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) ||
+ IsOffHeapTarget(rmode_));
+ if (IsInternalReference(rmode_)) {
+ Memory<Address>(pc_) = kNullAddress;
+ } else if (IsInternalReferenceEncoded(rmode_)) {
+ Assembler::set_target_internal_reference_encoded_at(pc_, kNullAddress);
+ } else {
+ Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress);
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Assembler.
+
+void Assembler::CheckBuffer() {
+ if (buffer_space() <= kGap) {
+ GrowBuffer();
+ }
+}
+
+template <typename T>
+void Assembler::EmitHelper(T x) {
+ *reinterpret_cast<T*>(pc_) = x;
+ pc_ += sizeof(x);
+}
+
+void Assembler::emit(Instr x) {
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ DEBUG_PRINTF("%p: ", pc_);
+ disassembleInstr(x);
+ EmitHelper(x);
+ CheckTrampolinePoolQuick();
+}
+
+void Assembler::emit(ShortInstr x) {
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ DEBUG_PRINTF("%p: ", pc_);
+ disassembleInstr(x);
+ EmitHelper(x);
+ CheckTrampolinePoolQuick();
+}
+
+void Assembler::emit(uint64_t data) {
+ if (!is_buffer_growth_blocked()) CheckBuffer();
+ EmitHelper(data);
+}
+
+EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_RISCV64_ASSEMBLER_RISCV64_INL_H_
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
new file mode 100644
index 0000000000..e070e72f45
--- /dev/null
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
@@ -0,0 +1,3020 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2021 the V8 project authors. All rights reserved.
+
+#if V8_TARGET_ARCH_RISCV64
+
+#include "src/codegen/riscv64/assembler-riscv64.h"
+
+#include "src/base/cpu.h"
+#include "src/codegen/riscv64/assembler-riscv64-inl.h"
+#include "src/codegen/safepoint-table.h"
+#include "src/codegen/string-constants.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/diagnostics/disasm.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/objects/heap-number-inl.h"
+
+namespace v8 {
+namespace internal {
+// Get the CPU features enabled by the build. For cross compilation the
+// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS can be defined to enable
+// FPU instructions when building the snapshot.
+static unsigned CpuFeaturesImpliedByCompiler() {
+ unsigned answer = 0;
+#ifdef CAN_USE_FPU_INSTRUCTIONS
+ answer |= 1u << FPU;
+#endif // def CAN_USE_FPU_INSTRUCTIONS
+
+ return answer;
+}
+
+void CpuFeatures::ProbeImpl(bool cross_compile) {
+ supported_ |= CpuFeaturesImpliedByCompiler();
+
+ // Only use statically determined features for cross compile (snapshot).
+ if (cross_compile) return;
+
+ // Probe for additional features at runtime.
+ base::CPU cpu;
+ if (cpu.has_fpu()) supported_ |= 1u << FPU;
+}
+
+void CpuFeatures::PrintTarget() {}
+void CpuFeatures::PrintFeatures() {}
+
+int ToNumber(Register reg) {
+ DCHECK(reg.is_valid());
+ const int kNumbers[] = {
+ 0, // zero_reg
+ 1, // ra
+ 2, // sp
+ 3, // gp
+ 4, // tp
+ 5, // t0
+ 6, // t1
+ 7, // t2
+ 8, // s0/fp
+ 9, // s1
+ 10, // a0
+ 11, // a1
+ 12, // a2
+ 13, // a3
+ 14, // a4
+ 15, // a5
+ 16, // a6
+ 17, // a7
+ 18, // s2
+ 19, // s3
+ 20, // s4
+ 21, // s5
+ 22, // s6
+ 23, // s7
+ 24, // s8
+ 25, // s9
+ 26, // s10
+ 27, // s11
+ 28, // t3
+ 29, // t4
+ 30, // t5
+ 31, // t6
+ };
+ return kNumbers[reg.code()];
+}
+
+Register ToRegister(int num) {
+ DCHECK(num >= 0 && num < kNumRegisters);
+ const Register kRegisters[] = {
+ zero_reg, ra, sp, gp, tp, t0, t1, t2, fp, s1, a0, a1, a2, a3, a4, a5,
+ a6, a7, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, t3, t4, t5, t6};
+ return kRegisters[num];
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo.
+
+const int RelocInfo::kApplyMask =
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+
+bool RelocInfo::IsCodedSpecially() {
+  // The deserializer needs to know whether a pointer is specially coded. On
+  // RISC-V, being specially coded means that it is a lui/addi instruction
+  // sequence, and that is always the case inside code objects.
+ return true;
+}
+
+bool RelocInfo::IsInConstantPool() { return false; }
+
+uint32_t RelocInfo::wasm_call_tag() const {
+ DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
+ return static_cast<uint32_t>(
+ Assembler::target_address_at(pc_, constant_pool_));
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand and MemOperand.
+// See assembler-riscv64-inl.h for inlined constructors.
+
+Operand::Operand(Handle<HeapObject> handle)
+ : rm_(no_reg), rmode_(RelocInfo::FULL_EMBEDDED_OBJECT) {
+ value_.immediate = static_cast<intptr_t>(handle.address());
+}
+
+Operand Operand::EmbeddedNumber(double value) {
+ int32_t smi;
+ if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
+ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(value);
+ return result;
+}
+
+Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
+ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(str);
+ return result;
+}
+
+MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
+ offset_ = offset;
+}
+
+MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
+ OffsetAddend offset_addend)
+ : Operand(rm) {
+ offset_ = unit * multiplier + offset_addend;
+}
+
+void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
+ DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
+ for (auto& request : heap_object_requests_) {
+ Handle<HeapObject> object;
+ switch (request.kind()) {
+ case HeapObjectRequest::kHeapNumber:
+ object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+ request.heap_number());
+ break;
+ case HeapObjectRequest::kStringConstant:
+ const StringConstantBase* str = request.string();
+ CHECK_NOT_NULL(str);
+ object = str->AllocateStringConstant(isolate);
+ break;
+ }
+ Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
+ set_target_value_at(pc, reinterpret_cast<uint64_t>(object.location()));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+
+Assembler::Assembler(const AssemblerOptions& options,
+ std::unique_ptr<AssemblerBuffer> buffer)
+ : AssemblerBase(options, std::move(buffer)),
+ scratch_register_list_(t3.bit() | t5.bit()),
+ constpool_(this) {
+ reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
+
+ last_trampoline_pool_end_ = 0;
+ no_trampoline_pool_before_ = 0;
+ trampoline_pool_blocked_nesting_ = 0;
+  // We leave space (16 * kTrampolineSlotsSize)
+  // for the BlockTrampolinePoolScope buffer.
+ next_buffer_check_ = FLAG_force_long_branches
+ ? kMaxInt
+ : kMaxBranchOffset - kTrampolineSlotsSize * 16;
+ internal_trampoline_exception_ = false;
+ last_bound_pos_ = 0;
+
+ trampoline_emitted_ = FLAG_force_long_branches;
+ unbound_labels_count_ = 0;
+ block_buffer_growth_ = false;
+}
+
+void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset) {
+ // As a crutch to avoid having to add manual Align calls wherever we use a
+ // raw workflow to create Code objects (mostly in tests), add another Align
+  // call here. It does no harm - the end of the Code object is aligned to the
+  // (larger) kCodeAlignment anyway.
+ // TODO(jgruber): Consider moving responsibility for proper alignment to
+ // metadata table builders (safepoint, handler, constant pool, code
+ // comments).
+ DataAlign(Code::kMetadataAlignment);
+
+ ForceConstantPoolEmissionWithoutJump();
+
+ int code_comments_size = WriteCodeComments();
+
+ DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
+
+ AllocateAndInstallRequestedHeapObjects(isolate);
+
+ // Set up code descriptor.
+ // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
+ // this point to make CodeDesc initialization less fiddly.
+
+ static constexpr int kConstantPoolSize = 0;
+ const int instruction_size = pc_offset();
+ const int code_comments_offset = instruction_size - code_comments_size;
+ const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
+ const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
+ ? constant_pool_offset
+ : handler_table_offset;
+ const int safepoint_table_offset =
+ (safepoint_table_builder == kNoSafepointTable)
+ ? handler_table_offset2
+ : safepoint_table_builder->GetCodeOffset();
+ const int reloc_info_offset =
+ static_cast<int>(reloc_info_writer.pos() - buffer_->start());
+ CodeDesc::Initialize(desc, this, safepoint_table_offset,
+ handler_table_offset2, constant_pool_offset,
+ code_comments_offset, reloc_info_offset);
+}
+
+void Assembler::Align(int m) {
+ DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+void Assembler::CodeTargetAlign() {
+  // There is no known advantage to aligning branch/call targets to more than
+  // a single instruction.
+ Align(4);
+}
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the last
+// instruction using the label.
+
+// The link chain is terminated by an offset of 0 encoded in the instruction,
+// which is an otherwise illegal value (a branch offset of 0 is an infinite
+// loop). When this case is detected, return a position of -1, an otherwise
+// illegal position.
+const int kEndOfChain = -1;
+const int kEndOfJumpChain = 0;
+
+bool Assembler::IsBranch(Instr instr) {
+ return (instr & kBaseOpcodeMask) == BRANCH;
+}
+
+bool Assembler::IsJump(Instr instr) {
+ int Op = instr & kBaseOpcodeMask;
+ return Op == JAL || Op == JALR;
+}
+
+bool Assembler::IsJal(Instr instr) { return (instr & kBaseOpcodeMask) == JAL; }
+
+bool Assembler::IsJalr(Instr instr) {
+ return (instr & kBaseOpcodeMask) == JALR;
+}
+
+bool Assembler::IsCJal(Instr instr) {
+ return (instr & kRvcOpcodeMask) == RO_C_J;
+}
+
+bool Assembler::IsLui(Instr instr) { return (instr & kBaseOpcodeMask) == LUI; }
+bool Assembler::IsAuipc(Instr instr) {
+ return (instr & kBaseOpcodeMask) == AUIPC;
+}
+bool Assembler::IsAddiw(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ADDIW;
+}
+bool Assembler::IsAddi(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ADDI;
+}
+bool Assembler::IsOri(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ORI;
+}
+bool Assembler::IsSlli(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_SLLI;
+}
+
+bool Assembler::IsLd(Instr instr) {
+ return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_LD;
+}
+
+int Assembler::target_at(int pos, bool is_internal) {
+ if (is_internal) {
+ int64_t* p = reinterpret_cast<int64_t*>(buffer_start_ + pos);
+ int64_t address = *p;
+ if (address == kEndOfJumpChain) {
+ return kEndOfChain;
+ } else {
+ int64_t instr_address = reinterpret_cast<int64_t>(p);
+ DCHECK(instr_address - address < INT_MAX);
+ int delta = static_cast<int>(instr_address - address);
+ DCHECK(pos > delta);
+ return pos - delta;
+ }
+ }
+ Instruction* instruction = Instruction::At(buffer_start_ + pos);
+ DEBUG_PRINTF("target_at: %p (%d)\n\t",
+ reinterpret_cast<Instr*>(buffer_start_ + pos), pos);
+ Instr instr = instruction->InstructionBits();
+ disassembleInstr(instruction->InstructionBits());
+
+ switch (instruction->InstructionOpcodeType()) {
+ case BRANCH: {
+ int32_t imm13 = BranchOffset(instr);
+ if (imm13 == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ return pos + imm13;
+ }
+ } break;
+ case JAL: {
+ int32_t imm21 = JumpOffset(instr);
+ if (imm21 == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ return pos + imm21;
+ }
+ } break;
+ case JALR: {
+ int32_t imm12 = instr >> 20;
+ if (imm12 == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ return pos + imm12;
+ }
+ } break;
+ case LUI: {
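+      // A lui/addi (li) sequence encodes an absolute address within the
+      // buffer; translate it back into a buffer-relative position.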
+ Address pc = reinterpret_cast<Address>(buffer_start_ + pos);
+ pc = target_address_at(pc);
+ uint64_t instr_address = reinterpret_cast<uint64_t>(buffer_start_ + pos);
+ uint64_t imm = reinterpret_cast<uint64_t>(pc);
+ if (imm == kEndOfJumpChain) {
+ return kEndOfChain;
+ } else {
+ DCHECK(instr_address - imm < INT_MAX);
+ int32_t delta = static_cast<int32_t>(instr_address - imm);
+ DCHECK(pos > delta);
+ return pos - delta;
+ }
+ } break;
+ case AUIPC: {
+ Instr instr_auipc = instr;
+ Instr instr_I = instr_at(pos + 4);
+ DCHECK(IsJalr(instr_I) || IsAddi(instr_I));
+ int32_t offset = BrachlongOffset(instr_auipc, instr_I);
+ if (offset == kEndOfJumpChain) return kEndOfChain;
+ return offset + pos;
+ } break;
+ case RO_C_J: {
+ int32_t offset = instruction->RvcImm11CJValue();
+ if (offset == kEndOfJumpChain) return kEndOfChain;
+ return offset + pos;
+ } break;
+ default: {
+ if (instr == kEndOfJumpChain) {
+ return kEndOfChain;
+ } else {
+ int32_t imm18 =
+ ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+ return (imm18 + pos);
+ }
+ } break;
+ }
+}
+
+static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
+ Instr instr) {
+ int32_t imm = target_pos - pos;
+ DCHECK_EQ(imm & 1, 0);
+ DCHECK(is_intn(imm, Assembler::kBranchOffsetBits));
+
+ instr &= ~kBImm12Mask;
+ int32_t imm12 = ((imm & 0x800) >> 4) | // bit 11
+ ((imm & 0x1e) << 7) | // bits 4-1
+ ((imm & 0x7e0) << 20) | // bits 10-5
+ ((imm & 0x1000) << 19); // bit 12
+
+ return instr | (imm12 & kBImm12Mask);
+}
+
+static inline Instr SetLdOffset(int32_t offset, Instr instr) {
+ DCHECK(Assembler::IsLd(instr));
+ DCHECK(is_int12(offset));
+ instr &= ~kImm12Mask;
+ int32_t imm12 = offset << kImm12Shift;
+ return instr | (imm12 & kImm12Mask);
+}
+
+static inline Instr SetAuipcOffset(int32_t offset, Instr instr) {
+ DCHECK(Assembler::IsAuipc(instr));
+ DCHECK(is_int20(offset));
+ instr = (instr & ~kImm31_12Mask) | ((offset & kImm19_0Mask) << 12);
+ return instr;
+}
+
+static inline Instr SetJalOffset(int32_t pos, int32_t target_pos, Instr instr) {
+ DCHECK(Assembler::IsJal(instr));
+ int32_t imm = target_pos - pos;
+ DCHECK_EQ(imm & 1, 0);
+ DCHECK(is_intn(imm, Assembler::kJumpOffsetBits));
+
+ instr &= ~kImm20Mask;
+ int32_t imm20 = (imm & 0xff000) | // bits 19-12
+ ((imm & 0x800) << 9) | // bit 11
+ ((imm & 0x7fe) << 20) | // bits 10-1
+ ((imm & 0x100000) << 11); // bit 20
+
+ return instr | (imm20 & kImm20Mask);
+}
+
+static inline ShortInstr SetCJalOffset(int32_t pos, int32_t target_pos,
+ Instr instr) {
+ DCHECK(Assembler::IsCJal(instr));
+ int32_t imm = target_pos - pos;
+ DCHECK_EQ(imm & 1, 0);
+ DCHECK(is_intn(imm, Assembler::kCJalOffsetBits));
+ instr &= ~kImm11Mask;
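+  // The C.J imm field (instruction bits 12..2) encodes
+  // offset[11|4|9:8|10|6|7|3:1|5].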
+ int16_t imm11 = ((imm & 0x800) >> 1) | ((imm & 0x400) >> 4) |
+ ((imm & 0x300) >> 1) | ((imm & 0x80) >> 3) |
+ ((imm & 0x40) >> 1) | ((imm & 0x20) >> 5) |
+ ((imm & 0x10) << 5) | (imm & 0xe);
+ imm11 = imm11 << kImm11Shift;
+ DCHECK(Assembler::IsCJal(instr | (imm11 & kImm11Mask)));
+ return instr | (imm11 & kImm11Mask);
+}
+
+void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
+ if (is_internal) {
+ uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
+ *reinterpret_cast<uint64_t*>(buffer_start_ + pos) = imm;
+ return;
+ }
+ DEBUG_PRINTF("target_at_put: %p (%d) to %p (%d)\n",
+ reinterpret_cast<Instr*>(buffer_start_ + pos), pos,
+ reinterpret_cast<Instr*>(buffer_start_ + target_pos),
+ target_pos);
+ Instruction* instruction = Instruction::At(buffer_start_ + pos);
+ Instr instr = instruction->InstructionBits();
+
+ switch (instruction->InstructionOpcodeType()) {
+ case BRANCH: {
+ instr = SetBranchOffset(pos, target_pos, instr);
+ instr_at_put(pos, instr);
+ } break;
+ case JAL: {
+ instr = SetJalOffset(pos, target_pos, instr);
+ instr_at_put(pos, instr);
+ } break;
+ case LUI: {
+ Address pc = reinterpret_cast<Address>(buffer_start_ + pos);
+ set_target_value_at(
+ pc, reinterpret_cast<uint64_t>(buffer_start_ + target_pos));
+ } break;
+ case AUIPC: {
+ Instr instr_auipc = instr;
+ Instr instr_I = instr_at(pos + 4);
+ DCHECK(IsJalr(instr_I) || IsAddi(instr_I));
+
+ int64_t offset = target_pos - pos;
+ DCHECK(is_int32(offset));
+
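+      // Split the offset into an upper 20-bit and a lower 12-bit part. Adding
+      // 0x800 rounds the upper part so that the sign-extended lower part,
+      // added back by the paired jalr/addi, reproduces the full offset.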
+ int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)offset << 20 >> 20;
+
+ instr_auipc =
+ (instr_auipc & ~kImm31_12Mask) | ((Hi20 & kImm19_0Mask) << 12);
+ instr_at_put(pos, instr_auipc);
+
+ const int kImm31_20Mask = ((1 << 12) - 1) << 20;
+ const int kImm11_0Mask = ((1 << 12) - 1);
+ instr_I = (instr_I & ~kImm31_20Mask) | ((Lo12 & kImm11_0Mask) << 20);
+ instr_at_put(pos + 4, instr_I);
+ } break;
+ case RO_C_J: {
+ ShortInstr short_instr = SetCJalOffset(pos, target_pos, instr);
+ instr_at_put(pos, short_instr);
+ } break;
+ default: {
+ // Emitted label constant, not part of a branch.
+ // Make label relative to Code pointer of generated Code object.
+ instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ } break;
+ }
+ disassembleInstr(instr);
+}
+
+void Assembler::print(const Label* L) {
+ if (L->is_unused()) {
+ PrintF("unused label\n");
+ } else if (L->is_bound()) {
+ PrintF("bound label to %d\n", L->pos());
+ } else if (L->is_linked()) {
+ Label l;
+ l.link_to(L->pos());
+ PrintF("unbound label");
+ while (l.is_linked()) {
+ PrintF("@ %d ", l.pos());
+ Instr instr = instr_at(l.pos());
+ if ((instr & ~kImm16Mask) == 0) {
+ PrintF("value\n");
+ } else {
+ PrintF("%d\n", instr);
+ }
+ next(&l, is_internal_reference(&l));
+ }
+ } else {
+ PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+ }
+}
+
+void Assembler::bind_to(Label* L, int pos) {
+ DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
+ DEBUG_PRINTF("binding %d to label %p\n", pos, L);
+ int trampoline_pos = kInvalidSlotPos;
+ bool is_internal = false;
+ if (L->is_linked() && !trampoline_emitted_) {
+ unbound_labels_count_--;
+ if (!is_internal_reference(L)) {
+ next_buffer_check_ += kTrampolineSlotsSize;
+ }
+ }
+
+ while (L->is_linked()) {
+ int fixup_pos = L->pos();
+ int dist = pos - fixup_pos;
+ is_internal = is_internal_reference(L);
+ next(L, is_internal); // Call next before overwriting link with target
+ // at fixup_pos.
+ Instr instr = instr_at(fixup_pos);
+ DEBUG_PRINTF("\tfixup: %d to %d\n", fixup_pos, dist);
+ if (is_internal) {
+ target_at_put(fixup_pos, pos, is_internal);
+ } else {
+ if (IsBranch(instr)) {
+ if (dist > kMaxBranchOffset) {
+ if (trampoline_pos == kInvalidSlotPos) {
+ trampoline_pos = get_trampoline_entry(fixup_pos);
+ CHECK_NE(trampoline_pos, kInvalidSlotPos);
+ }
+ CHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
+ DEBUG_PRINTF("\t\ttrampolining: %d\n", trampoline_pos);
+ target_at_put(fixup_pos, trampoline_pos, false);
+ fixup_pos = trampoline_pos;
+ }
+ target_at_put(fixup_pos, pos, false);
+ } else if (IsJal(instr)) {
+ if (dist > kMaxJumpOffset) {
+ if (trampoline_pos == kInvalidSlotPos) {
+ trampoline_pos = get_trampoline_entry(fixup_pos);
+ CHECK_NE(trampoline_pos, kInvalidSlotPos);
+ }
+ CHECK((trampoline_pos - fixup_pos) <= kMaxJumpOffset);
+ DEBUG_PRINTF("\t\ttrampolining: %d\n", trampoline_pos);
+ target_at_put(fixup_pos, trampoline_pos, false);
+ fixup_pos = trampoline_pos;
+ }
+ target_at_put(fixup_pos, pos, false);
+ } else {
+ target_at_put(fixup_pos, pos, false);
+ }
+ }
+ }
+ L->bind_to(pos);
+
+ // Keep track of the last bound label so we don't eliminate any instructions
+ // before a bound label.
+ if (pos > last_bound_pos_) last_bound_pos_ = pos;
+}
+
+void Assembler::bind(Label* L) {
+ DCHECK(!L->is_bound()); // Label can only be bound once.
+ bind_to(L, pc_offset());
+}
+
+void Assembler::next(Label* L, bool is_internal) {
+ DCHECK(L->is_linked());
+ int link = target_at(L->pos(), is_internal);
+ if (link == kEndOfChain) {
+ L->Unuse();
+ } else {
+ DCHECK_GT(link, 0);
+ DEBUG_PRINTF("next: %p to %p (%d)\n", L,
+ reinterpret_cast<Instr*>(buffer_start_ + link), link);
+ L->link_to(link);
+ }
+}
+
+bool Assembler::is_near(Label* L) {
+ DCHECK(L->is_bound());
+ return is_intn((pc_offset() - L->pos()), kJumpOffsetBits);
+}
+
+bool Assembler::is_near(Label* L, OffsetSize bits) {
+ if (L == nullptr || !L->is_bound()) return true;
+ return is_intn((pc_offset() - L->pos()), bits);
+}
+
+bool Assembler::is_near_branch(Label* L) {
+ DCHECK(L->is_bound());
+ return is_intn((pc_offset() - L->pos()), kBranchOffsetBits);
+}
+
+int Assembler::BranchOffset(Instr instr) {
+ // | imm[12] | imm[10:5] | rs2 | rs1 | funct3 | imm[4:1|11] | opcode |
+ // 31 25 11 7
+ int32_t imm13 = ((instr & 0xf00) >> 7) | ((instr & 0x7e000000) >> 20) |
+ ((instr & 0x80) << 4) | ((instr & 0x80000000) >> 19);
+ imm13 = imm13 << 19 >> 19;
+ return imm13;
+}
+
+int Assembler::JumpOffset(Instr instr) {
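+  // | imm[20] | imm[10:1] | imm[11] | imm[19:12] | rd | opcode |
+  // 31          30      21  20        19      12   11         7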
+ int32_t imm21 = ((instr & 0x7fe00000) >> 20) | ((instr & 0x100000) >> 9) |
+ (instr & 0xff000) | ((instr & 0x80000000) >> 11);
+ imm21 = imm21 << 11 >> 11;
+ return imm21;
+}
+
+int Assembler::CJumpOffset(Instr instr) {
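+  // | funct3 | imm[11|4|9:8|10|6|7|3:1|5] | opcode |
+  // 15      13 12                        2  1      0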
+ int32_t imm12 = ((instr & 0x4) << 3) | ((instr & 0x38) >> 2) |
+ ((instr & 0x40) << 1) | ((instr & 0x80) >> 1) |
+ ((instr & 0x100) << 2) | ((instr & 0x600) >> 1) |
+ ((instr & 0x800) >> 7) | ((instr & 0x1000) >> 1);
+ imm12 = imm12 << 20 >> 20;
+ return imm12;
+}
+
+int Assembler::BrachlongOffset(Instr auipc, Instr instr_I) {
+ DCHECK(reinterpret_cast<Instruction*>(&instr_I)->InstructionType() ==
+ InstructionBase::kIType);
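+  // The target is encoded as an auipc holding the upper 20 bits (kept in
+  // place at bits 31:12) followed by a jalr/addi holding the sign-extended
+  // lower 12 bits; summing the two reconstructs the 32-bit offset.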
+ const int kImm19_0Mask = ((1 << 20) - 1);
+ int32_t imm_auipc = auipc & (kImm19_0Mask << 12);
+ int32_t imm_12 = instr_I >> 20;
+ int32_t offset = imm_12 + imm_auipc;
+ return offset;
+}
+
+int Assembler::LdOffset(Instr instr) {
+ DCHECK(IsLd(instr));
+ int32_t imm12 = (instr & kImm12Mask) >> 20;
+ imm12 = imm12 << 12 >> 12;
+ return imm12;
+}
+
+int Assembler::AuipcOffset(Instr instr) {
+ DCHECK(IsAuipc(instr));
+ int32_t imm20 = instr & kImm20Mask;
+ return imm20;
+}
+
+// We have to use a temporary register for things that can be relocated even
+// if they can be encoded in RISC-V's 12 bits of immediate-offset instruction
+// space. There is no guarantee that the relocated location can be similarly
+// encoded.
+bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
+ return !RelocInfo::IsNone(rmode);
+}
+
+void Assembler::disassembleInstr(Instr instr) {
+ if (!FLAG_debug_riscv) return;
+ disasm::NameConverter converter;
+ disasm::Disassembler disasm(converter);
+ EmbeddedVector<char, 128> disasm_buffer;
+
+ disasm.InstructionDecode(disasm_buffer, reinterpret_cast<byte*>(&instr));
+ DEBUG_PRINTF("%s\n", disasm_buffer.begin());
+}
+
+// ----- Top-level instruction formats match those in the ISA manual
+// (R, I, S, B, U, J). These also match the formats defined in the compiler.
+void Assembler::GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode,
+ Register rd, Register rs1, Register rs2) {
+ DCHECK(is_uint7(funct7) && is_uint3(funct3) && rd.is_valid() &&
+ rs1.is_valid() && rs2.is_valid());
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode,
+ FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ DCHECK(is_uint7(funct7) && is_uint3(funct3) && rd.is_valid() &&
+ rs1.is_valid() && rs2.is_valid());
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode,
+ Register rd, FPURegister rs1, Register rs2) {
+ DCHECK(is_uint7(funct7) && is_uint3(funct3) && rd.is_valid() &&
+ rs1.is_valid() && rs2.is_valid());
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode,
+ FPURegister rd, Register rs1, Register rs2) {
+ DCHECK(is_uint7(funct7) && is_uint3(funct3) && rd.is_valid() &&
+ rs1.is_valid() && rs2.is_valid());
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode,
+ FPURegister rd, FPURegister rs1, Register rs2) {
+ DCHECK(is_uint7(funct7) && is_uint3(funct3) && rd.is_valid() &&
+ rs1.is_valid() && rs2.is_valid());
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode,
+ Register rd, FPURegister rs1, FPURegister rs2) {
+ DCHECK(is_uint7(funct7) && is_uint3(funct3) && rd.is_valid() &&
+ rs1.is_valid() && rs2.is_valid());
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrR4(uint8_t funct2, Opcode opcode, Register rd,
+ Register rs1, Register rs2, Register rs3,
+ RoundingMode frm) {
+ DCHECK(is_uint2(funct2) && rd.is_valid() && rs1.is_valid() &&
+ rs2.is_valid() && rs3.is_valid() && is_uint3(frm));
+ Instr instr = opcode | (rd.code() << kRdShift) | (frm << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct2 << kFunct2Shift) | (rs3.code() << kRs3Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrR4(uint8_t funct2, Opcode opcode, FPURegister rd,
+ FPURegister rs1, FPURegister rs2, FPURegister rs3,
+ RoundingMode frm) {
+ DCHECK(is_uint2(funct2) && rd.is_valid() && rs1.is_valid() &&
+ rs2.is_valid() && rs3.is_valid() && is_uint3(frm));
+ Instr instr = opcode | (rd.code() << kRdShift) | (frm << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct2 << kFunct2Shift) | (rs3.code() << kRs3Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrRAtomic(uint8_t funct5, bool aq, bool rl,
+ uint8_t funct3, Register rd, Register rs1,
+ Register rs2) {
+ DCHECK(is_uint5(funct5) && is_uint3(funct3) && rd.is_valid() &&
+ rs1.is_valid() && rs2.is_valid());
+ Instr instr = AMO | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (rl << kRlShift) | (aq << kAqShift) | (funct5 << kFunct5Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrRFrm(uint8_t funct7, Opcode opcode, Register rd,
+ Register rs1, Register rs2, RoundingMode frm) {
+ DCHECK(rd.is_valid() && rs1.is_valid() && rs2.is_valid() && is_uint3(frm));
+ Instr instr = opcode | (rd.code() << kRdShift) | (frm << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (rs2.code() << kRs2Shift) |
+ (funct7 << kFunct7Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrI(uint8_t funct3, Opcode opcode, Register rd,
+ Register rs1, int16_t imm12) {
+ DCHECK(is_uint3(funct3) && rd.is_valid() && rs1.is_valid() &&
+ (is_uint12(imm12) || is_int12(imm12)));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (imm12 << kImm12Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrI(uint8_t funct3, Opcode opcode, FPURegister rd,
+ Register rs1, int16_t imm12) {
+ DCHECK(is_uint3(funct3) && rd.is_valid() && rs1.is_valid() &&
+ (is_uint12(imm12) || is_int12(imm12)));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (imm12 << kImm12Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrIShift(bool arithshift, uint8_t funct3, Opcode opcode,
+ Register rd, Register rs1, uint8_t shamt) {
+ DCHECK(is_uint3(funct3) && rd.is_valid() && rs1.is_valid() &&
+ is_uint6(shamt));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (shamt << kShamtShift) |
+ (arithshift << kArithShiftShift);
+ emit(instr);
+}
+
+void Assembler::GenInstrIShiftW(bool arithshift, uint8_t funct3, Opcode opcode,
+ Register rd, Register rs1, uint8_t shamt) {
+ DCHECK(is_uint3(funct3) && rd.is_valid() && rs1.is_valid() &&
+ is_uint5(shamt));
+ Instr instr = opcode | (rd.code() << kRdShift) | (funct3 << kFunct3Shift) |
+ (rs1.code() << kRs1Shift) | (shamt << kShamtWShift) |
+ (arithshift << kArithShiftShift);
+ emit(instr);
+}
+
+void Assembler::GenInstrS(uint8_t funct3, Opcode opcode, Register rs1,
+ Register rs2, int16_t imm12) {
+ DCHECK(is_uint3(funct3) && rs1.is_valid() && rs2.is_valid() &&
+ is_int12(imm12));
+ Instr instr = opcode | ((imm12 & 0x1f) << 7) | // bits 4-0
+ (funct3 << kFunct3Shift) | (rs1.code() << kRs1Shift) |
+ (rs2.code() << kRs2Shift) |
+ ((imm12 & 0xfe0) << 20); // bits 11-5
+ emit(instr);
+}
+
+void Assembler::GenInstrS(uint8_t funct3, Opcode opcode, Register rs1,
+ FPURegister rs2, int16_t imm12) {
+ DCHECK(is_uint3(funct3) && rs1.is_valid() && rs2.is_valid() &&
+ is_int12(imm12));
+ Instr instr = opcode | ((imm12 & 0x1f) << 7) | // bits 4-0
+ (funct3 << kFunct3Shift) | (rs1.code() << kRs1Shift) |
+ (rs2.code() << kRs2Shift) |
+ ((imm12 & 0xfe0) << 20); // bits 11-5
+ emit(instr);
+}
+
+void Assembler::GenInstrB(uint8_t funct3, Opcode opcode, Register rs1,
+ Register rs2, int16_t imm13) {
+ DCHECK(is_uint3(funct3) && rs1.is_valid() && rs2.is_valid() &&
+ is_int13(imm13) && ((imm13 & 1) == 0));
+ Instr instr = opcode | ((imm13 & 0x800) >> 4) | // bit 11
+ ((imm13 & 0x1e) << 7) | // bits 4-1
+ (funct3 << kFunct3Shift) | (rs1.code() << kRs1Shift) |
+ (rs2.code() << kRs2Shift) |
+ ((imm13 & 0x7e0) << 20) | // bits 10-5
+ ((imm13 & 0x1000) << 19); // bit 12
+ emit(instr);
+}
+
+void Assembler::GenInstrU(Opcode opcode, Register rd, int32_t imm20) {
+ DCHECK(rd.is_valid() && (is_int20(imm20) || is_uint20(imm20)));
+ Instr instr = opcode | (rd.code() << kRdShift) | (imm20 << kImm20Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrJ(Opcode opcode, Register rd, int32_t imm21) {
+ DCHECK(rd.is_valid() && is_int21(imm21) && ((imm21 & 1) == 0));
+ Instr instr = opcode | (rd.code() << kRdShift) |
+ (imm21 & 0xff000) | // bits 19-12
+ ((imm21 & 0x800) << 9) | // bit 11
+ ((imm21 & 0x7fe) << 20) | // bits 10-1
+ ((imm21 & 0x100000) << 11); // bit 20
+ emit(instr);
+}
+
+void Assembler::GenInstrCR(uint8_t funct4, Opcode opcode, Register rd,
+ Register rs2) {
+ DCHECK(is_uint4(funct4) && rd.is_valid() && rs2.is_valid());
+ ShortInstr instr = opcode | (rs2.code() << kRvcRs2Shift) |
+ (rd.code() << kRvcRdShift) | (funct4 << kRvcFunct4Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrCA(uint8_t funct6, Opcode opcode, Register rd,
+ uint8_t funct, Register rs2) {
+ DCHECK(is_uint6(funct6) && rd.is_valid() && rs2.is_valid() &&
+ is_uint2(funct));
+ ShortInstr instr = opcode | ((rs2.code() & 0x7) << kRvcRs2sShift) |
+ ((rd.code() & 0x7) << kRvcRs1sShift) |
+ (funct6 << kRvcFunct6Shift) | (funct << kRvcFunct2Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrCI(uint8_t funct3, Opcode opcode, Register rd,
+ int8_t imm6) {
+ DCHECK(is_uint3(funct3) && rd.is_valid() && is_int6(imm6));
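+  // CI-format: imm[5] is placed at instruction bit 12, imm[4:0] at bits 6..2.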
+ ShortInstr instr = opcode | ((imm6 & 0x1f) << 2) |
+ (rd.code() << kRvcRdShift) | ((imm6 & 0x20) << 7) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrCIU(uint8_t funct3, Opcode opcode, Register rd,
+ uint8_t uimm6) {
+ DCHECK(is_uint3(funct3) && rd.is_valid() && is_uint6(uimm6));
+ ShortInstr instr = opcode | ((uimm6 & 0x1f) << 2) |
+ (rd.code() << kRvcRdShift) | ((uimm6 & 0x20) << 7) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrCIU(uint8_t funct3, Opcode opcode, FPURegister rd,
+ uint8_t uimm6) {
+ DCHECK(is_uint3(funct3) && rd.is_valid() && is_uint6(uimm6));
+ ShortInstr instr = opcode | ((uimm6 & 0x1f) << 2) |
+ (rd.code() << kRvcRdShift) | ((uimm6 & 0x20) << 7) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrCIW(uint8_t funct3, Opcode opcode, Register rd,
+ uint8_t uimm8) {
+ DCHECK(is_uint3(funct3) && rd.is_valid() && is_uint8(uimm8));
+ ShortInstr instr = opcode | ((uimm8) << 5) |
+ ((rd.code() & 0x7) << kRvcRs2sShift) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrCSS(uint8_t funct3, Opcode opcode, Register rs2,
+ uint8_t uimm6) {
+ DCHECK(is_uint3(funct3) && rs2.is_valid() && is_uint6(uimm6));
+ ShortInstr instr = opcode | (uimm6 << 7) | (rs2.code() << kRvcRs2Shift) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrCSS(uint8_t funct3, Opcode opcode, FPURegister rs2,
+ uint8_t uimm6) {
+ DCHECK(is_uint3(funct3) && rs2.is_valid() && is_uint6(uimm6));
+ ShortInstr instr = opcode | (uimm6 << 7) | (rs2.code() << kRvcRs2Shift) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrCL(uint8_t funct3, Opcode opcode, Register rd,
+ Register rs1, uint8_t uimm5) {
+ DCHECK(is_uint3(funct3) && rd.is_valid() && rs1.is_valid() &&
+ is_uint5(uimm5));
+ ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) |
+ ((rd.code() & 0x7) << kRvcRs2sShift) |
+ ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift);
+ emit(instr);
+}
+
+void Assembler::GenInstrCL(uint8_t funct3, Opcode opcode, FPURegister rd,
+ Register rs1, uint8_t uimm5) {
+ DCHECK(is_uint3(funct3) && rd.is_valid() && rs1.is_valid() &&
+ is_uint5(uimm5));
+ ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) |
+ ((rd.code() & 0x7) << kRvcRs2sShift) |
+ ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift);
+ emit(instr);
+}
+
+void Assembler::GenInstrCJ(uint8_t funct3, Opcode opcode, uint16_t uint11) {
+ DCHECK(is_uint11(uint11));
+ ShortInstr instr = opcode | (funct3 << kRvcFunct3Shift) | (uint11 << 2);
+ emit(instr);
+}
+
+void Assembler::GenInstrCS(uint8_t funct3, Opcode opcode, Register rs2,
+ Register rs1, uint8_t uimm5) {
+ DCHECK(is_uint3(funct3) && rs2.is_valid() && rs1.is_valid() &&
+ is_uint5(uimm5));
+ ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) |
+ ((rs2.code() & 0x7) << kRvcRs2sShift) |
+ ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift);
+ emit(instr);
+}
+
+void Assembler::GenInstrCS(uint8_t funct3, Opcode opcode, FPURegister rs2,
+ Register rs1, uint8_t uimm5) {
+ DCHECK(is_uint3(funct3) && rs2.is_valid() && rs1.is_valid() &&
+ is_uint5(uimm5));
+ ShortInstr instr = opcode | ((uimm5 & 0x3) << 5) |
+ ((rs2.code() & 0x7) << kRvcRs2sShift) |
+ ((uimm5 & 0x1c) << 8) | (funct3 << kRvcFunct3Shift) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift);
+ emit(instr);
+}
+
+// ----- Instruction class templates match those in the compiler
+
+void Assembler::GenInstrBranchCC_rri(uint8_t funct3, Register rs1, Register rs2,
+ int16_t imm13) {
+ GenInstrB(funct3, BRANCH, rs1, rs2, imm13);
+}
+
+void Assembler::GenInstrLoad_ri(uint8_t funct3, Register rd, Register rs1,
+ int16_t imm12) {
+ GenInstrI(funct3, LOAD, rd, rs1, imm12);
+}
+
+void Assembler::GenInstrStore_rri(uint8_t funct3, Register rs1, Register rs2,
+ int16_t imm12) {
+ GenInstrS(funct3, STORE, rs1, rs2, imm12);
+}
+
+void Assembler::GenInstrALU_ri(uint8_t funct3, Register rd, Register rs1,
+ int16_t imm12) {
+ GenInstrI(funct3, OP_IMM, rd, rs1, imm12);
+}
+
+void Assembler::GenInstrShift_ri(bool arithshift, uint8_t funct3, Register rd,
+ Register rs1, uint8_t shamt) {
+ DCHECK(is_uint6(shamt));
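+  // Bit 10 of the immediate (instruction bit 30) selects an arithmetic shift.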
+ GenInstrI(funct3, OP_IMM, rd, rs1, (arithshift << 10) | shamt);
+}
+
+void Assembler::GenInstrALU_rr(uint8_t funct7, uint8_t funct3, Register rd,
+ Register rs1, Register rs2) {
+ GenInstrR(funct7, funct3, OP, rd, rs1, rs2);
+}
+
+void Assembler::GenInstrCSR_ir(uint8_t funct3, Register rd,
+ ControlStatusReg csr, Register rs1) {
+ GenInstrI(funct3, SYSTEM, rd, rs1, csr);
+}
+
+void Assembler::GenInstrCSR_ii(uint8_t funct3, Register rd,
+ ControlStatusReg csr, uint8_t imm5) {
+ GenInstrI(funct3, SYSTEM, rd, ToRegister(imm5), csr);
+}
+
+void Assembler::GenInstrShiftW_ri(bool arithshift, uint8_t funct3, Register rd,
+ Register rs1, uint8_t shamt) {
+ GenInstrIShiftW(arithshift, funct3, OP_IMM_32, rd, rs1, shamt);
+}
+
+void Assembler::GenInstrALUW_rr(uint8_t funct7, uint8_t funct3, Register rd,
+ Register rs1, Register rs2) {
+ GenInstrR(funct7, funct3, OP_32, rd, rs1, rs2);
+}
+
+void Assembler::GenInstrPriv(uint8_t funct7, Register rs1, Register rs2) {
+ GenInstrR(funct7, 0b000, SYSTEM, ToRegister(0), rs1, rs2);
+}
+
+void Assembler::GenInstrLoadFP_ri(uint8_t funct3, FPURegister rd, Register rs1,
+ int16_t imm12) {
+ GenInstrI(funct3, LOAD_FP, rd, rs1, imm12);
+}
+
+void Assembler::GenInstrStoreFP_rri(uint8_t funct3, Register rs1,
+ FPURegister rs2, int16_t imm12) {
+ GenInstrS(funct3, STORE_FP, rs1, rs2, imm12);
+}
+
+void Assembler::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd,
+ FPURegister rs1, FPURegister rs2) {
+ GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
+}
+
+void Assembler::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd,
+ Register rs1, Register rs2) {
+ GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
+}
+
+void Assembler::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd,
+ FPURegister rs1, Register rs2) {
+ GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
+}
+
+void Assembler::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, Register rd,
+ FPURegister rs1, Register rs2) {
+ GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
+}
+
+void Assembler::GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, Register rd,
+ FPURegister rs1, FPURegister rs2) {
+ GenInstrR(funct7, funct3, OP_FP, rd, rs1, rs2);
+}
+
+// Returns the next free trampoline entry.
+int32_t Assembler::get_trampoline_entry(int32_t pos) {
+ int32_t trampoline_entry = kInvalidSlotPos;
+ if (!internal_trampoline_exception_) {
+ DEBUG_PRINTF("\tstart: %d,pos: %d\n", trampoline_.start(), pos);
+ if (trampoline_.start() > pos) {
+ trampoline_entry = trampoline_.take_slot();
+ }
+
+ if (kInvalidSlotPos == trampoline_entry) {
+ internal_trampoline_exception_ = true;
+ }
+ }
+ return trampoline_entry;
+}
+
+uint64_t Assembler::jump_address(Label* L) {
+ int64_t target_pos;
+ DEBUG_PRINTF("jump_address: %p to %p (%d)\n", L,
+ reinterpret_cast<Instr*>(buffer_start_ + pc_offset()),
+ pc_offset());
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link.
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ DEBUG_PRINTF("\tstarted link\n");
+ return kEndOfJumpChain;
+ }
+ }
+ uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
+ DCHECK_EQ(imm & 3, 0);
+
+ return imm;
+}
+
+uint64_t Assembler::branch_long_offset(Label* L) {
+ int64_t target_pos;
+
+ DEBUG_PRINTF("branch_long_offset: %p to %p (%d)\n", L,
+ reinterpret_cast<Instr*>(buffer_start_ + pc_offset()),
+ pc_offset());
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link.
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ DEBUG_PRINTF("\tstarted link\n");
+ return kEndOfJumpChain;
+ }
+ }
+ int64_t offset = target_pos - pc_offset();
+ DCHECK_EQ(offset & 3, 0);
+
+ return static_cast<uint64_t>(offset);
+}
+
+int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
+ int32_t target_pos;
+
+ DEBUG_PRINTF("branch_offset_helper: %p to %p (%d)\n", L,
+ reinterpret_cast<Instr*>(buffer_start_ + pc_offset()),
+ pc_offset());
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ DEBUG_PRINTF("\tbound: %d", target_pos);
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos();
+ L->link_to(pc_offset());
+ DEBUG_PRINTF("\tadded to link: %d\n", target_pos);
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ DEBUG_PRINTF("\tstarted link\n");
+ return kEndOfJumpChain;
+ }
+ }
+
+ int32_t offset = target_pos - pc_offset();
+ DCHECK(is_intn(offset, bits));
+ DCHECK_EQ(offset & 1, 0);
+ DEBUG_PRINTF("\toffset = %d\n", offset);
+ return offset;
+}
+
+void Assembler::label_at_put(Label* L, int at_offset) {
+ int target_pos;
+ DEBUG_PRINTF("label_at_put: %p @ %p (%d)\n", L,
+ reinterpret_cast<Instr*>(buffer_start_ + at_offset), at_offset);
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link.
+ int32_t imm18 = target_pos - at_offset;
+ DCHECK_EQ(imm18 & 3, 0);
+ int32_t imm16 = imm18 >> 2;
+ DCHECK(is_int16(imm16));
+ instr_at_put(at_offset, (imm16 & kImm16Mask));
+ } else {
+ target_pos = kEndOfJumpChain;
+ instr_at_put(at_offset, target_pos);
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ }
+ L->link_to(at_offset);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Instructions
+//===----------------------------------------------------------------------===//
+
+void Assembler::lui(Register rd, int32_t imm20) { GenInstrU(LUI, rd, imm20); }
+
+void Assembler::auipc(Register rd, int32_t imm20) {
+ GenInstrU(AUIPC, rd, imm20);
+}
+
+// Jumps
+
+void Assembler::jal(Register rd, int32_t imm21) {
+ GenInstrJ(JAL, rd, imm21);
+ BlockTrampolinePoolFor(1);
+}
+
+void Assembler::jalr(Register rd, Register rs1, int16_t imm12) {
+ GenInstrI(0b000, JALR, rd, rs1, imm12);
+ BlockTrampolinePoolFor(1);
+}
+
+// Branches
+
+void Assembler::beq(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b000, rs1, rs2, imm13);
+}
+
+void Assembler::bne(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b001, rs1, rs2, imm13);
+}
+
+void Assembler::blt(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b100, rs1, rs2, imm13);
+}
+
+void Assembler::bge(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b101, rs1, rs2, imm13);
+}
+
+void Assembler::bltu(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b110, rs1, rs2, imm13);
+}
+
+void Assembler::bgeu(Register rs1, Register rs2, int16_t imm13) {
+ GenInstrBranchCC_rri(0b111, rs1, rs2, imm13);
+}
+
+// Loads
+
+void Assembler::lb(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b000, rd, rs1, imm12);
+}
+
+void Assembler::lh(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b001, rd, rs1, imm12);
+}
+
+void Assembler::lw(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b010, rd, rs1, imm12);
+}
+
+void Assembler::lbu(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b100, rd, rs1, imm12);
+}
+
+void Assembler::lhu(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b101, rd, rs1, imm12);
+}
+
+// Stores
+
+void Assembler::sb(Register source, Register base, int16_t imm12) {
+ GenInstrStore_rri(0b000, base, source, imm12);
+}
+
+void Assembler::sh(Register source, Register base, int16_t imm12) {
+ GenInstrStore_rri(0b001, base, source, imm12);
+}
+
+void Assembler::sw(Register source, Register base, int16_t imm12) {
+ GenInstrStore_rri(0b010, base, source, imm12);
+}
+
+// Arithmetic with immediate
+
+void Assembler::addi(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b000, rd, rs1, imm12);
+}
+
+void Assembler::slti(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b010, rd, rs1, imm12);
+}
+
+void Assembler::sltiu(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b011, rd, rs1, imm12);
+}
+
+void Assembler::xori(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b100, rd, rs1, imm12);
+}
+
+void Assembler::ori(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b110, rd, rs1, imm12);
+}
+
+void Assembler::andi(Register rd, Register rs1, int16_t imm12) {
+ GenInstrALU_ri(0b111, rd, rs1, imm12);
+}
+
+void Assembler::slli(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShift_ri(0, 0b001, rd, rs1, shamt & 0x3f);
+}
+
+void Assembler::srli(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShift_ri(0, 0b101, rd, rs1, shamt & 0x3f);
+}
+
+void Assembler::srai(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShift_ri(1, 0b101, rd, rs1, shamt & 0x3f);
+}
+
+// Arithmetic
+
+void Assembler::add(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b000, rd, rs1, rs2);
+}
+
+void Assembler::sub(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0100000, 0b000, rd, rs1, rs2);
+}
+
+void Assembler::sll(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b001, rd, rs1, rs2);
+}
+
+void Assembler::slt(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b010, rd, rs1, rs2);
+}
+
+void Assembler::sltu(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b011, rd, rs1, rs2);
+}
+
+void Assembler::xor_(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b100, rd, rs1, rs2);
+}
+
+void Assembler::srl(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b101, rd, rs1, rs2);
+}
+
+void Assembler::sra(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0100000, 0b101, rd, rs1, rs2);
+}
+
+void Assembler::or_(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b110, rd, rs1, rs2);
+}
+
+void Assembler::and_(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000000, 0b111, rd, rs1, rs2);
+}
+
+// Memory fences
+
+void Assembler::fence(uint8_t pred, uint8_t succ) {
+ DCHECK(is_uint4(pred) && is_uint4(succ));
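+  // imm12 layout: fm[11:8] = 0000, pred[7:4], succ[3:0].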
+ uint16_t imm12 = succ | (pred << 4) | (0b0000 << 8);
+ GenInstrI(0b000, MISC_MEM, ToRegister(0), ToRegister(0), imm12);
+}
+
+void Assembler::fence_tso() {
+ uint16_t imm12 = (0b0011) | (0b0011 << 4) | (0b1000 << 8);
+ GenInstrI(0b000, MISC_MEM, ToRegister(0), ToRegister(0), imm12);
+}
+
+// Environment call / break
+
+void Assembler::ecall() {
+ GenInstrI(0b000, SYSTEM, ToRegister(0), ToRegister(0), 0);
+}
+
+void Assembler::ebreak() {
+ GenInstrI(0b000, SYSTEM, ToRegister(0), ToRegister(0), 1);
+}
+
+// This is a de facto standard (as set by GNU binutils) 32-bit unimplemented
+// instruction (i.e., it should always trap, if your implementation has invalid
+// instruction traps).
+void Assembler::unimp() {
+ GenInstrI(0b001, SYSTEM, ToRegister(0), ToRegister(0), 0b110000000000);
+}
+
+// CSR
+
+void Assembler::csrrw(Register rd, ControlStatusReg csr, Register rs1) {
+ GenInstrCSR_ir(0b001, rd, csr, rs1);
+}
+
+void Assembler::csrrs(Register rd, ControlStatusReg csr, Register rs1) {
+ GenInstrCSR_ir(0b010, rd, csr, rs1);
+}
+
+void Assembler::csrrc(Register rd, ControlStatusReg csr, Register rs1) {
+ GenInstrCSR_ir(0b011, rd, csr, rs1);
+}
+
+void Assembler::csrrwi(Register rd, ControlStatusReg csr, uint8_t imm5) {
+ GenInstrCSR_ii(0b101, rd, csr, imm5);
+}
+
+void Assembler::csrrsi(Register rd, ControlStatusReg csr, uint8_t imm5) {
+ GenInstrCSR_ii(0b110, rd, csr, imm5);
+}
+
+void Assembler::csrrci(Register rd, ControlStatusReg csr, uint8_t imm5) {
+ GenInstrCSR_ii(0b111, rd, csr, imm5);
+}
+
+// RV64I
+
+void Assembler::lwu(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b110, rd, rs1, imm12);
+}
+
+void Assembler::ld(Register rd, Register rs1, int16_t imm12) {
+ GenInstrLoad_ri(0b011, rd, rs1, imm12);
+}
+
+void Assembler::sd(Register source, Register base, int16_t imm12) {
+ GenInstrStore_rri(0b011, base, source, imm12);
+}
+
+void Assembler::addiw(Register rd, Register rs1, int16_t imm12) {
+ GenInstrI(0b000, OP_IMM_32, rd, rs1, imm12);
+}
+
+void Assembler::slliw(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShiftW_ri(0, 0b001, rd, rs1, shamt & 0x1f);
+}
+
+void Assembler::srliw(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShiftW_ri(0, 0b101, rd, rs1, shamt & 0x1f);
+}
+
+void Assembler::sraiw(Register rd, Register rs1, uint8_t shamt) {
+ GenInstrShiftW_ri(1, 0b101, rd, rs1, shamt & 0x1f);
+}
+
+void Assembler::addw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000000, 0b000, rd, rs1, rs2);
+}
+
+void Assembler::subw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0100000, 0b000, rd, rs1, rs2);
+}
+
+void Assembler::sllw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000000, 0b001, rd, rs1, rs2);
+}
+
+void Assembler::srlw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000000, 0b101, rd, rs1, rs2);
+}
+
+void Assembler::sraw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0100000, 0b101, rd, rs1, rs2);
+}
+
+// RV32M Standard Extension
+
+void Assembler::mul(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b000, rd, rs1, rs2);
+}
+
+void Assembler::mulh(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b001, rd, rs1, rs2);
+}
+
+void Assembler::mulhsu(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b010, rd, rs1, rs2);
+}
+
+void Assembler::mulhu(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b011, rd, rs1, rs2);
+}
+
+void Assembler::div(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b100, rd, rs1, rs2);
+}
+
+void Assembler::divu(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b101, rd, rs1, rs2);
+}
+
+void Assembler::rem(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b110, rd, rs1, rs2);
+}
+
+void Assembler::remu(Register rd, Register rs1, Register rs2) {
+ GenInstrALU_rr(0b0000001, 0b111, rd, rs1, rs2);
+}
+
+// RV64M Standard Extension (in addition to RV32M)
+
+void Assembler::mulw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000001, 0b000, rd, rs1, rs2);
+}
+
+void Assembler::divw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000001, 0b100, rd, rs1, rs2);
+}
+
+void Assembler::divuw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000001, 0b101, rd, rs1, rs2);
+}
+
+void Assembler::remw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000001, 0b110, rd, rs1, rs2);
+}
+
+void Assembler::remuw(Register rd, Register rs1, Register rs2) {
+ GenInstrALUW_rr(0b0000001, 0b111, rd, rs1, rs2);
+}
+
+// RV32A Standard Extension
+
+void Assembler::lr_w(bool aq, bool rl, Register rd, Register rs1) {
+ GenInstrRAtomic(0b00010, aq, rl, 0b010, rd, rs1, zero_reg);
+}
+
+void Assembler::sc_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00011, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void Assembler::amoswap_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00001, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void Assembler::amoadd_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00000, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void Assembler::amoxor_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00100, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void Assembler::amoand_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b01100, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void Assembler::amoor_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b01000, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void Assembler::amomin_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b10000, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void Assembler::amomax_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b10100, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void Assembler::amominu_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b11000, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+void Assembler::amomaxu_w(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b11100, aq, rl, 0b010, rd, rs1, rs2);
+}
+
+// RV64A Standard Extension (in addition to RV32A)
+
+void Assembler::lr_d(bool aq, bool rl, Register rd, Register rs1) {
+ GenInstrRAtomic(0b00010, aq, rl, 0b011, rd, rs1, zero_reg);
+}
+
+void Assembler::sc_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00011, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void Assembler::amoswap_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00001, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void Assembler::amoadd_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00000, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void Assembler::amoxor_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b00100, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void Assembler::amoand_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b01100, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void Assembler::amoor_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b01000, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void Assembler::amomin_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b10000, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void Assembler::amomax_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b10100, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void Assembler::amominu_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b11000, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+void Assembler::amomaxu_d(bool aq, bool rl, Register rd, Register rs1,
+ Register rs2) {
+ GenInstrRAtomic(0b11100, aq, rl, 0b011, rd, rs1, rs2);
+}
+
+// RV32F Standard Extension
+
+void Assembler::flw(FPURegister rd, Register rs1, int16_t imm12) {
+ GenInstrLoadFP_ri(0b010, rd, rs1, imm12);
+}
+
+void Assembler::fsw(FPURegister source, Register base, int16_t imm12) {
+ GenInstrStoreFP_rri(0b010, base, source, imm12);
+}
+
+void Assembler::fmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, RoundingMode frm) {
+ GenInstrR4(0b00, MADD, rd, rs1, rs2, rs3, frm);
+}
+
+void Assembler::fmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, RoundingMode frm) {
+ GenInstrR4(0b00, MSUB, rd, rs1, rs2, rs3, frm);
+}
+
+void Assembler::fnmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, RoundingMode frm) {
+ GenInstrR4(0b00, NMSUB, rd, rs1, rs2, rs3, frm);
+}
+
+void Assembler::fnmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, RoundingMode frm) {
+ GenInstrR4(0b00, NMADD, rd, rs1, rs2, rs3, frm);
+}
+
+void Assembler::fadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ RoundingMode frm) {
+ GenInstrALUFP_rr(0b0000000, frm, rd, rs1, rs2);
+}
+
+void Assembler::fsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ RoundingMode frm) {
+ GenInstrALUFP_rr(0b0000100, frm, rd, rs1, rs2);
+}
+
+void Assembler::fmul_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ RoundingMode frm) {
+ GenInstrALUFP_rr(0b0001000, frm, rd, rs1, rs2);
+}
+
+void Assembler::fdiv_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ RoundingMode frm) {
+ GenInstrALUFP_rr(0b0001100, frm, rd, rs1, rs2);
+}
+
+void Assembler::fsqrt_s(FPURegister rd, FPURegister rs1, RoundingMode frm) {
+ GenInstrALUFP_rr(0b0101100, frm, rd, rs1, zero_reg);
+}
+
+void Assembler::fsgnj_s(FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010000, 0b000, rd, rs1, rs2);
+}
+
+void Assembler::fsgnjn_s(FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010000, 0b001, rd, rs1, rs2);
+}
+
+void Assembler::fsgnjx_s(FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010000, 0b010, rd, rs1, rs2);
+}
+
+void Assembler::fmin_s(FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010100, 0b000, rd, rs1, rs2);
+}
+
+void Assembler::fmax_s(FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010100, 0b001, rd, rs1, rs2);
+}
+
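+// For FCVT instructions the rs2 field selects the integer conversion type:
+// 0b00000 = W, 0b00001 = WU, 0b00010 = L, 0b00011 = LU (hence the
+// ToRegister(n) arguments used below).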
+void Assembler::fcvt_w_s(Register rd, FPURegister rs1, RoundingMode frm) {
+ GenInstrALUFP_rr(0b1100000, frm, rd, rs1, zero_reg);
+}
+
+void Assembler::fcvt_wu_s(Register rd, FPURegister rs1, RoundingMode frm) {
+ GenInstrALUFP_rr(0b1100000, frm, rd, rs1, ToRegister(1));
+}
+
+void Assembler::fmv_x_w(Register rd, FPURegister rs1) {
+ GenInstrALUFP_rr(0b1110000, 0b000, rd, rs1, zero_reg);
+}
+
+void Assembler::feq_s(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010000, 0b010, rd, rs1, rs2);
+}
+
+void Assembler::flt_s(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010000, 0b001, rd, rs1, rs2);
+}
+
+void Assembler::fle_s(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010000, 0b000, rd, rs1, rs2);
+}
+
+void Assembler::fclass_s(Register rd, FPURegister rs1) {
+ GenInstrALUFP_rr(0b1110000, 0b001, rd, rs1, zero_reg);
+}
+
+void Assembler::fcvt_s_w(FPURegister rd, Register rs1, RoundingMode frm) {
+ GenInstrALUFP_rr(0b1101000, frm, rd, rs1, zero_reg);
+}
+
+void Assembler::fcvt_s_wu(FPURegister rd, Register rs1, RoundingMode frm) {
+ GenInstrALUFP_rr(0b1101000, frm, rd, rs1, ToRegister(1));
+}
+
+void Assembler::fmv_w_x(FPURegister rd, Register rs1) {
+ GenInstrALUFP_rr(0b1111000, 0b000, rd, rs1, zero_reg);
+}
+
+// RV64F Standard Extension (in addition to RV32F)
+
+void Assembler::fcvt_l_s(Register rd, FPURegister rs1, RoundingMode frm) {
+ GenInstrALUFP_rr(0b1100000, frm, rd, rs1, ToRegister(2));
+}
+
+void Assembler::fcvt_lu_s(Register rd, FPURegister rs1, RoundingMode frm) {
+ GenInstrALUFP_rr(0b1100000, frm, rd, rs1, ToRegister(3));
+}
+
+void Assembler::fcvt_s_l(FPURegister rd, Register rs1, RoundingMode frm) {
+ GenInstrALUFP_rr(0b1101000, frm, rd, rs1, ToRegister(2));
+}
+
+void Assembler::fcvt_s_lu(FPURegister rd, Register rs1, RoundingMode frm) {
+ GenInstrALUFP_rr(0b1101000, frm, rd, rs1, ToRegister(3));
+}
+
+// RV32D Standard Extension
+
+void Assembler::fld(FPURegister rd, Register rs1, int16_t imm12) {
+ GenInstrLoadFP_ri(0b011, rd, rs1, imm12);
+}
+
+void Assembler::fsd(FPURegister source, Register base, int16_t imm12) {
+ GenInstrStoreFP_rri(0b011, base, source, imm12);
+}
+
+void Assembler::fmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, RoundingMode frm) {
+ GenInstrR4(0b01, MADD, rd, rs1, rs2, rs3, frm);
+}
+
+void Assembler::fmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, RoundingMode frm) {
+ GenInstrR4(0b01, MSUB, rd, rs1, rs2, rs3, frm);
+}
+
+void Assembler::fnmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, RoundingMode frm) {
+ GenInstrR4(0b01, NMSUB, rd, rs1, rs2, rs3, frm);
+}
+
+void Assembler::fnmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, RoundingMode frm) {
+ GenInstrR4(0b01, NMADD, rd, rs1, rs2, rs3, frm);
+}
+
+void Assembler::fadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ RoundingMode frm) {
+ GenInstrALUFP_rr(0b0000001, frm, rd, rs1, rs2);
+}
+
+void Assembler::fsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ RoundingMode frm) {
+ GenInstrALUFP_rr(0b0000101, frm, rd, rs1, rs2);
+}
+
+void Assembler::fmul_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ RoundingMode frm) {
+ GenInstrALUFP_rr(0b0001001, frm, rd, rs1, rs2);
+}
+
+void Assembler::fdiv_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ RoundingMode frm) {
+ GenInstrALUFP_rr(0b0001101, frm, rd, rs1, rs2);
+}
+
+void Assembler::fsqrt_d(FPURegister rd, FPURegister rs1, RoundingMode frm) {
+ GenInstrALUFP_rr(0b0101101, frm, rd, rs1, zero_reg);
+}
+
+void Assembler::fsgnj_d(FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010001, 0b000, rd, rs1, rs2);
+}
+
+void Assembler::fsgnjn_d(FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010001, 0b001, rd, rs1, rs2);
+}
+
+void Assembler::fsgnjx_d(FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010001, 0b010, rd, rs1, rs2);
+}
+
+void Assembler::fmin_d(FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010101, 0b000, rd, rs1, rs2);
+}
+
+void Assembler::fmax_d(FPURegister rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b0010101, 0b001, rd, rs1, rs2);
+}
+
+void Assembler::fcvt_s_d(FPURegister rd, FPURegister rs1, RoundingMode frm) {
+ GenInstrALUFP_rr(0b0100000, frm, rd, rs1, ToRegister(1));
+}
+
+void Assembler::fcvt_d_s(FPURegister rd, FPURegister rs1, RoundingMode frm) {
+ GenInstrALUFP_rr(0b0100001, frm, rd, rs1, zero_reg);
+}
+
+void Assembler::feq_d(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010001, 0b010, rd, rs1, rs2);
+}
+
+void Assembler::flt_d(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010001, 0b001, rd, rs1, rs2);
+}
+
+void Assembler::fle_d(Register rd, FPURegister rs1, FPURegister rs2) {
+ GenInstrALUFP_rr(0b1010001, 0b000, rd, rs1, rs2);
+}
+
+void Assembler::fclass_d(Register rd, FPURegister rs1) {
+ GenInstrALUFP_rr(0b1110001, 0b001, rd, rs1, zero_reg);
+}
+
+void Assembler::fcvt_w_d(Register rd, FPURegister rs1, RoundingMode frm) {
+ GenInstrALUFP_rr(0b1100001, frm, rd, rs1, zero_reg);
+}
+
+void Assembler::fcvt_wu_d(Register rd, FPURegister rs1, RoundingMode frm) {
+ GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(1));
+}
+
+void Assembler::fcvt_d_w(FPURegister rd, Register rs1, RoundingMode frm) {
+ GenInstrALUFP_rr(0b1101001, frm, rd, rs1, zero_reg);
+}
+
+void Assembler::fcvt_d_wu(FPURegister rd, Register rs1, RoundingMode frm) {
+ GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(1));
+}
+
+// RV64D Standard Extension (in addition to RV32D)
+
+void Assembler::fcvt_l_d(Register rd, FPURegister rs1, RoundingMode frm) {
+ GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(2));
+}
+
+void Assembler::fcvt_lu_d(Register rd, FPURegister rs1, RoundingMode frm) {
+ GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(3));
+}
+
+void Assembler::fmv_x_d(Register rd, FPURegister rs1) {
+ GenInstrALUFP_rr(0b1110001, 0b000, rd, rs1, zero_reg);
+}
+
+void Assembler::fcvt_d_l(FPURegister rd, Register rs1, RoundingMode frm) {
+ GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(2));
+}
+
+void Assembler::fcvt_d_lu(FPURegister rd, Register rs1, RoundingMode frm) {
+ GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(3));
+}
+
+void Assembler::fmv_d_x(FPURegister rd, Register rs1) {
+ GenInstrALUFP_rr(0b1111001, 0b000, rd, rs1, zero_reg);
+}
+
+// RV64C Standard Extension
+void Assembler::c_nop() { GenInstrCI(0b000, C1, zero_reg, 0); }
+
+void Assembler::c_addi(Register rd, int8_t imm6) {
+ DCHECK(rd != zero_reg && imm6 != 0);
+ GenInstrCI(0b000, C1, rd, imm6);
+}
+
+void Assembler::c_addiw(Register rd, int8_t imm6) {
+ DCHECK(rd != zero_reg);
+ GenInstrCI(0b001, C1, rd, imm6);
+}
+
+void Assembler::c_addi16sp(int16_t imm10) {
+ DCHECK(is_int10(imm10) && (imm10 & 0xf) == 0);
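+  // Pack the 10-bit stack adjustment (a multiple of 16) into the six
+  // immediate bits of the CI format, in the order nzimm[9|4|6|8:7|5]
+  // required by the C.ADDI16SP encoding.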
+ uint8_t uimm6 = ((imm10 & 0x200) >> 4) | (imm10 & 0x10) |
+ ((imm10 & 0x40) >> 3) | ((imm10 & 0x180) >> 6) |
+ ((imm10 & 0x20) >> 5);
+ GenInstrCIU(0b011, C1, sp, uimm6);
+}
+
+void Assembler::c_addi4spn(Register rd, int16_t uimm10) {
+ DCHECK(is_uint10(uimm10) && (uimm10 != 0));
+ uint8_t uimm8 = ((uimm10 & 0x4) >> 1) | ((uimm10 & 0x8) >> 3) |
+ ((uimm10 & 0x30) << 2) | ((uimm10 & 0x3c0) >> 4);
+ GenInstrCIW(0b000, C0, rd, uimm8);
+}
+
+void Assembler::c_li(Register rd, int8_t imm6) {
+ DCHECK(rd != zero_reg);
+ GenInstrCI(0b010, C1, rd, imm6);
+}
+
+void Assembler::c_lui(Register rd, int8_t imm6) {
+ DCHECK(rd != zero_reg && rd != sp && imm6 != 0);
+ GenInstrCI(0b011, C1, rd, imm6);
+}
+
+void Assembler::c_slli(Register rd, uint8_t uimm6) {
+ DCHECK(rd != zero_reg && uimm6 != 0);
+ GenInstrCIU(0b000, C2, rd, uimm6);
+}
+
+void Assembler::c_fldsp(FPURegister rd, uint16_t uimm9) {
+ DCHECK(is_uint9(uimm9) && (uimm9 & 0x7) == 0);
+ uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
+ GenInstrCIU(0b001, C2, rd, uimm6);
+}
+
+void Assembler::c_lwsp(Register rd, uint16_t uimm8) {
+ DCHECK(rd != zero_reg && is_uint8(uimm8) && (uimm8 & 0x3) == 0);
+ uint8_t uimm6 = (uimm8 & 0x3c) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCIU(0b010, C2, rd, uimm6);
+}
+
+void Assembler::c_ldsp(Register rd, uint16_t uimm9) {
+ DCHECK(rd != zero_reg && is_uint9(uimm9) && (uimm9 & 0x7) == 0);
+ uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
+ GenInstrCIU(0b011, C2, rd, uimm6);
+}
+
+void Assembler::c_jr(Register rs1) {
+ DCHECK(rs1 != zero_reg);
+ GenInstrCR(0b1000, C2, rs1, zero_reg);
+ BlockTrampolinePoolFor(1);
+}
+
+void Assembler::c_mv(Register rd, Register rs2) {
+ DCHECK(rd != zero_reg && rs2 != zero_reg);
+ GenInstrCR(0b1000, C2, rd, rs2);
+}
+
+void Assembler::c_ebreak() { GenInstrCR(0b1001, C2, zero_reg, zero_reg); }
+
+void Assembler::c_jalr(Register rs1) {
+ DCHECK(rs1 != zero_reg);
+ GenInstrCR(0b1001, C2, rs1, zero_reg);
+ BlockTrampolinePoolFor(1);
+}
+
+void Assembler::c_add(Register rd, Register rs2) {
+ DCHECK(rd != zero_reg && rs2 != zero_reg);
+ GenInstrCR(0b1001, C2, rd, rs2);
+}
+
+// CA Instructions
+void Assembler::c_sub(Register rd, Register rs2) {
+ DCHECK(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100011, C1, rd, 0b00, rs2);
+}
+
+void Assembler::c_xor(Register rd, Register rs2) {
+ DCHECK(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100011, C1, rd, 0b01, rs2);
+}
+
+void Assembler::c_or(Register rd, Register rs2) {
+ DCHECK(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100011, C1, rd, 0b10, rs2);
+}
+
+void Assembler::c_and(Register rd, Register rs2) {
+ DCHECK(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100011, C1, rd, 0b11, rs2);
+}
+
+void Assembler::c_subw(Register rd, Register rs2) {
+ DCHECK(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100111, C1, rd, 0b00, rs2);
+}
+
+void Assembler::c_addw(Register rd, Register rs2) {
+ DCHECK(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs2.code() & 0b11000) == 0b01000));
+ GenInstrCA(0b100111, C1, rd, 0b01, rs2);
+}
+
+void Assembler::c_swsp(Register rs2, uint16_t uimm8) {
+ DCHECK(is_uint8(uimm8) && (uimm8 & 0x3) == 0);
+ uint8_t uimm6 = (uimm8 & 0x3c) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCSS(0b110, C2, rs2, uimm6);
+}
+
+void Assembler::c_sdsp(Register rs2, uint16_t uimm9) {
+ DCHECK(is_uint9(uimm9) && (uimm9 & 0x7) == 0);
+ uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
+ GenInstrCSS(0b111, C2, rs2, uimm6);
+}
+
+void Assembler::c_fsdsp(FPURegister rs2, uint16_t uimm9) {
+ DCHECK(is_uint9(uimm9) && (uimm9 & 0x7) == 0);
+ uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
+ GenInstrCSS(0b101, C2, rs2, uimm6);
+}
+
+// CL Instructions
+
+void Assembler::c_lw(Register rd, Register rs1, uint16_t uimm7) {
+ DCHECK(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint7(uimm7));
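+  // Rearrange the byte offset into the CL-format field order
+  // offset[5:3|2|6] used by C.LW (c_sw below packs its offset the same way).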
+ uint8_t uimm5 =
+ ((uimm7 & 0x4) >> 1) | ((uimm7 & 0x40) >> 6) | ((uimm7 & 0x38) >> 1);
+ GenInstrCL(0b010, C0, rd, rs1, uimm5);
+}
+
+void Assembler::c_ld(Register rd, Register rs1, uint16_t uimm8) {
+ DCHECK(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8));
+ uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCL(0b011, C0, rd, rs1, uimm5);
+}
+
+void Assembler::c_fld(FPURegister rd, Register rs1, uint16_t uimm8) {
+ DCHECK(((rd.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8));
+ uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCL(0b001, C0, rd, rs1, uimm5);
+}
+
+// CS Instructions
+
+void Assembler::c_sw(Register rs2, Register rs1, uint16_t uimm7) {
+ DCHECK(((rs2.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint7(uimm7));
+ uint8_t uimm5 =
+ ((uimm7 & 0x4) >> 1) | ((uimm7 & 0x40) >> 6) | ((uimm7 & 0x38) >> 1);
+ GenInstrCS(0b110, C0, rs2, rs1, uimm5);
+}
+
+void Assembler::c_sd(Register rs2, Register rs1, uint16_t uimm8) {
+ DCHECK(((rs2.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8));
+ uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCS(0b111, C0, rs2, rs1, uimm5);
+}
+
+void Assembler::c_fsd(FPURegister rs2, Register rs1, uint16_t uimm8) {
+ DCHECK(((rs2.code() & 0b11000) == 0b01000) &&
+ ((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8));
+ uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
+ GenInstrCS(0b101, C0, rs2, rs1, uimm5);
+}
+
+// CJ Instructions
+
+void Assembler::c_j(int16_t imm12) {
+ DCHECK(is_int12(imm12));
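+  // Shuffle the 12-bit branch offset into the CJ-format field order
+  // offset[11|4|9:8|10|6|7|3:1|5]; bit 0 of the offset is implicitly zero.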
+ int16_t uimm11 = ((imm12 & 0x800) >> 1) | ((imm12 & 0x400) >> 4) |
+ ((imm12 & 0x300) >> 1) | ((imm12 & 0x80) >> 3) |
+ ((imm12 & 0x40) >> 1) | ((imm12 & 0x20) >> 5) |
+ ((imm12 & 0x10) << 5) | (imm12 & 0xe);
+ GenInstrCJ(0b101, C1, uimm11);
+ BlockTrampolinePoolFor(1);
+}
+
+// Privileged
+
+void Assembler::uret() {
+ GenInstrPriv(0b0000000, ToRegister(0), ToRegister(0b00010));
+}
+
+void Assembler::sret() {
+ GenInstrPriv(0b0001000, ToRegister(0), ToRegister(0b00010));
+}
+
+void Assembler::mret() {
+ GenInstrPriv(0b0011000, ToRegister(0), ToRegister(0b00010));
+}
+
+void Assembler::wfi() {
+ GenInstrPriv(0b0001000, ToRegister(0), ToRegister(0b00101));
+}
+
+void Assembler::sfence_vma(Register rs1, Register rs2) {
+ GenInstrR(0b0001001, 0b000, SYSTEM, ToRegister(0), rs1, rs2);
+}
+
+// Assembler Pseudo Instructions (Tables 25.2 and 25.3, RISC-V Unprivileged ISA)
+
+void Assembler::nop() { addi(ToRegister(0), ToRegister(0), 0); }
+
+void Assembler::RV_li(Register rd, int64_t imm) {
+  // Put the 64-bit imm into register rd.
+  // In most cases imm fits in 32 bits and only 2 instructions are generated.
+  // If a temporary register is available, a full 64-bit immediate takes at
+  // most 6 instructions; without a temporary register the maximum is 8.
+  // When imm is wider than 32 bits and a temporary register is available,
+  // imm is split into two 32-bit parts, low_32 and up_32, each built in its
+  // own register, low_32 first. If low_32 comes out negative (its bit 31 is
+  // set, so it sign-extends to ones in the upper 32 bits), 0xffffffff is
+  // subtracted from up_32 before up_32 is built, which compensates for those
+  // ones when the two registers are added. If no temporary register is
+  // available, the upper 32 bits are built in rd and the lower 32 bits are
+  // divided into three parts (11, 11, and 10 bits) that are shifted into and
+  // added to the upper part already in rd.
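+  // For example (illustrative only): materializing 0x0000123456789ABC with a
+  // scratch register available builds the low 32 bits in rd (lui + addi),
+  // builds 0x1234 in the scratch (lui + addi), shifts the scratch left by 32,
+  // and adds it to rd, for 6 instructions in total.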
+ if (is_int32(imm + 0x800)) {
+ // 32-bit case. Maximum of 2 instructions generated
+ int64_t high_20 = ((imm + 0x800) >> 12);
+ int64_t low_12 = imm << 52 >> 52;
+ if (high_20) {
+ lui(rd, (int32_t)high_20);
+ if (low_12) {
+ addi(rd, rd, low_12);
+ }
+ } else {
+ addi(rd, zero_reg, low_12);
+ }
+ return;
+ } else {
+ // 64-bit case: divide imm into two 32-bit parts, upper and lower
+ int64_t up_32 = imm >> 32;
+ int64_t low_32 = imm & 0xffffffffull;
+ Register temp_reg = rd;
+ // Check if a temporary register is available
+ if (up_32 == 0 || low_32 == 0) {
+ // No temp register is needed
+ } else {
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ temp_reg = temps.hasAvailable() ? temps.Acquire() : no_reg;
+ }
+ if (temp_reg != no_reg) {
+ // keep track of hardware behavior for lower part in sim_low
+ int64_t sim_low = 0;
+ // Build lower part
+ if (low_32 != 0) {
+ int64_t high_20 = ((low_32 + 0x800) >> 12);
+ int64_t low_12 = low_32 & 0xfff;
+ if (high_20) {
+ // Adjust to 20 bits for the case of overflow
+ high_20 &= 0xfffff;
+ sim_low = ((high_20 << 12) << 32) >> 32;
+ lui(rd, (int32_t)high_20);
+ if (low_12) {
+ sim_low += (low_12 << 52 >> 52) | low_12;
+ addi(rd, rd, low_12);
+ }
+ } else {
+ sim_low = low_12;
+ ori(rd, zero_reg, low_12);
+ }
+ }
+ if (sim_low & 0x100000000) {
+ // Bit 31 is 1. Either an overflow or a negative 64 bit
+ if (up_32 == 0) {
+ // Positive number, but overflow because of the add 0x800
+ slli(rd, rd, 32);
+ srli(rd, rd, 32);
+ return;
+ }
+ // low_32 is a negative 64 bit after the build
+ up_32 = (up_32 - 0xffffffff) & 0xffffffff;
+ }
+ if (up_32 == 0) {
+ return;
+ }
+ // Build upper part in a temporary register
+ if (low_32 == 0) {
+ // Build upper part in rd
+ temp_reg = rd;
+ }
+ int64_t high_20 = (up_32 + 0x800) >> 12;
+ int64_t low_12 = up_32 & 0xfff;
+ if (high_20) {
+ // Adjust to 20 bits for the case of overflow
+ high_20 &= 0xfffff;
+ lui(temp_reg, (int32_t)high_20);
+ if (low_12) {
+ addi(temp_reg, temp_reg, low_12);
+ }
+ } else {
+ ori(temp_reg, zero_reg, low_12);
+ }
+    // Put it at the beginning of the register (upper 32 bits)
+ slli(temp_reg, temp_reg, 32);
+ if (low_32 != 0) {
+ add(rd, rd, temp_reg);
+ }
+ return;
+ }
+ // No temp register. Build imm in rd.
+ // Build upper 32 bits first in rd. Divide lower 32 bits parts and add
+ // parts to the upper part by doing shift and add.
+ // First build upper part in rd.
+ int64_t high_20 = (up_32 + 0x800) >> 12;
+ int64_t low_12 = up_32 & 0xfff;
+ if (high_20) {
+ // Adjust to 20 bits for the case of overflow
+ high_20 &= 0xfffff;
+ lui(rd, (int32_t)high_20);
+ if (low_12) {
+ addi(rd, rd, low_12);
+ }
+ } else {
+ ori(rd, zero_reg, low_12);
+ }
+    // The upper part is already in rd. Each part to be added has at most 11
+    // bits and always starts with a 1. rd is shifted left by the size of the
+    // part plus the number of zeros between parts, and the part is merged in
+    // (ori) after the shift.
+ uint32_t mask = 0x80000000;
+ int32_t shift_val = 0;
+ int32_t i;
+ for (i = 0; i < 32; i++) {
+ if ((low_32 & mask) == 0) {
+ mask >>= 1;
+ shift_val++;
+ if (i == 31) {
+ // rest is zero
+ slli(rd, rd, shift_val);
+ }
+ continue;
+ }
+ // The first 1 seen
+ int32_t part;
+ if ((i + 11) < 32) {
+ // Pick 11 bits
+ part = ((uint32_t)(low_32 << i) >> i) >> (32 - (i + 11));
+ slli(rd, rd, shift_val + 11);
+ ori(rd, rd, part);
+ i += 10;
+ mask >>= 11;
+ } else {
+ part = (uint32_t)(low_32 << i) >> i;
+ slli(rd, rd, shift_val + (32 - i));
+ ori(rd, rd, part);
+ break;
+ }
+ shift_val = 0;
+ }
+ }
+}
+
+int Assembler::li_estimate(int64_t imm, bool is_get_temp_reg) {
+ int count = 0;
+ // imitate Assembler::RV_li
+ if (is_int32(imm + 0x800)) {
+ // 32-bit case. Maximum of 2 instructions generated
+ int64_t high_20 = ((imm + 0x800) >> 12);
+ int64_t low_12 = imm << 52 >> 52;
+ if (high_20) {
+ count++;
+ if (low_12) {
+ count++;
+ }
+ } else {
+ count++;
+ }
+ return count;
+ } else {
+ // 64-bit case: divide imm into two 32-bit parts, upper and lower
+ int64_t up_32 = imm >> 32;
+ int64_t low_32 = imm & 0xffffffffull;
+ // Check if a temporary register is available
+ if (is_get_temp_reg) {
+ // keep track of hardware behavior for lower part in sim_low
+ int64_t sim_low = 0;
+ // Build lower part
+ if (low_32 != 0) {
+ int64_t high_20 = ((low_32 + 0x800) >> 12);
+ int64_t low_12 = low_32 & 0xfff;
+ if (high_20) {
+ // Adjust to 20 bits for the case of overflow
+ high_20 &= 0xfffff;
+ sim_low = ((high_20 << 12) << 32) >> 32;
+ count++;
+ if (low_12) {
+ sim_low += (low_12 << 52 >> 52) | low_12;
+ count++;
+ }
+ } else {
+ sim_low = low_12;
+ count++;
+ }
+ }
+ if (sim_low & 0x100000000) {
+ // Bit 31 is 1. Either an overflow or a negative 64 bit
+ if (up_32 == 0) {
+ // Positive number, but overflow because of the add 0x800
+ count++;
+ count++;
+ return count;
+ }
+ // low_32 is a negative 64 bit after the build
+ up_32 = (up_32 - 0xffffffff) & 0xffffffff;
+ }
+ if (up_32 == 0) {
+ return count;
+ }
+ int64_t high_20 = (up_32 + 0x800) >> 12;
+ int64_t low_12 = up_32 & 0xfff;
+ if (high_20) {
+ // Adjust to 20 bits for the case of overflow
+ high_20 &= 0xfffff;
+ count++;
+ if (low_12) {
+ count++;
+ }
+ } else {
+ count++;
+ }
+      // Put it at the beginning of the register (upper 32 bits)
+ count++;
+ if (low_32 != 0) {
+ count++;
+ }
+ return count;
+ }
+ // No temp register. Build imm in rd.
+ // Build upper 32 bits first in rd. Divide lower 32 bits parts and add
+ // parts to the upper part by doing shift and add.
+ // First build upper part in rd.
+ int64_t high_20 = (up_32 + 0x800) >> 12;
+ int64_t low_12 = up_32 & 0xfff;
+ if (high_20) {
+ // Adjust to 20 bits for the case of overflow
+ high_20 &= 0xfffff;
+ count++;
+ if (low_12) {
+ count++;
+ }
+ } else {
+ count++;
+ }
+    // The upper part is already in rd. Each part to be added has at most 11
+    // bits and always starts with a 1. rd is shifted left by the size of the
+    // part plus the number of zeros between parts, and the part is merged in
+    // (ori) after the shift.
+ uint32_t mask = 0x80000000;
+ int32_t shift_val = 0;
+ int32_t i;
+ for (i = 0; i < 32; i++) {
+ if ((low_32 & mask) == 0) {
+ mask >>= 1;
+ shift_val++;
+ if (i == 31) {
+ // rest is zero
+ count++;
+ }
+ continue;
+ }
+ // The first 1 seen
+ int32_t part;
+ if ((i + 11) < 32) {
+ // Pick 11 bits
+ part = ((uint32_t)(low_32 << i) >> i) >> (32 - (i + 11));
+ count++;
+ count++;
+ i += 10;
+ mask >>= 11;
+ } else {
+ part = (uint32_t)(low_32 << i) >> i;
+ count++;
+ count++;
+ break;
+ }
+ shift_val = 0;
+ }
+ }
+ return count;
+}
+
+void Assembler::li_ptr(Register rd, int64_t imm) {
+ // Initialize rd with an address
+ // Pointers are 48 bits
+ // 6 fixed instructions are generated
+ DCHECK_EQ((imm & 0xfff0000000000000ll), 0);
+ int64_t a6 = imm & 0x3f; // bits 0:6. 6 bits
+ int64_t b11 = (imm >> 6) & 0x7ff; // bits 6:11. 11 bits
+ int64_t high_31 = (imm >> 17) & 0x7fffffff; // 31 bits
+ int64_t high_20 = ((high_31 + 0x800) >> 12); // 19 bits
+ int64_t low_12 = high_31 & 0xfff; // 12 bits
+ lui(rd, (int32_t)high_20);
+ addi(rd, rd, low_12); // 31 bits in rd.
+  slli(rd, rd, 11);  // Space for next 11 bits
+  ori(rd, rd, b11);  // 11 bits are put in. 42 bits in rd
+  slli(rd, rd, 6);   // Space for next 6 bits
+  ori(rd, rd, a6);   // 6 bits are put in. 48 bits in rd
+}
+
+void Assembler::li_constant(Register rd, int64_t imm) {
+ DEBUG_PRINTF("li_constant(%d, %lx <%ld>)\n", ToNumber(rd), imm, imm);
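+  // Emit a fixed 8-instruction sequence (lui, addiw, then three slli/addi
+  // pairs) that assembles imm from the most significant bits down; the
+  // (1LL << n) terms pre-compensate for the sign extension of the lower
+  // chunks added later.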
+ lui(rd, (imm + (1LL << 47) + (1LL << 35) + (1LL << 23) + (1LL << 11)) >>
+ 48); // Bits 63:48
+ addiw(rd, rd,
+ (imm + (1LL << 35) + (1LL << 23) + (1LL << 11)) << 16 >>
+ 52); // Bits 47:36
+ slli(rd, rd, 12);
+ addi(rd, rd, (imm + (1LL << 23) + (1LL << 11)) << 28 >> 52); // Bits 35:24
+ slli(rd, rd, 12);
+ addi(rd, rd, (imm + (1LL << 11)) << 40 >> 52); // Bits 23:12
+ slli(rd, rd, 12);
+ addi(rd, rd, imm << 52 >> 52); // Bits 11:0
+}
+
+// Break / Trap instructions.
+void Assembler::break_(uint32_t code, bool break_as_stop) {
+ // We need to invalidate breaks that could be stops as well because the
+ // simulator expects a char pointer after the stop instruction.
+ // See constants-mips.h for explanation.
+ DCHECK(
+ (break_as_stop && code <= kMaxStopCode && code > kMaxWatchpointCode) ||
+ (!break_as_stop && (code > kMaxStopCode || code <= kMaxWatchpointCode)));
+
+  // Since ebreak does not have an immediate field, we use the immediate
+  // field of the lui instruction immediately following the ebreak to encode
+  // the "code" info.
+ ebreak();
+ DCHECK(is_uint20(code));
+ lui(zero_reg, code);
+}
+
+void Assembler::stop(uint32_t code) {
+ DCHECK_GT(code, kMaxWatchpointCode);
+ DCHECK_LE(code, kMaxStopCode);
+#if defined(V8_HOST_ARCH_RISCV64)
+ break_(0x54321);
+#else // V8_HOST_ARCH_RISCV64
+ break_(code, true);
+#endif
+}
+
+// Original MIPS Instructions
+
+// ------------Memory-instructions-------------
+
+bool Assembler::NeedAdjustBaseAndOffset(const MemOperand& src,
+ OffsetAccessType access_type,
+ int second_access_add_to_offset) {
+ bool two_accesses = static_cast<bool>(access_type);
+ DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7.
+
+ // is_int12 must be passed a signed value, hence the static cast below.
+ if (is_int12(src.offset()) &&
+ (!two_accesses || is_int12(static_cast<int32_t>(
+ src.offset() + second_access_add_to_offset)))) {
+ // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified
+ // value) fits into int12.
+ return false;
+ }
+ return true;
+}
+
+void Assembler::AdjustBaseAndOffset(MemOperand* src, Register scratch,
+ OffsetAccessType access_type,
+                                    int second_access_add_to_offset) {
+ // This method is used to adjust the base register and offset pair
+ // for a load/store when the offset doesn't fit into int12.
+
+ // Must not overwrite the register 'base' while loading 'offset'.
+ DCHECK(src->rm() != scratch);
+
+ RV_li(scratch, src->offset());
+ add(scratch, scratch, src->rm());
+ src->offset_ = 0;
+ src->rm_ = scratch;
+}
+
+int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
+ intptr_t pc_delta) {
+ if (RelocInfo::IsInternalReference(rmode)) {
+ int64_t* p = reinterpret_cast<int64_t*>(pc);
+ if (*p == kEndOfJumpChain) {
+ return 0; // Number of instructions patched.
+ }
+ *p += pc_delta;
+ return 2; // Number of instructions patched.
+ }
+ Instr instr = instr_at(pc);
+ DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
+ if (IsLui(instr)) {
+ uint64_t target_address = target_address_at(pc) + pc_delta;
+ DEBUG_PRINTF("target_address 0x%lx\n", target_address);
+ set_target_value_at(pc, target_address);
+ return 8; // Number of instructions patched.
+ } else {
+ UNIMPLEMENTED();
+ return 1;
+ }
+}
+
+void Assembler::GrowBuffer() {
+ DEBUG_PRINTF("GrowBuffer: %p -> ", buffer_start_);
+ // Compute new buffer size.
+ int old_size = buffer_->size();
+ int new_size = std::min(2 * old_size, old_size + 1 * MB);
+
+ // Some internal data structures overflow for very large buffers,
+ // they must ensure that kMaximalBufferSize is not too large.
+ if (new_size > kMaximalBufferSize) {
+ V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
+ }
+
+ // Set up new buffer.
+ std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
+ DCHECK_EQ(new_size, new_buffer->size());
+ byte* new_start = new_buffer->start();
+
+ // Copy the data.
+ intptr_t pc_delta = new_start - buffer_start_;
+ intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
+ size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
+ MemMove(new_start, buffer_start_, pc_offset());
+ MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
+ reloc_size);
+
+ // Switch buffers.
+ buffer_ = std::move(new_buffer);
+ buffer_start_ = new_start;
+ DEBUG_PRINTF("%p\n", buffer_start_);
+ pc_ += pc_delta;
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+ // Relocate runtime entries.
+ Vector<byte> instructions{buffer_start_, pc_offset()};
+ Vector<const byte> reloc_info{reloc_info_writer.pos(), reloc_size};
+ for (RelocIterator it(instructions, reloc_info, 0); !it.done(); it.next()) {
+ RelocInfo::Mode rmode = it.rinfo()->rmode();
+ if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+ RelocateInternalReference(rmode, it.rinfo()->pc(), pc_delta);
+ }
+ }
+ DCHECK(!overflow());
+}
+
+void Assembler::db(uint8_t data) {
+ if (!is_buffer_growth_blocked()) CheckBuffer();
+ DEBUG_PRINTF("%p: constant 0x%x\n", pc_, data);
+ EmitHelper(data);
+}
+
+void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
+ if (!RelocInfo::IsNone(rmode)) {
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ RecordRelocInfo(rmode);
+ }
+ if (!is_buffer_growth_blocked()) CheckBuffer();
+ DEBUG_PRINTF("%p: constant 0x%x\n", pc_, data);
+ EmitHelper(data);
+}
+
+void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
+ if (!RelocInfo::IsNone(rmode)) {
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ RecordRelocInfo(rmode);
+ }
+ if (!is_buffer_growth_blocked()) CheckBuffer();
+ DEBUG_PRINTF("%p: constant 0x%lx\n", pc_, data);
+ EmitHelper(data);
+}
+
+void Assembler::dd(Label* label) {
+ uint64_t data;
+ if (!is_buffer_growth_blocked()) CheckBuffer();
+ if (label->is_bound()) {
+ data = reinterpret_cast<uint64_t>(buffer_start_ + label->pos());
+ } else {
+ data = jump_address(label);
+ internal_reference_positions_.insert(label->pos());
+ }
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ EmitHelper(data);
+}
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ if (!ShouldRecordRelocInfo(rmode)) return;
+ // We do not try to reuse pool constants.
+ RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
+ DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here.
+ reloc_info_writer.Write(&rinfo);
+}
+
+void Assembler::BlockTrampolinePoolFor(int instructions) {
+ DEBUG_PRINTF("\tBlockTrampolinePoolFor %d", instructions);
+ CheckTrampolinePoolQuick(instructions);
+ DEBUG_PRINTF("\tpc_offset %d,BlockTrampolinePoolBefore %d\n", pc_offset(),
+ pc_offset() + instructions * kInstrSize);
+ BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
+}
+
+void Assembler::CheckTrampolinePool() {
+ // Some small sequences of instructions must not be broken up by the
+ // insertion of a trampoline pool; such sequences are protected by setting
+ // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
+ // which are both checked here. Also, recursive calls to CheckTrampolinePool
+ // are blocked by trampoline_pool_blocked_nesting_.
+ DEBUG_PRINTF("\tpc_offset %d no_trampoline_pool_before:%d\n", pc_offset(),
+ no_trampoline_pool_before_);
+ DEBUG_PRINTF("\ttrampoline_pool_blocked_nesting:%d\n",
+ trampoline_pool_blocked_nesting_);
+ if ((trampoline_pool_blocked_nesting_ > 0) ||
+ (pc_offset() < no_trampoline_pool_before_)) {
+ // Emission is currently blocked; make sure we try again as soon as
+ // possible.
+ if (trampoline_pool_blocked_nesting_ > 0) {
+ next_buffer_check_ = pc_offset() + kInstrSize;
+ } else {
+ next_buffer_check_ = no_trampoline_pool_before_;
+ }
+ return;
+ }
+
+ DCHECK(!trampoline_emitted_);
+ DCHECK_GE(unbound_labels_count_, 0);
+ if (unbound_labels_count_ > 0) {
+ // First we emit jump, then we emit trampoline pool.
+ {
+ DEBUG_PRINTF("inserting trampoline pool at %p (%d)\n",
+ reinterpret_cast<Instr*>(buffer_start_ + pc_offset()),
+ pc_offset());
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label after_pool;
+ j(&after_pool);
+
+ int pool_start = pc_offset();
+ for (int i = 0; i < unbound_labels_count_; i++) {
+ int64_t imm64;
+ imm64 = branch_long_offset(&after_pool);
+ DCHECK(is_int32(imm64));
+ int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
+ auipc(t6, Hi20); // Read PC + Hi20 into t6
+ jr(t6, Lo12); // jump PC + Hi20 + Lo12
+ }
+ // If unbound_labels_count_ is big enough, label after_pool will
+ // need a trampoline too, so we must create the trampoline before
+ // the bind operation to make sure function 'bind' can get this
+ // information.
+ trampoline_ = Trampoline(pool_start, unbound_labels_count_);
+ bind(&after_pool);
+
+ trampoline_emitted_ = true;
+ // As we are only going to emit trampoline once, we need to prevent any
+ // further emission.
+ next_buffer_check_ = kMaxInt;
+ }
+ } else {
+ // Number of branches to unbound label at this point is zero, so we can
+ // move next buffer check to maximum.
+ next_buffer_check_ =
+ pc_offset() + kMaxBranchOffset - kTrampolineSlotsSize * 16;
+ }
+ return;
+}
+
+void Assembler::set_target_address_at(Address pc, Address constant_pool,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
+ Instr* instr = reinterpret_cast<Instr*>(pc);
+ if (IsAuipc(*instr)) {
+ DCHECK(IsLd(*reinterpret_cast<Instr*>(pc + 4)));
+ int32_t Hi20 = AuipcOffset(*instr);
+ int32_t Lo12 = LdOffset(*reinterpret_cast<Instr*>(pc + 4));
+ Memory<Address>(pc + Hi20 + Lo12) = target;
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ FlushInstructionCache(pc + Hi20 + Lo12, 2 * kInstrSize);
+ }
+ } else {
+ set_target_address_at(pc, target, icache_flush_mode);
+ }
+}
+
+Address Assembler::target_address_at(Address pc, Address constant_pool) {
+ Instr* instr = reinterpret_cast<Instr*>(pc);
+ if (IsAuipc(*instr)) {
+ DCHECK(IsLd(*reinterpret_cast<Instr*>(pc + 4)));
+ int32_t Hi20 = AuipcOffset(*instr);
+ int32_t Lo12 = LdOffset(*reinterpret_cast<Instr*>(pc + 4));
+ return Memory<Address>(pc + Hi20 + Lo12);
+ } else {
+ return target_address_at(pc);
+ }
+}
+Address Assembler::target_address_at(Address pc) {
+ DEBUG_PRINTF("target_address_at: pc: %lx\t", pc);
+ Instruction* instr0 = Instruction::At((unsigned char*)pc);
+ Instruction* instr1 = Instruction::At((unsigned char*)(pc + 1 * kInstrSize));
+ Instruction* instr2 = Instruction::At((unsigned char*)(pc + 2 * kInstrSize));
+ Instruction* instr3 = Instruction::At((unsigned char*)(pc + 3 * kInstrSize));
+ Instruction* instr4 = Instruction::At((unsigned char*)(pc + 4 * kInstrSize));
+ Instruction* instr5 = Instruction::At((unsigned char*)(pc + 5 * kInstrSize));
+
+ // Interpret instructions for address generated by li: See listing in
+ // Assembler::set_target_address_at() just below.
+ if (IsLui(*reinterpret_cast<Instr*>(instr0)) &&
+ IsAddi(*reinterpret_cast<Instr*>(instr1)) &&
+ IsSlli(*reinterpret_cast<Instr*>(instr2)) &&
+ IsOri(*reinterpret_cast<Instr*>(instr3)) &&
+ IsSlli(*reinterpret_cast<Instr*>(instr4)) &&
+ IsOri(*reinterpret_cast<Instr*>(instr5))) {
+ // Assemble the 64 bit value.
+ int64_t addr = (int64_t)(instr0->Imm20UValue() << kImm20Shift) +
+ (int64_t)instr1->Imm12Value();
+ addr <<= 11;
+ addr |= (int64_t)instr3->Imm12Value();
+ addr <<= 6;
+ addr |= (int64_t)instr5->Imm12Value();
+
+ DEBUG_PRINTF("addr: %lx\n", addr);
+ return static_cast<Address>(addr);
+ }
+ // We should never get here, force a bad address if we do.
+ UNREACHABLE();
+}
+// On RISC-V, a 48-bit target address is stored in a 6-instruction sequence:
+//  lui(reg, (int32_t)high_20);  // 19 high bits
+//  addi(reg, reg, low_12);      // 12 following bits; total is 31 high bits in reg
+//  slli(reg, reg, 11);          // Space for next 11 bits
+//  ori(reg, reg, b11);          // 11 bits are put in. 42 bits in reg
+//  slli(reg, reg, 6);           // Space for next 6 bits
+//  ori(reg, reg, a6);           // 6 bits are put in. All 48 bits in reg
+//
+// Patching the address must replace all instructions, and flush the i-cache.
+// Note that this assumes the use of SV48, the 48-bit virtual memory system.
+void Assembler::set_target_value_at(Address pc, uint64_t target,
+ ICacheFlushMode icache_flush_mode) {
+ DEBUG_PRINTF("set_target_value_at: pc: %lx\ttarget: %lx\n", pc, target);
+ uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+ DCHECK_EQ((target & 0xffff000000000000ll), 0);
+#ifdef DEBUG
+ // Check we have the result from a li macro-instruction.
+ Instruction* instr0 = Instruction::At((unsigned char*)pc);
+ Instruction* instr1 = Instruction::At((unsigned char*)(pc + 1 * kInstrSize));
+ Instruction* instr3 = Instruction::At((unsigned char*)(pc + 3 * kInstrSize));
+ Instruction* instr5 = Instruction::At((unsigned char*)(pc + 5 * kInstrSize));
+ DCHECK(IsLui(*reinterpret_cast<Instr*>(instr0)) &&
+ IsAddi(*reinterpret_cast<Instr*>(instr1)) &&
+ IsOri(*reinterpret_cast<Instr*>(instr3)) &&
+ IsOri(*reinterpret_cast<Instr*>(instr5)));
+#endif
+ int64_t a6 = target & 0x3f; // bits 0:6. 6 bits
+ int64_t b11 = (target >> 6) & 0x7ff; // bits 6:11. 11 bits
+ int64_t high_31 = (target >> 17) & 0x7fffffff; // 31 bits
+ int64_t high_20 = ((high_31 + 0x800) >> 12); // 19 bits
+ int64_t low_12 = high_31 & 0xfff; // 12 bits
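+  // Patch the immediate fields of the existing 6-instruction li sequence in
+  // place: the U-type immediate of the lui (bits 31:12 of *p) and the I-type
+  // immediates (bits 31:20) of the following addi/slli/ori instructions.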
+ *p = *p & 0xfff;
+ *p = *p | ((int32_t)high_20 << 12);
+ *(p + 1) = *(p + 1) & 0xfffff;
+ *(p + 1) = *(p + 1) | ((int32_t)low_12 << 20);
+ *(p + 2) = *(p + 2) & 0xfffff;
+ *(p + 2) = *(p + 2) | (11 << 20);
+ *(p + 3) = *(p + 3) & 0xfffff;
+ *(p + 3) = *(p + 3) | ((int32_t)b11 << 20);
+ *(p + 4) = *(p + 4) & 0xfffff;
+ *(p + 4) = *(p + 4) | (6 << 20);
+ *(p + 5) = *(p + 5) & 0xfffff;
+ *(p + 5) = *(p + 5) | ((int32_t)a6 << 20);
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ FlushInstructionCache(pc, 8 * kInstrSize);
+ }
+ DCHECK_EQ(target_address_at(pc), target);
+}
+UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
+ : available_(assembler->GetScratchRegisterList()),
+ old_available_(*available_) {}
+
+UseScratchRegisterScope::~UseScratchRegisterScope() {
+ *available_ = old_available_;
+}
+
+Register UseScratchRegisterScope::Acquire() {
+ DCHECK_NOT_NULL(available_);
+ DCHECK_NE(*available_, 0);
+ int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
+ *available_ &= ~(1UL << index);
+
+ return Register::from_code(index);
+}
+
+bool UseScratchRegisterScope::hasAvailable() const { return *available_ != 0; }
+
+bool Assembler::IsConstantPoolAt(Instruction* instr) {
+ // The constant pool marker is made of two instructions. These instructions
+ // will never be emitted by the JIT, so checking for the first one is enough:
+ // 0: ld x0, t3, #offset
+ Instr instr_value = *reinterpret_cast<Instr*>(instr);
+
+ bool result = IsLd(instr_value) && (instr->RdValue() == kRegCode_zero_reg);
+ // It is still worth asserting the marker is complete.
+ // 4: j 0
+#ifdef DEBUG
+  Instruction* instr_following = instr + kInstrSize;
+  DCHECK(!result || (IsJal(*reinterpret_cast<Instr*>(instr_following)) &&
+                     instr_following->Imm20JValue() == 0 &&
+                     instr_following->RdValue() == kRegCode_zero_reg));
+#endif
+ return result;
+}
+
+int Assembler::ConstantPoolSizeAt(Instruction* instr) {
+ if (IsConstantPoolAt(instr)) {
+ return instr->Imm12Value();
+ } else {
+ return -1;
+ }
+}
+
+void Assembler::RecordConstPool(int size) {
+ // We only need this for debugger support, to correctly compute offsets in the
+ // code.
+ Assembler::BlockPoolsScope block_pools(this);
+ RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
+}
+
+void Assembler::EmitPoolGuard() {
+ // We must generate only one instruction as this is used in scopes that
+ // control the size of the code generated.
+ j(0);
+}
+
+// Constant Pool
+
+void ConstantPool::EmitPrologue(Alignment require_alignment) {
+  // Recorded constant pool size is expressed in number of 32-bit words,
+ // and includes prologue and alignment, but not the jump around the pool
+ // and the size of the marker itself.
+ const int marker_size = 1;
+ int word_count =
+ ComputeSize(Jump::kOmitted, require_alignment) / kInt32Size - marker_size;
+ assm_->ld(zero_reg, zero_reg, word_count);
+ assm_->EmitPoolGuard();
+}
+
+int ConstantPool::PrologueSize(Jump require_jump) const {
+ // Prologue is:
+ // j over ;; if require_jump
+ // ld x0, t3, #pool_size
+ // j xzr
+ int prologue_size = require_jump == Jump::kRequired ? kInstrSize : 0;
+ prologue_size += 2 * kInstrSize;
+ return prologue_size;
+}
+
+void ConstantPool::SetLoadOffsetToConstPoolEntry(int load_offset,
+ Instruction* entry_offset,
+ const ConstantPoolKey& key) {
+ Instr instr_auipc = assm_->instr_at(load_offset);
+ Instr instr_ld = assm_->instr_at(load_offset + 4);
+ // Instruction to patch must be 'ld t3, t3, offset' with offset == kInstrSize.
+ DCHECK(assm_->IsAuipc(instr_auipc));
+ DCHECK(assm_->IsLd(instr_ld));
+ DCHECK_EQ(assm_->LdOffset(instr_ld), 0);
+ DCHECK_EQ(assm_->AuipcOffset(instr_auipc), 0);
+ int32_t distance = static_cast<int32_t>(
+ reinterpret_cast<Address>(entry_offset) -
+ reinterpret_cast<Address>(assm_->toAddress(load_offset)));
+ int32_t Hi20 = (((int32_t)distance + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)distance << 20 >> 20;
+ CHECK(is_int32(distance));
+ assm_->instr_at_put(load_offset, SetAuipcOffset(Hi20, instr_auipc));
+ assm_->instr_at_put(load_offset + 4, SetLdOffset(Lo12, instr_ld));
+}
+
+void ConstantPool::Check(Emission force_emit, Jump require_jump,
+ size_t margin) {
+  // Some short sequences of instructions must not be broken up by constant
+  // pool emission; such sequences are protected by a ConstPool::BlockScope.
+ if (IsBlocked()) {
+ // Something is wrong if emission is forced and blocked at the same time.
+ DCHECK_EQ(force_emit, Emission::kIfNeeded);
+ return;
+ }
+
+  // We emit a constant pool only if it is not empty and, in addition, either
+  // emission is forced by the force_emit parameter (e.g. at function end) or
+  // emission is mandatory or opportune according to {ShouldEmitNow}.
+ if (!IsEmpty() && (force_emit == Emission::kForced ||
+ ShouldEmitNow(require_jump, margin))) {
+ // Emit veneers for branches that would go out of range during emission of
+ // the constant pool.
+ int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
+
+ // Check that the code buffer is large enough before emitting the constant
+ // pool (this includes the gap to the relocation information).
+ int needed_space = worst_case_size + assm_->kGap;
+ while (assm_->buffer_space() <= needed_space) {
+ assm_->GrowBuffer();
+ }
+
+ EmitAndClear(require_jump);
+ }
+ // Since a constant pool is (now) empty, move the check offset forward by
+ // the standard interval.
+ SetNextCheckIn(ConstantPool::kCheckInterval);
+}
+
+// Pool entries are accessed with a pc-relative load, therefore this cannot be
+// more than 1 * MB. Since constant pool emission checks are interval based,
+// and we want to keep entries close to the code, we try to emit every 64 KB.
+const size_t ConstantPool::kMaxDistToPool32 = 1 * MB;
+const size_t ConstantPool::kMaxDistToPool64 = 1 * MB;
+const size_t ConstantPool::kCheckInterval = 128 * kInstrSize;
+const size_t ConstantPool::kApproxDistToPool32 = 64 * KB;
+const size_t ConstantPool::kApproxDistToPool64 = kApproxDistToPool32;
+
+const size_t ConstantPool::kOpportunityDistToPool32 = 64 * KB;
+const size_t ConstantPool::kOpportunityDistToPool64 = 64 * KB;
+const size_t ConstantPool::kApproxMaxEntryCount = 512;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.h b/deps/v8/src/codegen/riscv64/assembler-riscv64.h
new file mode 100644
index 0000000000..1dcf4e0aae
--- /dev/null
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.h
@@ -0,0 +1,1243 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2021 the V8 project authors. All rights reserved.
+
+#ifndef V8_CODEGEN_RISCV64_ASSEMBLER_RISCV64_H_
+#define V8_CODEGEN_RISCV64_ASSEMBLER_RISCV64_H_
+
+#include <stdio.h>
+
+#include <memory>
+#include <set>
+
+#include "src/codegen/assembler.h"
+#include "src/codegen/constant-pool.h"
+#include "src/codegen/external-reference.h"
+#include "src/codegen/label.h"
+#include "src/codegen/riscv64/constants-riscv64.h"
+#include "src/codegen/riscv64/register-riscv64.h"
+#include "src/objects/contexts.h"
+#include "src/objects/smi.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEBUG_PRINTF(...) \
+ if (FLAG_debug_riscv) { \
+ printf(__VA_ARGS__); \
+ }
+
+class SafepointTableBuilder;
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands.
+constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize;
+constexpr uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
+// Class Operand represents a register or immediate operand in data
+// processing instructions.
+class Operand {
+ public:
+ // Immediate.
+ V8_INLINE explicit Operand(int64_t immediate,
+ RelocInfo::Mode rmode = RelocInfo::NONE)
+ : rm_(no_reg), rmode_(rmode) {
+ value_.immediate = immediate;
+ }
+ V8_INLINE explicit Operand(const ExternalReference& f)
+ : rm_(no_reg), rmode_(RelocInfo::EXTERNAL_REFERENCE) {
+ value_.immediate = static_cast<int64_t>(f.address());
+ }
+ V8_INLINE explicit Operand(const char* s);
+ explicit Operand(Handle<HeapObject> handle);
+ V8_INLINE explicit Operand(Smi value) : rm_(no_reg), rmode_(RelocInfo::NONE) {
+ value_.immediate = static_cast<intptr_t>(value.ptr());
+ }
+
+ static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
+ static Operand EmbeddedStringConstant(const StringConstantBase* str);
+
+ // Register.
+ V8_INLINE explicit Operand(Register rm) : rm_(rm) {}
+
+ // Return true if this is a register operand.
+ V8_INLINE bool is_reg() const;
+
+ inline int64_t immediate() const;
+
+ bool IsImmediate() const { return !rm_.is_valid(); }
+
+ HeapObjectRequest heap_object_request() const {
+ DCHECK(IsHeapObjectRequest());
+ return value_.heap_object_request;
+ }
+
+ bool IsHeapObjectRequest() const {
+ DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
+ DCHECK_IMPLIES(is_heap_object_request_,
+ rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT ||
+ rmode_ == RelocInfo::CODE_TARGET);
+ return is_heap_object_request_;
+ }
+
+ Register rm() const { return rm_; }
+
+ RelocInfo::Mode rmode() const { return rmode_; }
+
+ private:
+ Register rm_;
+ union Value {
+ Value() {}
+ HeapObjectRequest heap_object_request; // if is_heap_object_request_
+ int64_t immediate; // otherwise
+ } value_; // valid if rm_ == no_reg
+ bool is_heap_object_request_ = false;
+ RelocInfo::Mode rmode_;
+
+ friend class Assembler;
+ friend class MacroAssembler;
+};
+
+// On RISC-V we have only one addressing mode with base_reg + offset.
+// Class MemOperand represents a memory operand in load and store instructions.
+class V8_EXPORT_PRIVATE MemOperand : public Operand {
+ public:
+ // Immediate value attached to offset.
+ enum OffsetAddend { offset_minus_one = -1, offset_zero = 0 };
+
+ explicit MemOperand(Register rn, int32_t offset = 0);
+ explicit MemOperand(Register rn, int32_t unit, int32_t multiplier,
+ OffsetAddend offset_addend = offset_zero);
+ int32_t offset() const { return offset_; }
+
+ bool OffsetIsInt12Encodable() const { return is_int12(offset_); }
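+  // Load/store immediates are signed 12-bit values; offsets that do not fit
+  // are rewritten via Assembler::AdjustBaseAndOffset using a scratch register.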
+
+ private:
+ int32_t offset_;
+
+ friend class Assembler;
+};
+
+class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
+ public:
+ // Create an assembler. Instructions and relocation information are emitted
+ // into a buffer, with the instructions starting from the beginning and the
+ // relocation information starting from the end of the buffer. See CodeDesc
+ // for a detailed comment on the layout (globals.h).
+ //
+ // If the provided buffer is nullptr, the assembler allocates and grows its
+ // own buffer. Otherwise it takes ownership of the provided buffer.
+ explicit Assembler(const AssemblerOptions&,
+ std::unique_ptr<AssemblerBuffer> = {});
+
+ virtual ~Assembler() { CHECK(constpool_.IsEmpty()); }
+
+ // GetCode emits any pending (non-emitted) code and fills the descriptor desc.
+ static constexpr int kNoHandlerTable = 0;
+ static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
+ void GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset);
+
+ // Convenience wrapper for code without safepoint or handler tables.
+ void GetCode(Isolate* isolate, CodeDesc* desc) {
+ GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
+ }
+
+ // Unused on this architecture.
+ void MaybeEmitOutOfLineConstantPool() {}
+
+ // Label operations & relative jumps (PPUM Appendix D).
+ //
+ // Takes a branch opcode (cc) and a label (L) and generates
+ // either a backward branch or a forward branch and links it
+ // to the label fixup chain. Usage:
+ //
+ // Label L; // unbound label
+ // j(cc, &L); // forward branch to unbound label
+ // bind(&L); // bind label to the current pc
+ // j(cc, &L); // backward branch to bound label
+ // bind(&L); // illegal: a label may be bound only once
+ //
+ // Note: The same Label can be used for forward and backward branches
+ // but it may be bound only once.
+ void bind(Label* L); // Binds an unbound label L to current code position.
+
+ enum OffsetSize : int {
+ kOffset21 = 21, // RISCV jal
+ kOffset12 = 12, // RISCV imm12
+ kOffset20 = 20, // RISCV imm20
+ kOffset13 = 13, // RISCV branch
+ kOffset32 = 32, // RISCV auipc + instr_I
+ kOffset11 = 11 // RISCV C_J
+ };
+
+  // Determines whether the Label is bound and near enough that a branch
+  // instruction (rather than a jump instruction) can be used to reach it.
+ bool is_near(Label* L);
+ bool is_near(Label* L, OffsetSize bits);
+ bool is_near_branch(Label* L);
+
+ // Get offset from instr.
+ int BranchOffset(Instr instr);
+ int BrachlongOffset(Instr auipc, Instr jalr);
+ int JumpOffset(Instr instr);
+ int CJumpOffset(Instr instr);
+ static int LdOffset(Instr instr);
+ static int AuipcOffset(Instr instr);
+
+  // Returns the branch offset to the given label from the current code
+  // position. Links the label to the current position if it is still
+  // unbound.
+ int32_t branch_offset_helper(Label* L, OffsetSize bits);
+ inline int32_t branch_offset(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset13);
+ }
+ inline int32_t jump_offset(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset21);
+ }
+ inline int16_t cjump_offset(Label* L) {
+ return (int16_t)branch_offset_helper(L, OffsetSize::kOffset11);
+ }
+
+ uint64_t jump_address(Label* L);
+ uint64_t branch_long_offset(Label* L);
+
+  // Puts a label's target address at the given position.
+ // The high 8 bits are set to zero.
+ void label_at_put(Label* L, int at_offset);
+
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ // The isolate argument is unused (and may be nullptr) when skipping flushing.
+ static Address target_address_at(Address pc);
+ V8_INLINE static void set_target_address_at(
+ Address pc, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
+ set_target_value_at(pc, target, icache_flush_mode);
+ }
+
+ static Address target_address_at(Address pc, Address constant_pool);
+
+ static void set_target_address_at(
+ Address pc, Address constant_pool, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+
+ static bool IsConstantPoolAt(Instruction* instr);
+ static int ConstantPoolSizeAt(Instruction* instr);
+ // See Assembler::CheckConstPool for more info.
+ void EmitPoolGuard();
+
+ static void set_target_value_at(
+ Address pc, uint64_t target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+
+ static void JumpLabelToJumpRegister(Address pc);
+
+ // This sets the branch destination (which gets loaded at the call address).
+ // This is for calls and branches within generated code. The serializer
+ // has already deserialized the lui/ori instructions etc.
+ inline static void deserialization_set_special_target_at(
+ Address instruction_payload, Code code, Address target);
+
+ // Get the size of the special target encoded at 'instruction_payload'.
+ inline static int deserialization_special_target_size(
+ Address instruction_payload);
+
+ // This sets the internal reference at the pc.
+ inline static void deserialization_set_target_internal_reference_at(
+ Address pc, Address target,
+ RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
+
+ // Difference between address of current opcode and target address offset.
+ static constexpr int kBranchPCOffset = kInstrSize;
+
+  // Difference between the address of the current opcode and the target
+  // address offset, when we are generating a sequence of instructions for
+  // long relative PC branches.
+ static constexpr int kLongBranchPCOffset = 3 * kInstrSize;
+
+  // Adjust the ra register in the branch delay slot of the bal instruction
+  // so as to skip instructions not needed after optimization of PIC in the
+  // TurboAssembler::BranchAndLink method.
+
+ static constexpr int kOptimizedBranchAndLinkLongReturnOffset = 4 * kInstrSize;
+
+  // Here we are patching the address in the LUI/ADDI instruction pair.
+  // These values are used in the serialization process and must be zero for
+  // the RISC-V platform, as Code, Embedded Object or External-reference
+  // pointers are split across two consecutive instructions and don't exist
+  // separately in the code, so the serializer should not step forward in
+  // memory after a target is resolved and written.
+ static constexpr int kSpecialTargetSize = 0;
+
+  // Number of consecutive instructions used to store a 32-bit/64-bit
+  // constant. This constant was used in the RelocInfo::target_address_address()
+  // function to tell the serializer the address of the instruction that
+  // follows the LUI/ADDI instruction pair.
+ static constexpr int kInstructionsFor32BitConstant = 2;
+ static constexpr int kInstructionsFor64BitConstant = 8;
+
+ // Difference between address of current opcode and value read from pc
+ // register.
+ static constexpr int kPcLoadDelta = 4;
+
+ // Bits available for offset field in branches
+ static constexpr int kBranchOffsetBits = 13;
+
+ // Bits available for offset field in jump
+ static constexpr int kJumpOffsetBits = 21;
+
+  // Bits available for offset field in compressed jump
+ static constexpr int kCJalOffsetBits = 12;
+
+ // Max offset for b instructions with 12-bit offset field (multiple of 2)
+ static constexpr int kMaxBranchOffset = (1 << (13 - 1)) - 1;
+
+ // Max offset for jal instruction with 20-bit offset field (multiple of 2)
+ static constexpr int kMaxJumpOffset = (1 << (21 - 1)) - 1;
+
+ static constexpr int kTrampolineSlotsSize = 2 * kInstrSize;
+
+ RegList* GetScratchRegisterList() { return &scratch_register_list_; }
+
+ // ---------------------------------------------------------------------------
+ // Code generation.
+
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2 (>= 4).
+ void Align(int m);
+ // Insert the smallest number of zero bytes possible to align the pc offset
+  // to a multiple of m. m must be a power of 2 (>= 2).
+ void DataAlign(int m);
+ // Aligns code to something that's optimal for a jump target for the platform.
+ void CodeTargetAlign();
+
+ // Different nop operations are used by the code generator to detect certain
+ // states of the generated code.
+ enum NopMarkerTypes {
+ NON_MARKING_NOP = 0,
+ DEBUG_BREAK_NOP,
+ // IC markers.
+ PROPERTY_ACCESS_INLINED,
+ PROPERTY_ACCESS_INLINED_CONTEXT,
+ PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
+ // Helper values.
+ LAST_CODE_MARKER,
+ FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
+ };
+
+  // RISC-V instructions emitted to a buffer
+
+ void lui(Register rd, int32_t imm20);
+ void auipc(Register rd, int32_t imm20);
+
+ // Jumps
+ void jal(Register rd, int32_t imm20);
+ void jalr(Register rd, Register rs1, int16_t imm12);
+
+ // Branches
+ void beq(Register rs1, Register rs2, int16_t imm12);
+ inline void beq(Register rs1, Register rs2, Label* L) {
+ beq(rs1, rs2, branch_offset(L));
+ }
+ void bne(Register rs1, Register rs2, int16_t imm12);
+ inline void bne(Register rs1, Register rs2, Label* L) {
+ bne(rs1, rs2, branch_offset(L));
+ }
+ void blt(Register rs1, Register rs2, int16_t imm12);
+ inline void blt(Register rs1, Register rs2, Label* L) {
+ blt(rs1, rs2, branch_offset(L));
+ }
+ void bge(Register rs1, Register rs2, int16_t imm12);
+ inline void bge(Register rs1, Register rs2, Label* L) {
+ bge(rs1, rs2, branch_offset(L));
+ }
+ void bltu(Register rs1, Register rs2, int16_t imm12);
+ inline void bltu(Register rs1, Register rs2, Label* L) {
+ bltu(rs1, rs2, branch_offset(L));
+ }
+ void bgeu(Register rs1, Register rs2, int16_t imm12);
+ inline void bgeu(Register rs1, Register rs2, Label* L) {
+ bgeu(rs1, rs2, branch_offset(L));
+ }
+
+ // Loads
+ void lb(Register rd, Register rs1, int16_t imm12);
+ void lh(Register rd, Register rs1, int16_t imm12);
+ void lw(Register rd, Register rs1, int16_t imm12);
+ void lbu(Register rd, Register rs1, int16_t imm12);
+ void lhu(Register rd, Register rs1, int16_t imm12);
+
+ // Stores
+ void sb(Register source, Register base, int16_t imm12);
+ void sh(Register source, Register base, int16_t imm12);
+ void sw(Register source, Register base, int16_t imm12);
+
+ // Arithmetic with immediate
+ void addi(Register rd, Register rs1, int16_t imm12);
+ void slti(Register rd, Register rs1, int16_t imm12);
+ void sltiu(Register rd, Register rs1, int16_t imm12);
+ void xori(Register rd, Register rs1, int16_t imm12);
+ void ori(Register rd, Register rs1, int16_t imm12);
+ void andi(Register rd, Register rs1, int16_t imm12);
+ void slli(Register rd, Register rs1, uint8_t shamt);
+ void srli(Register rd, Register rs1, uint8_t shamt);
+ void srai(Register rd, Register rs1, uint8_t shamt);
+
+ // Arithmetic
+ void add(Register rd, Register rs1, Register rs2);
+ void sub(Register rd, Register rs1, Register rs2);
+ void sll(Register rd, Register rs1, Register rs2);
+ void slt(Register rd, Register rs1, Register rs2);
+ void sltu(Register rd, Register rs1, Register rs2);
+ void xor_(Register rd, Register rs1, Register rs2);
+ void srl(Register rd, Register rs1, Register rs2);
+ void sra(Register rd, Register rs1, Register rs2);
+ void or_(Register rd, Register rs1, Register rs2);
+ void and_(Register rd, Register rs1, Register rs2);
+
+ // Memory fences
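+ // pred and succ are 4-bit sets over {I, O, R, W}: bit 3 = device input,
+ // bit 2 = device output, bit 1 = memory reads, bit 0 = memory writes.
+ // E.g. fence(0b0011, 0b0011) orders only memory reads and writes (fence rw, rw).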
+ void fence(uint8_t pred, uint8_t succ);
+ void fence_tso();
+
+ // Environment call / break
+ void ecall();
+ void ebreak();
+
+ // This is a de facto standard (as set by GNU binutils) 32-bit unimplemented
+ // instruction (i.e., it should always trap, if your implementation has
+ // invalid instruction traps).
+ void unimp();
+
+ // CSR
+ void csrrw(Register rd, ControlStatusReg csr, Register rs1);
+ void csrrs(Register rd, ControlStatusReg csr, Register rs1);
+ void csrrc(Register rd, ControlStatusReg csr, Register rs1);
+ void csrrwi(Register rd, ControlStatusReg csr, uint8_t imm5);
+ void csrrsi(Register rd, ControlStatusReg csr, uint8_t imm5);
+ void csrrci(Register rd, ControlStatusReg csr, uint8_t imm5);
+
+ // RV64I
+ void lwu(Register rd, Register rs1, int16_t imm12);
+ void ld(Register rd, Register rs1, int16_t imm12);
+ void sd(Register source, Register base, int16_t imm12);
+ void addiw(Register rd, Register rs1, int16_t imm12);
+ void slliw(Register rd, Register rs1, uint8_t shamt);
+ void srliw(Register rd, Register rs1, uint8_t shamt);
+ void sraiw(Register rd, Register rs1, uint8_t shamt);
+ void addw(Register rd, Register rs1, Register rs2);
+ void subw(Register rd, Register rs1, Register rs2);
+ void sllw(Register rd, Register rs1, Register rs2);
+ void srlw(Register rd, Register rs1, Register rs2);
+ void sraw(Register rd, Register rs1, Register rs2);
+
+ // RV32M Standard Extension
+ void mul(Register rd, Register rs1, Register rs2);
+ void mulh(Register rd, Register rs1, Register rs2);
+ void mulhsu(Register rd, Register rs1, Register rs2);
+ void mulhu(Register rd, Register rs1, Register rs2);
+ void div(Register rd, Register rs1, Register rs2);
+ void divu(Register rd, Register rs1, Register rs2);
+ void rem(Register rd, Register rs1, Register rs2);
+ void remu(Register rd, Register rs1, Register rs2);
+
+ // RV64M Standard Extension (in addition to RV32M)
+ void mulw(Register rd, Register rs1, Register rs2);
+ void divw(Register rd, Register rs1, Register rs2);
+ void divuw(Register rd, Register rs1, Register rs2);
+ void remw(Register rd, Register rs1, Register rs2);
+ void remuw(Register rd, Register rs1, Register rs2);
+
+ // RV32A Standard Extension
+ void lr_w(bool aq, bool rl, Register rd, Register rs1);
+ void sc_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoswap_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoadd_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoxor_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoand_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoor_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amomin_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amomax_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amominu_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amomaxu_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+
+ // RV64A Standard Extension (in addition to RV32A)
+ void lr_d(bool aq, bool rl, Register rd, Register rs1);
+ void sc_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoswap_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoadd_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoxor_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoand_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amoor_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amomin_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amomax_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amominu_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+ void amomaxu_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
+
+ // RV32F Standard Extension
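+ // (The frm parameters below default to RNE: round to nearest, ties to even.)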
+ void flw(FPURegister rd, Register rs1, int16_t imm12);
+ void fsw(FPURegister source, Register base, int16_t imm12);
+ void fmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, RoundingMode frm = RNE);
+ void fmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, RoundingMode frm = RNE);
+ void fnmsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, RoundingMode frm = RNE);
+ void fnmadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, RoundingMode frm = RNE);
+ void fadd_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ RoundingMode frm = RNE);
+ void fsub_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ RoundingMode frm = RNE);
+ void fmul_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ RoundingMode frm = RNE);
+ void fdiv_s(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ RoundingMode frm = RNE);
+ void fsqrt_s(FPURegister rd, FPURegister rs1, RoundingMode frm = RNE);
+ void fsgnj_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fsgnjn_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fsgnjx_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fmin_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fmax_s(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fcvt_w_s(Register rd, FPURegister rs1, RoundingMode frm = RNE);
+ void fcvt_wu_s(Register rd, FPURegister rs1, RoundingMode frm = RNE);
+ void fmv_x_w(Register rd, FPURegister rs1);
+ void feq_s(Register rd, FPURegister rs1, FPURegister rs2);
+ void flt_s(Register rd, FPURegister rs1, FPURegister rs2);
+ void fle_s(Register rd, FPURegister rs1, FPURegister rs2);
+ void fclass_s(Register rd, FPURegister rs1);
+ void fcvt_s_w(FPURegister rd, Register rs1, RoundingMode frm = RNE);
+ void fcvt_s_wu(FPURegister rd, Register rs1, RoundingMode frm = RNE);
+ void fmv_w_x(FPURegister rd, Register rs1);
+
+ // RV64F Standard Extension (in addition to RV32F)
+ void fcvt_l_s(Register rd, FPURegister rs1, RoundingMode frm = RNE);
+ void fcvt_lu_s(Register rd, FPURegister rs1, RoundingMode frm = RNE);
+ void fcvt_s_l(FPURegister rd, Register rs1, RoundingMode frm = RNE);
+ void fcvt_s_lu(FPURegister rd, Register rs1, RoundingMode frm = RNE);
+
+ // RV32D Standard Extension
+ void fld(FPURegister rd, Register rs1, int16_t imm12);
+ void fsd(FPURegister source, Register base, int16_t imm12);
+ void fmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, RoundingMode frm = RNE);
+ void fmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, RoundingMode frm = RNE);
+ void fnmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, RoundingMode frm = RNE);
+ void fnmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ FPURegister rs3, RoundingMode frm = RNE);
+ void fadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ RoundingMode frm = RNE);
+ void fsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ RoundingMode frm = RNE);
+ void fmul_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ RoundingMode frm = RNE);
+ void fdiv_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
+ RoundingMode frm = RNE);
+ void fsqrt_d(FPURegister rd, FPURegister rs1, RoundingMode frm = RNE);
+ void fsgnj_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fsgnjn_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fsgnjx_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fmin_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fmax_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
+ void fcvt_s_d(FPURegister rd, FPURegister rs1, RoundingMode frm = RNE);
+ void fcvt_d_s(FPURegister rd, FPURegister rs1, RoundingMode frm = RNE);
+ void feq_d(Register rd, FPURegister rs1, FPURegister rs2);
+ void flt_d(Register rd, FPURegister rs1, FPURegister rs2);
+ void fle_d(Register rd, FPURegister rs1, FPURegister rs2);
+ void fclass_d(Register rd, FPURegister rs1);
+ void fcvt_w_d(Register rd, FPURegister rs1, RoundingMode frm = RNE);
+ void fcvt_wu_d(Register rd, FPURegister rs1, RoundingMode frm = RNE);
+ void fcvt_d_w(FPURegister rd, Register rs1, RoundingMode frm = RNE);
+ void fcvt_d_wu(FPURegister rd, Register rs1, RoundingMode frm = RNE);
+
+ // RV64D Standard Extension (in addition to RV32D)
+ void fcvt_l_d(Register rd, FPURegister rs1, RoundingMode frm = RNE);
+ void fcvt_lu_d(Register rd, FPURegister rs1, RoundingMode frm = RNE);
+ void fmv_x_d(Register rd, FPURegister rs1);
+ void fcvt_d_l(FPURegister rd, Register rs1, RoundingMode frm = RNE);
+ void fcvt_d_lu(FPURegister rd, Register rs1, RoundingMode frm = RNE);
+ void fmv_d_x(FPURegister rd, Register rs1);
+
+ // RV64C Standard Extension
+ void c_nop();
+ void c_addi(Register rd, int8_t imm6);
+ void c_addiw(Register rd, int8_t imm6);
+ void c_addi16sp(int16_t imm10);
+ void c_addi4spn(Register rd, int16_t uimm10);
+ void c_li(Register rd, int8_t imm6);
+ void c_lui(Register rd, int8_t imm6);
+ void c_slli(Register rd, uint8_t uimm6);
+ void c_fldsp(FPURegister rd, uint16_t uimm9);
+ void c_lwsp(Register rd, uint16_t uimm8);
+ void c_ldsp(Register rd, uint16_t uimm9);
+ void c_jr(Register rs1);
+ void c_mv(Register rd, Register rs2);
+ void c_ebreak();
+ void c_jalr(Register rs1);
+ void c_j(int16_t imm12);
+ inline void c_j(Label* L) { c_j(cjump_offset(L)); }
+ void c_add(Register rd, Register rs2);
+ void c_sub(Register rd, Register rs2);
+ void c_and(Register rd, Register rs2);
+ void c_xor(Register rd, Register rs2);
+ void c_or(Register rd, Register rs2);
+ void c_subw(Register rd, Register rs2);
+ void c_addw(Register rd, Register rs2);
+ void c_swsp(Register rs2, uint16_t uimm8);
+ void c_sdsp(Register rs2, uint16_t uimm9);
+ void c_fsdsp(FPURegister rs2, uint16_t uimm9);
+ void c_lw(Register rd, Register rs1, uint16_t uimm7);
+ void c_ld(Register rd, Register rs1, uint16_t uimm8);
+ void c_fld(FPURegister rd, Register rs1, uint16_t uimm8);
+ void c_sw(Register rs2, Register rs1, uint16_t uimm7);
+ void c_sd(Register rs2, Register rs1, uint16_t uimm8);
+ void c_fsd(FPURegister rs2, Register rs1, uint16_t uimm8);
+
+ // Privileged
+ void uret();
+ void sret();
+ void mret();
+ void wfi();
+ void sfence_vma(Register rs1, Register rs2);
+
+ // Assembler Pseudo Instructions (Tables 25.2, 25.3, RISC-V Unprivileged ISA)
+ void nop();
+ void RV_li(Register rd, int64_t imm);
+ // Returns the number of instructions required to load the immediate
+ static int li_estimate(int64_t imm, bool is_get_temp_reg = false);
+ // Loads an immediate, always using 8 instructions, regardless of the value,
+ // so that it can be modified later.
+ void li_constant(Register rd, int64_t imm);
+ void li_ptr(Register rd, int64_t imm);
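+ // li_ptr loads a pointer-sized immediate; like li_constant it is assumed to
+ // emit a fixed-length sequence so that the embedded value can be patched.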
+
+ void mv(Register rd, Register rs) { addi(rd, rs, 0); }
+ void not_(Register rd, Register rs) { xori(rd, rs, -1); }
+ void neg(Register rd, Register rs) { sub(rd, zero_reg, rs); }
+ void negw(Register rd, Register rs) { subw(rd, zero_reg, rs); }
+ void sext_w(Register rd, Register rs) { addiw(rd, rs, 0); }
+ void seqz(Register rd, Register rs) { sltiu(rd, rs, 1); }
+ void snez(Register rd, Register rs) { sltu(rd, zero_reg, rs); }
+ void sltz(Register rd, Register rs) { slt(rd, rs, zero_reg); }
+ void sgtz(Register rd, Register rs) { slt(rd, zero_reg, rs); }
+
+ void fmv_s(FPURegister rd, FPURegister rs) { fsgnj_s(rd, rs, rs); }
+ void fabs_s(FPURegister rd, FPURegister rs) { fsgnjx_s(rd, rs, rs); }
+ void fneg_s(FPURegister rd, FPURegister rs) { fsgnjn_s(rd, rs, rs); }
+ void fmv_d(FPURegister rd, FPURegister rs) { fsgnj_d(rd, rs, rs); }
+ void fabs_d(FPURegister rd, FPURegister rs) { fsgnjx_d(rd, rs, rs); }
+ void fneg_d(FPURegister rd, FPURegister rs) { fsgnjn_d(rd, rs, rs); }
+
+ void beqz(Register rs, int16_t imm13) { beq(rs, zero_reg, imm13); }
+ inline void beqz(Register rs1, Label* L) { beqz(rs1, branch_offset(L)); }
+ void bnez(Register rs, int16_t imm13) { bne(rs, zero_reg, imm13); }
+ inline void bnez(Register rs1, Label* L) { bnez(rs1, branch_offset(L)); }
+ void blez(Register rs, int16_t imm13) { bge(zero_reg, rs, imm13); }
+ inline void blez(Register rs1, Label* L) { blez(rs1, branch_offset(L)); }
+ void bgez(Register rs, int16_t imm13) { bge(rs, zero_reg, imm13); }
+ inline void bgez(Register rs1, Label* L) { bgez(rs1, branch_offset(L)); }
+ void bltz(Register rs, int16_t imm13) { blt(rs, zero_reg, imm13); }
+ inline void bltz(Register rs1, Label* L) { bltz(rs1, branch_offset(L)); }
+ void bgtz(Register rs, int16_t imm13) { blt(zero_reg, rs, imm13); }
+
+ inline void bgtz(Register rs1, Label* L) { bgtz(rs1, branch_offset(L)); }
+ void bgt(Register rs1, Register rs2, int16_t imm13) { blt(rs2, rs1, imm13); }
+ inline void bgt(Register rs1, Register rs2, Label* L) {
+ bgt(rs1, rs2, branch_offset(L));
+ }
+ void ble(Register rs1, Register rs2, int16_t imm13) { bge(rs2, rs1, imm13); }
+ inline void ble(Register rs1, Register rs2, Label* L) {
+ ble(rs1, rs2, branch_offset(L));
+ }
+ void bgtu(Register rs1, Register rs2, int16_t imm13) {
+ bltu(rs2, rs1, imm13);
+ }
+ inline void bgtu(Register rs1, Register rs2, Label* L) {
+ bgtu(rs1, rs2, branch_offset(L));
+ }
+ void bleu(Register rs1, Register rs2, int16_t imm13) {
+ bgeu(rs2, rs1, imm13);
+ }
+ inline void bleu(Register rs1, Register rs2, Label* L) {
+ bleu(rs1, rs2, branch_offset(L));
+ }
+
+ void j(int32_t imm21) { jal(zero_reg, imm21); }
+ inline void j(Label* L) { j(jump_offset(L)); }
+ inline void b(Label* L) { j(L); }
+ void jal(int32_t imm21) { jal(ra, imm21); }
+ inline void jal(Label* L) { jal(jump_offset(L)); }
+ void jr(Register rs) { jalr(zero_reg, rs, 0); }
+ void jr(Register rs, int32_t imm12) { jalr(zero_reg, rs, imm12); }
+ void jalr(Register rs, int32_t imm12) { jalr(ra, rs, imm12); }
+ void jalr(Register rs) { jalr(ra, rs, 0); }
+ void ret() { jalr(zero_reg, ra, 0); }
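+ // call splits a 32-bit pc-relative offset into an auipc part (upper 20 bits)
+ // and a jalr part (lower 12 bits). Because jalr sign-extends its immediate
+ // (offset << 20 >> 20), the auipc immediate is rounded up by one whenever
+ // bit 11 of the offset is set; e.g. offset 0x1800 becomes auipc ra, 0x2
+ // followed by jalr ra, ra, -0x800.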
+ void call(int32_t offset) {
+ auipc(ra, (offset >> 12) + ((offset & 0x800) >> 11));
+ jalr(ra, ra, offset << 20 >> 20);
+ }
+
+ // Read instructions-retired counter
+ void rdinstret(Register rd) { csrrs(rd, csr_instret, zero_reg); }
+ void rdinstreth(Register rd) { csrrs(rd, csr_instreth, zero_reg); }
+ void rdcycle(Register rd) { csrrs(rd, csr_cycle, zero_reg); }
+ void rdcycleh(Register rd) { csrrs(rd, csr_cycleh, zero_reg); }
+ void rdtime(Register rd) { csrrs(rd, csr_time, zero_reg); }
+ void rdtimeh(Register rd) { csrrs(rd, csr_timeh, zero_reg); }
+
+ void csrr(Register rd, ControlStatusReg csr) { csrrs(rd, csr, zero_reg); }
+ void csrw(ControlStatusReg csr, Register rs) { csrrw(zero_reg, csr, rs); }
+ void csrs(ControlStatusReg csr, Register rs) { csrrs(zero_reg, csr, rs); }
+ void csrc(ControlStatusReg csr, Register rs) { csrrc(zero_reg, csr, rs); }
+
+ void csrwi(ControlStatusReg csr, uint8_t imm) { csrrwi(zero_reg, csr, imm); }
+ void csrsi(ControlStatusReg csr, uint8_t imm) { csrrsi(zero_reg, csr, imm); }
+ void csrci(ControlStatusReg csr, uint8_t imm) { csrrci(zero_reg, csr, imm); }
+
+ void frcsr(Register rd) { csrrs(rd, csr_fcsr, zero_reg); }
+ void fscsr(Register rd, Register rs) { csrrw(rd, csr_fcsr, rs); }
+ void fscsr(Register rs) { csrrw(zero_reg, csr_fcsr, rs); }
+
+ void frrm(Register rd) { csrrs(rd, csr_frm, zero_reg); }
+ void fsrm(Register rd, Register rs) { csrrw(rd, csr_frm, rs); }
+ void fsrm(Register rs) { csrrw(zero_reg, csr_frm, rs); }
+
+ void frflags(Register rd) { csrrs(rd, csr_fflags, zero_reg); }
+ void fsflags(Register rd, Register rs) { csrrw(rd, csr_fflags, rs); }
+ void fsflags(Register rs) { csrrw(zero_reg, csr_fflags, rs); }
+
+ // Other pseudo instructions that are not part of the RISC-V pseudo assembly
+ void nor(Register rd, Register rs, Register rt) {
+ or_(rd, rs, rt);
+ not_(rd, rd);
+ }
+
+ void sync() { fence(0b1111, 0b1111); }
+ void break_(uint32_t code, bool break_as_stop = false);
+ void stop(uint32_t code = kMaxStopCode);
+
+ // Check the code size generated from label to here.
+ int SizeOfCodeGeneratedSince(Label* label) {
+ return pc_offset() - label->pos();
+ }
+
+ // Check the number of instructions generated from label to here.
+ int InstructionsGeneratedSince(Label* label) {
+ return SizeOfCodeGeneratedSince(label) / kInstrSize;
+ }
+
+ using BlockConstPoolScope = ConstantPool::BlockScope;
+ // Class for scoped postponing of the trampoline pool generation.
+ class BlockTrampolinePoolScope {
+ public:
+ explicit BlockTrampolinePoolScope(Assembler* assem, int margin = 0)
+ : assem_(assem) {
+ assem_->StartBlockTrampolinePool();
+ }
+
+ explicit BlockTrampolinePoolScope(Assembler* assem, PoolEmissionCheck check)
+ : assem_(assem) {
+ assem_->StartBlockTrampolinePool();
+ }
+ ~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); }
+
+ private:
+ Assembler* assem_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
+ };
+
+ // Class for postponing the assembly buffer growth. Typically used for
+ // sequences of instructions that must be emitted as a unit, before
+ // buffer growth (and relocation) can occur.
+ // This blocking scope is not nestable.
+ class BlockGrowBufferScope {
+ public:
+ explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockGrowBuffer();
+ }
+ ~BlockGrowBufferScope() { assem_->EndBlockGrowBuffer(); }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
+ };
+
+ // Record a deoptimization reason that can be used by a log or cpu profiler.
+ // Use --trace-deopt to enable.
+ void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
+ int id);
+
+ static int RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
+ intptr_t pc_delta);
+
+ // Writes a single byte or word of data in the code stream. Used for
+ // inline tables, e.g., jump-tables.
+ void db(uint8_t data);
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ dq(data, rmode);
+ }
+ void dd(Label* label);
+
+ Instruction* pc() const { return reinterpret_cast<Instruction*>(pc_); }
+
+ // Postpone the generation of the trampoline pool for the specified number of
+ // instructions.
+ void BlockTrampolinePoolFor(int instructions);
+
+ // Check if there is less than kGap bytes available in the buffer.
+ // If this is the case, we need to grow the buffer before emitting
+ // an instruction or relocation information.
+ inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
+
+ // Get the number of bytes available in the buffer.
+ inline intptr_t available_space() const {
+ return reloc_info_writer.pos() - pc_;
+ }
+
+ // Read/patch instructions.
+ static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); }
+ static void instr_at_put(Address pc, Instr instr) {
+ *reinterpret_cast<Instr*>(pc) = instr;
+ }
+ Instr instr_at(int pos) {
+ return *reinterpret_cast<Instr*>(buffer_start_ + pos);
+ }
+ void instr_at_put(int pos, Instr instr) {
+ *reinterpret_cast<Instr*>(buffer_start_ + pos) = instr;
+ }
+
+ void instr_at_put(int pos, ShortInstr instr) {
+ *reinterpret_cast<ShortInstr*>(buffer_start_ + pos) = instr;
+ }
+
+ Address toAddress(int pos) {
+ return reinterpret_cast<Address>(buffer_start_ + pos);
+ }
+
+ // Check if an instruction is a branch of some kind.
+ static bool IsBranch(Instr instr);
+ static bool IsJump(Instr instr);
+ static bool IsJal(Instr instr);
+ static bool IsCJal(Instr instr);
+ static bool IsJalr(Instr instr);
+ static bool IsLui(Instr instr);
+ static bool IsAuipc(Instr instr);
+ static bool IsAddiw(Instr instr);
+ static bool IsAddi(Instr instr);
+ static bool IsOri(Instr instr);
+ static bool IsSlli(Instr instr);
+ static bool IsLd(Instr instr);
+ void CheckTrampolinePool();
+
+ inline int UnboundLabelsCount() { return unbound_labels_count_; }
+
+ protected:
+ // Readable constants for the base-and-offset adjustment helper; they indicate
+ // whether, besides the offset itself, another value such as offset + 4 must
+ // also fit into int16.
+ enum class OffsetAccessType : bool {
+ SINGLE_ACCESS = false,
+ TWO_ACCESSES = true
+ };
+
+ // Determine whether the base and offset of a memory load/store need adjusting
+ bool NeedAdjustBaseAndOffset(
+ const MemOperand& src, OffsetAccessType = OffsetAccessType::SINGLE_ACCESS,
+ int second_Access_add_to_offset = 4);
+
+ // Helper function for memory load/store using base register and offset.
+ void AdjustBaseAndOffset(
+ MemOperand* src, Register scratch,
+ OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS,
+ int second_access_add_to_offset = 4);
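+ // (For example, an offset of 0x1234 does not fit the signed 12-bit load/store
+ // immediate, so the helper is expected to fold the out-of-range part into the
+ // scratch register and rewrite the MemOperand to use it as the new base.)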
+
+ inline static void set_target_internal_reference_encoded_at(Address pc,
+ Address target);
+
+ int64_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
+
+ // Decode branch instruction at pos and return branch target pos.
+ int target_at(int pos, bool is_internal);
+
+ // Patch branch instruction at pos to branch to given branch target pos.
+ void target_at_put(int pos, int target_pos, bool is_internal);
+
+ // Say if we need to relocate with this mode.
+ bool MustUseReg(RelocInfo::Mode rmode);
+
+ // Record reloc info for current pc_.
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+ // Block the emission of the trampoline pool before pc_offset.
+ void BlockTrampolinePoolBefore(int pc_offset) {
+ if (no_trampoline_pool_before_ < pc_offset)
+ no_trampoline_pool_before_ = pc_offset;
+ }
+
+ void StartBlockTrampolinePool() {
+ DEBUG_PRINTF("\tStartBlockTrampolinePool\n");
+ trampoline_pool_blocked_nesting_++;
+ }
+
+ void EndBlockTrampolinePool() {
+ trampoline_pool_blocked_nesting_--;
+ DEBUG_PRINTF("\ttrampoline_pool_blocked_nesting:%d\n",
+ trampoline_pool_blocked_nesting_);
+ if (trampoline_pool_blocked_nesting_ == 0) {
+ CheckTrampolinePoolQuick(1);
+ }
+ }
+
+ bool is_trampoline_pool_blocked() const {
+ return trampoline_pool_blocked_nesting_ > 0;
+ }
+
+ bool has_exception() const { return internal_trampoline_exception_; }
+
+ bool is_trampoline_emitted() const { return trampoline_emitted_; }
+
+ // Temporarily block automatic assembly buffer growth.
+ void StartBlockGrowBuffer() {
+ DCHECK(!block_buffer_growth_);
+ block_buffer_growth_ = true;
+ }
+
+ void EndBlockGrowBuffer() {
+ DCHECK(block_buffer_growth_);
+ block_buffer_growth_ = false;
+ }
+
+ bool is_buffer_growth_blocked() const { return block_buffer_growth_; }
+
+ void CheckTrampolinePoolQuick(int extra_instructions = 0) {
+ DEBUG_PRINTF("\tpc_offset:%d %d\n", pc_offset(),
+ next_buffer_check_ - extra_instructions * kInstrSize);
+ if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) {
+ CheckTrampolinePool();
+ }
+ }
+
+ using BlockPoolsScope = BlockTrampolinePoolScope;
+
+ void RecordConstPool(int size);
+
+ void ForceConstantPoolEmissionWithoutJump() {
+ constpool_.Check(Emission::kForced, Jump::kOmitted);
+ }
+ void ForceConstantPoolEmissionWithJump() {
+ constpool_.Check(Emission::kForced, Jump::kRequired);
+ }
+ // Check if the const pool needs to be emitted while pretending that {margin}
+ // more bytes of instructions have already been emitted.
+ void EmitConstPoolWithJumpIfNeeded(size_t margin = 0) {
+ constpool_.Check(Emission::kIfNeeded, Jump::kRequired, margin);
+ }
+
+ void EmitConstPoolWithoutJumpIfNeeded(size_t margin = 0) {
+ constpool_.Check(Emission::kIfNeeded, Jump::kOmitted, margin);
+ }
+
+ void RecordEntry(uint32_t data, RelocInfo::Mode rmode) {
+ constpool_.RecordEntry(data, rmode);
+ }
+
+ void RecordEntry(uint64_t data, RelocInfo::Mode rmode) {
+ constpool_.RecordEntry(data, rmode);
+ }
+
+ private:
+ // Avoid overflows for displacements etc.
+ static const int kMaximalBufferSize = 512 * MB;
+
+ // Buffer size and constant pool distance are checked together at regular
+ // intervals of kBufferCheckInterval emitted bytes.
+ static constexpr int kBufferCheckInterval = 1 * KB / 2;
+
+ // Code generation.
+ // The relocation writer's position is at least kGap bytes below the end of
+ // the generated instructions. This is so that multi-instruction sequences do
+ // not have to check for overflow. The same is true for writes of large
+ // relocation info entries.
+ static constexpr int kGap = 64;
+ STATIC_ASSERT(AssemblerBase::kMinimalBufferSize >= 2 * kGap);
+
+ // Repeated checking whether the trampoline pool should be emitted is rather
+ // expensive. By default we only check again once a number of instructions
+ // has been generated.
+ static constexpr int kCheckConstIntervalInst = 32;
+ static constexpr int kCheckConstInterval =
+ kCheckConstIntervalInst * kInstrSize;
+
+ int next_buffer_check_; // pc offset of next buffer check.
+
+ // Emission of the trampoline pool may be blocked in some code sequences.
+ int trampoline_pool_blocked_nesting_; // Block emission if this is not zero.
+ int no_trampoline_pool_before_; // Block emission before this pc offset.
+
+ // Keep track of the last emitted pool to guarantee a maximal distance.
+ int last_trampoline_pool_end_; // pc offset of the end of the last pool.
+
+ // Automatic growth of the assembly buffer may be blocked for some sequences.
+ bool block_buffer_growth_; // Block growth when true.
+
+ // Relocation information generation.
+ // Each relocation is encoded as a variable size value.
+ static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+ RelocInfoWriter reloc_info_writer;
+
+ // The bound position; before this we cannot do instruction elimination.
+ int last_bound_pos_;
+
+ // Code emission.
+ inline void CheckBuffer();
+ void GrowBuffer();
+ inline void emit(Instr x);
+ inline void emit(ShortInstr x);
+ inline void emit(uint64_t x);
+ template <typename T>
+ inline void EmitHelper(T x);
+
+ static void disassembleInstr(Instr instr);
+
+ // Instruction generation.
+
+ // ----- Top-level instruction formats match those in the ISA manual
+ // (R, I, S, B, U, J). These match the formats defined in LLVM's
+ // RISCVInstrFormats.td.
+ void GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode, Register rd,
+ Register rs1, Register rs2);
+ void GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode, FPURegister rd,
+ FPURegister rs1, FPURegister rs2);
+ void GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode, Register rd,
+ FPURegister rs1, Register rs2);
+ void GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode, FPURegister rd,
+ Register rs1, Register rs2);
+ void GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode, FPURegister rd,
+ FPURegister rs1, Register rs2);
+ void GenInstrR(uint8_t funct7, uint8_t funct3, Opcode opcode, Register rd,
+ FPURegister rs1, FPURegister rs2);
+ void GenInstrR4(uint8_t funct2, Opcode opcode, Register rd, Register rs1,
+ Register rs2, Register rs3, RoundingMode frm);
+ void GenInstrR4(uint8_t funct2, Opcode opcode, FPURegister rd,
+ FPURegister rs1, FPURegister rs2, FPURegister rs3,
+ RoundingMode frm);
+ void GenInstrRAtomic(uint8_t funct5, bool aq, bool rl, uint8_t funct3,
+ Register rd, Register rs1, Register rs2);
+ void GenInstrRFrm(uint8_t funct7, Opcode opcode, Register rd, Register rs1,
+ Register rs2, RoundingMode frm);
+ void GenInstrI(uint8_t funct3, Opcode opcode, Register rd, Register rs1,
+ int16_t imm12);
+ void GenInstrI(uint8_t funct3, Opcode opcode, FPURegister rd, Register rs1,
+ int16_t imm12);
+ void GenInstrIShift(bool arithshift, uint8_t funct3, Opcode opcode,
+ Register rd, Register rs1, uint8_t shamt);
+ void GenInstrIShiftW(bool arithshift, uint8_t funct3, Opcode opcode,
+ Register rd, Register rs1, uint8_t shamt);
+ void GenInstrS(uint8_t funct3, Opcode opcode, Register rs1, Register rs2,
+ int16_t imm12);
+ void GenInstrS(uint8_t funct3, Opcode opcode, Register rs1, FPURegister rs2,
+ int16_t imm12);
+ void GenInstrB(uint8_t funct3, Opcode opcode, Register rs1, Register rs2,
+ int16_t imm12);
+ void GenInstrU(Opcode opcode, Register rd, int32_t imm20);
+ void GenInstrJ(Opcode opcode, Register rd, int32_t imm20);
+ void GenInstrCR(uint8_t funct4, Opcode opcode, Register rd, Register rs2);
+ void GenInstrCA(uint8_t funct6, Opcode opcode, Register rd, uint8_t funct,
+ Register rs2);
+ void GenInstrCI(uint8_t funct3, Opcode opcode, Register rd, int8_t imm6);
+ void GenInstrCIU(uint8_t funct3, Opcode opcode, Register rd, uint8_t uimm6);
+ void GenInstrCIU(uint8_t funct3, Opcode opcode, FPURegister rd,
+ uint8_t uimm6);
+ void GenInstrCIW(uint8_t funct3, Opcode opcode, Register rd, uint8_t uimm8);
+ void GenInstrCSS(uint8_t funct3, Opcode opcode, FPURegister rs2,
+ uint8_t uimm6);
+ void GenInstrCSS(uint8_t funct3, Opcode opcode, Register rs2, uint8_t uimm6);
+ void GenInstrCL(uint8_t funct3, Opcode opcode, Register rd, Register rs1,
+ uint8_t uimm5);
+ void GenInstrCL(uint8_t funct3, Opcode opcode, FPURegister rd, Register rs1,
+ uint8_t uimm5);
+ void GenInstrCS(uint8_t funct3, Opcode opcode, Register rs2, Register rs1,
+ uint8_t uimm5);
+ void GenInstrCS(uint8_t funct3, Opcode opcode, FPURegister rs2, Register rs1,
+ uint8_t uimm5);
+ void GenInstrCJ(uint8_t funct3, Opcode opcode, uint16_t uint11);
+
+ // ----- Instruction class templates match those in LLVM's RISCVInstrInfo.td
+ void GenInstrBranchCC_rri(uint8_t funct3, Register rs1, Register rs2,
+ int16_t imm12);
+ void GenInstrLoad_ri(uint8_t funct3, Register rd, Register rs1,
+ int16_t imm12);
+ void GenInstrStore_rri(uint8_t funct3, Register rs1, Register rs2,
+ int16_t imm12);
+ void GenInstrALU_ri(uint8_t funct3, Register rd, Register rs1, int16_t imm12);
+ void GenInstrShift_ri(bool arithshift, uint8_t funct3, Register rd,
+ Register rs1, uint8_t shamt);
+ void GenInstrALU_rr(uint8_t funct7, uint8_t funct3, Register rd, Register rs1,
+ Register rs2);
+ void GenInstrCSR_ir(uint8_t funct3, Register rd, ControlStatusReg csr,
+ Register rs1);
+ void GenInstrCSR_ii(uint8_t funct3, Register rd, ControlStatusReg csr,
+ uint8_t rs1);
+ void GenInstrShiftW_ri(bool arithshift, uint8_t funct3, Register rd,
+ Register rs1, uint8_t shamt);
+ void GenInstrALUW_rr(uint8_t funct7, uint8_t funct3, Register rd,
+ Register rs1, Register rs2);
+ void GenInstrPriv(uint8_t funct7, Register rs1, Register rs2);
+ void GenInstrLoadFP_ri(uint8_t funct3, FPURegister rd, Register rs1,
+ int16_t imm12);
+ void GenInstrStoreFP_rri(uint8_t funct3, Register rs1, FPURegister rs2,
+ int16_t imm12);
+ void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd,
+ FPURegister rs1, FPURegister rs2);
+ void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd,
+ Register rs1, Register rs2);
+ void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd,
+ FPURegister rs1, Register rs2);
+ void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, Register rd,
+ FPURegister rs1, Register rs2);
+ void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, Register rd,
+ FPURegister rs1, FPURegister rs2);
+
+ // Labels.
+ void print(const Label* L);
+ void bind_to(Label* L, int pos);
+ void next(Label* L, bool is_internal);
+
+ // One trampoline consists of:
+ // - space for trampoline slots,
+ // - space for labels.
+ //
+ // Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
+ // Space for trampoline slots precedes space for labels. Each label takes one
+ // instruction, so the total space for labels is equal to
+ // label_count * kInstrSize.
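+ // (With kTrampolineSlotsSize == 2 * kInstrSize == 8 bytes, a pool of e.g.
+ // 16 slots reserves 16 * 8 = 128 bytes of code space for slots.)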
+ class Trampoline {
+ public:
+ Trampoline() {
+ start_ = 0;
+ next_slot_ = 0;
+ free_slot_count_ = 0;
+ end_ = 0;
+ }
+ Trampoline(int start, int slot_count) {
+ start_ = start;
+ next_slot_ = start;
+ free_slot_count_ = slot_count;
+ end_ = start + slot_count * kTrampolineSlotsSize;
+ }
+ int start() { return start_; }
+ int end() { return end_; }
+ int take_slot() {
+ int trampoline_slot = kInvalidSlotPos;
+ if (free_slot_count_ <= 0) {
+ // We have run out of space on trampolines.
+ // Make sure we fail in debug mode, so we become aware of each case
+ // when this happens.
+ DCHECK(0);
+ // Internal exception will be caught.
+ } else {
+ trampoline_slot = next_slot_;
+ free_slot_count_--;
+ next_slot_ += kTrampolineSlotsSize;
+ }
+ return trampoline_slot;
+ }
+
+ private:
+ int start_;
+ int end_;
+ int next_slot_;
+ int free_slot_count_;
+ };
+
+ int32_t get_trampoline_entry(int32_t pos);
+ int unbound_labels_count_;
+ // After the trampoline is emitted, long branches are used in the generated
+ // code for forward branches whose target offsets could be beyond the reach of
+ // a branch instruction. We use this information to trigger a different mode
+ // of branch-instruction generation, where jump instructions are used rather
+ // than regular branch instructions.
+ bool trampoline_emitted_ = false;
+ static constexpr int kInvalidSlotPos = -1;
+
+ // Internal reference positions, required for unbounded internal reference
+ // labels.
+ std::set<int64_t> internal_reference_positions_;
+ bool is_internal_reference(Label* L) {
+ return internal_reference_positions_.find(L->pos()) !=
+ internal_reference_positions_.end();
+ }
+
+ Trampoline trampoline_;
+ bool internal_trampoline_exception_;
+
+ RegList scratch_register_list_;
+
+ private:
+ ConstantPool constpool_;
+
+ void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+
+ int WriteCodeComments();
+
+ friend class RegExpMacroAssemblerRISCV;
+ friend class RelocInfo;
+ friend class BlockTrampolinePoolScope;
+ friend class EnsureSpace;
+ friend class ConstantPool;
+};
+
+class EnsureSpace {
+ public:
+ explicit inline EnsureSpace(Assembler* assembler);
+};
+
+class V8_EXPORT_PRIVATE UseScratchRegisterScope {
+ public:
+ explicit UseScratchRegisterScope(Assembler* assembler);
+ ~UseScratchRegisterScope();
+
+ Register Acquire();
+ bool hasAvailable() const;
+
+ private:
+ RegList* available_;
+ RegList old_available_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_RISCV64_ASSEMBLER_RISCV64_H_
diff --git a/deps/v8/src/codegen/riscv64/constants-riscv64.cc b/deps/v8/src/codegen/riscv64/constants-riscv64.cc
new file mode 100644
index 0000000000..045488bf7f
--- /dev/null
+++ b/deps/v8/src/codegen/riscv64/constants-riscv64.cc
@@ -0,0 +1,201 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_RISCV64
+
+#include "src/codegen/riscv64/constants-riscv64.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Registers.
+
+// These register names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+const char* Registers::names_[kNumSimuRegisters] = {
+ "zero_reg", "ra", "sp", "gp", "tp", "t0", "t1", "t2", "fp", "s1", "a0",
+ "a1", "a2", "a3", "a4", "a5", "a6", "a7", "s2", "s3", "s4", "s5",
+ "s6", "s7", "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6", "pc"};
+
+// List of alias names which can be used when referring to RISC-V registers.
+const Registers::RegisterAlias Registers::aliases_[] = {
+ {0, "zero"},
+ {33, "pc"},
+ {8, "s0"},
+ {8, "s0_fp"},
+ {kInvalidRegister, nullptr}};
+
+const char* Registers::Name(int reg) {
+ const char* result;
+ if ((0 <= reg) && (reg < kNumSimuRegisters)) {
+ result = names_[reg];
+ } else {
+ result = "noreg";
+ }
+ return result;
+}
+
+int Registers::Number(const char* name) {
+ // Look through the canonical names.
+ for (int i = 0; i < kNumSimuRegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ return i;
+ }
+ }
+
+ // Look through the alias names.
+ int i = 0;
+ while (aliases_[i].reg != kInvalidRegister) {
+ if (strcmp(aliases_[i].name, name) == 0) {
+ return aliases_[i].reg;
+ }
+ i++;
+ }
+
+ // No register with the requested name found.
+ return kInvalidRegister;
+}
+
+/*
+const char* FPURegisters::names_[kNumFPURegisters] = {
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10",
+ "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
+ "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"};
+*/
+const char* FPURegisters::names_[kNumFPURegisters] = {
+ "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7",
+ "fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5",
+ "fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7",
+ "fs8", "fs9", "fs10", "fs11", "ft8", "ft9", "ft10", "ft11"};
+
+// List of alias names which can be used when referring to RISC-V FP registers.
+const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
+ {kInvalidRegister, nullptr}};
+
+const char* FPURegisters::Name(int creg) {
+ const char* result;
+ if ((0 <= creg) && (creg < kNumFPURegisters)) {
+ result = names_[creg];
+ } else {
+ result = "nocreg";
+ }
+ return result;
+}
+
+int FPURegisters::Number(const char* name) {
+ // Look through the canonical names.
+ for (int i = 0; i < kNumFPURegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ return i;
+ }
+ }
+
+ // Look through the alias names.
+ int i = 0;
+ while (aliases_[i].creg != kInvalidRegister) {
+ if (strcmp(aliases_[i].name, name) == 0) {
+ return aliases_[i].creg;
+ }
+ i++;
+ }
+
+ // No FPU register with the requested name found.
+ return kInvalidFPURegister;
+}
+
+InstructionBase::Type InstructionBase::InstructionType() const {
+ // RV64C Instruction
+ if (IsShortInstruction()) {
+ switch (InstructionBits() & kRvcOpcodeMask) {
+ case RO_C_ADDI4SPN:
+ return kCIWType;
+ case RO_C_FLD:
+ case RO_C_LW:
+ case RO_C_LD:
+ return kCLType;
+ case RO_C_FSD:
+ case RO_C_SW:
+ case RO_C_SD:
+ return kCSType;
+ case RO_C_NOP_ADDI:
+ case RO_C_ADDIW:
+ case RO_C_LI:
+ case RO_C_LUI_ADD:
+ return kCIType;
+ case RO_C_MISC_ALU:
+ if (Bits(11, 10) != 0b11)
+ return kCBType;
+ else
+ return kCAType;
+ case RO_C_J:
+ return kCJType;
+ case RO_C_BEQZ:
+ case RO_C_BNEZ:
+ return kCBType;
+ case RO_C_SLLI:
+ case RO_C_FLDSP:
+ case RO_C_LWSP:
+ case RO_C_LDSP:
+ return kCIType;
+ case RO_C_JR_MV_ADD:
+ return kCRType;
+ case RO_C_FSDSP:
+ case RO_C_SWSP:
+ case RO_C_SDSP:
+ return kCSSType;
+ default:
+ break;
+ }
+ } else {
+ // Standard (non-compressed) 32-bit RISC-V instruction
+ switch (InstructionBits() & kBaseOpcodeMask) {
+ case LOAD:
+ return kIType;
+ case LOAD_FP:
+ return kIType;
+ case MISC_MEM:
+ return kIType;
+ case OP_IMM:
+ return kIType;
+ case AUIPC:
+ return kUType;
+ case OP_IMM_32:
+ return kIType;
+ case STORE:
+ return kSType;
+ case STORE_FP:
+ return kSType;
+ case AMO:
+ return kRType;
+ case OP:
+ return kRType;
+ case LUI:
+ return kUType;
+ case OP_32:
+ return kRType;
+ case MADD:
+ case MSUB:
+ case NMSUB:
+ case NMADD:
+ return kR4Type;
+ case OP_FP:
+ return kRType;
+ case BRANCH:
+ return kBType;
+ case JALR:
+ return kIType;
+ case JAL:
+ return kJType;
+ case SYSTEM:
+ return kIType;
+ }
+ }
+ return kUnsupported;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/codegen/riscv64/constants-riscv64.h b/deps/v8/src/codegen/riscv64/constants-riscv64.h
new file mode 100644
index 0000000000..3b5ffff6da
--- /dev/null
+++ b/deps/v8/src/codegen/riscv64/constants-riscv64.h
@@ -0,0 +1,1170 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_RISCV64_CONSTANTS_RISCV64_H_
+#define V8_CODEGEN_RISCV64_CONSTANTS_RISCV64_H_
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/common/globals.h"
+
+// UNIMPLEMENTED_ macro for RISCV.
+#ifdef DEBUG
+#define UNIMPLEMENTED_RISCV() \
+ v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
+ __FILE__, __LINE__, __func__)
+#else
+#define UNIMPLEMENTED_RISCV()
+#endif
+
+#define UNSUPPORTED_RISCV() v8::internal::PrintF("Unsupported instruction.\n")
+
+enum Endianness { kLittle, kBig };
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+static const Endianness kArchEndian = kLittle;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+static const Endianness kArchEndian = kBig;
+#else
+#error Unknown endianness
+#endif
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+const uint32_t kLeastSignificantByteInInt32Offset = 0;
+const uint32_t kLessSignificantWordInDoublewordOffset = 0;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+const uint32_t kLeastSignificantByteInInt32Offset = 3;
+const uint32_t kLessSignificantWordInDoublewordOffset = 4;
+#else
+#error Unknown endianness
+#endif
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+#include <inttypes.h>
+
+// Defines constants and accessor classes to assemble, disassemble and
+// simulate RISC-V instructions.
+//
+// See: The RISC-V Instruction Set Manual
+// Volume I: User-Level ISA
+// Try https://content.riscv.org/wp-content/uploads/2017/05/riscv-spec-v2.2.pdf.
+
+namespace v8 {
+namespace internal {
+
+// TODO(sigurds): Change this value once we use relative jumps.
+constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;
+
+// -----------------------------------------------------------------------------
+// Registers and FPURegisters.
+
+// Number of general purpose registers.
+const int kNumRegisters = 32;
+const int kInvalidRegister = -1;
+
+// Number of registers with pc.
+const int kNumSimuRegisters = 33;
+
+// In the simulator, the PC register is simulated as the 34th register.
+const int kPCRegister = 34;
+
+// Number of coprocessor registers.
+const int kNumFPURegisters = 32;
+const int kInvalidFPURegister = -1;
+
+// 'pref' instruction hints
+const int32_t kPrefHintLoad = 0;
+const int32_t kPrefHintStore = 1;
+const int32_t kPrefHintLoadStreamed = 4;
+const int32_t kPrefHintStoreStreamed = 5;
+const int32_t kPrefHintLoadRetained = 6;
+const int32_t kPrefHintStoreRetained = 7;
+const int32_t kPrefHintWritebackInvalidate = 25;
+const int32_t kPrefHintPrepareForStore = 30;
+
+// The actual value of the root register is offset from the root array's start
+// to take advantage of negative displacement values.
+// TODO(sigurds): Choose best value.
+constexpr int kRootRegisterBias = 256;
+
+// Helper functions for converting between register numbers and names.
+class Registers {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ // Lookup the register number for the name provided.
+ static int Number(const char* name);
+
+ struct RegisterAlias {
+ int reg;
+ const char* name;
+ };
+
+ static const int64_t kMaxValue = 0x7fffffffffffffffl;
+ static const int64_t kMinValue = 0x8000000000000000l;
+
+ private:
+ static const char* names_[kNumSimuRegisters];
+ static const RegisterAlias aliases_[];
+};
+
+// Helper functions for converting between register numbers and names.
+class FPURegisters {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ // Lookup the register number for the name provided.
+ static int Number(const char* name);
+
+ struct RegisterAlias {
+ int creg;
+ const char* name;
+ };
+
+ private:
+ static const char* names_[kNumFPURegisters];
+ static const RegisterAlias aliases_[];
+};
+
+// -----------------------------------------------------------------------------
+// Instructions encoding constants.
+
+// On RISC-V all instructions are 32 bits, except for RVC (compressed)
+// instructions, which are 16 bits.
+using Instr = int32_t;
+using ShortInstr = int16_t;
+
+// Special Software Interrupt codes when used in the presence of the RISC-V
+// simulator.
+enum SoftwareInterruptCodes {
+ // Transition to C code.
+ call_rt_redirected = 0xfffff
+};
+
+// On RISC-V Simulator breakpoints can have different codes:
+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
+// the simulator will run through them and print the registers.
+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
+// instructions (see Assembler::stop()).
+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
+// debugger.
+const uint32_t kMaxWatchpointCode = 31;
+const uint32_t kMaxStopCode = 127;
+STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode);
+
+// ----- Fields offset and length.
+// RISCV constants
+const int kBaseOpcodeShift = 0;
+const int kBaseOpcodeBits = 7;
+const int kFunct7Shift = 25;
+const int kFunct7Bits = 7;
+const int kFunct5Shift = 27;
+const int kFunct5Bits = 5;
+const int kFunct3Shift = 12;
+const int kFunct3Bits = 3;
+const int kFunct2Shift = 25;
+const int kFunct2Bits = 2;
+const int kRs1Shift = 15;
+const int kRs1Bits = 5;
+const int kRs2Shift = 20;
+const int kRs2Bits = 5;
+const int kRs3Shift = 27;
+const int kRs3Bits = 5;
+const int kRdShift = 7;
+const int kRdBits = 5;
+const int kRlShift = 25;
+const int kAqShift = 26;
+const int kImm12Shift = 20;
+const int kImm12Bits = 12;
+const int kImm11Shift = 2;
+const int kImm11Bits = 11;
+const int kShamtShift = 20;
+const int kShamtBits = 5;
+const int kShamtWShift = 20;
+const int kShamtWBits = 6;
+const int kArithShiftShift = 30;
+const int kImm20Shift = 12;
+const int kImm20Bits = 20;
+const int kCsrShift = 20;
+const int kCsrBits = 12;
+const int kMemOrderBits = 4;
+const int kPredOrderShift = 24;
+const int kSuccOrderShift = 20;
+// for C extension
+const int kRvcFunct4Shift = 12;
+const int kRvcFunct4Bits = 4;
+const int kRvcFunct3Shift = 13;
+const int kRvcFunct3Bits = 3;
+const int kRvcRs1Shift = 7;
+const int kRvcRs1Bits = 5;
+const int kRvcRs2Shift = 2;
+const int kRvcRs2Bits = 5;
+const int kRvcRdShift = 7;
+const int kRvcRdBits = 5;
+const int kRvcRs1sShift = 7;
+const int kRvcRs1sBits = 3;
+const int kRvcRs2sShift = 2;
+const int kRvcRs2sBits = 3;
+const int kRvcFunct2Shift = 5;
+const int kRvcFunct2Bits = 2;
+const int kRvcFunct6Shift = 10;
+const int kRvcFunct6Bits = 6;
+
+// RISCV Instruction bit masks
+const int kBaseOpcodeMask = ((1 << kBaseOpcodeBits) - 1) << kBaseOpcodeShift;
+const int kFunct3Mask = ((1 << kFunct3Bits) - 1) << kFunct3Shift;
+const int kFunct5Mask = ((1 << kFunct5Bits) - 1) << kFunct5Shift;
+const int kFunct7Mask = ((1 << kFunct7Bits) - 1) << kFunct7Shift;
+const int kFunct2Mask = 0b11 << kFunct7Shift;
+const int kRTypeMask = kBaseOpcodeMask | kFunct3Mask | kFunct7Mask;
+const int kRATypeMask = kBaseOpcodeMask | kFunct3Mask | kFunct5Mask;
+const int kRFPTypeMask = kBaseOpcodeMask | kFunct7Mask;
+const int kR4TypeMask = kBaseOpcodeMask | kFunct3Mask | kFunct2Mask;
+const int kITypeMask = kBaseOpcodeMask | kFunct3Mask;
+const int kSTypeMask = kBaseOpcodeMask | kFunct3Mask;
+const int kBTypeMask = kBaseOpcodeMask | kFunct3Mask;
+const int kUTypeMask = kBaseOpcodeMask;
+const int kJTypeMask = kBaseOpcodeMask;
+const int kRs1FieldMask = ((1 << kRs1Bits) - 1) << kRs1Shift;
+const int kRs2FieldMask = ((1 << kRs2Bits) - 1) << kRs2Shift;
+const int kRs3FieldMask = ((1 << kRs3Bits) - 1) << kRs3Shift;
+const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
+const int kBImm12Mask = kFunct7Mask | kRdFieldMask;
+const int kImm20Mask = ((1 << kImm20Bits) - 1) << kImm20Shift;
+const int kImm12Mask = ((1 << kImm12Bits) - 1) << kImm12Shift;
+const int kImm11Mask = ((1 << kImm11Bits) - 1) << kImm11Shift;
+const int kImm31_12Mask = ((1 << 20) - 1) << 12;
+const int kImm19_0Mask = ((1 << 20) - 1);
+const int kRvcOpcodeMask =
+ 0b11 | (((1 << kRvcFunct3Bits) - 1) << kRvcFunct3Shift);
+const int kRvcFunct3Mask = (((1 << kRvcFunct3Bits) - 1) << kRvcFunct3Shift);
+const int kRvcFunct4Mask = (((1 << kRvcFunct4Bits) - 1) << kRvcFunct4Shift);
+const int kRvcFunct6Mask = (((1 << kRvcFunct6Bits) - 1) << kRvcFunct6Shift);
+const int kRvcFunct2Mask = (((1 << kRvcFunct2Bits) - 1) << kRvcFunct2Shift);
+const int kCRTypeMask = kRvcOpcodeMask | kRvcFunct4Mask;
+const int kCSTypeMask = kRvcOpcodeMask | kRvcFunct6Mask;
+const int kCATypeMask = kRvcOpcodeMask | kRvcFunct6Mask | kRvcFunct2Mask;
+
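+// Worked example (illustrative only): the I-type instruction addi a0, a1, 4
+// (rd = x10, rs1 = x11, funct3 = 0b000, opcode OP_IMM = 0b0010011) assembles to
+//   (4 << kImm12Shift) | (11 << kRs1Shift) | (0b000 << kFunct3Shift) |
+//   (10 << kRdShift) | 0b0010011 = 0x00458513.
+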
+// RISCV CSR related bit mask and shift
+const int kFcsrFlagsBits = 5;
+const int kFcsrFlagsMask = (1 << kFcsrFlagsBits) - 1;
+const int kFcsrFrmBits = 3;
+const int kFcsrFrmShift = kFcsrFlagsBits;
+const int kFcsrFrmMask = ((1 << kFcsrFrmBits) - 1) << kFcsrFrmShift;
+const int kFcsrBits = kFcsrFlagsBits + kFcsrFrmBits;
+const int kFcsrMask = kFcsrFlagsMask | kFcsrFrmMask;
+
+// Original MIPS constants
+// TODO(RISCV): to be cleaned up
+const int kImm16Shift = 0;
+const int kImm16Bits = 16;
+const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
+// end of TODO(RISCV): to be cleaned up
+
+// ----- RISCV Base Opcodes
+
+enum BaseOpcode : uint32_t {};
+
+// ----- RISC-V Opcodes and Function Fields.
+enum Opcode : uint32_t {
+ LOAD = 0b0000011, // I form: LB LH LW LBU LHU
+ LOAD_FP = 0b0000111, // I form: FLW FLD FLQ
+ MISC_MEM = 0b0001111, // I special form: FENCE FENCE.I
+ OP_IMM = 0b0010011, // I form: ADDI SLTI SLTIU XORI ORI ANDI SLLI SRLI SRAI
+ // Note: SLLI/SRLI/SRAI I form first, then func3 001/101 => R type
+ AUIPC = 0b0010111, // U form: AUIPC
+ OP_IMM_32 = 0b0011011, // I form: ADDIW SLLIW SRLIW SRAIW
+ // Note: SRLIW SRAIW I form first, then func3 101 special shift encoding
+ STORE = 0b0100011, // S form: SB SH SW SD
+ STORE_FP = 0b0100111, // S form: FSW FSD FSQ
+ AMO = 0b0101111, // R form: All A instructions
+ OP = 0b0110011, // R: ADD SUB SLL SLT SLTU XOR SRL SRA OR AND and 32M set
+ LUI = 0b0110111, // U form: LUI
+ OP_32 = 0b0111011, // R: ADDW SUBW SLLW SRLW SRAW MULW DIVW DIVUW REMW REMUW
+ MADD = 0b1000011, // R4 type: FMADD.S FMADD.D FMADD.Q
+ MSUB = 0b1000111, // R4 type: FMSUB.S FMSUB.D FMSUB.Q
+ NMSUB = 0b1001011, // R4 type: FNMSUB.S FNMSUB.D FNMSUB.Q
+ NMADD = 0b1001111, // R4 type: FNMADD.S FNMADD.D FNMADD.Q
+ OP_FP = 0b1010011, // R type: Q ext
+ BRANCH = 0b1100011, // B form: BEQ BNE, BLT, BGE, BLTU BGEU
+ JALR = 0b1100111, // I form: JALR
+ JAL = 0b1101111, // J form: JAL
+ SYSTEM = 0b1110011, // I form: ECALL EBREAK Zicsr ext
+ // C extension
+ C0 = 0b00,
+ C1 = 0b01,
+ C2 = 0b10,
+ FUNCT2_0 = 0b00,
+ FUNCT2_1 = 0b01,
+ FUNCT2_2 = 0b10,
+ FUNCT2_3 = 0b11,
+
+ // Note: the RO_ (RISC-V Opcode) prefix is used for the fully encoded opcodes below
+ // RV32I Base Instruction Set
+ RO_LUI = LUI,
+ RO_AUIPC = AUIPC,
+ RO_JAL = JAL,
+ RO_JALR = JALR | (0b000 << kFunct3Shift),
+ RO_BEQ = BRANCH | (0b000 << kFunct3Shift),
+ RO_BNE = BRANCH | (0b001 << kFunct3Shift),
+ RO_BLT = BRANCH | (0b100 << kFunct3Shift),
+ RO_BGE = BRANCH | (0b101 << kFunct3Shift),
+ RO_BLTU = BRANCH | (0b110 << kFunct3Shift),
+ RO_BGEU = BRANCH | (0b111 << kFunct3Shift),
+ RO_LB = LOAD | (0b000 << kFunct3Shift),
+ RO_LH = LOAD | (0b001 << kFunct3Shift),
+ RO_LW = LOAD | (0b010 << kFunct3Shift),
+ RO_LBU = LOAD | (0b100 << kFunct3Shift),
+ RO_LHU = LOAD | (0b101 << kFunct3Shift),
+ RO_SB = STORE | (0b000 << kFunct3Shift),
+ RO_SH = STORE | (0b001 << kFunct3Shift),
+ RO_SW = STORE | (0b010 << kFunct3Shift),
+ RO_ADDI = OP_IMM | (0b000 << kFunct3Shift),
+ RO_SLTI = OP_IMM | (0b010 << kFunct3Shift),
+ RO_SLTIU = OP_IMM | (0b011 << kFunct3Shift),
+ RO_XORI = OP_IMM | (0b100 << kFunct3Shift),
+ RO_ORI = OP_IMM | (0b110 << kFunct3Shift),
+ RO_ANDI = OP_IMM | (0b111 << kFunct3Shift),
+ RO_SLLI = OP_IMM | (0b001 << kFunct3Shift),
+ RO_SRLI = OP_IMM | (0b101 << kFunct3Shift),
+ // RO_SRAI = OP_IMM | (0b101 << kFunct3Shift), // Same as SRLI, use func7
+ RO_ADD = OP | (0b000 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SUB = OP | (0b000 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
+ RO_SLL = OP | (0b001 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SLT = OP | (0b010 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SLTU = OP | (0b011 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_XOR = OP | (0b100 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SRL = OP | (0b101 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SRA = OP | (0b101 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
+ RO_OR = OP | (0b110 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_AND = OP | (0b111 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_FENCE = MISC_MEM | (0b000 << kFunct3Shift),
+ RO_ECALL = SYSTEM | (0b000 << kFunct3Shift),
+ // RO_EBREAK = SYSTEM | (0b000 << kFunct3Shift), // Same as ECALL, use imm12
+
+ // RV64I Base Instruction Set (in addition to RV32I)
+ RO_LWU = LOAD | (0b110 << kFunct3Shift),
+ RO_LD = LOAD | (0b011 << kFunct3Shift),
+ RO_SD = STORE | (0b011 << kFunct3Shift),
+ RO_ADDIW = OP_IMM_32 | (0b000 << kFunct3Shift),
+ RO_SLLIW = OP_IMM_32 | (0b001 << kFunct3Shift),
+ RO_SRLIW = OP_IMM_32 | (0b101 << kFunct3Shift),
+ // RO_SRAIW = OP_IMM_32 | (0b101 << kFunct3Shift), // Same as SRLIW, use func7
+ RO_ADDW = OP_32 | (0b000 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SUBW = OP_32 | (0b000 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
+ RO_SLLW = OP_32 | (0b001 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SRLW = OP_32 | (0b101 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
+ RO_SRAW = OP_32 | (0b101 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
+
+ // RV32/RV64 Zifencei Standard Extension
+ RO_FENCE_I = MISC_MEM | (0b001 << kFunct3Shift),
+
+ // RV32/RV64 Zicsr Standard Extension
+ RO_CSRRW = SYSTEM | (0b001 << kFunct3Shift),
+ RO_CSRRS = SYSTEM | (0b010 << kFunct3Shift),
+ RO_CSRRC = SYSTEM | (0b011 << kFunct3Shift),
+ RO_CSRRWI = SYSTEM | (0b101 << kFunct3Shift),
+ RO_CSRRSI = SYSTEM | (0b110 << kFunct3Shift),
+ RO_CSRRCI = SYSTEM | (0b111 << kFunct3Shift),
+
+ // RV32M Standard Extension
+ RO_MUL = OP | (0b000 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_MULH = OP | (0b001 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_MULHSU = OP | (0b010 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_MULHU = OP | (0b011 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_DIV = OP | (0b100 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_DIVU = OP | (0b101 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_REM = OP | (0b110 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_REMU = OP | (0b111 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+
+ // RV64M Standard Extension (in addition to RV32M)
+ RO_MULW = OP_32 | (0b000 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_DIVW = OP_32 | (0b100 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_DIVUW = OP_32 | (0b101 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_REMW = OP_32 | (0b110 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+ RO_REMUW = OP_32 | (0b111 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
+
+ // RV32A Standard Extension
+ RO_LR_W = AMO | (0b010 << kFunct3Shift) | (0b00010 << kFunct5Shift),
+ RO_SC_W = AMO | (0b010 << kFunct3Shift) | (0b00011 << kFunct5Shift),
+ RO_AMOSWAP_W = AMO | (0b010 << kFunct3Shift) | (0b00001 << kFunct5Shift),
+ RO_AMOADD_W = AMO | (0b010 << kFunct3Shift) | (0b00000 << kFunct5Shift),
+ RO_AMOXOR_W = AMO | (0b010 << kFunct3Shift) | (0b00100 << kFunct5Shift),
+ RO_AMOAND_W = AMO | (0b010 << kFunct3Shift) | (0b01100 << kFunct5Shift),
+ RO_AMOOR_W = AMO | (0b010 << kFunct3Shift) | (0b01000 << kFunct5Shift),
+ RO_AMOMIN_W = AMO | (0b010 << kFunct3Shift) | (0b10000 << kFunct5Shift),
+ RO_AMOMAX_W = AMO | (0b010 << kFunct3Shift) | (0b10100 << kFunct5Shift),
+ RO_AMOMINU_W = AMO | (0b010 << kFunct3Shift) | (0b11000 << kFunct5Shift),
+ RO_AMOMAXU_W = AMO | (0b010 << kFunct3Shift) | (0b11100 << kFunct5Shift),
+
+ // RV64A Standard Extension (in addition to RV32A)
+ RO_LR_D = AMO | (0b011 << kFunct3Shift) | (0b00010 << kFunct5Shift),
+ RO_SC_D = AMO | (0b011 << kFunct3Shift) | (0b00011 << kFunct5Shift),
+ RO_AMOSWAP_D = AMO | (0b011 << kFunct3Shift) | (0b00001 << kFunct5Shift),
+ RO_AMOADD_D = AMO | (0b011 << kFunct3Shift) | (0b00000 << kFunct5Shift),
+ RO_AMOXOR_D = AMO | (0b011 << kFunct3Shift) | (0b00100 << kFunct5Shift),
+ RO_AMOAND_D = AMO | (0b011 << kFunct3Shift) | (0b01100 << kFunct5Shift),
+ RO_AMOOR_D = AMO | (0b011 << kFunct3Shift) | (0b01000 << kFunct5Shift),
+ RO_AMOMIN_D = AMO | (0b011 << kFunct3Shift) | (0b10000 << kFunct5Shift),
+ RO_AMOMAX_D = AMO | (0b011 << kFunct3Shift) | (0b10100 << kFunct5Shift),
+ RO_AMOMINU_D = AMO | (0b011 << kFunct3Shift) | (0b11000 << kFunct5Shift),
+ RO_AMOMAXU_D = AMO | (0b011 << kFunct3Shift) | (0b11100 << kFunct5Shift),
+
+ // RV32F Standard Extension
+ RO_FLW = LOAD_FP | (0b010 << kFunct3Shift),
+ RO_FSW = STORE_FP | (0b010 << kFunct3Shift),
+ RO_FMADD_S = MADD | (0b00 << kFunct2Shift),
+ RO_FMSUB_S = MSUB | (0b00 << kFunct2Shift),
+ RO_FNMSUB_S = NMSUB | (0b00 << kFunct2Shift),
+ RO_FNMADD_S = NMADD | (0b00 << kFunct2Shift),
+ RO_FADD_S = OP_FP | (0b0000000 << kFunct7Shift),
+ RO_FSUB_S = OP_FP | (0b0000100 << kFunct7Shift),
+ RO_FMUL_S = OP_FP | (0b0001000 << kFunct7Shift),
+ RO_FDIV_S = OP_FP | (0b0001100 << kFunct7Shift),
+ RO_FSQRT_S = OP_FP | (0b0101100 << kFunct7Shift) | (0b00000 << kRs2Shift),
+ RO_FSGNJ_S = OP_FP | (0b000 << kFunct3Shift) | (0b0010000 << kFunct7Shift),
+ RO_FSGNJN_S = OP_FP | (0b001 << kFunct3Shift) | (0b0010000 << kFunct7Shift),
+ RO_FSQNJX_S = OP_FP | (0b010 << kFunct3Shift) | (0b0010000 << kFunct7Shift),
+ RO_FMIN_S = OP_FP | (0b000 << kFunct3Shift) | (0b0010100 << kFunct7Shift),
+ RO_FMAX_S = OP_FP | (0b001 << kFunct3Shift) | (0b0010100 << kFunct7Shift),
+ RO_FCVT_W_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00000 << kRs2Shift),
+ RO_FCVT_WU_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00001 << kRs2Shift),
+ RO_FMV = OP_FP | (0b1110000 << kFunct7Shift) | (0b000 << kFunct3Shift) |
+ (0b00000 << kRs2Shift),
+ RO_FEQ_S = OP_FP | (0b010 << kFunct3Shift) | (0b1010000 << kFunct7Shift),
+ RO_FLT_S = OP_FP | (0b001 << kFunct3Shift) | (0b1010000 << kFunct7Shift),
+ RO_FLE_S = OP_FP | (0b000 << kFunct3Shift) | (0b1010000 << kFunct7Shift),
+ RO_FCLASS_S = OP_FP | (0b001 << kFunct3Shift) | (0b1110000 << kFunct7Shift),
+ RO_FCVT_S_W = OP_FP | (0b1101000 << kFunct7Shift) | (0b00000 << kRs2Shift),
+ RO_FCVT_S_WU = OP_FP | (0b1101000 << kFunct7Shift) | (0b00001 << kRs2Shift),
+ RO_FMV_W_X = OP_FP | (0b000 << kFunct3Shift) | (0b1111000 << kFunct7Shift),
+
+ // RV64F Standard Extension (in addition to RV32F)
+ RO_FCVT_L_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00010 << kRs2Shift),
+ RO_FCVT_LU_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00011 << kRs2Shift),
+ RO_FCVT_S_L = OP_FP | (0b1101000 << kFunct7Shift) | (0b00010 << kRs2Shift),
+ RO_FCVT_S_LU = OP_FP | (0b1101000 << kFunct7Shift) | (0b00011 << kRs2Shift),
+
+ // RV32D Standard Extension
+ RO_FLD = LOAD_FP | (0b011 << kFunct3Shift),
+ RO_FSD = STORE_FP | (0b011 << kFunct3Shift),
+ RO_FMADD_D = MADD | (0b01 << kFunct2Shift),
+ RO_FMSUB_D = MSUB | (0b01 << kFunct2Shift),
+ RO_FNMSUB_D = NMSUB | (0b01 << kFunct2Shift),
+ RO_FNMADD_D = NMADD | (0b01 << kFunct2Shift),
+ RO_FADD_D = OP_FP | (0b0000001 << kFunct7Shift),
+ RO_FSUB_D = OP_FP | (0b0000101 << kFunct7Shift),
+ RO_FMUL_D = OP_FP | (0b0001001 << kFunct7Shift),
+ RO_FDIV_D = OP_FP | (0b0001101 << kFunct7Shift),
+ RO_FSQRT_D = OP_FP | (0b0101101 << kFunct7Shift) | (0b00000 << kRs2Shift),
+ RO_FSGNJ_D = OP_FP | (0b000 << kFunct3Shift) | (0b0010001 << kFunct7Shift),
+ RO_FSGNJN_D = OP_FP | (0b001 << kFunct3Shift) | (0b0010001 << kFunct7Shift),
+ RO_FSQNJX_D = OP_FP | (0b010 << kFunct3Shift) | (0b0010001 << kFunct7Shift),
+ RO_FMIN_D = OP_FP | (0b000 << kFunct3Shift) | (0b0010101 << kFunct7Shift),
+ RO_FMAX_D = OP_FP | (0b001 << kFunct3Shift) | (0b0010101 << kFunct7Shift),
+ RO_FCVT_S_D = OP_FP | (0b0100000 << kFunct7Shift) | (0b00001 << kRs2Shift),
+ RO_FCVT_D_S = OP_FP | (0b0100001 << kFunct7Shift) | (0b00000 << kRs2Shift),
+ RO_FEQ_D = OP_FP | (0b010 << kFunct3Shift) | (0b1010001 << kFunct7Shift),
+ RO_FLT_D = OP_FP | (0b001 << kFunct3Shift) | (0b1010001 << kFunct7Shift),
+ RO_FLE_D = OP_FP | (0b000 << kFunct3Shift) | (0b1010001 << kFunct7Shift),
+ RO_FCLASS_D = OP_FP | (0b001 << kFunct3Shift) | (0b1110001 << kFunct7Shift) |
+ (0b00000 << kRs2Shift),
+ RO_FCVT_W_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00000 << kRs2Shift),
+ RO_FCVT_WU_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00001 << kRs2Shift),
+ RO_FCVT_D_W = OP_FP | (0b1101001 << kFunct7Shift) | (0b00000 << kRs2Shift),
+ RO_FCVT_D_WU = OP_FP | (0b1101001 << kFunct7Shift) | (0b00001 << kRs2Shift),
+
+ // RV64D Standard Extension (in addition to RV32D)
+ RO_FCVT_L_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00010 << kRs2Shift),
+ RO_FCVT_LU_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00011 << kRs2Shift),
+ RO_FMV_X_D = OP_FP | (0b000 << kFunct3Shift) | (0b1110001 << kFunct7Shift) |
+ (0b00000 << kRs2Shift),
+ RO_FCVT_D_L = OP_FP | (0b1101001 << kFunct7Shift) | (0b00010 << kRs2Shift),
+ RO_FCVT_D_LU = OP_FP | (0b1101001 << kFunct7Shift) | (0b00011 << kRs2Shift),
+ RO_FMV_D_X = OP_FP | (0b000 << kFunct3Shift) | (0b1111001 << kFunct7Shift) |
+ (0b00000 << kRs2Shift),
+
+ // RV64C Standard Extension
+ RO_C_ADDI4SPN = C0 | (0b000 << kRvcFunct3Shift),
+ RO_C_FLD = C0 | (0b001 << kRvcFunct3Shift),
+ RO_C_LW = C0 | (0b010 << kRvcFunct3Shift),
+ RO_C_LD = C0 | (0b011 << kRvcFunct3Shift),
+ RO_C_FSD = C0 | (0b101 << kRvcFunct3Shift),
+ RO_C_SW = C0 | (0b110 << kRvcFunct3Shift),
+ RO_C_SD = C0 | (0b111 << kRvcFunct3Shift),
+ RO_C_NOP_ADDI = C1 | (0b000 << kRvcFunct3Shift),
+ RO_C_ADDIW = C1 | (0b001 << kRvcFunct3Shift),
+ RO_C_LI = C1 | (0b010 << kRvcFunct3Shift),
+ RO_C_SUB = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_0 << kRvcFunct2Shift),
+ RO_C_XOR = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_1 << kRvcFunct2Shift),
+ RO_C_OR = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_2 << kRvcFunct2Shift),
+ RO_C_AND = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_3 << kRvcFunct2Shift),
+ RO_C_SUBW =
+ C1 | (0b100111 << kRvcFunct6Shift) | (FUNCT2_0 << kRvcFunct2Shift),
+ RO_C_ADDW =
+ C1 | (0b100111 << kRvcFunct6Shift) | (FUNCT2_1 << kRvcFunct2Shift),
+ RO_C_LUI_ADD = C1 | (0b011 << kRvcFunct3Shift),
+ RO_C_MISC_ALU = C1 | (0b100 << kRvcFunct3Shift),
+ RO_C_J = C1 | (0b101 << kRvcFunct3Shift),
+ RO_C_BEQZ = C1 | (0b110 << kRvcFunct3Shift),
+ RO_C_BNEZ = C1 | (0b111 << kRvcFunct3Shift),
+ RO_C_SLLI = C2 | (0b000 << kRvcFunct3Shift),
+ RO_C_FLDSP = C2 | (0b001 << kRvcFunct3Shift),
+ RO_C_LWSP = C2 | (0b010 << kRvcFunct3Shift),
+ RO_C_LDSP = C2 | (0b011 << kRvcFunct3Shift),
+ RO_C_JR_MV_ADD = C2 | (0b100 << kRvcFunct3Shift),
+ RO_C_JR = C2 | (0b1000 << kRvcFunct4Shift),
+ RO_C_MV = C2 | (0b1000 << kRvcFunct4Shift),
+ RO_C_EBREAK = C2 | (0b1001 << kRvcFunct4Shift),
+ RO_C_JALR = C2 | (0b1001 << kRvcFunct4Shift),
+ RO_C_ADD = C2 | (0b1001 << kRvcFunct4Shift),
+ RO_C_FSDSP = C2 | (0b101 << kRvcFunct3Shift),
+ RO_C_SWSP = C2 | (0b110 << kRvcFunct3Shift),
+ RO_C_SDSP = C2 | (0b111 << kRvcFunct3Shift),
+};
+
+// ----- Emulated conditions.
+// On RISC-V we use this enum to abstract from conditional branch instructions.
+// The 'U' prefix is used to specify unsigned comparisons.
+// Opposite conditions must be paired as odd/even numbers
+// because 'NegateCondition' function flips LSB to negate condition.
+enum Condition { // Any value < 0 is considered no_condition.
+ kNoCondition = -1,
+ overflow = 0,
+ no_overflow = 1,
+ Uless = 2,
+ Ugreater_equal = 3,
+ Uless_equal = 4,
+ Ugreater = 5,
+ equal = 6,
+ not_equal = 7, // Unordered or Not Equal.
+ less = 8,
+ greater_equal = 9,
+ less_equal = 10,
+ greater = 11,
+ cc_always = 12,
+
+ // Aliases.
+ eq = equal,
+ ne = not_equal,
+ ge = greater_equal,
+ lt = less,
+ gt = greater,
+ le = less_equal,
+ al = cc_always,
+ ult = Uless,
+ uge = Ugreater_equal,
+ ule = Uless_equal,
+ ugt = Ugreater,
+};
+
+// Returns the equivalent of !cc.
+// Negation of the default kNoCondition (-1) results in a non-default
+// value (-2). As long as tests for kNoCondition check for condition < 0,
+// this will work as expected.
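+// For example, NegateCondition(eq) yields ne (6 ^ 1 == 7) and
+// NegateCondition(Uless) yields Ugreater_equal (2 ^ 1 == 3).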
+inline Condition NegateCondition(Condition cc) {
+ DCHECK(cc != cc_always);
+ return static_cast<Condition>(cc ^ 1);
+}
+
+inline Condition NegateFpuCondition(Condition cc) {
+ DCHECK(cc != cc_always);
+ switch (cc) {
+ case ult:
+ return ge;
+ case ugt:
+ return le;
+ case uge:
+ return lt;
+ case ule:
+ return gt;
+ case lt:
+ return uge;
+ case gt:
+ return ule;
+ case ge:
+ return ult;
+ case le:
+ return ugt;
+ case eq:
+ return ne;
+ case ne:
+ return eq;
+ default:
+ return cc;
+ }
+}
+
+// ----- Coprocessor conditions.
+enum FPUCondition {
+ kNoFPUCondition = -1,
+ EQ = 0x02, // Ordered and Equal
+ NE = 0x03, // Unordered or Not Equal
+ LT = 0x04, // Ordered and Less Than
+ GE = 0x05, // Ordered and Greater Than or Equal
+ LE = 0x06, // Ordered and Less Than or Equal
+ GT = 0x07, // Ordered and Greater Than
+};
+
+enum CheckForInexactConversion {
+ kCheckForInexactConversion,
+ kDontCheckForInexactConversion
+};
+
+enum class MaxMinKind : int { kMin = 0, kMax = 1 };
+
+// ----------------------------------------------------------------------------
+// RISCV flags
+
+enum ControlStatusReg {
+ csr_fflags = 0x001, // Floating-Point Accrued Exceptions (RW)
+ csr_frm = 0x002, // Floating-Point Dynamic Rounding Mode (RW)
+ csr_fcsr = 0x003, // Floating-Point Control and Status Register (RW)
+ csr_cycle = 0xc00, // Cycle counter for RDCYCLE instruction (RO)
+ csr_time = 0xc01, // Timer for RDTIME instruction (RO)
+ csr_instret = 0xc02, // Insns-retired counter for RDINSTRET instruction (RO)
+ csr_cycleh = 0xc80, // Upper 32 bits of cycle, RV32I only (RO)
+ csr_timeh = 0xc81, // Upper 32 bits of time, RV32I only (RO)
+ csr_instreth = 0xc82 // Upper 32 bits of instret, RV32I only (RO)
+};
+
+enum FFlagsMask {
+ kInvalidOperation = 0b10000, // NV: Invalid
+ kDivideByZero = 0b1000, // DZ: Divide by Zero
+ kOverflow = 0b100, // OF: Overflow
+ kUnderflow = 0b10, // UF: Underflow
+ kInexact = 0b1 // NX: Inexact
+};
+
+enum RoundingMode {
+ RNE = 0b000, // Round to Nearest, ties to Even
+ RTZ = 0b001, // Round towards Zero
+ RDN = 0b010, // Round Down (towards -infinity)
+ RUP = 0b011, // Round Up (towards +infinity)
+ RMM = 0b100, // Round to Nearest, ties to Max Magnitude
+ DYN = 0b111 // In instruction's rm field, selects dynamic rounding mode;
+ // In Rounding Mode register, Invalid
+};
+
+enum MemoryOdering {
+ PSI = 0b1000, // PI or SI
+ PSO = 0b0100, // PO or SO
+ PSR = 0b0010, // PR or SR
+ PSW = 0b0001, // PW or SW
+ PSIORW = PSI | PSO | PSR | PSW
+};
+
+enum FClassFlag {
+ kNegativeInfinity = 1,
+ kNegativeNormalNumber = 1 << 1,
+ kNegativeSubnormalNumber = 1 << 2,
+ kNegativeZero = 1 << 3,
+ kPositiveZero = 1 << 4,
+ kPositiveSubnormalNumber = 1 << 5,
+ kPositiveNormalNumber = 1 << 6,
+ kPositiveInfinity = 1 << 7,
+ kSignalingNaN = 1 << 8,
+ kQuietNaN = 1 << 9
+};
+
+// -----------------------------------------------------------------------------
+// Hints.
+
+// Branch hints are not used on RISC-V. They are defined so that they can
+// appear in shared function signatures, but will be ignored in RISC-V
+// implementations.
+enum Hint { no_hint = 0 };
+
+inline Hint NegateHint(Hint hint) { return no_hint; }
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+// These constants are declared in assembler-riscv64.cc, as they use named
+// registers and other constants.
+
+// An Illegal instruction
+const Instr kIllegalInstr = 0; // All bits are 0s; this pattern is a defined illegal encoding
+// An ECALL instruction, used for redirected runtime calls
+const Instr rtCallRedirInstr = SYSTEM; // All other bits are 0s (i.e., ecall)
+// An EBreak instruction, used for debugging and semi-hosting
+const Instr kBreakInstr = SYSTEM | 1 << kImm12Shift; // ebreak
+
+constexpr uint8_t kInstrSize = 4;
+constexpr uint8_t kShortInstrSize = 2;
+constexpr uint8_t kInstrSizeLog2 = 2;
+
+class InstructionBase {
+ public:
+ enum {
+ // On RISC-V, PC cannot actually be directly accessed. We behave as if PC
+ // was always the value of the current instruction being executed.
+ kPCReadOffset = 0
+ };
+
+ // Instruction type.
+ enum Type {
+ kRType,
+ kR4Type, // Special R4 for Q extension
+ kIType,
+ kSType,
+ kBType,
+ kUType,
+ kJType,
+ // C extension
+ kCRType,
+ kCIType,
+ kCSSType,
+ kCIWType,
+ kCLType,
+ kCSType,
+ kCAType,
+ kCBType,
+ kCJType,
+ kUnsupported = -1
+ };
+
+ inline bool IsShortInstruction() const {
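+ // Compressed (RVC) instructions place C0, C1, or C2 in the low two bits;
+ // the value 0b11 marks a full 32-bit instruction.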
+ uint8_t FirstByte = *reinterpret_cast<const uint8_t*>(this);
+ return (FirstByte & 0x03) <= C2;
+ }
+
+ inline uint8_t InstructionSize() const {
+ return this->IsShortInstruction() ? kShortInstrSize : kInstrSize;
+ }
+
+ // Get the raw instruction bits.
+ inline Instr InstructionBits() const {
+ if (this->IsShortInstruction()) {
+ return 0x0000FFFF & (*reinterpret_cast<const ShortInstr*>(this));
+ }
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void SetInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }
+
+ // Read a bit field out of the instruction bits.
+ inline int Bits(int hi, int lo) const {
+ return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1);
+ }
+
+ // Accessors for the different named fields used in the RISC-V encoding.
+ inline Opcode BaseOpcodeValue() const {
+ return static_cast<Opcode>(
+ Bits(kBaseOpcodeShift + kBaseOpcodeBits - 1, kBaseOpcodeShift));
+ }
+
+ // Return the fields at their original place in the instruction encoding.
+ inline Opcode BaseOpcodeFieldRaw() const {
+ return static_cast<Opcode>(InstructionBits() & kBaseOpcodeMask);
+ }
+
+ // Safe to call within R-type instructions
+ inline int Funct7FieldRaw() const { return InstructionBits() & kFunct7Mask; }
+
+ // Safe to call within R-, I-, S-, or B-type instructions
+ inline int Funct3FieldRaw() const { return InstructionBits() & kFunct3Mask; }
+
+ // Safe to call within R-, I-, S-, or B-type instructions
+ inline int Rs1FieldRawNoAssert() const {
+ return InstructionBits() & kRs1FieldMask;
+ }
+
+ // Safe to call within R-, S-, or B-type instructions
+ inline int Rs2FieldRawNoAssert() const {
+ return InstructionBits() & kRs2FieldMask;
+ }
+
+ // Safe to call within R4-type instructions
+ inline int Rs3FieldRawNoAssert() const {
+ return InstructionBits() & kRs3FieldMask;
+ }
+
+ inline int32_t ITypeBits() const { return InstructionBits() & kITypeMask; }
+
+ inline int32_t InstructionOpcodeType() const {
+ if (IsShortInstruction()) {
+ return InstructionBits() & kRvcOpcodeMask;
+ } else {
+ return InstructionBits() & kBaseOpcodeMask;
+ }
+ }
+
+ // Get the encoding type of the instruction.
+ Type InstructionType() const;
+
+ protected:
+ InstructionBase() {}
+};
+
+template <class T>
+class InstructionGetters : public T {
+ public:
+ inline int BaseOpcode() const {
+ return this->InstructionBits() & kBaseOpcodeMask;
+ }
+
+ inline int RvcOpcode() const {
+ DCHECK(this->IsShortInstruction());
+ return this->InstructionBits() & kRvcOpcodeMask;
+ }
+
+ inline int Rs1Value() const {
+ DCHECK(this->InstructionType() == InstructionBase::kRType ||
+ this->InstructionType() == InstructionBase::kR4Type ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType ||
+ this->InstructionType() == InstructionBase::kBType);
+ return this->Bits(kRs1Shift + kRs1Bits - 1, kRs1Shift);
+ }
+
+ inline int Rs2Value() const {
+ DCHECK(this->InstructionType() == InstructionBase::kRType ||
+ this->InstructionType() == InstructionBase::kR4Type ||
+ this->InstructionType() == InstructionBase::kSType ||
+ this->InstructionType() == InstructionBase::kBType);
+ return this->Bits(kRs2Shift + kRs2Bits - 1, kRs2Shift);
+ }
+
+ inline int Rs3Value() const {
+ DCHECK(this->InstructionType() == InstructionBase::kR4Type);
+ return this->Bits(kRs3Shift + kRs3Bits - 1, kRs3Shift);
+ }
+
+ inline int RdValue() const {
+ DCHECK(this->InstructionType() == InstructionBase::kRType ||
+ this->InstructionType() == InstructionBase::kR4Type ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kUType ||
+ this->InstructionType() == InstructionBase::kJType);
+ return this->Bits(kRdShift + kRdBits - 1, kRdShift);
+ }
+
+ inline int RvcRdValue() const {
+ DCHECK(this->IsShortInstruction());
+ return this->Bits(kRvcRdShift + kRvcRdBits - 1, kRvcRdShift);
+ }
+
+ inline int RvcRs1Value() const { return this->RvcRdValue(); }
+
+ inline int RvcRs2Value() const {
+ DCHECK(this->IsShortInstruction());
+ return this->Bits(kRvcRs2Shift + kRvcRs2Bits - 1, kRvcRs2Shift);
+ }
+
+ inline int RvcRs1sValue() const {
+ DCHECK(this->IsShortInstruction());
+ return 0b1000 + this->Bits(kRvcRs1sShift + kRvcRs1sBits - 1, kRvcRs1sShift);
+ }
+
+ inline int RvcRs2sValue() const {
+ DCHECK(this->IsShortInstruction());
+ return 0b1000 + this->Bits(kRvcRs2sShift + kRvcRs2sBits - 1, kRvcRs2sShift);
+ }
+
+ inline int Funct7Value() const {
+ DCHECK(this->InstructionType() == InstructionBase::kRType);
+ return this->Bits(kFunct7Shift + kFunct7Bits - 1, kFunct7Shift);
+ }
+
+ inline int Funct3Value() const {
+ DCHECK(this->InstructionType() == InstructionBase::kRType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType ||
+ this->InstructionType() == InstructionBase::kBType);
+ return this->Bits(kFunct3Shift + kFunct3Bits - 1, kFunct3Shift);
+ }
+
+ inline int Funct5Value() const {
+ DCHECK(this->InstructionType() == InstructionBase::kRType &&
+ this->BaseOpcode() == OP_FP);
+ return this->Bits(kFunct5Shift + kFunct5Bits - 1, kFunct5Shift);
+ }
+
+ inline int RvcFunct6Value() const {
+ DCHECK(this->IsShortInstruction());
+ return this->Bits(kRvcFunct6Shift + kRvcFunct6Bits - 1, kRvcFunct6Shift);
+ }
+
+ inline int RvcFunct4Value() const {
+ DCHECK(this->IsShortInstruction());
+ return this->Bits(kRvcFunct4Shift + kRvcFunct4Bits - 1, kRvcFunct4Shift);
+ }
+
+ inline int RvcFunct3Value() const {
+ DCHECK(this->IsShortInstruction());
+ return this->Bits(kRvcFunct3Shift + kRvcFunct3Bits - 1, kRvcFunct3Shift);
+ }
+
+ inline int RvcFunct2Value() const {
+ DCHECK(this->IsShortInstruction());
+ return this->Bits(kRvcFunct2Shift + kRvcFunct2Bits - 1, kRvcFunct2Shift);
+ }
+
+ inline int CsrValue() const {
+ DCHECK(this->InstructionType() == InstructionBase::kIType &&
+ this->BaseOpcode() == SYSTEM);
+ return (this->Bits(kCsrShift + kCsrBits - 1, kCsrShift));
+ }
+
+ inline int RoundMode() const {
+ DCHECK((this->InstructionType() == InstructionBase::kRType ||
+ this->InstructionType() == InstructionBase::kR4Type) &&
+ this->BaseOpcode() == OP_FP);
+ return this->Bits(kFunct3Shift + kFunct3Bits - 1, kFunct3Shift);
+ }
+
+ inline int MemoryOrder(bool is_pred) const {
+ DCHECK((this->InstructionType() == InstructionBase::kIType &&
+ this->BaseOpcode() == MISC_MEM));
+ if (is_pred) {
+ return this->Bits(kPredOrderShift + kMemOrderBits - 1, kPredOrderShift);
+ } else {
+ return this->Bits(kSuccOrderShift + kMemOrderBits - 1, kSuccOrderShift);
+ }
+ }
+
+ inline int Imm12Value() const {
+ DCHECK(this->InstructionType() == InstructionBase::kIType);
+ int Value = this->Bits(kImm12Shift + kImm12Bits - 1, kImm12Shift);
+ return Value << 20 >> 20;
+ }
+
+ inline int32_t Imm12SExtValue() const {
+ int32_t Value = this->Imm12Value() << 20 >> 20;
+ return Value;
+ }
+
+ inline int BranchOffset() const {
+ DCHECK(this->InstructionType() == InstructionBase::kBType);
+ // | imm[12|10:5] | rs2 | rs1 | funct3 | imm[4:1|11] | opcode |
+ // 31 25 11 7
+ uint32_t Bits = this->InstructionBits();
+ int16_t imm13 = ((Bits & 0xf00) >> 7) | ((Bits & 0x7e000000) >> 20) |
+ ((Bits & 0x80) << 4) | ((Bits & 0x80000000) >> 19);
+ return imm13 << 19 >> 19;
+ }
+
+ inline int StoreOffset() const {
+ DCHECK(this->InstructionType() == InstructionBase::kSType);
+ // | imm[11:5] | rs2 | rs1 | funct3 | imm[4:0] | opcode |
+ // 31 25 11 7
+ uint32_t Bits = this->InstructionBits();
+ int16_t imm12 = ((Bits & 0xf80) >> 7) | ((Bits & 0xfe000000) >> 20);
+ return imm12 << 20 >> 20;
+ }
+
+ inline int Imm20UValue() const {
+ DCHECK(this->InstructionType() == InstructionBase::kUType);
+ // | imm[31:12] | rd | opcode |
+ // 31 12
+ int32_t Bits = this->InstructionBits();
+ return Bits >> 12;
+ }
+
+ inline int Imm20JValue() const {
+ DCHECK(this->InstructionType() == InstructionBase::kJType);
+ // | imm[20|10:1|11|19:12] | rd | opcode |
+ // 31 12
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm20 = ((Bits & 0x7fe00000) >> 20) | ((Bits & 0x100000) >> 9) |
+ (Bits & 0xff000) | ((Bits & 0x80000000) >> 11);
+ return imm20 << 11 >> 11;
+ }
+
+ inline bool IsArithShift() const {
+ // Valid only for right shift operations
+ DCHECK((this->BaseOpcode() == OP || this->BaseOpcode() == OP_32 ||
+ this->BaseOpcode() == OP_IMM || this->BaseOpcode() == OP_IMM_32) &&
+ this->Funct3Value() == 0b101);
+ return this->InstructionBits() & 0x40000000;
+ }
+
+ inline int Shamt() const {
+ // Valid only for shift instructions (SLLI, SRLI, SRAI)
+ DCHECK((this->InstructionBits() & kBaseOpcodeMask) == OP_IMM &&
+ (this->Funct3Value() == 0b001 || this->Funct3Value() == 0b101));
+ // | 0A0000 | shamt | rs1 | funct3 | rd | opcode |
+ // 31 25 20
+ return this->Bits(kImm12Shift + 5, kImm12Shift);
+ }
+
+ inline int Shamt32() const {
+ // Valid only for shift instructions (SLLIW, SRLIW, SRAIW)
+ DCHECK((this->InstructionBits() & kBaseOpcodeMask) == OP_IMM_32 &&
+ (this->Funct3Value() == 0b001 || this->Funct3Value() == 0b101));
+ // | 0A00000 | shamt | rs1 | funct3 | rd | opcode |
+ // 31 24 20
+ return this->Bits(kImm12Shift + 4, kImm12Shift);
+ }
+
+ inline int RvcImm6Value() const {
+ DCHECK(this->IsShortInstruction());
+ // | funct3 | imm[5] | rs1/rd | imm[4:0] | opcode |
+ // 15 12 6 2
+ // | funct3 | nzimm[17] | rs1/rd | nzimm[16:12] | opcode |
+ // 15 12 6 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm6 = ((Bits & 0x1000) >> 7) | ((Bits & 0x7c) >> 2);
+ return imm6 << 26 >> 26;
+ }
+
+ inline int RvcImm6Addi16spValue() const {
+ DCHECK(this->IsShortInstruction());
+ // | funct3 | nzimm[9] | 2 | nzimm[4|6|8:7|5] | opcode |
+ // 15 12 6 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm10 = ((Bits & 0x1000) >> 3) | ((Bits & 0x40) >> 2) |
+ ((Bits & 0x20) << 1) | ((Bits & 0x18) << 4) |
+ ((Bits & 0x4) << 3);
+ DCHECK_NE(imm10, 0);
+ return imm10 << 22 >> 22;
+ }
+
+ inline int RvcImm8Addi4spnValue() const {
+ DCHECK(this->IsShortInstruction());
+ // | funct3 | nzimm[11] | rd' | opcode |
+ // 15 13 5 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t uimm10 = ((Bits & 0x20) >> 2) | ((Bits & 0x40) >> 4) |
+ ((Bits & 0x780) >> 1) | ((Bits & 0x1800) >> 7);
+ DCHECK_NE(uimm10, 0);
+ return uimm10;
+ }
+
+ inline int RvcShamt6() const {
+ DCHECK(this->IsShortInstruction());
+ // | funct3 | nzuimm[5] | rs1/rd | nzuimm[4:0] | opcode |
+ // 15 12 6 2
+ int32_t imm6 = this->RvcImm6Value();
+ return imm6 & 0x3f;
+ }
+
+ inline int RvcImm6LwspValue() const {
+ DCHECK(this->IsShortInstruction());
+ // | funct3 | uimm[5] | rs1 | uimm[4:2|7:6] | opcode |
+ // 15 12 6 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm8 =
+ ((Bits & 0x1000) >> 7) | ((Bits & 0x70) >> 2) | ((Bits & 0xc) << 4);
+ return imm8;
+ }
+
+ inline int RvcImm6LdspValue() const {
+ DCHECK(this->IsShortInstruction());
+ // | funct3 | uimm[5] | rs1 | uimm[4:3|8:6] | opcode |
+ // 15 12 6 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm9 =
+ ((Bits & 0x1000) >> 7) | ((Bits & 0x60) >> 2) | ((Bits & 0x1c) << 4);
+ return imm9;
+ }
+
+ inline int RvcImm6SwspValue() const {
+ DCHECK(this->IsShortInstruction());
+ // | funct3 | uimm[5:2|7:6] | rs2 | opcode |
+ // 15 12 7
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm8 = ((Bits & 0x1e00) >> 7) | ((Bits & 0x180) >> 1);
+ return imm8;
+ }
+
+ inline int RvcImm6SdspValue() const {
+ DCHECK(this->IsShortInstruction());
+ // | funct3 | uimm[5:3|8:6] | rs2 | opcode |
+ // 15 12 7
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm9 = ((Bits & 0x1c00) >> 7) | ((Bits & 0x380) >> 1);
+ return imm9;
+ }
+
+ inline int RvcImm5WValue() const {
+ DCHECK(this->IsShortInstruction());
+ // | funct3 | imm[5:3] | rs1 | imm[2|6] | rd | opcode |
+ // 15 12 10 6 4 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm7 =
+ ((Bits & 0x1c00) >> 7) | ((Bits & 0x40) >> 4) | ((Bits & 0x20) << 1);
+ return imm7;
+ }
+
+ inline int RvcImm5DValue() const {
+ DCHECK(this->IsShortInstruction());
+ // | funct3 | imm[5:3] | rs1 | imm[7:6] | rd | opcode |
+ // 15 12 10 6 4 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm8 = ((Bits & 0x1c00) >> 7) | ((Bits & 0x60) << 1);
+ return imm8;
+ }
+
+ inline int RvcImm11CJValue() const {
+ DCHECK(this->IsShortInstruction());
+ // | funct3 | [11|4|9:8|10|6|7|3:1|5] | opcode |
+ // 15 12 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm12 = ((Bits & 0x4) << 3) | ((Bits & 0x38) >> 2) |
+ ((Bits & 0x40) << 1) | ((Bits & 0x80) >> 1) |
+ ((Bits & 0x100) << 2) | ((Bits & 0x600) >> 1) |
+ ((Bits & 0x800) >> 7) | ((Bits & 0x1000) >> 1);
+ return imm12 << 20 >> 20;
+ }
+
+ inline bool AqValue() const { return this->Bits(kAqShift, kAqShift); }
+
+ inline bool RlValue() const { return this->Bits(kRlShift, kRlShift); }
+
+ // Say if the instruction is a break or a trap.
+ bool IsTrap() const;
+};
+
+class Instruction : public InstructionGetters<InstructionBase> {
+ public:
+ // Instructions are read out of a code stream. The only way to get a
+ // reference to an instruction is to convert a pointer. There is no way
+ // to allocate or create instances of class Instruction.
+ // Use the At(pc) function to create references to Instruction.
+ static Instruction* At(byte* pc) {
+ return reinterpret_cast<Instruction*>(pc);
+ }
+
+ private:
+ // We need to prevent the creation of instances of class Instruction.
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
+};
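+
+// Usage sketch (pc is assumed to point into a generated code stream):
+//   Instruction* instr = Instruction::At(pc);
+//   int size = instr->InstructionSize();  // 2 for compressed, 4 otherwise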
+
+// -----------------------------------------------------------------------------
+// RISC-V assembly various constants.
+
+// C/C++ argument slots size.
+const int kCArgSlotCount = 0;
+
+// TODO(plind): below should be based on kPointerSize
+// TODO(plind): find all usages and remove the needless instructions for n64.
+const int kCArgsSlotsSize = kCArgSlotCount * kInstrSize * 2;
+
+const int kInvalidStackOffset = -1;
+const int kBranchReturnOffset = 2 * kInstrSize;
+
+static const int kNegOffset = 0x00008000;
+
+// -----------------------------------------------------------------------------
+// Instructions.
+
+template <class P>
+bool InstructionGetters<P>::IsTrap() const {
+ return (this->InstructionBits() == kBreakInstr);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_RISCV64_CONSTANTS_RISCV64_H_
diff --git a/deps/v8/src/codegen/riscv64/cpu-riscv64.cc b/deps/v8/src/codegen/riscv64/cpu-riscv64.cc
new file mode 100644
index 0000000000..aad09378f9
--- /dev/null
+++ b/deps/v8/src/codegen/riscv64/cpu-riscv64.cc
@@ -0,0 +1,32 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// CPU specific code for RISC-V independent of OS goes here.
+
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#if V8_TARGET_ARCH_RISCV64
+
+#include "src/codegen/cpu-features.h"
+
+namespace v8 {
+namespace internal {
+
+void CpuFeatures::FlushICache(void* start, size_t size) {
+#if !defined(USE_SIMULATOR)
+ char* end = reinterpret_cast<char*>(start) + size;
+ // The definition of this syscall is
+ // SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start,
+ // uintptr_t, end, uintptr_t, flags)
+ // The flag here is set to be SYS_RISCV_FLUSH_ICACHE_LOCAL, which is
+ // defined as 1 in the Linux kernel.
+ syscall(SYS_riscv_flush_icache, start, end, 1);
+#endif // !USE_SIMULATOR.
+}
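+
+// Usage sketch: after writing or patching instructions in [start, start + size),
+// call CpuFeatures::FlushICache(start, size) before executing them.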
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64.cc b/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64.cc
new file mode 100644
index 0000000000..26730aceca
--- /dev/null
+++ b/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64.cc
@@ -0,0 +1,301 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_RISCV64
+
+#include "src/codegen/interface-descriptors.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
+
+void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
+ CallInterfaceDescriptorData* data, int register_parameter_count) {
+ const Register default_stub_registers[] = {a0, a1, a2, a3, a4};
+ CHECK_LE(static_cast<size_t>(register_parameter_count),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(register_parameter_count,
+ default_stub_registers);
+}
+
+void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ const Register default_stub_registers[] = {a0, a1, a2, a3};
+ CHECK_EQ(static_cast<size_t>(kParameterCount),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
+}
+
+void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ const Register default_stub_registers[] = {a0, a1, a2, a3, a4};
+ CHECK_EQ(static_cast<size_t>(kParameterCount - kStackArgumentsCount),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(kParameterCount - kStackArgumentsCount,
+ default_stub_registers);
+}
+
+void RecordWriteDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
+
+ data->RestrictAllocatableRegisters(default_stub_registers,
+ arraysize(default_stub_registers));
+
+ CHECK_LE(static_cast<size_t>(kParameterCount),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
+}
+
+void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
+
+ data->RestrictAllocatableRegisters(default_stub_registers,
+ arraysize(default_stub_registers));
+
+ CHECK_LE(static_cast<size_t>(kParameterCount),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
+}
+
+void DynamicCheckMapsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register default_stub_registers[] = {kReturnRegister0, a1, a2, a3, cp};
+
+ data->RestrictAllocatableRegisters(default_stub_registers,
+ arraysize(default_stub_registers));
+
+ CHECK_LE(static_cast<size_t>(kParameterCount),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
+}
+
+const Register LoadDescriptor::ReceiverRegister() { return a1; }
+const Register LoadDescriptor::NameRegister() { return a2; }
+const Register LoadDescriptor::SlotRegister() { return a0; }
+
+const Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
+
+const Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return a4;
+}
+
+const Register StoreDescriptor::ReceiverRegister() { return a1; }
+const Register StoreDescriptor::NameRegister() { return a2; }
+const Register StoreDescriptor::ValueRegister() { return a0; }
+const Register StoreDescriptor::SlotRegister() { return a4; }
+
+const Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
+
+const Register StoreTransitionDescriptor::SlotRegister() { return a4; }
+const Register StoreTransitionDescriptor::VectorRegister() { return a3; }
+const Register StoreTransitionDescriptor::MapRegister() { return a5; }
+
+const Register ApiGetterDescriptor::HolderRegister() { return a0; }
+const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
+
+const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
+const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
+
+// static
+const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
+
+void TypeofDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1: target
+ // a0: number of arguments
+ Register registers[] = {a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a4 : arguments list length (untagged)
+ // a2 : arguments list (FixedArray)
+ Register registers[] = {a1, a0, a4, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallForwardVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1: the target to call
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ Register registers[] = {a1, a0, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1 : function template info
+ // a0 : number of arguments (on the stack, not including receiver)
+ Register registers[] = {a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallWithSpreadDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a2 : the object to spread
+ Register registers[] = {a1, a0, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1 : the target to call
+ // a2 : the arguments list
+ Register registers[] = {a1, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a4 : arguments list length (untagged)
+ // a2 : arguments list (FixedArray)
+ Register registers[] = {a1, a3, a0, a4, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1: the target to call
+ // a3: new target
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ Register registers[] = {a1, a3, a0, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the object to spread
+ Register registers[] = {a1, a3, a0, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the arguments list
+ Register registers[] = {a1, a3, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructStubDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1: target
+ // a3: new target
+ // a0: number of arguments
+ // a2: allocation site or undefined
+ Register registers[] = {a1, a3, a0, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void AbortDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CompareDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void BinaryOpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ApiCallbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ a1, // kApiFunctionAddress
+ a2, // kArgc
+ a3, // kCallData
+ a0, // kHolder
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterDispatchDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ a0, // argument count (not including receiver)
+ a2, // address of first argument
+ a1 // the target callable to call
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ a0, // argument count (not including receiver)
+ a4, // address of the first argument
+ a1, // constructor to call
+ a3, // new target
+ a2, // allocation site feedback if available, undefined otherwise
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ResumeGeneratorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ a0, // the value to pass to the generator
+ a1 // the JSGeneratorObject to resume
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ a1, // loaded new FP
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a0, a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
new file mode 100644
index 0000000000..a479666120
--- /dev/null
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
@@ -0,0 +1,4575 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits.h> // For LONG_MIN, LONG_MAX.
+
+#if V8_TARGET_ARCH_RISCV64
+
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/external-reference-table.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register-configuration.h"
+#include "src/debug/debug.h"
+#include "src/execution/frames-inl.h"
+#include "src/heap/memory-chunk.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
+#include "src/objects/heap-number.h"
+#include "src/runtime/runtime.h"
+#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/snapshot.h"
+#include "src/wasm/wasm-code-manager.h"
+
+// Satisfy cpplint check, but don't include platform-specific header. It is
+// included recursively via macro-assembler.h.
+#if 0
+#include "src/codegen/riscv64/macro-assembler-riscv64.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
+static inline bool IsZero(const Operand& rt) {
+ if (rt.is_reg()) {
+ return rt.rm() == zero_reg;
+ } else {
+ return rt.immediate() == 0;
+ }
+}
+
+int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1,
+ Register exclusion2,
+ Register exclusion3) const {
+ int bytes = 0;
+ RegList exclusions = 0;
+ if (exclusion1 != no_reg) {
+ exclusions |= exclusion1.bit();
+ if (exclusion2 != no_reg) {
+ exclusions |= exclusion2.bit();
+ if (exclusion3 != no_reg) {
+ exclusions |= exclusion3.bit();
+ }
+ }
+ }
+
+ RegList list = kJSCallerSaved & ~exclusions;
+ bytes += NumRegs(list) * kPointerSize;
+
+ if (fp_mode == kSaveFPRegs) {
+ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ }
+
+ return bytes;
+}
+
+int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
+ int bytes = 0;
+ RegList exclusions = 0;
+ if (exclusion1 != no_reg) {
+ exclusions |= exclusion1.bit();
+ if (exclusion2 != no_reg) {
+ exclusions |= exclusion2.bit();
+ if (exclusion3 != no_reg) {
+ exclusions |= exclusion3.bit();
+ }
+ }
+ }
+
+ RegList list = kJSCallerSaved & ~exclusions;
+ MultiPush(list);
+ bytes += NumRegs(list) * kPointerSize;
+
+ if (fp_mode == kSaveFPRegs) {
+ MultiPushFPU(kCallerSavedFPU);
+ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ }
+
+ return bytes;
+}
+
+int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
+ int bytes = 0;
+ if (fp_mode == kSaveFPRegs) {
+ MultiPopFPU(kCallerSavedFPU);
+ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ }
+
+ RegList exclusions = 0;
+ if (exclusion1 != no_reg) {
+ exclusions |= exclusion1.bit();
+ if (exclusion2 != no_reg) {
+ exclusions |= exclusion2.bit();
+ if (exclusion3 != no_reg) {
+ exclusions |= exclusion3.bit();
+ }
+ }
+ }
+
+ RegList list = kJSCallerSaved & ~exclusions;
+ MultiPop(list);
+ bytes += NumRegs(list) * kPointerSize;
+
+ return bytes;
+}
+
+void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
+ Ld(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index)));
+}
+
+void TurboAssembler::LoadRoot(Register destination, RootIndex index,
+ Condition cond, Register src1,
+ const Operand& src2) {
+ Label skip;
+ Branch(&skip, NegateCondition(cond), src1, src2);
+ Ld(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index)));
+ bind(&skip);
+}
+
+void TurboAssembler::PushCommonFrame(Register marker_reg) {
+ if (marker_reg.is_valid()) {
+ Push(ra, fp, marker_reg);
+ Add64(fp, sp, Operand(kPointerSize));
+ } else {
+ Push(ra, fp);
+ mv(fp, sp);
+ }
+}
+
+void TurboAssembler::PushStandardFrame(Register function_reg) {
+ int offset = -StandardFrameConstants::kContextOffset;
+ if (function_reg.is_valid()) {
+ Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister);
+ offset += 2 * kPointerSize;
+ } else {
+ Push(ra, fp, cp, kJavaScriptCallArgCountRegister);
+ offset += kPointerSize;
+ }
+ Add64(fp, sp, Operand(offset));
+}
+
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+ // The registers are pushed starting with the highest encoding,
+ // which means that lowest encodings are closest to the stack pointer.
+ return kSafepointRegisterStackIndexMap[reg_code];
+}
+
+// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
+// The register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWriteField(Register object, int offset,
+ Register value, Register dst,
+ RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
+ Label done;
+
+ // Skip barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done);
+ }
+
+ // Although the object register is tagged, the offset is relative to the start
+ // of the object, so the offset must be a multiple of kPointerSize.
+ DCHECK(IsAligned(offset, kPointerSize));
+
+ Add64(dst, object, Operand(offset - kHeapObjectTag));
+ if (emit_debug_code()) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label ok;
+ DCHECK(!AreAliased(value, dst, scratch, object));
+ And(scratch, dst, Operand(kPointerSize - 1));
+ Branch(&ok, eq, scratch, Operand(zero_reg));
+ ebreak();
+ bind(&ok);
+ }
+
+ RecordWrite(object, dst, value, ra_status, save_fp, remembered_set_action,
+ OMIT_SMI_CHECK);
+
+ bind(&done);
+
+ // Clobber clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
+ li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
+ }
+}
+
+void TurboAssembler::SaveRegisters(RegList registers) {
+ DCHECK_GT(NumRegs(registers), 0);
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
+ }
+ MultiPush(regs);
+}
+
+void TurboAssembler::RestoreRegisters(RegList registers) {
+ DCHECK_GT(NumRegs(registers), 0);
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
+ }
+ MultiPop(regs);
+}
+
+void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address,
+ SaveFPRegsMode fp_mode) {
+ EphemeronKeyBarrierDescriptor descriptor;
+ RegList registers = descriptor.allocatable_registers();
+
+ SaveRegisters(registers);
+
+ Register object_parameter(
+ descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kObject));
+ Register slot_parameter(descriptor.GetRegisterParameter(
+ EphemeronKeyBarrierDescriptor::kSlotAddress));
+ Register fp_mode_parameter(
+ descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode));
+
+ Push(object);
+ Push(address);
+
+ Pop(slot_parameter);
+ Pop(object_parameter);
+
+ Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
+ Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier),
+ RelocInfo::CODE_TARGET);
+ RestoreRegisters(registers);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
+ Builtins::kRecordWrite, kNullAddress);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ Address wasm_target) {
+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
+ Builtins::kNoBuiltinId, wasm_target);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ int builtin_index, Address wasm_target) {
+ DCHECK_NE(builtin_index == Builtins::kNoBuiltinId,
+ wasm_target == kNullAddress);
+ // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
+ // i.e. always emit remembered set and save FP registers in RecordWriteStub.
+ // If a large performance regression is observed, we should use these values
+ // to avoid unnecessary work.
+
+ RecordWriteDescriptor descriptor;
+ RegList registers = descriptor.allocatable_registers();
+
+ SaveRegisters(registers);
+ Register object_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
+ Register slot_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
+ Register remembered_set_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
+ Register fp_mode_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
+
+ Push(object);
+ Push(address);
+
+ Pop(slot_parameter);
+ Pop(object_parameter);
+
+ Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
+ Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
+ if (builtin_index == Builtins::kNoBuiltinId) {
+ Call(wasm_target, RelocInfo::WASM_STUB_CALL);
+ } else if (options().inline_offheap_trampolines) {
+ // Inline the trampoline.
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Call(scratch);
+ } else {
+ Handle<Code> code_target =
+ isolate()->builtins()->builtin_handle(Builtins::kRecordWrite);
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
+
+ RestoreRegisters(registers);
+}
+
+// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
+// The register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object, Register address,
+ Register value, RAStatus ra_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ if (emit_debug_code()) {
+ DCHECK(!AreAliased(object, address, value, kScratchReg));
+ Ld(kScratchReg, MemOperand(address));
+ Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite,
+ kScratchReg, Operand(value));
+ }
+
+ if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+ !FLAG_incremental_marking) ||
+ FLAG_disable_write_barriers) {
+ return;
+ }
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
+ Label done;
+
+ if (smi_check == INLINE_SMI_CHECK) {
+ DCHECK_EQ(0, kSmiTag);
+ JumpIfSmi(value, &done);
+ }
+
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+ CheckPageFlag(object,
+ value, // Used as scratch.
+ MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
+
+ // Record the actual write.
+ if (ra_status == kRAHasNotBeenSaved) {
+ push(ra);
+ }
+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
+ if (ra_status == kRAHasNotBeenSaved) {
+ pop(ra);
+ }
+
+ bind(&done);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
+ li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
+ }
+}
+
+// ---------------------------------------------------------------------------
+// Instruction macros.
+
+void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ addw(rd, rs, rt.rm());
+ } else {
+ if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
+ addiw(rd, rs, static_cast<int32_t>(rt.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ RV_li(scratch, rt.immediate());
+ addw(rd, rs, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Add64(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ add(rd, rs, rt.rm());
+ } else {
+ if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
+ addi(rd, rs, static_cast<int32_t>(rt.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ RV_li(scratch, rt.immediate());
+ add(rd, rs, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ subw(rd, rs, rt.rm());
+ } else {
+ DCHECK(is_int32(rt.immediate()));
+ if (is_int12(-rt.immediate()) && !MustUseReg(rt.rmode())) {
+ addiw(rd, rs,
+ static_cast<int32_t>(
+ -rt.immediate())); // No subiw instr, use addiw(x, y, -imm).
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ if (-rt.immediate() >> 12 == 0 && !MustUseReg(rt.rmode())) {
+ // Use load -imm and addw when loading -imm generates one instruction.
+ RV_li(scratch, -rt.immediate());
+ addw(rd, rs, scratch);
+ } else {
+ // li handles the relocation.
+ RV_li(scratch, rt.immediate());
+ subw(rd, rs, scratch);
+ }
+ }
+ }
+}
+
+void TurboAssembler::Sub64(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ sub(rd, rs, rt.rm());
+ } else if (is_int12(-rt.immediate()) && !MustUseReg(rt.rmode())) {
+ addi(rd, rs,
+ static_cast<int32_t>(
+ -rt.immediate())); // No subi instr, use addi(x, y, -imm).
+ } else {
+ int li_count = InstrCountForLi64Bit(rt.immediate());
+ int li_neg_count = InstrCountForLi64Bit(-rt.immediate());
+ if (li_neg_count < li_count && !MustUseReg(rt.rmode())) {
+ // Use load -imm and add when loading -imm takes fewer instructions.
+ DCHECK(rt.immediate() != std::numeric_limits<int32_t>::min());
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ RV_li(scratch, -rt.immediate());
+ add(rd, rs, scratch);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ RV_li(scratch, rt.immediate());
+ sub(rd, rs, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Mul32(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ mulw(rd, rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ RV_li(scratch, rt.immediate());
+ mulw(rd, rs, scratch);
+ }
+}
+
+void TurboAssembler::Mulh32(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ mul(rd, rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ RV_li(scratch, rt.immediate());
+ mul(rd, rs, scratch);
+ }
+ srai(rd, rd, 32);
+}
+
+void TurboAssembler::Mulhu32(Register rd, Register rs, const Operand& rt,
+ Register rsz, Register rtz) {
+ slli(rsz, rs, 32);
+ if (rt.is_reg())
+ slli(rtz, rt.rm(), 32);
+ else
+ RV_li(rtz, rt.immediate() << 32);
+ mulhu(rd, rsz, rtz);
+ srai(rd, rd, 32);
+}
+
+void TurboAssembler::Mul64(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ mul(rd, rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ RV_li(scratch, rt.immediate());
+ mul(rd, rs, scratch);
+ }
+}
+
+void TurboAssembler::Mulh64(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ mulh(rd, rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ RV_li(scratch, rt.immediate());
+ mulh(rd, rs, scratch);
+ }
+}
+
+void TurboAssembler::Div32(Register res, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ divw(res, rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ RV_li(scratch, rt.immediate());
+ divw(res, rs, scratch);
+ }
+}
+
+void TurboAssembler::Mod32(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ remw(rd, rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ RV_li(scratch, rt.immediate());
+ remw(rd, rs, scratch);
+ }
+}
+
+void TurboAssembler::Modu32(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ remuw(rd, rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ RV_li(scratch, rt.immediate());
+ remuw(rd, rs, scratch);
+ }
+}
+
+void TurboAssembler::Div64(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ div(rd, rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ RV_li(scratch, rt.immediate());
+ div(rd, rs, scratch);
+ }
+}
+
+void TurboAssembler::Divu32(Register res, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ divuw(res, rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ RV_li(scratch, rt.immediate());
+ divuw(res, rs, scratch);
+ }
+}
+
+void TurboAssembler::Divu64(Register res, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ divu(res, rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ RV_li(scratch, rt.immediate());
+ divu(res, rs, scratch);
+ }
+}
+
+void TurboAssembler::Mod64(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ rem(rd, rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ RV_li(scratch, rt.immediate());
+ rem(rd, rs, scratch);
+ }
+}
+
+void TurboAssembler::Modu64(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ remu(rd, rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ RV_li(scratch, rt.immediate());
+ remu(rd, rs, scratch);
+ }
+}
+
+void TurboAssembler::And(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ and_(rd, rs, rt.rm());
+ } else {
+ if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
+ andi(rd, rs, static_cast<int32_t>(rt.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ RV_li(scratch, rt.immediate());
+ and_(rd, rs, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ or_(rd, rs, rt.rm());
+ } else {
+ if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
+ ori(rd, rs, static_cast<int32_t>(rt.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ RV_li(scratch, rt.immediate());
+ or_(rd, rs, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ xor_(rd, rs, rt.rm());
+ } else {
+ if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
+ xori(rd, rs, static_cast<int32_t>(rt.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ RV_li(scratch, rt.immediate());
+ xor_(rd, rs, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ or_(rd, rs, rt.rm());
+ not_(rd, rd);
+ } else {
+ Or(rd, rs, rt);
+ not_(rd, rd);
+ }
+}
+
+void TurboAssembler::Neg(Register rs, const Operand& rt) {
+ DCHECK(rt.is_reg());
+ neg(rs, rt.rm());
+}
+
+void TurboAssembler::Seqz(Register rd, const Operand& rt) {
+ if (rt.is_reg()) {
+ seqz(rd, rt.rm());
+ } else {
+ li(rd, rt.immediate() == 0);
+ }
+}
+
+void TurboAssembler::Snez(Register rd, const Operand& rt) {
+ if (rt.is_reg()) {
+ snez(rd, rt.rm());
+ } else {
+ li(rd, rt.immediate() != 0);
+ }
+}
+
+void TurboAssembler::Seq(Register rd, Register rs, const Operand& rt) {
+ if (rs == zero_reg) {
+ Seqz(rd, rt);
+ } else if (IsZero(rt)) {
+ seqz(rd, rs);
+ } else {
+ Sub64(rd, rs, rt);
+ seqz(rd, rd);
+ }
+}
+
+void TurboAssembler::Sne(Register rd, Register rs, const Operand& rt) {
+ if (rs == zero_reg) {
+ Snez(rd, rt);
+ } else if (IsZero(rt)) {
+ snez(rd, rs);
+ } else {
+ Sub64(rd, rs, rt);
+ snez(rd, rd);
+ }
+}
+
+void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ slt(rd, rs, rt.rm());
+ } else {
+ if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
+ slti(rd, rs, static_cast<int32_t>(rt.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ RV_li(scratch, rt.immediate());
+ slt(rd, rs, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ sltu(rd, rs, rt.rm());
+ } else {
+ if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
+ sltiu(rd, rs, static_cast<int32_t>(rt.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ RV_li(scratch, rt.immediate());
+ sltu(rd, rs, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ slt(rd, rt.rm(), rs);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ RV_li(scratch, rt.immediate());
+ slt(rd, scratch, rs);
+ }
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ sltu(rd, rt.rm(), rs);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ RV_li(scratch, rt.immediate());
+ sltu(rd, scratch, rs);
+ }
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sge(Register rd, Register rs, const Operand& rt) {
+ Slt(rd, rs, rt);
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sgeu(Register rd, Register rs, const Operand& rt) {
+ Sltu(rd, rs, rt);
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ slt(rd, rt.rm(), rs);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ RV_li(scratch, rt.immediate());
+ slt(rd, scratch, rs);
+ }
+}
+
+void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ sltu(rd, rt.rm(), rs);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ RV_li(scratch, rt.immediate());
+ sltu(rd, scratch, rs);
+ }
+}
+
+void TurboAssembler::Sll32(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ sllw(rd, rs, rt.rm());
+ } else {
+ uint8_t shamt = static_cast<uint8_t>(rt.immediate());
+ slliw(rd, rs, shamt);
+ }
+}
+
+void TurboAssembler::Sra32(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ sraw(rd, rs, rt.rm());
+ } else {
+ uint8_t shamt = static_cast<uint8_t>(rt.immediate());
+ sraiw(rd, rs, shamt);
+ }
+}
+
+void TurboAssembler::Srl32(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ srlw(rd, rs, rt.rm());
+ } else {
+ uint8_t shamt = static_cast<uint8_t>(rt.immediate());
+ srliw(rd, rs, shamt);
+ }
+}
+
+void TurboAssembler::Sra64(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ sra(rd, rs, rt.rm());
+ } else {
+ uint8_t shamt = static_cast<uint8_t>(rt.immediate());
+ srai(rd, rs, shamt);
+ }
+}
+
+void TurboAssembler::Srl64(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ srl(rd, rs, rt.rm());
+ } else {
+ uint8_t shamt = static_cast<uint8_t>(rt.immediate());
+ srli(rd, rs, shamt);
+ }
+}
+
+void TurboAssembler::Sll64(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ sll(rd, rs, rt.rm());
+ } else {
+ uint8_t shamt = static_cast<uint8_t>(rt.immediate());
+ slli(rd, rs, shamt);
+ }
+}
+
+void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ if (rt.is_reg()) {
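+    // rd = (rs >> (rt & 31)) | (rs << ((32 - rt) & 31)); negw supplies the
+    // left-shift amount because only the low 5 bits of the shift are used.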
+ negw(scratch, rt.rm());
+ sllw(scratch, rs, scratch);
+ srlw(rd, rs, rt.rm());
+ or_(rd, scratch, rd);
+ sext_w(rd, rd);
+ } else {
+ int64_t ror_value = rt.immediate() % 32;
+ if (ror_value == 0) {
+ mv(rd, rs);
+ return;
+ } else if (ror_value < 0) {
+ ror_value += 32;
+ }
+ srliw(scratch, rs, ror_value);
+ slliw(rd, rs, 32 - ror_value);
+ or_(rd, scratch, rd);
+ sext_w(rd, rd);
+ }
+}
+
+void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ if (rt.is_reg()) {
+ negw(scratch, rt.rm());
+ sll(scratch, rs, scratch);
+ srl(rd, rs, rt.rm());
+ or_(rd, scratch, rd);
+ } else {
+ int64_t dror_value = rt.immediate() % 64;
+ if (dror_value == 0) {
+ mv(rd, rs);
+ return;
+ } else if (dror_value < 0) {
+ dror_value += 64;
+ }
+ srli(scratch, rs, dror_value);
+ slli(rd, rs, 64 - dror_value);
+ or_(rd, scratch, rd);
+ }
+}
+
+void TurboAssembler::CalcScaledAddress(Register rd, Register rt, Register rs,
+ uint8_t sa, Register scratch) {
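+  // rd = rt + (rs << sa), using scratch when rd aliases rt.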
+ DCHECK(sa >= 1 && sa <= 31);
+ Register tmp = rd == rt ? scratch : rd;
+ DCHECK(tmp != rt);
+ slli(tmp, rs, sa);
+ Add64(rd, rt, tmp);
+}
+
+// ------------Pseudo-instructions-------------
+// Change endianness
+void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size) {
+ DCHECK(operand_size == 4 || operand_size == 8);
+ if (operand_size == 4) {
+    // uint32_t x1 = 0x00FF00FF;
+ // x0 = (x0 << 16 | x0 >> 16);
+ // x0 = (((x0 & x1) << 8) | ((x0 & (x1 << 8)) >> 8));
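+    // e.g., 0x12345678 -> 0x56781234 -> 0x78563412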
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK((rd != t6) && (rs != t6));
+ Register x0 = temps.Acquire();
+ Register x1 = temps.Acquire();
+ Register x2 = t6;
+ li(x1, 0x00FF00FF);
+ slliw(x0, rs, 16);
+ srliw(rd, rs, 16);
+ or_(x0, rd, x0); // x0 <- x0 << 16 | x0 >> 16
+ and_(x2, x0, x1); // x2 <- x0 & 0x00FF00FF
+ slliw(x2, x2, 8); // x2 <- (x0 & x1) << 8
+ slliw(x1, x1, 8); // x1 <- 0xFF00FF00
+ and_(rd, x0, x1); // x0 & 0xFF00FF00
+ srliw(rd, rd, 8);
+ or_(rd, rd, x2); // (((x0 & x1) << 8) | ((x0 & (x1 << 8)) >> 8))
+ } else {
+    // uint64_t x1 = 0x0000FFFF0000FFFFl;
+    // uint64_t x1 = 0x00FF00FF00FF00FFl;
+ // x0 = (x0 << 32 | x0 >> 32);
+ // x0 = (x0 & x1) << 16 | (x0 & (x1 << 16)) >> 16;
+ // x0 = (x0 & x1) << 8 | (x0 & (x1 << 8)) >> 8;
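+    // e.g., 0x0102030405060708 -> 0x0506070801020304
+    //    -> 0x0708050603040102 -> 0x0807060504030201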
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK((rd != t6) && (rs != t6));
+ Register x0 = temps.Acquire();
+ Register x1 = temps.Acquire();
+ Register x2 = t6;
+ li(x1, 0x0000FFFF0000FFFFl);
+ slli(x0, rs, 32);
+ srli(rd, rs, 32);
+ or_(x0, rd, x0); // x0 <- x0 << 32 | x0 >> 32
+ and_(x2, x0, x1); // x2 <- x0 & 0x0000FFFF0000FFFF
+ slli(x2, x2, 16); // x2 <- (x0 & 0x0000FFFF0000FFFF) << 16
+ slli(x1, x1, 16); // x1 <- 0xFFFF0000FFFF0000
+ and_(rd, x0, x1); // rd <- x0 & 0xFFFF0000FFFF0000
+ srli(rd, rd, 16); // rd <- x0 & (x1 << 16)) >> 16
+ or_(x0, rd, x2); // (x0 & x1) << 16 | (x0 & (x1 << 16)) >> 16;
+ li(x1, 0x00FF00FF00FF00FFl);
+ and_(x2, x0, x1); // x2 <- x0 & 0x00FF00FF00FF00FF
+ slli(x2, x2, 8); // x2 <- (x0 & x1) << 8
+ slli(x1, x1, 8); // x1 <- 0xFF00FF00FF00FF00
+ and_(rd, x0, x1);
+ srli(rd, rd, 8); // rd <- (x0 & (x1 << 8)) >> 8
+ or_(rd, rd, x2); // (((x0 & x1) << 8) | ((x0 & (x1 << 8)) >> 8))
+ }
+}
+
+template <int NBYTES, bool LOAD_SIGNED>
+void TurboAssembler::LoadNBytes(Register rd, const MemOperand& rs,
+ Register scratch) {
+ DCHECK(rd != rs.rm() && rd != scratch);
+ DCHECK_LE(NBYTES, 8);
+
+ // load the most significant byte
+ if (LOAD_SIGNED) {
+ lb(rd, rs.rm(), rs.offset() + (NBYTES - 1));
+ } else {
+ lbu(rd, rs.rm(), rs.offset() + (NBYTES - 1));
+ }
+
+ // load remaining (nbytes-1) bytes from higher to lower
+ slli(rd, rd, 8 * (NBYTES - 1));
+ for (int i = (NBYTES - 2); i >= 0; i--) {
+ lbu(scratch, rs.rm(), rs.offset() + i);
+ if (i) slli(scratch, scratch, i * 8);
+ or_(rd, rd, scratch);
+ }
+}
+
+template <int NBYTES, bool LOAD_SIGNED>
+void TurboAssembler::LoadNBytesOverwritingBaseReg(const MemOperand& rs,
+ Register scratch0,
+ Register scratch1) {
+  // Loads NBYTES bytes from the memory specified by rs into rs.rm().
+ DCHECK(rs.rm() != scratch0 && rs.rm() != scratch1 && scratch0 != scratch1);
+ DCHECK_LE(NBYTES, 8);
+
+ // load the most significant byte
+ if (LOAD_SIGNED) {
+ lb(scratch0, rs.rm(), rs.offset() + (NBYTES - 1));
+ } else {
+ lbu(scratch0, rs.rm(), rs.offset() + (NBYTES - 1));
+ }
+
+ // load remaining (nbytes-1) bytes from higher to lower
+ slli(scratch0, scratch0, 8 * (NBYTES - 1));
+ for (int i = (NBYTES - 2); i >= 0; i--) {
+ lbu(scratch1, rs.rm(), rs.offset() + i);
+ if (i) {
+ slli(scratch1, scratch1, i * 8);
+ or_(scratch0, scratch0, scratch1);
+ } else {
+ // write to rs.rm() when processing the last byte
+ or_(rs.rm(), scratch0, scratch1);
+ }
+ }
+}
+
+template <int NBYTES, bool IS_SIGNED>
+void TurboAssembler::UnalignedLoadHelper(Register rd, const MemOperand& rs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ UseScratchRegisterScope temps(this);
+
+ if (NeedAdjustBaseAndOffset(rs, OffsetAccessType::TWO_ACCESSES, NBYTES - 1)) {
+    // Adjust offset for two accesses; offset + (NBYTES - 1) must fit in int12.
+ MemOperand source = rs;
+ Register scratch_base = temps.Acquire();
+ DCHECK(scratch_base != rs.rm());
+ AdjustBaseAndOffset(&source, scratch_base, OffsetAccessType::TWO_ACCESSES,
+ NBYTES - 1);
+
+ // Since source.rm() is scratch_base, assume rd != source.rm()
+ DCHECK(rd != source.rm());
+ Register scratch_other = temps.Acquire();
+ LoadNBytes<NBYTES, IS_SIGNED>(rd, source, scratch_other);
+ } else {
+ // no need to adjust base-and-offset
+ if (rd != rs.rm()) {
+ Register scratch = temps.Acquire();
+ LoadNBytes<NBYTES, IS_SIGNED>(rd, rs, scratch);
+ } else { // rd == rs.rm()
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ LoadNBytesOverwritingBaseReg<NBYTES, IS_SIGNED>(rs, scratch, scratch2);
+ }
+ }
+}
+
+template <int NBYTES>
+void TurboAssembler::UnalignedFLoadHelper(FPURegister frd,
+ const MemOperand& rs) {
+ DCHECK(NBYTES == 4 || NBYTES == 8);
+
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ MemOperand source = rs;
+ UseScratchRegisterScope temps(this);
+ Register scratch_base = temps.Acquire();
+ if (NeedAdjustBaseAndOffset(rs, OffsetAccessType::TWO_ACCESSES, NBYTES - 1)) {
+    // Adjust offset for two accesses; offset + (NBYTES - 1) must fit in int12.
+ DCHECK(scratch_base != rs.rm());
+ AdjustBaseAndOffset(&source, scratch_base, OffsetAccessType::TWO_ACCESSES,
+ NBYTES - 1);
+ }
+ Register scratch_other = temps.Acquire();
+ Register scratch = t2;
+ push(t2);
+ DCHECK(scratch != rs.rm() && scratch_other != scratch &&
+ scratch_other != rs.rm());
+ LoadNBytes<NBYTES, true>(scratch, source, scratch_other);
+ if (NBYTES == 4)
+ fmv_w_x(frd, scratch);
+ else
+ fmv_d_x(frd, scratch);
+ pop(t2);
+}
+
+template <int NBYTES>
+void TurboAssembler::UnalignedStoreHelper(Register rd, const MemOperand& rs,
+ Register scratch_other) {
+ DCHECK(scratch_other != rs.rm());
+ DCHECK_LE(NBYTES, 8);
+ MemOperand source = rs;
+ UseScratchRegisterScope temps(this);
+ Register scratch_base = temps.Acquire();
+  // Adjust offset for two accesses; offset + (NBYTES - 1) must fit in int12.
+ if (NeedAdjustBaseAndOffset(rs, OffsetAccessType::TWO_ACCESSES, NBYTES - 1)) {
+ DCHECK(scratch_base != rd && scratch_base != rs.rm());
+ AdjustBaseAndOffset(&source, scratch_base, OffsetAccessType::TWO_ACCESSES,
+ NBYTES - 1);
+ }
+
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (scratch_other == no_reg) {
+ if (temps.hasAvailable()) {
+ scratch_other = temps.Acquire();
+ } else {
+ push(t2);
+ scratch_other = t2;
+ }
+ }
+
+ DCHECK(scratch_other != rd && scratch_other != rs.rm() &&
+ scratch_other != source.rm());
+
+ sb(rd, source.rm(), source.offset());
+ for (size_t i = 1; i <= (NBYTES - 1); i++) {
+ srli(scratch_other, rd, i * 8);
+ sb(scratch_other, source.rm(), source.offset() + i);
+ }
+ if (scratch_other == t2) {
+ pop(t2);
+ }
+}
+
+template <int NBYTES>
+void TurboAssembler::UnalignedFStoreHelper(FPURegister frd,
+ const MemOperand& rs) {
+ DCHECK(NBYTES == 8 || NBYTES == 4);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ if (NBYTES == 4) {
+ fmv_x_w(scratch, frd);
+ } else {
+ fmv_x_d(scratch, frd);
+ }
+ UnalignedStoreHelper<NBYTES>(scratch, rs);
+}
+
+template <typename Reg_T, typename Func>
+void TurboAssembler::AlignedLoadHelper(Reg_T target, const MemOperand& rs,
+ Func generator) {
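+  // The generator lambda emits the actual access once the base/offset pair has
+  // been adjusted so that the offset fits into an int12 immediate.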
+ MemOperand source = rs;
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (NeedAdjustBaseAndOffset(source)) {
+ Register scratch = temps.Acquire();
+ DCHECK(scratch != rs.rm());
+ AdjustBaseAndOffset(&source, scratch);
+ }
+ generator(target, source);
+}
+
+template <typename Reg_T, typename Func>
+void TurboAssembler::AlignedStoreHelper(Reg_T value, const MemOperand& rs,
+ Func generator) {
+ MemOperand source = rs;
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (NeedAdjustBaseAndOffset(source)) {
+ Register scratch = temps.Acquire();
+ // make sure scratch does not overwrite value
+ if (std::is_same<Reg_T, Register>::value)
+ DCHECK(scratch.code() != value.code());
+ DCHECK(scratch != rs.rm());
+ AdjustBaseAndOffset(&source, scratch);
+ }
+ generator(value, source);
+}
+
+void TurboAssembler::Ulw(Register rd, const MemOperand& rs) {
+ UnalignedLoadHelper<4, true>(rd, rs);
+}
+
+void TurboAssembler::Ulwu(Register rd, const MemOperand& rs) {
+ UnalignedLoadHelper<4, false>(rd, rs);
+}
+
+void TurboAssembler::Usw(Register rd, const MemOperand& rs) {
+ UnalignedStoreHelper<4>(rd, rs);
+}
+
+void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
+ UnalignedLoadHelper<2, true>(rd, rs);
+}
+
+void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
+ UnalignedLoadHelper<2, false>(rd, rs);
+}
+
+void TurboAssembler::Ush(Register rd, const MemOperand& rs) {
+ UnalignedStoreHelper<2>(rd, rs);
+}
+
+void TurboAssembler::Uld(Register rd, const MemOperand& rs) {
+ UnalignedLoadHelper<8, true>(rd, rs);
+}
+
+// Load a consecutive 32-bit word pair into a 64-bit register, putting the
+// first word in the low bits and the second word in the high bits.
+void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
+ Register scratch) {
+ Lwu(rd, rs);
+ Lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+ slli(scratch, scratch, 32);
+ Add64(rd, rd, scratch);
+}
+
+void TurboAssembler::Usd(Register rd, const MemOperand& rs) {
+ UnalignedStoreHelper<8>(rd, rs);
+}
+
+// Do a 64-bit store as two consecutive 32-bit stores to an unaligned address.
+void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
+ Register scratch) {
+ Sw(rd, rs);
+ srai(scratch, rd, 32);
+ Sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+}
+
+void TurboAssembler::ULoadFloat(FPURegister fd, const MemOperand& rs) {
+ UnalignedFLoadHelper<4>(fd, rs);
+}
+
+void TurboAssembler::UStoreFloat(FPURegister fd, const MemOperand& rs) {
+ UnalignedFStoreHelper<4>(fd, rs);
+}
+
+void TurboAssembler::ULoadDouble(FPURegister fd, const MemOperand& rs) {
+ UnalignedFLoadHelper<8>(fd, rs);
+}
+
+void TurboAssembler::UStoreDouble(FPURegister fd, const MemOperand& rs) {
+ UnalignedFStoreHelper<8>(fd, rs);
+}
+
+void TurboAssembler::Lb(Register rd, const MemOperand& rs) {
+ auto fn = [this](Register target, const MemOperand& source) {
+ this->lb(target, source.rm(), source.offset());
+ };
+ AlignedLoadHelper(rd, rs, fn);
+}
+
+void TurboAssembler::Lbu(Register rd, const MemOperand& rs) {
+ auto fn = [this](Register target, const MemOperand& source) {
+ this->lbu(target, source.rm(), source.offset());
+ };
+ AlignedLoadHelper(rd, rs, fn);
+}
+
+void TurboAssembler::Sb(Register rd, const MemOperand& rs) {
+ auto fn = [this](Register value, const MemOperand& source) {
+ this->sb(value, source.rm(), source.offset());
+ };
+ AlignedStoreHelper(rd, rs, fn);
+}
+
+void TurboAssembler::Lh(Register rd, const MemOperand& rs) {
+ auto fn = [this](Register target, const MemOperand& source) {
+ this->lh(target, source.rm(), source.offset());
+ };
+ AlignedLoadHelper(rd, rs, fn);
+}
+
+void TurboAssembler::Lhu(Register rd, const MemOperand& rs) {
+ auto fn = [this](Register target, const MemOperand& source) {
+ this->lhu(target, source.rm(), source.offset());
+ };
+ AlignedLoadHelper(rd, rs, fn);
+}
+
+void TurboAssembler::Sh(Register rd, const MemOperand& rs) {
+ auto fn = [this](Register value, const MemOperand& source) {
+ this->sh(value, source.rm(), source.offset());
+ };
+ AlignedStoreHelper(rd, rs, fn);
+}
+
+void TurboAssembler::Lw(Register rd, const MemOperand& rs) {
+ auto fn = [this](Register target, const MemOperand& source) {
+ this->lw(target, source.rm(), source.offset());
+ };
+ AlignedLoadHelper(rd, rs, fn);
+}
+
+void TurboAssembler::Lwu(Register rd, const MemOperand& rs) {
+ auto fn = [this](Register target, const MemOperand& source) {
+ this->lwu(target, source.rm(), source.offset());
+ };
+ AlignedLoadHelper(rd, rs, fn);
+}
+
+void TurboAssembler::Sw(Register rd, const MemOperand& rs) {
+ auto fn = [this](Register value, const MemOperand& source) {
+ this->sw(value, source.rm(), source.offset());
+ };
+ AlignedStoreHelper(rd, rs, fn);
+}
+
+void TurboAssembler::Ld(Register rd, const MemOperand& rs) {
+ auto fn = [this](Register target, const MemOperand& source) {
+ this->ld(target, source.rm(), source.offset());
+ };
+ AlignedLoadHelper(rd, rs, fn);
+}
+
+void TurboAssembler::Sd(Register rd, const MemOperand& rs) {
+ auto fn = [this](Register value, const MemOperand& source) {
+ this->sd(value, source.rm(), source.offset());
+ };
+ AlignedStoreHelper(rd, rs, fn);
+}
+
+void TurboAssembler::LoadFloat(FPURegister fd, const MemOperand& src) {
+ auto fn = [this](FPURegister target, const MemOperand& source) {
+ this->flw(target, source.rm(), source.offset());
+ };
+ AlignedLoadHelper(fd, src, fn);
+}
+
+void TurboAssembler::StoreFloat(FPURegister fs, const MemOperand& src) {
+ auto fn = [this](FPURegister value, const MemOperand& source) {
+ this->fsw(value, source.rm(), source.offset());
+ };
+ AlignedStoreHelper(fs, src, fn);
+}
+
+void TurboAssembler::LoadDouble(FPURegister fd, const MemOperand& src) {
+ auto fn = [this](FPURegister target, const MemOperand& source) {
+ this->fld(target, source.rm(), source.offset());
+ };
+ AlignedLoadHelper(fd, src, fn);
+}
+
+void TurboAssembler::StoreDouble(FPURegister fs, const MemOperand& src) {
+ auto fn = [this](FPURegister value, const MemOperand& source) {
+ this->fsd(value, source.rm(), source.offset());
+ };
+ AlignedStoreHelper(fs, src, fn);
+}
+
+void TurboAssembler::Ll(Register rd, const MemOperand& rs) {
+ bool is_one_instruction = rs.offset() == 0;
+ if (is_one_instruction) {
+ lr_w(false, false, rd, rs.rm());
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Add64(scratch, rs.rm(), rs.offset());
+ lr_w(false, false, rd, scratch);
+ }
+}
+
+void TurboAssembler::Lld(Register rd, const MemOperand& rs) {
+ bool is_one_instruction = rs.offset() == 0;
+ if (is_one_instruction) {
+ lr_d(false, false, rd, rs.rm());
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Add64(scratch, rs.rm(), rs.offset());
+ lr_d(false, false, rd, scratch);
+ }
+}
+
+void TurboAssembler::Sc(Register rd, const MemOperand& rs) {
+ bool is_one_instruction = rs.offset() == 0;
+ if (is_one_instruction) {
+ sc_w(false, false, rd, rs.rm(), rd);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Add64(scratch, rs.rm(), rs.offset());
+ sc_w(false, false, rd, scratch, rd);
+ }
+}
+
+void TurboAssembler::Scd(Register rd, const MemOperand& rs) {
+ bool is_one_instruction = rs.offset() == 0;
+ if (is_one_instruction) {
+ sc_d(false, false, rd, rs.rm(), rd);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Add64(scratch, rs.rm(), rs.offset());
+ sc_d(false, false, rd, scratch, rd);
+ }
+}
+
+void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
+ // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+ // non-isolate-independent code. In many cases it might be cheaper than
+ // embedding the relocatable value.
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadConstant(dst, value);
+ return;
+ }
+ li(dst, Operand(value), mode);
+}
+
+void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
+ // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+ // non-isolate-independent code. In many cases it might be cheaper than
+ // embedding the relocatable value.
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadExternalReference(dst, value);
+ return;
+ }
+ li(dst, Operand(value), mode);
+}
+
+void TurboAssembler::li(Register dst, const StringConstantBase* string,
+ LiFlags mode) {
+ li(dst, Operand::EmbeddedStringConstant(string), mode);
+}
+
+static inline int InstrCountForLiLower32Bit(int64_t value) {
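+  // A lower-32-bit immediate is materialized as an lui for the upper 20 bits
+  // plus an addi(w) for the low 12 bits; if either part is zero, a single
+  // instruction suffices.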
+ int64_t Hi20 = ((value + 0x800) >> 12);
+ int64_t Lo12 = value << 52 >> 52;
+ if (Hi20 == 0 || Lo12 == 0) {
+ return 1;
+ }
+ return 2;
+}
+
+int TurboAssembler::InstrCountForLi64Bit(int64_t value) {
+ if (is_int32(value)) {
+ return InstrCountForLiLower32Bit(value);
+ } else {
+ return li_estimate(value);
+ }
+ UNREACHABLE();
+ return INT_MAX;
+}
+
+void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
+ DCHECK(!j.is_reg());
+ DCHECK(!MustUseReg(j.rmode()));
+ DCHECK(mode == OPTIMIZE_SIZE);
+ RV_li(rd, j.immediate());
+}
+
+void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
+ DCHECK(!j.is_reg());
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) {
+ UseScratchRegisterScope temps(this);
+ int count = li_estimate(j.immediate(), temps.hasAvailable());
+ int reverse_count = li_estimate(~j.immediate(), temps.hasAvailable());
+ if (!FLAG_disable_riscv_constant_pool && count >= 4 && reverse_count >= 4) {
+      // Load the address from the constant pool.
+ RecordEntry((uint64_t)j.immediate(), j.rmode());
+ auipc(rd, 0);
+      // Record the value into the constant pool.
+ ld(rd, rd, 0);
+ } else {
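+      // Materializing ~imm and flipping it afterwards is only worthwhile if it
+      // saves more than the one extra not_ instruction.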
+ if ((count - reverse_count) > 1) {
+ RV_li(rd, ~j.immediate());
+ not_(rd, rd);
+ } else {
+ RV_li(rd, j.immediate());
+ }
+ }
+ } else if (MustUseReg(j.rmode())) {
+ int64_t immediate;
+ if (j.IsHeapObjectRequest()) {
+ RequestHeapObject(j.heap_object_request());
+ immediate = 0;
+ } else {
+ immediate = j.immediate();
+ }
+
+ RecordRelocInfo(j.rmode(), immediate);
+ li_ptr(rd, immediate);
+ } else if (mode == ADDRESS_LOAD) {
+ // We always need the same number of instructions as we may need to patch
+ // this code to load another value which may need all 6 instructions.
+ RecordRelocInfo(j.rmode());
+ li_ptr(rd, j.immediate());
+  } else {
+    // Always emit the same 48-bit instruction sequence.
+ li_ptr(rd, j.immediate());
+ }
+}
+
+static RegList t_regs = Register::ListOf(t0, t1, t2, t3, t4, t5, t6);
+static RegList a_regs = Register::ListOf(a0, a1, a2, a3, a4, a5, a6, a7);
+static RegList s_regs =
+ Register::ListOf(s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11);
+
+void TurboAssembler::MultiPush(RegList regs) {
+ int16_t num_to_push = base::bits::CountPopulation(regs);
+ int16_t stack_offset = num_to_push * kPointerSize;
+
+#define TEST_AND_PUSH_REG(reg) \
+ if ((regs & reg.bit()) != 0) { \
+ stack_offset -= kPointerSize; \
+ Sd(reg, MemOperand(sp, stack_offset)); \
+ regs &= ~reg.bit(); \
+ }
+
+#define T_REGS(V) V(t6) V(t5) V(t4) V(t3) V(t2) V(t1) V(t0)
+#define A_REGS(V) V(a7) V(a6) V(a5) V(a4) V(a3) V(a2) V(a1) V(a0)
+#define S_REGS(V) \
+ V(s11) V(s10) V(s9) V(s8) V(s7) V(s6) V(s5) V(s4) V(s3) V(s2) V(s1)
+
+ Sub64(sp, sp, Operand(stack_offset));
+
+  // Certain usages of MultiPush require that registers are pushed onto the
+  // stack in a particular order: ra, fp, sp, gp, ... (basically in decreasing
+  // order of register numbers, as in the MIPS port).
+ TEST_AND_PUSH_REG(ra);
+ TEST_AND_PUSH_REG(fp);
+ TEST_AND_PUSH_REG(sp);
+ TEST_AND_PUSH_REG(gp);
+ TEST_AND_PUSH_REG(tp);
+ if ((regs & s_regs) != 0) {
+ S_REGS(TEST_AND_PUSH_REG)
+ }
+ if ((regs & a_regs) != 0) {
+ A_REGS(TEST_AND_PUSH_REG)
+ }
+ if ((regs & t_regs) != 0) {
+ T_REGS(TEST_AND_PUSH_REG)
+ }
+
+ DCHECK_EQ(regs, 0);
+
+#undef TEST_AND_PUSH_REG
+#undef T_REGS
+#undef A_REGS
+#undef S_REGS
+}
+
+void TurboAssembler::MultiPop(RegList regs) {
+ int16_t stack_offset = 0;
+
+#define TEST_AND_POP_REG(reg) \
+ if ((regs & reg.bit()) != 0) { \
+ Ld(reg, MemOperand(sp, stack_offset)); \
+ stack_offset += kPointerSize; \
+ regs &= ~reg.bit(); \
+ }
+
+#define T_REGS(V) V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6)
+#define A_REGS(V) V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) V(a6) V(a7)
+#define S_REGS(V) \
+ V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(s8) V(s9) V(s10) V(s11)
+
+  // MultiPop pops from the stack in the reverse order of MultiPush.
+ if ((regs & t_regs) != 0) {
+ T_REGS(TEST_AND_POP_REG)
+ }
+ if ((regs & a_regs) != 0) {
+ A_REGS(TEST_AND_POP_REG)
+ }
+ if ((regs & s_regs) != 0) {
+ S_REGS(TEST_AND_POP_REG)
+ }
+ TEST_AND_POP_REG(tp);
+ TEST_AND_POP_REG(gp);
+ TEST_AND_POP_REG(sp);
+ TEST_AND_POP_REG(fp);
+ TEST_AND_POP_REG(ra);
+
+ DCHECK_EQ(regs, 0);
+
+ addi(sp, sp, stack_offset);
+
+#undef TEST_AND_POP_REG
+#undef T_REGS
+#undef S_REGS
+#undef A_REGS
+}
+
+void TurboAssembler::MultiPushFPU(RegList regs) {
+ int16_t num_to_push = base::bits::CountPopulation(regs);
+ int16_t stack_offset = num_to_push * kDoubleSize;
+
+ Sub64(sp, sp, Operand(stack_offset));
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs & (1 << i)) != 0) {
+ stack_offset -= kDoubleSize;
+ StoreDouble(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+ }
+ }
+}
+
+void TurboAssembler::MultiPopFPU(RegList regs) {
+ int16_t stack_offset = 0;
+
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs & (1 << i)) != 0) {
+ LoadDouble(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+ stack_offset += kDoubleSize;
+ }
+ }
+ addi(sp, sp, stack_offset);
+}
+
+void TurboAssembler::ExtractBits(Register rt, Register rs, uint16_t pos,
+ uint16_t size, bool sign_extend) {
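+  // Shift the field up so its MSB lands in bit 63, then shift back down so it
+  // lands in bit 0; srai vs. srli selects sign or zero extension.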
+ DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size &&
+ pos + size <= 64);
+ slli(rt, rs, 64 - (pos + size));
+ if (sign_extend) {
+ srai(rt, rt, 64 - size);
+ } else {
+ srli(rt, rt, 64 - size);
+ }
+}
+
+void TurboAssembler::InsertBits(Register dest, Register source, Register pos,
+ int size) {
+ DCHECK_LT(size, 64);
+ UseScratchRegisterScope temps(this);
+ Register mask = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register source_ = temps.Acquire();
+  // Create a mask of width size.
+ li(mask, 1);
+ slli(mask, mask, size);
+ addi(mask, mask, -1);
+ and_(source_, mask, source);
+ sll(source_, source_, pos);
+  // Make a mask with zeroes in the size-bit field starting at "pos".
+ sll(mask, mask, pos);
+ not_(mask, mask);
+  // Clear the destination bits that will receive the source field.
+ and_(dest, mask, dest);
+  // Insert the source field.
+ or_(dest, dest, source_);
+}
+
+void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) { fneg_s(fd, fs); }
+
+void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) { fneg_d(fd, fs); }
+
+void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
+ // Convert rs to a FP value in fd.
+ fcvt_d_wu(fd, rs);
+}
+
+void TurboAssembler::Cvt_d_w(FPURegister fd, Register rs) {
+ // Convert rs to a FP value in fd.
+ fcvt_d_w(fd, rs);
+}
+
+void TurboAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
+ // Convert rs to a FP value in fd.
+ fcvt_d_lu(fd, rs);
+}
+
+void TurboAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
+ // Convert rs to a FP value in fd.
+ fcvt_s_wu(fd, rs);
+}
+
+void TurboAssembler::Cvt_s_w(FPURegister fd, Register rs) {
+ // Convert rs to a FP value in fd.
+ fcvt_s_w(fd, rs);
+}
+
+void TurboAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
+ // Convert rs to a FP value in fd.
+ fcvt_s_lu(fd, rs);
+}
+
+template <typename CvtFunc>
+void TurboAssembler::RoundFloatingPointToInteger(Register rd, FPURegister fs,
+ Register result,
+ CvtFunc fcvt_generator) {
+ // Save csr_fflags to scratch & clear exception flags
+ if (result.is_valid()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+
+ int exception_flags = kInvalidOperation;
+ csrrci(scratch, csr_fflags, exception_flags);
+
+ // actual conversion instruction
+ fcvt_generator(this, rd, fs);
+
+ // check kInvalidOperation flag (out-of-range, NaN)
+ // set result to 1 if normal, otherwise set result to 0 for abnormal
+ frflags(result);
+ andi(result, result, exception_flags);
+ seqz(result, result); // result <-- 1 (normal), result <-- 0 (abnormal)
+
+ // restore csr_fflags
+ csrw(csr_fflags, scratch);
+ } else {
+ // actual conversion instruction
+ fcvt_generator(this, rd, fs);
+ }
+}
+
+void TurboAssembler::Trunc_uw_d(Register rd, FPURegister fs, Register result) {
+ RoundFloatingPointToInteger(
+ rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
+ tasm->fcvt_wu_d(dst, src, RTZ);
+ });
+}
+
+void TurboAssembler::Trunc_w_d(Register rd, FPURegister fs, Register result) {
+ RoundFloatingPointToInteger(
+ rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
+ tasm->fcvt_w_d(dst, src, RTZ);
+ });
+}
+
+void TurboAssembler::Trunc_uw_s(Register rd, FPURegister fs, Register result) {
+ RoundFloatingPointToInteger(
+ rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
+ tasm->fcvt_wu_s(dst, src, RTZ);
+ });
+}
+
+void TurboAssembler::Trunc_w_s(Register rd, FPURegister fs, Register result) {
+ RoundFloatingPointToInteger(
+ rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
+ tasm->fcvt_w_s(dst, src, RTZ);
+ });
+}
+
+void TurboAssembler::Trunc_ul_d(Register rd, FPURegister fs, Register result) {
+ RoundFloatingPointToInteger(
+ rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
+ tasm->fcvt_lu_d(dst, src, RTZ);
+ });
+}
+
+void TurboAssembler::Trunc_l_d(Register rd, FPURegister fs, Register result) {
+ RoundFloatingPointToInteger(
+ rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
+ tasm->fcvt_l_d(dst, src, RTZ);
+ });
+}
+
+void TurboAssembler::Trunc_ul_s(Register rd, FPURegister fs, Register result) {
+ RoundFloatingPointToInteger(
+ rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
+ tasm->fcvt_lu_s(dst, src, RTZ);
+ });
+}
+
+void TurboAssembler::Trunc_l_s(Register rd, FPURegister fs, Register result) {
+ RoundFloatingPointToInteger(
+ rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
+ tasm->fcvt_l_s(dst, src, RTZ);
+ });
+}
+
+void TurboAssembler::Round_w_s(Register rd, FPURegister fs, Register result) {
+ RoundFloatingPointToInteger(
+ rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
+ tasm->fcvt_w_s(dst, src, RNE);
+ });
+}
+
+void TurboAssembler::Round_w_d(Register rd, FPURegister fs, Register result) {
+ RoundFloatingPointToInteger(
+ rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
+ tasm->fcvt_w_d(dst, src, RNE);
+ });
+}
+
+void TurboAssembler::Ceil_w_s(Register rd, FPURegister fs, Register result) {
+ RoundFloatingPointToInteger(
+ rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
+ tasm->fcvt_w_s(dst, src, RUP);
+ });
+}
+
+void TurboAssembler::Ceil_w_d(Register rd, FPURegister fs, Register result) {
+ RoundFloatingPointToInteger(
+ rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
+ tasm->fcvt_w_d(dst, src, RUP);
+ });
+}
+
+void TurboAssembler::Floor_w_s(Register rd, FPURegister fs, Register result) {
+ RoundFloatingPointToInteger(
+ rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
+ tasm->fcvt_w_s(dst, src, RDN);
+ });
+}
+
+void TurboAssembler::Floor_w_d(Register rd, FPURegister fs, Register result) {
+ RoundFloatingPointToInteger(
+ rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
+ tasm->fcvt_w_d(dst, src, RDN);
+ });
+}
+
+// According to the JS ECMA specification, for floating-point round operations,
+// if the input is NaN, +/-Infinity, or +/-0, the same input is returned as the
+// rounded result; this differs from the behavior of RISC-V fcvt instructions
+// (which round out-of-range values to the nearest max or min value), therefore
+// special handling is needed for NaN, +/-Infinity, and +/-0.
+template <typename F>
+void TurboAssembler::RoundHelper(FPURegister dst, FPURegister src,
+ FPURegister fpu_scratch, RoundingMode frm) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.Acquire();
+
+ DCHECK((std::is_same<float, F>::value) || (std::is_same<double, F>::value));
+ // Need at least two FPRs, so check against dst == src == fpu_scratch
+ DCHECK(!(dst == src && dst == fpu_scratch));
+
+ const int kFloat32ExponentBias = 127;
+ const int kFloat32MantissaBits = 23;
+ const int kFloat32ExponentBits = 8;
+ const int kFloat64ExponentBias = 1023;
+ const int kFloat64MantissaBits = 52;
+ const int kFloat64ExponentBits = 11;
+ const int kFloatMantissaBits =
+ sizeof(F) == 4 ? kFloat32MantissaBits : kFloat64MantissaBits;
+ const int kFloatExponentBits =
+ sizeof(F) == 4 ? kFloat32ExponentBits : kFloat64ExponentBits;
+ const int kFloatExponentBias =
+ sizeof(F) == 4 ? kFloat32ExponentBias : kFloat64ExponentBias;
+
+ Label done;
+
+ {
+ UseScratchRegisterScope temps2(this);
+ Register scratch = temps2.Acquire();
+ // extract exponent value of the source floating-point to scratch
+ if (std::is_same<F, double>::value) {
+ fmv_x_d(scratch, src);
+ } else {
+ fmv_x_w(scratch, src);
+ }
+ ExtractBits(scratch2, scratch, kFloatMantissaBits, kFloatExponentBits);
+ }
+
+ // if src is NaN/+-Infinity/+-Zero or if the exponent is larger than # of bits
+ // in mantissa, the result is the same as src, so move src to dest (to avoid
+ // generating another branch)
+ if (dst != src) {
+ if (std::is_same<F, double>::value) {
+ fmv_d(dst, src);
+ } else {
+ fmv_s(dst, src);
+ }
+ }
+
+  // If the real exponent (i.e., scratch2 - kFloatExponentBias) is at least
+  // kFloatMantissaBits, the floating-point value has no fractional part, so
+  // the input is already rounded; jump to done. Note that NaN and Infinity
+  // set the maximal exponent value in the floating-point representation, so
+  // they also satisfy (scratch2 - kFloatExponentBias >= kFloatMantissaBits),
+  // and JS round semantics specify that rounding NaN (Infinity) returns NaN
+  // (Infinity), so NaN and Infinity are considered already rounded too.
+ Branch(&done, greater_equal, scratch2,
+ Operand(kFloatExponentBias + kFloatMantissaBits));
+
+ // Actual rounding is needed along this path
+
+ // old_src holds the original input, needed for the case of src == dst
+ FPURegister old_src = src;
+ if (src == dst) {
+ DCHECK(fpu_scratch != dst);
+ Move(fpu_scratch, src);
+ old_src = fpu_scratch;
+ }
+
+  // Since only inputs whose real exponent value is less than kFloatMantissaBits
+  // (i.e., 23 or 52) fall into this path, the input's magnitude fits in the
+  // range of a 32- or 64-bit integer. So we round the input to an integer
+  // value, then convert it back to floating-point.
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ if (std::is_same<F, double>::value) {
+ fcvt_l_d(scratch, src, frm);
+ fcvt_d_l(dst, scratch, frm);
+ } else {
+ fcvt_w_s(scratch, src, frm);
+ fcvt_s_w(dst, scratch, frm);
+ }
+ }
+  // Special handling is needed if the input is a very small positive/negative
+  // number that rounds to zero. JS semantics requires that the rounded result
+  // retains the sign of the input, so a very small positive (negative)
+  // floating-point number should be rounded to positive (negative) 0.
+  // Therefore, we use sign-bit injection to produce +/-0 correctly. Instead of
+  // testing for zero with a branch, we just inject the sign bit for every value
+  // on this path (this is where old_src is needed).
+ if (std::is_same<F, double>::value) {
+ fsgnj_d(dst, dst, old_src);
+ } else {
+ fsgnj_s(dst, dst, old_src);
+ }
+
+ bind(&done);
+}
+
+void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src,
+ FPURegister fpu_scratch) {
+ RoundHelper<double>(dst, src, fpu_scratch, RDN);
+}
+
+void TurboAssembler::Ceil_d_d(FPURegister dst, FPURegister src,
+ FPURegister fpu_scratch) {
+ RoundHelper<double>(dst, src, fpu_scratch, RUP);
+}
+
+void TurboAssembler::Trunc_d_d(FPURegister dst, FPURegister src,
+ FPURegister fpu_scratch) {
+ RoundHelper<double>(dst, src, fpu_scratch, RTZ);
+}
+
+void TurboAssembler::Round_d_d(FPURegister dst, FPURegister src,
+ FPURegister fpu_scratch) {
+ RoundHelper<double>(dst, src, fpu_scratch, RNE);
+}
+
+void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src,
+ FPURegister fpu_scratch) {
+ RoundHelper<float>(dst, src, fpu_scratch, RDN);
+}
+
+void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src,
+ FPURegister fpu_scratch) {
+ RoundHelper<float>(dst, src, fpu_scratch, RUP);
+}
+
+void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src,
+ FPURegister fpu_scratch) {
+ RoundHelper<float>(dst, src, fpu_scratch, RTZ);
+}
+
+void TurboAssembler::Round_s_s(FPURegister dst, FPURegister src,
+ FPURegister fpu_scratch) {
+ RoundHelper<float>(dst, src, fpu_scratch, RNE);
+}
+
+void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft) {
+ fmadd_s(fd, fs, ft, fr);
+}
+
+void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft) {
+ fmadd_d(fd, fs, ft, fr);
+}
+
+void MacroAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft) {
+ fmsub_s(fd, fs, ft, fr);
+}
+
+void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft) {
+ fmsub_d(fd, fs, ft, fr);
+}
+
+void TurboAssembler::CompareF32(Register rd, FPUCondition cc, FPURegister cmp1,
+ FPURegister cmp2) {
+ switch (cc) {
+ case EQ:
+ feq_s(rd, cmp1, cmp2);
+ break;
+ case NE:
+ feq_s(rd, cmp1, cmp2);
+ NegateBool(rd, rd);
+ break;
+ case LT:
+ flt_s(rd, cmp1, cmp2);
+ break;
+ case GE:
+ fle_s(rd, cmp2, cmp1);
+ break;
+ case LE:
+ fle_s(rd, cmp1, cmp2);
+ break;
+ case GT:
+ flt_s(rd, cmp2, cmp1);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void TurboAssembler::CompareF64(Register rd, FPUCondition cc, FPURegister cmp1,
+ FPURegister cmp2) {
+ switch (cc) {
+ case EQ:
+ feq_d(rd, cmp1, cmp2);
+ break;
+ case NE:
+ feq_d(rd, cmp1, cmp2);
+ NegateBool(rd, rd);
+ break;
+ case LT:
+ flt_d(rd, cmp1, cmp2);
+ break;
+ case GE:
+ fle_d(rd, cmp2, cmp1);
+ break;
+ case LE:
+ fle_d(rd, cmp1, cmp2);
+ break;
+ case GT:
+ flt_d(rd, cmp2, cmp1);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void TurboAssembler::CompareIsNanF32(Register rd, FPURegister cmp1,
+ FPURegister cmp2) {
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = temps.Acquire();
+
+ feq_s(rd, cmp1, cmp1); // rd <- !isNan(cmp1)
+ feq_s(scratch, cmp2, cmp2); // scratch <- !isNaN(cmp2)
+ And(rd, rd, scratch); // rd <- !isNan(cmp1) && !isNan(cmp2)
+ Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2)
+}
+
+void TurboAssembler::CompareIsNanF64(Register rd, FPURegister cmp1,
+ FPURegister cmp2) {
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = temps.Acquire();
+
+ feq_d(rd, cmp1, cmp1); // rd <- !isNan(cmp1)
+ feq_d(scratch, cmp2, cmp2); // scratch <- !isNaN(cmp2)
+ And(rd, rd, scratch); // rd <- !isNan(cmp1) && !isNan(cmp2)
+ Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2)
+}
+
+void TurboAssembler::BranchTrueShortF(Register rs, Label* target) {
+ Branch(target, not_equal, rs, Operand(zero_reg));
+}
+
+void TurboAssembler::BranchFalseShortF(Register rs, Label* target) {
+ Branch(target, equal, rs, Operand(zero_reg));
+}
+
+void TurboAssembler::BranchTrueF(Register rs, Label* target) {
+ bool long_branch =
+ target->is_bound() ? !is_near(target) : is_trampoline_emitted();
+ if (long_branch) {
+ Label skip;
+ BranchFalseShortF(rs, &skip);
+ BranchLong(target);
+ bind(&skip);
+ } else {
+ BranchTrueShortF(rs, target);
+ }
+}
+
+void TurboAssembler::BranchFalseF(Register rs, Label* target) {
+ bool long_branch =
+ target->is_bound() ? !is_near(target) : is_trampoline_emitted();
+ if (long_branch) {
+ Label skip;
+ BranchTrueShortF(rs, &skip);
+ BranchLong(target);
+ bind(&skip);
+ } else {
+ BranchFalseShortF(rs, target);
+ }
+}
+
+void TurboAssembler::InsertHighWordF64(FPURegister dst, Register src_high) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ DCHECK(src_high != scratch2 && src_high != scratch);
+
+ fmv_x_d(scratch, dst);
+ slli(scratch2, src_high, 32);
+ slli(scratch, scratch, 32);
+ srli(scratch, scratch, 32);
+ or_(scratch, scratch, scratch2);
+ fmv_d_x(dst, scratch);
+}
+
+void TurboAssembler::InsertLowWordF64(FPURegister dst, Register src_low) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ DCHECK(src_low != scratch && src_low != scratch2);
+ fmv_x_d(scratch, dst);
+ slli(scratch2, src_low, 32);
+ srli(scratch2, scratch2, 32);
+ srli(scratch, scratch, 32);
+ slli(scratch, scratch, 32);
+ or_(scratch, scratch, scratch2);
+ fmv_d_x(dst, scratch);
+}
+
+void TurboAssembler::LoadFPRImmediate(FPURegister dst, uint32_t src) {
+ // Handle special values first.
+ if (src == bit_cast<uint32_t>(0.0f) && has_single_zero_reg_set_) {
+ if (dst != kDoubleRegZero) fmv_s(dst, kDoubleRegZero);
+ } else if (src == bit_cast<uint32_t>(-0.0f) && has_single_zero_reg_set_) {
+ Neg_s(dst, kDoubleRegZero);
+ } else {
+ if (dst == kDoubleRegZero) {
+ DCHECK(src == bit_cast<uint32_t>(0.0f));
+ fmv_w_x(dst, zero_reg);
+ has_single_zero_reg_set_ = true;
+ has_double_zero_reg_set_ = false;
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(static_cast<int32_t>(src)));
+ fmv_w_x(dst, scratch);
+ }
+ }
+}
+
+void TurboAssembler::LoadFPRImmediate(FPURegister dst, uint64_t src) {
+ // Handle special values first.
+ if (src == bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) {
+ if (dst != kDoubleRegZero) fmv_d(dst, kDoubleRegZero);
+ } else if (src == bit_cast<uint64_t>(-0.0) && has_double_zero_reg_set_) {
+ Neg_d(dst, kDoubleRegZero);
+ } else {
+ if (dst == kDoubleRegZero) {
+ DCHECK(src == bit_cast<uint64_t>(0.0));
+ fmv_d_x(dst, zero_reg);
+ has_double_zero_reg_set_ = true;
+ has_single_zero_reg_set_ = false;
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(src));
+ fmv_d_x(dst, scratch);
+ }
+ }
+}
+
+void TurboAssembler::CompareI(Register rd, Register rs, const Operand& rt,
+ Condition cond) {
+ switch (cond) {
+ case eq:
+ Seq(rd, rs, rt);
+ break;
+ case ne:
+ Sne(rd, rs, rt);
+ break;
+
+ // Signed comparison.
+ case greater:
+ Sgt(rd, rs, rt);
+ break;
+ case greater_equal:
+ Sge(rd, rs, rt); // rs >= rt
+ break;
+ case less:
+ Slt(rd, rs, rt); // rs < rt
+ break;
+ case less_equal:
+ Sle(rd, rs, rt); // rs <= rt
+ break;
+
+ // Unsigned comparison.
+ case Ugreater:
+ Sgtu(rd, rs, rt); // rs > rt
+ break;
+ case Ugreater_equal:
+ Sgeu(rd, rs, rt); // rs >= rt
+ break;
+ case Uless:
+ Sltu(rd, rs, rt); // rs < rt
+ break;
+ case Uless_equal:
+ Sleu(rd, rs, rt); // rs <= rt
+ break;
+ case cc_always:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+// dest <- (condition != 0 ? zero : dest)
+void TurboAssembler::LoadZeroIfConditionNotZero(Register dest,
+ Register condition) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ seqz(scratch, condition);
+ // neg + and may be more efficient than mul(dest, dest, scratch)
+ neg(scratch, scratch); // 0 is still 0, 1 becomes all 1s
+ and_(dest, dest, scratch);
+}
+
+// dest <- (condition == 0 ? 0 : dest)
+void TurboAssembler::LoadZeroIfConditionZero(Register dest,
+ Register condition) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ snez(scratch, condition);
+ // neg + and may be more efficient than mul(dest, dest, scratch);
+ neg(scratch, scratch); // 0 is still 0, 1 becomes all 1s
+ and_(dest, dest, scratch);
+}
+
+void TurboAssembler::Clz32(Register rd, Register xx) {
+ // 32 bit unsigned in lower word: count number of leading zeros.
+ // int n = 32;
+ // unsigned y;
+
+ // y = x >>16; if (y != 0) { n = n -16; x = y; }
+ // y = x >> 8; if (y != 0) { n = n - 8; x = y; }
+ // y = x >> 4; if (y != 0) { n = n - 4; x = y; }
+ // y = x >> 2; if (y != 0) { n = n - 2; x = y; }
+ // y = x >> 1; if (y != 0) {rd = n - 2; return;}
+ // rd = n - x;
+
+ Label L0, L1, L2, L3, L4;
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register x = rd;
+ Register y = temps.Acquire();
+ Register n = temps.Acquire();
+ DCHECK(xx != y && xx != n);
+ Move(x, xx);
+ li(n, Operand(32));
+ srliw(y, x, 16);
+ Branch(&L0, eq, y, Operand(zero_reg));
+ Move(x, y);
+ addiw(n, n, -16);
+ bind(&L0);
+ srliw(y, x, 8);
+ Branch(&L1, eq, y, Operand(zero_reg));
+ addiw(n, n, -8);
+ Move(x, y);
+ bind(&L1);
+ srliw(y, x, 4);
+ Branch(&L2, eq, y, Operand(zero_reg));
+ addiw(n, n, -4);
+ Move(x, y);
+ bind(&L2);
+ srliw(y, x, 2);
+ Branch(&L3, eq, y, Operand(zero_reg));
+ addiw(n, n, -2);
+ Move(x, y);
+ bind(&L3);
+ srliw(y, x, 1);
+ subw(rd, n, x);
+ Branch(&L4, eq, y, Operand(zero_reg));
+ addiw(rd, n, -2);
+ bind(&L4);
+}
+
+void TurboAssembler::Clz64(Register rd, Register xx) {
+ // 64 bit: count number of leading zeros.
+ // int n = 64;
+ // unsigned y;
+
+ // y = x >>32; if (y != 0) { n = n - 32; x = y; }
+ // y = x >>16; if (y != 0) { n = n - 16; x = y; }
+ // y = x >> 8; if (y != 0) { n = n - 8; x = y; }
+ // y = x >> 4; if (y != 0) { n = n - 4; x = y; }
+ // y = x >> 2; if (y != 0) { n = n - 2; x = y; }
+ // y = x >> 1; if (y != 0) {rd = n - 2; return;}
+ // rd = n - x;
+
+ Label L0, L1, L2, L3, L4, L5;
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register x = rd;
+ Register y = temps.Acquire();
+ Register n = temps.Acquire();
+ DCHECK(xx != y && xx != n);
+ Move(x, xx);
+ li(n, Operand(64));
+ srli(y, x, 32);
+ Branch(&L0, eq, y, Operand(zero_reg));
+ addiw(n, n, -32);
+ Move(x, y);
+ bind(&L0);
+ srli(y, x, 16);
+ Branch(&L1, eq, y, Operand(zero_reg));
+ addiw(n, n, -16);
+ Move(x, y);
+ bind(&L1);
+ srli(y, x, 8);
+ Branch(&L2, eq, y, Operand(zero_reg));
+ addiw(n, n, -8);
+ Move(x, y);
+ bind(&L2);
+ srli(y, x, 4);
+ Branch(&L3, eq, y, Operand(zero_reg));
+ addiw(n, n, -4);
+ Move(x, y);
+ bind(&L3);
+ srli(y, x, 2);
+ Branch(&L4, eq, y, Operand(zero_reg));
+ addiw(n, n, -2);
+ Move(x, y);
+ bind(&L4);
+ srli(y, x, 1);
+ subw(rd, n, x);
+ Branch(&L5, eq, y, Operand(zero_reg));
+ addiw(rd, n, -2);
+ bind(&L5);
+}
+
+void TurboAssembler::Ctz32(Register rd, Register rs) {
+ // Convert trailing zeroes to trailing ones, and bits to their left
+ // to zeroes.
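+  // e.g., for rs = ...1000: rs - 1 = ...0111, rs ^ (rs - 1) = 0...01111, and
+  // ANDing with rs - 1 leaves 0...00111, one set bit per former trailing zero.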
+
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Add64(scratch, rs, -1);
+ Xor(rd, scratch, rs);
+ And(rd, rd, scratch);
+ // Count number of leading zeroes.
+ }
+ Clz32(rd, rd);
+ {
+ // Subtract number of leading zeroes from 32 to get number of trailing
+ // ones. Remember that the trailing ones were formerly trailing zeroes.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, 32);
+ Sub32(rd, scratch, rd);
+ }
+}
+
+void TurboAssembler::Ctz64(Register rd, Register rs) {
+ // Convert trailing zeroes to trailing ones, and bits to their left
+ // to zeroes.
+
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Add64(scratch, rs, -1);
+ Xor(rd, scratch, rs);
+ And(rd, rd, scratch);
+ // Count number of leading zeroes.
+ }
+ Clz64(rd, rd);
+ {
+ // Subtract number of leading zeroes from 64 to get number of trailing
+ // ones. Remember that the trailing ones were formerly trailing zeroes.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, 64);
+ Sub64(rd, scratch, rd);
+ }
+}
+
+void TurboAssembler::Popcnt32(Register rd, Register rs) {
+ // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
+ //
+ // A generalization of the best bit counting method to integers of
+ // bit-widths up to 128 (parameterized by type T) is this:
+ //
+ // v = v - ((v >> 1) & (T)~(T)0/3); // temp
+ // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp
+ // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp
+ // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
+ //
+ // There are algorithms which are faster in the cases where very few
+ // bits are set but the algorithm here attempts to minimize the total
+ // number of instructions executed even when a large number of bits
+ // are set.
+  // The number of instructions is 20.
+ // uint32_t B0 = 0x55555555; // (T)~(T)0/3
+ // uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
+ // uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
+ // uint32_t value = 0x01010101; // (T)~(T)0/255
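+  // The final multiply by 0x01010101 sums the per-byte counts into the top
+  // byte, which the shift by 24 then moves down to bits 7..0.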
+
+ uint32_t shift = 24;
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ Register value = t6;
+ DCHECK((rd != t6) && (rs != t6));
+ li(value, 0x01010101); // value = 0x01010101;
+ li(scratch2, 0x55555555); // B0 = 0x55555555;
+ Srl32(scratch, rs, 1);
+ And(scratch, scratch, scratch2);
+ Sub32(scratch, rs, scratch);
+ li(scratch2, 0x33333333); // B1 = 0x33333333;
+ slli(rd, scratch2, 4);
+ or_(scratch2, scratch2, rd);
+ And(rd, scratch, scratch2);
+ Srl32(scratch, scratch, 2);
+ And(scratch, scratch, scratch2);
+ Add32(scratch, rd, scratch);
+ srliw(rd, scratch, 4);
+ Add32(rd, rd, scratch);
+ li(scratch2, 0xF);
+ Mul32(scratch2, value, scratch2); // B2 = 0x0F0F0F0F;
+ And(rd, rd, scratch2);
+ Mul32(rd, rd, value);
+ Srl32(rd, rd, shift);
+}
+
+void TurboAssembler::Popcnt64(Register rd, Register rs) {
+ // uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
+ // uint64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
+ // uint64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15
+ // uint64_t value = 0x0101010101010101l; // (T)~(T)0/255
+ // uint64_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
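+  // shift stays 24 because the final srli below shifts by 32 + shift,
+  // i.e. 56 = (sizeof(uint64_t) - 1) * BITS_PER_BYTE.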
+
+ uint64_t shift = 24;
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ Register value = t6;
+ DCHECK((rd != t6) && (rs != t6));
+ li(value, 0x1111111111111111l); // value = 0x1111111111111111l;
+ li(scratch2, 5);
+ Mul64(scratch2, value, scratch2); // B0 = 0x5555555555555555l;
+ Srl64(scratch, rs, 1);
+ And(scratch, scratch, scratch2);
+ Sub64(scratch, rs, scratch);
+ li(scratch2, 3);
+ Mul64(scratch2, value, scratch2); // B1 = 0x3333333333333333l;
+ And(rd, scratch, scratch2);
+ Srl64(scratch, scratch, 2);
+ And(scratch, scratch, scratch2);
+ Add64(scratch, rd, scratch);
+ Srl64(rd, scratch, 4);
+ Add64(rd, rd, scratch);
+ li(scratch2, 0xF);
+ li(value, 0x0101010101010101l); // value = 0x0101010101010101l;
+ Mul64(scratch2, value, scratch2); // B2 = 0x0F0F0F0F0F0F0F0Fl;
+ And(rd, rd, scratch2);
+ Mul64(rd, rd, value);
+ srli(rd, rd, 32 + shift);
+}
+
+void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
+ DoubleRegister double_input,
+ Label* done) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+  // Trunc_w_d sets scratch to 1 if truncation succeeded without an exception.
+ Trunc_w_d(result, double_input, scratch);
+ // If we had no exceptions (i.e., scratch==1) we are done.
+ Branch(done, eq, scratch, Operand(1));
+}
+
+void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
+ Register result,
+ DoubleRegister double_input,
+ StubCallMode stub_mode) {
+ Label done;
+
+ TryInlineTruncateDoubleToI(result, double_input, &done);
+
+ // If we fell through then inline version didn't succeed - call stub
+ // instead.
+ push(ra);
+ Sub64(sp, sp, Operand(kDoubleSize)); // Put input on stack.
+ fsd(double_input, sp, 0);
+
+ if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
+ Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+ } else {
+ Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
+ }
+ ld(result, sp, 0);
+
+ Add64(sp, sp, Operand(kDoubleSize));
+ pop(ra);
+
+ bind(&done);
+}
+
+// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
+#define BRANCH_ARGS_CHECK(cond, rs, rt) \
+ DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \
+ (cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg)))
+
+void TurboAssembler::Branch(int32_t offset) {
+ DCHECK(is_int21(offset));
+ BranchShort(offset);
+}
+
+void TurboAssembler::Branch(int32_t offset, Condition cond, Register rs,
+ const Operand& rt) {
+ bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt);
+ DCHECK(is_near);
+ USE(is_near);
+}
+
+void TurboAssembler::Branch(Label* L) {
+ if (L->is_bound()) {
+ if (is_near(L)) {
+ BranchShort(L);
+ } else {
+ BranchLong(L);
+ }
+ } else {
+ if (is_trampoline_emitted()) {
+ BranchLong(L);
+ } else {
+ BranchShort(L);
+ }
+ }
+}
+
+void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
+ const Operand& rt) {
+ if (L->is_bound()) {
+ if (!BranchShortCheck(0, L, cond, rs, rt)) {
+ if (cond != cc_always) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ BranchLong(L);
+ bind(&skip);
+ } else {
+ BranchLong(L);
+ EmitConstPoolWithJumpIfNeeded();
+ }
+ }
+ } else {
+ if (is_trampoline_emitted()) {
+ if (cond != cc_always) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ BranchLong(L);
+ bind(&skip);
+ } else {
+ BranchLong(L);
+ EmitConstPoolWithJumpIfNeeded();
+ }
+ } else {
+ BranchShort(L, cond, rs, rt);
+ }
+ }
+}
+
+void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
+ RootIndex index) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadRoot(scratch, index);
+ Branch(L, cond, rs, Operand(scratch));
+}
+
+void TurboAssembler::BranchShortHelper(int32_t offset, Label* L) {
+ DCHECK(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset21);
+ j(offset);
+}
+
+void TurboAssembler::BranchShort(int32_t offset) {
+ DCHECK(is_int21(offset));
+ BranchShortHelper(offset, nullptr);
+}
+
+void TurboAssembler::BranchShort(Label* L) { BranchShortHelper(0, L); }
+
+int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
+ if (L) {
+ offset = branch_offset_helper(L, bits);
+ } else {
+ DCHECK(is_intn(offset, bits));
+ }
+ return offset;
+}
+
+Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt,
+ Register scratch) {
+ Register r2 = no_reg;
+ if (rt.is_reg()) {
+ r2 = rt.rm();
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ }
+
+ return r2;
+}
+
+bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset,
+ OffsetSize bits) {
+ if (!is_near(L, bits)) return false;
+ *offset = GetOffset(*offset, L, bits);
+ return true;
+}
+
+bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+ Register* scratch, const Operand& rt) {
+ if (!is_near(L, bits)) return false;
+ *scratch = GetRtAsRegisterHelper(rt, *scratch);
+ *offset = GetOffset(*offset, L, bits);
+ return true;
+}
+
+bool TurboAssembler::BranchShortHelper(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt) {
+ DCHECK(L == nullptr || offset == 0);
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = no_reg;
+ if (!rt.is_reg()) {
+ scratch = temps.Acquire();
+ li(scratch, rt);
+ } else {
+ scratch = rt.rm();
+ }
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ switch (cond) {
+ case cc_always:
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
+ j(offset);
+ EmitConstPoolWithJumpIfNeeded();
+ break;
+ case eq:
+ // rs == rt
+ if (rt.is_reg() && rs == rt.rm()) {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
+ j(offset);
+ } else {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+ beq(rs, scratch, offset);
+ }
+ break;
+ case ne:
+ // rs != rt
+ if (rt.is_reg() && rs == rt.rm()) {
+ break; // No code needs to be emitted
+ } else {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+ bne(rs, scratch, offset);
+ }
+ break;
+
+ // Signed comparison.
+ case greater:
+ // rs > rt
+ if (rt.is_reg() && rs == rt.rm()) {
+ break; // No code needs to be emitted.
+ } else {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+ bgt(rs, scratch, offset);
+ }
+ break;
+ case greater_equal:
+ // rs >= rt
+ if (rt.is_reg() && rs == rt.rm()) {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
+ j(offset);
+ } else {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+ bge(rs, scratch, offset);
+ }
+ break;
+ case less:
+ // rs < rt
+ if (rt.is_reg() && rs == rt.rm()) {
+ break; // No code needs to be emitted.
+ } else {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+ blt(rs, scratch, offset);
+ }
+ break;
+ case less_equal:
+ // rs <= rt
+ if (rt.is_reg() && rs == rt.rm()) {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
+ j(offset);
+ } else {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+ ble(rs, scratch, offset);
+ }
+ break;
+
+ // Unsigned comparison.
+ case Ugreater:
+ // rs > rt
+ if (rt.is_reg() && rs == rt.rm()) {
+ break; // No code needs to be emitted.
+ } else {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+ bgtu(rs, scratch, offset);
+ }
+ break;
+ case Ugreater_equal:
+ // rs >= rt
+ if (rt.is_reg() && rs == rt.rm()) {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
+ j(offset);
+ } else {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+ bgeu(rs, scratch, offset);
+ }
+ break;
+ case Uless:
+ // rs < rt
+ if (rt.is_reg() && rs == rt.rm()) {
+ break; // No code needs to be emitted.
+ } else {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+ bltu(rs, scratch, offset);
+ }
+ break;
+ case Uless_equal:
+ // rs <= rt
+ if (rt.is_reg() && rs == rt.rm()) {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
+ j(offset);
+ } else {
+ if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
+ bleu(rs, scratch, offset);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ CheckTrampolinePoolQuick(1);
+ return true;
+}
+
+bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
+ if (!L) {
+ DCHECK(is_int13(offset));
+ return BranchShortHelper(offset, nullptr, cond, rs, rt);
+ } else {
+ DCHECK_EQ(offset, 0);
+ return BranchShortHelper(0, L, cond, rs, rt);
+ }
+ return false;
+}
+
+void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
+ const Operand& rt) {
+ BranchShortCheck(offset, nullptr, cond, rs, rt);
+}
+
+void TurboAssembler::BranchShort(Label* L, Condition cond, Register rs,
+ const Operand& rt) {
+ BranchShortCheck(0, L, cond, rs, rt);
+}
+
+void TurboAssembler::BranchAndLink(int32_t offset) {
+ BranchAndLinkShort(offset);
+}
+
+void TurboAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
+ const Operand& rt) {
+ bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt);
+ DCHECK(is_near);
+ USE(is_near);
+}
+
+void TurboAssembler::BranchAndLink(Label* L) {
+ if (L->is_bound()) {
+ if (is_near(L)) {
+ BranchAndLinkShort(L);
+ } else {
+ BranchAndLinkLong(L);
+ }
+ } else {
+ if (is_trampoline_emitted()) {
+ BranchAndLinkLong(L);
+ } else {
+ BranchAndLinkShort(L);
+ }
+ }
+}
+
+void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
+ const Operand& rt) {
+ if (L->is_bound()) {
+ if (!BranchAndLinkShortCheck(0, L, cond, rs, rt)) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ BranchAndLinkLong(L);
+ bind(&skip);
+ }
+ } else {
+ if (is_trampoline_emitted()) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ BranchAndLinkLong(L);
+ bind(&skip);
+ } else {
+ BranchAndLinkShortCheck(0, L, cond, rs, rt);
+ }
+ }
+}
+
+void TurboAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L) {
+ DCHECK(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset21);
+ jal(offset);
+}
+
+void TurboAssembler::BranchAndLinkShort(int32_t offset) {
+ DCHECK(is_int21(offset));
+ BranchAndLinkShortHelper(offset, nullptr);
+}
+
+void TurboAssembler::BranchAndLinkShort(Label* L) {
+ BranchAndLinkShortHelper(0, L);
+}
+
+// Emits a short (21-bit-range) branch-and-link. RISC-V has no conditional
+// jump-and-link instruction, so for a conditional call we branch over the
+// jal with the negated condition.
+bool TurboAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt) {
+ DCHECK(L == nullptr || offset == 0);
+ if (!is_near(L, OffsetSize::kOffset21)) return false;
+
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ if (cond == cc_always) {
+ offset = GetOffset(offset, L, OffsetSize::kOffset21);
+ jal(offset);
+ } else {
+ Branch(kInstrSize * 2, NegateCondition(cond), rs,
+ Operand(GetRtAsRegisterHelper(rt, scratch)));
+ offset = GetOffset(offset, L, OffsetSize::kOffset21);
+ jal(offset);
+ }
+
+ return true;
+}
+
+bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
+ if (!L) {
+ DCHECK(is_int21(offset));
+ return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt);
+ } else {
+ DCHECK_EQ(offset, 0);
+ return BranchAndLinkShortHelper(0, L, cond, rs, rt);
+ }
+ return false;
+}
+
+void TurboAssembler::LoadFromConstantsTable(Register destination,
+ int constant_index) {
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
+ LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
+ Ld(destination,
+ FieldMemOperand(destination,
+ FixedArray::kHeaderSize + constant_index * kPointerSize));
+}
+
+void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+ Ld(destination, MemOperand(kRootRegister, offset));
+}
+
+void TurboAssembler::LoadRootRegisterOffset(Register destination,
+ intptr_t offset) {
+ if (offset == 0) {
+ Move(destination, kRootRegister);
+ } else {
+ Add64(destination, kRootRegister, Operand(offset));
+ }
+}
+
+void TurboAssembler::Jump(Register target, Condition cond, Register rs,
+ const Operand& rt) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (cond == cc_always) {
+ jr(target);
+ ForceConstantPoolEmissionWithoutJump();
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(kInstrSize * 2, NegateCondition(cond), rs, rt);
+ jr(target);
+ }
+}
+
+void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+ Condition cond, Register rs, const Operand& rt) {
+ Label skip;
+ if (cond != cc_always) {
+ Branch(&skip, NegateCondition(cond), rs, rt);
+ }
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ li(t6, Operand(target, rmode));
+ Jump(t6, al, zero_reg, Operand(zero_reg));
+ EmitConstPoolWithJumpIfNeeded();
+ bind(&skip);
+ }
+}
+
+void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
+ Register rs, const Operand& rt) {
+ DCHECK(!RelocInfo::IsCodeTarget(rmode));
+ Jump(static_cast<intptr_t>(target), rmode, cond, rs, rt);
+}
+
+void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond, Register rs, const Operand& rt) {
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
+
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ int builtin_index = Builtins::kNoBuiltinId;
+ bool target_is_isolate_independent_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index);
+
+ if (root_array_available_ && options().isolate_independent_code &&
+ target_is_isolate_independent_builtin) {
+ int offset = code->builtin_index() * kSystemPointerSize +
+ IsolateData::builtin_entry_table_offset();
+ Ld(t6, MemOperand(kRootRegister, offset));
+ Jump(t6, cond, rs, rt);
+ return;
+ } else if (options().inline_offheap_trampolines &&
+ target_is_isolate_independent_builtin) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ li(t6, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Jump(t6, cond, rs, rt);
+ return;
+ }
+
+ Jump(static_cast<intptr_t>(code.address()), rmode, cond, rs, rt);
+}
+
+void TurboAssembler::Jump(const ExternalReference& reference) {
+ li(t6, reference);
+ Jump(t6);
+}
+
+// Note: To call gcc-compiled C code on riscv64, you must call through t6.
+void TurboAssembler::Call(Register target, Condition cond, Register rs,
+ const Operand& rt) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (cond == cc_always) {
+ jalr(ra, target, 0);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(kInstrSize * 2, NegateCondition(cond), rs, rt);
+ jalr(ra, target, 0);
+ }
+}
+
+void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit,
+ Label* on_in_range) {
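+  // value is in [lower_limit, higher_limit] iff (value - lower_limit),
+  // viewed as unsigned, is <= (higher_limit - lower_limit), so a single
+  // subtraction plus one unsigned comparison covers both bounds.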
+ if (lower_limit != 0) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Sub64(scratch, value, Operand(lower_limit));
+ Branch(on_in_range, Uless_equal, scratch,
+ Operand(higher_limit - lower_limit));
+ } else {
+ Branch(on_in_range, Uless_equal, value,
+ Operand(higher_limit - lower_limit));
+ }
+}
+
+void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
+ Register rs, const Operand& rt) {
+ li(t6, Operand(static_cast<int64_t>(target), rmode), ADDRESS_LOAD);
+ Call(t6, cond, rs, rt);
+}
+
+void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond, Register rs, const Operand& rt) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ bool target_is_isolate_independent_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index);
+ if (root_array_available_ && options().isolate_independent_code &&
+ target_is_isolate_independent_builtin) {
+ int offset = code->builtin_index() * kSystemPointerSize +
+ IsolateData::builtin_entry_table_offset();
+ LoadRootRelative(t6, offset);
+ Call(t6, cond, rs, rt);
+ return;
+ } else if (options().inline_offheap_trampolines &&
+ target_is_isolate_independent_builtin) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ li(t6, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Call(t6, cond, rs, rt);
+ return;
+ }
+
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
+ DCHECK(code->IsExecutable());
+ Call(code.address(), rmode, cond, rs, rt);
+}
+
+void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
+ STATIC_ASSERT(kSystemPointerSize == 8);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+
+ // The builtin_index register contains the builtin index as a Smi.
+ SmiUntag(builtin_index, builtin_index);
+ CalcScaledAddress(builtin_index, kRootRegister, builtin_index,
+ kSystemPointerSizeLog2);
+ Ld(builtin_index,
+ MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
+}
+
+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ LoadEntryFromBuiltinIndex(builtin_index);
+ Call(builtin_index);
+}
+
+void TurboAssembler::PatchAndJump(Address target) {
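+  // Emit [auipc][Ld][jr][nop] followed by an 8-byte literal holding the
+  // target address; the Ld reads that literal at pc + 4 * kInstrSize and
+  // the jr transfers control to it. The literal presumably exists so the
+  // jump target can later be patched in place (hence the name).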
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ auipc(scratch, 0); // Load PC into scratch
+ Ld(t6, MemOperand(scratch, kInstrSize * 4));
+ jr(t6);
+ nop(); // For alignment
+ DCHECK_EQ(reinterpret_cast<uint64_t>(pc_) % 8, 0);
+  *reinterpret_cast<uint64_t*>(pc_) = target;  // pc_ is 8-byte aligned here.
+ pc_ += sizeof(uint64_t);
+}
+
+void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+ // This generates the final instruction sequence for calls to C functions
+ // once an exit frame has been constructed.
+ //
+ // Note that this assumes the caller code (i.e. the Code object currently
+ // being generated) is immovable or that the callee function cannot trigger
+ // GC, since the callee function will return to it.
+
+  // Compute in ra the address to return to after the jalr below: auipc loads
+  // the address of the auipc itself, and adding (kNumInstructionsToJump + 1)
+  // instruction sizes makes ra point just past the jalr into the C code.
+
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
+ static constexpr int kNumInstructionsToJump = 5;
+ Label find_ra;
+ // Adjust the value in ra to point to the correct return location, one
+ // instruction past the real call into C code (the jalr(t6)), and push it.
+ // This is the return address of the exit frame.
+ auipc(ra, 0); // Set ra the current PC
+ bind(&find_ra);
+ addi(ra, ra,
+ (kNumInstructionsToJump + 1) *
+ kInstrSize); // Set ra to insn after the call
+
+ // This spot was reserved in EnterExitFrame.
+ Sd(ra, MemOperand(sp));
+ addi(sp, sp, -kCArgsSlotsSize);
+ // Stack is still aligned.
+
+ // Call the C routine.
+ mv(t6,
+ target); // Function pointer to t6 to conform to ABI for PIC.
+ jalr(t6);
+ // Make sure the stored 'ra' points to this position.
+ DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra));
+}
+
+void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt) {
+ Jump(ra, cond, rs, rt);
+ if (cond == al) {
+ ForceConstantPoolEmissionWithoutJump();
+ }
+}
+
+void TurboAssembler::BranchLong(Label* L) {
+ // Generate position independent long branch.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ int64_t imm64;
+ imm64 = branch_long_offset(L);
+ DCHECK(is_int32(imm64));
+ int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
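+  // Lo12 is the sign-extended low 12 bits of the offset; adding 0x800
+  // before taking the upper 20 bits compensates for that sign extension,
+  // so (Hi20 << 12) + Lo12 == imm64 and auipc + jr covers a +/-2GB range.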
+ auipc(t6, Hi20); // Read PC + Hi20 into scratch.
+ jr(t6, Lo12); // jump PC + Hi20 + Lo12
+ EmitConstPoolWithJumpIfNeeded();
+}
+
+void TurboAssembler::BranchAndLinkLong(Label* L) {
+ // Generate position independent long branch and link.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ int64_t imm64;
+ imm64 = branch_long_offset(L);
+ DCHECK(is_int32(imm64));
+ int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
+ auipc(t6, Hi20); // Read PC + Hi20 into scratch.
+ jalr(t6, Lo12); // jump PC + Hi20 + Lo12 and read PC + 4 to ra
+}
+
+void TurboAssembler::DropAndRet(int drop) {
+ Add64(sp, sp, drop * kPointerSize);
+ Ret();
+}
+
+void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1,
+ const Operand& r2) {
+ // Both Drop and Ret need to be conditional.
+ Label skip;
+ if (cond != cc_always) {
+ Branch(&skip, NegateCondition(cond), r1, r2);
+ }
+
+ Drop(drop);
+ Ret();
+
+ if (cond != cc_always) {
+ bind(&skip);
+ }
+}
+
+void TurboAssembler::Drop(int count, Condition cond, Register reg,
+ const Operand& op) {
+ if (count <= 0) {
+ return;
+ }
+
+ Label skip;
+
+ if (cond != al) {
+ Branch(&skip, NegateCondition(cond), reg, op);
+ }
+
+ Add64(sp, sp, Operand(count * kPointerSize));
+
+ if (cond != al) {
+ bind(&skip);
+ }
+}
+
+void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
+ if (scratch == no_reg) {
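+    // No scratch register is available, so fall back to the three-XOR
+    // swap, which exchanges the two registers in place without a
+    // temporary.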
+ Xor(reg1, reg1, Operand(reg2));
+ Xor(reg2, reg2, Operand(reg1));
+ Xor(reg1, reg1, Operand(reg2));
+ } else {
+ mv(scratch, reg1);
+ mv(reg1, reg2);
+ mv(reg2, scratch);
+ }
+}
+
+void TurboAssembler::Call(Label* target) { BranchAndLink(target); }
+
+void TurboAssembler::LoadAddress(Register dst, Label* target,
+ RelocInfo::Mode rmode) {
+ int32_t offset;
+ if (CalculateOffset(target, &offset, OffsetSize::kOffset32)) {
+ int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)offset << 20 >> 20;
+ auipc(dst, Hi20);
+ addi(dst, dst, Lo12);
+ } else {
+ uint64_t address = jump_address(target);
+ li(dst, Operand(address, rmode), ADDRESS_LOAD);
+ }
+}
+
+void TurboAssembler::Push(Smi smi) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(smi));
+ push(scratch);
+}
+
+void TurboAssembler::PushArray(Register array, Register size,
+ PushArrayOrder order) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ Label loop, entry;
+ if (order == PushArrayOrder::kReverse) {
+ mv(scratch, zero_reg);
+ jmp(&entry);
+ bind(&loop);
+ CalcScaledAddress(scratch2, array, scratch, kPointerSizeLog2);
+ Ld(scratch2, MemOperand(scratch2));
+ push(scratch2);
+ Add64(scratch, scratch, Operand(1));
+ bind(&entry);
+ Branch(&loop, less, scratch, Operand(size));
+ } else {
+ mv(scratch, size);
+ jmp(&entry);
+ bind(&loop);
+ CalcScaledAddress(scratch2, array, scratch, kPointerSizeLog2);
+ Ld(scratch2, MemOperand(scratch2));
+ push(scratch2);
+ bind(&entry);
+ Add64(scratch, scratch, Operand(-1));
+ Branch(&loop, greater_equal, scratch, Operand(zero_reg));
+ }
+}
+
+void TurboAssembler::Push(Handle<HeapObject> handle) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(handle));
+ push(scratch);
+}
+
+void MacroAssembler::MaybeDropFrames() {
+ // Check whether we need to drop frames to restart a function on the stack.
+ li(a1, ExternalReference::debug_restart_fp_address(isolate()));
+ Ld(a1, MemOperand(a1));
+ Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
+ ne, a1, Operand(zero_reg));
+}
+
+// ---------------------------------------------------------------------------
+// Exception handling.
+
+void MacroAssembler::PushStackHandler() {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+
+ Push(Smi::zero()); // Padding.
+
+ // Link the current handler as the next handler.
+ li(t2,
+ ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
+ Ld(t1, MemOperand(t2));
+ push(t1);
+
+ // Set this new handler as the current one.
+ Sd(sp, MemOperand(t2));
+}
+
+void MacroAssembler::PopStackHandler() {
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ pop(a1);
+ Add64(sp, sp,
+ Operand(
+ static_cast<int64_t>(StackHandlerConstants::kSize - kPointerSize)));
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch,
+ ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
+ Sd(a1, MemOperand(scratch));
+}
+
+void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
+ const DoubleRegister src) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Label NotNaN;
+
+ fmv_d(dst, src);
+ feq_d(scratch, src, src);
+ bne(scratch, zero_reg, &NotNaN);
+ RV_li(scratch, 0x7ff8000000000000ULL); // This is the canonical NaN
+ fmv_d_x(dst, scratch);
+ bind(&NotNaN);
+}
+
+void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
+ Move(dst, fa0); // Reg fa0 is FP return value.
+}
+
+void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
+ Move(dst, fa0); // Reg fa0 is FP first argument value.
+}
+
+void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(fa0, src); }
+
+void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(fa0, src); }
+
+void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
+ DoubleRegister src2) {
+ const DoubleRegister fparg2 = fa1;
+ if (src2 == fa0) {
+ DCHECK(src1 != fparg2);
+ Move(fparg2, src2);
+ Move(fa0, src1);
+ } else {
+ Move(fa0, src1);
+ Move(fparg2, src2);
+ }
+}
+
+// -----------------------------------------------------------------------------
+// JavaScript invokes.
+
+void TurboAssembler::PrepareForTailCall(Register callee_args_count,
+ Register caller_args_count,
+ Register scratch0, Register scratch1) {
+ // Calculate the end of destination area where we will put the arguments
+ // after we drop current frame. We add kPointerSize to count the receiver
+ // argument which is not included into formal parameters count.
+ Register dst_reg = scratch0;
+ CalcScaledAddress(dst_reg, fp, caller_args_count, kPointerSizeLog2);
+ Add64(dst_reg, dst_reg,
+ Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+ Register src_reg = caller_args_count;
+ // Calculate the end of source area. +kPointerSize is for the receiver.
+ CalcScaledAddress(src_reg, sp, callee_args_count, kPointerSizeLog2);
+ Add64(src_reg, src_reg, Operand(kPointerSize));
+
+ if (FLAG_debug_code) {
+ Check(Uless, AbortReason::kStackAccessBelowStackPointer, src_reg,
+ Operand(dst_reg));
+ }
+
+ // Restore caller's frame pointer and return address now as they will be
+ // overwritten by the copying loop.
+ Ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Now copy callee arguments to the caller frame going backwards, to avoid
+  // corrupting the callee arguments (the source and destination areas could
+  // overlap).
+
+ // Both src_reg and dst_reg are pointing to the word after the one to copy,
+ // so they must be pre-decremented in the loop.
+ Register tmp_reg = scratch1;
+ Label loop, entry;
+ Branch(&entry);
+ bind(&loop);
+ Sub64(src_reg, src_reg, Operand(kPointerSize));
+ Sub64(dst_reg, dst_reg, Operand(kPointerSize));
+ Ld(tmp_reg, MemOperand(src_reg));
+ Sd(tmp_reg, MemOperand(dst_reg));
+ bind(&entry);
+ Branch(&loop, ne, sp, Operand(src_reg));
+
+ // Leave current frame.
+ mv(sp, dst_reg);
+}
+
+void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
+ DCHECK(root_array_available());
+ Isolate* isolate = this->isolate();
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
+ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+ intptr_t offset =
+ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ CHECK(is_int32(offset));
+ Ld(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
+}
+
+void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
+ Register scratch2,
+ Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+
+ LoadStackLimit(scratch1, StackLimitKind::kRealStackLimit);
+  // Make scratch1 the space we have left. The stack might already have
+  // overflowed here, which will cause scratch1 to become negative.
+ Sub64(scratch1, sp, scratch1);
+ // Check if the arguments will overflow the stack.
+ Sll64(scratch2, num_args, kPointerSizeLog2);
+ // Signed comparison.
+ Branch(stack_overflow, le, scratch1, Operand(scratch2));
+}
+
+void MacroAssembler::InvokePrologue(Register expected_parameter_count,
+ Register actual_parameter_count,
+ Label* done, InvokeFlag flag) {
+ Label regular_invoke;
+
+ // a0: actual arguments count
+ // a1: function (passed through to callee)
+ // a2: expected arguments count
+
+ DCHECK_EQ(actual_parameter_count, a0);
+ DCHECK_EQ(expected_parameter_count, a2);
+
+  // If the expected parameter count is equal to the adaptor sentinel, there
+  // is no need to push undefined values as arguments.
+ Branch(&regular_invoke, eq, expected_parameter_count,
+ Operand(kDontAdaptArgumentsSentinel));
+
+ // If overapplication or if the actual argument count is equal to the
+ // formal parameter count, no need to push extra undefined values.
+ Sub64(expected_parameter_count, expected_parameter_count,
+ actual_parameter_count);
+ Branch(&regular_invoke, le, expected_parameter_count, Operand(zero_reg));
+
+ Label stack_overflow;
+ StackOverflowCheck(expected_parameter_count, t0, t1, &stack_overflow);
+  // Underapplication. Move the arguments already on the stack, including the
+  // receiver, down to make room for the extra undefined values.
+ {
+ Label copy;
+ Register src = a6, dest = a7;
+ Move(src, sp);
+ Sll64(t0, expected_parameter_count, kSystemPointerSizeLog2);
+ Sub64(sp, sp, Operand(t0));
+ // Update stack pointer.
+ Move(dest, sp);
+ Move(t0, actual_parameter_count);
+ bind(&copy);
+ Ld(t1, MemOperand(src, 0));
+ Sd(t1, MemOperand(dest, 0));
+ Sub64(t0, t0, Operand(1));
+ Add64(src, src, Operand(kSystemPointerSize));
+ Add64(dest, dest, Operand(kSystemPointerSize));
+ Branch(&copy, ge, t0, Operand(zero_reg));
+ }
+
+ // Fill remaining expected arguments with undefined values.
+ LoadRoot(t0, RootIndex::kUndefinedValue);
+ {
+ Label loop;
+ bind(&loop);
+ Sd(t0, MemOperand(a7, 0));
+ Sub64(expected_parameter_count, expected_parameter_count, Operand(1));
+ Add64(a7, a7, Operand(kSystemPointerSize));
+ Branch(&loop, gt, expected_parameter_count, Operand(zero_reg));
+ }
+ Branch(&regular_invoke);
+
+ bind(&stack_overflow);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ CallRuntime(Runtime::kThrowStackOverflow);
+ break_(0xCC);
+ }
+ bind(&regular_invoke);
+}
+
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+ Register expected_parameter_count,
+ Register actual_parameter_count) {
+ Label skip_hook;
+
+ li(t0, ExternalReference::debug_hook_on_function_call_address(isolate()));
+ Lb(t0, MemOperand(t0));
+ Branch(&skip_hook, eq, t0, Operand(zero_reg));
+
+ {
+ // Load receiver to pass it later to DebugOnFunctionCall hook.
+ LoadReceiver(t0, actual_parameter_count);
+
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ SmiTag(expected_parameter_count);
+ Push(expected_parameter_count);
+
+ SmiTag(actual_parameter_count);
+ Push(actual_parameter_count);
+
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ Push(t0);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+
+ Pop(actual_parameter_count);
+ SmiUntag(actual_parameter_count);
+
+ Pop(expected_parameter_count);
+ SmiUntag(expected_parameter_count);
+ }
+ bind(&skip_hook);
+}
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ Register expected_parameter_count,
+ Register actual_parameter_count,
+ InvokeFlag flag) {
+ // You can't call a function without a valid frame.
+ DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_EQ(function, a1);
+ DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
+
+ // On function call, call into the debugger if necessary.
+ CheckDebugHook(function, new_target, expected_parameter_count,
+ actual_parameter_count);
+
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ LoadRoot(a3, RootIndex::kUndefinedValue);
+ }
+
+ Label done;
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Register code = kJavaScriptCallCodeStartRegister;
+ Ld(code, FieldMemOperand(function, JSFunction::kCodeOffset));
+ if (flag == CALL_FUNCTION) {
+ CallCodeObject(code);
+ } else {
+ DCHECK(flag == JUMP_FUNCTION);
+ JumpCodeObject(code);
+ }
+
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
+}
+
+void MacroAssembler::InvokeFunctionWithNewTarget(
+ Register function, Register new_target, Register actual_parameter_count,
+ InvokeFlag flag) {
+ // You can't call a function without a valid frame.
+ DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+
+ // Contract with called JS functions requires that function is passed in a1.
+ DCHECK_EQ(function, a1);
+ Register expected_parameter_count = a2;
+ Register temp_reg = t0;
+ Ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // The argument count is stored as uint16_t
+ Lhu(expected_parameter_count,
+ FieldMemOperand(temp_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+
+ InvokeFunctionCode(a1, new_target, expected_parameter_count,
+ actual_parameter_count, flag);
+}
+
+void MacroAssembler::InvokeFunction(Register function,
+ Register expected_parameter_count,
+ Register actual_parameter_count,
+ InvokeFlag flag) {
+ // You can't call a function without a valid frame.
+ DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+
+ // Contract with called JS functions requires that function is passed in a1.
+ DCHECK_EQ(function, a1);
+
+ // Get the function and setup the context.
+ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ InvokeFunctionCode(a1, no_reg, expected_parameter_count,
+ actual_parameter_count, flag);
+}
+
+// ---------------------------------------------------------------------------
+// Support functions.
+
+void MacroAssembler::GetObjectType(Register object, Register map,
+ Register type_reg) {
+ LoadMap(map, object);
+ Lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+}
+
+void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg,
+ InstanceType lower_limit,
+ Register range) {
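+  // Bias the instance type by lower_limit so the caller can test membership
+  // in [lower_limit, upper_limit] with one unsigned comparison of |range|
+  // against (upper_limit - lower_limit), as AssertFunction does below.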
+ Lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ Sub64(range, type_reg, Operand(lower_limit));
+}
+
+// -----------------------------------------------------------------------------
+// Runtime calls.
+
+void TurboAssembler::AddOverflow64(Register dst, Register left,
+ const Operand& right, Register overflow) {
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register right_reg = no_reg;
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ if (!right.is_reg()) {
+ li(scratch, Operand(right));
+ right_reg = scratch;
+ } else {
+ right_reg = right.rm();
+ }
+ DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
+ overflow != scratch2);
+ DCHECK(overflow != left && overflow != right_reg);
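+  // Signed addition overflows iff both operands have the same sign and the
+  // sum's sign differs, i.e. the sign bit of (sum ^ left) & (sum ^ right)
+  // is set, so |overflow| ends up negative exactly when the add overflowed.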
+ if (dst == left || dst == right_reg) {
+ add(scratch2, left, right_reg);
+ xor_(overflow, scratch2, left);
+ xor_(scratch, scratch2, right_reg);
+ and_(overflow, overflow, scratch);
+ mv(dst, scratch2);
+ } else {
+ add(dst, left, right_reg);
+ xor_(overflow, dst, left);
+ xor_(scratch, dst, right_reg);
+ and_(overflow, overflow, scratch);
+ }
+}
+
+void TurboAssembler::SubOverflow64(Register dst, Register left,
+ const Operand& right, Register overflow) {
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register right_reg = no_reg;
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ if (!right.is_reg()) {
+ li(scratch, Operand(right));
+ right_reg = scratch;
+ } else {
+ right_reg = right.rm();
+ }
+
+ DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
+ overflow != scratch2);
+ DCHECK(overflow != left && overflow != right_reg);
+
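+  // Signed subtraction overflows iff the operands have different signs and
+  // the difference's sign differs from |left|'s, i.e. the sign bit of
+  // (left ^ diff) & (left ^ right) is set, so |overflow| ends up negative
+  // exactly when the subtraction overflowed.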
+ if (dst == left || dst == right_reg) {
+ sub(scratch2, left, right_reg);
+ xor_(overflow, left, scratch2);
+ xor_(scratch, left, right_reg);
+ and_(overflow, overflow, scratch);
+ mv(dst, scratch2);
+ } else {
+ sub(dst, left, right_reg);
+ xor_(overflow, left, dst);
+ xor_(scratch, left, right_reg);
+ and_(overflow, overflow, scratch);
+ }
+}
+
+void TurboAssembler::MulOverflow32(Register dst, Register left,
+ const Operand& right, Register overflow) {
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register right_reg = no_reg;
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ if (!right.is_reg()) {
+ li(scratch, Operand(right));
+ right_reg = scratch;
+ } else {
+ right_reg = right.rm();
+ }
+
+ DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
+ overflow != scratch2);
+ DCHECK(overflow != left && overflow != right_reg);
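+  // Multiply the sign-extended 32-bit operands in full 64-bit precision; if
+  // the sign-extended 32-bit result differs from the 64-bit product, the
+  // 32-bit multiplication overflowed and |overflow| is left non-zero.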
+ sext_w(overflow, left);
+ sext_w(scratch2, right_reg);
+
+ mul(overflow, overflow, scratch2);
+ sext_w(dst, overflow);
+ xor_(overflow, overflow, dst);
+}
+
+void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
+ SaveFPRegsMode save_doubles) {
+ // All parameters are on the stack. a0 has the return value after call.
+
+ // If the expected number of arguments of the runtime function is
+ // constant, we check that the actual number of arguments match the
+ // expectation.
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
+
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ PrepareCEntryArgs(num_arguments);
+ PrepareCEntryFunction(ExternalReference::Create(f));
+ Handle<Code> code =
+ CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
+ Call(code, RelocInfo::CODE_TARGET);
+}
+
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ PrepareCEntryArgs(function->nargs);
+ }
+ JumpToExternalReference(ExternalReference::Create(fid));
+}
+
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
+ bool builtin_exit_frame) {
+ PrepareCEntryFunction(builtin);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
+ kArgvOnStack, builtin_exit_frame);
+ Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
+}
+
+void MacroAssembler::JumpToInstructionStream(Address entry) {
+  // Load the target address either directly or from the constant pool,
+  // depending on whether the RISC-V constant pool is enabled.
+ if (FLAG_disable_riscv_constant_pool) {
+ li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ } else {
+ RecordEntry(entry, RelocInfo::OFF_HEAP_TARGET);
+ RecordRelocInfo(RelocInfo::OFF_HEAP_TARGET, entry);
+ auipc(kOffHeapTrampolineRegister, 0);
+ ld(kOffHeapTrampolineRegister, kOffHeapTrampolineRegister, 0);
+ }
+ Jump(kOffHeapTrampolineRegister);
+}
+
+void MacroAssembler::LoadWeakValue(Register out, Register in,
+ Label* target_if_cleared) {
+ Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObjectLower32));
+ And(out, in, Operand(~kWeakHeapObjectMask));
+}
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ DCHECK_GT(value, 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ // This operation has to be exactly 32-bit wide in case the external
+ // reference table redirects the counter to a uint32_t
+ // dummy_stats_counter_ field.
+ li(scratch2, ExternalReference::Create(counter));
+ Lw(scratch1, MemOperand(scratch2));
+ Add32(scratch1, scratch1, Operand(value));
+ Sw(scratch1, MemOperand(scratch2));
+ }
+}
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ DCHECK_GT(value, 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ // This operation has to be exactly 32-bit wide in case the external
+ // reference table redirects the counter to a uint32_t
+ // dummy_stats_counter_ field.
+ li(scratch2, ExternalReference::Create(counter));
+ Lw(scratch1, MemOperand(scratch2));
+ Sub32(scratch1, scratch1, Operand(value));
+ Sw(scratch1, MemOperand(scratch2));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Debugging.
+
+void TurboAssembler::Trap() { stop(); }
+void TurboAssembler::DebugBreak() { stop(); }
+
+void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
+ Operand rt) {
+ if (emit_debug_code()) Check(cc, reason, rs, rt);
+}
+
+void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
+ Operand rt) {
+ Label L;
+ Branch(&L, cc, rs, rt);
+ Abort(reason);
+ // Will not return here.
+ bind(&L);
+}
+
+void TurboAssembler::Abort(AbortReason reason) {
+ Label abort_start;
+ bind(&abort_start);
+#ifdef DEBUG
+ const char* msg = GetAbortReason(reason);
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+#endif
+
+ // Avoid emitting call to builtin if requested.
+ if (trap_on_abort()) {
+ ebreak();
+ return;
+ }
+
+ if (should_abort_hard()) {
+ // We don't care if we constructed a frame. Just pretend we did.
+ FrameScope assume_frame(this, StackFrame::NONE);
+ PrepareCallCFunction(0, a0);
+ li(a0, Operand(static_cast<int>(reason)));
+ CallCFunction(ExternalReference::abort_with_reason(), 1);
+ return;
+ }
+
+ Move(a0, Smi::FromInt(static_cast<int>(reason)));
+
+ // Disable stub call restrictions to always allow calls to abort.
+ if (!has_frame()) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
+ } else {
+ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
+ }
+ // Will not return here.
+ if (is_trampoline_pool_blocked()) {
+ // If the calling code cares about the exact number of
+ // instructions generated, we insert padding here to keep the size
+ // of the Abort macro constant.
+ // Currently in debug mode with debug_code enabled the number of
+ // generated instructions is 10, so we use this as a maximum value.
+ static const int kExpectedAbortInstructions = 10;
+ int abort_instructions = InstructionsGeneratedSince(&abort_start);
+ DCHECK_LE(abort_instructions, kExpectedAbortInstructions);
+ while (abort_instructions++ < kExpectedAbortInstructions) {
+ nop();
+ }
+ }
+}
+
+void TurboAssembler::LoadMap(Register destination, Register object) {
+ Ld(destination, FieldMemOperand(object, HeapObject::kMapOffset));
+}
+
+void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+ LoadMap(dst, cp);
+ Ld(dst,
+ FieldMemOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
+ Ld(dst, MemOperand(dst, Context::SlotOffset(index)));
+}
+
+void TurboAssembler::StubPrologue(StackFrame::Type type) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(StackFrame::TypeToMarker(type)));
+ PushCommonFrame(scratch);
+}
+
+void TurboAssembler::Prologue() { PushStandardFrame(a1); }
+
+void TurboAssembler::EnterFrame(StackFrame::Type type) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ int stack_offset = -3 * kPointerSize;
+ const int fp_offset = 1 * kPointerSize;
+ addi(sp, sp, stack_offset);
+ stack_offset = -stack_offset - kPointerSize;
+ Sd(ra, MemOperand(sp, stack_offset));
+ stack_offset -= kPointerSize;
+ Sd(fp, MemOperand(sp, stack_offset));
+ stack_offset -= kPointerSize;
+ li(scratch, Operand(StackFrame::TypeToMarker(type)));
+ Sd(scratch, MemOperand(sp, stack_offset));
+ // Adjust FP to point to saved FP.
+ DCHECK_EQ(stack_offset, 0);
+ Add64(fp, sp, Operand(fp_offset));
+}
+
+void TurboAssembler::LeaveFrame(StackFrame::Type type) {
+ addi(sp, fp, 2 * kPointerSize);
+ Ld(ra, MemOperand(fp, 1 * kPointerSize));
+ Ld(fp, MemOperand(fp, 0 * kPointerSize));
+}
+
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
+ StackFrame::Type frame_type) {
+ DCHECK(frame_type == StackFrame::EXIT ||
+ frame_type == StackFrame::BUILTIN_EXIT);
+
+ // Set up the frame structure on the stack.
+ STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
+ STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
+ STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
+
+ // This is how the stack will look:
+ // fp + 2 (==kCallerSPDisplacement) - old stack's end
+ // [fp + 1 (==kCallerPCOffset)] - saved old ra
+ // [fp + 0 (==kCallerFPOffset)] - saved old fp
+ // [fp - 1 StackFrame::EXIT Smi
+ // [fp - 2 (==kSPOffset)] - sp of the called function
+ // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
+ // new stack (will contain saved ra)
+
+ // Save registers and reserve room for saved entry sp.
+ addi(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
+ Sd(ra, MemOperand(sp, 3 * kPointerSize));
+ Sd(fp, MemOperand(sp, 2 * kPointerSize));
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(StackFrame::TypeToMarker(frame_type)));
+ Sd(scratch, MemOperand(sp, 1 * kPointerSize));
+ }
+ // Set up new frame pointer.
+ addi(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
+
+ if (emit_debug_code()) {
+ Sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ }
+
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ // Save the frame pointer and the context in top.
+ li(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ isolate()));
+ Sd(fp, MemOperand(scratch));
+ li(scratch,
+ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
+ Sd(cp, MemOperand(scratch));
+ }
+
+ const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+ if (save_doubles) {
+    // The stack is already 8-byte aligned, as required for the double stores.
+ int kNumOfSavedRegisters = FPURegister::kNumRegisters;
+ int space = kNumOfSavedRegisters * kDoubleSize;
+ Sub64(sp, sp, Operand(space));
+ for (int i = 0; i < kNumOfSavedRegisters; i++) {
+ FPURegister reg = FPURegister::from_code(i);
+ StoreDouble(reg, MemOperand(sp, i * kDoubleSize));
+ }
+ }
+
+ // Reserve place for the return address, stack space and an optional slot
+ // (used by DirectCEntry to hold the return value if a struct is
+ // returned) and align the frame preparing for calling the runtime function.
+ DCHECK_GE(stack_space, 0);
+ Sub64(sp, sp, Operand((stack_space + 2) * kPointerSize));
+ if (frame_alignment > 0) {
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
+ And(sp, sp, Operand(-frame_alignment)); // Align stack.
+ }
+
+ // Set the exit frame sp value to point just before the return address
+ // location.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ addi(scratch, sp, kPointerSize);
+ Sd(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
+}
+
+void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
+ bool do_return,
+ bool argument_count_is_length) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ // Optionally restore all double registers.
+ if (save_doubles) {
+ // Remember: we only need to restore every 2nd double FPU value.
+ int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2;
+ Sub64(scratch, fp,
+ Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
+ kNumOfSavedRegisters * kDoubleSize));
+ for (int i = 0; i < kNumOfSavedRegisters; i++) {
+ FPURegister reg = FPURegister::from_code(2 * i);
+ LoadDouble(reg, MemOperand(scratch, i * kDoubleSize));
+ }
+ }
+
+ // Clear top frame.
+ li(scratch,
+ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()));
+ Sd(zero_reg, MemOperand(scratch));
+
+ // Restore current context from top and clear it in debug mode.
+ li(scratch,
+ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
+ Ld(cp, MemOperand(scratch));
+
+#ifdef DEBUG
+ li(scratch,
+ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
+ Sd(a3, MemOperand(scratch));
+#endif
+
+ // Pop the arguments, restore registers, and return.
+ mv(sp, fp); // Respect ABI stack constraint.
+ Ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
+ Ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
+
+ if (argument_count.is_valid()) {
+ if (argument_count_is_length) {
+ add(sp, sp, argument_count);
+ } else {
+ CalcScaledAddress(sp, sp, argument_count, kPointerSizeLog2, scratch);
+ }
+ }
+
+ addi(sp, sp, 2 * kPointerSize);
+
+ if (do_return) {
+ Ret();
+ }
+}
+
+int TurboAssembler::ActivationFrameAlignment() {
+#if V8_HOST_ARCH_RISCV64
+ // Running on the real platform. Use the alignment as mandated by the local
+ // environment.
+ // Note: This will break if we ever start generating snapshots on one RISC-V
+ // platform for another RISC-V platform with a different alignment.
+ return base::OS::ActivationFrameAlignment();
+#else // V8_HOST_ARCH_RISCV64
+ // If we are using the simulator then we should always align to the expected
+ // alignment. As the simulator is used to generate snapshots we do not know
+ // if the target platform will need alignment, so this is controlled from a
+ // flag.
+ return FLAG_sim_stack_alignment;
+#endif // V8_HOST_ARCH_RISCV64
+}
+
+void MacroAssembler::AssertStackIsAligned() {
+ if (emit_debug_code()) {
+ const int frame_alignment = ActivationFrameAlignment();
+ const int frame_alignment_mask = frame_alignment - 1;
+
+ if (frame_alignment > kPointerSize) {
+ Label alignment_as_expected;
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ andi(scratch, sp, frame_alignment_mask);
+ Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
+ }
+ // Don't use Check here, as it will call Runtime_Abort re-entering here.
+ ebreak();
+ bind(&alignment_as_expected);
+ }
+ }
+}
+
+void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
+ if (SmiValuesAre32Bits()) {
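+    // With 32-bit Smi values the payload occupies the upper half of the
+    // tagged word, so loading just that half word (via SmiWordOffset)
+    // yields the untagged value directly.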
+ Lw(dst, MemOperand(src.rm(), SmiWordOffset(src.offset())));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ Lw(dst, src);
+ SmiUntag(dst);
+ }
+}
+
+void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
+ Register scratch) {
+ DCHECK_EQ(0, kSmiTag);
+ andi(scratch, value, kSmiTagMask);
+ Branch(smi_label, eq, scratch, Operand(zero_reg));
+}
+
+void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
+ Register scratch) {
+ DCHECK_EQ(0, kSmiTag);
+ andi(scratch, value, kSmiTagMask);
+ Branch(not_smi_label, ne, scratch, Operand(zero_reg));
+}
+
+void MacroAssembler::AssertNotSmi(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ DCHECK(object != kScratchReg);
+ andi(kScratchReg, object, kSmiTagMask);
+ Check(ne, AbortReason::kOperandIsASmi, kScratchReg, Operand(zero_reg));
+ }
+}
+
+void MacroAssembler::AssertSmi(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ DCHECK(object != kScratchReg);
+ andi(kScratchReg, object, kSmiTagMask);
+ Check(eq, AbortReason::kOperandIsASmi, kScratchReg, Operand(zero_reg));
+ }
+}
+
+void MacroAssembler::AssertConstructor(Register object) {
+ if (emit_debug_code()) {
+ DCHECK(object != kScratchReg);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, kScratchReg);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, kScratchReg,
+ Operand(zero_reg));
+
+ LoadMap(kScratchReg, object);
+ Lbu(kScratchReg, FieldMemOperand(kScratchReg, Map::kBitFieldOffset));
+ And(kScratchReg, kScratchReg, Operand(Map::Bits1::IsConstructorBit::kMask));
+ Check(ne, AbortReason::kOperandIsNotAConstructor, kScratchReg,
+ Operand(zero_reg));
+ }
+}
+
+void MacroAssembler::AssertFunction(Register object) {
+ if (emit_debug_code()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ DCHECK(object != kScratchReg);
+ SmiTst(object, kScratchReg);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, kScratchReg,
+ Operand(zero_reg));
+ push(object);
+ LoadMap(object, object);
+ GetInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE, t5);
+ Check(Uless_equal, AbortReason::kOperandIsNotAFunction, t5,
+ Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
+ pop(object);
+ }
+}
+
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (emit_debug_code()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ DCHECK(object != kScratchReg);
+ SmiTst(object, kScratchReg);
+ Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, kScratchReg,
+ Operand(zero_reg));
+ GetObjectType(object, kScratchReg, kScratchReg);
+ Check(eq, AbortReason::kOperandIsNotABoundFunction, kScratchReg,
+ Operand(JS_BOUND_FUNCTION_TYPE));
+ }
+}
+
+void MacroAssembler::AssertGeneratorObject(Register object) {
+ if (!emit_debug_code()) return;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ DCHECK(object != kScratchReg);
+ SmiTst(object, kScratchReg);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, kScratchReg,
+ Operand(zero_reg));
+
+ GetObjectType(object, kScratchReg, kScratchReg);
+
+ Label done;
+
+ // Check if JSGeneratorObject
+ Branch(&done, eq, kScratchReg, Operand(JS_GENERATOR_OBJECT_TYPE));
+
+ // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType)
+ Branch(&done, eq, kScratchReg, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
+
+ // Check if JSAsyncGeneratorObject
+ Branch(&done, eq, kScratchReg, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
+
+ Abort(AbortReason::kOperandIsNotAGeneratorObject);
+
+ bind(&done);
+}
+
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+ Register scratch) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ Branch(&done_checking, eq, object, Operand(scratch));
+ GetObjectType(object, scratch, scratch);
+ Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch,
+ Operand(ALLOCATION_SITE_TYPE));
+ bind(&done_checking);
+ }
+}
+
+template <typename F_TYPE>
+void TurboAssembler::FloatMinMaxHelper(FPURegister dst, FPURegister src1,
+ FPURegister src2, MaxMinKind kind) {
+ DCHECK((std::is_same<F_TYPE, float>::value) ||
+ (std::is_same<F_TYPE, double>::value));
+
+ if (src1 == src2 && dst != src1) {
+ if (std::is_same<float, F_TYPE>::value) {
+ fmv_s(dst, src1);
+ } else {
+ fmv_d(dst, src1);
+ }
+ return;
+ }
+
+ Label done, nan;
+
+  // On RISC-V, fmin_s/fmax_s return the non-NaN operand if only one operand
+  // is NaN, whereas in JS the result is NaN if any operand is NaN. The
+  // following handles this discrepancy between the ISA and JS semantics.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ if (std::is_same<float, F_TYPE>::value) {
+ CompareIsNanF32(scratch, src1, src2);
+ } else {
+ CompareIsNanF64(scratch, src1, src2);
+ }
+ BranchTrueF(scratch, &nan);
+
+ if (kind == MaxMinKind::kMax) {
+ if (std::is_same<float, F_TYPE>::value) {
+ fmax_s(dst, src1, src2);
+ } else {
+ fmax_d(dst, src1, src2);
+ }
+ } else {
+ if (std::is_same<float, F_TYPE>::value) {
+ fmin_s(dst, src1, src2);
+ } else {
+ fmin_d(dst, src1, src2);
+ }
+ }
+ j(&done);
+
+ bind(&nan);
+ // if any operand is NaN, return NaN (fadd returns NaN if any operand is NaN)
+ if (std::is_same<float, F_TYPE>::value) {
+ fadd_s(dst, src1, src2);
+ } else {
+ fadd_d(dst, src1, src2);
+ }
+
+ bind(&done);
+}
+
+void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ FloatMinMaxHelper<float>(dst, src1, src2, MaxMinKind::kMax);
+}
+
+void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ FloatMinMaxHelper<float>(dst, src1, src2, MaxMinKind::kMin);
+}
+
+void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ FloatMinMaxHelper<double>(dst, src1, src2, MaxMinKind::kMax);
+}
+
+void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ FloatMinMaxHelper<double>(dst, src1, src2, MaxMinKind::kMin);
+}
+
+static const int kRegisterPassedArguments = 8;
+
+int TurboAssembler::CalculateStackPassedDWords(int num_gp_arguments,
+ int num_fp_arguments) {
+ int stack_passed_dwords = 0;
+
+ // Up to eight integer arguments are passed in registers a0..a7 and
+ // up to eight floating point arguments are passed in registers fa0..fa7
+ if (num_gp_arguments > kRegisterPassedArguments) {
+ stack_passed_dwords += num_gp_arguments - kRegisterPassedArguments;
+ }
+ if (num_fp_arguments > kRegisterPassedArguments) {
+ stack_passed_dwords += num_fp_arguments - kRegisterPassedArguments;
+ }
+ stack_passed_dwords += kCArgSlotCount;
+ return stack_passed_dwords;
+}
+
+void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+ int num_double_arguments,
+ Register scratch) {
+ int frame_alignment = ActivationFrameAlignment();
+
+ // Up to eight simple arguments in a0..a7, fa0..fa7.
+ // Remaining arguments are pushed on the stack (arg slot calculation handled
+ // by CalculateStackPassedDWords()).
+ int stack_passed_arguments =
+ CalculateStackPassedDWords(num_reg_arguments, num_double_arguments);
+ if (frame_alignment > kPointerSize) {
+ // Make stack end at alignment and make room for stack arguments and the
+ // original value of sp.
+ mv(scratch, sp);
+ Sub64(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
+ And(sp, sp, Operand(-frame_alignment));
+ Sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ } else {
+ Sub64(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+ }
+}
+
+void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+ Register scratch) {
+ PrepareCallCFunction(num_reg_arguments, 0, scratch);
+}
+
+void TurboAssembler::CallCFunction(ExternalReference function,
+ int num_reg_arguments,
+ int num_double_arguments) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ li(scratch, function);
+ CallCFunctionHelper(scratch, num_reg_arguments, num_double_arguments);
+}
+
+void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments) {
+ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+}
+
+void TurboAssembler::CallCFunction(ExternalReference function,
+ int num_arguments) {
+ CallCFunction(function, num_arguments, 0);
+}
+
+void TurboAssembler::CallCFunction(Register function, int num_arguments) {
+ CallCFunction(function, num_arguments, 0);
+}
+
+void TurboAssembler::CallCFunctionHelper(Register function,
+ int num_reg_arguments,
+ int num_double_arguments) {
+ DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
+ DCHECK(has_frame());
+ // Make sure that the stack is aligned before calling a C function unless
+ // running in the simulator. The simulator has its own alignment check which
+ // provides more information.
+ // The argument slots are presumed to have been set up by
+ // PrepareCallCFunction.
+
+#if V8_HOST_ARCH_RISCV64
+ if (emit_debug_code()) {
+ int frame_alignment = base::OS::ActivationFrameAlignment();
+ int frame_alignment_mask = frame_alignment - 1;
+ if (frame_alignment > kPointerSize) {
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
+ Label alignment_as_expected;
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ And(scratch, sp, Operand(frame_alignment_mask));
+ Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
+ }
+ // Don't use Check here, as it will call Runtime_Abort possibly
+ // re-entering here.
+ ebreak();
+ bind(&alignment_as_expected);
+ }
+ }
+#endif // V8_HOST_ARCH_RISCV64
+
+ // Just call directly. The function called cannot cause a GC, or
+ // allow preemption, so the return address in the link register
+ // stays correct.
+ {
+ UseScratchRegisterScope temps(this);
+ Register func_scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (function != func_scratch) {
+ mv(func_scratch, function);
+ function = func_scratch;
+ }
+
+ // Save the frame pointer and PC so that the stack layout remains
+ // iterable, even without an ExitFrame which normally exists between JS
+ // and C frames.
+ // 't' registers are caller-saved so this is safe as a scratch register.
+ Register pc_scratch = t1;
+ Register scratch = t2;
+ DCHECK(!AreAliased(pc_scratch, scratch, function));
+
+ auipc(pc_scratch, 0);
+ // TODO(RISCV): Does this need an offset? It seems like this should be the
+ // PC of the call, but MIPS does not seem to do that.
+ // https://github.com/v8-riscv/v8/issues/378
+
+ // See x64 code for reasoning about how to address the isolate data fields.
+ if (root_array_available()) {
+ Sd(pc_scratch, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_pc_offset()));
+ Sd(fp, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ li(scratch, ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ Sd(pc_scratch, MemOperand(scratch));
+ li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ Sd(fp, MemOperand(scratch));
+ }
+
+ Call(function);
+
+ if (isolate() != nullptr) {
+ // We don't unset the PC; the FP is the source of truth.
+ Register scratch = t1;
+ li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ Sd(zero_reg, MemOperand(scratch));
+ }
+ }
+
+ int stack_passed_arguments =
+ CalculateStackPassedDWords(num_reg_arguments, num_double_arguments);
+
+ if (base::OS::ActivationFrameAlignment() > kPointerSize) {
+ Ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ } else {
+ Add64(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+ }
+}
+
+#undef BRANCH_ARGS_CHECK
+
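+// Clear the page-offset bits of |object| to get its BasicMemoryChunk, load
+// the chunk's flags word, and branch to |condition_met| if (flags & mask)
+// compares against zero as specified by |cc|.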
+void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
+ Condition cc, Label* condition_met) {
+ And(scratch, object, Operand(~kPageAlignmentMask));
+ Ld(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
+ And(scratch, scratch, Operand(mask));
+ Branch(condition_met, cc, scratch, Operand(zero_reg));
+}
+
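+// Returns the first allocatable general-purpose register that is none of the
+// registers passed in (unused slots default to no_reg).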
+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
+ Register reg4, Register reg5,
+ Register reg6) {
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ Register candidate = Register::from_code(code);
+ if (regs & candidate.bit()) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+}
+
+void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+ // The push of ra here and the pop below ensure that ra is preserved, since
+ // it is clobbered while computing the code start address.
+ push(ra);
+
+ auipc(ra, 0);
+ addi(ra, ra, kInstrSize * 2); // ra = address of li
+ int pc = pc_offset();
+ li(dst, Operand(pc));
+ Sub64(dst, ra, dst);
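+ // dst = (absolute address of the li sequence) - (its offset within the
+ // buffer), i.e. the absolute start address of the generated code.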
+
+ pop(ra); // Restore ra
+}
+
+void TurboAssembler::ResetSpeculationPoisonRegister() {
+ li(kSpeculationPoisonRegister, -1);
+}
+
+void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
+ Label* exit, DeoptimizeKind kind,
+ Label* ret, Label*) {
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = temps.Acquire();
+ Ld(scratch,
+ MemOperand(kRootRegister, IsolateData::builtin_entry_slot_offset(target)));
+ Call(scratch);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ (kind == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
+ if (kind == DeoptimizeKind::kEagerWithResume) {
+ Branch(ret);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ Deoptimizer::kEagerWithResumeBeforeArgsSize);
+ }
+}
+
+void TurboAssembler::LoadCodeObjectEntry(Register destination,
+ Register code_object) {
+ // Code objects are called differently depending on whether we are generating
+ // builtin code (which will later be embedded into the binary) or compiling
+ // user JS code at runtime.
+ // * Builtin code runs in --jitless mode and thus must not call into on-heap
+ // Code targets. Instead, we dispatch through the builtins entry table.
+ // * Codegen at runtime does not have this restriction and we can use the
+ // shorter, branchless instruction sequence. The assumption here is that
+ // targets are usually generated code and not builtin Code objects.
+ if (options().isolate_independent_code) {
+ DCHECK(root_array_available());
+ Label if_code_is_off_heap, no_builtin_index, out;
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+
+ DCHECK(!AreAliased(destination, scratch));
+ DCHECK(!AreAliased(code_object, scratch));
+
+ // Check whether the Code object is an off-heap trampoline. If so, call its
+ // (off-heap) entry point directly without going through the (on-heap)
+ // trampoline. Otherwise, just call the Code object as always.
+ Lw(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
+ Branch(&if_code_is_off_heap, ne, scratch,
+ Operand(Code::IsOffHeapTrampoline::kMask));
+ // Not an off-heap trampoline object; the entry point is at
+ // Code::raw_instruction_start().
+ bind(&no_builtin_index);
+ Add64(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
+ Branch(&out);
+
+ // An off-heap trampoline; the entry point is loaded from the builtin entry
+ // table.
+ bind(&if_code_is_off_heap);
+ Lw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+ // TODO(RISCV): https://github.com/v8-riscv/v8/issues/373
+ Branch(&no_builtin_index, eq, scratch, Operand(Builtins::kNoBuiltinId));
+ slli(destination, scratch, kSystemPointerSizeLog2);
+ Add64(destination, destination, kRootRegister);
+ Ld(destination,
+ MemOperand(destination, IsolateData::builtin_entry_table_offset()));
+
+ bind(&out);
+ } else {
+ Add64(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
+ }
+}
+
+void TurboAssembler::CallCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ Call(code_object);
+}
+
+void TurboAssembler::JumpCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ Jump(code_object);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
new file mode 100644
index 0000000000..75c03cc27b
--- /dev/null
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
@@ -0,0 +1,1209 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
+#error This header must be included via macro-assembler.h
+#endif
+
+#ifndef V8_CODEGEN_RISCV64_MACRO_ASSEMBLER_RISCV64_H_
+#define V8_CODEGEN_RISCV64_MACRO_ASSEMBLER_RISCV64_H_
+
+#include "src/codegen/assembler.h"
+#include "src/codegen/riscv64/assembler-riscv64.h"
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+enum class AbortReason : uint8_t;
+
+// Reserved Register Usage Summary.
+//
+// Registers t5, t6, and t3 are reserved for use by the MacroAssembler.
+//
+// The programmer should know that the MacroAssembler may clobber these three,
+// but won't touch other registers except in special cases.
+//
+// TODO(RISCV): Cannot find info about this ABI. We chose t6 for now.
+// Per the RISC-V ABI, register t6 must be used for indirect function calls
+// via 'jalr t6' or 'jr t6' instructions. This is relied upon by gcc when
+// updating the gp register for position-independent code. Whenever RISC-V
+// generated code calls C code, it must do so via the t6 register.
+
+// Flags used for LeaveExitFrame function.
+enum LeaveExitFrameMode { EMIT_RETURN = true, NO_EMIT_RETURN = false };
+
+// Flags used for the li macro-assembler function.
+enum LiFlags {
+ // If the constant value can be represented in just 16 bits, then
+ // optimize li to use a single instruction, rather than a lui/ori/slli
+ // sequence. A number of other optimizations that emit fewer than the
+ // maximum number of instructions also exist.
+ OPTIMIZE_SIZE = 0,
+ // Always use 8 instructions (a lui/addi/slliw sequence), even if the
+ // constant could be loaded with just one, so that this value is patchable
+ // later.
+ CONSTANT_SIZE = 1,
+ // For address loads, 8 instructions are required. Used to mark a constant
+ // load that will be used as an address without relocation information.
+ // It ensures predictable code size, so specific sites in code are
+ // patchable.
+ ADDRESS_LOAD = 2
+};
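+// For example, a value that must remain patchable after code generation
+// would be loaded with li(dst, Operand(imm), CONSTANT_SIZE), while plain
+// li(dst, Operand(imm)) defaults to the size-optimized sequence.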
+
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
+
+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
+ Register reg3 = no_reg,
+ Register reg4 = no_reg,
+ Register reg5 = no_reg,
+ Register reg6 = no_reg);
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+#define SmiWordOffset(offset) (offset + kPointerSize / 2)
+#else
+#define SmiWordOffset(offset) offset
+#endif
+
+// Generate a MemOperand for loading a field from an object.
+inline MemOperand FieldMemOperand(Register object, int offset) {
+ return MemOperand(object, offset - kHeapObjectTag);
+}
+
+// Generate a MemOperand for storing arguments 5..N on the stack
+// when calling CallCFunction().
+// TODO(plind): Currently ONLY used for O32. Should be fixed for
+// n64, and used in RegExp code, and other places
+// with more than 8 arguments.
+inline MemOperand CFunctionArgumentOperand(int index) {
+ DCHECK_GT(index, kCArgSlotCount);
+ // Argument 5 takes the slot just past the four Arg-slots.
+ int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
+ return MemOperand(sp, offset);
+}
+
+class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+ public:
+ using TurboAssemblerBase::TurboAssemblerBase;
+
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
+ // Out-of-line constant pool not implemented on RISC-V.
+ UNREACHABLE();
+ }
+ void LeaveFrame(StackFrame::Type type);
+
+ // Generates function and stub prologue code.
+ void StubPrologue(StackFrame::Type type);
+ void Prologue();
+
+ void InitializeRootRegister() {
+ ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
+ li(kRootRegister, Operand(isolate_root));
+ }
+
+ // Jump unconditionally to given label.
+ void jmp(Label* L) { Branch(L); }
+
+ // -------------------------------------------------------------------------
+ // Debugging.
+
+ void Trap() override;
+ void DebugBreak() override;
+
+ // Calls Abort(msg) if the condition cc is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cc, AbortReason reason, Register rs, Operand rt);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cc, AbortReason reason, Register rs, Operand rt);
+
+ // Print a message to stdout and abort execution.
+ void Abort(AbortReason msg);
+
+ // Arguments macros.
+#define COND_TYPED_ARGS Condition cond, Register r1, const Operand &r2
+#define COND_ARGS cond, r1, r2
+
+ // Cases when relocation is not needed.
+#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
+ void Name(target_type target); \
+ void Name(target_type target, COND_TYPED_ARGS);
+
+#define DECLARE_BRANCH_PROTOTYPES(Name) \
+ DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
+ DECLARE_NORELOC_PROTOTYPE(Name, int32_t)
+
+ DECLARE_BRANCH_PROTOTYPES(Branch)
+ DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
+ DECLARE_BRANCH_PROTOTYPES(BranchShort)
+
+#undef DECLARE_BRANCH_PROTOTYPES
+#undef COND_TYPED_ARGS
+#undef COND_ARGS
+
+ inline void NegateBool(Register rd, Register rs) { Xor(rd, rs, 1); }
+
+ // Compare float, if any operand is NaN, result is false except for NE
+ void CompareF32(Register rd, FPUCondition cc, FPURegister cmp1,
+ FPURegister cmp2);
+ // Compare double, if any operand is NaN, result is false except for NE
+ void CompareF64(Register rd, FPUCondition cc, FPURegister cmp1,
+ FPURegister cmp2);
+ void CompareIsNanF32(Register rd, FPURegister cmp1, FPURegister cmp2);
+ void CompareIsNanF64(Register rd, FPURegister cmp1, FPURegister cmp2);
+
+ // Floating point branches
+ void BranchTrueShortF(Register rs, Label* target);
+ void BranchFalseShortF(Register rs, Label* target);
+
+ void BranchTrueF(Register rs, Label* target);
+ void BranchFalseF(Register rs, Label* target);
+
+ void Branch(Label* L, Condition cond, Register rs, RootIndex index);
+
+ static int InstrCountForLi64Bit(int64_t value);
+ inline void LiLower32BitHelper(Register rd, Operand j);
+ void li_optimized(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
+ // Load an immediate value into the rd register.
+ void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
+ inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
+ li(rd, Operand(j), mode);
+ }
+
+ void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
+ void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
+ void li(Register dst, const StringConstantBase* string,
+ LiFlags mode = OPTIMIZE_SIZE);
+
+ void LoadFromConstantsTable(Register destination,
+ int constant_index) override;
+ void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
+ void LoadRootRelative(Register destination, int32_t offset) override;
+
+// Jump, Call, and Ret pseudo instructions implementing inter-working.
+#define COND_ARGS \
+ Condition cond = al, Register rs = zero_reg, \
+ const Operand &rt = Operand(zero_reg)
+
+ void Jump(Register target, COND_ARGS);
+ void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
+ void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
+ // Unlike li, this method saves the target to memory and then loads it into
+ // a register with ld, so it can be used in the wasm jump table for
+ // concurrent patching.
+ void PatchAndJump(Address target);
+ void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
+ void Jump(const ExternalReference& reference) override;
+ void Call(Register target, COND_ARGS);
+ void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
+ void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ COND_ARGS);
+ void Call(Label* target);
+ void LoadAddress(
+ Register dst, Label* target,
+ RelocInfo::Mode rmode = RelocInfo::INTERNAL_REFERENCE_ENCODED);
+
+ // Load the builtin given by the Smi in |builtin_index| into the same
+ // register.
+ void LoadEntryFromBuiltinIndex(Register builtin_index);
+ void CallBuiltinByIndex(Register builtin_index) override;
+
+ void LoadCodeObjectEntry(Register destination, Register code_object) override;
+ void CallCodeObject(Register code_object) override;
+ void JumpCodeObject(Register code_object) override;
+
+ // Generates an instruction sequence s.t. the return address points to the
+ // instruction following the call.
+ // The return address on the stack is used by frame iteration.
+ void StoreReturnAddressAndCall(Register target);
+
+ void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ DeoptimizeKind kind, Label* ret,
+ Label* jump_deoptimization_entry_label);
+
+ void Ret(COND_ARGS);
+
+ // Emit code to discard a non-negative number of pointer-sized elements
+ // from the stack, clobbering only the sp register.
+ void Drop(int count, Condition cond = cc_always, Register reg = no_reg,
+ const Operand& op = Operand(no_reg));
+
+ // Trivial case of DropAndRet that only emits 2 instructions.
+ void DropAndRet(int drop);
+
+ void DropAndRet(int drop, Condition cond, Register reg, const Operand& op);
+
+ void Ld(Register rd, const MemOperand& rs);
+ void Sd(Register rd, const MemOperand& rs);
+
+ void push(Register src) {
+ Add64(sp, sp, Operand(-kPointerSize));
+ Sd(src, MemOperand(sp, 0));
+ }
+ void Push(Register src) { push(src); }
+ void Push(Handle<HeapObject> handle);
+ void Push(Smi smi);
+
+ // Push two registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2) {
+ Sub64(sp, sp, Operand(2 * kPointerSize));
+ Sd(src1, MemOperand(sp, 1 * kPointerSize));
+ Sd(src2, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push three registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3) {
+ Sub64(sp, sp, Operand(3 * kPointerSize));
+ Sd(src1, MemOperand(sp, 2 * kPointerSize));
+ Sd(src2, MemOperand(sp, 1 * kPointerSize));
+ Sd(src3, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push four registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4) {
+ Sub64(sp, sp, Operand(4 * kPointerSize));
+ Sd(src1, MemOperand(sp, 3 * kPointerSize));
+ Sd(src2, MemOperand(sp, 2 * kPointerSize));
+ Sd(src3, MemOperand(sp, 1 * kPointerSize));
+ Sd(src4, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push five registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4,
+ Register src5) {
+ Sub64(sp, sp, Operand(5 * kPointerSize));
+ Sd(src1, MemOperand(sp, 4 * kPointerSize));
+ Sd(src2, MemOperand(sp, 3 * kPointerSize));
+ Sd(src3, MemOperand(sp, 2 * kPointerSize));
+ Sd(src4, MemOperand(sp, 1 * kPointerSize));
+ Sd(src5, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ void Push(Register src, Condition cond, Register tst1, Register tst2) {
+ // Since we don't have conditional execution we use a Branch.
+ Branch(3, cond, tst1, Operand(tst2));
+ Sub64(sp, sp, Operand(kPointerSize));
+ Sd(src, MemOperand(sp, 0));
+ }
+
+ enum PushArrayOrder { kNormal, kReverse };
+ void PushArray(Register array, Register size, PushArrayOrder order = kNormal);
+
+ void SaveRegisters(RegList registers);
+ void RestoreRegisters(RegList registers);
+
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode);
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Address wasm_target);
+ void CallEphemeronKeyBarrier(Register object, Register address,
+ SaveFPRegsMode fp_mode);
+
+ // Push multiple registers on the stack.
+ // Registers are saved in numerical order, with higher numbered registers
+ // saved in higher memory addresses.
+ void MultiPush(RegList regs);
+ void MultiPushFPU(RegList regs);
+
+ // Calculate how much stack space (in bytes) is required to store caller-
+ // saved registers, excluding those specified in the arguments.
+ int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg) const;
+
+ // Push caller-saved registers on the stack, and return the number of bytes
+ // the stack pointer is adjusted by.
+ int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
+ // Restore caller-saved registers from the stack, and return the number of
+ // bytes the stack pointer is adjusted by.
+ int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
+
+ void pop(Register dst) {
+ Ld(dst, MemOperand(sp, 0));
+ Add64(sp, sp, Operand(kPointerSize));
+ }
+ void Pop(Register dst) { pop(dst); }
+
+ // Pop two registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2) {
+ DCHECK(src1 != src2);
+ Ld(src2, MemOperand(sp, 0 * kPointerSize));
+ Ld(src1, MemOperand(sp, 1 * kPointerSize));
+ Add64(sp, sp, 2 * kPointerSize);
+ }
+
+ // Pop three registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2, Register src3) {
+ Ld(src3, MemOperand(sp, 0 * kPointerSize));
+ Ld(src2, MemOperand(sp, 1 * kPointerSize));
+ Ld(src1, MemOperand(sp, 2 * kPointerSize));
+ Add64(sp, sp, 3 * kPointerSize);
+ }
+
+ void Pop(uint32_t count = 1) { Add64(sp, sp, Operand(count * kPointerSize)); }
+
+ // Pops multiple values from the stack and loads them into the registers
+ // specified in regs. Pop order is the opposite of that in MultiPush.
+ void MultiPop(RegList regs);
+ void MultiPopFPU(RegList regs);
+
+#define DEFINE_INSTRUCTION(instr) \
+ void instr(Register rd, Register rs, const Operand& rt); \
+ void instr(Register rd, Register rs, Register rt) { \
+ instr(rd, rs, Operand(rt)); \
+ } \
+ void instr(Register rs, Register rt, int32_t j) { instr(rs, rt, Operand(j)); }
+
+#define DEFINE_INSTRUCTION2(instr) \
+ void instr(Register rs, const Operand& rt); \
+ void instr(Register rs, Register rt) { instr(rs, Operand(rt)); } \
+ void instr(Register rs, int32_t j) { instr(rs, Operand(j)); }
+
+ DEFINE_INSTRUCTION(Add32)
+ DEFINE_INSTRUCTION(Add64)
+ DEFINE_INSTRUCTION(Div32)
+ DEFINE_INSTRUCTION(Divu32)
+ DEFINE_INSTRUCTION(Divu64)
+ DEFINE_INSTRUCTION(Mod32)
+ DEFINE_INSTRUCTION(Modu32)
+ DEFINE_INSTRUCTION(Div64)
+ DEFINE_INSTRUCTION(Sub32)
+ DEFINE_INSTRUCTION(Sub64)
+ DEFINE_INSTRUCTION(Mod64)
+ DEFINE_INSTRUCTION(Modu64)
+ DEFINE_INSTRUCTION(Mul32)
+ DEFINE_INSTRUCTION(Mulh32)
+ DEFINE_INSTRUCTION(Mul64)
+ DEFINE_INSTRUCTION(Mulh64)
+ DEFINE_INSTRUCTION2(Div32)
+ DEFINE_INSTRUCTION2(Div64)
+ DEFINE_INSTRUCTION2(Divu32)
+ DEFINE_INSTRUCTION2(Divu64)
+
+ DEFINE_INSTRUCTION(And)
+ DEFINE_INSTRUCTION(Or)
+ DEFINE_INSTRUCTION(Xor)
+ DEFINE_INSTRUCTION(Nor)
+ DEFINE_INSTRUCTION2(Neg)
+
+ DEFINE_INSTRUCTION(Slt)
+ DEFINE_INSTRUCTION(Sltu)
+ DEFINE_INSTRUCTION(Sle)
+ DEFINE_INSTRUCTION(Sleu)
+ DEFINE_INSTRUCTION(Sgt)
+ DEFINE_INSTRUCTION(Sgtu)
+ DEFINE_INSTRUCTION(Sge)
+ DEFINE_INSTRUCTION(Sgeu)
+ DEFINE_INSTRUCTION(Seq)
+ DEFINE_INSTRUCTION(Sne)
+
+ DEFINE_INSTRUCTION(Sll64)
+ DEFINE_INSTRUCTION(Sra64)
+ DEFINE_INSTRUCTION(Srl64)
+ DEFINE_INSTRUCTION(Sll32)
+ DEFINE_INSTRUCTION(Sra32)
+ DEFINE_INSTRUCTION(Srl32)
+
+ DEFINE_INSTRUCTION2(Seqz)
+ DEFINE_INSTRUCTION2(Snez)
+
+ DEFINE_INSTRUCTION(Ror)
+ DEFINE_INSTRUCTION(Dror)
+#undef DEFINE_INSTRUCTION
+#undef DEFINE_INSTRUCTION2
+#undef DEFINE_INSTRUCTION3
+
+ void SmiUntag(Register dst, const MemOperand& src);
+ void SmiUntag(Register dst, Register src) {
+ if (SmiValuesAre32Bits()) {
+ srai(dst, src, kSmiShift);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ sraiw(dst, src, kSmiShift);
+ }
+ }
+
+ void SmiUntag(Register reg) { SmiUntag(reg, reg); }
+
+ // Removes current frame and its arguments from the stack preserving
+ // the arguments and a return address pushed to the stack for the next call.
+ // Both |callee_args_count| and |caller_args_count| do not include
+ // receiver. |callee_args_count| is not modified. |caller_args_count|
+ // is trashed.
+ void PrepareForTailCall(Register callee_args_count,
+ Register caller_args_count, Register scratch0,
+ Register scratch1);
+
+ int CalculateStackPassedDWords(int num_gp_arguments, int num_fp_arguments);
+
+ // Before calling a C-function from generated code, align arguments on stack.
+ // After aligning the frame, non-register arguments must be stored on the
+ // stack, using helper: CFunctionArgumentOperand().
+ // The argument count assumes all arguments are word sized.
+ // Some compilers/platforms require the stack to be aligned when calling
+ // C++ code.
+ // Needs a scratch register to do some arithmetic. This register will be
+ // trashed.
+ void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
+ Register scratch);
+ void PrepareCallCFunction(int num_reg_arguments, Register scratch);
+
+ // Arguments 1-8 are placed in registers a0 through a7 respectively.
+ // Arguments 9..n are stored on the stack.
+
+ // Calls a C function and cleans up the space for arguments allocated
+ // by PrepareCallCFunction. The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
+ void CallCFunction(ExternalReference function, int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments);
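+ // Illustrative sequence (sketch): for a C function taking two GP arguments,
+ // generated code would typically do PrepareCallCFunction(2, scratch), move
+ // the arguments into a0 and a1, and then call
+ // CallCFunction(ref, 2), where ref is an ExternalReference to the target.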
+ void MovFromFloatResult(DoubleRegister dst);
+ void MovFromFloatParameter(DoubleRegister dst);
+
+ // These functions abstract parameter passing for the three different ways
+ // we call C functions from generated code.
+ void MovToFloatParameter(DoubleRegister src);
+ void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
+ void MovToFloatResult(DoubleRegister src);
+
+ // See comments at the beginning of Builtins::Generate_CEntry.
+ inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
+ inline void PrepareCEntryFunction(const ExternalReference& ref) {
+ li(a1, ref);
+ }
+
+ void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
+ Label* condition_met);
+#undef COND_ARGS
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
+ DoubleRegister double_input, StubCallMode stub_mode);
+
+ void CompareI(Register rd, Register rs, const Operand& rt, Condition cond);
+
+ void LoadZeroIfConditionNotZero(Register dest, Register condition);
+ void LoadZeroIfConditionZero(Register dest, Register condition);
+
+ void SignExtendByte(Register rd, Register rs) {
+ slli(rd, rs, 64 - 8);
+ srai(rd, rd, 64 - 8);
+ }
+
+ void SignExtendShort(Register rd, Register rs) {
+ slli(rd, rs, 64 - 16);
+ srai(rd, rd, 64 - 16);
+ }
+
+ void SignExtendWord(Register rd, Register rs) { sext_w(rd, rs); }
+ void ZeroExtendWord(Register rd, Register rs) {
+ slli(rd, rs, 32);
+ srli(rd, rd, 32);
+ }
+
+ void Clz32(Register rd, Register rs);
+ void Clz64(Register rd, Register rs);
+ void Ctz32(Register rd, Register rs);
+ void Ctz64(Register rd, Register rs);
+ void Popcnt32(Register rd, Register rs);
+ void Popcnt64(Register rd, Register rs);
+
+ // The bit field starting at bit pos and extending for size bits is
+ // extracted from rs and stored zero/sign-extended and right-justified in rt.
+ void ExtractBits(Register rt, Register rs, uint16_t pos, uint16_t size,
+ bool sign_extend = false);
+ void ExtractBits(Register dest, Register source, Register pos, int size,
+ bool sign_extend = false) {
+ sra(dest, source, pos);
+ ExtractBits(dest, dest, 0, size, sign_extend);
+ }
+
+ // Insert bits [0, size) of source to bits [pos, pos+size) of dest
+ void InsertBits(Register dest, Register source, Register pos, int size);
+
+ void Neg_s(FPURegister fd, FPURegister fs);
+ void Neg_d(FPURegister fd, FPURegister fs);
+
+ // Change endianness
+ void ByteSwap(Register dest, Register src, int operand_size);
+
+ // Convert single to unsigned word.
+ void Trunc_uw_s(Register rd, FPURegister fs, Register result = no_reg);
+
+ // helper functions for unaligned load/store
+ template <int NBYTES, bool IS_SIGNED>
+ void UnalignedLoadHelper(Register rd, const MemOperand& rs);
+ template <int NBYTES>
+ void UnalignedStoreHelper(Register rd, const MemOperand& rs,
+ Register scratch_other = no_reg);
+
+ template <int NBYTES>
+ void UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs);
+ template <int NBYTES>
+ void UnalignedFStoreHelper(FPURegister frd, const MemOperand& rs);
+
+ template <typename Reg_T, typename Func>
+ void AlignedLoadHelper(Reg_T target, const MemOperand& rs, Func generator);
+ template <typename Reg_T, typename Func>
+ void AlignedStoreHelper(Reg_T value, const MemOperand& rs, Func generator);
+
+ template <int NBYTES, bool LOAD_SIGNED>
+ void LoadNBytes(Register rd, const MemOperand& rs, Register scratch);
+ template <int NBYTES, bool LOAD_SIGNED>
+ void LoadNBytesOverwritingBaseReg(const MemOperand& rs, Register scratch0,
+ Register scratch1);
+ // load/store macros
+ void Ulh(Register rd, const MemOperand& rs);
+ void Ulhu(Register rd, const MemOperand& rs);
+ void Ush(Register rd, const MemOperand& rs);
+
+ void Ulw(Register rd, const MemOperand& rs);
+ void Ulwu(Register rd, const MemOperand& rs);
+ void Usw(Register rd, const MemOperand& rs);
+
+ void Uld(Register rd, const MemOperand& rs);
+ void Usd(Register rd, const MemOperand& rs);
+
+ void ULoadFloat(FPURegister fd, const MemOperand& rs);
+ void UStoreFloat(FPURegister fd, const MemOperand& rs);
+
+ void ULoadDouble(FPURegister fd, const MemOperand& rs);
+ void UStoreDouble(FPURegister fd, const MemOperand& rs);
+
+ void Lb(Register rd, const MemOperand& rs);
+ void Lbu(Register rd, const MemOperand& rs);
+ void Sb(Register rd, const MemOperand& rs);
+
+ void Lh(Register rd, const MemOperand& rs);
+ void Lhu(Register rd, const MemOperand& rs);
+ void Sh(Register rd, const MemOperand& rs);
+
+ void Lw(Register rd, const MemOperand& rs);
+ void Lwu(Register rd, const MemOperand& rs);
+ void Sw(Register rd, const MemOperand& rs);
+
+ void LoadFloat(FPURegister fd, const MemOperand& src);
+ void StoreFloat(FPURegister fs, const MemOperand& dst);
+
+ void LoadDouble(FPURegister fd, const MemOperand& src);
+ void StoreDouble(FPURegister fs, const MemOperand& dst);
+
+ void Ll(Register rd, const MemOperand& rs);
+ void Sc(Register rd, const MemOperand& rs);
+
+ void Lld(Register rd, const MemOperand& rs);
+ void Scd(Register rd, const MemOperand& rs);
+
+ void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2);
+ template <typename F>
+ void FloatMinMaxHelper(FPURegister dst, FPURegister src1, FPURegister src2,
+ MaxMinKind kind);
+
+ bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
+ bool IsSingleZeroRegSet() { return has_single_zero_reg_set_; }
+
+ inline void Move(Register dst, Smi smi) { li(dst, Operand(smi)); }
+
+ inline void Move(Register dst, Register src) {
+ if (dst != src) {
+ mv(dst, src);
+ }
+ }
+
+ inline void MoveDouble(FPURegister dst, FPURegister src) {
+ if (dst != src) fmv_d(dst, src);
+ }
+
+ inline void MoveFloat(FPURegister dst, FPURegister src) {
+ if (dst != src) fmv_s(dst, src);
+ }
+
+ inline void Move(FPURegister dst, FPURegister src) { MoveDouble(dst, src); }
+
+ inline void Move(Register dst_low, Register dst_high, FPURegister src) {
+ fmv_x_d(dst_high, src);
+ fmv_x_w(dst_low, src);
+ srli(dst_high, dst_high, 32);
+ }
+
+ inline void Move(Register dst, FPURegister src) { fmv_x_d(dst, src); }
+
+ inline void Move(FPURegister dst, Register src) { fmv_d_x(dst, src); }
+
+ // Extract sign-extended word from high-half of FPR to GPR
+ inline void ExtractHighWordFromF64(Register dst_high, FPURegister src) {
+ fmv_x_d(dst_high, src);
+ srai(dst_high, dst_high, 32);
+ }
+
+ // Insert low-word from GPR (src_high) to the high-half of FPR (dst)
+ void InsertHighWordF64(FPURegister dst, Register src_high);
+
+ // Extract sign-extended word from low-half of FPR to GPR
+ inline void ExtractLowWordFromF64(Register dst_low, FPURegister src) {
+ fmv_x_w(dst_low, src);
+ }
+
+ // Insert low-word from GPR (src_low) to the low-half of FPR (dst)
+ void InsertLowWordF64(FPURegister dst, Register src_low);
+
+ void LoadFPRImmediate(FPURegister dst, float imm) {
+ LoadFPRImmediate(dst, bit_cast<uint32_t>(imm));
+ }
+ void LoadFPRImmediate(FPURegister dst, double imm) {
+ LoadFPRImmediate(dst, bit_cast<uint64_t>(imm));
+ }
+ void LoadFPRImmediate(FPURegister dst, uint32_t src);
+ void LoadFPRImmediate(FPURegister dst, uint64_t src);
+
+ // AddOverflow64 sets the overflow register to a negative value if overflow
+ // occurred; otherwise it is zero or positive.
+ void AddOverflow64(Register dst, Register left, const Operand& right,
+ Register overflow);
+ // SubOverflow64 sets the overflow register to a negative value if overflow
+ // occurred; otherwise it is zero or positive.
+ void SubOverflow64(Register dst, Register left, const Operand& right,
+ Register overflow);
+ // MulOverflow32 sets the overflow register to zero if no overflow occurred.
+ void MulOverflow32(Register dst, Register left, const Operand& right,
+ Register overflow);
+
+ // MIPS-style 32-bit unsigned mulh
+ void Mulhu32(Register dst, Register left, const Operand& right,
+ Register left_zero, Register right_zero);
+
+ // Number of instructions needed for calculation of switch table entry address
+ static const int kSwitchTablePrologueSize = 6;
+
+ // GetLabelFunction must be lambda '[](size_t index) -> Label*' or a
+ // functor/function with 'Label *func(size_t index)' declaration.
+ template <typename Func>
+ void GenerateSwitchTable(Register index, size_t case_count,
+ Func GetLabelFunction);
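+ // Illustrative use (sketch): given an array of Labels,
+ //   GenerateSwitchTable(index, count,
+ //                       [&labels](size_t i) -> Label* { return &labels[i]; });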
+
+ // Load an object from the root table.
+ void LoadRoot(Register destination, RootIndex index) override;
+ void LoadRoot(Register destination, RootIndex index, Condition cond,
+ Register src1, const Operand& src2);
+
+ void LoadMap(Register destination, Register object);
+
+ // If the value is a NaN, canonicalize the value; else, do nothing.
+ void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
+
+ // ---------------------------------------------------------------------------
+ // FPU macros. These do not handle special cases like NaN or +- inf.
+
+ // Convert unsigned word to double.
+ void Cvt_d_uw(FPURegister fd, Register rs);
+
+ // Convert signed word to double.
+ void Cvt_d_w(FPURegister fd, Register rs);
+
+ // Convert unsigned long to double.
+ void Cvt_d_ul(FPURegister fd, Register rs);
+
+ // Convert unsigned word to float.
+ void Cvt_s_uw(FPURegister fd, Register rs);
+
+ // Convert signed word to float.
+ void Cvt_s_w(FPURegister fd, Register rs);
+
+ // Convert unsigned long to float.
+ void Cvt_s_ul(FPURegister fd, Register rs);
+
+ // Convert double to unsigned word.
+ void Trunc_uw_d(Register rd, FPURegister fs, Register result = no_reg);
+
+ // Convert double to signed word.
+ void Trunc_w_d(Register rd, FPURegister fs, Register result = no_reg);
+
+ // Convert single to signed word.
+ void Trunc_w_s(Register rd, FPURegister fs, Register result = no_reg);
+
+ // Convert double to unsigned long.
+ void Trunc_ul_d(Register rd, FPURegister fs, Register result = no_reg);
+
+ // Convert double to signed long.
+ void Trunc_l_d(Register rd, FPURegister fs, Register result = no_reg);
+
+ // Convert single to unsigned long.
+ void Trunc_ul_s(Register rd, FPURegister fs, Register result = no_reg);
+
+ // Convert single to signed long.
+ void Trunc_l_s(Register rd, FPURegister fs, Register result = no_reg);
+
+ // Round single to signed word.
+ void Round_w_s(Register rd, FPURegister fs, Register result = no_reg);
+
+ // Round double to signed word.
+ void Round_w_d(Register rd, FPURegister fs, Register result = no_reg);
+
+ // Ceil single to signed word.
+ void Ceil_w_s(Register rd, FPURegister fs, Register result = no_reg);
+
+ // Ceil double to signed word.
+ void Ceil_w_d(Register rd, FPURegister fs, Register result = no_reg);
+
+ // Floor single to signed word.
+ void Floor_w_s(Register rd, FPURegister fs, Register result = no_reg);
+
+ // Floor double to signed word.
+ void Floor_w_d(Register rd, FPURegister fs, Register result = no_reg);
+
+ // Round double functions
+ void Trunc_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+ void Round_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+ void Floor_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+ void Ceil_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+
+ // Round float functions
+ void Trunc_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+ void Round_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+ void Floor_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+ void Ceil_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+
+ // Jump if the register contains a smi.
+ void JumpIfSmi(Register value, Label* smi_label, Register scratch = t3);
+
+ void JumpIfEqual(Register a, int32_t b, Label* dest) {
+ Branch(dest, eq, a, Operand(b));
+ }
+
+ void JumpIfLessThan(Register a, int32_t b, Label* dest) {
+ Branch(dest, lt, a, Operand(b));
+ }
+
+ // Push a standard frame, consisting of ra, fp, context and JS function.
+ void PushStandardFrame(Register function_reg);
+
+ // Get the actual activation frame alignment for target environment.
+ static int ActivationFrameAlignment();
+
+ // Calculate scaled address (rd) as rt + (rs << sa)
+ void CalcScaledAddress(Register rd, Register rs, Register rt, uint8_t sa,
+ Register scratch = t3);
+
+ // Compute the start of the generated instruction stream from the current PC.
+ // This is an alternative to embedding the {CodeObject} handle as a reference.
+ void ComputeCodeStartAddress(Register dst);
+
+ void ResetSpeculationPoisonRegister();
+
+ // Control-flow integrity:
+
+ // Define a function entrypoint. This doesn't emit any code for this
+ // architecture, as control-flow integrity is not supported for it.
+ void CodeEntry() {}
+ // Define an exception handler.
+ void ExceptionHandler() {}
+ // Define an exception handler and bind a label.
+ void BindExceptionHandler(Label* label) { bind(label); }
+
+ protected:
+ inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
+ inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
+
+ private:
+ bool has_double_zero_reg_set_ = false;
+ bool has_single_zero_reg_set_ = false;
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+ // succeeds, otherwise falls through if result is saturated. On return
+ // 'result' either holds answer, or is clobbered on fall through.
+ void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+ Label* done);
+
+ void CallCFunctionHelper(Register function, int num_reg_arguments,
+ int num_double_arguments);
+
+ // TODO(RISCV) Reorder parameters so out parameters come last.
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+ Register* scratch, const Operand& rt);
+
+ void BranchShortHelper(int32_t offset, Label* L);
+ bool BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt);
+ bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt);
+
+ void BranchAndLinkShortHelper(int32_t offset, Label* L);
+ void BranchAndLinkShort(int32_t offset);
+ void BranchAndLinkShort(Label* L);
+ bool BranchAndLinkShortHelper(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt);
+ bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt);
+ void BranchLong(Label* L);
+ void BranchAndLinkLong(Label* L);
+
+ template <typename F_TYPE>
+ void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
+ RoundingMode mode);
+
+ template <typename TruncFunc>
+ void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result,
+ TruncFunc trunc);
+
+ // Push a fixed frame, consisting of ra, fp.
+ void PushCommonFrame(Register marker_reg = no_reg);
+
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, int builtin_index,
+ Address wasm_target);
+};
+
+// MacroAssembler implements a collection of frequently used macros.
+class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
+ public:
+ using TurboAssembler::TurboAssembler;
+
+ // It assumes that the arguments are located below the stack pointer.
+ // argc is the number of arguments not including the receiver.
+ // TODO(victorgomes): Remove this function once we stick with the reversed
+ // arguments order.
+ void LoadReceiver(Register dest, Register argc) {
+ Ld(dest, MemOperand(sp, 0));
+ }
+
+ void StoreReceiver(Register rec, Register argc, Register scratch) {
+ Sd(rec, MemOperand(sp, 0));
+ }
+
+ bool IsNear(Label* L, Condition cond, int rs_reg);
+
+ // Swap two registers. If the scratch register is omitted then a slightly
+ // less efficient form using xor instead of mov is emitted.
+ void Swap(Register reg1, Register reg2, Register scratch = no_reg);
+
+ void PushRoot(RootIndex index) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadRoot(scratch, index);
+ Push(scratch);
+ }
+
+ // Compare the object in a register to a value and jump if they are equal.
+ void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadRoot(scratch, index);
+ Branch(if_equal, eq, with, Operand(scratch));
+ }
+
+ // Compare the object in a register to a value and jump if they are not equal.
+ void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadRoot(scratch, index);
+ Branch(if_not_equal, ne, with, Operand(scratch));
+ }
+
+ // Checks if value is in range [lower_limit, higher_limit] using a single
+ // comparison.
+ void JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit, Label* on_in_range);
+
+ // ---------------------------------------------------------------------------
+ // GC Support
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+ // stored. value and scratch registers are clobbered by the operation.
+ // The offset is the offset from the start of the object, not the offset from
+ // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
+ void RecordWriteField(
+ Register object, int offset, Register value, Register scratch,
+ RAStatus ra_status, SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
+
+ // For a given |object| notify the garbage collector that the slot |address|
+ // has been written. |value| is the object being stored. The value and
+ // address registers are clobbered by the operation.
+ void RecordWrite(
+ Register object, Register address, Register value, RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
+
+ // void Pref(int32_t hint, const MemOperand& rs);
+
+ // ---------------------------------------------------------------------------
+ // Pseudo-instructions.
+
+ void LoadWordPair(Register rd, const MemOperand& rs, Register scratch = t3);
+ void StoreWordPair(Register rd, const MemOperand& rs, Register scratch = t3);
+
+ void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
+ void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
+ void Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
+ void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
+
+ // Enter exit frame.
+ // argc - argument count to be dropped by LeaveExitFrame.
+ // save_doubles - saves FPU registers on stack, currently disabled.
+ // stack_space - extra stack space.
+ void EnterExitFrame(bool save_doubles, int stack_space = 0,
+ StackFrame::Type frame_type = StackFrame::EXIT);
+
+ // Leave the current exit frame.
+ void LeaveExitFrame(bool save_doubles, Register arg_count,
+ bool do_return = NO_EMIT_RETURN,
+ bool argument_count_is_length = false);
+
+ // Make sure the stack is aligned. Only emits code in debug mode.
+ void AssertStackIsAligned();
+
+ // Load the global proxy from the current context.
+ void LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ }
+
+ void LoadNativeContextSlot(int index, Register dst);
+
+ // Load the initial map from the global function. The registers
+ // function and map can be the same, function is then overwritten.
+ void LoadGlobalFunctionInitialMap(Register function, Register map,
+ Register scratch);
+
+ // -------------------------------------------------------------------------
+ // JavaScript invokes.
+
+ // Invoke the JavaScript function code by either calling or jumping.
+ void InvokeFunctionCode(Register function, Register new_target,
+ Register expected_parameter_count,
+ Register actual_parameter_count, InvokeFlag flag);
+
+ // On function call, call into the debugger if necessary.
+ void CheckDebugHook(Register fun, Register new_target,
+ Register expected_parameter_count,
+ Register actual_parameter_count);
+
+ // Invoke the JavaScript function in the given register. Changes the
+ // current context to the context in the function before invoking.
+ void InvokeFunctionWithNewTarget(Register function, Register new_target,
+ Register actual_parameter_count,
+ InvokeFlag flag);
+ void InvokeFunction(Register function, Register expected_parameter_count,
+ Register actual_parameter_count, InvokeFlag flag);
+
+ // Frame restart support.
+ void MaybeDropFrames();
+
+ // Exception handling.
+
+ // Push a new stack handler and link into stack handler chain.
+ void PushStackHandler();
+
+ // Unlink the stack handler on top of the stack from the stack handler chain.
+ // Must preserve the result register.
+ void PopStackHandler();
+
+ // -------------------------------------------------------------------------
+ // Support functions.
+
+ void GetObjectType(Register function, Register map, Register type_reg);
+
+ void GetInstanceTypeRange(Register map, Register type_reg,
+ InstanceType lower_limit, Register range);
+
+ // -------------------------------------------------------------------------
+ // Runtime calls.
+
+ // Call a runtime routine.
+ void CallRuntime(const Runtime::Function* f, int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
+ }
+
+ // Convenience function: tail call a runtime routine (jump).
+ void TailCallRuntime(Runtime::FunctionId fid);
+
+ // Jump to the builtin routine.
+ void JumpToExternalReference(const ExternalReference& builtin,
+ bool builtin_exit_frame = false);
+
+ // Generates a trampoline to jump to the off-heap instruction stream.
+ void JumpToInstructionStream(Address entry);
+
+ // ---------------------------------------------------------------------------
+ // In-place weak references.
+ void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
+
+ // -------------------------------------------------------------------------
+ // StatsCounter support.
+
+ void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+
+ // -------------------------------------------------------------------------
+ // Stack limit utilities
+
+ enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+ void LoadStackLimit(Register destination, StackLimitKind kind);
+ void StackOverflowCheck(Register num_args, Register scratch1,
+ Register scratch2, Label* stack_overflow);
+
+ // -------------------------------------------------------------------------
+ // Smi utilities.
+
+ void SmiTag(Register dst, Register src) {
+ STATIC_ASSERT(kSmiTag == 0);
+ if (SmiValuesAre32Bits()) {
+ // Smi goes to upper 32
+ slli(dst, src, 32);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ // Smi is shifted left by 1
+ Add32(dst, src, src);
+ }
+ }
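+ // For example, with 32-bit Smis, SmiTag turns an untagged int32 value v
+ // into the tagged value v << 32.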
+
+ void SmiTag(Register reg) { SmiTag(reg, reg); }
+
+ // Left-shifted from int32 equivalent of Smi.
+ void SmiScale(Register dst, Register src, int scale) {
+ if (SmiValuesAre32Bits()) {
+ // The int portion is upper 32-bits of 64-bit word.
+ srai(dst, src, (kSmiShift - scale) & 0x3F);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ DCHECK_GE(scale, kSmiTagSize);
+ slliw(dst, src, scale - kSmiTagSize);
+ }
+ }
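+ // For example, SmiScale(dst, smi, kPointerSizeLog2) yields the untagged
+ // value shifted left by kPointerSizeLog2, i.e. a byte offset for
+ // pointer-sized element access.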
+
+ // Test if the register contains a smi.
+ inline void SmiTst(Register value, Register scratch) {
+ And(scratch, value, Operand(kSmiTagMask));
+ }
+
+ // Jump if the register contains a non-smi.
+ void JumpIfNotSmi(Register value, Label* not_smi_label,
+ Register scratch = t3);
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
+ void AssertSmi(Register object);
+
+ // Abort execution if argument is not a Constructor, enabled via --debug-code.
+ void AssertConstructor(Register object);
+
+ // Abort execution if argument is not a JSFunction, enabled via --debug-code.
+ void AssertFunction(Register object);
+
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
+ // Abort execution if argument is not a JSGeneratorObject (or subclass),
+ // enabled via --debug-code.
+ void AssertGeneratorObject(Register object);
+
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+
+ template <typename Field>
+ void DecodeField(Register dst, Register src) {
+ ExtractBits(dst, src, Field::kShift, Field::kSize);
+ }
+
+ template <typename Field>
+ void DecodeField(Register reg) {
+ DecodeField<Field>(reg, reg);
+ }
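+ // For a bit field F declaring F::kShift and F::kSize, this extracts bits
+ // [kShift, kShift + kSize) of src into dst, zero-extended.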
+
+ private:
+ // Helper functions for generating invokes.
+ void InvokePrologue(Register expected_parameter_count,
+ Register actual_parameter_count, Label* done,
+ InvokeFlag flag);
+
+ // Compute memory operands for safepoint stack slots.
+ static int SafepointRegisterStackIndex(int reg_code);
+
+ // Needs access to SafepointRegisterStackIndex for compiled frame
+ // traversal.
+ friend class CommonFrame;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
+};
+
+template <typename Func>
+void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
+ Func GetLabelFunction) {
+ // Ensure that dd-ed labels following this instruction use 8-byte aligned
+ // addresses.
+ BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 +
+ kSwitchTablePrologueSize);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+
+ Align(8);
+ // Load the address from the jump table at index and jump to it
+ auipc(scratch, 0); // Load the current PC into scratch
+ slli(scratch2, index,
+ kPointerSizeLog2); // scratch2 = offset of indexth entry
+ add(scratch2, scratch2,
+ scratch); // scratch2 = (saved PC) + (offset of indexth entry)
+ ld(scratch2, scratch2,
+ 6 * kInstrSize); // Add the size of these 6 instructions to the
+ // offset, then load
+ jr(scratch2); // Jump to the address loaded from the table
+ nop(); // For 16-byte alignment
+ for (size_t index = 0; index < case_count; ++index) {
+ dd(GetLabelFunction(index));
+ }
+}
+
+#define ACCESS_MASM(masm) masm->
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_RISCV64_MACRO_ASSEMBLER_RISCV64_H_
diff --git a/deps/v8/src/codegen/riscv64/register-riscv64.h b/deps/v8/src/codegen/riscv64/register-riscv64.h
new file mode 100644
index 0000000000..2626c4eae7
--- /dev/null
+++ b/deps/v8/src/codegen/riscv64/register-riscv64.h
@@ -0,0 +1,346 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_RISCV64_REGISTER_RISCV64_H_
+#define V8_CODEGEN_RISCV64_REGISTER_RISCV64_H_
+
+#include "src/codegen/register.h"
+#include "src/codegen/reglist.h"
+#include "src/codegen/riscv64/constants-riscv64.h"
+
+namespace v8 {
+namespace internal {
+
+// clang-format off
+#define GENERAL_REGISTERS(V) \
+ V(zero_reg) V(ra) V(sp) V(gp) V(tp) V(t0) V(t1) V(t2) \
+ V(fp) V(s1) V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) \
+ V(a6) V(a7) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(s8) V(s9) \
+ V(s10) V(s11) V(t3) V(t4) V(t5) V(t6)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(a0) V(a1) V(a2) V(a3) \
+ V(a4) V(a5) V(a6) V(a7) V(t0) V(t1) V(t2) V(s7) V(t4)
+
+#define DOUBLE_REGISTERS(V) \
+ V(ft0) V(ft1) V(ft2) V(ft3) V(ft4) V(ft5) V(ft6) V(ft7) \
+ V(fs0) V(fs1) V(fa0) V(fa1) V(fa2) V(fa3) V(fa4) V(fa5) \
+ V(fa6) V(fa7) V(fs2) V(fs3) V(fs4) V(fs5) V(fs6) V(fs7) \
+ V(fs8) V(fs9) V(fs10) V(fs11) V(ft8) V(ft9) V(ft10) V(ft11)
+
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+#define SIMD128_REGISTERS(V) \
+ V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \
+ V(w8) V(w9) V(w10) V(w11) V(w12) V(w13) V(w14) V(w15) \
+ V(w16) V(w17) V(w18) V(w19) V(w20) V(w21) V(w22) V(w23) \
+ V(w24) V(w25) V(w26) V(w27) V(w28) V(w29) V(w30) V(w31)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(ft0) V(ft1) V(ft2) V(ft3) \
+ V(ft4) V(ft5) V(ft6) V(ft7) V(fa0) V(fa1) V(fa2) V(fa3) V(fa4) V(fa5) \
+ V(fa6) V(fa7)
+
+// clang-format on
+
+// Note that the bit values must match those used in actual instruction
+// encoding.
+const int kNumRegs = 32;
+
+const RegList kJSCallerSaved = 1 << 5 | // t0
+ 1 << 6 | // t1
+ 1 << 7 | // t2
+ 1 << 10 | // a0
+ 1 << 11 | // a1
+ 1 << 12 | // a2
+ 1 << 13 | // a3
+ 1 << 14 | // a4
+ 1 << 15 | // a5
+ 1 << 16 | // a6
+ 1 << 17 | // a7
+ 1 << 29; // t4
+
+const int kNumJSCallerSaved = 12;
+
+// Callee-saved registers preserved when switching from C to JavaScript.
+const RegList kCalleeSaved = 1 << 8 | // fp/s0
+ 1 << 9 | // s1
+ 1 << 18 | // s2
+ 1 << 19 | // s3
+ 1 << 20 | // s4
+ 1 << 21 | // s5
+ 1 << 22 | // s6 (roots in Javascript code)
+ 1 << 23 | // s7 (cp in Javascript code)
+ 1 << 24 | // s8
+ 1 << 25 | // s9
+ 1 << 26 | // s10
+ 1 << 27; // s11
+
+const int kNumCalleeSaved = 12;
+
+const RegList kCalleeSavedFPU = 1 << 8 | // fs0
+ 1 << 9 | // fs1
+ 1 << 18 | // fs2
+ 1 << 19 | // fs3
+ 1 << 20 | // fs4
+ 1 << 21 | // fs5
+ 1 << 22 | // fs6
+ 1 << 23 | // fs7
+ 1 << 24 | // fs8
+ 1 << 25 | // fs9
+ 1 << 26 | // fs10
+ 1 << 27; // fs11
+
+const int kNumCalleeSavedFPU = 12;
+
+const RegList kCallerSavedFPU = 1 << 0 | // ft0
+ 1 << 1 | // ft1
+ 1 << 2 | // ft2
+ 1 << 3 | // ft3
+ 1 << 4 | // ft4
+ 1 << 5 | // ft5
+ 1 << 6 | // ft6
+ 1 << 7 | // ft7
+ 1 << 10 | // fa0
+ 1 << 11 | // fa1
+ 1 << 12 | // fa2
+ 1 << 13 | // fa3
+ 1 << 14 | // fa4
+ 1 << 15 | // fa5
+ 1 << 16 | // fa6
+ 1 << 17 | // fa7
+ 1 << 28 | // ft8
+ 1 << 29 | // ft9
+ 1 << 30 | // ft10
+ 1 << 31; // ft11
+
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of 8.
+const int kNumSafepointRegisters = 32;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
+
+const int kUndefIndex = -1;
+// Maps a register code to its index among the registers saved at safepoints.
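+// Indices are assigned in increasing register-code order over the registers in
+// kSafepointSavedRegisters; registers outside that set map to kUndefIndex.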
+const int kSafepointRegisterStackIndexMap[kNumRegs] = {kUndefIndex, // zero_reg
+ kUndefIndex, // ra
+ kUndefIndex, // sp
+ kUndefIndex, // gp
+ kUndefIndex, // tp
+ 0, // t0
+ 1, // t1
+ 2, // t2
+ 3, // s0/fp
+ 4, // s1
+ 5, // a0
+ 6, // a1
+ 7, // a2
+ 8, // a3
+ 9, // a4
+ 10, // a5
+ 11, // a6
+ 12, // a7
+ 13, // s2
+ 14, // s3
+ 15, // s4
+ 16, // s5
+ 17, // s6
+ 18, // s7
+ 19, // s8
+                                                        20,           // s9
+ 21, // s10
+ 22, // s11
+ kUndefIndex, // t3
+ 23, // t4
+ kUndefIndex, // t5
+ kUndefIndex}; // t6
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+
+// -----------------------------------------------------------------------------
+// Implementation of Register and FPURegister.
+
+enum RegisterCode {
+#define REGISTER_CODE(R) kRegCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kRegAfterLast
+};
+
+class Register : public RegisterBase<Register, kRegAfterLast> {
+ public:
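+  // Byte offsets of the low (mantissa) and high (exponent) 32-bit words of a
+  // 64-bit double in memory; they depend on the target endianness.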
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ static constexpr int kMantissaOffset = 0;
+ static constexpr int kExponentOffset = 4;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ static constexpr int kMantissaOffset = 4;
+ static constexpr int kExponentOffset = 0;
+#else
+#error Unknown endianness
+#endif
+
+ private:
+ friend class RegisterBase;
+ explicit constexpr Register(int code) : RegisterBase(code) {}
+};
+
+// s7: context register
+// s3: scratch register
+// s4: scratch register 2
+#define DECLARE_REGISTER(R) \
+ constexpr Register R = Register::from_code(kRegCode_##R);
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+
+constexpr Register no_reg = Register::no_reg();
+
+int ToNumber(Register reg);
+
+Register ToRegister(int num);
+
+constexpr bool kPadArguments = false;
+constexpr bool kSimpleFPAliasing = true;
+constexpr bool kSimdMaskRegisters = false;
+
+enum DoubleRegisterCode {
+#define REGISTER_CODE(R) kDoubleCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kDoubleAfterLast
+};
+
+// Coprocessor register.
+class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
+ public:
+ // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
+ // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
+ // number of Double regs (64-bit regs, or FPU-reg-pairs).
+
+ FPURegister low() const {
+ // TODO(plind): Create DCHECK for FR=0 mode. This usage suspect for FR=1.
+ // Find low reg of a Double-reg pair, which is the reg itself.
+ return FPURegister::from_code(code());
+ }
+ FPURegister high() const {
+ // TODO(plind): Create DCHECK for FR=0 mode. This usage illegal in FR=1.
+    // Find high reg of a Double-reg pair, which is reg + 1.
+ return FPURegister::from_code(code() + 1);
+ }
+
+ private:
+ friend class RegisterBase;
+ explicit constexpr FPURegister(int code) : RegisterBase(code) {}
+};
+
+enum MSARegisterCode {
+#define REGISTER_CODE(R) kMsaCode_##R,
+ SIMD128_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kMsaAfterLast
+};
+
+// MIPS SIMD (MSA) register
+// TODO(RISCV): Remove MIPS MSA registers.
+// https://github.com/v8-riscv/v8/issues/429
+class MSARegister : public RegisterBase<MSARegister, kMsaAfterLast> {
+ friend class RegisterBase;
+ explicit constexpr MSARegister(int code) : RegisterBase(code) {}
+};
+
+// A few double registers are reserved: one as a scratch register and one to
+// hold 0.0.
+// fs9: 0.0
+// fs11: scratch register.
+
+// Floats and Doubles refer to the same set of FPU registers.
+using FloatRegister = FPURegister;
+
+using DoubleRegister = FPURegister;
+
+#define DECLARE_DOUBLE_REGISTER(R) \
+ constexpr DoubleRegister R = DoubleRegister::from_code(kDoubleCode_##R);
+DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
+#undef DECLARE_DOUBLE_REGISTER
+
+constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
+
+// SIMD registers.
+using Simd128Register = MSARegister;
+
+#define DECLARE_SIMD128_REGISTER(R) \
+ constexpr Simd128Register R = Simd128Register::from_code(kMsaCode_##R);
+SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER)
+#undef DECLARE_SIMD128_REGISTER
+
+const Simd128Register no_msareg = Simd128Register::no_reg();
+
+// Register aliases.
+// cp is assumed to be a callee saved register.
+constexpr Register kRootRegister = s6;
+constexpr Register cp = s7;
+constexpr Register kScratchReg = s3;
+constexpr Register kScratchReg2 = s4;
+
+constexpr DoubleRegister kScratchDoubleReg = fs11;
+
+constexpr DoubleRegister kDoubleRegZero = fs9;
+
+// Define {RegisterName} methods for the register types.
+DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
+DEFINE_REGISTER_NAMES(FPURegister, DOUBLE_REGISTERS)
+DEFINE_REGISTER_NAMES(MSARegister, SIMD128_REGISTERS)
+
+// Give alias names to registers for calling conventions.
+constexpr Register kReturnRegister0 = a0;
+constexpr Register kReturnRegister1 = a1;
+constexpr Register kReturnRegister2 = a2;
+constexpr Register kJSFunctionRegister = a1;
+constexpr Register kContextRegister = s7;
+constexpr Register kAllocateSizeRegister = a1;
+constexpr Register kSpeculationPoisonRegister = a7;
+constexpr Register kInterpreterAccumulatorRegister = a0;
+constexpr Register kInterpreterBytecodeOffsetRegister = t0;
+constexpr Register kInterpreterBytecodeArrayRegister = t1;
+constexpr Register kInterpreterDispatchTableRegister = t2;
+
+constexpr Register kJavaScriptCallArgCountRegister = a0;
+constexpr Register kJavaScriptCallCodeStartRegister = a2;
+constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
+constexpr Register kJavaScriptCallNewTargetRegister = a3;
+constexpr Register kJavaScriptCallExtraArg1Register = a2;
+
+constexpr Register kOffHeapTrampolineRegister = t3;
+constexpr Register kRuntimeCallFunctionRegister = a1;
+constexpr Register kRuntimeCallArgCountRegister = a0;
+constexpr Register kRuntimeCallArgvRegister = a2;
+constexpr Register kWasmInstanceRegister = a0;
+constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
+
+constexpr DoubleRegister kFPReturnRegister0 = fa0;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_RISCV64_REGISTER_RISCV64_H_
diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc
index 3dd0c73e0f..76b3d9953e 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/assembler-s390.cc
@@ -241,6 +241,12 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= (1u << VECTOR_ENHANCE_FACILITY_1);
#endif
supported_ |= (1u << FPU);
+
+ // Set a static value on whether Simd is supported.
+  // This variable is only used for certain archs to query
+  // SupportsWasmSimd128() at runtime in builtins using an extern ref. Other
+  // callers should use CpuFeatures::SupportsWasmSimd128().
+ CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128();
}
void CpuFeatures::PrintTarget() {
diff --git a/deps/v8/src/codegen/s390/interface-descriptors-s390.cc b/deps/v8/src/codegen/s390/interface-descriptors-s390.cc
index 6ea6265b8f..9a9ecdcb8b 100644
--- a/deps/v8/src/codegen/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/codegen/s390/interface-descriptors-s390.cc
@@ -86,6 +86,15 @@ const Register ApiGetterDescriptor::CallbackRegister() { return r5; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r2; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r5; }
+const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ UNREACHABLE();
+}
+const Register BaselineLeaveFrameDescriptor::WeightRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ UNREACHABLE();
+}
+
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r2; }
@@ -209,21 +218,22 @@ void CompareDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void Compare_BaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:11421): Implement on this platform.
+ InitializePlatformUnimplemented(data, kParameterCount);
+}
+
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
+void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r3, // JSFunction
- r5, // the new target
- r2, // actual number of arguments
- r4, // expected number of arguments
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ // TODO(v8:11421): Implement on this platform.
+ InitializePlatformUnimplemented(data, kParameterCount);
}
void ApiCallbackDescriptor::InitializePlatformSpecific(
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index 066facfceb..511649af80 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -611,6 +611,20 @@ void TurboAssembler::MultiPushDoubles(RegList dregs, Register location) {
}
}
+void TurboAssembler::MultiPushV128(RegList dregs, Register location) {
+ int16_t num_to_push = base::bits::CountPopulation(dregs);
+ int16_t stack_offset = num_to_push * kSimd128Size;
+
+ SubS64(location, location, Operand(stack_offset));
+ for (int16_t i = Simd128Register::kNumRegisters - 1; i >= 0; i--) {
+ if ((dregs & (1 << i)) != 0) {
+ Simd128Register dreg = Simd128Register::from_code(i);
+ stack_offset -= kSimd128Size;
+ StoreV128(dreg, MemOperand(location, stack_offset), r0);
+ }
+ }
+}
+
void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
int16_t stack_offset = 0;
@@ -624,6 +638,19 @@ void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
AddS64(location, location, Operand(stack_offset));
}
+void TurboAssembler::MultiPopV128(RegList dregs, Register location) {
+ int16_t stack_offset = 0;
+
+ for (int16_t i = 0; i < Simd128Register::kNumRegisters; i++) {
+ if ((dregs & (1 << i)) != 0) {
+ Simd128Register dreg = Simd128Register::from_code(i);
+ LoadV128(dreg, MemOperand(location, stack_offset), r0);
+ stack_offset += kSimd128Size;
+ }
+ }
+ AddS64(location, location, Operand(stack_offset));
+}
+
void TurboAssembler::LoadRoot(Register destination, RootIndex index,
Condition) {
LoadU64(destination,
@@ -1519,7 +1546,6 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
DCHECK_EQ(actual_parameter_count, r2);
DCHECK_EQ(expected_parameter_count, r4);
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
CmpS64(expected_parameter_count, Operand(kDontAdaptArgumentsSentinel));
@@ -1572,24 +1598,12 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this, StackFrame::MANUAL);
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
bkpt(0);
}
-#else
- // Check whether the expected and actual arguments count match. If not,
- // setup registers according to contract with ArgumentsAdaptorTrampoline.
- CmpS64(expected_parameter_count, actual_parameter_count);
- beq(&regular_invoke);
- Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
- if (flag == CALL_FUNCTION) {
- Call(adaptor);
- b(done);
- } else {
- Jump(adaptor, RelocInfo::CODE_TARGET);
- }
-#endif
bind(&regular_invoke);
}
@@ -1776,6 +1790,18 @@ void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
CmpS64(type_reg, Operand(type));
}
+void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
+ InstanceType lower_limit,
+ InstanceType higher_limit) {
+ DCHECK_LT(lower_limit, higher_limit);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadU16(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ mov(scratch, type_reg);
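+  // Unsigned range check: (type - lower_limit) is <= (higher_limit -
+  // lower_limit) only when type lies in [lower_limit, higher_limit]; values
+  // below lower_limit wrap around to large unsigned numbers.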
+ slgfi(scratch, Operand(lower_limit));
+ CmpU64(scratch, Operand(higher_limit - lower_limit));
+}
+
void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
int32_t offset = RootRegisterOffsetForRootIndex(index);
#ifdef V8_TARGET_BIG_ENDIAN
@@ -2018,9 +2044,11 @@ void MacroAssembler::AssertFunction(Register object) {
TestIfSmi(object);
Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
push(object);
- CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
+ LoadMap(object, object);
+ CompareInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_TYPE);
pop(object);
- Check(eq, AbortReason::kOperandIsNotAFunction);
+ Check(le, AbortReason::kOperandIsNotAFunction);
}
}
@@ -2625,6 +2653,10 @@ void TurboAssembler::AddS64(Register dst, const Operand& opnd) {
agfi(dst, opnd);
}
+void TurboAssembler::AddS32(Register dst, Register src, int32_t opnd) {
+ AddS32(dst, src, Operand(opnd));
+}
+
// Add 32-bit (Register dst = Register src + Immediate opnd)
void TurboAssembler::AddS32(Register dst, Register src, const Operand& opnd) {
if (dst != src) {
@@ -2637,6 +2669,10 @@ void TurboAssembler::AddS32(Register dst, Register src, const Operand& opnd) {
AddS32(dst, opnd);
}
+void TurboAssembler::AddS64(Register dst, Register src, int32_t opnd) {
+ AddS64(dst, src, Operand(opnd));
+}
+
// Add Pointer Size (Register dst = Register src + Immediate opnd)
void TurboAssembler::AddS64(Register dst, Register src, const Operand& opnd) {
if (dst != src) {
@@ -2796,11 +2832,19 @@ void TurboAssembler::SubS64(Register dst, const Operand& imm) {
AddS64(dst, Operand(-(imm.immediate())));
}
+void TurboAssembler::SubS32(Register dst, Register src, int32_t imm) {
+ SubS32(dst, src, Operand(imm));
+}
+
// Subtract 32-bit (Register dst = Register src - Immediate opnd)
void TurboAssembler::SubS32(Register dst, Register src, const Operand& imm) {
AddS32(dst, src, Operand(-(imm.immediate())));
}
+void TurboAssembler::SubS64(Register dst, Register src, int32_t imm) {
+ SubS64(dst, src, Operand(imm));
+}
+
// Subtract Pointer Sized (Register dst = Register src - Immediate opnd)
void TurboAssembler::SubS64(Register dst, Register src, const Operand& imm) {
AddS64(dst, src, Operand(-(imm.immediate())));
@@ -3439,19 +3483,9 @@ void TurboAssembler::StoreU64(Register src, const MemOperand& mem,
DCHECK(scratch != no_reg);
DCHECK(scratch != r0);
mov(scratch, Operand(mem.offset()));
-#if V8_TARGET_ARCH_S390X
stg(src, MemOperand(mem.rb(), scratch));
-#else
- st(src, MemOperand(mem.rb(), scratch));
-#endif
} else {
-#if V8_TARGET_ARCH_S390X
stg(src, mem);
-#else
- // StoreU32 will try to generate ST if offset fits, otherwise
- // it'll generate STY.
- StoreU32(src, mem);
-#endif
}
}
@@ -3464,11 +3498,7 @@ void TurboAssembler::StoreU64(const MemOperand& mem, const Operand& opnd,
// Try to use MVGHI/MVHI
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) &&
mem.getIndexRegister() == r0 && is_int16(opnd.immediate())) {
-#if V8_TARGET_ARCH_S390X
mvghi(mem, opnd);
-#else
- mvhi(mem, opnd);
-#endif
} else {
mov(scratch, opnd);
StoreU64(scratch, mem);
@@ -3660,18 +3690,199 @@ void TurboAssembler::LoadU8(Register dst, Register src) {
#endif
}
-void TurboAssembler::LoadLogicalReversedWordP(Register dst,
- const MemOperand& mem) {
- lrv(dst, mem);
+#ifdef V8_TARGET_BIG_ENDIAN
+void TurboAssembler::LoadU64LE(Register dst, const MemOperand& mem,
+ Register scratch) {
+ lrvg(dst, mem);
+}
+
+void TurboAssembler::LoadS32LE(Register dst, const MemOperand& opnd,
+ Register scratch) {
+ lrv(dst, opnd);
+ LoadS32(dst, dst);
+}
+
+void TurboAssembler::LoadU32LE(Register dst, const MemOperand& opnd,
+ Register scratch) {
+ lrv(dst, opnd);
LoadU32(dst, dst);
}
-void TurboAssembler::LoadLogicalReversedHalfWordP(Register dst,
- const MemOperand& mem) {
- lrvh(dst, mem);
+void TurboAssembler::LoadU16LE(Register dst, const MemOperand& opnd) {
+ lrvh(dst, opnd);
LoadU16(dst, dst);
}
+void TurboAssembler::LoadS16LE(Register dst, const MemOperand& opnd) {
+ lrvh(dst, opnd);
+ LoadS16(dst, dst);
+}
+
+void TurboAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd,
+ Register scratch0, Register scratch1) {
+ bool use_vlbr = CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) &&
+ is_uint12(opnd.offset());
+ if (use_vlbr) {
+ vlbr(dst, opnd, Condition(4));
+ } else {
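+    // Without vector-load-byte-reversed support, byte-swap each 64-bit half
+    // with lrvg and recombine the halves into the vector register with vlvgp.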
+ lrvg(scratch0, opnd);
+ lrvg(scratch1,
+ MemOperand(opnd.rx(), opnd.rb(), opnd.offset() + kSystemPointerSize));
+ vlvgp(dst, scratch1, scratch0);
+ }
+}
+
+void TurboAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& opnd,
+ Register scratch) {
+ lrvg(scratch, opnd);
+ ldgr(dst, scratch);
+}
+
+void TurboAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& opnd,
+ Register scratch) {
+ lrv(scratch, opnd);
+ ShiftLeftU64(scratch, scratch, Operand(32));
+ ldgr(dst, scratch);
+}
+
+void TurboAssembler::StoreU64LE(Register src, const MemOperand& mem,
+ Register scratch) {
+ if (!is_int20(mem.offset())) {
+ DCHECK(scratch != no_reg);
+ DCHECK(scratch != r0);
+ mov(scratch, Operand(mem.offset()));
+ strvg(src, MemOperand(mem.rb(), scratch));
+ } else {
+ strvg(src, mem);
+ }
+}
+
+void TurboAssembler::StoreU32LE(Register src, const MemOperand& mem,
+ Register scratch) {
+ if (!is_int20(mem.offset())) {
+ DCHECK(scratch != no_reg);
+ DCHECK(scratch != r0);
+ mov(scratch, Operand(mem.offset()));
+ strv(src, MemOperand(mem.rb(), scratch));
+ } else {
+ strv(src, mem);
+ }
+}
+
+void TurboAssembler::StoreU16LE(Register src, const MemOperand& mem,
+ Register scratch) {
+ if (!is_int20(mem.offset())) {
+ DCHECK(scratch != no_reg);
+ DCHECK(scratch != r0);
+ mov(scratch, Operand(mem.offset()));
+ strvh(src, MemOperand(mem.rb(), scratch));
+ } else {
+ strvh(src, mem);
+ }
+}
+
+void TurboAssembler::StoreF64LE(DoubleRegister src, const MemOperand& opnd,
+ Register scratch) {
+ DCHECK(is_uint12(opnd.offset()));
+ lgdr(scratch, src);
+ strvg(scratch, opnd);
+}
+
+void TurboAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd,
+ Register scratch) {
+ DCHECK(is_uint12(opnd.offset()));
+ lgdr(scratch, src);
+ ShiftRightU64(scratch, scratch, Operand(32));
+ strv(scratch, opnd);
+}
+
+void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
+ Register scratch1, Register scratch2) {
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
+ vstbr(src, mem, Condition(4));
+ } else {
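+    // Without vector-store-byte-reversed support, extract both 64-bit halves
+    // with vlgv and store each of them byte-reversed with strvg.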
+ vlgv(scratch1, src, MemOperand(r0, 1), Condition(3));
+ vlgv(scratch2, src, MemOperand(r0, 0), Condition(3));
+ strvg(scratch1, mem);
+ strvg(scratch2,
+ MemOperand(mem.rx(), mem.rb(), mem.offset() + kSystemPointerSize));
+ }
+}
+
+#else
+void TurboAssembler::LoadU64LE(Register dst, const MemOperand& mem,
+ Register scratch) {
+ LoadU64(dst, mem, scratch);
+}
+
+void TurboAssembler::LoadS32LE(Register dst, const MemOperand& opnd,
+ Register scratch) {
+ LoadS32(dst, opnd, scratch);
+}
+
+void TurboAssembler::LoadU32LE(Register dst, const MemOperand& opnd,
+ Register scratch) {
+ LoadU32(dst, opnd, scratch);
+}
+
+void TurboAssembler::LoadU16LE(Register dst, const MemOperand& opnd) {
+ LoadU16(dst, opnd);
+}
+
+void TurboAssembler::LoadS16LE(Register dst, const MemOperand& opnd) {
+ LoadS16(dst, opnd);
+}
+
+void TurboAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd,
+ Register scratch0, Register scratch1) {
+ USE(scratch1);
+ LoadV128(dst, opnd, scratch0);
+}
+
+void TurboAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& opnd,
+ Register scratch) {
+ USE(scratch);
+ LoadF64(dst, opnd);
+}
+
+void TurboAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& opnd,
+ Register scratch) {
+ USE(scratch);
+ LoadF32(dst, opnd);
+}
+
+void TurboAssembler::StoreU64LE(Register src, const MemOperand& mem,
+ Register scratch) {
+ StoreU64(src, mem, scratch);
+}
+
+void TurboAssembler::StoreU32LE(Register src, const MemOperand& mem,
+ Register scratch) {
+ StoreU32(src, mem, scratch);
+}
+
+void TurboAssembler::StoreU16LE(Register src, const MemOperand& mem,
+ Register scratch) {
+ StoreU16(src, mem, scratch);
+}
+
+void TurboAssembler::StoreF64LE(DoubleRegister src, const MemOperand& opnd,
+ Register scratch) {
+ StoreF64(src, opnd);
+}
+
+void TurboAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd,
+ Register scratch) {
+ StoreF32(src, opnd);
+}
+
+void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
+ Register scratch1, Register scratch2) {
+ StoreV128(src, mem, scratch1);
+}
+
+#endif
+
// Load And Test (Reg <- Reg)
void TurboAssembler::LoadAndTest32(Register dst, Register src) {
ltr(dst, src);
@@ -3770,6 +3981,112 @@ void TurboAssembler::StoreV128(Simd128Register src, const MemOperand& mem,
}
}
+void TurboAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (dst == lhs) {
+ aebr(dst, rhs);
+ } else if (dst == rhs) {
+ aebr(dst, lhs);
+ } else {
+ ler(dst, lhs);
+ aebr(dst, rhs);
+ }
+}
+
+void TurboAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (dst == lhs) {
+ sebr(dst, rhs);
+ } else if (dst == rhs) {
+ sebr(dst, lhs);
+ lcebr(dst, dst);
+ } else {
+ ler(dst, lhs);
+ sebr(dst, rhs);
+ }
+}
+
+void TurboAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (dst == lhs) {
+ meebr(dst, rhs);
+ } else if (dst == rhs) {
+ meebr(dst, lhs);
+ } else {
+ ler(dst, lhs);
+ meebr(dst, rhs);
+ }
+}
+
+void TurboAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (dst == lhs) {
+ debr(dst, rhs);
+ } else if (dst == rhs) {
+ lay(sp, MemOperand(sp, -kSystemPointerSize));
+ StoreF32(dst, MemOperand(sp));
+ ler(dst, lhs);
+ deb(dst, MemOperand(sp));
+ la(sp, MemOperand(sp, kSystemPointerSize));
+ } else {
+ ler(dst, lhs);
+ debr(dst, rhs);
+ }
+}
+
+void TurboAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (dst == lhs) {
+ adbr(dst, rhs);
+ } else if (dst == rhs) {
+ adbr(dst, lhs);
+ } else {
+ ldr(dst, lhs);
+ adbr(dst, rhs);
+ }
+}
+
+void TurboAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (dst == lhs) {
+ sdbr(dst, rhs);
+ } else if (dst == rhs) {
+ sdbr(dst, lhs);
+ lcdbr(dst, dst);
+ } else {
+ ldr(dst, lhs);
+ sdbr(dst, rhs);
+ }
+}
+
+void TurboAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (dst == lhs) {
+ mdbr(dst, rhs);
+ } else if (dst == rhs) {
+ mdbr(dst, lhs);
+ } else {
+ ldr(dst, lhs);
+ mdbr(dst, rhs);
+ }
+}
+
+void TurboAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (dst == lhs) {
+ ddbr(dst, rhs);
+ } else if (dst == rhs) {
+ lay(sp, MemOperand(sp, -kSystemPointerSize));
+ StoreF64(dst, MemOperand(sp));
+ ldr(dst, lhs);
+ ddb(dst, MemOperand(sp));
+ la(sp, MemOperand(sp, kSystemPointerSize));
+ } else {
+ ldr(dst, lhs);
+ ddbr(dst, rhs);
+ }
+}
+
void TurboAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
@@ -4338,7 +4655,8 @@ void TurboAssembler::CallCodeObject(Register code_object) {
Call(code_object);
}
-void TurboAssembler::JumpCodeObject(Register code_object) {
+void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+ DCHECK_EQ(JumpMode::kJump, jump_mode);
LoadCodeObjectEntry(code_object, code_object);
Jump(code_object);
}
@@ -4381,6 +4699,62 @@ void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
void TurboAssembler::Trap() { stop(); }
void TurboAssembler::DebugBreak() { stop(); }
+void TurboAssembler::CountLeadingZerosU32(Register dst, Register src,
+ Register scratch_pair) {
+ llgfr(dst, src);
+ flogr(scratch_pair,
+        dst);  // modifies the register pair scratch_pair and scratch_pair + 1
+ AddS32(dst, scratch_pair, Operand(-32));
+}
+
+void TurboAssembler::CountLeadingZerosU64(Register dst, Register src,
+ Register scratch_pair) {
+ flogr(scratch_pair,
+        src);  // modifies the register pair scratch_pair and scratch_pair + 1
+ mov(dst, scratch_pair);
+}
+
+void TurboAssembler::CountTrailingZerosU32(Register dst, Register src,
+ Register scratch_pair) {
+ Register scratch0 = scratch_pair;
+ Register scratch1 = Register::from_code(scratch_pair.code() + 1);
+ DCHECK(!AreAliased(dst, scratch0, scratch1));
+ DCHECK(!AreAliased(src, scratch0, scratch1));
+
+ Label done;
+ // Check if src is all zeros.
+ ltr(scratch1, src);
+ mov(dst, Operand(32));
+ beq(&done);
+ llgfr(scratch1, scratch1);
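+  // Isolate the lowest set bit (x & -x); trailing zeros = 63 - clz of that bit.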
+ lcgr(scratch0, scratch1);
+ ngr(scratch1, scratch0);
+ flogr(scratch0, scratch1);
+ mov(dst, Operand(63));
+ SubS64(dst, scratch0);
+ bind(&done);
+}
+
+void TurboAssembler::CountTrailingZerosU64(Register dst, Register src,
+ Register scratch_pair) {
+ Register scratch0 = scratch_pair;
+ Register scratch1 = Register::from_code(scratch_pair.code() + 1);
+ DCHECK(!AreAliased(dst, scratch0, scratch1));
+ DCHECK(!AreAliased(src, scratch0, scratch1));
+
+ Label done;
+ // Check if src is all zeros.
+ ltgr(scratch1, src);
+ mov(dst, Operand(64));
+ beq(&done);
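+  // Isolate the lowest set bit (x & -x); trailing zeros = 63 - clz of that bit.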
+ lcgr(scratch0, scratch1);
+ ngr(scratch0, scratch1);
+ flogr(scratch0, scratch0);
+ mov(dst, Operand(63));
+ SubS64(dst, scratch0);
+ bind(&done);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index c71f072795..f4c3d038b3 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -102,7 +102,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadEntryFromBuiltinIndex(Register builtin_index);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
- void JumpCodeObject(Register code_object) override;
+ void JumpCodeObject(Register code_object,
+ JumpMode jump_mode = JumpMode::kJump) override;
void CallBuiltinByIndex(Register builtin_index) override;
@@ -147,6 +148,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MultiPushDoubles(RegList dregs, Register location = sp);
void MultiPopDoubles(RegList dregs, Register location = sp);
+ void MultiPushV128(RegList dregs, Register location = sp);
+ void MultiPopV128(RegList dregs, Register location = sp);
+
// Calculate how much stack space (in bytes) are required to store caller
// registers excluding those specified in the arguments.
int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
@@ -181,6 +185,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void AddS64(Register dst, const Operand& imm);
void AddS32(Register dst, Register src, const Operand& imm);
void AddS64(Register dst, Register src, const Operand& imm);
+ void AddS32(Register dst, Register src, int32_t imm);
+ void AddS64(Register dst, Register src, int32_t imm);
// Add (Register - Register)
void AddS32(Register dst, Register src);
@@ -212,6 +218,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void SubS64(Register dst, const Operand& imm);
void SubS32(Register dst, Register src, const Operand& imm);
void SubS64(Register dst, Register src, const Operand& imm);
+ void SubS32(Register dst, Register src, int32_t imm);
+ void SubS64(Register dst, Register src, int32_t imm);
// Subtract (Register - Register)
void SubS32(Register dst, Register src);
@@ -326,20 +334,36 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CmpU32(Register dst, const MemOperand& opnd);
void CmpU64(Register dst, const MemOperand& opnd);
- // Load 32bit
+ // Load
+ void LoadU64(Register dst, const MemOperand& mem, Register scratch = no_reg);
void LoadS32(Register dst, const MemOperand& opnd, Register scratch = no_reg);
void LoadS32(Register dst, Register src);
void LoadU32(Register dst, const MemOperand& opnd, Register scratch = no_reg);
void LoadU32(Register dst, Register src);
void LoadU16(Register dst, const MemOperand& opnd);
void LoadU16(Register dst, Register src);
+ void LoadS16(Register dst, Register src);
+ void LoadS16(Register dst, const MemOperand& mem, Register scratch = no_reg);
void LoadS8(Register dst, const MemOperand& opnd);
void LoadS8(Register dst, Register src);
void LoadU8(Register dst, const MemOperand& opnd);
void LoadU8(Register dst, Register src);
-
- void LoadLogicalReversedWordP(Register dst, const MemOperand& opnd);
- void LoadLogicalReversedHalfWordP(Register dst, const MemOperand& opnd);
+ void LoadV128(Simd128Register dst, const MemOperand& mem, Register scratch);
+ void LoadF64(DoubleRegister dst, const MemOperand& opnd);
+ void LoadF32(DoubleRegister dst, const MemOperand& opnd);
+ // LE Load
+ void LoadU64LE(Register dst, const MemOperand& mem,
+ Register scratch = no_reg);
+ void LoadS32LE(Register dst, const MemOperand& opnd,
+ Register scratch = no_reg);
+ void LoadU32LE(Register dst, const MemOperand& opnd,
+ Register scratch = no_reg);
+ void LoadU16LE(Register dst, const MemOperand& opnd);
+ void LoadS16LE(Register dst, const MemOperand& opnd);
+ void LoadV128LE(DoubleRegister dst, const MemOperand& mem, Register scratch0,
+ Register scratch1);
+ void LoadF64LE(DoubleRegister dst, const MemOperand& opnd, Register scratch);
+ void LoadF32LE(DoubleRegister dst, const MemOperand& opnd, Register scratch);
// Load And Test
void LoadAndTest32(Register dst, Register src);
@@ -348,10 +372,39 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadAndTest32(Register dst, const MemOperand& opnd);
void LoadAndTestP(Register dst, const MemOperand& opnd);
- // Load Floating Point
- void LoadF64(DoubleRegister dst, const MemOperand& opnd);
- void LoadF32(DoubleRegister dst, const MemOperand& opnd);
- void LoadV128(Simd128Register dst, const MemOperand& mem, Register scratch);
+ // Store
+ void StoreU64(const MemOperand& mem, const Operand& opnd,
+ Register scratch = no_reg);
+ void StoreU64(Register src, const MemOperand& mem, Register scratch = no_reg);
+ void StoreU32(Register src, const MemOperand& mem, Register scratch = no_reg);
+
+ void StoreU16(Register src, const MemOperand& mem, Register scratch = r0);
+ void StoreU8(Register src, const MemOperand& mem, Register scratch = r0);
+ void StoreF64(DoubleRegister dst, const MemOperand& opnd);
+ void StoreF32(DoubleRegister dst, const MemOperand& opnd);
+ void StoreV128(Simd128Register src, const MemOperand& mem, Register scratch);
+
+ // Store LE
+ void StoreU64LE(Register src, const MemOperand& mem,
+ Register scratch = no_reg);
+ void StoreU32LE(Register src, const MemOperand& mem,
+ Register scratch = no_reg);
+
+ void StoreU16LE(Register src, const MemOperand& mem, Register scratch = r0);
+ void StoreF64LE(DoubleRegister src, const MemOperand& opnd, Register scratch);
+ void StoreF32LE(DoubleRegister src, const MemOperand& opnd, Register scratch);
+ void StoreV128LE(Simd128Register src, const MemOperand& mem,
+ Register scratch1, Register scratch2);
+
+ void AddF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs);
+ void SubF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs);
+ void MulF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs);
+ void DivF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs);
+
+ void AddF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs);
+ void SubF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs);
+ void MulF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs);
+ void DivF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs);
void AddFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch);
@@ -378,11 +431,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadPositiveP(Register result, Register input);
void LoadPositive32(Register result, Register input);
- // Store Floating Point
- void StoreF64(DoubleRegister dst, const MemOperand& opnd);
- void StoreF32(DoubleRegister dst, const MemOperand& opnd);
- void StoreV128(Simd128Register src, const MemOperand& mem, Register scratch);
-
void Branch(Condition c, const Operand& opnd);
void BranchOnCount(Register r1, Label* l);
@@ -696,21 +744,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
LoadF64(result, static_cast<uint64_t>(int_val) << 32, scratch);
}
- // void LoadF64(DoubleRegister result, double value, Register scratch);
- // void LoadF64(DoubleRegister result, uint64_t value,
- // Register scratch);
-
- // void LoadF32(DoubleRegister result, float value, Register scratch);
-
- void StoreU32(Register src, const MemOperand& mem, Register scratch = no_reg);
-
- void LoadS16(Register dst, Register src);
-
- void LoadS16(Register dst, const MemOperand& mem,
- Register scratch = no_reg);
-
- void StoreU16(Register src, const MemOperand& mem, Register scratch = r0);
- void StoreU8(Register src, const MemOperand& mem, Register scratch = r0);
void CmpSmiLiteral(Register src1, Smi smi, Register scratch);
// Set new rounding mode RN to FPSCR
@@ -723,10 +756,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg) {
LoadU64(dst, mem, scratch);
}
- void LoadU64(Register dst, const MemOperand& mem, Register scratch = no_reg);
- void StoreU64(Register src, const MemOperand& mem, Register scratch = no_reg);
- void StoreU64(const MemOperand& mem, const Operand& opnd,
- Register scratch = no_reg);
void LoadMultipleP(Register dst1, Register dst2, const MemOperand& mem);
void StoreMultipleP(Register dst1, Register dst2, const MemOperand& mem);
void LoadMultipleW(Register dst1, Register dst2, const MemOperand& mem);
@@ -999,6 +1028,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void DecompressAnyTagged(Register destination, MemOperand field_operand);
void DecompressAnyTagged(Register destination, Register source);
+ // CountLeadingZeros will corrupt the scratch register pair (eg. r0:r1)
+ void CountLeadingZerosU32(Register dst, Register src,
+ Register scratch_pair = r0);
+ void CountLeadingZerosU64(Register dst, Register src,
+ Register scratch_pair = r0);
+ void CountTrailingZerosU32(Register dst, Register src,
+ Register scratch_pair = r0);
+ void CountTrailingZerosU64(Register dst, Register src,
+ Register scratch_pair = r0);
+
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -1074,6 +1113,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// sets the flags and leaves the object type in the type_reg register.
void CompareInstanceType(Register map, Register type_reg, InstanceType type);
+ // Compare instance type ranges for a map (lower_limit and higher_limit
+ // inclusive).
+ //
+  // Always use unsigned comparisons: le for a positive result.
+ void CompareInstanceTypeRange(Register map, Register type_reg,
+ InstanceType lower_limit,
+ InstanceType higher_limit);
+
// Compare the object in a register to a value from the root list.
// Uses the ip register as scratch.
void CompareRoot(Register obj, RootIndex index);
diff --git a/deps/v8/src/codegen/safepoint-table.cc b/deps/v8/src/codegen/safepoint-table.cc
index 644931e0ea..dd379e0535 100644
--- a/deps/v8/src/codegen/safepoint-table.cc
+++ b/deps/v8/src/codegen/safepoint-table.cc
@@ -88,8 +88,7 @@ void SafepointTable::PrintBits(std::ostream& os, // NOLINT
}
}
-Safepoint SafepointTableBuilder::DefineSafepoint(
- Assembler* assembler, Safepoint::DeoptMode deopt_mode) {
+Safepoint SafepointTableBuilder::DefineSafepoint(Assembler* assembler) {
deoptimization_info_.push_back(
DeoptimizationInfo(zone_, assembler->pc_offset_for_safepoint()));
DeoptimizationInfo& new_info = deoptimization_info_.back();
diff --git a/deps/v8/src/codegen/safepoint-table.h b/deps/v8/src/codegen/safepoint-table.h
index a7046b6477..9efdbfa784 100644
--- a/deps/v8/src/codegen/safepoint-table.h
+++ b/deps/v8/src/codegen/safepoint-table.h
@@ -168,8 +168,6 @@ class SafepointTable {
class Safepoint {
public:
- enum DeoptMode { kNoLazyDeopt, kLazyDeopt };
-
static const int kNoDeoptimizationIndex = SafepointEntry::kNoDeoptIndex;
void DefinePointerSlot(int index) { indexes_->push_back(index); }
@@ -195,7 +193,7 @@ class SafepointTableBuilder {
unsigned GetCodeOffset() const;
// Define a new safepoint for the current position in the body.
- Safepoint DefineSafepoint(Assembler* assembler, Safepoint::DeoptMode mode);
+ Safepoint DefineSafepoint(Assembler* assembler);
// Emit the safepoint table after the body. The number of bits per
// entry must be enough to hold all the pointer indexes.
diff --git a/deps/v8/src/codegen/source-position-table.cc b/deps/v8/src/codegen/source-position-table.cc
index 3c8a108808..63f1d17c70 100644
--- a/deps/v8/src/codegen/source-position-table.cc
+++ b/deps/v8/src/codegen/source-position-table.cc
@@ -5,6 +5,8 @@
#include "src/codegen/source-position-table.h"
#include "src/base/export-template.h"
+#include "src/base/logging.h"
+#include "src/common/assert-scope.h"
#include "src/heap/local-factory-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
@@ -36,7 +38,10 @@ using ValueBits = base::BitField8<unsigned, 0, 7>;
void AddAndSetEntry(PositionTableEntry* value,
const PositionTableEntry& other) {
value->code_offset += other.code_offset;
+ DCHECK_IMPLIES(value->code_offset != kFunctionEntryBytecodeOffset,
+ value->code_offset >= 0);
value->source_position += other.source_position;
+ DCHECK_LE(0, value->source_position);
value->is_statement = other.is_statement;
}
@@ -69,7 +74,11 @@ void EncodeInt(ZoneVector<byte>* bytes, T value) {
// Encode a PositionTableEntry.
void EncodeEntry(ZoneVector<byte>* bytes, const PositionTableEntry& entry) {
// We only accept ascending code offsets.
- DCHECK_GE(entry.code_offset, 0);
+ DCHECK_LE(0, entry.code_offset);
+ // All but the first entry must be *strictly* ascending (no two entries for
+ // the same position).
+ // TODO(11496): This DCHECK fails tests.
+ // DCHECK_IMPLIES(!bytes->empty(), entry.code_offset > 0);
// Since code_offset is not negative, we use sign to encode is_statement.
EncodeInt(bytes,
entry.is_statement ? entry.code_offset : -entry.code_offset - 1);
diff --git a/deps/v8/src/codegen/tnode.h b/deps/v8/src/codegen/tnode.h
index 44c26e5781..a6cfc6983a 100644
--- a/deps/v8/src/codegen/tnode.h
+++ b/deps/v8/src/codegen/tnode.h
@@ -368,7 +368,7 @@ class TNode {
// SloppyTNode<T> is a variant of TNode<T> and allows implicit casts from
// Node*. It is intended for function arguments as long as some call sites
// still use untyped Node* arguments.
-// TODO(tebbi): Delete this class once transition is finished.
+// TODO(turbofan): Delete this class once transition is finished.
template <class T>
class SloppyTNode : public TNode<T> {
public:
diff --git a/deps/v8/src/codegen/turbo-assembler.cc b/deps/v8/src/codegen/turbo-assembler.cc
index 575529e399..e4c694097b 100644
--- a/deps/v8/src/codegen/turbo-assembler.cc
+++ b/deps/v8/src/codegen/turbo-assembler.cc
@@ -118,7 +118,7 @@ bool TurboAssemblerBase::IsAddressableThroughRootRegister(
void TurboAssemblerBase::RecordCommentForOffHeapTrampoline(int builtin_index) {
if (!FLAG_code_comments) return;
std::ostringstream str;
- str << "-- Inlined Trampoline to " << Builtins::name(builtin_index) << " --";
+ str << "[ Inlined Trampoline to " << Builtins::name(builtin_index);
RecordComment(str.str().c_str());
}
diff --git a/deps/v8/src/codegen/turbo-assembler.h b/deps/v8/src/codegen/turbo-assembler.h
index 337ee7465d..cc9ef92919 100644
--- a/deps/v8/src/codegen/turbo-assembler.h
+++ b/deps/v8/src/codegen/turbo-assembler.h
@@ -15,6 +15,12 @@
namespace v8 {
namespace internal {
+enum class JumpMode {
+ kJump, // Does a direct jump to the given address
+ kPushAndReturn // Pushes the given address as the current return address and
+ // does a return
+};
+
// Common base class for platform-specific TurboAssemblers containing
// platform-independent bits.
// You will encounter two subclasses, TurboAssembler (derives from
@@ -67,7 +73,8 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
// Calls/jumps to the given Code object. If builtins are embedded, the
// trampoline Code object on the heap is not used.
virtual void CallCodeObject(Register code_object) = 0;
- virtual void JumpCodeObject(Register code_object) = 0;
+ virtual void JumpCodeObject(Register code_object,
+ JumpMode jump_mode = JumpMode::kJump) = 0;
// Loads the given Code object's entry point into the destination register.
virtual void LoadCodeObjectEntry(Register destination,
@@ -117,9 +124,9 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
static constexpr int kStackPageSize = 4 * KB;
#endif
- protected:
void RecordCommentForOffHeapTrampoline(int builtin_index);
+ protected:
Isolate* const isolate_ = nullptr;
// This handle will be patched with the code object on installation.
diff --git a/deps/v8/src/codegen/x64/assembler-x64-inl.h b/deps/v8/src/codegen/x64/assembler-x64-inl.h
index 26e558b4f9..836566a1ac 100644
--- a/deps/v8/src/codegen/x64/assembler-x64-inl.h
+++ b/deps/v8/src/codegen/x64/assembler-x64-inl.h
@@ -19,7 +19,7 @@ bool CpuFeatures::SupportsOptimizer() { return true; }
bool CpuFeatures::SupportsWasmSimd128() {
if (IsSupported(SSE4_1)) return true;
- if (FLAG_wasm_simd_ssse3_codegen) return true;
+ if (FLAG_wasm_simd_ssse3_codegen && IsSupported(SSSE3)) return true;
return false;
}
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index e5baf0aa04..18330a9126 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -108,6 +108,12 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
} else if (strcmp(FLAG_mcpu, "atom") == 0) {
supported_ |= 1u << ATOM;
}
+
+ // Set a static value on whether Simd is supported.
+  // This variable is only used for certain archs to query
+  // SupportsWasmSimd128() at runtime in builtins using an extern ref. Other
+  // callers should use CpuFeatures::SupportsWasmSimd128().
+ CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128();
}
void CpuFeatures::PrintTarget() {}
@@ -1188,6 +1194,16 @@ void Assembler::cpuid() {
emit(0xA2);
}
+void Assembler::prefetch(Operand src, int level) {
+ DCHECK(is_uint2(level));
+ EnsureSpace ensure_space(this);
+ emit(0x0F);
+ emit(0x18);
+  // Emit the hint number in the reg field of the ModR/M byte.
+ XMMRegister code = XMMRegister::from_code(level);
+ emit_sse_operand(code, src);
+}
+
void Assembler::cqo() {
EnsureSpace ensure_space(this);
emit_rex_64();
@@ -2919,6 +2935,15 @@ void Assembler::movaps(XMMRegister dst, XMMRegister src) {
}
}
+void Assembler::movaps(XMMRegister dst, Operand src) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x28);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
DCHECK(is_uint8(imm8));
EnsureSpace ensure_space(this);
@@ -3088,6 +3113,10 @@ void Assembler::cmppd(XMMRegister dst, Operand src, int8_t cmp) {
emit(cmp);
}
+void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
+ sse2_instr(dst, src, 0xF3, 0x0F, 0xE6);
+}
+
void Assembler::cvttss2si(Register dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3503,6 +3532,14 @@ void Assembler::vmovq(Register dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
+void Assembler::vmovdqa(XMMRegister dst, Operand src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, xmm0, src, kL128, k66, k0F, kWIG);
+ emit(0x6F);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::vmovdqa(XMMRegister dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index a26e98d8a5..c1dc4a3db1 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -786,6 +786,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void ret(int imm16);
void ud2();
void setcc(Condition cc, Register reg);
+ void prefetch(Operand src, int level);
void pblendw(XMMRegister dst, Operand src, uint8_t mask);
void pblendw(XMMRegister dst, XMMRegister src, uint8_t mask);
@@ -920,6 +921,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void ucomiss(XMMRegister dst, XMMRegister src);
void ucomiss(XMMRegister dst, Operand src);
void movaps(XMMRegister dst, XMMRegister src);
+ void movaps(XMMRegister dst, Operand src);
// Don't use this unless it's important to keep the
// top half of the destination register unchanged.
@@ -1205,6 +1207,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movupd(XMMRegister dst, Operand src);
void movupd(Operand dst, XMMRegister src);
+ void cvtdq2pd(XMMRegister dst, XMMRegister src);
+
void cvttsd2si(Register dst, Operand src);
void cvttsd2si(Register dst, XMMRegister src);
void cvttss2siq(Register dst, XMMRegister src);
@@ -1330,6 +1334,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void vmovsd(XMMRegister dst, Operand src) { vsd(0x10, dst, xmm0, src); }
void vmovsd(Operand dst, XMMRegister src) { vsd(0x11, src, xmm0, dst); }
+ void vmovdqa(XMMRegister dst, Operand src);
void vmovdqa(XMMRegister dst, XMMRegister src);
void vmovdqu(XMMRegister dst, Operand src);
void vmovdqu(Operand dst, XMMRegister src);
@@ -1399,6 +1404,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vmovhlps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vinstr(0x12, dst, src1, src2, kNone, k0F, kWIG);
}
+ void vcvtdq2pd(XMMRegister dst, XMMRegister src) {
+ vinstr(0xe6, dst, xmm0, src, kF3, k0F, kWIG);
+ }
void vcvtss2sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
}
@@ -1513,6 +1521,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void vmovaps(XMMRegister dst, XMMRegister src) { vps(0x28, dst, xmm0, src); }
+ void vmovaps(XMMRegister dst, Operand src) { vps(0x28, dst, xmm0, src); }
void vmovups(XMMRegister dst, XMMRegister src) { vps(0x10, dst, xmm0, src); }
void vmovups(XMMRegister dst, Operand src) { vps(0x10, dst, xmm0, src); }
void vmovups(Operand dst, XMMRegister src) { vps(0x11, src, xmm0, dst); }
diff --git a/deps/v8/src/codegen/x64/interface-descriptors-x64.cc b/deps/v8/src/codegen/x64/interface-descriptors-x64.cc
index 5b35b5817f..4029b56d2b 100644
--- a/deps/v8/src/codegen/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/codegen/x64/interface-descriptors-x64.cc
@@ -93,6 +93,11 @@ const Register ApiGetterDescriptor::CallbackRegister() { return rbx; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return rax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return rbx; }
+const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ return rbx;
+}
+const Register BaselineLeaveFrameDescriptor::WeightRegister() { return rcx; }
+
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rbx};
@@ -216,20 +221,21 @@ void CompareDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void Compare_BaselineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rdx, rax, rbx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rdx, rax};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
+void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {
- rdi, // JSFunction
- rdx, // the new target
- rax, // actual number of arguments
- rbx, // expected number of arguments
- };
+ Register registers[] = {rdx, rax, rbx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index e696e8b66e..b91e8319ac 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <cstdint>
#if V8_TARGET_ARCH_X64
#include "src/base/bits.h"
@@ -203,6 +204,15 @@ void TurboAssembler::LoadTaggedPointerField(Register destination,
}
}
+void TurboAssembler::LoadTaggedSignedField(Register destination,
+ Operand field_operand) {
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressTaggedSigned(destination, field_operand);
+ } else {
+ mov_tagged(destination, field_operand);
+ }
+}
+
void TurboAssembler::LoadAnyTaggedField(Register destination,
Operand field_operand) {
if (COMPRESS_POINTERS_BOOL) {
@@ -256,6 +266,16 @@ void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
}
}
+void TurboAssembler::StoreTaggedSignedField(Operand dst_field_operand,
+ Smi value) {
+ if (SmiValuesAre32Bits()) {
+ movl(Operand(dst_field_operand, kSmiShift / kBitsPerByte),
+ Immediate(value.value()));
+ } else {
+ StoreTaggedField(dst_field_operand, Immediate(value));
+ }
+}
+
void TurboAssembler::DecompressTaggedSigned(Register destination,
Operand field_operand) {
RecordComment("[ DecompressTaggedSigned");
@@ -694,6 +714,16 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
return bytes;
}
+void TurboAssembler::Movdqa(XMMRegister dst, Operand src) {
+ // See comments in Movdqa(XMMRegister, XMMRegister).
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vmovdqa(dst, src);
+ } else {
+ movaps(dst, src);
+ }
+}
+
void TurboAssembler::Movdqa(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
@@ -1078,17 +1108,7 @@ void TurboAssembler::Set(Operand dst, intptr_t x) {
// Smi tagging, untagging and tag detection.
Register TurboAssembler::GetSmiConstant(Smi source) {
- STATIC_ASSERT(kSmiTag == 0);
- int value = source.value();
- if (value == 0) {
- xorl(kScratchRegister, kScratchRegister);
- return kScratchRegister;
- }
- if (SmiValuesAre32Bits()) {
- Move(kScratchRegister, source);
- } else {
- movl(kScratchRegister, Immediate(source));
- }
+ Move(kScratchRegister, source);
return kScratchRegister;
}
@@ -1097,8 +1117,17 @@ void TurboAssembler::Move(Register dst, Smi source) {
int value = source.value();
if (value == 0) {
xorl(dst, dst);
- } else {
+ } else if (SmiValuesAre32Bits() || value < 0) {
Move(dst, source.ptr(), RelocInfo::NONE);
+ } else {
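+    // With 31-bit Smis, a non-negative Smi's tagged value fits in the low 32
+    // bits, so a 32-bit move (which zero-extends on x64) is sufficient.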
+ uint32_t uvalue = static_cast<uint32_t>(source.ptr());
+ if (uvalue <= 0xFF) {
+ // Emit shorter instructions for small Smis
+ xorl(dst, dst);
+ movb(dst, Immediate(uvalue));
+ } else {
+ movl(dst, Immediate(uvalue));
+ }
}
}
@@ -1340,6 +1369,9 @@ void TurboAssembler::Move(Register dst, Register src) {
}
}
+void TurboAssembler::Move(Register dst, Operand src) { movq(dst, src); }
+void TurboAssembler::Move(Register dst, Immediate src) { movl(dst, src); }
+
void TurboAssembler::Move(XMMRegister dst, XMMRegister src) {
if (dst != src) {
Movaps(dst, src);
@@ -1594,6 +1626,7 @@ void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
jmp(kScratchRegister);
+ if (FLAG_code_comments) RecordComment("]");
bind(&skip);
return;
}
@@ -1676,6 +1709,18 @@ void TurboAssembler::CallBuiltin(int builtin_index) {
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
call(kScratchRegister);
+ if (FLAG_code_comments) RecordComment("]");
+}
+
+void TurboAssembler::TailCallBuiltin(int builtin_index) {
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
+ jmp(kScratchRegister);
+ if (FLAG_code_comments) RecordComment("]");
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
@@ -1726,9 +1771,17 @@ void TurboAssembler::CallCodeObject(Register code_object) {
call(code_object);
}
-void TurboAssembler::JumpCodeObject(Register code_object) {
+void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
LoadCodeObjectEntry(code_object, code_object);
- jmp(code_object);
+ switch (jump_mode) {
+ case JumpMode::kJump:
+ jmp(code_object);
+ return;
+ case JumpMode::kPushAndReturn:
+ pushq(code_object);
+ Ret();
+ return;
+ }
}
void TurboAssembler::RetpolineCall(Register reg) {
@@ -1770,29 +1823,69 @@ void TurboAssembler::RetpolineJump(Register reg) {
ret(0);
}
+void TurboAssembler::Pmaddwd(XMMRegister dst, XMMRegister src1, Operand src2) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpmaddwd(dst, src1, src2);
+ } else {
+ if (dst != src1) {
+ movaps(dst, src1);
+ }
+ pmaddwd(dst, src2);
+ }
+}
+
void TurboAssembler::Pmaddwd(XMMRegister dst, XMMRegister src1,
XMMRegister src2) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vpmaddwd(dst, src1, src2);
} else {
- DCHECK_EQ(dst, src1);
+ if (dst != src1) {
+ movaps(dst, src1);
+ }
pmaddwd(dst, src2);
}
}
void TurboAssembler::Pmaddubsw(XMMRegister dst, XMMRegister src1,
+ Operand src2) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpmaddubsw(dst, src1, src2);
+ } else {
+ CpuFeatureScope ssse3_scope(this, SSSE3);
+ if (dst != src1) {
+ movaps(dst, src1);
+ }
+ pmaddubsw(dst, src2);
+ }
+}
+
+void TurboAssembler::Pmaddubsw(XMMRegister dst, XMMRegister src1,
XMMRegister src2) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vpmaddubsw(dst, src1, src2);
} else {
CpuFeatureScope ssse3_scope(this, SSSE3);
- DCHECK_EQ(dst, src1);
+ if (dst != src1) {
+ movaps(dst, src1);
+ }
pmaddubsw(dst, src2);
}
}
+void TurboAssembler::Unpcklps(XMMRegister dst, XMMRegister src1, Operand src2) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vunpcklps(dst, src1, src2);
+ } else {
+ DCHECK_EQ(dst, src1);
+ unpcklps(dst, src2);
+ }
+}
+
void TurboAssembler::Shufps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
byte imm8) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -2039,10 +2132,12 @@ void TurboAssembler::Pmulhrsw(XMMRegister dst, XMMRegister src1,
void TurboAssembler::I32x4SConvertI16x8High(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
- // Copy top half (64-bit) of src into both halves of dst.
- vpunpckhqdq(dst, src, src);
- vpmovsxwd(dst, dst);
+ // src = |a|b|c|d|e|f|g|h| (high)
+ // dst = |e|e|f|f|g|g|h|h|
+ vpunpckhwd(dst, src, src);
+ vpsrad(dst, dst, 16);
} else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
if (dst == src) {
      // 2 bytes shorter than pshufd, but has dependency on dst.
movhlps(dst, src);
@@ -2065,6 +2160,7 @@ void TurboAssembler::I32x4UConvertI16x8High(XMMRegister dst, XMMRegister src) {
vpxor(scratch, scratch, scratch);
vpunpckhwd(dst, src, scratch);
} else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
if (dst == src) {
// xorps can be executed on more ports than pshufd.
xorps(kScratchDoubleReg, kScratchDoubleReg);
@@ -2080,10 +2176,12 @@ void TurboAssembler::I32x4UConvertI16x8High(XMMRegister dst, XMMRegister src) {
void TurboAssembler::I16x8SConvertI8x16High(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
- // Copy top half (64-bit) of src into both halves of dst.
- vpunpckhqdq(dst, src, src);
- vpmovsxbw(dst, dst);
+ // src = |a|b|c|d|e|f|g|h|i|j|k|l|m|n|o|p| (high)
+ // dst = |i|i|j|j|k|k|l|l|m|m|n|n|o|o|p|p|
+ vpunpckhbw(dst, src, src);
+ vpsraw(dst, dst, 8);
} else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
if (dst == src) {
      // 2 bytes shorter than pshufd, but has dependency on dst.
movhlps(dst, src);
@@ -2111,6 +2209,7 @@ void TurboAssembler::I16x8UConvertI8x16High(XMMRegister dst, XMMRegister src) {
xorps(kScratchDoubleReg, kScratchDoubleReg);
punpckhbw(dst, kScratchDoubleReg);
} else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
// No dependency on dst.
pshufd(dst, src, 0xEE);
pmovzxbw(dst, dst);
@@ -2118,6 +2217,30 @@ void TurboAssembler::I16x8UConvertI8x16High(XMMRegister dst, XMMRegister src) {
}
}
+void TurboAssembler::I64x2SConvertI32x4High(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpunpckhqdq(dst, src, src);
+ vpmovsxdq(dst, dst);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pshufd(dst, src, 0xEE);
+ pmovsxdq(dst, dst);
+ }
+}
+
+void TurboAssembler::I64x2UConvertI32x4High(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ vpunpckhdq(dst, src, kScratchDoubleReg);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pshufd(dst, src, 0xEE);
+ pmovzxdq(dst, dst);
+ }
+}
+
// 1. Unpack src0, src0 into even-number elements of scratch.
// 2. Unpack src1, src1 into even-number elements of dst.
// 3. Multiply 1. with 2.
@@ -2189,6 +2312,313 @@ void TurboAssembler::I16x8ExtMul(XMMRegister dst, XMMRegister src1,
}
}
+void TurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2) {
+ // k = i16x8.splat(0x8000)
+ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ Psllw(kScratchDoubleReg, byte{15});
+
+ Pmulhrsw(dst, src1, src2);
+ Pcmpeqw(kScratchDoubleReg, dst);
+ Pxor(dst, kScratchDoubleReg);
+}
+
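// Illustrative scalar sketch (not part of this patch): the per-lane Q15
// rounding multiply plus the 0x8000 fix-up performed by the sequence above.
// The helper name is made up; pmulhrsw computes (((a * b) >> 14) + 1) >> 1,
// written below in the equivalent closed form (a * b + 0x4000) >> 15.
#include <cstdint>

inline int16_t Q15MulRSatSLane(int16_t a, int16_t b) {
  int32_t rounded = (int32_t{a} * b + 0x4000) >> 15;  // pmulhrsw per lane
  int16_t result = static_cast<int16_t>(rounded);
  // Only INT16_MIN * INT16_MIN wraps to 0x8000; the pcmpeqw/pxor pair above
  // flips that lane to the saturated value 0x7FFF.
  return result == INT16_MIN ? INT16_MAX : result;
}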
+void TurboAssembler::S128Store32Lane(Operand dst, XMMRegister src,
+ uint8_t laneidx) {
+ if (laneidx == 0) {
+ Movss(dst, src);
+ } else {
+ DCHECK_GE(3, laneidx);
+ Extractps(dst, src, laneidx);
+ }
+}
+
+void TurboAssembler::S128Store64Lane(Operand dst, XMMRegister src,
+ uint8_t laneidx) {
+ if (laneidx == 0) {
+ Movlps(dst, src);
+ } else {
+ DCHECK_EQ(1, laneidx);
+ Movhps(dst, src);
+ }
+}
+
+void TurboAssembler::I8x16Popcnt(XMMRegister dst, XMMRegister src,
+ XMMRegister tmp) {
+ DCHECK_NE(dst, tmp);
+ DCHECK_NE(src, tmp);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vmovdqa(tmp, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x0f()));
+ vpandn(kScratchDoubleReg, tmp, src);
+ vpand(dst, tmp, src);
+ vmovdqa(tmp, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_popcnt_mask()));
+ vpsrlw(kScratchDoubleReg, kScratchDoubleReg, 4);
+ vpshufb(dst, tmp, dst);
+ vpshufb(kScratchDoubleReg, tmp, kScratchDoubleReg);
+ vpaddb(dst, dst, kScratchDoubleReg);
+ } else if (CpuFeatures::IsSupported(ATOM)) {
+    // Pre-Goldmont low-power Intel microarchitectures have a very slow
+    // PSHUFB instruction, so use a PSHUFB-free divide-and-conquer algorithm
+    // on these processors. The ATOM CPU feature captures exactly the right
+    // set of processors.
+ xorps(tmp, tmp);
+ pavgb(tmp, src);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ andps(tmp, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x55()));
+ psubb(dst, tmp);
+ Operand splat_0x33 = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x33());
+ movaps(tmp, dst);
+ andps(dst, splat_0x33);
+ psrlw(tmp, 2);
+ andps(tmp, splat_0x33);
+ paddb(dst, tmp);
+ movaps(tmp, dst);
+ psrlw(dst, 4);
+ paddb(dst, tmp);
+ andps(dst, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x0f()));
+ } else {
+ movaps(tmp, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x0f()));
+ Operand mask = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_popcnt_mask());
+ Move(kScratchDoubleReg, tmp);
+ andps(tmp, src);
+ andnps(kScratchDoubleReg, src);
+ psrlw(kScratchDoubleReg, 4);
+ movaps(dst, mask);
+ pshufb(dst, tmp);
+ movaps(tmp, mask);
+ pshufb(tmp, kScratchDoubleReg);
+ paddb(dst, tmp);
+ }
+}
+
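// Illustrative scalar sketch (not part of this patch): the nibble-table idea
// behind the PSHUFB path above, applied to one byte. The helper name is made
// up; the 16-entry table plays the role of the popcnt mask constant that the
// vector code loads from memory.
#include <cstdint>

inline uint8_t PopcntByteViaNibbleTable(uint8_t b) {
  static constexpr uint8_t kNibblePopcnt[16] = {0, 1, 1, 2, 1, 2, 2, 3,
                                                1, 2, 2, 3, 2, 3, 3, 4};
  // Split into low and high nibble (the pand/pandn + psrlw 4 steps above),
  // look both up, and add (the pshufb + paddb steps).
  return kNibblePopcnt[b & 0x0F] + kNibblePopcnt[b >> 4];
}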
+void TurboAssembler::F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src) {
+  // dst = [ src_low, 0x43300000, src_high, 0x43300000 ];
+  // 0x43300000'00000000 is a special double whose significand bits precisely
+  // represent all uint32 values.
+ Unpcklps(dst, src,
+ ExternalReferenceAsOperand(
+ ExternalReference::
+ address_of_wasm_f64x2_convert_low_i32x4_u_int_mask()));
+ Subpd(dst, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_double_2_power_52()));
+}
+
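// Illustrative scalar sketch (not part of this patch): the 2^52 bias trick the
// conversion above relies on, for a single lane. Assumes a little-endian
// IEEE-754 host; the helper name is made up.
#include <cstdint>
#include <cstring>

inline double Uint32ToDoubleVia2Pow52(uint32_t x) {
  // Pairing x with the exponent word 0x43300000 encodes the double 2^52 + x
  // exactly (the Unpcklps step above); subtracting 2^52 recovers x.
  uint64_t bits = (uint64_t{0x43300000} << 32) | x;
  double biased;
  std::memcpy(&biased, &bits, sizeof biased);
  return biased - 4503599627370496.0;  // 2^52
}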
+void TurboAssembler::I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ XMMRegister original_dst = dst;
+ // Make sure we don't overwrite src.
+ if (dst == src) {
+ DCHECK_NE(src, kScratchDoubleReg);
+ dst = kScratchDoubleReg;
+ }
+ // dst = 0 if src == NaN, else all ones.
+ vcmpeqpd(dst, src, src);
+ // dst = 0 if src == NaN, else INT32_MAX as double.
+ vandpd(dst, dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_int32_max_as_double()));
+ // dst = 0 if src == NaN, src is saturated to INT32_MAX as double.
+ vminpd(dst, src, dst);
+    // Values > INT32_MAX are already saturated; values < INT32_MIN raise an
+    // exception, which is masked and returns 0x80000000.
+ vcvttpd2dq(dst, dst);
+ if (original_dst != dst) {
+ Move(original_dst, dst);
+ }
+ } else {
+ if (dst != src) {
+ Move(dst, src);
+ }
+ Move(kScratchDoubleReg, dst);
+ cmpeqpd(kScratchDoubleReg, dst);
+ andps(kScratchDoubleReg,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_int32_max_as_double()));
+ minpd(dst, kScratchDoubleReg);
+ cvttpd2dq(dst, dst);
+ }
+}
+
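// Illustrative scalar sketch (not part of this patch): the NaN masking and
// INT32_MAX clamp the signed truncation above performs for one lane. The
// helper name is made up.
#include <cmath>
#include <cstdint>

inline int32_t TruncSatF64ToI32(double x) {
  if (std::isnan(x)) return 0;              // the cmpeqpd/andps mask
  if (x > 2147483647.0) x = 2147483647.0;   // minpd with INT32_MAX as double
  if (x < -2147483648.0) return INT32_MIN;  // masked invalid -> 0x80000000
  return static_cast<int32_t>(x);           // cvttpd2dq truncates toward zero
}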
+void TurboAssembler::I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vxorpd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ // Saturate to 0.
+ vmaxpd(dst, src, kScratchDoubleReg);
+ // Saturate to UINT32_MAX.
+ vminpd(dst, dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_uint32_max_as_double()));
+ // Truncate.
+ vroundpd(dst, dst, kRoundToZero);
+    // Add 2^52 so the uint32 result lands in the low significand bits.
+ vaddpd(dst, dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_double_2_power_52()));
+ // Extract low 32 bits of each double's significand, zero top lanes.
+ // dst = [dst[0], dst[2], 0, 0]
+ vshufps(dst, dst, kScratchDoubleReg, 0x88);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst != src) {
+ Move(dst, src);
+ }
+ xorps(kScratchDoubleReg, kScratchDoubleReg);
+ maxpd(dst, kScratchDoubleReg);
+ minpd(dst, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_uint32_max_as_double()));
+ roundpd(dst, dst, kRoundToZero);
+ addpd(dst, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_double_2_power_52()));
+ shufps(dst, kScratchDoubleReg, 0x88);
+ }
+}
+
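// Illustrative scalar sketch (not part of this patch): the clamp / truncate /
// add-2^52 extraction the unsigned truncation above performs for one lane.
// Assumes a little-endian IEEE-754 host; the helper name is made up.
#include <cmath>
#include <cstdint>
#include <cstring>

inline uint32_t TruncSatF64ToU32(double x) {
  if (!(x > 0.0)) x = 0.0;                 // maxpd with 0.0 (NaN also -> 0)
  if (x > 4294967295.0) x = 4294967295.0;  // minpd with UINT32_MAX as double
  x = std::trunc(x);                       // roundpd toward zero
  double biased = x + 4503599627370496.0;  // addpd with 2^52
  uint64_t bits;
  std::memcpy(&bits, &biased, sizeof bits);
  return static_cast<uint32_t>(bits);      // low significand bits (the shufps)
}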
+void TurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ XMMRegister tmp = dst == src ? kScratchDoubleReg : dst;
+ CpuFeatureScope avx_scope(this, AVX);
+ vpxor(tmp, tmp, tmp);
+ vpsubq(tmp, tmp, src);
+ vblendvpd(dst, src, tmp, src);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE3);
+ movshdup(kScratchDoubleReg, src);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ psrad(kScratchDoubleReg, 31);
+ xorps(dst, kScratchDoubleReg);
+ psubq(dst, kScratchDoubleReg);
+ }
+}
+
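// Illustrative scalar sketch (not part of this patch): the sign-mask absolute
// value identity used by the non-AVX path above, for one 64-bit lane. The
// helper name is made up; the vector code builds the mask with movshdup and
// psrad 31 instead of a 63-bit shift.
#include <cstdint>

inline int64_t AbsViaSignMask(int64_t x) {
  uint64_t mask = static_cast<uint64_t>(x >> 63);  // all ones iff x < 0
  return static_cast<int64_t>((static_cast<uint64_t>(x) ^ mask) - mask);
}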
+void TurboAssembler::I64x2GtS(XMMRegister dst, XMMRegister src0,
+ XMMRegister src1) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpcmpgtq(dst, src0, src1);
+ } else if (CpuFeatures::IsSupported(SSE4_2)) {
+ CpuFeatureScope sse_scope(this, SSE4_2);
+ DCHECK_EQ(dst, src0);
+ pcmpgtq(dst, src1);
+ } else {
+ DCHECK_NE(dst, src0);
+ DCHECK_NE(dst, src1);
+ movdqa(dst, src1);
+ movdqa(kScratchDoubleReg, src0);
+ psubq(dst, src0);
+ pcmpeqd(kScratchDoubleReg, src1);
+ pand(dst, kScratchDoubleReg);
+ movdqa(kScratchDoubleReg, src0);
+ pcmpgtd(kScratchDoubleReg, src1);
+ por(dst, kScratchDoubleReg);
+ pshufd(dst, dst, 0xF5);
+ }
+}
+
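// Illustrative scalar sketch (not part of this patch): a reference for the
// per-lane 64-bit signed greater-than all three paths above compute. The
// high/low split shows why 32-bit compares (plus a 64-bit subtract in the
// SSE2 fallback) are enough; the helper name is made up.
#include <cstdint>

inline bool I64GtSLane(int64_t a, int64_t b) {
  int32_t a_hi = static_cast<int32_t>(a >> 32);
  int32_t b_hi = static_cast<int32_t>(b >> 32);
  uint32_t a_lo = static_cast<uint32_t>(a);
  uint32_t b_lo = static_cast<uint32_t>(b);
  // The signed high words decide unless they are equal, in which case the
  // unsigned low words decide.
  return a_hi > b_hi || (a_hi == b_hi && a_lo > b_lo);
}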
+void TurboAssembler::I64x2GeS(XMMRegister dst, XMMRegister src0,
+ XMMRegister src1) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpcmpgtq(dst, src1, src0);
+ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ vpxor(dst, dst, kScratchDoubleReg);
+ } else if (CpuFeatures::IsSupported(SSE4_2)) {
+ CpuFeatureScope sse_scope(this, SSE4_2);
+ DCHECK_NE(dst, src0);
+ if (dst != src1) {
+ movdqa(dst, src1);
+ }
+ pcmpgtq(dst, src0);
+ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ pxor(dst, kScratchDoubleReg);
+ } else {
+ DCHECK_NE(dst, src0);
+ DCHECK_NE(dst, src1);
+ movdqa(dst, src0);
+ movdqa(kScratchDoubleReg, src1);
+ psubq(dst, src1);
+ pcmpeqd(kScratchDoubleReg, src0);
+ pand(dst, kScratchDoubleReg);
+ movdqa(kScratchDoubleReg, src1);
+ pcmpgtd(kScratchDoubleReg, src0);
+ por(dst, kScratchDoubleReg);
+ pshufd(dst, dst, 0xF5);
+ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ pxor(dst, kScratchDoubleReg);
+ }
+}
+
+void TurboAssembler::I16x8ExtAddPairwiseI8x16S(XMMRegister dst,
+ XMMRegister src) {
+  // pmaddubsw treats the first operand as unsigned, so the external reference
+  // must be passed to it as the first operand.
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x01());
+ if (dst == src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vmovdqa(kScratchDoubleReg, op);
+ vpmaddubsw(dst, kScratchDoubleReg, src);
+ } else {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ movaps(kScratchDoubleReg, op);
+ pmaddubsw(kScratchDoubleReg, src);
+ movaps(dst, kScratchDoubleReg);
+ }
+ } else {
+ Movdqa(dst, op);
+ Pmaddubsw(dst, dst, src);
+ }
+}
+
+void TurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst,
+ XMMRegister src) {
+ // src = |a|b|c|d|e|f|g|h|
+ // kScratchDoubleReg = i32x4.splat(0x0000FFFF)
+ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ Psrld(kScratchDoubleReg, byte{16});
+  // kScratchDoubleReg = |0|b|0|d|0|f|0|h|
+ Pand(kScratchDoubleReg, src);
+ // dst = |0|a|0|c|0|e|0|g|
+ Psrld(dst, src, byte{16});
+ // dst = |a+b|c+d|e+f|g+h|
+ Paddd(dst, kScratchDoubleReg);
+}
+
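// Illustrative scalar sketch (not part of this patch): what the mask / shift /
// add sequence above computes for one 32-bit lane holding two unsigned 16-bit
// values. The helper name is made up.
#include <cstdint>

inline uint32_t ExtAddPairwiseU16Lane(uint32_t lane) {
  uint32_t low = lane & 0xFFFF;  // the Pand with the 0x0000FFFF splat
  uint32_t high = lane >> 16;    // the Psrld by 16
  return low + high;             // the Paddd
}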
+void TurboAssembler::I8x16Swizzle(XMMRegister dst, XMMRegister src,
+ XMMRegister mask) {
+  // Out-of-range indices should return 0. Adding 112 (with unsigned
+  // saturation) gives any index > 15 a value with the top bit set, so pshufb
+  // zeroes that lane.
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_swizzle_mask());
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpaddusb(kScratchDoubleReg, mask, op);
+ vpshufb(dst, src, kScratchDoubleReg);
+ } else {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ movdqa(kScratchDoubleReg, op);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ paddusb(kScratchDoubleReg, mask);
+ pshufb(dst, kScratchDoubleReg);
+ }
+}
+
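// Illustrative scalar sketch (not part of this patch): the saturating-add
// index masking the swizzle above performs for one lane index, using the 112
// bias mentioned in the comment. The helper name is made up.
#include <algorithm>
#include <cstdint>

inline uint8_t SwizzleSelector(uint8_t index) {
  // paddusb with 112: indices 0..15 keep their low nibble (all pshufb uses
  // when the top bit is clear); anything larger ends up >= 0x80, which makes
  // pshufb write zero for that lane.
  return static_cast<uint8_t>(std::min<int>(index + 112, 255));
}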
void TurboAssembler::Abspd(XMMRegister dst) {
Andps(dst, ExternalReferenceAsOperand(
ExternalReference::address_of_double_abs_constant()));
@@ -2432,6 +2862,15 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}
+void MacroAssembler::CmpInstanceTypeRange(Register map,
+ InstanceType lower_limit,
+ InstanceType higher_limit) {
+ DCHECK_LT(lower_limit, higher_limit);
+ movzxwl(kScratchRegister, FieldOperand(map, Map::kInstanceTypeOffset));
+ leal(kScratchRegister, Operand(kScratchRegister, 0u - lower_limit));
+ cmpl(kScratchRegister, Immediate(higher_limit - lower_limit));
+}
+
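// Illustrative scalar sketch (not part of this patch): the single unsigned
// comparison CmpInstanceTypeRange reduces the two-sided range check to,
// mirroring the leal/cmpl pair above. The helper name is made up.
#include <cstdint>

inline bool InstanceTypeInRange(uint32_t type, uint32_t lower_limit,
                                uint32_t higher_limit) {
  // type - lower_limit wraps around for type < lower_limit, so one unsigned
  // comparison (paired with below_equal at the call site) covers both bounds.
  return (type - lower_limit) <= (higher_limit - lower_limit);
}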
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
Condition is_smi = CheckSmi(object);
@@ -2480,9 +2919,10 @@ void MacroAssembler::AssertFunction(Register object) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
Push(object);
- CmpObjectType(object, JS_FUNCTION_TYPE, object);
+ LoadMap(object, object);
+ CmpInstanceTypeRange(object, FIRST_JS_FUNCTION_TYPE, LAST_JS_FUNCTION_TYPE);
Pop(object);
- Check(equal, AbortReason::kOperandIsNotAFunction);
+ Check(below_equal, AbortReason::kOperandIsNotAFunction);
}
}
@@ -2753,7 +3193,6 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Label* done, InvokeFlag flag) {
if (expected_parameter_count != actual_parameter_count) {
Label regular_invoke;
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
cmpl(expected_parameter_count, Immediate(kDontAdaptArgumentsSentinel));
@@ -2811,22 +3250,6 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
CallRuntime(Runtime::kThrowStackOverflow);
int3(); // This should be unreachable.
}
-#else
- // Both expected and actual are in (different) registers. This
- // is the case when we invoke functions using call and apply.
- cmpq(expected_parameter_count, actual_parameter_count);
- j(equal, &regular_invoke, Label::kNear);
- DCHECK_EQ(actual_parameter_count, rax);
- DCHECK_EQ(expected_parameter_count, rbx);
- Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
- if (flag == CALL_FUNCTION) {
- Call(adaptor, RelocInfo::CODE_TARGET);
- jmp(done, Label::kNear);
- } else {
- Jump(adaptor, RelocInfo::CODE_TARGET);
- }
-#endif
-
bind(&regular_invoke);
} else {
Move(rax, actual_parameter_count);
@@ -2881,11 +3304,16 @@ void TurboAssembler::Prologue() {
void TurboAssembler::EnterFrame(StackFrame::Type type) {
pushq(rbp);
movq(rbp, rsp);
- Push(Immediate(StackFrame::TypeToMarker(type)));
+ if (!StackFrame::IsJavaScript(type)) {
+ Push(Immediate(StackFrame::TypeToMarker(type)));
+ }
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
- if (emit_debug_code()) {
+ // TODO(v8:11429): Consider passing BASELINE instead, and checking for
+ // IsJSFrame or similar. Could then unify with manual frame leaves in the
+ // interpreter too.
+ if (emit_debug_code() && !StackFrame::IsJavaScript(type)) {
cmpq(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(type)));
Check(equal, AbortReason::kStackFrameTypesMustMatch);
@@ -2917,11 +3345,13 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
}
void TurboAssembler::AllocateStackSpace(int bytes) {
+ DCHECK_GE(bytes, 0);
while (bytes > kStackPageSize) {
subq(rsp, Immediate(kStackPageSize));
movb(Operand(rsp, 0), Immediate(0));
bytes -= kStackPageSize;
}
+ if (bytes == 0) return;
subq(rsp, Immediate(bytes));
}
#endif
@@ -3223,7 +3653,7 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
}
void TurboAssembler::ResetSpeculationPoisonRegister() {
- // TODO(tebbi): Perhaps, we want to put an lfence here.
+  // TODO(turbofan): Perhaps we want to put an lfence here.
Set(kSpeculationPoisonRegister, -1);
}
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index df87c07638..be0b07c17f 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -14,6 +14,7 @@
#include "src/codegen/x64/assembler-x64.h"
#include "src/common/globals.h"
#include "src/objects/contexts.h"
+#include "src/objects/tagged-index.h"
namespace v8 {
namespace internal {
@@ -184,6 +185,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Sqrtps, sqrtps)
AVX_OP(Sqrtpd, sqrtpd)
AVX_OP(Cvttps2dq, cvttps2dq)
+ AVX_OP(Cvttpd2dq, cvttpd2dq)
AVX_OP(Ucomiss, ucomiss)
AVX_OP(Ucomisd, ucomisd)
AVX_OP(Pand, pand)
@@ -227,6 +229,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Maxps, maxps)
AVX_OP(Maxpd, maxpd)
AVX_OP(Cvtdq2ps, cvtdq2ps)
+ AVX_OP(Cvtdq2pd, cvtdq2pd)
+ AVX_OP(Cvtpd2ps, cvtpd2ps)
+ AVX_OP(Cvtps2pd, cvtps2pd)
AVX_OP(Rcpps, rcpps)
AVX_OP(Rsqrtps, rsqrtps)
AVX_OP(Addps, addps)
@@ -320,6 +325,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Push(Operand src);
void Push(Immediate value);
void Push(Smi smi);
+ void Push(TaggedIndex index) {
+ Push(Immediate(static_cast<uint32_t>(index.ptr())));
+ }
void Push(Handle<HeapObject> source);
enum class PushArrayOrder { kNormal, kReverse };
@@ -354,6 +362,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Label::Distance condition_met_distance = Label::kFar);
void Movapd(XMMRegister dst, XMMRegister src);
+ void Movdqa(XMMRegister dst, Operand src);
void Movdqa(XMMRegister dst, XMMRegister src);
template <typename Dst, typename Src>
@@ -438,6 +447,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
movq(dst, constant);
}
+ void Move(Register dst, TaggedIndex source) {
+ movl(dst, Immediate(static_cast<uint32_t>(source.ptr())));
+ }
+
+ void Move(Operand dst, TaggedIndex source) {
+ movl(dst, Immediate(static_cast<uint32_t>(source.ptr())));
+ }
+
void Move(Register dst, ExternalReference ext);
void Move(XMMRegister dst, uint32_t src);
@@ -450,6 +467,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Move(Register target, Register source);
void Move(XMMRegister target, XMMRegister source);
+ void Move(Register target, Operand source);
+ void Move(Register target, Immediate source);
+
void Move(Register dst, Handle<HeapObject> source,
RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT);
void Move(Operand dst, Handle<HeapObject> source,
@@ -505,10 +525,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Operand EntryFromBuiltinIndexAsOperand(Register builtin_index);
void CallBuiltinByIndex(Register builtin_index) override;
void CallBuiltin(int builtin_index);
+ void TailCallBuiltin(int builtin_index);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
- void JumpCodeObject(Register code_object) override;
+ void JumpCodeObject(Register code_object,
+ JumpMode jump_mode = JumpMode::kJump) override;
void RetpolineCall(Register reg);
void RetpolineCall(Address destination, RelocInfo::Mode rmode);
@@ -528,10 +550,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Trap() override;
void DebugBreak() override;
- // Supports both AVX (dst != src1) and SSE (checks that dst == src1).
+ // Will move src1 to dst if dst != src1.
+ void Pmaddwd(XMMRegister dst, XMMRegister src1, Operand src2);
void Pmaddwd(XMMRegister dst, XMMRegister src1, XMMRegister src2);
+ void Pmaddubsw(XMMRegister dst, XMMRegister src1, Operand src2);
void Pmaddubsw(XMMRegister dst, XMMRegister src1, XMMRegister src2);
+ void Unpcklps(XMMRegister dst, XMMRegister src1, Operand src2);
// Shufps that will mov src1 into dst if AVX is not supported.
void Shufps(XMMRegister dst, XMMRegister src1, XMMRegister src2, byte imm8);
@@ -577,6 +602,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void I16x8UConvertI8x16High(XMMRegister dst, XMMRegister src);
void I32x4SConvertI16x8High(XMMRegister dst, XMMRegister src);
void I32x4UConvertI16x8High(XMMRegister dst, XMMRegister src);
+ void I64x2SConvertI32x4High(XMMRegister dst, XMMRegister src);
+ void I64x2UConvertI32x4High(XMMRegister dst, XMMRegister src);
// Requires dst == mask when AVX is not supported.
void S128Select(XMMRegister dst, XMMRegister mask, XMMRegister src1,
@@ -590,6 +617,26 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void I16x8ExtMul(XMMRegister dst, XMMRegister src1, XMMRegister src2,
bool low, bool is_signed);
+ void I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1, XMMRegister src2);
+
+ void S128Store32Lane(Operand dst, XMMRegister src, uint8_t laneidx);
+ void S128Store64Lane(Operand dst, XMMRegister src, uint8_t laneidx);
+
+ void I8x16Popcnt(XMMRegister dst, XMMRegister src, XMMRegister tmp);
+
+ void F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src);
+ void I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src);
+ void I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src);
+
+ void I64x2Abs(XMMRegister dst, XMMRegister src);
+ void I64x2GtS(XMMRegister dst, XMMRegister src0, XMMRegister src1);
+ void I64x2GeS(XMMRegister dst, XMMRegister src0, XMMRegister src1);
+
+ void I16x8ExtAddPairwiseI8x16S(XMMRegister dst, XMMRegister src);
+ void I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src);
+
+ void I8x16Swizzle(XMMRegister dst, XMMRegister src, XMMRegister mask);
+
void Abspd(XMMRegister dst);
void Negpd(XMMRegister dst);
@@ -639,7 +686,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void AllocateStackSpace(int bytes);
#else
void AllocateStackSpace(Register bytes) { subq(rsp, bytes); }
- void AllocateStackSpace(int bytes) { subq(rsp, Immediate(bytes)); }
+ void AllocateStackSpace(int bytes) {
+ DCHECK_GE(bytes, 0);
+ if (bytes == 0) return;
+ subq(rsp, Immediate(bytes));
+ }
#endif
// Removes current frame and its arguments from the stack preserving the
@@ -716,6 +767,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// compression is enabled.
void LoadTaggedPointerField(Register destination, Operand field_operand);
+ // Loads a field containing a Smi and decompresses it if pointer compression
+ // is enabled.
+ void LoadTaggedSignedField(Register destination, Operand field_operand);
+
// Loads a field containing any tagged value and decompresses it if necessary.
void LoadAnyTaggedField(Register destination, Operand field_operand);
@@ -736,6 +791,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// location.
void StoreTaggedField(Operand dst_field_operand, Immediate immediate);
void StoreTaggedField(Operand dst_field_operand, Register value);
+ void StoreTaggedSignedField(Operand dst_field_operand, Smi value);
// The following macros work even when pointer compression is not enabled.
void DecompressTaggedSigned(Register destination, Operand field_operand);
@@ -982,6 +1038,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Always use unsigned comparisons: above and below, not less and greater.
void CmpInstanceType(Register map, InstanceType type);
+  // Compare instance type ranges for a map (low and high inclusive).
+ // Always use unsigned comparisons: below_equal for a positive result.
+ void CmpInstanceTypeRange(Register map, InstanceType low, InstanceType high);
+
template <typename Field>
void DecodeField(Register reg) {
static const int shift = Field::kShift;
diff --git a/deps/v8/src/codegen/x64/sse-instr.h b/deps/v8/src/codegen/x64/sse-instr.h
index 717a79df07..452cc0f690 100644
--- a/deps/v8/src/codegen/x64/sse-instr.h
+++ b/deps/v8/src/codegen/x64/sse-instr.h
@@ -10,10 +10,12 @@
V(sqrtps, 0F, 51) \
V(rsqrtps, 0F, 52) \
V(rcpps, 0F, 53) \
+ V(cvtps2pd, 0F, 5A) \
V(cvtdq2ps, 0F, 5B)
// SSE instructions whose AVX version has three operands.
#define SSE_BINOP_INSTRUCTION_LIST(V) \
+ V(unpcklps, 0F, 14) \
V(andps, 0F, 54) \
V(andnps, 0F, 55) \
V(orps, 0F, 56) \
@@ -108,7 +110,9 @@
#define SSE2_UNOP_INSTRUCTION_LIST(V) \
V(ucomisd, 66, 0F, 2E) \
V(sqrtpd, 66, 0F, 51) \
- V(cvtps2dq, 66, 0F, 5B)
+ V(cvtpd2ps, 66, 0F, 5A) \
+ V(cvtps2dq, 66, 0F, 5B) \
+ V(cvttpd2dq, 66, 0F, E6)
// SSE2 shift instructions with an immediate operand. The last element is the
// extension to the opcode.
@@ -183,6 +187,7 @@
// These require AVX2, and we only define the VEX-128 versions.
#define AVX2_BROADCAST_LIST(V) \
+ V(vpbroadcastd, 66, 0F, 38, 58) \
V(vpbroadcastb, 66, 0F, 38, 78) \
V(vpbroadcastw, 66, 0F, 38, 79)
diff --git a/deps/v8/src/common/assert-scope.cc b/deps/v8/src/common/assert-scope.cc
index c362a629ff..5eec93aa08 100644
--- a/deps/v8/src/common/assert-scope.cc
+++ b/deps/v8/src/common/assert-scope.cc
@@ -69,6 +69,27 @@ bool PerIsolateAssertScope<kType, kAllow>::IsAllowed(Isolate* isolate) {
return PerIsolateDataBit<kType>::decode(isolate->per_isolate_assert_data());
}
+// static
+template <PerIsolateAssertType kType, bool kAllow>
+void PerIsolateAssertScope<kType, kAllow>::Open(Isolate* isolate,
+ bool* was_execution_allowed) {
+ DCHECK_NOT_NULL(isolate);
+ DCHECK_NOT_NULL(was_execution_allowed);
+ uint32_t old_data = isolate->per_isolate_assert_data();
+ *was_execution_allowed = PerIsolateDataBit<kType>::decode(old_data);
+ isolate->set_per_isolate_assert_data(
+ PerIsolateDataBit<kType>::update(old_data, kAllow));
+}
+// static
+template <PerIsolateAssertType kType, bool kAllow>
+void PerIsolateAssertScope<kType, kAllow>::Close(Isolate* isolate,
+ bool was_execution_allowed) {
+ DCHECK_NOT_NULL(isolate);
+ uint32_t old_data = isolate->per_isolate_assert_data();
+ isolate->set_per_isolate_assert_data(
+ PerIsolateDataBit<kType>::update(old_data, was_execution_allowed));
+}
+
// -----------------------------------------------------------------------------
// Instantiations.
diff --git a/deps/v8/src/common/assert-scope.h b/deps/v8/src/common/assert-scope.h
index be687e7382..114b4782c9 100644
--- a/deps/v8/src/common/assert-scope.h
+++ b/deps/v8/src/common/assert-scope.h
@@ -67,6 +67,11 @@ class V8_NODISCARD PerIsolateAssertScope {
static bool IsAllowed(Isolate* isolate);
+ V8_EXPORT_PRIVATE static void Open(Isolate* isolate,
+ bool* was_execution_allowed);
+ V8_EXPORT_PRIVATE static void Close(Isolate* isolate,
+ bool was_execution_allowed);
+
private:
Isolate* isolate_;
uint32_t old_data_;
diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h
index 516319cb98..d9d502aa51 100644
--- a/deps/v8/src/common/globals.h
+++ b/deps/v8/src/common/globals.h
@@ -58,6 +58,9 @@ constexpr int GB = MB * 1024;
#if (V8_TARGET_ARCH_S390 && !V8_HOST_ARCH_S390)
#define USE_SIMULATOR 1
#endif
+#if (V8_TARGET_ARCH_RISCV64 && !V8_HOST_ARCH_RISCV64)
+#define USE_SIMULATOR 1
+#endif
#endif
// Determine whether the architecture uses an embedded constant pool
@@ -96,13 +99,6 @@ STATIC_ASSERT(V8_DEFAULT_STACK_SIZE_KB* KB +
kStackLimitSlackForDeoptimizationInBytes <=
MB);
-// Determine whether double field unboxing feature is enabled.
-#if V8_TARGET_ARCH_64_BIT && !defined(V8_COMPRESS_POINTERS)
-#define V8_DOUBLE_FIELDS_UNBOXING false
-#else
-#define V8_DOUBLE_FIELDS_UNBOXING false
-#endif
-
// Determine whether dict mode prototypes feature is enabled.
#ifdef V8_DICT_MODE_PROTOTYPES
#define V8_DICT_MODE_PROTOTYPES_BOOL true
@@ -110,6 +106,13 @@ STATIC_ASSERT(V8_DEFAULT_STACK_SIZE_KB* KB +
#define V8_DICT_MODE_PROTOTYPES_BOOL false
#endif
+// Determine whether dict property constness tracking feature is enabled.
+#ifdef V8_DICT_PROPERTY_CONST_TRACKING
+#define V8_DICT_PROPERTY_CONST_TRACKING_BOOL true
+#else
+#define V8_DICT_PROPERTY_CONST_TRACKING_BOOL false
+#endif
+
// Determine whether tagged pointers are 8 bytes (used in Torque layouts for
// choosing where to insert padding).
#if V8_TARGET_ARCH_64_BIT && !defined(V8_COMPRESS_POINTERS)
@@ -119,7 +122,7 @@ STATIC_ASSERT(V8_DEFAULT_STACK_SIZE_KB* KB +
#endif
// Some types of tracing require the SFI to store a unique ID.
-#if defined(V8_TRACE_MAPS) || defined(V8_TRACE_IGNITION)
+#if defined(V8_TRACE_MAPS) || defined(V8_TRACE_UNOPTIMIZED)
#define V8_SFI_HAS_UNIQUE_ID true
#else
#define V8_SFI_HAS_UNIQUE_ID false
@@ -451,6 +454,10 @@ constexpr int kNoSourcePosition = -1;
// bytecode offset.
constexpr int kFunctionEntryBytecodeOffset = -1;
+// This constant is used to signal the bytecode offset used for function-exit
+// interrupt budget handling.
+constexpr int kFunctionExitBytecodeOffset = -1;
+
// This constant is used to indicate missing deoptimization information.
constexpr int kNoDeoptimizationId = -1;
@@ -1691,6 +1698,8 @@ enum class StubCallMode {
constexpr int kFunctionLiteralIdInvalid = -1;
constexpr int kFunctionLiteralIdTopLevel = 0;
+constexpr int kSwissNameDictionaryInitialCapacity = 4;
+
constexpr int kSmallOrderedHashSetMinCapacity = 4;
constexpr int kSmallOrderedHashMapMinCapacity = 4;
diff --git a/deps/v8/src/common/message-template.h b/deps/v8/src/common/message-template.h
index 2cb3e4e3ba..ff75adea90 100644
--- a/deps/v8/src/common/message-template.h
+++ b/deps/v8/src/common/message-template.h
@@ -27,8 +27,9 @@ namespace internal {
T(ApplyNonFunction, \
"Function.prototype.apply was called on %, which is a % and not a " \
"function") \
- T(ArgumentsDisallowedInInitializer, \
- "'arguments' is not allowed in class field initializer") \
+ T(ArgumentsDisallowedInInitializerAndStaticBlock, \
+ "'arguments' is not allowed in class field initializer or static " \
+ "initialization block") \
T(ArrayBufferTooShort, \
"Derived ArrayBuffer constructor created a buffer which was too small") \
T(ArrayBufferSpeciesThis, \
@@ -125,10 +126,14 @@ namespace internal {
T(NonCoercibleWithProperty, \
"Cannot destructure property '%' of '%' as it is %.") \
T(NonExtensibleProto, "% is not extensible") \
+ T(NonObjectAssertOption, "The 'assert' option must be an object") \
T(NonObjectInInstanceOfCheck, \
"Right-hand side of 'instanceof' is not an object") \
T(NonObjectPropertyLoad, "Cannot read property '%' of %") \
T(NonObjectPropertyStore, "Cannot set property '%' of %") \
+ T(NonObjectImportArgument, \
+ "The second argument to import() must be an object") \
+ T(NonStringImportAssertionValue, "Import assertion value must be a string") \
T(NoSetterInCallback, "Cannot set property % of % which has only a getter") \
T(NotAnIterator, "% is not an iterator") \
T(NotAPromise, "% is not a promise") \
@@ -387,6 +392,7 @@ namespace internal {
T(ForInOfLoopInitializer, \
"% loop variable declaration may not have an initializer.") \
T(ForOfLet, "The left-hand side of a for-of loop may not start with 'let'.") \
+ T(ForOfAsync, "The left-hand side of a for-of loop may not be 'async'.") \
T(ForInOfLoopMultiBindings, \
"Invalid left-hand side in % loop: Must have a single binding.") \
T(GeneratorInSingleStatementContext, \
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index d00b9b524b..8bcb609f1b 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -27,7 +27,7 @@ void DisposeCompilationJob(OptimizedCompilationJob* job,
bool restore_function_code) {
if (restore_function_code) {
Handle<JSFunction> function = job->compilation_info()->closure();
- function->set_code(function->shared().GetCode());
+ function->set_code(function->shared().GetCode(), kReleaseStore);
if (function->IsInOptimizationQueue()) {
function->ClearOptimizationMarker();
}
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index afc8551ae0..6175ef3e06 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -1,7 +1,6 @@
bmeurer@chromium.org
danno@chromium.org
sigurds@chromium.org
-tebbi@chromium.org
neis@chromium.org
mvstanton@chromium.org
mslekova@chromium.org
@@ -18,6 +17,16 @@ per-file wasm-*=jkummerow@chromium.org
per-file int64-lowering.*=ahaas@chromium.org
+per-file machine-operator.*=ahaas@chromium.org
+per-file machine-operator.*=bbudge@chromium.org
+per-file machine-operator.*=gdeepti@chromium.org
+per-file machine-operator.*=zhin@chromium.org
+
+per-file opcodes.*=ahaas@chromium.org
+per-file opcodes.*=bbudge@chromium.org
+per-file opcodes.*=gdeepti@chromium.org
+per-file opcodes.*=zhin@chromium.org
+
per-file simd-scalar-lowering.*=bbudge@chromium.org
per-file simd-scalar-lowering.*=gdeepti@chromium.org
per-file simd-scalar-lowering.*=zhin@chromium.org
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index ddf742e708..06806feb42 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -394,11 +394,9 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
descriptor));
} else if (details_representation.IsDouble()) {
field_type = type_cache_->kFloat64;
- if (!FLAG_unbox_double_fields) {
- unrecorded_dependencies.push_back(
- dependencies()->FieldRepresentationDependencyOffTheRecord(
- map_ref, descriptor));
- }
+ unrecorded_dependencies.push_back(
+ dependencies()->FieldRepresentationDependencyOffTheRecord(map_ref,
+ descriptor));
} else if (details_representation.IsHeapObject()) {
// Extract the field type from the property details (make sure its
// representation is TaggedPointer to reflect the heap object case).
@@ -433,7 +431,8 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
PropertyConstness constness;
if (details.IsReadOnly() && !details.IsConfigurable()) {
constness = PropertyConstness::kConst;
- } else if (broker()->is_turboprop() && !map->is_prototype_map()) {
+ } else if (broker()->is_turboprop() && !map->is_prototype_map() &&
+ !IsAnyStore(access_mode)) {
     // The constness feedback is too unstable for the aggressive compilation
// of turboprop.
constness = PropertyConstness::kMutable;
@@ -861,12 +860,10 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
transition_map_ref, number));
} else if (details_representation.IsDouble()) {
field_type = type_cache_->kFloat64;
- if (!FLAG_unbox_double_fields) {
- transition_map_ref.SerializeOwnDescriptor(number);
- unrecorded_dependencies.push_back(
- dependencies()->FieldRepresentationDependencyOffTheRecord(
- transition_map_ref, number));
- }
+ transition_map_ref.SerializeOwnDescriptor(number);
+ unrecorded_dependencies.push_back(
+ dependencies()->FieldRepresentationDependencyOffTheRecord(
+ transition_map_ref, number));
} else if (details_representation.IsHeapObject()) {
// Extract the field type from the property details (make sure its
// representation is TaggedPointer to reflect the heap object case).
diff --git a/deps/v8/src/compiler/allocation-builder-inl.h b/deps/v8/src/compiler/allocation-builder-inl.h
index ff1404baa7..022d6aa4d4 100644
--- a/deps/v8/src/compiler/allocation-builder-inl.h
+++ b/deps/v8/src/compiler/allocation-builder-inl.h
@@ -27,11 +27,21 @@ void AllocationBuilder::AllocateContext(int variadic_part_length, MapRef map) {
jsgraph()->Constant(variadic_part_length));
}
+// static
+bool AllocationBuilder::CanAllocateArray(int length, MapRef map,
+ AllocationType allocation) {
+ DCHECK(map.instance_type() == FIXED_ARRAY_TYPE ||
+ map.instance_type() == FIXED_DOUBLE_ARRAY_TYPE);
+ int const size = (map.instance_type() == FIXED_ARRAY_TYPE)
+ ? FixedArray::SizeFor(length)
+ : FixedDoubleArray::SizeFor(length);
+ return size <= Heap::MaxRegularHeapObjectSize(allocation);
+}
+
// Compound allocation of a FixedArray.
void AllocationBuilder::AllocateArray(int length, MapRef map,
AllocationType allocation) {
- DCHECK(map.instance_type() == FIXED_ARRAY_TYPE ||
- map.instance_type() == FIXED_DOUBLE_ARRAY_TYPE);
+ DCHECK(CanAllocateArray(length, map, allocation));
int size = (map.instance_type() == FIXED_ARRAY_TYPE)
? FixedArray::SizeFor(length)
: FixedDoubleArray::SizeFor(length);
@@ -40,8 +50,16 @@ void AllocationBuilder::AllocateArray(int length, MapRef map,
Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
}
+// static
+bool AllocationBuilder::CanAllocateSloppyArgumentElements(
+ int length, MapRef map, AllocationType allocation) {
+ int const size = SloppyArgumentsElements::SizeFor(length);
+ return size <= Heap::MaxRegularHeapObjectSize(allocation);
+}
+
void AllocationBuilder::AllocateSloppyArgumentElements(
int length, MapRef map, AllocationType allocation) {
+ DCHECK(CanAllocateSloppyArgumentElements(length, map, allocation));
int size = SloppyArgumentsElements::SizeFor(length);
Allocate(size, allocation, Type::OtherInternal());
Store(AccessBuilder::ForMap(), map);
diff --git a/deps/v8/src/compiler/allocation-builder.h b/deps/v8/src/compiler/allocation-builder.h
index c9a2570493..289a06b1ad 100644
--- a/deps/v8/src/compiler/allocation-builder.h
+++ b/deps/v8/src/compiler/allocation-builder.h
@@ -52,10 +52,16 @@ class AllocationBuilder final {
inline void AllocateContext(int variadic_part_length, MapRef map);
// Compound allocation of a FixedArray.
+ inline static bool CanAllocateArray(
+ int length, MapRef map,
+ AllocationType allocation = AllocationType::kYoung);
inline void AllocateArray(int length, MapRef map,
AllocationType allocation = AllocationType::kYoung);
// Compound allocation of a SloppyArgumentsElements
+ static inline bool CanAllocateSloppyArgumentElements(
+ int length, MapRef map,
+ AllocationType allocation = AllocationType::kYoung);
inline void AllocateSloppyArgumentElements(
int length, MapRef map,
AllocationType allocation = AllocationType::kYoung);
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index 74215cac30..d243c07790 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -314,8 +314,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode,
ArmOperandConverter const& i) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
codegen->tasm()->and_(value, value, Operand(kSpeculationPoisonRegister));
@@ -326,8 +325,7 @@ void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
InstructionCode opcode,
ArmOperandConverter const& i,
Register address) {
- DCHECK_EQ(kMemoryAccessPoisoned,
- static_cast<MemoryAccessMode>(MiscField::decode(opcode)));
+ DCHECK_EQ(kMemoryAccessPoisoned, AccessModeField::decode(opcode));
switch (AddressingModeField::decode(opcode)) {
case kMode_Offset_RI:
codegen->tasm()->mov(address, i.InputImmediate(1));
@@ -517,8 +515,9 @@ void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
if (instr->InputAt(1)->IsImmediate()) { \
__ asm_imm(dt, dst, src, i.InputInt##width(1)); \
} else { \
- QwNeonRegister tmp = i.TempSimd128Register(0); \
- Register shift = i.TempRegister(1); \
+ UseScratchRegisterScope temps(tasm()); \
+ Simd128Register tmp = temps.AcquireQ(); \
+ Register shift = temps.Acquire(); \
constexpr int mask = (1 << width) - 1; \
__ and_(shift, i.InputRegister(1), Operand(mask)); \
__ vdup(sz, tmp, shift); \
@@ -536,8 +535,9 @@ void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
if (instr->InputAt(1)->IsImmediate()) { \
__ asm_imm(dt, dst, src, i.InputInt##width(1)); \
} else { \
- QwNeonRegister tmp = i.TempSimd128Register(0); \
- Register shift = i.TempRegister(1); \
+ UseScratchRegisterScope temps(tasm()); \
+ Simd128Register tmp = temps.AcquireQ(); \
+ Register shift = temps.Acquire(); \
constexpr int mask = (1 << width) - 1; \
__ and_(shift, i.InputRegister(1), Operand(mask)); \
__ vdup(sz, tmp, shift); \
@@ -558,30 +558,6 @@ void CodeGenerator::AssemblePrepareTailCall() {
frame_access_state()->SetFrameAccessToSP();
}
-void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Label done;
-
- // Check if current frame is an arguments adaptor frame.
- __ ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ cmp(scratch1,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &done);
-
- // Load arguments count from current arguments adaptor frame (note, it
- // does not include receiver).
- Register caller_args_count_reg = scratch1;
- __ ldr(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
-
- __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3);
- __ bind(&done);
-}
-
namespace {
void FlushPendingPushRegisters(TurboAssembler* tasm,
@@ -786,13 +762,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
@@ -901,7 +871,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CallCFunction(func, num_parameters);
}
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
- RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
+ RecordSafepoint(instr->reference_map());
}
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
@@ -1678,8 +1648,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVldrF32: {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
UseScratchRegisterScope temps(tasm());
Register address = temps.Acquire();
@@ -1716,8 +1685,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVldrF64: {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
UseScratchRegisterScope temps(tasm());
Register address = temps.Acquire();
@@ -1795,35 +1763,36 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ VFPCanonicalizeNaN(result, value);
break;
}
- case kArmPush:
- if (instr->InputAt(0)->IsFPRegister()) {
- LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
- switch (op->representation()) {
- case MachineRepresentation::kFloat32:
- __ vpush(i.InputFloatRegister(0));
- frame_access_state()->IncreaseSPDelta(1);
- break;
- case MachineRepresentation::kFloat64:
- __ vpush(i.InputDoubleRegister(0));
- frame_access_state()->IncreaseSPDelta(kDoubleSize /
- kSystemPointerSize);
- break;
- case MachineRepresentation::kSimd128: {
- __ vpush(i.InputSimd128Register(0));
- frame_access_state()->IncreaseSPDelta(kSimd128Size /
- kSystemPointerSize);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- } else {
- __ push(i.InputRegister(0));
- frame_access_state()->IncreaseSPDelta(1);
+ case kArmPush: {
+ int stack_decrement = i.InputInt32(0);
+ int slots = stack_decrement / kSystemPointerSize;
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(1));
+ MachineRepresentation rep = op->representation();
+ int pushed_slots = ElementSizeInPointers(rep);
+ // Slot-sized arguments are never padded but there may be a gap if
+ // the slot allocator reclaimed other padding slots. Adjust the stack
+ // here to skip any gap.
+ if (slots > pushed_slots) {
+ __ AllocateStackSpace((slots - pushed_slots) * kSystemPointerSize);
}
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ __ vpush(i.InputFloatRegister(1));
+ break;
+ case MachineRepresentation::kFloat64:
+ __ vpush(i.InputDoubleRegister(1));
+ break;
+ case MachineRepresentation::kSimd128:
+ __ vpush(i.InputSimd128Register(1));
+ break;
+ default:
+ __ push(i.InputRegister(1));
+ break;
+ }
+ frame_access_state()->IncreaseSPDelta(slots);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ }
case kArmPoke: {
int const slot = MiscField::decode(instr->opcode());
__ str(i.InputRegister(0), MemOperand(sp, slot * kSystemPointerSize));
@@ -2098,6 +2067,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vrintn(dst.high(), src.high());
break;
}
+ case kArmF64x2ConvertLowI32x4S: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ vcvt_f64_s32(dst.low(), SwVfpRegister::from_code(src.code() * 4));
+ __ vcvt_f64_s32(dst.high(), SwVfpRegister::from_code(src.code() * 4 + 1));
+ break;
+ }
+ case kArmF64x2ConvertLowI32x4U: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ vcvt_f64_u32(dst.low(), SwVfpRegister::from_code(src.code() * 4));
+ __ vcvt_f64_u32(dst.high(), SwVfpRegister::from_code(src.code() * 4 + 1));
+ break;
+ }
+ case kArmF64x2PromoteLowF32x4: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ vcvt_f64_f32(dst.low(), SwVfpRegister::from_code(src.code() * 4));
+ __ vcvt_f64_f32(dst.high(), SwVfpRegister::from_code(src.code() * 4 + 1));
+ break;
+ }
case kArmI64x2SplatI32Pair: {
Simd128Register dst = i.OutputSimd128Register();
__ vdup(Neon32, dst, i.InputRegister(0));
@@ -2123,11 +2113,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI64x2Mul: {
+ UseScratchRegisterScope temps(tasm());
QwNeonRegister dst = i.OutputSimd128Register();
QwNeonRegister left = i.InputSimd128Register(0);
QwNeonRegister right = i.InputSimd128Register(1);
QwNeonRegister tmp1 = i.TempSimd128Register(0);
- QwNeonRegister tmp2 = i.TempSimd128Register(1);
+ QwNeonRegister tmp2 = temps.AcquireQ();
// This algorithm uses vector operations to perform 64-bit integer
// multiplication by splitting it into a high and low 32-bit integers.
@@ -2167,6 +2158,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// dst: [ (a2*b3 + a3*b2)<<32 + (a2*b2) | (a0*b1 + a1*b0)<<32 + (a0*b0) ]
break;
}
+ case kArmI64x2Abs: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register tmp = temps.AcquireQ();
+ __ vshr(NeonS64, tmp, src, 63);
+ __ veor(dst, src, tmp);
+ __ vsub(Neon64, dst, dst, tmp);
+ break;
+ }
case kArmI64x2Neg: {
Simd128Register dst = i.OutputSimd128Register();
__ vmov(dst, uint64_t{0});
@@ -2190,16 +2191,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI64x2BitMask: {
- UseScratchRegisterScope temps(tasm());
- Register dst = i.OutputRegister();
- Simd128Register src = i.InputSimd128Register(0);
- QwNeonRegister tmp1 = temps.AcquireQ();
- Register tmp = temps.Acquire();
-
- __ vshr(NeonU64, tmp1, src, 63);
- __ vmov(NeonU32, dst, tmp1.low(), 0);
- __ vmov(NeonU32, tmp, tmp1.high(), 0);
- __ add(dst, dst, Operand(tmp, LSL, 1));
+ __ I64x2BitMask(i.OutputRegister(), i.InputSimd128Register(0));
break;
}
case kArmI64x2SConvertI32x4Low: {
@@ -2381,6 +2373,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vbsl(dst, rhs, lhs);
break;
}
+ case kArmF32x4DemoteF64x2Zero: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ vcvt_f32_f64(SwVfpRegister::from_code(dst.code() * 4), src.low());
+ __ vcvt_f32_f64(SwVfpRegister::from_code(dst.code() * 4 + 1), src.high());
+ __ vmov(dst.high(), 0);
+ break;
+ }
case kArmI32x4Splat: {
__ vdup(Neon32, i.OutputSimd128Register(), i.InputRegister(0));
break;
@@ -2450,13 +2450,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI64x2Eq: {
+ __ I64x2Eq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmI64x2Ne: {
Simd128Register dst = i.OutputSimd128Register();
UseScratchRegisterScope temps(tasm());
- Simd128Register scratch = temps.AcquireQ();
+ Simd128Register tmp = temps.AcquireQ();
__ vceq(Neon32, dst, i.InputSimd128Register(0),
i.InputSimd128Register(1));
- __ vrev64(Neon32, scratch, dst);
- __ vand(dst, dst, scratch);
+ __ vrev64(Neon32, tmp, dst);
+ __ vand(dst, dst, tmp);
+ __ vmvn(dst, dst);
+ break;
+ }
+ case kArmI64x2GtS: {
+ __ I64x2GtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmI64x2GeS: {
+ __ I64x2GeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kArmI32x4Eq: {
@@ -2525,19 +2541,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI32x4BitMask: {
Register dst = i.OutputRegister();
+ UseScratchRegisterScope temps(tasm());
Simd128Register src = i.InputSimd128Register(0);
- Simd128Register tmp2 = i.TempSimd128Register(0);
- Simd128Register mask = i.TempSimd128Register(1);
+ Simd128Register tmp = temps.AcquireQ();
+ Simd128Register mask = i.TempSimd128Register(0);
- __ vshr(NeonS32, tmp2, src, 31);
+ __ vshr(NeonS32, tmp, src, 31);
// Set i-th bit of each lane i. When AND with tmp, the lanes that
// are signed will have i-th bit set, unsigned will be 0.
__ vmov(mask.low(), Double(uint64_t{0x0000'0002'0000'0001}));
__ vmov(mask.high(), Double(uint64_t{0x0000'0008'0000'0004}));
- __ vand(tmp2, mask, tmp2);
- __ vpadd(Neon32, tmp2.low(), tmp2.low(), tmp2.high());
- __ vpadd(Neon32, tmp2.low(), tmp2.low(), kDoubleRegZero);
- __ VmovLow(dst, tmp2.low());
+ __ vand(tmp, mask, tmp);
+ __ vpadd(Neon32, tmp.low(), tmp.low(), tmp.high());
+ __ vpadd(Neon32, tmp.low(), tmp.low(), kDoubleRegZero);
+ __ VmovLow(dst, tmp.low());
break;
}
case kArmI32x4DotI16x8S: {
@@ -2553,6 +2570,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpadd(Neon32, dst.high(), scratch.low(), scratch.high());
break;
}
+ case kArmI32x4TruncSatF64x2SZero: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ vcvt_s32_f64(SwVfpRegister::from_code(dst.code() * 4), src.low());
+ __ vcvt_s32_f64(SwVfpRegister::from_code(dst.code() * 4 + 1), src.high());
+ __ vmov(dst.high(), 0);
+ break;
+ }
+ case kArmI32x4TruncSatF64x2UZero: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ vcvt_u32_f64(SwVfpRegister::from_code(dst.code() * 4), src.low());
+ __ vcvt_u32_f64(SwVfpRegister::from_code(dst.code() * 4 + 1), src.high());
+ __ vmov(dst.high(), 0);
+ break;
+ }
case kArmI16x8Splat: {
__ vdup(Neon16, i.OutputSimd128Register(), i.InputRegister(0));
break;
@@ -2714,21 +2747,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI16x8BitMask: {
+ UseScratchRegisterScope temps(tasm());
Register dst = i.OutputRegister();
Simd128Register src = i.InputSimd128Register(0);
- Simd128Register tmp2 = i.TempSimd128Register(0);
- Simd128Register mask = i.TempSimd128Register(1);
+ Simd128Register tmp = temps.AcquireQ();
+ Simd128Register mask = i.TempSimd128Register(0);
- __ vshr(NeonS16, tmp2, src, 15);
+ __ vshr(NeonS16, tmp, src, 15);
// Set i-th bit of each lane i. When AND with tmp, the lanes that
// are signed will have i-th bit set, unsigned will be 0.
__ vmov(mask.low(), Double(uint64_t{0x0008'0004'0002'0001}));
__ vmov(mask.high(), Double(uint64_t{0x0080'0040'0020'0010}));
- __ vand(tmp2, mask, tmp2);
- __ vpadd(Neon16, tmp2.low(), tmp2.low(), tmp2.high());
- __ vpadd(Neon16, tmp2.low(), tmp2.low(), tmp2.low());
- __ vpadd(Neon16, tmp2.low(), tmp2.low(), tmp2.low());
- __ vmov(NeonU16, dst, tmp2.low(), 0);
+ __ vand(tmp, mask, tmp);
+ __ vpadd(Neon16, tmp.low(), tmp.low(), tmp.high());
+ __ vpadd(Neon16, tmp.low(), tmp.low(), tmp.low());
+ __ vpadd(Neon16, tmp.low(), tmp.low(), tmp.low());
+ __ vmov(NeonU16, dst, tmp.low(), 0);
break;
}
case kArmI16x8Q15MulRSatS: {
@@ -2873,23 +2907,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI8x16BitMask: {
+ UseScratchRegisterScope temps(tasm());
Register dst = i.OutputRegister();
Simd128Register src = i.InputSimd128Register(0);
- Simd128Register tmp2 = i.TempSimd128Register(0);
- Simd128Register mask = i.TempSimd128Register(1);
+ Simd128Register tmp = temps.AcquireQ();
+ Simd128Register mask = i.TempSimd128Register(0);
- __ vshr(NeonS8, tmp2, src, 7);
+ __ vshr(NeonS8, tmp, src, 7);
// Set i-th bit of each lane i. When AND with tmp, the lanes that
// are signed will have i-th bit set, unsigned will be 0.
__ vmov(mask.low(), Double(uint64_t{0x8040'2010'0804'0201}));
__ vmov(mask.high(), Double(uint64_t{0x8040'2010'0804'0201}));
- __ vand(tmp2, mask, tmp2);
- __ vext(mask, tmp2, tmp2, 8);
- __ vzip(Neon8, mask, tmp2);
- __ vpadd(Neon16, tmp2.low(), tmp2.low(), tmp2.high());
- __ vpadd(Neon16, tmp2.low(), tmp2.low(), tmp2.low());
- __ vpadd(Neon16, tmp2.low(), tmp2.low(), tmp2.low());
- __ vmov(NeonU16, dst, tmp2.low(), 0);
+ __ vand(tmp, mask, tmp);
+ __ vext(mask, tmp, tmp, 8);
+ __ vzip(Neon8, mask, tmp);
+ __ vpadd(Neon16, tmp.low(), tmp.low(), tmp.high());
+ __ vpadd(Neon16, tmp.low(), tmp.low(), tmp.low());
+ __ vpadd(Neon16, tmp.low(), tmp.low(), tmp.low());
+ __ vmov(NeonU16, dst, tmp.low(), 0);
break;
}
case kArmSignSelect: {
@@ -3241,9 +3276,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vrev16(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
- case kArmV32x4AnyTrue:
- case kArmV16x8AnyTrue:
- case kArmV8x16AnyTrue: {
+ case kArmV128AnyTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
UseScratchRegisterScope temps(tasm());
DwVfpRegister scratch = temps.AcquireD();
@@ -3254,6 +3287,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
+ case kArmV64x2AllTrue: {
+ __ V64x2AllTrue(i.OutputRegister(), i.InputSimd128Register(0));
+ break;
+ }
case kArmV32x4AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
UseScratchRegisterScope temps(tasm());
@@ -3675,7 +3712,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
gen_->zone()->New<ReferenceMap>(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ gen_->RecordSafepoint(reference_map);
if (FLAG_debug_code) {
__ stop();
}
@@ -3842,7 +3879,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
- RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ RecordSafepoint(reference_map);
if (FLAG_debug_code) {
__ stop();
}
@@ -3926,7 +3963,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
Register argc_reg = r3;
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// Functions with JS linkage have at least one parameter (the receiver).
// If {parameter_count} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
@@ -3934,9 +3970,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
parameter_count != 0;
-#else
- const bool drop_jsargs = false;
-#endif
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
diff --git a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
index b5a77a1a10..416f8a564a 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
@@ -154,6 +154,9 @@ namespace compiler {
V(ArmF64x2Floor) \
V(ArmF64x2Trunc) \
V(ArmF64x2NearestInt) \
+ V(ArmF64x2ConvertLowI32x4S) \
+ V(ArmF64x2ConvertLowI32x4U) \
+ V(ArmF64x2PromoteLowF32x4) \
V(ArmF32x4Splat) \
V(ArmF32x4ExtractLane) \
V(ArmF32x4ReplaceLane) \
@@ -177,8 +180,10 @@ namespace compiler {
V(ArmF32x4Le) \
V(ArmF32x4Pmin) \
V(ArmF32x4Pmax) \
+ V(ArmF32x4DemoteF64x2Zero) \
V(ArmI64x2SplatI32Pair) \
V(ArmI64x2ReplaceLaneI32Pair) \
+ V(ArmI64x2Abs) \
V(ArmI64x2Neg) \
V(ArmI64x2Shl) \
V(ArmI64x2ShrS) \
@@ -188,6 +193,9 @@ namespace compiler {
V(ArmI64x2ShrU) \
V(ArmI64x2BitMask) \
V(ArmI64x2Eq) \
+ V(ArmI64x2Ne) \
+ V(ArmI64x2GtS) \
+ V(ArmI64x2GeS) \
V(ArmI64x2SConvertI32x4Low) \
V(ArmI64x2SConvertI32x4High) \
V(ArmI64x2UConvertI32x4Low) \
@@ -222,6 +230,8 @@ namespace compiler {
V(ArmI32x4Abs) \
V(ArmI32x4BitMask) \
V(ArmI32x4DotI16x8S) \
+ V(ArmI32x4TruncSatF64x2SZero) \
+ V(ArmI32x4TruncSatF64x2UZero) \
V(ArmI16x8Splat) \
V(ArmI16x8ExtractLaneS) \
V(ArmI16x8ReplaceLane) \
@@ -327,11 +337,10 @@ namespace compiler {
V(ArmS8x8Reverse) \
V(ArmS8x4Reverse) \
V(ArmS8x2Reverse) \
- V(ArmV32x4AnyTrue) \
+ V(ArmV64x2AllTrue) \
V(ArmV32x4AllTrue) \
- V(ArmV16x8AnyTrue) \
V(ArmV16x8AllTrue) \
- V(ArmV8x16AnyTrue) \
+ V(ArmV128AnyTrue) \
V(ArmV8x16AllTrue) \
V(ArmS128Load8Splat) \
V(ArmS128Load16Splat) \
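
The opcode list above collapses the three per-shape AnyTrue opcodes into a single ArmV128AnyTrue and adds ArmV64x2AllTrue. This mirrors the SIMD semantics: any_true only asks whether any bit of the 128-bit value is set, so it is lane-size agnostic, while all_true must inspect every lane of a given width. A small scalar model of the two operations (plain C++ illustration, not part of the diff):

#include <cstdint>
#include <cstdio>
#include <cstring>

// v128.any_true: true if any bit anywhere in the vector is set; the lane
// shape is irrelevant, which is why one opcode suffices.
bool V128AnyTrue(const uint8_t bytes[16]) {
  for (int i = 0; i < 16; ++i) {
    if (bytes[i] != 0) return true;
  }
  return false;
}

// i64x2.all_true: every 64-bit lane must be non-zero, so the lane width
// matters and each shape keeps its own opcode.
bool I64x2AllTrue(const uint8_t bytes[16]) {
  uint64_t lanes[2];
  std::memcpy(lanes, bytes, sizeof(lanes));
  return lanes[0] != 0 && lanes[1] != 0;
}

int main() {
  uint8_t v[16] = {0};
  v[9] = 1;  // only one byte set
  std::printf("any=%d all64=%d\n", V128AnyTrue(v), I64x2AllTrue(v));  // any=1 all64=0
}
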
diff --git a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
index 8b52a18482..b82369e763 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
@@ -134,6 +134,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmF64x2Floor:
case kArmF64x2Trunc:
case kArmF64x2NearestInt:
+ case kArmF64x2ConvertLowI32x4S:
+ case kArmF64x2ConvertLowI32x4U:
+ case kArmF64x2PromoteLowF32x4:
case kArmF32x4Splat:
case kArmF32x4ExtractLane:
case kArmF32x4ReplaceLane:
@@ -157,8 +160,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmF32x4Le:
case kArmF32x4Pmin:
case kArmF32x4Pmax:
+ case kArmF32x4DemoteF64x2Zero:
case kArmI64x2SplatI32Pair:
case kArmI64x2ReplaceLaneI32Pair:
+ case kArmI64x2Abs:
case kArmI64x2Neg:
case kArmI64x2Shl:
case kArmI64x2ShrS:
@@ -168,6 +173,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI64x2ShrU:
case kArmI64x2BitMask:
case kArmI64x2Eq:
+ case kArmI64x2Ne:
+ case kArmI64x2GtS:
+ case kArmI64x2GeS:
case kArmI64x2SConvertI32x4Low:
case kArmI64x2SConvertI32x4High:
case kArmI64x2UConvertI32x4Low:
@@ -202,6 +210,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI32x4Abs:
case kArmI32x4BitMask:
case kArmI32x4DotI16x8S:
+ case kArmI32x4TruncSatF64x2SZero:
+ case kArmI32x4TruncSatF64x2UZero:
case kArmI16x8Splat:
case kArmI16x8ExtractLaneS:
case kArmI16x8ReplaceLane:
@@ -307,11 +317,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmS8x8Reverse:
case kArmS8x4Reverse:
case kArmS8x2Reverse:
- case kArmV32x4AnyTrue:
+ case kArmV64x2AllTrue:
case kArmV32x4AllTrue:
- case kArmV16x8AnyTrue:
case kArmV16x8AllTrue:
- case kArmV8x16AnyTrue:
+ case kArmV128AnyTrue:
case kArmV8x16AllTrue:
return kNoOpcodeFlags;
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index bd1e7c4b4f..3f15323297 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -108,10 +108,7 @@ void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
g.UseImmediate(node->InputAt(1)));
}
} else {
- InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()};
- selector->Emit(opcode, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)), arraysize(temps), temps);
+ VisitRRR(selector, opcode, node);
}
}
@@ -511,41 +508,9 @@ void InstructionSelector::VisitAbortCSAAssert(Node* node) {
Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r1));
}
-namespace {
-// Helper struct for load lane and store lane to indicate which opcode to use
-// and what memory size to be encoded in the opcode, and the new lane index.
-struct LoadStoreLaneParams {
- bool low_op;
- NeonSize sz;
- uint8_t laneidx;
- LoadStoreLaneParams(uint8_t laneidx, NeonSize sz, int lanes)
- : low_op(laneidx < lanes), sz(sz), laneidx(laneidx % lanes) {}
-};
-
-// The register mapping on ARM (1 Q to 2 D), means that loading/storing high
-// lanes of a Q register is equivalent to loading/storing the high D reg, modulo
-// number of lanes in a D reg. This function decides, based on the laneidx and
-// load/store size, whether the low or high D reg is accessed, and what the new
-// lane index is.
-LoadStoreLaneParams GetLoadStoreLaneParams(MachineRepresentation rep,
- uint8_t laneidx) {
- if (rep == MachineRepresentation::kWord8) {
- return LoadStoreLaneParams(laneidx, Neon8, 8);
- } else if (rep == MachineRepresentation::kWord16) {
- return LoadStoreLaneParams(laneidx, Neon16, 4);
- } else if (rep == MachineRepresentation::kWord32) {
- return LoadStoreLaneParams(laneidx, Neon32, 2);
- } else if (rep == MachineRepresentation::kWord64) {
- return LoadStoreLaneParams(laneidx, Neon64, 1);
- } else {
- UNREACHABLE();
- }
-}
-} // namespace
-
void InstructionSelector::VisitStoreLane(Node* node) {
StoreLaneParameters params = StoreLaneParametersOf(node->op());
- LoadStoreLaneParams f = GetLoadStoreLaneParams(params.rep, params.laneidx);
+ LoadStoreLaneParams f(params.rep, params.laneidx);
InstructionCode opcode =
f.low_op ? kArmS128StoreLaneLow : kArmS128StoreLaneHigh;
opcode |= MiscField::encode(f.sz);
@@ -563,8 +528,7 @@ void InstructionSelector::VisitStoreLane(Node* node) {
void InstructionSelector::VisitLoadLane(Node* node) {
LoadLaneParameters params = LoadLaneParametersOf(node->op());
- LoadStoreLaneParams f =
- GetLoadStoreLaneParams(params.rep.representation(), params.laneidx);
+ LoadStoreLaneParams f(params.rep.representation(), params.laneidx);
InstructionCode opcode =
f.low_op ? kArmS128LoadLaneLow : kArmS128LoadLaneHigh;
opcode |= MiscField::encode(f.sz);
@@ -673,7 +637,7 @@ void InstructionSelector::VisitLoad(Node* node) {
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
}
InstructionOperand output = g.DefineAsRegister(node);
@@ -1741,10 +1705,14 @@ void InstructionSelector::EmitPrepareArguments(
}
} else {
// Push any stack arguments.
+ int stack_decrement = 0;
for (PushParameter input : base::Reversed(*arguments)) {
+ stack_decrement += kSystemPointerSize;
// Skip any alignment holes in pushed nodes.
if (input.node == nullptr) continue;
- Emit(kArmPush, g.NoOutput(), g.UseRegister(input.node));
+ InstructionOperand decrement = g.UseImmediate(stack_decrement);
+ stack_decrement = 0;
+ Emit(kArmPush, g.NoOutput(), decrement, g.UseRegister(input.node));
}
}
}
@@ -1776,8 +1744,6 @@ void InstructionSelector::EmitPrepareResults(
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
-int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
-
namespace {
// Shared routine for multiple compare operations.
@@ -2605,6 +2571,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(F32x4Neg, kArmF32x4Neg) \
V(F32x4RecipApprox, kArmF32x4RecipApprox) \
V(F32x4RecipSqrtApprox, kArmF32x4RecipSqrtApprox) \
+ V(I64x2Abs, kArmI64x2Abs) \
V(I64x2SConvertI32x4Low, kArmI64x2SConvertI32x4Low) \
V(I64x2SConvertI32x4High, kArmI64x2SConvertI32x4High) \
V(I64x2UConvertI32x4Low, kArmI64x2UConvertI32x4Low) \
@@ -2627,11 +2594,10 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I8x16Abs, kArmI8x16Abs) \
V(I8x16Popcnt, kArmVcnt) \
V(S128Not, kArmS128Not) \
- V(V32x4AnyTrue, kArmV32x4AnyTrue) \
+ V(V64x2AllTrue, kArmV64x2AllTrue) \
V(V32x4AllTrue, kArmV32x4AllTrue) \
- V(V16x8AnyTrue, kArmV16x8AnyTrue) \
V(V16x8AllTrue, kArmV16x8AllTrue) \
- V(V8x16AnyTrue, kArmV8x16AnyTrue) \
+ V(V128AnyTrue, kArmV128AnyTrue) \
V(V8x16AllTrue, kArmV8x16AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
@@ -2679,6 +2645,9 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I32x4MaxS, kArmI32x4MaxS) \
V(I32x4Eq, kArmI32x4Eq) \
V(I64x2Eq, kArmI64x2Eq) \
+ V(I64x2Ne, kArmI64x2Ne) \
+ V(I64x2GtS, kArmI64x2GtS) \
+ V(I64x2GeS, kArmI64x2GeS) \
V(I32x4Ne, kArmI32x4Ne) \
V(I32x4GtS, kArmI32x4GtS) \
V(I32x4GeS, kArmI32x4GeS) \
@@ -2844,8 +2813,7 @@ void InstructionSelector::VisitI64x2Neg(Node* node) {
void InstructionSelector::VisitI64x2Mul(Node* node) {
ArmOperandGenerator g(this);
- InstructionOperand temps[] = {g.TempSimd128Register(),
- g.TempSimd128Register()};
+ InstructionOperand temps[] = {g.TempSimd128Register()};
Emit(kArmI64x2Mul, g.DefineAsRegister(node),
g.UseUniqueRegister(node->InputAt(0)),
g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
@@ -3053,8 +3021,7 @@ namespace {
template <ArchOpcode opcode>
void VisitBitMask(InstructionSelector* selector, Node* node) {
ArmOperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempSimd128Register(),
- g.TempSimd128Register()};
+ InstructionOperand temps[] = {g.TempSimd128Register()};
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
}
@@ -3182,6 +3149,45 @@ void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
+// TODO(v8:9780)
+// These double precision conversion instructions need a low Q register (q0-q7)
+// because the codegen accesses the S registers they overlap with.
+void InstructionSelector::VisitF64x2ConvertLowI32x4S(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmF64x2ConvertLowI32x4S, g.DefineAsRegister(node),
+ g.UseFixed(node->InputAt(0), q0));
+}
+
+void InstructionSelector::VisitF64x2ConvertLowI32x4U(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmF64x2ConvertLowI32x4U, g.DefineAsRegister(node),
+ g.UseFixed(node->InputAt(0), q0));
+}
+
+void InstructionSelector::VisitI32x4TruncSatF64x2SZero(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmI32x4TruncSatF64x2SZero, g.DefineAsFixed(node, q0),
+ g.UseUniqueRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitI32x4TruncSatF64x2UZero(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmI32x4TruncSatF64x2UZero, g.DefineAsFixed(node, q0),
+ g.UseUniqueRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitF32x4DemoteF64x2Zero(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmF32x4DemoteF64x2Zero, g.DefineAsFixed(node, q0),
+ g.UseUniqueRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitF64x2PromoteLowF32x4(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmF64x2PromoteLowF32x4, g.DefineAsRegister(node),
+ g.UseFixed(node->InputAt(0), q0));
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
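
The removed GetLoadStoreLaneParams helper above is replaced by a LoadStoreLaneParams constructor that takes the machine representation directly, but the underlying mapping is unchanged: a 128-bit Q register is a pair of 64-bit D registers, so a lane index into the Q register is folded into (low or high D register, index modulo the D-register lane count). A standalone sketch of that folding with hypothetical names (plain C++ illustration, not V8's types):

#include <cassert>
#include <cstdio>

// Hypothetical stand-in for V8's LoadStoreLaneParams: given the lane width
// in bytes and a lane index into a 128-bit Q register, decide whether the
// low or high 64-bit D register is touched and what the lane index within
// that D register becomes.
struct LaneParamsSketch {
  bool low_op;     // true: low D register, false: high D register
  int lane_index;  // lane index within the chosen D register

  LaneParamsSketch(int lane_size_bytes, int q_lane_index) {
    const int d_lanes = 8 / lane_size_bytes;  // lanes per 64-bit D register
    low_op = q_lane_index < d_lanes;
    lane_index = q_lane_index % d_lanes;
  }
};

int main() {
  // 16-bit lanes: the Q register has 8 lanes, each D register has 4.
  // Lane 6 of the Q register is lane 2 of the high D register.
  LaneParamsSketch p(/*lane_size_bytes=*/2, /*q_lane_index=*/6);
  assert(!p.low_op && p.lane_index == 2);
  std::printf("low=%d lane=%d\n", p.low_op, p.lane_index);
}
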
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index 76613d1182..5b9c2e4d4f 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -375,11 +375,78 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
+class WasmOutOfLineTrap : public OutOfLineCode {
+ public:
+ WasmOutOfLineTrap(CodeGenerator* gen, Instruction* instr)
+ : OutOfLineCode(gen), gen_(gen), instr_(instr) {}
+ void Generate() override {
+ Arm64OperandConverter i(gen_, instr_);
+ TrapId trap_id =
+ static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
+ GenerateCallToTrap(trap_id);
+ }
+
+ protected:
+ CodeGenerator* gen_;
+
+ void GenerateWithTrapId(TrapId trap_id) { GenerateCallToTrap(trap_id); }
+
+ private:
+ void GenerateCallToTrap(TrapId trap_id) {
+ if (trap_id == TrapId::kInvalid) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(),
+ 0);
+ __ LeaveFrame(StackFrame::WASM);
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
+ pop_count += (pop_count & 1); // align
+ __ Drop(pop_count);
+ __ Ret();
+ } else {
+ gen_->AssembleSourcePosition(instr_);
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
+ __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
+ ReferenceMap* reference_map =
+ gen_->zone()->New<ReferenceMap>(gen_->zone());
+ gen_->RecordSafepoint(reference_map);
+ __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
+ }
+ }
+
+ Instruction* instr_;
+};
+
+class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
+ public:
+ WasmProtectedInstructionTrap(CodeGenerator* gen, int pc, Instruction* instr)
+ : WasmOutOfLineTrap(gen, instr), pc_(pc) {}
+
+ void Generate() override {
+ gen_->AddProtectedInstructionLanding(pc_, __ pc_offset());
+ GenerateWithTrapId(TrapId::kTrapMemOutOfBounds);
+ }
+
+ private:
+ int pc_;
+};
+
+void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr, int pc) {
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(AccessModeField::decode(opcode));
+ if (access_mode == kMemoryAccessProtected) {
+ zone->New<WasmProtectedInstructionTrap>(codegen, pc, instr);
+ }
+}
+
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
Arm64OperandConverter const& i) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
Register poison = value.Is64Bits() ? kSpeculationPoisonRegister
@@ -390,8 +457,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
void EmitMaybePoisonedFPLoad(CodeGenerator* codegen, InstructionCode opcode,
Arm64OperandConverter* i, VRegister output_reg) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
AddressingMode address_mode = AddressingModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned && address_mode != kMode_Root) {
UseScratchRegisterScope temps(codegen->tasm());
@@ -560,30 +626,6 @@ void CodeGenerator::AssemblePrepareTailCall() {
frame_access_state()->SetFrameAccessToSP();
}
-void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Label done;
-
- // Check if current frame is an arguments adaptor frame.
- __ Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ Cmp(scratch1,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ B(ne, &done);
-
- // Load arguments count from current arguments adaptor frame (note, it
- // does not include receiver).
- Register caller_args_count_reg = scratch1;
- __ Ldr(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
-
- __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3);
- __ bind(&done);
-}
-
namespace {
void AdjustStackPointerForTailCall(TurboAssembler* tasm,
@@ -720,13 +762,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
@@ -845,7 +881,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
__ Bind(&return_location);
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
- RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
+ RecordSafepoint(instr->reference_map());
}
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
@@ -1140,14 +1176,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
break;
case kArm64Saddlp: {
- VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
__ Saddlp(i.OutputSimd128Register().Format(dst_f),
i.InputSimd128Register(0).Format(src_f));
break;
}
case kArm64Uaddlp: {
- VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
__ Uaddlp(i.OutputSimd128Register().Format(dst_f),
i.InputSimd128Register(0).Format(src_f));
@@ -1159,7 +1195,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister32(1));
} else {
DCHECK(instr->InputAt(0)->IsSimd128Register());
- VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat src_f = VectorFormatHalfWidth(dst_f);
__ Smull(i.OutputSimd128Register().Format(dst_f),
i.InputSimd128Register(0).Format(src_f),
@@ -1168,7 +1204,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64Smull2: {
- VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
__ Smull2(i.OutputSimd128Register().Format(dst_f),
i.InputSimd128Register(0).Format(src_f),
@@ -1181,7 +1217,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister32(1));
} else {
DCHECK(instr->InputAt(0)->IsSimd128Register());
- VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat src_f = VectorFormatHalfWidth(dst_f);
__ Umull(i.OutputSimd128Register().Format(dst_f),
i.InputSimd128Register(0).Format(src_f),
@@ -1190,7 +1226,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64Umull2: {
- VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
__ Umull2(i.OutputSimd128Register().Format(dst_f),
i.InputSimd128Register(0).Format(src_f),
@@ -1477,7 +1513,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Cmn(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
break;
case kArm64Cnt: {
- VectorFormat f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
__ Cnt(i.OutputSimd128Register().Format(f),
i.InputSimd128Register(0).Format(f));
break;
@@ -1729,39 +1765,49 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Fmov(i.OutputRegister(), i.InputDoubleRegister(0));
break;
case kArm64Ldrb:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrb(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Ldrsb:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrsb(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Strb:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Strb(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64Ldrh:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrh(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Ldrsh:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrsh(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Strh:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Strh(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64Ldrsw:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrsw(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrW:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputRegister32(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64StrW:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Str(i.InputOrZeroRegister32(0), i.MemoryOperand(1));
break;
case kArm64Ldr:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
@@ -1778,27 +1824,34 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Str:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64StrCompressTagged:
__ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64LdrS:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister().S());
break;
case kArm64StrS:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Str(i.InputFloat32OrZeroRegister(0), i.MemoryOperand(1));
break;
case kArm64LdrD:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister());
break;
case kArm64StrD:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Str(i.InputFloat64OrZeroRegister(0), i.MemoryOperand(1));
break;
case kArm64LdrQ:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputSimd128Register(), i.MemoryOperand());
break;
case kArm64StrQ:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Str(i.InputSimd128Register(0), i.MemoryOperand(1));
break;
case kArm64DmbIsh:
@@ -1960,33 +2013,67 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArm64Sxtl: {
- VectorFormat wide = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat wide = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat narrow = VectorFormatHalfWidth(wide);
__ Sxtl(i.OutputSimd128Register().Format(wide),
i.InputSimd128Register(0).Format(narrow));
break;
}
case kArm64Sxtl2: {
- VectorFormat wide = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat wide = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat narrow = VectorFormatHalfWidthDoubleLanes(wide);
__ Sxtl2(i.OutputSimd128Register().Format(wide),
i.InputSimd128Register(0).Format(narrow));
break;
}
case kArm64Uxtl: {
- VectorFormat wide = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat wide = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat narrow = VectorFormatHalfWidth(wide);
__ Uxtl(i.OutputSimd128Register().Format(wide),
i.InputSimd128Register(0).Format(narrow));
break;
}
case kArm64Uxtl2: {
- VectorFormat wide = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat wide = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat narrow = VectorFormatHalfWidthDoubleLanes(wide);
__ Uxtl2(i.OutputSimd128Register().Format(wide),
i.InputSimd128Register(0).Format(narrow));
break;
}
+ case kArm64F64x2ConvertLowI32x4S: {
+ VRegister dst = i.OutputSimd128Register().V2D();
+ __ Sxtl(dst, i.InputSimd128Register(0).V2S());
+ __ Scvtf(dst, dst);
+ break;
+ }
+ case kArm64F64x2ConvertLowI32x4U: {
+ VRegister dst = i.OutputSimd128Register().V2D();
+ __ Uxtl(dst, i.InputSimd128Register(0).V2S());
+ __ Ucvtf(dst, dst);
+ break;
+ }
+ case kArm64I32x4TruncSatF64x2SZero: {
+ VRegister dst = i.OutputSimd128Register();
+ __ Fcvtzs(dst.V2D(), i.InputSimd128Register(0).V2D());
+ __ Sqxtn(dst.V2S(), dst.V2D());
+ break;
+ }
+ case kArm64I32x4TruncSatF64x2UZero: {
+ VRegister dst = i.OutputSimd128Register();
+ __ Fcvtzu(dst.V2D(), i.InputSimd128Register(0).V2D());
+ __ Uqxtn(dst.V2S(), dst.V2D());
+ break;
+ }
+ case kArm64F32x4DemoteF64x2Zero: {
+ __ Fcvtn(i.OutputSimd128Register().V2S(),
+ i.InputSimd128Register(0).V2D());
+ break;
+ }
+ case kArm64F64x2PromoteLowF32x4: {
+ __ Fcvtl(i.OutputSimd128Register().V2D(),
+ i.InputSimd128Register(0).V2S());
+ break;
+ }
case kArm64F64x2Splat: {
__ Dup(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).D(), 0);
break;
@@ -2146,6 +2233,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Mov(dst, i.InputInt8(1), i.InputRegister64(2));
break;
}
+ SIMD_UNOP_CASE(kArm64I64x2Abs, Abs, 2D);
SIMD_UNOP_CASE(kArm64I64x2Neg, Neg, 2D);
case kArm64I64x2Shl: {
ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 6, V2D, Sshl, X);
@@ -2217,20 +2305,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
SIMD_BINOP_CASE(kArm64I64x2Eq, Cmeq, 2D);
+ case kArm64I64x2Ne: {
+ VRegister dst = i.OutputSimd128Register().V2D();
+ __ Cmeq(dst, i.InputSimd128Register(0).V2D(),
+ i.InputSimd128Register(1).V2D());
+ __ Mvn(dst, dst);
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64I64x2GtS, Cmgt, 2D);
+ SIMD_BINOP_CASE(kArm64I64x2GeS, Cmge, 2D);
case kArm64I64x2ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 6, V2D, Ushl, X);
break;
}
case kArm64I64x2BitMask: {
- UseScratchRegisterScope scope(tasm());
- Register dst = i.OutputRegister32();
- VRegister src = i.InputSimd128Register(0);
- VRegister tmp1 = scope.AcquireV(kFormat2D);
- Register tmp2 = scope.AcquireX();
- __ Ushr(tmp1.V2D(), src.V2D(), 63);
- __ Mov(dst.X(), tmp1.D(), 0);
- __ Mov(tmp2.X(), tmp1.D(), 1);
- __ Add(dst.W(), dst.W(), Operand(tmp2.W(), LSL, 1));
+ __ I64x2BitMask(i.OutputRegister32(), i.InputSimd128Register(0));
break;
}
case kArm64I32x4Splat: {
@@ -2535,7 +2624,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64SignSelect: {
- VectorFormat f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
__ Cmlt(i.OutputSimd128Register().Format(f),
i.InputSimd128Register(2).Format(f), 0);
__ Bsl(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(),
@@ -2670,59 +2759,64 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_UNOP_CASE(kArm64S8x4Reverse, Rev32, 16B);
SIMD_UNOP_CASE(kArm64S8x2Reverse, Rev16, 16B);
case kArm64LoadSplat: {
- VectorFormat f = VectorFormatFillQ(MiscField::decode(opcode));
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
__ ld1r(i.OutputSimd128Register().Format(f), i.MemoryOperand(0));
break;
}
case kArm64LoadLane: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- VectorFormat f = VectorFormatFillQ(MiscField::decode(opcode));
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
int laneidx = i.InputInt8(1);
__ ld1(i.OutputSimd128Register().Format(f), laneidx, i.MemoryOperand(2));
break;
}
case kArm64StoreLane: {
- VectorFormat f = VectorFormatFillQ(MiscField::decode(opcode));
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
int laneidx = i.InputInt8(1);
__ st1(i.InputSimd128Register(0).Format(f), laneidx, i.MemoryOperand(2));
break;
}
case kArm64S128Load8x8S: {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputSimd128Register().V8B(), i.MemoryOperand(0));
__ Sxtl(i.OutputSimd128Register().V8H(), i.OutputSimd128Register().V8B());
break;
}
case kArm64S128Load8x8U: {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputSimd128Register().V8B(), i.MemoryOperand(0));
__ Uxtl(i.OutputSimd128Register().V8H(), i.OutputSimd128Register().V8B());
break;
}
case kArm64S128Load16x4S: {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputSimd128Register().V4H(), i.MemoryOperand(0));
__ Sxtl(i.OutputSimd128Register().V4S(), i.OutputSimd128Register().V4H());
break;
}
case kArm64S128Load16x4U: {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputSimd128Register().V4H(), i.MemoryOperand(0));
__ Uxtl(i.OutputSimd128Register().V4S(), i.OutputSimd128Register().V4H());
break;
}
case kArm64S128Load32x2S: {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputSimd128Register().V2S(), i.MemoryOperand(0));
__ Sxtl(i.OutputSimd128Register().V2D(), i.OutputSimd128Register().V2S());
break;
}
case kArm64S128Load32x2U: {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputSimd128Register().V2S(), i.MemoryOperand(0));
__ Uxtl(i.OutputSimd128Register().V2D(), i.OutputSimd128Register().V2S());
break;
}
- case kArm64S128Load32Zero: {
- __ Ldr(i.OutputSimd128Register().S(), i.MemoryOperand(0));
- break;
- }
- case kArm64S128Load64Zero: {
- __ Ldr(i.OutputSimd128Register().D(), i.MemoryOperand(0));
+ case kArm64V64x2AllTrue: {
+ __ V64x2AllTrue(i.OutputRegister32(), i.InputSimd128Register(0));
break;
}
#define SIMD_REDUCE_OP_CASE(Op, Instr, format, FORMAT) \
@@ -2838,50 +2932,7 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
void CodeGenerator::AssembleArchTrap(Instruction* instr,
FlagsCondition condition) {
- class OutOfLineTrap final : public OutOfLineCode {
- public:
- OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
- : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
- void Generate() final {
- Arm64OperandConverter i(gen_, instr_);
- TrapId trap_id =
- static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
- GenerateCallToTrap(trap_id);
- }
-
- private:
- void GenerateCallToTrap(TrapId trap_id) {
- if (trap_id == TrapId::kInvalid) {
- // We cannot test calls to the runtime in cctest/test-run-wasm.
- // Therefore we emit a call to C here instead of a call to the runtime.
- __ CallCFunction(
- ExternalReference::wasm_call_trap_callback_for_testing(), 0);
- __ LeaveFrame(StackFrame::WASM);
- auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
- int pop_count =
- static_cast<int>(call_descriptor->StackParameterCount());
- pop_count += (pop_count & 1); // align
- __ Drop(pop_count);
- __ Ret();
- } else {
- gen_->AssembleSourcePosition(instr_);
- // A direct call to a wasm runtime stub defined in this module.
- // Just encode the stub index. This will be patched when the code
- // is added to the native module and copied into wasm code space.
- __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
- ReferenceMap* reference_map =
- gen_->zone()->New<ReferenceMap>(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
- if (FLAG_debug_code) {
- // The trap code should never return.
- __ Brk(0);
- }
- }
- }
- Instruction* instr_;
- CodeGenerator* gen_;
- };
- auto ool = zone()->New<OutOfLineTrap>(this, instr);
+ auto ool = zone()->New<WasmOutOfLineTrap>(this, instr);
Label* tlabel = ool->entry();
Condition cc = FlagsConditionToCondition(condition);
__ B(cc, tlabel);
@@ -3051,7 +3102,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
- RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ RecordSafepoint(reference_map);
if (FLAG_debug_code) {
__ Brk(0);
}
@@ -3180,7 +3231,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
Register argc_reg = x3;
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// Functions with JS linkage have at least one parameter (the receiver).
// If {parameter_count} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
@@ -3188,9 +3238,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
parameter_count != 0;
-#else
- const bool drop_jsargs = false;
-#endif
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
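
The new kArm64F64x2ConvertLowI32x4S and kArm64I32x4TruncSatF64x2SZero cases above lower the corresponding WebAssembly SIMD conversions with Sxtl/Scvtf and Fcvtzs/Sqxtn pairs. A scalar model of what those lowerings compute (plain C++ illustration of the Wasm semantics, not V8 code):

#include <cmath>
#include <cstdint>
#include <limits>

// f64x2.convert_low_i32x4_s: widen the two low i32 lanes to f64.
void F64x2ConvertLowI32x4S(const int32_t in[4], double out[2]) {
  out[0] = static_cast<double>(in[0]);
  out[1] = static_cast<double>(in[1]);
}

// i32x4.trunc_sat_f64x2_s_zero: truncate both f64 lanes to i32 with
// saturation (NaN -> 0), put them in lanes 0-1 and zero lanes 2-3. This is
// what the saturating Fcvtzs convert plus the saturating Sqxtn narrow into
// the low half produce.
void I32x4TruncSatF64x2SZero(const double in[2], int32_t out[4]) {
  for (int i = 0; i < 2; ++i) {
    const double v = in[i];
    if (std::isnan(v)) {
      out[i] = 0;
    } else if (v <= static_cast<double>(std::numeric_limits<int32_t>::min())) {
      out[i] = std::numeric_limits<int32_t>::min();
    } else if (v >= static_cast<double>(std::numeric_limits<int32_t>::max())) {
      out[i] = std::numeric_limits<int32_t>::max();
    } else {
      out[i] = static_cast<int32_t>(std::trunc(v));
    }
  }
  out[2] = 0;
  out[3] = 0;
}

int main() {
  const double in[2] = {1e12, -0.5};
  int32_t out[4];
  I32x4TruncSatF64x2SZero(in, out);
  // out = {2147483647, 0, 0, 0}: the first lane saturates, -0.5 truncates to 0.
  return out[0] == std::numeric_limits<int32_t>::max() ? 0 : 1;
}
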
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index e56d0323fe..ee2c20372e 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -198,6 +198,9 @@ namespace compiler {
V(Arm64F64x2Qfms) \
V(Arm64F64x2Pmin) \
V(Arm64F64x2Pmax) \
+ V(Arm64F64x2ConvertLowI32x4S) \
+ V(Arm64F64x2ConvertLowI32x4U) \
+ V(Arm64F64x2PromoteLowF32x4) \
V(Arm64F32x4Splat) \
V(Arm64F32x4ExtractLane) \
V(Arm64F32x4ReplaceLane) \
@@ -223,9 +226,11 @@ namespace compiler {
V(Arm64F32x4Qfms) \
V(Arm64F32x4Pmin) \
V(Arm64F32x4Pmax) \
+ V(Arm64F32x4DemoteF64x2Zero) \
V(Arm64I64x2Splat) \
V(Arm64I64x2ExtractLane) \
V(Arm64I64x2ReplaceLane) \
+ V(Arm64I64x2Abs) \
V(Arm64I64x2Neg) \
V(Arm64I64x2Shl) \
V(Arm64I64x2ShrS) \
@@ -233,6 +238,9 @@ namespace compiler {
V(Arm64I64x2Sub) \
V(Arm64I64x2Mul) \
V(Arm64I64x2Eq) \
+ V(Arm64I64x2Ne) \
+ V(Arm64I64x2GtS) \
+ V(Arm64I64x2GeS) \
V(Arm64I64x2ShrU) \
V(Arm64I64x2BitMask) \
V(Arm64I32x4Splat) \
@@ -263,6 +271,8 @@ namespace compiler {
V(Arm64I32x4Abs) \
V(Arm64I32x4BitMask) \
V(Arm64I32x4DotI16x8S) \
+ V(Arm64I32x4TruncSatF64x2SZero) \
+ V(Arm64I32x4TruncSatF64x2UZero) \
V(Arm64I16x8Splat) \
V(Arm64I16x8ExtractLaneU) \
V(Arm64I16x8ExtractLaneS) \
@@ -368,6 +378,7 @@ namespace compiler {
V(Arm64S8x4Reverse) \
V(Arm64S8x2Reverse) \
V(Arm64V128AnyTrue) \
+ V(Arm64V64x2AllTrue) \
V(Arm64V32x4AllTrue) \
V(Arm64V16x8AllTrue) \
V(Arm64V8x16AllTrue) \
@@ -380,8 +391,6 @@ namespace compiler {
V(Arm64S128Load16x4U) \
V(Arm64S128Load32x2S) \
V(Arm64S128Load32x2U) \
- V(Arm64S128Load32Zero) \
- V(Arm64S128Load64Zero) \
V(Arm64Word64AtomicLoadUint8) \
V(Arm64Word64AtomicLoadUint16) \
V(Arm64Word64AtomicLoadUint32) \
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index 0e70a424f5..a384a84479 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -163,6 +163,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F64x2Qfms:
case kArm64F64x2Pmin:
case kArm64F64x2Pmax:
+ case kArm64F64x2ConvertLowI32x4S:
+ case kArm64F64x2ConvertLowI32x4U:
+ case kArm64F64x2PromoteLowF32x4:
case kArm64F32x4Splat:
case kArm64F32x4ExtractLane:
case kArm64F32x4ReplaceLane:
@@ -188,9 +191,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F32x4Qfms:
case kArm64F32x4Pmin:
case kArm64F32x4Pmax:
+ case kArm64F32x4DemoteF64x2Zero:
case kArm64I64x2Splat:
case kArm64I64x2ExtractLane:
case kArm64I64x2ReplaceLane:
+ case kArm64I64x2Abs:
case kArm64I64x2Neg:
case kArm64I64x2Shl:
case kArm64I64x2ShrS:
@@ -198,6 +203,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I64x2Sub:
case kArm64I64x2Mul:
case kArm64I64x2Eq:
+ case kArm64I64x2Ne:
+ case kArm64I64x2GtS:
+ case kArm64I64x2GeS:
case kArm64I64x2ShrU:
case kArm64I64x2BitMask:
case kArm64I32x4Splat:
@@ -232,6 +240,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I32x4Abs:
case kArm64I32x4BitMask:
case kArm64I32x4DotI16x8S:
+ case kArm64I32x4TruncSatF64x2SZero:
+ case kArm64I32x4TruncSatF64x2UZero:
case kArm64I16x8Splat:
case kArm64I16x8ExtractLaneU:
case kArm64I16x8ExtractLaneS:
@@ -337,6 +347,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64S8x4Reverse:
case kArm64S8x2Reverse:
case kArm64V128AnyTrue:
+ case kArm64V64x2AllTrue:
case kArm64V32x4AllTrue:
case kArm64V16x8AllTrue:
case kArm64V8x16AllTrue:
@@ -368,8 +379,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64S128Load16x4U:
case kArm64S128Load32x2S:
case kArm64S128Load32x2U:
- case kArm64S128Load32Zero:
- case kArm64S128Load64Zero:
return kIsLoadOperation;
case kArm64Claim:
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index 0f432f3bc1..091272ac4e 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -672,7 +672,10 @@ void InstructionSelector::VisitLoadLane(Node* node) {
params.rep == MachineType::Int32() || params.rep == MachineType::Int64());
InstructionCode opcode = kArm64LoadLane;
- opcode |= MiscField::encode(params.rep.MemSize() * kBitsPerByte);
+ opcode |= LaneSizeField::encode(params.rep.MemSize() * kBitsPerByte);
+ if (params.kind == MemoryAccessKind::kProtected) {
+ opcode |= AccessModeField::encode(kMemoryAccessProtected);
+ }
Arm64OperandGenerator g(this);
InstructionOperand addr = EmitAddBeforeLoadOrStore(this, node, &opcode);
@@ -686,7 +689,11 @@ void InstructionSelector::VisitStoreLane(Node* node) {
DCHECK_GE(MachineRepresentation::kWord64, params.rep);
InstructionCode opcode = kArm64StoreLane;
- opcode |= MiscField::encode(ElementSizeInBytes(params.rep) * kBitsPerByte);
+ opcode |=
+ LaneSizeField::encode(ElementSizeInBytes(params.rep) * kBitsPerByte);
+ if (params.kind == MemoryAccessKind::kProtected) {
+ opcode |= AccessModeField::encode(kMemoryAccessProtected);
+ }
Arm64OperandGenerator g(this);
InstructionOperand addr = EmitAddBeforeLoadOrStore(this, node, &opcode);
@@ -707,22 +714,22 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
switch (params.transformation) {
case LoadTransformation::kS128Load8Splat:
opcode = kArm64LoadSplat;
- opcode |= MiscField::encode(8);
+ opcode |= LaneSizeField::encode(8);
require_add = true;
break;
case LoadTransformation::kS128Load16Splat:
opcode = kArm64LoadSplat;
- opcode |= MiscField::encode(16);
+ opcode |= LaneSizeField::encode(16);
require_add = true;
break;
case LoadTransformation::kS128Load32Splat:
opcode = kArm64LoadSplat;
- opcode |= MiscField::encode(32);
+ opcode |= LaneSizeField::encode(32);
require_add = true;
break;
case LoadTransformation::kS128Load64Splat:
opcode = kArm64LoadSplat;
- opcode |= MiscField::encode(64);
+ opcode |= LaneSizeField::encode(64);
require_add = true;
break;
case LoadTransformation::kS128Load8x8S:
@@ -744,10 +751,10 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
opcode = kArm64S128Load32x2U;
break;
case LoadTransformation::kS128Load32Zero:
- opcode = kArm64S128Load32Zero;
+ opcode = kArm64LdrS;
break;
case LoadTransformation::kS128Load64Zero:
- opcode = kArm64S128Load64Zero;
+ opcode = kArm64LdrD;
break;
default:
UNIMPLEMENTED();
@@ -774,6 +781,9 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
} else {
opcode |= AddressingModeField::encode(kMode_MRR);
}
+ if (params.kind == MemoryAccessKind::kProtected) {
+ opcode |= AccessModeField::encode(kMemoryAccessProtected);
+ }
Emit(opcode, 1, outputs, 2, inputs);
}
@@ -844,7 +854,10 @@ void InstructionSelector::VisitLoad(Node* node) {
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
+ }
+ if (node->opcode() == IrOpcode::kProtectedLoad) {
+ opcode |= AccessModeField::encode(kMemoryAccessProtected);
}
EmitLoad(this, node, opcode, immediate_mode, rep);
@@ -852,10 +865,7 @@ void InstructionSelector::VisitLoad(Node* node) {
void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-void InstructionSelector::VisitProtectedLoad(Node* node) {
- // TODO(eholk)
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitStore(Node* node) {
Arm64OperandGenerator g(this);
@@ -987,14 +997,15 @@ void InstructionSelector::VisitStore(Node* node) {
opcode |= AddressingModeField::encode(kMode_MRR);
}
+ if (node->opcode() == IrOpcode::kProtectedStore) {
+ opcode |= AccessModeField::encode(kMemoryAccessProtected);
+ }
+
Emit(opcode, 0, nullptr, input_count, inputs);
}
}
-void InstructionSelector::VisitProtectedStore(Node* node) {
- // TODO(eholk)
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitProtectedStore(Node* node) { VisitStore(node); }
void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
UNREACHABLE();
@@ -1735,7 +1746,7 @@ namespace {
void VisitExtMul(InstructionSelector* selector, ArchOpcode opcode, Node* node,
int dst_lane_size) {
InstructionCode code = opcode;
- code |= MiscField::encode(dst_lane_size);
+ code |= LaneSizeField::encode(dst_lane_size);
VisitRRR(selector, code, node);
}
} // namespace
@@ -1792,7 +1803,7 @@ namespace {
void VisitExtAddPairwise(InstructionSelector* selector, ArchOpcode opcode,
Node* node, int dst_lane_size) {
InstructionCode code = opcode;
- code |= MiscField::encode(dst_lane_size);
+ code |= LaneSizeField::encode(dst_lane_size);
VisitRR(selector, code, node);
}
} // namespace
@@ -2091,21 +2102,24 @@ void InstructionSelector::EmitPrepareArguments(
// Poke the arguments into the stack.
while (slot >= 0) {
PushParameter input0 = (*arguments)[slot];
+ // Skip holes in the param array. These represent both extra slots for
+ // multi-slot values and padding slots for alignment.
+ if (input0.node == nullptr) {
+ slot--;
+ continue;
+ }
PushParameter input1 = slot > 0 ? (*arguments)[slot - 1] : PushParameter();
// Emit a poke-pair if consecutive parameters have the same type.
// TODO(arm): Support consecutive Simd128 parameters.
- if (input0.node != nullptr && input1.node != nullptr &&
+ if (input1.node != nullptr &&
input0.location.GetType() == input1.location.GetType()) {
Emit(kArm64PokePair, g.NoOutput(), g.UseRegister(input0.node),
g.UseRegister(input1.node), g.TempImmediate(slot));
slot -= 2;
- } else if (input0.node != nullptr) {
+ } else {
Emit(kArm64Poke, g.NoOutput(), g.UseRegister(input0.node),
g.TempImmediate(slot));
slot--;
- } else {
- // Skip any alignment holes in pushed nodes.
- slot--;
}
}
}
@@ -2139,8 +2153,6 @@ void InstructionSelector::EmitPrepareResults(
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
-int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
-
namespace {
// Shared routine for multiple compare operations.
@@ -3397,36 +3409,42 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8) \
V(I8x16)
-#define SIMD_UNOP_LIST(V) \
- V(F64x2Abs, kArm64F64x2Abs) \
- V(F64x2Neg, kArm64F64x2Neg) \
- V(F64x2Sqrt, kArm64F64x2Sqrt) \
- V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \
- V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \
- V(F32x4Abs, kArm64F32x4Abs) \
- V(F32x4Neg, kArm64F32x4Neg) \
- V(F32x4Sqrt, kArm64F32x4Sqrt) \
- V(F32x4RecipApprox, kArm64F32x4RecipApprox) \
- V(F32x4RecipSqrtApprox, kArm64F32x4RecipSqrtApprox) \
- V(I64x2Neg, kArm64I64x2Neg) \
- V(I64x2BitMask, kArm64I64x2BitMask) \
- V(I32x4SConvertF32x4, kArm64I32x4SConvertF32x4) \
- V(I32x4Neg, kArm64I32x4Neg) \
- V(I32x4UConvertF32x4, kArm64I32x4UConvertF32x4) \
- V(I32x4Abs, kArm64I32x4Abs) \
- V(I32x4BitMask, kArm64I32x4BitMask) \
- V(I16x8Neg, kArm64I16x8Neg) \
- V(I16x8Abs, kArm64I16x8Abs) \
- V(I16x8BitMask, kArm64I16x8BitMask) \
- V(I8x16Neg, kArm64I8x16Neg) \
- V(I8x16Abs, kArm64I8x16Abs) \
- V(I8x16BitMask, kArm64I8x16BitMask) \
- V(S128Not, kArm64S128Not) \
- V(V32x4AnyTrue, kArm64V128AnyTrue) \
- V(V32x4AllTrue, kArm64V32x4AllTrue) \
- V(V16x8AnyTrue, kArm64V128AnyTrue) \
- V(V16x8AllTrue, kArm64V16x8AllTrue) \
- V(V8x16AnyTrue, kArm64V128AnyTrue) \
+#define SIMD_UNOP_LIST(V) \
+ V(F64x2Abs, kArm64F64x2Abs) \
+ V(F64x2Neg, kArm64F64x2Neg) \
+ V(F64x2Sqrt, kArm64F64x2Sqrt) \
+ V(F64x2ConvertLowI32x4S, kArm64F64x2ConvertLowI32x4S) \
+ V(F64x2ConvertLowI32x4U, kArm64F64x2ConvertLowI32x4U) \
+ V(F64x2PromoteLowF32x4, kArm64F64x2PromoteLowF32x4) \
+ V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \
+ V(F32x4Abs, kArm64F32x4Abs) \
+ V(F32x4Neg, kArm64F32x4Neg) \
+ V(F32x4Sqrt, kArm64F32x4Sqrt) \
+ V(F32x4RecipApprox, kArm64F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox, kArm64F32x4RecipSqrtApprox) \
+ V(F32x4DemoteF64x2Zero, kArm64F32x4DemoteF64x2Zero) \
+ V(I64x2Abs, kArm64I64x2Abs) \
+ V(I64x2Neg, kArm64I64x2Neg) \
+ V(I64x2BitMask, kArm64I64x2BitMask) \
+ V(I32x4SConvertF32x4, kArm64I32x4SConvertF32x4) \
+ V(I32x4Neg, kArm64I32x4Neg) \
+ V(I32x4UConvertF32x4, kArm64I32x4UConvertF32x4) \
+ V(I32x4Abs, kArm64I32x4Abs) \
+ V(I32x4BitMask, kArm64I32x4BitMask) \
+ V(I32x4TruncSatF64x2SZero, kArm64I32x4TruncSatF64x2SZero) \
+ V(I32x4TruncSatF64x2UZero, kArm64I32x4TruncSatF64x2UZero) \
+ V(I16x8Neg, kArm64I16x8Neg) \
+ V(I16x8Abs, kArm64I16x8Abs) \
+ V(I16x8BitMask, kArm64I16x8BitMask) \
+ V(I8x16Neg, kArm64I8x16Neg) \
+ V(I8x16Abs, kArm64I8x16Abs) \
+ V(I8x16BitMask, kArm64I8x16BitMask) \
+ V(S128Not, kArm64S128Not) \
+ V(V128AnyTrue, kArm64V128AnyTrue) \
+ V(V64x2AllTrue, kArm64V64x2AllTrue) \
+ V(V32x4AllTrue, kArm64V32x4AllTrue) \
+ V(V16x8AllTrue, kArm64V16x8AllTrue) \
V(V8x16AllTrue, kArm64V8x16AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
@@ -3468,6 +3486,9 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I64x2Add, kArm64I64x2Add) \
V(I64x2Sub, kArm64I64x2Sub) \
V(I64x2Eq, kArm64I64x2Eq) \
+ V(I64x2Ne, kArm64I64x2Ne) \
+ V(I64x2GtS, kArm64I64x2GtS) \
+ V(I64x2GeS, kArm64I64x2GeS) \
V(I32x4AddHoriz, kArm64I32x4AddHoriz) \
V(I32x4Mul, kArm64I32x4Mul) \
V(I32x4MinS, kArm64I32x4MinS) \
@@ -3603,7 +3624,7 @@ SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#define VISIT_SIGN_SELECT(NAME, SIZE) \
void InstructionSelector::Visit##NAME(Node* node) { \
InstructionCode opcode = kArm64SignSelect; \
- opcode |= MiscField::encode(SIZE); \
+ opcode |= LaneSizeField::encode(SIZE); \
VisitRRRR(this, opcode, node); \
}
@@ -3886,7 +3907,7 @@ namespace {
void VisitSignExtendLong(InstructionSelector* selector, ArchOpcode opcode,
Node* node, int lane_size) {
InstructionCode code = opcode;
- code |= MiscField::encode(lane_size);
+ code |= LaneSizeField::encode(lane_size);
VisitRR(selector, code, node);
}
} // namespace
@@ -3941,7 +3962,7 @@ void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
void InstructionSelector::VisitI8x16Popcnt(Node* node) {
InstructionCode code = kArm64Cnt;
- code |= MiscField::encode(8);
+ code |= LaneSizeField::encode(8);
VisitRR(this, code, node);
}
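
Throughout the arm64 selector above, MiscField::encode is replaced by LaneSizeField::encode, and AccessModeField::encode is ORed in when a memory access is protected or poisoned: the InstructionCode word packs several independent bit fields. A minimal standalone sketch of that packing scheme, with made-up field layouts (plain C++; the real field widths and positions live in V8's instruction-codes headers):

#include <cstdint>
#include <cstdio>

// Minimal BitField-style helper: each field owns a [Shift, Shift+Size) slice
// of a 32-bit opcode word and can encode/decode independently of the others.
template <typename T, int Shift, int Size>
struct BitFieldSketch {
  static constexpr uint32_t kMask = ((uint32_t{1} << Size) - 1) << Shift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << Shift;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> Shift);
  }
};

enum MemoryAccessModeSketch { kNormal = 0, kPoisoned = 1, kProtected = 2 };

// Hypothetical layouts, chosen only so the two fields do not overlap.
using LaneSizeFieldSketch = BitFieldSketch<int, 22, 8>;
using AccessModeFieldSketch = BitFieldSketch<MemoryAccessModeSketch, 20, 2>;

int main() {
  uint32_t opcode = 0;
  opcode |= LaneSizeFieldSketch::encode(64);            // e.g. 64-bit lanes
  opcode |= AccessModeFieldSketch::encode(kProtected);  // trap-on-OOB access
  std::printf("lane=%d protected=%d\n", LaneSizeFieldSketch::decode(opcode),
              AccessModeFieldSketch::decode(opcode) == kProtected);
}
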
diff --git a/deps/v8/src/compiler/backend/code-generator-impl.h b/deps/v8/src/compiler/backend/code-generator-impl.h
index c94ca74f73..bc5aa579d6 100644
--- a/deps/v8/src/compiler/backend/code-generator-impl.h
+++ b/deps/v8/src/compiler/backend/code-generator-impl.h
@@ -187,7 +187,7 @@ class InstructionOperandConverter {
// Deoptimization exit.
class DeoptimizationExit : public ZoneObject {
public:
- explicit DeoptimizationExit(SourcePosition pos, BailoutId bailout_id,
+ explicit DeoptimizationExit(SourcePosition pos, BytecodeOffset bailout_id,
int translation_id, int pc_offset,
DeoptimizeKind kind, DeoptimizeReason reason)
: deoptimization_id_(kNoDeoptIndex),
@@ -215,7 +215,7 @@ class DeoptimizationExit : public ZoneObject {
Label* label() { return &label_; }
// The label after the deoptimization check, which will resume execution.
Label* continue_label() { return &continue_label_; }
- BailoutId bailout_id() const { return bailout_id_; }
+ BytecodeOffset bailout_id() const { return bailout_id_; }
int translation_id() const { return translation_id_; }
int pc_offset() const { return pc_offset_; }
DeoptimizeKind kind() const { return kind_; }
@@ -238,7 +238,7 @@ class DeoptimizationExit : public ZoneObject {
const SourcePosition pos_;
Label label_;
Label continue_label_;
- const BailoutId bailout_id_;
+ const BytecodeOffset bailout_id_;
const int translation_id_;
const int pc_offset_;
const DeoptimizeKind kind_;
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index 83f8fbc4e8..e9a39f74a9 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -606,9 +606,8 @@ bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const {
.IsNext(instructions()->InstructionBlockAt(block)->ao_number());
}
-void CodeGenerator::RecordSafepoint(ReferenceMap* references,
- Safepoint::DeoptMode deopt_mode) {
- Safepoint safepoint = safepoints()->DefineSafepoint(tasm(), deopt_mode);
+void CodeGenerator::RecordSafepoint(ReferenceMap* references) {
+ Safepoint safepoint = safepoints()->DefineSafepoint(tasm());
int frame_header_offset = frame()->GetFixedSlotCount();
for (const InstructionOperand& operand : references->reference_operands()) {
if (operand.IsStackSlot()) {
@@ -856,16 +855,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
DeoptImmedArgsCountField::decode(instr->opcode());
DeoptimizationExit* const exit = AddDeoptimizationExit(
instr, frame_state_offset, immediate_args_count);
+ Label continue_label;
BranchInfo branch;
branch.condition = condition;
branch.true_label = exit->label();
- branch.false_label = exit->continue_label();
+ branch.false_label = &continue_label;
branch.fallthru = true;
AssembleArchDeoptBranch(instr, &branch);
- tasm()->bind(exit->continue_label());
+ tasm()->bind(&continue_label);
if (mode == kFlags_deoptimize_and_poison) {
AssembleBranchPoisoning(NegateFlagsCondition(branch.condition), instr);
}
+ tasm()->bind(exit->continue_label());
break;
}
case kFlags_set: {
@@ -985,8 +986,8 @@ Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() {
Handle<DeoptimizationData> data =
DeoptimizationData::New(isolate(), deopt_count, AllocationType::kOld);
- Handle<ByteArray> translation_array =
- translations_.CreateByteArray(isolate()->factory());
+ Handle<TranslationArray> translation_array =
+ translations_.ToTranslationArray(isolate()->factory());
data->SetTranslationByteArray(*translation_array);
data->SetInlinedFunctionCount(
@@ -1022,7 +1023,7 @@ Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() {
data->SetOsrBytecodeOffset(Smi::FromInt(info_->osr_offset().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
} else {
- BailoutId osr_offset = BailoutId::None();
+ BytecodeOffset osr_offset = BytecodeOffset::None();
data->SetOsrBytecodeOffset(Smi::FromInt(osr_offset.ToInt()));
data->SetOsrPcOffset(Smi::FromInt(-1));
}
@@ -1049,9 +1050,7 @@ Label* CodeGenerator::AddJumpTable(Label** targets, size_t target_count) {
void CodeGenerator::RecordCallPosition(Instruction* instr) {
const bool needs_frame_state =
instr->HasCallDescriptorFlag(CallDescriptor::kNeedsFrameState);
- RecordSafepoint(instr->reference_map(), needs_frame_state
- ? Safepoint::kLazyDeopt
- : Safepoint::kNoLazyDeopt);
+ RecordSafepoint(instr->reference_map());
if (instr->HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler)) {
InstructionOperandConverter i(this, instr);
@@ -1094,66 +1093,49 @@ DeoptimizationEntry const& CodeGenerator::GetDeoptimizationEntry(
void CodeGenerator::TranslateStateValueDescriptor(
StateValueDescriptor* desc, StateValueList* nested,
- Translation* translation, InstructionOperandIterator* iter) {
- // Note:
- // If translation is null, we just skip the relevant instruction operands.
+ InstructionOperandIterator* iter) {
if (desc->IsNested()) {
- if (translation != nullptr) {
- translation->BeginCapturedObject(static_cast<int>(nested->size()));
- }
+ translations_.BeginCapturedObject(static_cast<int>(nested->size()));
for (auto field : *nested) {
- TranslateStateValueDescriptor(field.desc, field.nested, translation,
- iter);
+ TranslateStateValueDescriptor(field.desc, field.nested, iter);
}
} else if (desc->IsArgumentsElements()) {
- if (translation != nullptr) {
- translation->ArgumentsElements(desc->arguments_type());
- }
+ translations_.ArgumentsElements(desc->arguments_type());
} else if (desc->IsArgumentsLength()) {
- if (translation != nullptr) {
- translation->ArgumentsLength();
- }
+ translations_.ArgumentsLength();
} else if (desc->IsDuplicate()) {
- if (translation != nullptr) {
- translation->DuplicateObject(static_cast<int>(desc->id()));
- }
+ translations_.DuplicateObject(static_cast<int>(desc->id()));
} else if (desc->IsPlain()) {
InstructionOperand* op = iter->Advance();
- if (translation != nullptr) {
- AddTranslationForOperand(translation, iter->instruction(), op,
- desc->type());
- }
+ AddTranslationForOperand(iter->instruction(), op, desc->type());
} else {
DCHECK(desc->IsOptimizedOut());
- if (translation != nullptr) {
if (optimized_out_literal_id_ == -1) {
optimized_out_literal_id_ = DefineDeoptimizationLiteral(
DeoptimizationLiteral(isolate()->factory()->optimized_out()));
}
- translation->StoreLiteral(optimized_out_literal_id_);
- }
+ translations_.StoreLiteral(optimized_out_literal_id_);
}
}
void CodeGenerator::TranslateFrameStateDescriptorOperands(
- FrameStateDescriptor* desc, InstructionOperandIterator* iter,
- Translation* translation) {
+ FrameStateDescriptor* desc, InstructionOperandIterator* iter) {
size_t index = 0;
StateValueList* values = desc->GetStateValueDescriptors();
for (StateValueList::iterator it = values->begin(); it != values->end();
++it, ++index) {
- TranslateStateValueDescriptor((*it).desc, (*it).nested, translation, iter);
+ TranslateStateValueDescriptor((*it).desc, (*it).nested, iter);
}
DCHECK_EQ(desc->GetSize(), index);
}
void CodeGenerator::BuildTranslationForFrameStateDescriptor(
FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
- Translation* translation, OutputFrameStateCombine state_combine) {
+ OutputFrameStateCombine state_combine) {
// Outer-most state must be added to translation first.
if (descriptor->outer_state() != nullptr) {
BuildTranslationForFrameStateDescriptor(descriptor->outer_state(), iter,
- translation, state_combine);
+ state_combine);
}
Handle<SharedFunctionInfo> shared_info;
@@ -1164,49 +1146,57 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
shared_info = info()->shared_info();
}
- const BailoutId bailout_id = descriptor->bailout_id();
+ const BytecodeOffset bailout_id = descriptor->bailout_id();
const int shared_info_id =
DefineDeoptimizationLiteral(DeoptimizationLiteral(shared_info));
const unsigned int height =
static_cast<unsigned int>(descriptor->GetHeight());
switch (descriptor->type()) {
- case FrameStateType::kInterpretedFunction: {
+ case FrameStateType::kUnoptimizedFunction: {
int return_offset = 0;
int return_count = 0;
if (!state_combine.IsOutputIgnored()) {
return_offset = static_cast<int>(state_combine.GetOffsetToPokeAt());
return_count = static_cast<int>(iter->instruction()->OutputCount());
}
- translation->BeginInterpretedFrame(bailout_id, shared_info_id, height,
- return_offset, return_count);
+ translations_.BeginInterpretedFrame(bailout_id, shared_info_id, height,
+ return_offset, return_count);
break;
}
case FrameStateType::kArgumentsAdaptor:
- translation->BeginArgumentsAdaptorFrame(shared_info_id, height);
+ translations_.BeginArgumentsAdaptorFrame(shared_info_id, height);
break;
case FrameStateType::kConstructStub:
DCHECK(bailout_id.IsValidForConstructStub());
- translation->BeginConstructStubFrame(bailout_id, shared_info_id, height);
+ translations_.BeginConstructStubFrame(bailout_id, shared_info_id, height);
break;
case FrameStateType::kBuiltinContinuation: {
- translation->BeginBuiltinContinuationFrame(bailout_id, shared_info_id,
- height);
+ translations_.BeginBuiltinContinuationFrame(bailout_id, shared_info_id,
+ height);
+ break;
+ }
+ case FrameStateType::kJSToWasmBuiltinContinuation: {
+ const JSToWasmFrameStateDescriptor* js_to_wasm_descriptor =
+ static_cast<const JSToWasmFrameStateDescriptor*>(descriptor);
+ translations_.BeginJSToWasmBuiltinContinuationFrame(
+ bailout_id, shared_info_id, height,
+ js_to_wasm_descriptor->return_type());
break;
}
case FrameStateType::kJavaScriptBuiltinContinuation: {
- translation->BeginJavaScriptBuiltinContinuationFrame(
+ translations_.BeginJavaScriptBuiltinContinuationFrame(
bailout_id, shared_info_id, height);
break;
}
case FrameStateType::kJavaScriptBuiltinContinuationWithCatch: {
- translation->BeginJavaScriptBuiltinContinuationWithCatchFrame(
+ translations_.BeginJavaScriptBuiltinContinuationWithCatchFrame(
bailout_id, shared_info_id, height);
break;
}
}
- TranslateFrameStateDescriptorOperands(descriptor, iter, translation);
+ TranslateFrameStateDescriptorOperands(descriptor, iter);
}
DeoptimizationExit* CodeGenerator::BuildTranslation(
@@ -1217,23 +1207,21 @@ DeoptimizationExit* CodeGenerator::BuildTranslation(
FrameStateDescriptor* const descriptor = entry.descriptor();
frame_state_offset++;
- int update_feedback_count = entry.feedback().IsValid() ? 1 : 0;
- Translation translation(&translations_,
- static_cast<int>(descriptor->GetFrameCount()),
- static_cast<int>(descriptor->GetJSFrameCount()),
- update_feedback_count, zone());
+ const int update_feedback_count = entry.feedback().IsValid() ? 1 : 0;
+ const int translation_index = translations_.BeginTranslation(
+ static_cast<int>(descriptor->GetFrameCount()),
+ static_cast<int>(descriptor->GetJSFrameCount()), update_feedback_count);
if (entry.feedback().IsValid()) {
DeoptimizationLiteral literal =
DeoptimizationLiteral(entry.feedback().vector);
int literal_id = DefineDeoptimizationLiteral(literal);
- translation.AddUpdateFeedback(literal_id, entry.feedback().slot.ToInt());
+ translations_.AddUpdateFeedback(literal_id, entry.feedback().slot.ToInt());
}
InstructionOperandIterator iter(instr, frame_state_offset);
- BuildTranslationForFrameStateDescriptor(descriptor, &iter, &translation,
- state_combine);
+ BuildTranslationForFrameStateDescriptor(descriptor, &iter, state_combine);
DeoptimizationExit* const exit = zone()->New<DeoptimizationExit>(
- current_source_position_, descriptor->bailout_id(), translation.index(),
+ current_source_position_, descriptor->bailout_id(), translation_index,
pc_offset, entry.kind(), entry.reason());
if (!Deoptimizer::kSupportsFixedDeoptExitSizes) {
@@ -1253,21 +1241,20 @@ DeoptimizationExit* CodeGenerator::BuildTranslation(
return exit;
}
-void CodeGenerator::AddTranslationForOperand(Translation* translation,
- Instruction* instr,
+void CodeGenerator::AddTranslationForOperand(Instruction* instr,
InstructionOperand* op,
MachineType type) {
if (op->IsStackSlot()) {
if (type.representation() == MachineRepresentation::kBit) {
- translation->StoreBoolStackSlot(LocationOperand::cast(op)->index());
+ translations_.StoreBoolStackSlot(LocationOperand::cast(op)->index());
} else if (type == MachineType::Int8() || type == MachineType::Int16() ||
type == MachineType::Int32()) {
- translation->StoreInt32StackSlot(LocationOperand::cast(op)->index());
+ translations_.StoreInt32StackSlot(LocationOperand::cast(op)->index());
} else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
type == MachineType::Uint32()) {
- translation->StoreUint32StackSlot(LocationOperand::cast(op)->index());
+ translations_.StoreUint32StackSlot(LocationOperand::cast(op)->index());
} else if (type == MachineType::Int64()) {
- translation->StoreInt64StackSlot(LocationOperand::cast(op)->index());
+ translations_.StoreInt64StackSlot(LocationOperand::cast(op)->index());
} else {
#if defined(V8_COMPRESS_POINTERS)
CHECK(MachineRepresentation::kTagged == type.representation() ||
@@ -1275,27 +1262,27 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
#else
CHECK(MachineRepresentation::kTagged == type.representation());
#endif
- translation->StoreStackSlot(LocationOperand::cast(op)->index());
+ translations_.StoreStackSlot(LocationOperand::cast(op)->index());
}
} else if (op->IsFPStackSlot()) {
if (type.representation() == MachineRepresentation::kFloat64) {
- translation->StoreDoubleStackSlot(LocationOperand::cast(op)->index());
+ translations_.StoreDoubleStackSlot(LocationOperand::cast(op)->index());
} else {
CHECK_EQ(MachineRepresentation::kFloat32, type.representation());
- translation->StoreFloatStackSlot(LocationOperand::cast(op)->index());
+ translations_.StoreFloatStackSlot(LocationOperand::cast(op)->index());
}
} else if (op->IsRegister()) {
InstructionOperandConverter converter(this, instr);
if (type.representation() == MachineRepresentation::kBit) {
- translation->StoreBoolRegister(converter.ToRegister(op));
+ translations_.StoreBoolRegister(converter.ToRegister(op));
} else if (type == MachineType::Int8() || type == MachineType::Int16() ||
type == MachineType::Int32()) {
- translation->StoreInt32Register(converter.ToRegister(op));
+ translations_.StoreInt32Register(converter.ToRegister(op));
} else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
type == MachineType::Uint32()) {
- translation->StoreUint32Register(converter.ToRegister(op));
+ translations_.StoreUint32Register(converter.ToRegister(op));
} else if (type == MachineType::Int64()) {
- translation->StoreInt64Register(converter.ToRegister(op));
+ translations_.StoreInt64Register(converter.ToRegister(op));
} else {
#if defined(V8_COMPRESS_POINTERS)
CHECK(MachineRepresentation::kTagged == type.representation() ||
@@ -1303,15 +1290,15 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
#else
CHECK(MachineRepresentation::kTagged == type.representation());
#endif
- translation->StoreRegister(converter.ToRegister(op));
+ translations_.StoreRegister(converter.ToRegister(op));
}
} else if (op->IsFPRegister()) {
InstructionOperandConverter converter(this, instr);
if (type.representation() == MachineRepresentation::kFloat64) {
- translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
+ translations_.StoreDoubleRegister(converter.ToDoubleRegister(op));
} else {
CHECK_EQ(MachineRepresentation::kFloat32, type.representation());
- translation->StoreFloatRegister(converter.ToFloatRegister(op));
+ translations_.StoreFloatRegister(converter.ToFloatRegister(op));
}
} else {
CHECK(op->IsImmediate());
@@ -1390,10 +1377,10 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
UNREACHABLE();
}
if (literal.object().equals(info()->closure())) {
- translation->StoreJSFrameFunction();
+ translations_.StoreJSFrameFunction();
} else {
int literal_id = DefineDeoptimizationLiteral(literal);
- translation->StoreLiteral(literal_id);
+ translations_.StoreLiteral(literal_id);
}
}
}
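
The hunks above replace the old pattern of building a local Translation object per deoptimization exit with direct calls on the translations_ member (now a TranslationArrayBuilder), so BeginTranslation hands back the index that BuildTranslation previously read from translation.index(), and none of the Translate*/AddTranslationForOperand helpers need a Translation* parameter any more. A minimal sketch of that builder-returns-index pattern, using invented names and layout rather than V8's real TranslationArrayBuilder interface:

// Illustrative only; method names and storage are assumptions, not the real
// v8::internal::TranslationArrayBuilder.
#include <cstddef>
#include <vector>

class TranslationArrayBuilderSketch {
 public:
  // Starts a new translation and returns its index, replacing the value the
  // old code obtained from Translation::index().
  int BeginTranslation(int frame_count, int js_frame_count,
                       int update_feedback_count) {
    int index = static_cast<int>(starts_.size());
    starts_.push_back(encoded_.size());
    encoded_.push_back(frame_count);
    encoded_.push_back(js_frame_count);
    encoded_.push_back(update_feedback_count);
    return index;
  }
  // All Store*/Begin*Frame helpers simply append to the same growing array.
  void StoreLiteral(int literal_id) { encoded_.push_back(literal_id); }

 private:
  std::vector<int> encoded_;
  std::vector<std::size_t> starts_;
};
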
diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h
index 9829a070ec..7cead5dbde 100644
--- a/deps/v8/src/compiler/backend/code-generator.h
+++ b/deps/v8/src/compiler/backend/code-generator.h
@@ -160,8 +160,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
void AssembleSourcePosition(SourcePosition source_position);
// Record a safepoint with the given pointer map.
- void RecordSafepoint(ReferenceMap* references,
- Safepoint::DeoptMode deopt_mode);
+ void RecordSafepoint(ReferenceMap* references);
Zone* zone() const { return zone_; }
TurboAssembler* tasm() { return &tasm_; }
@@ -298,10 +297,6 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
// Generates code to manipulate the stack in preparation for a tail call.
void AssemblePrepareTailCall();
- // Generates code to pop current frame if it is an arguments adaptor frame.
- void AssemblePopArgumentsAdaptorFrame(Register args_reg, Register scratch1,
- Register scratch2, Register scratch3);
-
enum PushTypeFlag {
kImmediatePush = 0x1,
kRegisterPush = 0x2,
@@ -398,16 +393,14 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
OutputFrameStateCombine state_combine);
void BuildTranslationForFrameStateDescriptor(
FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
- Translation* translation, OutputFrameStateCombine state_combine);
+ OutputFrameStateCombine state_combine);
void TranslateStateValueDescriptor(StateValueDescriptor* desc,
StateValueList* nested,
- Translation* translation,
InstructionOperandIterator* iter);
void TranslateFrameStateDescriptorOperands(FrameStateDescriptor* desc,
- InstructionOperandIterator* iter,
- Translation* translation);
- void AddTranslationForOperand(Translation* translation, Instruction* instr,
- InstructionOperand* op, MachineType type);
+ InstructionOperandIterator* iter);
+ void AddTranslationForOperand(Instruction* instr, InstructionOperand* op,
+ MachineType type);
void MarkLazyDeoptSite();
void PrepareForDeoptimizationExits(ZoneDeque<DeoptimizationExit*>* exits);
@@ -448,7 +441,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
ZoneDeque<DeoptimizationExit*> deoptimization_exits_;
ZoneDeque<DeoptimizationLiteral> deoptimization_literals_;
size_t inlined_function_count_ = 0;
- TranslationBuffer translations_;
+ TranslationArrayBuilder translations_;
int handler_table_offset_ = 0;
int last_lazy_deopt_pc_ = 0;
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 45a2c59597..77a4d92b96 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -574,43 +574,6 @@ void CodeGenerator::AssemblePrepareTailCall() {
frame_access_state()->SetFrameAccessToSP();
}
-void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
- Register, Register,
- Register) {
- // There are not enough temp registers left on ia32 for a call instruction
- // so we pick some scratch registers and save/restore them manually here.
- int scratch_count = 3;
- Register scratch1 = esi;
- Register scratch2 = ecx;
- Register scratch3 = edx;
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Label done;
-
- // Check if current frame is an arguments adaptor frame.
- __ cmp(Operand(ebp, StandardFrameConstants::kContextOffset),
- Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &done, Label::kNear);
-
- __ push(scratch1);
- __ push(scratch2);
- __ push(scratch3);
-
- // Load arguments count from current arguments adaptor frame (note, it
- // does not include receiver).
- Register caller_args_count_reg = scratch1;
- __ mov(caller_args_count_reg,
- Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
-
- __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3,
- scratch_count);
- __ pop(scratch3);
- __ pop(scratch2);
- __ pop(scratch1);
-
- __ bind(&done);
-}
-
namespace {
void AdjustStackPointerForTailCall(TurboAssembler* tasm,
@@ -794,12 +757,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- no_reg, no_reg, no_reg);
- }
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = i.InputCode(0);
__ Jump(code, RelocInfo::CODE_TARGET);
@@ -925,7 +883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
__ bind(&return_location);
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
- RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
+ RecordSafepoint(instr->reference_map());
}
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
@@ -1824,69 +1782,58 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kIA32PushFloat32:
- if (instr->InputAt(0)->IsFPRegister()) {
- __ AllocateStackSpace(kFloatSize);
- __ Movss(Operand(esp, 0), i.InputDoubleRegister(0));
- frame_access_state()->IncreaseSPDelta(kFloatSize / kSystemPointerSize);
- } else if (HasImmediateInput(instr, 0)) {
- __ Move(kScratchDoubleReg, i.InputFloat32(0));
- __ AllocateStackSpace(kFloatSize);
- __ Movss(Operand(esp, 0), kScratchDoubleReg);
- frame_access_state()->IncreaseSPDelta(kFloatSize / kSystemPointerSize);
- } else {
- __ Movss(kScratchDoubleReg, i.InputOperand(0));
- __ AllocateStackSpace(kFloatSize);
- __ Movss(Operand(esp, 0), kScratchDoubleReg);
- frame_access_state()->IncreaseSPDelta(kFloatSize / kSystemPointerSize);
- }
- break;
- case kIA32PushFloat64:
- if (instr->InputAt(0)->IsFPRegister()) {
- __ AllocateStackSpace(kDoubleSize);
- __ Movsd(Operand(esp, 0), i.InputDoubleRegister(0));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
- } else if (HasImmediateInput(instr, 0)) {
- __ Move(kScratchDoubleReg, i.InputDouble(0));
- __ AllocateStackSpace(kDoubleSize);
- __ Movsd(Operand(esp, 0), kScratchDoubleReg);
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
- } else {
- __ Movsd(kScratchDoubleReg, i.InputOperand(0));
- __ AllocateStackSpace(kDoubleSize);
- __ Movsd(Operand(esp, 0), kScratchDoubleReg);
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
- }
- break;
- case kIA32PushSimd128:
- if (instr->InputAt(0)->IsFPRegister()) {
- __ AllocateStackSpace(kSimd128Size);
- __ Movups(Operand(esp, 0), i.InputSimd128Register(0));
- } else {
- __ Movups(kScratchDoubleReg, i.InputOperand(0));
- __ AllocateStackSpace(kSimd128Size);
- __ Movups(Operand(esp, 0), kScratchDoubleReg);
- }
- frame_access_state()->IncreaseSPDelta(kSimd128Size / kSystemPointerSize);
- break;
- case kIA32Push:
- if (HasAddressingMode(instr)) {
- size_t index = 0;
+ case kIA32Push: {
+ int stack_decrement = i.InputInt32(0);
+ int slots = stack_decrement / kSystemPointerSize;
+ // Whenever codegen uses push, we need to check if stack_decrement
+ // contains any extra padding and adjust the stack before the push.
+ if (HasImmediateInput(instr, 1)) {
+ __ AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ __ push(i.InputImmediate(1));
+ } else if (HasAddressingMode(instr)) {
+ // Only single slot pushes from memory are supported.
+ __ AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ size_t index = 1;
Operand operand = i.MemoryOperand(&index);
__ push(operand);
- frame_access_state()->IncreaseSPDelta(kFloatSize / kSystemPointerSize);
- } else if (instr->InputAt(0)->IsFPRegister()) {
- __ AllocateStackSpace(kFloatSize);
- __ Movsd(Operand(esp, 0), i.InputDoubleRegister(0));
- frame_access_state()->IncreaseSPDelta(kFloatSize / kSystemPointerSize);
- } else if (HasImmediateInput(instr, 0)) {
- __ push(i.InputImmediate(0));
- frame_access_state()->IncreaseSPDelta(1);
} else {
- __ push(i.InputOperand(0));
- frame_access_state()->IncreaseSPDelta(1);
+ InstructionOperand* input = instr->InputAt(1);
+ if (input->IsRegister()) {
+ __ AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ __ push(i.InputRegister(1));
+ } else if (input->IsFloatRegister()) {
+ DCHECK_GE(stack_decrement, kFloatSize);
+ __ AllocateStackSpace(stack_decrement);
+ __ Movss(Operand(esp, 0), i.InputDoubleRegister(1));
+ } else if (input->IsDoubleRegister()) {
+ DCHECK_GE(stack_decrement, kDoubleSize);
+ __ AllocateStackSpace(stack_decrement);
+ __ Movsd(Operand(esp, 0), i.InputDoubleRegister(1));
+ } else if (input->IsSimd128Register()) {
+ DCHECK_GE(stack_decrement, kSimd128Size);
+ __ AllocateStackSpace(stack_decrement);
+ // TODO(bbudge) Use Movaps when slots are aligned.
+ __ Movups(Operand(esp, 0), i.InputSimd128Register(1));
+ } else if (input->IsStackSlot() || input->IsFloatStackSlot()) {
+ __ AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ __ push(i.InputOperand(1));
+ } else if (input->IsDoubleStackSlot()) {
+ DCHECK_GE(stack_decrement, kDoubleSize);
+ __ Movsd(kScratchDoubleReg, i.InputOperand(1));
+ __ AllocateStackSpace(stack_decrement);
+ __ Movsd(Operand(esp, 0), kScratchDoubleReg);
+ } else {
+ DCHECK(input->IsSimd128StackSlot());
+ DCHECK_GE(stack_decrement, kSimd128Size);
+ // TODO(bbudge) Use Movaps when slots are aligned.
+ __ Movups(kScratchDoubleReg, i.InputOperand(1));
+ __ AllocateStackSpace(stack_decrement);
+ __ Movups(Operand(esp, 0), kScratchDoubleReg);
+ }
}
+ frame_access_state()->IncreaseSPDelta(slots);
break;
+ }
case kIA32Poke: {
int slot = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
@@ -2092,6 +2039,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Roundpd(i.OutputSimd128Register(), i.InputDoubleRegister(0), mode);
break;
}
+ case kIA32F64x2PromoteLowF32x4: {
+ __ Cvtps2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kIA32F32x4DemoteF64x2Zero: {
+ __ Cvtpd2ps(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kIA32I32x4TruncSatF64x2SZero: {
+ __ I32x4TruncSatF64x2SZero(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ i.TempRegister(0));
+ break;
+ }
+ case kIA32I32x4TruncSatF64x2UZero: {
+ __ I32x4TruncSatF64x2UZero(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ i.TempRegister(0));
+ break;
+ }
+ case kIA32F64x2ConvertLowI32x4S: {
+ __ Cvtdq2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kIA32F64x2ConvertLowI32x4U: {
+ __ F64x2ConvertLowI32x4U(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), i.TempRegister(0));
+ break;
+ }
case kIA32I64x2ExtMulLowI32x4S: {
__ I64x2ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), kScratchDoubleReg,
@@ -2177,6 +2153,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pinsrd(i.OutputSimd128Register(), i.InputOperand(3), lane * 2 + 1);
break;
}
+ case kIA32I64x2Abs: {
+ __ I64x2Abs(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchDoubleReg);
+ break;
+ }
case kIA32I64x2Neg: {
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(0);
@@ -2254,7 +2235,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kIA32I64x2Eq: {
__ Pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ i.InputOperand(1));
+ break;
+ }
+ case kIA32I64x2Ne: {
+ __ Pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ __ Pcmpeqq(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+ break;
+ }
+ case kIA32I64x2GtS: {
+ __ I64x2GtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
+ break;
+ }
+ case kIA32I64x2GeS: {
+ __ I64x2GeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kIA32I64x2SConvertI32x4Low: {
@@ -2262,15 +2260,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I64x2SConvertI32x4High: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpunpckhqdq(dst, src, src);
- } else {
- __ pshufd(dst, src, 0xEE);
- }
- __ Pmovsxdq(dst, dst);
+ __ I64x2SConvertI32x4High(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
break;
}
case kIA32I64x2UConvertI32x4Low: {
@@ -2278,17 +2269,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I64x2UConvertI32x4High: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- __ vpunpckhdq(dst, src, kScratchDoubleReg);
- } else {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pshufd(dst, src, 0xEE);
- __ pmovzxdq(dst, dst);
- }
+ __ I64x2UConvertI32x4High(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg);
break;
}
case kIA32I8x16SignSelect: {
@@ -2315,64 +2297,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I32x4ExtAddPairwiseI16x8S: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- // kScratchDoubleReg = i16x8.splat(1)
- __ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- __ Psrlw(kScratchDoubleReg, byte{15});
- // pmaddwd multiplies signed words in kScratchDoubleReg and src, producing
- // signed doublewords, then adds pairwise.
- // src = |a|b|c|d|e|f|g|h|
- // dst = | a*1 + b*1 | c*1 + d*1 | e*1 + f*1 | g*1 + h*1 |
- __ Pmaddwd(dst, src, kScratchDoubleReg);
+ __ I32x4ExtAddPairwiseI16x8S(i.OutputSimd128Register(),
+ i.InputSimd128Register(0),
+ i.TempRegister(0));
break;
}
case kIA32I32x4ExtAddPairwiseI16x8U: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
-
- // src = |a|b|c|d|e|f|g|h|
- // kScratchDoubleReg = i32x4.splat(0x0000FFFF)
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psrld(kScratchDoubleReg, kScratchDoubleReg, uint8_t{16});
- // kScratchDoubleReg =|0|b|0|d|0|f|0|h|
- __ Pand(kScratchDoubleReg, src);
- // dst = |0|a|0|c|0|e|0|g|
- __ Psrld(dst, src, byte{16});
- // dst = |a+b|c+d|e+f|g+h|
- __ Paddd(dst, src, kScratchDoubleReg);
+ __ I32x4ExtAddPairwiseI16x8U(i.OutputSimd128Register(),
+ i.InputSimd128Register(0),
+ kScratchDoubleReg);
break;
}
case kIA32I16x8ExtAddPairwiseI8x16S: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- DCHECK_NE(dst, src);
- // dst = i8x16.splat(1)
- __ Move(dst, uint32_t{0x01010101});
- __ Pshufd(dst, dst, byte{0});
- __ Pmaddubsw(dst, dst, src);
- break;
+ __ I16x8ExtAddPairwiseI8x16S(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ i.TempRegister(0));
break;
}
case kIA32I16x8ExtAddPairwiseI8x16U: {
- XMMRegister dst = i.OutputSimd128Register();
- // dst = i8x16.splat(1)
- __ Move(kScratchDoubleReg, uint32_t{0x01010101});
- __ Pshufd(kScratchDoubleReg, kScratchDoubleReg, byte{0});
- __ Pmaddubsw(dst, i.InputSimd128Register(0), kScratchDoubleReg);
+ __ I16x8ExtAddPairwiseI8x16U(i.OutputSimd128Register(),
+ i.InputSimd128Register(0),
+ i.TempRegister(0));
break;
}
case kIA32I16x8Q15MulRSatS: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src0 = i.InputSimd128Register(0);
- XMMRegister src1 = i.InputSimd128Register(1);
- // k = i16x8.splat(0x8000)
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psllw(kScratchDoubleReg, kScratchDoubleReg, byte{15});
-
- __ Pmulhrsw(dst, src0, src1);
- __ Pcmpeqw(kScratchDoubleReg, dst);
- __ Pxor(dst, kScratchDoubleReg);
+ __ I16x8Q15MulRSatS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kIA32I32x4SignSelect: {
@@ -2448,36 +2398,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Addps(dst, dst, kScratchDoubleReg); // add hi and lo, may round.
break;
}
- case kSSEF32x4Abs: {
+ case kIA32F32x4Abs: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(i.InputSimd128Register(0), dst);
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrld(kScratchDoubleReg, 1);
- __ andps(dst, kScratchDoubleReg);
- break;
- }
- case kAVXF32x4Abs: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- __ vpsrld(kScratchDoubleReg, kScratchDoubleReg, 1);
- __ vandps(i.OutputSimd128Register(), kScratchDoubleReg,
- i.InputOperand(0));
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst == src) {
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psrld(kScratchDoubleReg, kScratchDoubleReg, 1);
+ __ Andps(dst, kScratchDoubleReg);
+ } else {
+ __ Pcmpeqd(dst, dst);
+ __ Psrld(dst, dst, 1);
+ __ Andps(dst, src);
+ }
break;
}
- case kSSEF32x4Neg: {
+ case kIA32F32x4Neg: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ pslld(kScratchDoubleReg, 31);
- __ xorps(dst, kScratchDoubleReg);
- break;
- }
- case kAVXF32x4Neg: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- __ vpslld(kScratchDoubleReg, kScratchDoubleReg, 31);
- __ vxorps(i.OutputSimd128Register(), kScratchDoubleReg,
- i.InputOperand(0));
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst == src) {
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pslld(kScratchDoubleReg, kScratchDoubleReg, 31);
+ __ Xorps(dst, kScratchDoubleReg);
+ } else {
+ __ Pcmpeqd(dst, dst);
+ __ Pslld(dst, dst, 31);
+ __ Xorps(dst, src);
+ }
break;
}
case kIA32F32x4Sqrt: {
@@ -2683,9 +2629,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I32x4SConvertI16x8High: {
- XMMRegister dst = i.OutputSimd128Register();
- __ Palignr(dst, i.InputOperand(0), 8);
- __ Pmovsxwd(dst, dst);
+ __ I32x4SConvertI16x8High(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
break;
}
case kIA32I32x4Neg: {
@@ -2893,9 +2838,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I32x4UConvertI16x8High: {
- XMMRegister dst = i.OutputSimd128Register();
- __ Palignr(dst, i.InputOperand(0), 8);
- __ Pmovzxwd(dst, dst);
+ __ I32x4UConvertI16x8High(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg);
break;
}
case kIA32I32x4ShrU: {
@@ -2975,7 +2919,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kIA32I32x4DotI16x8S: {
__ Pmaddwd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ i.InputOperand(1));
break;
}
case kIA32I16x8Splat: {
@@ -2996,9 +2940,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I16x8SConvertI8x16High: {
- XMMRegister dst = i.OutputSimd128Register();
- __ Palignr(dst, i.InputOperand(0), 8);
- __ Pmovsxbw(dst, dst);
+ __ I16x8SConvertI8x16High(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
break;
}
case kIA32I16x8Neg: {
@@ -3180,9 +3123,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I16x8UConvertI8x16High: {
- XMMRegister dst = i.OutputSimd128Register();
- __ Palignr(dst, i.InputOperand(0), 8);
- __ Pmovzxbw(dst, dst);
+ __ I16x8UConvertI8x16High(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg);
break;
}
case kIA32I16x8ShrU: {
@@ -3379,12 +3321,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
uint8_t laneidx = i.InputUint8(index + 1);
- if (laneidx == 0) {
- __ Movss(operand, i.InputSimd128Register(index));
- } else {
- DCHECK_GE(3, laneidx);
- __ Extractps(operand, i.InputSimd128Register(index), 1);
- }
+ __ S128Store32Lane(operand, i.InputSimd128Register(index), laneidx);
break;
}
case kSSEI8x16SConvertI16x8: {
@@ -3473,50 +3410,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kSSEI8x16Add: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ paddb(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I8x16Add: {
+ __ Paddb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI8x16Add: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpaddb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I8x16AddSatS: {
+ __ Paddsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI8x16AddSatS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ paddsb(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI8x16AddSatS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpaddsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI8x16Sub: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ psubb(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I8x16Sub: {
+ __ Psubb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI8x16Sub: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsubb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I8x16SubSatS: {
+ __ Psubsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI8x16SubSatS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ psubsb(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI8x16SubSatS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsubsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
case kSSEI8x16Mul: {
XMMRegister dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
@@ -3596,41 +3509,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpor(dst, dst, tmp);
break;
}
- case kSSEI8x16MinS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pminsb(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI8x16MinS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpminsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I8x16MinS: {
+ __ Pminsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kSSEI8x16MaxS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmaxsb(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I8x16MaxS: {
+ __ Pmaxsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI8x16MaxS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmaxsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I8x16Eq: {
+ __ Pcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI8x16Eq: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpeqb(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI8x16Eq: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
case kSSEI8x16Ne: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pcmpeqb(i.OutputSimd128Register(), i.InputOperand(1));
@@ -3647,15 +3540,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kScratchDoubleReg);
break;
}
- case kSSEI8x16GtS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpgtb(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI8x16GtS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpgtb(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I8x16GtS: {
+ __ Pcmpgtb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kSSEI8x16GeS: {
@@ -3689,26 +3576,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpackuswb(dst, dst, i.InputOperand(1));
break;
}
- case kSSEI8x16AddSatU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ paddusb(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI8x16AddSatU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpaddusb(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI8x16SubSatU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ psubusb(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I8x16AddSatU: {
+ __ Paddusb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI8x16SubSatU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsubusb(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I8x16SubSatU: {
+ __ Psubusb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kIA32I8x16ShrU: {
@@ -3743,27 +3618,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kSSEI8x16MinU: {
- XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- __ pminub(dst, i.InputOperand(1));
- break;
- }
- case kAVXI8x16MinU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpminub(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI8x16MaxU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pmaxub(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I8x16MinU: {
+ __ Pminub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI8x16MaxU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmaxub(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I8x16MaxU: {
+ __ Pmaxub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kSSEI8x16GtU: {
@@ -3816,6 +3678,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pmovmskb(i.OutputRegister(), i.InputSimd128Register(0));
break;
}
+ case kIA32I8x16Popcnt: {
+ __ I8x16Popcnt(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchDoubleReg, i.TempSimd128Register(0),
+ i.TempRegister(1));
+ break;
+ }
case kIA32S128Const: {
XMMRegister dst = i.OutputSimd128Register();
Register tmp = i.TempRegister(0);
@@ -3837,17 +3705,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pcmpeqd(dst, dst);
break;
}
- case kSSES128Not: {
+ case kIA32S128Not: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ pxor(dst, kScratchDoubleReg);
- break;
- }
- case kAVXS128Not: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- __ vpxor(i.OutputSimd128Register(), kScratchDoubleReg, i.InputOperand(0));
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst == src) {
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(dst, kScratchDoubleReg);
+ } else {
+ __ Pcmpeqd(dst, dst);
+ __ Pxor(dst, src);
+ }
break;
}
case kSSES128And: {
@@ -4338,6 +4205,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// comparison instruction used matters, e.g. given 0xff00, pcmpeqb returns
// 0x0011, pcmpeqw returns 0x0000, ptest will set ZF to 0 and 1
// respectively.
+ case kIA32V64x2AllTrue:
+ ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqq);
+ break;
case kIA32V32x4AllTrue:
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqd);
break;
@@ -4348,6 +4218,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb);
break;
}
+ case kIA32Prefetch:
+ __ prefetch(i.MemoryOperand(), 1);
+ break;
+ case kIA32PrefetchNta:
+ __ prefetch(i.MemoryOperand(), 0);
+ break;
case kIA32Word32AtomicPairLoad: {
XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
__ movq(tmp, i.MemoryOperand());
@@ -4646,7 +4522,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ wasm_call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
gen_->zone()->New<ReferenceMap>(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ gen_->RecordSafepoint(reference_map);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
}
@@ -4953,7 +4829,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ wasm_call(wasm::WasmCode::kWasmStackOverflow,
RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
- RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ RecordSafepoint(reference_map);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
__ bind(&done);
}
@@ -5014,7 +4890,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
Register argc_reg = ecx;
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// Functions with JS linkage have at least one parameter (the receiver).
// If {parameter_count} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
@@ -5022,9 +4897,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
parameter_count != 0;
-#else
- const bool drop_jsargs = false;
-#endif
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
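
In the kIA32Push rework above, input 0 now carries an explicit stack_decrement, which may include alignment padding; the case pre-allocates whatever the push instruction itself does not cover and reports the full slot count to frame_access_state(). A small stand-alone sketch of that arithmetic for the ordinary pointer-sized push path, assuming the same split the diff shows (FP and SIMD values instead allocate the whole decrement and store to Operand(esp, 0)):

// Sketch of the bookkeeping only; not the real macro-assembler calls.
constexpr int kSystemPointerSize = 4;  // ia32

struct PushPlan {
  int explicit_allocation;  // bytes reserved via AllocateStackSpace before the push
  int sp_delta_slots;       // value reported through IncreaseSPDelta
};

PushPlan PlanPointerSizedPush(int stack_decrement) {
  // The push instruction moves esp by one pointer on its own, so only the
  // padding portion of the decrement needs an explicit allocation.
  return PushPlan{stack_decrement - kSystemPointerSize,
                  stack_decrement / kSystemPointerSize};
}
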
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index 632eeace20..40f7b6e403 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -114,9 +114,6 @@ namespace compiler {
V(IA32BitcastIF) \
V(IA32Lea) \
V(IA32Push) \
- V(IA32PushFloat32) \
- V(IA32PushFloat64) \
- V(IA32PushSimd128) \
V(IA32Poke) \
V(IA32Peek) \
V(IA32F64x2Splat) \
@@ -138,8 +135,12 @@ namespace compiler {
V(IA32F64x2Pmin) \
V(IA32F64x2Pmax) \
V(IA32F64x2Round) \
+ V(IA32F64x2ConvertLowI32x4S) \
+ V(IA32F64x2ConvertLowI32x4U) \
+ V(IA32F64x2PromoteLowF32x4) \
V(IA32I64x2SplatI32Pair) \
V(IA32I64x2ReplaceLaneI32Pair) \
+ V(IA32I64x2Abs) \
V(IA32I64x2Neg) \
V(IA32I64x2Shl) \
V(IA32I64x2ShrS) \
@@ -149,6 +150,9 @@ namespace compiler {
V(IA32I64x2ShrU) \
V(IA32I64x2BitMask) \
V(IA32I64x2Eq) \
+ V(IA32I64x2Ne) \
+ V(IA32I64x2GtS) \
+ V(IA32I64x2GeS) \
V(IA32I64x2SignSelect) \
V(IA32I64x2ExtMulLowI32x4S) \
V(IA32I64x2ExtMulHighI32x4S) \
@@ -163,10 +167,8 @@ namespace compiler {
V(IA32Insertps) \
V(IA32F32x4SConvertI32x4) \
V(IA32F32x4UConvertI32x4) \
- V(SSEF32x4Abs) \
- V(AVXF32x4Abs) \
- V(SSEF32x4Neg) \
- V(AVXF32x4Neg) \
+ V(IA32F32x4Abs) \
+ V(IA32F32x4Neg) \
V(IA32F32x4Sqrt) \
V(IA32F32x4RecipApprox) \
V(IA32F32x4RecipSqrtApprox) \
@@ -190,6 +192,7 @@ namespace compiler {
V(IA32F32x4Pmin) \
V(IA32F32x4Pmax) \
V(IA32F32x4Round) \
+ V(IA32F32x4DemoteF64x2Zero) \
V(IA32I32x4Splat) \
V(IA32I32x4ExtractLane) \
V(IA32I32x4SConvertF32x4) \
@@ -241,6 +244,8 @@ namespace compiler {
V(IA32I32x4ExtMulHighI16x8U) \
V(IA32I32x4ExtAddPairwiseI16x8S) \
V(IA32I32x4ExtAddPairwiseI16x8U) \
+ V(IA32I32x4TruncSatF64x2SZero) \
+ V(IA32I32x4TruncSatF64x2UZero) \
V(IA32I16x8Splat) \
V(IA32I16x8ExtractLaneS) \
V(IA32I16x8SConvertI8x16Low) \
@@ -315,39 +320,27 @@ namespace compiler {
V(IA32I8x16Neg) \
V(IA32I8x16Shl) \
V(IA32I8x16ShrS) \
- V(SSEI8x16Add) \
- V(AVXI8x16Add) \
- V(SSEI8x16AddSatS) \
- V(AVXI8x16AddSatS) \
- V(SSEI8x16Sub) \
- V(AVXI8x16Sub) \
- V(SSEI8x16SubSatS) \
- V(AVXI8x16SubSatS) \
+ V(IA32I8x16Add) \
+ V(IA32I8x16AddSatS) \
+ V(IA32I8x16Sub) \
+ V(IA32I8x16SubSatS) \
V(SSEI8x16Mul) \
V(AVXI8x16Mul) \
- V(SSEI8x16MinS) \
- V(AVXI8x16MinS) \
- V(SSEI8x16MaxS) \
- V(AVXI8x16MaxS) \
- V(SSEI8x16Eq) \
- V(AVXI8x16Eq) \
+ V(IA32I8x16MinS) \
+ V(IA32I8x16MaxS) \
+ V(IA32I8x16Eq) \
V(SSEI8x16Ne) \
V(AVXI8x16Ne) \
- V(SSEI8x16GtS) \
- V(AVXI8x16GtS) \
+ V(IA32I8x16GtS) \
V(SSEI8x16GeS) \
V(AVXI8x16GeS) \
V(SSEI8x16UConvertI16x8) \
V(AVXI8x16UConvertI16x8) \
- V(SSEI8x16AddSatU) \
- V(AVXI8x16AddSatU) \
- V(SSEI8x16SubSatU) \
- V(AVXI8x16SubSatU) \
+ V(IA32I8x16AddSatU) \
+ V(IA32I8x16SubSatU) \
V(IA32I8x16ShrU) \
- V(SSEI8x16MinU) \
- V(AVXI8x16MinU) \
- V(SSEI8x16MaxU) \
- V(AVXI8x16MaxU) \
+ V(IA32I8x16MinU) \
+ V(IA32I8x16MaxU) \
V(SSEI8x16GtU) \
V(AVXI8x16GtU) \
V(SSEI8x16GeU) \
@@ -356,11 +349,11 @@ namespace compiler {
V(IA32I8x16Abs) \
V(IA32I8x16BitMask) \
V(IA32I8x16SignSelect) \
+ V(IA32I8x16Popcnt) \
V(IA32S128Const) \
V(IA32S128Zero) \
V(IA32S128AllOnes) \
- V(SSES128Not) \
- V(AVXS128Not) \
+ V(IA32S128Not) \
V(SSES128And) \
V(AVXS128And) \
V(SSES128Or) \
@@ -417,9 +410,12 @@ namespace compiler {
V(SSES8x2Reverse) \
V(AVXS8x2Reverse) \
V(IA32S128AnyTrue) \
+ V(IA32V64x2AllTrue) \
V(IA32V32x4AllTrue) \
V(IA32V16x8AllTrue) \
V(IA32V8x16AllTrue) \
+ V(IA32Prefetch) \
+ V(IA32PrefetchNta) \
V(IA32Word32AtomicPairLoad) \
V(IA32Word32AtomicPairStore) \
V(IA32Word32AtomicPairAdd) \
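
Several of the deletions above collapse paired kSSE*/kAVX* opcodes (kSSEI8x16Add/kAVXI8x16Add, kSSEF32x4Abs/kAVXF32x4Abs, kSSES128Not/kAVXS128Not, and so on) into single kIA32* entries, and the matching code-generator hunks show the AVX-versus-SSE decision moving into the capitalized macro-assembler helpers (Paddb, Pcmpeqd, ...). A rough sketch of that dispatch, with invented helper names rather than V8's TurboAssembler API:

// Hypothetical sketch: the AVX encoding is non-destructive and takes three
// operands, while the SSE encoding overwrites its first source, so one
// wrapper can serve a single kIA32* opcode for both.
#include <string>
#include <vector>

std::vector<std::string> EmitPaddbSketch(bool has_avx, const std::string& dst,
                                         const std::string& lhs,
                                         const std::string& rhs) {
  std::vector<std::string> code;
  if (has_avx) {
    code.push_back("vpaddb " + dst + ", " + lhs + ", " + rhs);
  } else {
    // SSE form is destructive: make dst alias lhs first if necessary.
    if (dst != lhs) code.push_back("movaps " + dst + ", " + lhs);
    code.push_back("paddb " + dst + ", " + rhs);
  }
  return code;
}

With the dispatch pushed down like this, the instruction selector only has to encode register constraints (for example DefineSameAsFirst for the destructive SSE form versus DefineAsRegister under AVX), which is what the instruction-selector hunks further down adjust.
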
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index f82f299c5c..21b650cb61 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -120,8 +120,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32F64x2Pmin:
case kIA32F64x2Pmax:
case kIA32F64x2Round:
+ case kIA32F64x2ConvertLowI32x4S:
+ case kIA32F64x2ConvertLowI32x4U:
+ case kIA32F64x2PromoteLowF32x4:
case kIA32I64x2SplatI32Pair:
case kIA32I64x2ReplaceLaneI32Pair:
+ case kIA32I64x2Abs:
case kIA32I64x2Neg:
case kIA32I64x2Shl:
case kIA32I64x2ShrS:
@@ -131,6 +135,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I64x2ShrU:
case kIA32I64x2BitMask:
case kIA32I64x2Eq:
+ case kIA32I64x2Ne:
+ case kIA32I64x2GtS:
+ case kIA32I64x2GeS:
case kIA32I64x2SignSelect:
case kIA32I64x2ExtMulLowI32x4S:
case kIA32I64x2ExtMulHighI32x4S:
@@ -145,10 +152,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Insertps:
case kIA32F32x4SConvertI32x4:
case kIA32F32x4UConvertI32x4:
- case kSSEF32x4Abs:
- case kAVXF32x4Abs:
- case kSSEF32x4Neg:
- case kAVXF32x4Neg:
+ case kIA32F32x4Abs:
+ case kIA32F32x4Neg:
case kIA32F32x4Sqrt:
case kIA32F32x4RecipApprox:
case kIA32F32x4RecipSqrtApprox:
@@ -172,6 +177,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32F32x4Pmin:
case kIA32F32x4Pmax:
case kIA32F32x4Round:
+ case kIA32F32x4DemoteF64x2Zero:
case kIA32I32x4Splat:
case kIA32I32x4ExtractLane:
case kIA32I32x4SConvertF32x4:
@@ -223,6 +229,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I32x4ExtMulHighI16x8U:
case kIA32I32x4ExtAddPairwiseI16x8S:
case kIA32I32x4ExtAddPairwiseI16x8U:
+ case kIA32I32x4TruncSatF64x2SZero:
+ case kIA32I32x4TruncSatF64x2UZero:
case kIA32I16x8Splat:
case kIA32I16x8ExtractLaneS:
case kIA32I16x8SConvertI8x16Low:
@@ -297,39 +305,27 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I8x16Neg:
case kIA32I8x16Shl:
case kIA32I8x16ShrS:
- case kSSEI8x16Add:
- case kAVXI8x16Add:
- case kSSEI8x16AddSatS:
- case kAVXI8x16AddSatS:
- case kSSEI8x16Sub:
- case kAVXI8x16Sub:
- case kSSEI8x16SubSatS:
- case kAVXI8x16SubSatS:
+ case kIA32I8x16Add:
+ case kIA32I8x16AddSatS:
+ case kIA32I8x16Sub:
+ case kIA32I8x16SubSatS:
case kSSEI8x16Mul:
case kAVXI8x16Mul:
- case kSSEI8x16MinS:
- case kAVXI8x16MinS:
- case kSSEI8x16MaxS:
- case kAVXI8x16MaxS:
- case kSSEI8x16Eq:
- case kAVXI8x16Eq:
+ case kIA32I8x16MinS:
+ case kIA32I8x16MaxS:
+ case kIA32I8x16Eq:
case kSSEI8x16Ne:
case kAVXI8x16Ne:
- case kSSEI8x16GtS:
- case kAVXI8x16GtS:
+ case kIA32I8x16GtS:
case kSSEI8x16GeS:
case kAVXI8x16GeS:
case kSSEI8x16UConvertI16x8:
case kAVXI8x16UConvertI16x8:
- case kSSEI8x16AddSatU:
- case kAVXI8x16AddSatU:
- case kSSEI8x16SubSatU:
- case kAVXI8x16SubSatU:
+ case kIA32I8x16AddSatU:
+ case kIA32I8x16SubSatU:
case kIA32I8x16ShrU:
- case kSSEI8x16MinU:
- case kAVXI8x16MinU:
- case kSSEI8x16MaxU:
- case kAVXI8x16MaxU:
+ case kIA32I8x16MinU:
+ case kIA32I8x16MaxU:
case kSSEI8x16GtU:
case kAVXI8x16GtU:
case kSSEI8x16GeU:
@@ -338,11 +334,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I8x16Abs:
case kIA32I8x16BitMask:
case kIA32I8x16SignSelect:
+ case kIA32I8x16Popcnt:
case kIA32S128Const:
case kIA32S128Zero:
case kIA32S128AllOnes:
- case kSSES128Not:
- case kAVXS128Not:
+ case kIA32S128Not:
case kSSES128And:
case kAVXS128And:
case kSSES128Or:
@@ -389,6 +385,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSES8x2Reverse:
case kAVXS8x2Reverse:
case kIA32S128AnyTrue:
+ case kIA32V64x2AllTrue:
case kIA32V32x4AllTrue:
case kIA32V16x8AllTrue:
case kIA32V8x16AllTrue:
@@ -431,12 +428,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
return kIsLoadOperation;
case kIA32Push:
- case kIA32PushFloat32:
- case kIA32PushFloat64:
- case kIA32PushSimd128:
case kIA32Poke:
case kIA32MFence:
case kIA32LFence:
+ case kIA32Prefetch:
+ case kIA32PrefetchNta:
return kHasSideEffect;
case kIA32Word32AtomicPairLoad:
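
The scheduler hunk above files the new kIA32Prefetch/kIA32PrefetchNta opcodes under kHasSideEffect, alongside pushes, pokes and the fences, rather than under kIsLoadOperation. A toy classification in the same shape, with made-up enum values rather than the real InstructionScheduler flags:

// Illustrative only; mirrors how the switch above groups the opcodes.
enum SchedulerFlagsSketch { kNoFlags = 0, kIsLoadOperation = 1, kHasSideEffect = 2 };

enum class OpSketch { Load, Push, Prefetch, PrefetchNta, Add };

int ClassifySketch(OpSketch op) {
  switch (op) {
    case OpSketch::Load:
      return kIsLoadOperation;
    case OpSketch::Push:
    case OpSketch::Prefetch:
    case OpSketch::PrefetchNta:
      return kHasSideEffect;
    case OpSketch::Add:
      return kNoFlags;
  }
  return kNoFlags;
}
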
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index 0f266cd824..662b40ddf4 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -432,7 +432,8 @@ void InstructionSelector::VisitLoadLane(Node* node) {
}
IA32OperandGenerator g(this);
- InstructionOperand outputs[] = {g.DefineAsRegister(node)};
+ InstructionOperand outputs[] = {IsSupported(AVX) ? g.DefineAsRegister(node)
+ : g.DefineSameAsFirst(node)};
// Input 0 is value node, 1 is lane idx, and GetEffectiveAddressMemoryOperand
// uses up to 3 inputs. This ordering is consistent with other operations that
// use the same opcode.
@@ -560,7 +561,7 @@ void InstructionSelector::VisitLoad(Node* node) {
InstructionCode code = opcode | AddressingModeField::encode(mode);
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- code |= MiscField::encode(kMemoryAccessPoisoned);
+ code |= AccessModeField::encode(kMemoryAccessPoisoned);
}
Emit(code, 1, outputs, input_count, inputs);
}
@@ -701,6 +702,36 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitPrefetchTemporal(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ InstructionCode opcode = kIA32Prefetch;
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ // The maximum number of inputs that can be generated by the function above is
+ // 3, but wasm cases only generate 2 inputs. This check will need to be
+ // modified for any non-wasm uses of prefetch.
+ DCHECK_LE(input_count, 2);
+ opcode |= AddressingModeField::encode(addressing_mode);
+ Emit(opcode, 0, nullptr, input_count, inputs);
+}
+
+void InstructionSelector::VisitPrefetchNonTemporal(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ InstructionCode opcode = kIA32PrefetchNta;
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ // The maximum number of inputs that can be generated by the function above is
+ // 3, but wasm cases only generate 2 inputs. This check will need to be
+ // modified for any non-wasm uses of prefetch.
+ DCHECK_LE(input_count, 2);
+ opcode |= AddressingModeField::encode(addressing_mode);
+ Emit(opcode, 0, nullptr, input_count, inputs);
+}
+
namespace {
// Shared routine for multiple binary operations.
@@ -1363,35 +1394,33 @@ void InstructionSelector::EmitPrepareArguments(
} else {
// Push any stack arguments.
int effect_level = GetEffectLevel(node);
+ int stack_decrement = 0;
for (PushParameter input : base::Reversed(*arguments)) {
- // Skip any alignment holes in pushed nodes.
+ stack_decrement += kSystemPointerSize;
+ // Skip holes in the param array. These represent both extra slots for
+ // multi-slot values and padding slots for alignment.
if (input.node == nullptr) continue;
- if (g.CanBeMemoryOperand(kIA32Push, node, input.node, effect_level)) {
+ InstructionOperand decrement = g.UseImmediate(stack_decrement);
+ stack_decrement = 0;
+ if (g.CanBeImmediate(input.node)) {
+ Emit(kIA32Push, g.NoOutput(), decrement, g.UseImmediate(input.node));
+ } else if (IsSupported(ATOM) ||
+ sequence()->IsFP(GetVirtualRegister(input.node))) {
+ // TODO(bbudge): IA32Push cannot handle stack->stack double moves
+ // because there is no way to encode fixed double slots.
+ Emit(kIA32Push, g.NoOutput(), decrement, g.UseRegister(input.node));
+ } else if (g.CanBeMemoryOperand(kIA32Push, node, input.node,
+ effect_level)) {
InstructionOperand outputs[1];
- InstructionOperand inputs[4];
+ InstructionOperand inputs[5];
size_t input_count = 0;
- InstructionCode opcode = kIA32Push;
+ inputs[input_count++] = decrement;
AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
input.node, inputs, &input_count);
- opcode |= AddressingModeField::encode(mode);
+ InstructionCode opcode = kIA32Push | AddressingModeField::encode(mode);
Emit(opcode, 0, outputs, input_count, inputs);
} else {
- InstructionOperand value =
- g.CanBeImmediate(input.node)
- ? g.UseImmediate(input.node)
- : IsSupported(ATOM) ||
- sequence()->IsFP(GetVirtualRegister(input.node))
- ? g.UseRegister(input.node)
- : g.Use(input.node);
- if (input.location.GetType() == MachineType::Float32()) {
- Emit(kIA32PushFloat32, g.NoOutput(), value);
- } else if (input.location.GetType() == MachineType::Float64()) {
- Emit(kIA32PushFloat64, g.NoOutput(), value);
- } else if (input.location.GetType() == MachineType::Simd128()) {
- Emit(kIA32PushSimd128, g.NoOutput(), value);
- } else {
- Emit(kIA32Push, g.NoOutput(), value);
- }
+ Emit(kIA32Push, g.NoOutput(), decrement, g.UseAny(input.node));
}
}
}
@@ -1424,8 +1453,6 @@ void InstructionSelector::EmitPrepareResults(
bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
-int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 0; }
-
namespace {
void VisitCompareWithMemoryOperand(InstructionSelector* selector,
@@ -2223,20 +2250,8 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I16x8GtU) \
V(I16x8GeU) \
V(I8x16SConvertI16x8) \
- V(I8x16Add) \
- V(I8x16AddSatS) \
- V(I8x16Sub) \
- V(I8x16SubSatS) \
- V(I8x16MinS) \
- V(I8x16MaxS) \
- V(I8x16Eq) \
V(I8x16Ne) \
- V(I8x16GtS) \
V(I8x16GeS) \
- V(I8x16AddSatU) \
- V(I8x16SubSatU) \
- V(I8x16MinU) \
- V(I8x16MaxU) \
V(I8x16GtU) \
V(I8x16GeU) \
V(S128And) \
@@ -2252,9 +2267,21 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I64x2Add) \
V(I64x2Sub) \
V(I64x2Eq) \
+ V(I64x2Ne) \
V(I32x4DotI16x8S) \
V(I16x8RoundingAverageU) \
- V(I16x8Q15MulRSatS) \
+ V(I8x16Add) \
+ V(I8x16AddSatS) \
+ V(I8x16Sub) \
+ V(I8x16SubSatS) \
+ V(I8x16MinS) \
+ V(I8x16MaxS) \
+ V(I8x16Eq) \
+ V(I8x16GtS) \
+ V(I8x16AddSatU) \
+ V(I8x16SubSatU) \
+ V(I8x16MinU) \
+ V(I8x16MaxU) \
V(I8x16RoundingAverageU)
// These opcodes require all inputs to be registers because the codegen is
@@ -2271,9 +2298,15 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I16x8ExtMulLowI8x16S) \
V(I16x8ExtMulHighI8x16S) \
V(I16x8ExtMulLowI8x16U) \
- V(I16x8ExtMulHighI8x16U)
+ V(I16x8ExtMulHighI8x16U) \
+ V(I16x8Q15MulRSatS)
#define SIMD_UNOP_LIST(V) \
+ V(F64x2ConvertLowI32x4S) \
+ V(F64x2PromoteLowF32x4) \
+ V(F32x4DemoteF64x2Zero) \
+ V(F32x4Abs) \
+ V(F32x4Neg) \
V(F32x4Sqrt) \
V(F32x4SConvertI32x4) \
V(F32x4RecipApprox) \
@@ -2298,19 +2331,11 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I16x8Abs) \
V(I8x16Neg) \
V(I8x16Abs) \
- V(I8x16BitMask)
-
-#define SIMD_UNOP_PREFIX_LIST(V) \
- V(F32x4Abs) \
- V(F32x4Neg) \
+ V(I8x16BitMask) \
V(S128Not)
-#define SIMD_ANYTRUE_LIST(V) \
- V(V32x4AnyTrue) \
- V(V16x8AnyTrue) \
- V(V8x16AnyTrue)
-
#define SIMD_ALLTRUE_LIST(V) \
+ V(V64x2AllTrue) \
V(V32x4AllTrue) \
V(V16x8AllTrue) \
V(V8x16AllTrue)
@@ -2607,36 +2632,12 @@ SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
#undef VISIT_SIMD_UNOP
#undef SIMD_UNOP_LIST
-// TODO(v8:9198): SSE instructions that read 16 bytes from memory require the
-// operand to be 16-byte aligned. AVX instructions relax this requirement, but
-// might have reduced performance if the memory crosses cache line. But since we
-// have limited xmm registers, this might be okay to alleviate register
-// pressure.
-#define VISIT_SIMD_UNOP_PREFIX(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- IA32OperandGenerator g(this); \
- if (IsSupported(AVX)) { \
- Emit(kAVX##Opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0))); \
- } else { \
- Emit(kSSE##Opcode, g.DefineSameAsFirst(node), \
- g.UseRegister(node->InputAt(0))); \
- } \
- }
-SIMD_UNOP_PREFIX_LIST(VISIT_SIMD_UNOP_PREFIX)
-#undef VISIT_SIMD_UNOP_PREFIX
-#undef SIMD_UNOP_PREFIX_LIST
-
-// The implementation of AnyTrue is the same for all shapes.
-#define VISIT_SIMD_ANYTRUE(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- IA32OperandGenerator g(this); \
- InstructionOperand temps[] = {g.TempRegister()}; \
- Emit(kIA32S128AnyTrue, g.DefineAsRegister(node), \
- g.UseRegister(node->InputAt(0)), arraysize(temps), temps); \
- }
-SIMD_ANYTRUE_LIST(VISIT_SIMD_ANYTRUE)
-#undef VISIT_SIMD_ANYTRUE
-#undef SIMD_ANYTRUE_LIST
+void InstructionSelector::VisitV128AnyTrue(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister()};
+ Emit(kIA32S128AnyTrue, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
+}
#define VISIT_SIMD_ALLTRUE(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -3111,22 +3112,120 @@ void InstructionSelector::VisitI64x2SignSelect(Node* node) {
VisitSignSelect(this, node, kIA32I64x2SignSelect);
}
+namespace {
+void VisitExtAddPairwise(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, bool need_temp) {
+ IA32OperandGenerator g(selector);
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ InstructionOperand dst = (selector->IsSupported(AVX))
+ ? g.DefineAsRegister(node)
+ : g.DefineSameAsFirst(node);
+ if (need_temp) {
+ InstructionOperand temps[] = {g.TempRegister()};
+ selector->Emit(opcode, dst, operand0, arraysize(temps), temps);
+ } else {
+ selector->Emit(opcode, dst, operand0);
+ }
+}
+} // namespace
+
void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8S(Node* node) {
- VisitRRSimd(this, node, kIA32I32x4ExtAddPairwiseI16x8S);
+ VisitExtAddPairwise(this, node, kIA32I32x4ExtAddPairwiseI16x8S, true);
}
void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8U(Node* node) {
- VisitRRSimd(this, node, kIA32I32x4ExtAddPairwiseI16x8U);
+ VisitExtAddPairwise(this, node, kIA32I32x4ExtAddPairwiseI16x8U, false);
}
void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16S(Node* node) {
- IA32OperandGenerator g(this);
- Emit(kIA32I16x8ExtAddPairwiseI8x16S, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)));
+ VisitExtAddPairwise(this, node, kIA32I16x8ExtAddPairwiseI8x16S, true);
}
void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16U(Node* node) {
- VisitRRSimd(this, node, kIA32I16x8ExtAddPairwiseI8x16U);
+ VisitExtAddPairwise(this, node, kIA32I16x8ExtAddPairwiseI8x16U, true);
+}
+
+void InstructionSelector::VisitI8x16Popcnt(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand dst = CpuFeatures::IsSupported(AVX)
+ ? g.DefineAsRegister(node)
+ : g.DefineAsRegister(node);
+ InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()};
+ Emit(kIA32I8x16Popcnt, dst, g.UseUniqueRegister(node->InputAt(0)),
+ arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitF64x2ConvertLowI32x4U(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister()};
+ InstructionOperand dst =
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
+ Emit(kIA32F64x2ConvertLowI32x4U, dst, g.UseRegister(node->InputAt(0)),
+ arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitI32x4TruncSatF64x2SZero(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister()};
+ if (IsSupported(AVX)) {
+ // Requires dst != src.
+ Emit(kIA32I32x4TruncSatF64x2SZero, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps);
+ } else {
+ Emit(kIA32I32x4TruncSatF64x2SZero, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
+ }
+}
+
+void InstructionSelector::VisitI32x4TruncSatF64x2UZero(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister()};
+ InstructionOperand dst =
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
+ Emit(kIA32I32x4TruncSatF64x2UZero, dst, g.UseRegister(node->InputAt(0)),
+ arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitI64x2GtS(Node* node) {
+ IA32OperandGenerator g(this);
+ if (CpuFeatures::IsSupported(AVX)) {
+ Emit(kIA32I64x2GtS, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+ } else if (CpuFeatures::IsSupported(SSE4_2)) {
+ Emit(kIA32I64x2GtS, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+ } else {
+ Emit(kIA32I64x2GtS, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+ }
+}
+
+void InstructionSelector::VisitI64x2GeS(Node* node) {
+ IA32OperandGenerator g(this);
+ if (CpuFeatures::IsSupported(AVX)) {
+ Emit(kIA32I64x2GeS, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+ } else if (CpuFeatures::IsSupported(SSE4_2)) {
+ Emit(kIA32I64x2GeS, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+ } else {
+ Emit(kIA32I64x2GeS, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+ }
+}
+
+void InstructionSelector::VisitI64x2Abs(Node* node) {
+ IA32OperandGenerator g(this);
+ if (CpuFeatures::IsSupported(AVX)) {
+ Emit(kIA32I64x2Abs, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)));
+ } else {
+ Emit(kIA32I64x2Abs, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)));
+ }
}
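
The new I64x2GtS/GeS/Abs visitors above select operand constraints per CPU feature tier (AVX, SSE4.2, baseline). For reference, a scalar model of the lane semantics these instructions implement (the Wasm-level meaning only, not the chosen instruction sequences) could look like this:

#include <array>
#include <cstdint>

// Reference model: comparisons yield an all-ones 64-bit mask when true and
// zero when false; abs is the two's-complement absolute value per lane.
using I64x2 = std::array<int64_t, 2>;
using U64x2 = std::array<uint64_t, 2>;

U64x2 I64x2GtS(const I64x2& a, const I64x2& b) {
  U64x2 r{};
  for (int i = 0; i < 2; ++i) r[i] = a[i] > b[i] ? ~uint64_t{0} : 0;
  return r;
}

U64x2 I64x2GeS(const I64x2& a, const I64x2& b) {
  U64x2 r{};
  for (int i = 0; i < 2; ++i) r[i] = a[i] >= b[i] ? ~uint64_t{0} : 0;
  return r;
}

I64x2 I64x2Abs(const I64x2& a) {
  I64x2 r{};
  for (int i = 0; i < 2; ++i)
    r[i] = a[i] < 0 ? static_cast<int64_t>(-static_cast<uint64_t>(a[i])) : a[i];
  return r;
}
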
// static
diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h
index 44f6d5bcbf..89cd7be864 100644
--- a/deps/v8/src/compiler/backend/instruction-codes.h
+++ b/deps/v8/src/compiler/backend/instruction-codes.h
@@ -23,6 +23,8 @@
#include "src/compiler/backend/ppc/instruction-codes-ppc.h"
#elif V8_TARGET_ARCH_S390
#include "src/compiler/backend/s390/instruction-codes-s390.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/compiler/backend/riscv64/instruction-codes-riscv64.h"
#else
#define TARGET_ARCH_OPCODE_LIST(V)
#define TARGET_ADDRESSING_MODE_LIST(V)
@@ -67,7 +69,6 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
/* Tail call opcodes are grouped together to make IsTailCall fast */ \
/* and Arch call opcodes are grouped together to make */ \
/* IsCallWithDescriptorFlags fast */ \
- V(ArchTailCallCodeObjectFromJSFunction) \
V(ArchTailCallCodeObject) \
V(ArchTailCallAddress) \
V(ArchTailCallWasm) \
@@ -281,6 +282,10 @@ using FlagsModeField = base::BitField<FlagsMode, 14, 3>;
using FlagsConditionField = base::BitField<FlagsCondition, 17, 5>;
using DeoptImmedArgsCountField = base::BitField<int, 22, 2>;
using DeoptFrameStateOffsetField = base::BitField<int, 24, 8>;
+// LaneSizeField and AccessModeField are helper types to encode/decode a lane
+// size, an access mode, or both inside the overlapping MiscField.
+using LaneSizeField = base::BitField<int, 22, 8>;
+using AccessModeField = base::BitField<MemoryAccessMode, 30, 2>;
using MiscField = base::BitField<int, 22, 10>;
} // namespace compiler
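
The new LaneSizeField and AccessModeField deliberately overlap the existing MiscField bits of the instruction code. Assuming base::BitField follows the usual shift-and-mask scheme, a minimal self-contained sketch of how two overlapping fields read the same bits is shown below; the BitField template and field names here are illustrative, not the V8 definitions.

#include <cassert>
#include <cstdint>

// Minimal shift-and-mask bit-field sketch.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
  static constexpr uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & kMask;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

// Two fields sharing the same bit range, like LaneSizeField and MiscField.
using LaneSize = BitField<int, 22, 8>;
using Misc = BitField<int, 22, 10>;

int main() {
  uint32_t opcode = LaneSize::encode(64);
  assert(LaneSize::decode(opcode) == 64);
  assert(Misc::decode(opcode) == 64);  // overlapping view of the same bits
}
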
diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc
index 28195052df..99c36c923d 100644
--- a/deps/v8/src/compiler/backend/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc
@@ -305,7 +305,6 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchPrepareCallCFunction:
case kArchPrepareTailCall:
- case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject:
case kArchTailCallAddress:
case kArchTailCallWasm:
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index a9b2010b7e..6571db1801 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -671,7 +671,7 @@ size_t InstructionSelector::AddInputsToFrameStateDescriptor(
// Returns the number of instruction operands added to inputs.
size_t InstructionSelector::AddInputsToFrameStateDescriptor(
- FrameStateDescriptor* descriptor, Node* state, OperandGenerator* g,
+ FrameStateDescriptor* descriptor, FrameState state, OperandGenerator* g,
StateObjectDeduplicator* deduplicator, InstructionOperandVector* inputs,
FrameStateInputKind kind, Zone* zone) {
DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
@@ -682,15 +682,15 @@ size_t InstructionSelector::AddInputsToFrameStateDescriptor(
if (descriptor->outer_state()) {
entries += AddInputsToFrameStateDescriptor(
- descriptor->outer_state(), state->InputAt(kFrameStateOuterStateInput),
- g, deduplicator, inputs, kind, zone);
+ descriptor->outer_state(), state.outer_frame_state(), g, deduplicator,
+ inputs, kind, zone);
}
- Node* parameters = state->InputAt(kFrameStateParametersInput);
- Node* locals = state->InputAt(kFrameStateLocalsInput);
- Node* stack = state->InputAt(kFrameStateStackInput);
- Node* context = state->InputAt(kFrameStateContextInput);
- Node* function = state->InputAt(kFrameStateFunctionInput);
+ Node* parameters = state.parameters();
+ Node* locals = state.locals();
+ Node* stack = state.stack();
+ Node* context = state.context();
+ Node* function = state.function();
DCHECK_EQ(descriptor->parameters_count(),
StateValuesAccess(parameters).size());
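
This hunk replaces raw state->InputAt(k...) indexing with a typed FrameState wrapper that exposes named accessors. A hypothetical sketch of that wrapper pattern is below; GenericNode, FrameStateRef and the input ordering are illustrative stand-ins, not V8's actual classes or layout.

#include <cassert>
#include <vector>

// Hypothetical "typed node wrapper": a thin value type over a generic node
// that names its inputs instead of exposing raw indices at every call site.
struct GenericNode {
  std::vector<GenericNode*> inputs;
  GenericNode* InputAt(int i) const { return inputs[i]; }
};

class FrameStateRef {
 public:
  explicit FrameStateRef(GenericNode* node) : node_(node) {}
  GenericNode* parameters() const { return node_->InputAt(0); }
  GenericNode* locals() const { return node_->InputAt(1); }
  GenericNode* stack() const { return node_->InputAt(2); }
  GenericNode* context() const { return node_->InputAt(3); }
  GenericNode* function() const { return node_->InputAt(4); }
  GenericNode* outer_frame_state() const { return node_->InputAt(5); }

 private:
  GenericNode* node_;
};

int main() {
  GenericNode inputs[6] = {};
  GenericNode state;
  for (auto& in : inputs) state.inputs.push_back(&in);
  FrameStateRef fs(&state);
  assert(fs.context() == &inputs[3]);  // named accessor replaces InputAt(3)
}
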
@@ -803,7 +803,7 @@ Instruction* InstructionSelector::EmitWithContinuation(
DeoptFrameStateOffsetField::encode(static_cast<int>(input_count));
AppendDeoptimizeArguments(&continuation_inputs_, cont->kind(),
cont->reason(), cont->feedback(),
- cont->frame_state());
+ FrameState{cont->frame_state()});
} else if (cont->IsSet()) {
continuation_outputs_.push_back(g.DefineAsRegister(cont->result()));
} else if (cont->IsTrap()) {
@@ -828,7 +828,7 @@ Instruction* InstructionSelector::EmitWithContinuation(
void InstructionSelector::AppendDeoptimizeArguments(
InstructionOperandVector* args, DeoptimizeKind kind,
DeoptimizeReason reason, FeedbackSource const& feedback,
- Node* frame_state) {
+ FrameState frame_state) {
OperandGenerator g(this);
FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
DCHECK_NE(DeoptimizeKind::kLazy, kind);
@@ -951,18 +951,12 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
bool call_use_fixed_target_reg = (flags & kCallFixedTargetRegister) != 0;
switch (buffer->descriptor->kind()) {
case CallDescriptor::kCallCodeObject:
- // TODO(jgruber, v8:7449): The below is a hack to support tail-calls from
- // JS-linkage callers with a register code target. The problem is that the
- // code target register may be clobbered before the final jmp by
- // AssemblePopArgumentsAdaptorFrame. As a more permanent fix we could
- // entirely remove support for tail-calls from JS-linkage callers.
buffer->instruction_args.push_back(
(call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
? g.UseImmediate(callee)
: call_use_fixed_target_reg
? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
- : is_tail_call ? g.UseUniqueRegister(callee)
- : g.UseRegister(callee));
+ : g.UseRegister(callee));
break;
case CallDescriptor::kCallAddress:
buffer->instruction_args.push_back(
@@ -1015,20 +1009,21 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
size_t frame_state_entries = 0;
USE(frame_state_entries); // frame_state_entries is only used for debug.
if (buffer->frame_state_descriptor != nullptr) {
- Node* frame_state =
- call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
+ FrameState frame_state{
+ call->InputAt(static_cast<int>(buffer->descriptor->InputCount()))};
// If it was a syntactic tail call we need to drop the current frame and
// all the frames on top of it that are either an arguments adaptor frame
// or a tail caller frame.
if (is_tail_call) {
- frame_state = NodeProperties::GetFrameStateInput(frame_state);
+ frame_state = FrameState{NodeProperties::GetFrameStateInput(frame_state)};
buffer->frame_state_descriptor =
buffer->frame_state_descriptor->outer_state();
while (buffer->frame_state_descriptor != nullptr &&
buffer->frame_state_descriptor->type() ==
FrameStateType::kArgumentsAdaptor) {
- frame_state = NodeProperties::GetFrameStateInput(frame_state);
+ frame_state =
+ FrameState{NodeProperties::GetFrameStateInput(frame_state)};
buffer->frame_state_descriptor =
buffer->frame_state_descriptor->outer_state();
}
@@ -1169,8 +1164,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
if (!source_positions_) return true;
SourcePosition source_position = source_positions_->GetSourcePosition(node);
if (source_position.IsKnown() && IsSourcePositionUsed(node)) {
- sequence()->SetSourcePosition(instructions_[instruction_start],
- source_position);
+ sequence()->SetSourcePosition(instructions_.back(), source_position);
}
return true;
};
@@ -1178,8 +1172,9 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
// Generate code for the block control "top down", but schedule the code
// "bottom up".
VisitControl(block);
- if (!FinishEmittedInstructions(block->control_input(), current_block_end))
+ if (!FinishEmittedInstructions(block->control_input(), current_block_end)) {
return;
+ }
// Visit code in reverse control flow order, because architecture-specific
// matching may cover more than one node at a time.
@@ -1288,7 +1283,7 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
}
case BasicBlock::kDeoptimize: {
DeoptimizeParameters p = DeoptimizeParametersOf(input->op());
- Node* value = input->InputAt(0);
+ FrameState value{input->InputAt(0)};
VisitDeoptimize(p.kind(), p.reason(), p.feedback(), value);
break;
}
@@ -1945,6 +1940,12 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF64x2Trunc(node);
case IrOpcode::kF64x2NearestInt:
return MarkAsSimd128(node), VisitF64x2NearestInt(node);
+ case IrOpcode::kF64x2ConvertLowI32x4S:
+ return MarkAsSimd128(node), VisitF64x2ConvertLowI32x4S(node);
+ case IrOpcode::kF64x2ConvertLowI32x4U:
+ return MarkAsSimd128(node), VisitF64x2ConvertLowI32x4U(node);
+ case IrOpcode::kF64x2PromoteLowF32x4:
+ return MarkAsSimd128(node), VisitF64x2PromoteLowF32x4(node);
case IrOpcode::kF32x4Splat:
return MarkAsSimd128(node), VisitF32x4Splat(node);
case IrOpcode::kF32x4ExtractLane:
@@ -2003,6 +2004,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF32x4Trunc(node);
case IrOpcode::kF32x4NearestInt:
return MarkAsSimd128(node), VisitF32x4NearestInt(node);
+ case IrOpcode::kF32x4DemoteF64x2Zero:
+ return MarkAsSimd128(node), VisitF32x4DemoteF64x2Zero(node);
case IrOpcode::kI64x2Splat:
return MarkAsSimd128(node), VisitI64x2Splat(node);
case IrOpcode::kI64x2SplatI32Pair:
@@ -2013,6 +2016,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI64x2ReplaceLane(node);
case IrOpcode::kI64x2ReplaceLaneI32Pair:
return MarkAsSimd128(node), VisitI64x2ReplaceLaneI32Pair(node);
+ case IrOpcode::kI64x2Abs:
+ return MarkAsSimd128(node), VisitI64x2Abs(node);
case IrOpcode::kI64x2Neg:
return MarkAsSimd128(node), VisitI64x2Neg(node);
case IrOpcode::kI64x2SConvertI32x4Low:
@@ -2037,6 +2042,12 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI64x2Mul(node);
case IrOpcode::kI64x2Eq:
return MarkAsSimd128(node), VisitI64x2Eq(node);
+ case IrOpcode::kI64x2Ne:
+ return MarkAsSimd128(node), VisitI64x2Ne(node);
+ case IrOpcode::kI64x2GtS:
+ return MarkAsSimd128(node), VisitI64x2GtS(node);
+ case IrOpcode::kI64x2GeS:
+ return MarkAsSimd128(node), VisitI64x2GeS(node);
case IrOpcode::kI64x2ShrU:
return MarkAsSimd128(node), VisitI64x2ShrU(node);
case IrOpcode::kI64x2ExtMulLowI32x4S:
@@ -2123,6 +2134,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI32x4ExtAddPairwiseI16x8S(node);
case IrOpcode::kI32x4ExtAddPairwiseI16x8U:
return MarkAsSimd128(node), VisitI32x4ExtAddPairwiseI16x8U(node);
+ case IrOpcode::kI32x4TruncSatF64x2SZero:
+ return MarkAsSimd128(node), VisitI32x4TruncSatF64x2SZero(node);
+ case IrOpcode::kI32x4TruncSatF64x2UZero:
+ return MarkAsSimd128(node), VisitI32x4TruncSatF64x2UZero(node);
case IrOpcode::kI16x8Splat:
return MarkAsSimd128(node), VisitI16x8Splat(node);
case IrOpcode::kI16x8ExtractLaneU:
@@ -2293,16 +2308,14 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI8x16Swizzle(node);
case IrOpcode::kI8x16Shuffle:
return MarkAsSimd128(node), VisitI8x16Shuffle(node);
- case IrOpcode::kV32x4AnyTrue:
- return MarkAsWord32(node), VisitV32x4AnyTrue(node);
+ case IrOpcode::kV128AnyTrue:
+ return MarkAsWord32(node), VisitV128AnyTrue(node);
+ case IrOpcode::kV64x2AllTrue:
+ return MarkAsWord32(node), VisitV64x2AllTrue(node);
case IrOpcode::kV32x4AllTrue:
return MarkAsWord32(node), VisitV32x4AllTrue(node);
- case IrOpcode::kV16x8AnyTrue:
- return MarkAsWord32(node), VisitV16x8AnyTrue(node);
case IrOpcode::kV16x8AllTrue:
return MarkAsWord32(node), VisitV16x8AllTrue(node);
- case IrOpcode::kV8x16AnyTrue:
- return MarkAsWord32(node), VisitV8x16AnyTrue(node);
case IrOpcode::kV8x16AllTrue:
return MarkAsWord32(node), VisitV8x16AllTrue(node);
default:
@@ -2689,7 +2702,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \
- !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64
+ !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_RISCV64
void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
@@ -2714,7 +2727,8 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
UNIMPLEMENTED();
}
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC64
- // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390
+ // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390 &&
+ // !V8_TARGET_ARCH_RISCV64
#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
// This is only needed on 32-bit to split the 64-bit value into two operands.
@@ -2740,73 +2754,13 @@ void InstructionSelector::VisitF32x4Qfms(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM64
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_ARM64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS64 && \
- !V8_TARGET_ARCH_MIPS
-void InstructionSelector::VisitI64x2Eq(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_ARM64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
- // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_MIPS
-
-#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && \
- !V8_TARGET_ARCH_ARM
-// TODO(v8:10972) Prototype i64x2 widen i32x4.
-void InstructionSelector::VisitI64x2SConvertI32x4Low(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI64x2SConvertI32x4High(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI64x2UConvertI32x4Low(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI64x2UConvertI32x4High(Node* node) {
- UNIMPLEMENTED();
-}
-#endif // !V8_TARGET_ARCH_ARM64 || !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32
- // && !V8_TARGET_ARCH_ARM
-
-#if !V8_TARGET_ARCH_ARM64
+#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32
// TODO(v8:11168): Prototyping prefetch.
void InstructionSelector::VisitPrefetchTemporal(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitPrefetchNonTemporal(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_ARM64
-
-#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM
-// TODO(v8:11002) Prototype i8x16.popcnt.
-void InstructionSelector::VisitI8x16Popcnt(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM
-
-#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32
-// TODO(v8:11086) Prototype extended pairwise add.
-void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8S(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8U(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16S(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16U(Node* node) {
- UNIMPLEMENTED();
-}
-#endif // !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32
-
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64 && \
- !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS64
-// TODO(v8:10975): Prototyping load lane and store lane.
-void InstructionSelector::VisitLoadLane(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitStoreLane(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64
- // && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS64
+#endif  // !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64 && \
!V8_TARGET_ARCH_ARM
@@ -2938,8 +2892,8 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
FrameStateDescriptor* frame_state_descriptor = nullptr;
if (call_descriptor->NeedsFrameState()) {
- frame_state_descriptor = GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(call_descriptor->InputCount())));
+ frame_state_descriptor = GetFrameStateDescriptor(FrameState{
+ node->InputAt(static_cast<int>(call_descriptor->InputCount()))});
}
CallBuffer buffer(zone(), call_descriptor, frame_state_descriptor);
@@ -3040,32 +2994,20 @@ void InstructionSelector::VisitTailCall(Node* node) {
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
InstructionOperandVector temps(zone());
- if (caller->IsJSFunctionCall()) {
- switch (call_descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObjectFromJSFunction;
- break;
- default:
- UNREACHABLE();
- }
- int temps_count = GetTempsCountForTailCallFromJSFunction();
- for (int i = 0; i < temps_count; i++) {
- temps.push_back(g.TempRegister());
- }
- } else {
- switch (call_descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObject;
- break;
- case CallDescriptor::kCallAddress:
- opcode = kArchTailCallAddress;
- break;
- case CallDescriptor::kCallWasmFunction:
- opcode = kArchTailCallWasm;
- break;
- default:
- UNREACHABLE();
- }
+ switch (call_descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchTailCallCodeObject;
+ break;
+ case CallDescriptor::kCallAddress:
+ DCHECK(!caller->IsJSFunctionCall());
+ opcode = kArchTailCallAddress;
+ break;
+ case CallDescriptor::kCallWasmFunction:
+ DCHECK(!caller->IsJSFunctionCall());
+ opcode = kArchTailCallWasm;
+ break;
+ default:
+ UNREACHABLE();
}
opcode = EncodeCallDescriptorFlags(opcode, call_descriptor->flags());
@@ -3202,7 +3144,7 @@ void InstructionSelector::EmitIdentity(Node* node) {
void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
DeoptimizeReason reason,
FeedbackSource const& feedback,
- Node* frame_state) {
+ FrameState frame_state) {
InstructionOperandVector args(instruction_zone());
AppendDeoptimizeArguments(&args, kind, reason, feedback, frame_state);
Emit(kArchDeoptimize, 0, nullptr, args.size(), &args.front(), 0, nullptr);
@@ -3318,18 +3260,28 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64(Node* node,
namespace {
-FrameStateDescriptor* GetFrameStateDescriptorInternal(Zone* zone, Node* state) {
+FrameStateDescriptor* GetFrameStateDescriptorInternal(Zone* zone,
+ FrameState state) {
DCHECK_EQ(IrOpcode::kFrameState, state->opcode());
- DCHECK_EQ(kFrameStateInputCount, state->InputCount());
+ DCHECK_EQ(FrameState::kFrameStateInputCount, state->InputCount());
const FrameStateInfo& state_info = FrameStateInfoOf(state->op());
int parameters = state_info.parameter_count();
int locals = state_info.local_count();
- int stack = state_info.type() == FrameStateType::kInterpretedFunction ? 1 : 0;
+ int stack = state_info.type() == FrameStateType::kUnoptimizedFunction ? 1 : 0;
FrameStateDescriptor* outer_state = nullptr;
- Node* outer_node = state->InputAt(kFrameStateOuterStateInput);
- if (outer_node->opcode() == IrOpcode::kFrameState) {
- outer_state = GetFrameStateDescriptorInternal(zone, outer_node);
+ if (state.has_outer_frame_state()) {
+ outer_state =
+ GetFrameStateDescriptorInternal(zone, state.outer_frame_state());
+ }
+
+ if (state_info.type() == FrameStateType::kJSToWasmBuiltinContinuation) {
+ auto function_info = static_cast<const JSToWasmFrameStateFunctionInfo*>(
+ state_info.function_info());
+ return zone->New<JSToWasmFrameStateDescriptor>(
+ zone, state_info.type(), state_info.bailout_id(),
+ state_info.state_combine(), parameters, locals, stack,
+ state_info.shared_info(), outer_state, function_info->signature());
}
return zone->New<FrameStateDescriptor>(
@@ -3341,7 +3293,7 @@ FrameStateDescriptor* GetFrameStateDescriptorInternal(Zone* zone, Node* state) {
} // namespace
FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
- Node* state) {
+ FrameState state) {
auto* desc = GetFrameStateDescriptorInternal(instruction_zone(), state);
*max_unoptimized_frame_height_ =
std::max(*max_unoptimized_frame_height_,
diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h
index 18bc4ccfcb..4a65b5193e 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.h
+++ b/deps/v8/src/compiler/backend/instruction-selector.h
@@ -491,7 +491,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void AppendDeoptimizeArguments(InstructionOperandVector* args,
DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback,
- Node* frame_state);
+ FrameState frame_state);
void EmitTableSwitch(const SwitchInfo& sw,
InstructionOperand const& index_operand);
@@ -561,13 +561,12 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
CallBufferFlags flags, bool is_tail_call,
int stack_slot_delta = 0);
bool IsTailCallAddressImmediate();
- int GetTempsCountForTailCallFromJSFunction();
void UpdateMaxPushedArgumentCount(size_t count);
- FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
+ FrameStateDescriptor* GetFrameStateDescriptor(FrameState node);
size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
- Node* state, OperandGenerator* g,
+ FrameState state, OperandGenerator* g,
StateObjectDeduplicator* deduplicator,
InstructionOperandVector* inputs,
FrameStateInputKind kind, Zone* zone);
@@ -628,7 +627,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
void VisitSwitch(Node* node, const SwitchInfo& sw);
void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, Node* frame_state);
+ FeedbackSource const& feedback, FrameState frame_state);
void VisitReturn(Node* ret);
void VisitThrow(Node* node);
void VisitRetain(Node* node);
diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc
index e1e54c9d9f..a14ae2a702 100644
--- a/deps/v8/src/compiler/backend/instruction.cc
+++ b/deps/v8/src/compiler/backend/instruction.cc
@@ -14,8 +14,10 @@
#include "src/compiler/graph.h"
#include "src/compiler/node.h"
#include "src/compiler/schedule.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frames.h"
#include "src/utils/ostreams.h"
+#include "src/wasm/value-type.h"
namespace v8 {
namespace internal {
@@ -999,31 +1001,32 @@ namespace {
size_t GetConservativeFrameSizeInBytes(FrameStateType type,
size_t parameters_count,
size_t locals_count,
- BailoutId bailout_id) {
+ BytecodeOffset bailout_id) {
switch (type) {
- case FrameStateType::kInterpretedFunction: {
- auto info = InterpretedFrameInfo::Conservative(
+ case FrameStateType::kUnoptimizedFunction: {
+ auto info = UnoptimizedFrameInfo::Conservative(
static_cast<int>(parameters_count), static_cast<int>(locals_count));
return info.frame_size_in_bytes();
}
- case FrameStateType::kArgumentsAdaptor: {
- auto info = ArgumentsAdaptorFrameInfo::Conservative(
- static_cast<int>(parameters_count));
- return info.frame_size_in_bytes();
- }
+ case FrameStateType::kArgumentsAdaptor:
+ // The arguments adaptor frame state is only used in the deoptimizer and
+ // does not occupy any extra space in the stack. Check out the design doc:
+ // https://docs.google.com/document/d/150wGaUREaZI6YWqOQFD5l2mWQXaPbbZjcAIJLOFrzMs/edit
+ return 0;
case FrameStateType::kConstructStub: {
auto info = ConstructStubFrameInfo::Conservative(
static_cast<int>(parameters_count));
return info.frame_size_in_bytes();
}
case FrameStateType::kBuiltinContinuation:
+ case FrameStateType::kJSToWasmBuiltinContinuation:
case FrameStateType::kJavaScriptBuiltinContinuation:
case FrameStateType::kJavaScriptBuiltinContinuationWithCatch: {
const RegisterConfiguration* config = RegisterConfiguration::Default();
auto info = BuiltinContinuationFrameInfo::Conservative(
static_cast<int>(parameters_count),
Builtins::CallInterfaceDescriptorFor(
- Builtins::GetBuiltinFromBailoutId(bailout_id)),
+ Builtins::GetBuiltinFromBytecodeOffset(bailout_id)),
config);
return info.frame_size_in_bytes();
}
@@ -1034,7 +1037,7 @@ size_t GetConservativeFrameSizeInBytes(FrameStateType type,
size_t GetTotalConservativeFrameSizeInBytes(FrameStateType type,
size_t parameters_count,
size_t locals_count,
- BailoutId bailout_id,
+ BytecodeOffset bailout_id,
FrameStateDescriptor* outer_state) {
size_t outer_total_conservative_frame_size_in_bytes =
(outer_state == nullptr)
@@ -1048,7 +1051,7 @@ size_t GetTotalConservativeFrameSizeInBytes(FrameStateType type,
} // namespace
FrameStateDescriptor::FrameStateDescriptor(
- Zone* zone, FrameStateType type, BailoutId bailout_id,
+ Zone* zone, FrameStateType type, BytecodeOffset bailout_id,
OutputFrameStateCombine state_combine, size_t parameters_count,
size_t locals_count, size_t stack_count,
MaybeHandle<SharedFunctionInfo> shared_info,
@@ -1068,9 +1071,10 @@ FrameStateDescriptor::FrameStateDescriptor(
size_t FrameStateDescriptor::GetHeight() const {
switch (type()) {
- case FrameStateType::kInterpretedFunction:
+ case FrameStateType::kUnoptimizedFunction:
return locals_count(); // The accumulator is *not* included.
case FrameStateType::kBuiltinContinuation:
+ case FrameStateType::kJSToWasmBuiltinContinuation:
// Custom, non-JS calling convention (that does not have a notion of
// a receiver or context).
return parameters_count();
@@ -1122,6 +1126,17 @@ size_t FrameStateDescriptor::GetJSFrameCount() const {
return count;
}
+JSToWasmFrameStateDescriptor::JSToWasmFrameStateDescriptor(
+ Zone* zone, FrameStateType type, BytecodeOffset bailout_id,
+ OutputFrameStateCombine state_combine, size_t parameters_count,
+ size_t locals_count, size_t stack_count,
+ MaybeHandle<SharedFunctionInfo> shared_info,
+ FrameStateDescriptor* outer_state, const wasm::FunctionSig* wasm_signature)
+ : FrameStateDescriptor(zone, type, bailout_id, state_combine,
+ parameters_count, locals_count, stack_count,
+ shared_info, outer_state),
+ return_type_(wasm::WasmReturnTypeFromSignature(wasm_signature)) {}
+
std::ostream& operator<<(std::ostream& os, const RpoNumber& rpo) {
return os << rpo.ToSize();
}
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index 55fce0aeeb..9aa808491a 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -1300,7 +1300,8 @@ class StateValueList {
class FrameStateDescriptor : public ZoneObject {
public:
- FrameStateDescriptor(Zone* zone, FrameStateType type, BailoutId bailout_id,
+ FrameStateDescriptor(Zone* zone, FrameStateType type,
+ BytecodeOffset bailout_id,
OutputFrameStateCombine state_combine,
size_t parameters_count, size_t locals_count,
size_t stack_count,
@@ -1308,7 +1309,7 @@ class FrameStateDescriptor : public ZoneObject {
FrameStateDescriptor* outer_state = nullptr);
FrameStateType type() const { return type_; }
- BailoutId bailout_id() const { return bailout_id_; }
+ BytecodeOffset bailout_id() const { return bailout_id_; }
OutputFrameStateCombine state_combine() const { return frame_state_combine_; }
size_t parameters_count() const { return parameters_count_; }
size_t locals_count() const { return locals_count_; }
@@ -1318,6 +1319,7 @@ class FrameStateDescriptor : public ZoneObject {
bool HasContext() const {
return FrameStateFunctionInfo::IsJSFunctionType(type_) ||
type_ == FrameStateType::kBuiltinContinuation ||
+ type_ == FrameStateType::kJSToWasmBuiltinContinuation ||
type_ == FrameStateType::kConstructStub;
}
@@ -1346,7 +1348,7 @@ class FrameStateDescriptor : public ZoneObject {
private:
FrameStateType type_;
- BailoutId bailout_id_;
+ BytecodeOffset bailout_id_;
OutputFrameStateCombine frame_state_combine_;
const size_t parameters_count_;
const size_t locals_count_;
@@ -1357,6 +1359,23 @@ class FrameStateDescriptor : public ZoneObject {
FrameStateDescriptor* const outer_state_;
};
+class JSToWasmFrameStateDescriptor : public FrameStateDescriptor {
+ public:
+ JSToWasmFrameStateDescriptor(Zone* zone, FrameStateType type,
+ BytecodeOffset bailout_id,
+ OutputFrameStateCombine state_combine,
+ size_t parameters_count, size_t locals_count,
+ size_t stack_count,
+ MaybeHandle<SharedFunctionInfo> shared_info,
+ FrameStateDescriptor* outer_state,
+ const wasm::FunctionSig* wasm_signature);
+
+ base::Optional<wasm::ValueKind> return_type() const { return return_type_; }
+
+ private:
+ base::Optional<wasm::ValueKind> return_type_;
+};
+
// A deoptimization entry is a pair of the reason why we deoptimize and the
// frame state descriptor that we have to go back to.
class DeoptimizationEntry final {
diff --git a/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc b/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc
index 43808526a8..e84f0d9439 100644
--- a/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc
+++ b/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc
@@ -263,8 +263,8 @@ class DeferredBlocksRegion final {
// a spill slot until we enter this deferred block region.
void DeferSpillOutputUntilEntry(int vreg) { spilled_vregs_.insert(vreg); }
- ZoneSet<int>::iterator begin() const { return spilled_vregs_.begin(); }
- ZoneSet<int>::iterator end() const { return spilled_vregs_.end(); }
+ ZoneSet<int>::const_iterator begin() const { return spilled_vregs_.begin(); }
+ ZoneSet<int>::const_iterator end() const { return spilled_vregs_.end(); }
const BitVector* blocks_covered() const { return &blocks_covered_; }
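
The begin()/end() accessors above are const member functions over a set-like member, so returning const_iterator states the correct type. A minimal illustration of the general rule, using std::vector (where the wrong spelling is a hard compile error) rather than ZoneSet, is sketched here:

#include <vector>

// Illustration only: inside a const member function, members are const, so
// their begin()/end() yield const_iterator; declaring const_iterator in the
// accessor's return type matches that.
class Region {
 public:
  std::vector<int>::const_iterator begin() const { return vregs_.begin(); }
  std::vector<int>::const_iterator end() const { return vregs_.end(); }
  // std::vector<int>::iterator begin() const { return vregs_.begin(); }
  // ^ would not compile: const_iterator does not convert to iterator.

 private:
  std::vector<int> vregs_{1, 2, 3};
};

int main() {
  int sum = 0;
  Region r;
  for (int v : r) sum += v;
  return sum == 6 ? 0 : 1;
}
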
@@ -295,17 +295,18 @@ class VirtualRegisterData final {
// Spill an operand that is assigned to this virtual register.
void SpillOperand(InstructionOperand* operand, int instr_index,
+ bool has_constant_policy,
MidTierRegisterAllocationData* data);
// Emit gap moves to / from the spill slot.
- void EmitGapMoveToInputFromSpillSlot(AllocatedOperand to_operand,
+ void EmitGapMoveToInputFromSpillSlot(InstructionOperand to_operand,
int instr_index,
MidTierRegisterAllocationData* data);
- void EmitGapMoveFromOutputToSpillSlot(AllocatedOperand from_operand,
+ void EmitGapMoveFromOutputToSpillSlot(InstructionOperand from_operand,
const InstructionBlock* current_block,
int instr_index,
MidTierRegisterAllocationData* data);
- void EmitGapMoveToSpillSlot(AllocatedOperand from_operand, int instr_index,
+ void EmitGapMoveToSpillSlot(InstructionOperand from_operand, int instr_index,
MidTierRegisterAllocationData* data);
// Adds pending spills for deferred-blocks.
@@ -328,14 +329,14 @@ class VirtualRegisterData final {
return HasSpillOperand() && spill_operand_->IsAllocated();
}
bool HasConstantSpillOperand() const {
- DCHECK_EQ(is_constant(), HasSpillOperand() && spill_operand_->IsConstant());
- return is_constant();
+ return HasSpillOperand() && spill_operand_->IsConstant();
}
// Returns true if the virtual register should be spilled when it is output.
bool NeedsSpillAtOutput() const { return needs_spill_at_output_; }
+
void MarkAsNeedsSpillAtOutput() {
- if (is_constant()) return;
+ if (HasConstantSpillOperand()) return;
needs_spill_at_output_ = true;
if (HasSpillRange()) spill_range()->ClearDeferredBlockSpills();
}
@@ -548,7 +549,8 @@ void VirtualRegisterData::DefineAsPhi(int virtual_register, int instr_index,
void VirtualRegisterData::EnsureSpillRange(
MidTierRegisterAllocationData* data) {
- DCHECK(!is_constant());
+ DCHECK(!HasConstantSpillOperand());
+
if (HasSpillRange()) return;
const InstructionBlock* definition_block =
@@ -578,13 +580,15 @@ void VirtualRegisterData::EnsureSpillRange(
void VirtualRegisterData::AddSpillUse(int instr_index,
MidTierRegisterAllocationData* data) {
- if (is_constant()) return;
+ if (HasConstantSpillOperand()) return;
EnsureSpillRange(data);
spill_range_->ExtendRangeTo(instr_index);
const InstructionBlock* block = data->GetBlock(instr_index);
if (CouldSpillOnEntryToDeferred(block)) {
+ // TODO(1180335): Remove once crbug.com/1180335 is fixed.
+ CHECK(HasSpillRange());
data->block_state(block->rpo_number())
.deferred_blocks_region()
->DeferSpillOutputUntilEntry(vreg());
@@ -610,12 +614,21 @@ void VirtualRegisterData::AddDeferredSpillOutput(
AllocatedOperand allocated_op, int instr_index,
MidTierRegisterAllocationData* data) {
DCHECK(!NeedsSpillAtOutput());
+ // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
+ CHECK(HasSpillRange());
spill_range_->AddDeferredSpillOutput(allocated_op, instr_index, data);
}
void VirtualRegisterData::SpillOperand(InstructionOperand* operand,
int instr_index,
+ bool has_constant_policy,
MidTierRegisterAllocationData* data) {
+ if (!has_constant_policy && HasConstantSpillOperand()) {
+ // Reset the constant spill operand to force a real spill slot since this
+ // operand can't use the constant spill operand.
+ spill_operand_ = nullptr;
+ DCHECK(!HasConstantSpillOperand());
+ }
AddSpillUse(instr_index, data);
if (HasAllocatedSpillOperand() || HasConstantSpillOperand()) {
InstructionOperand::ReplaceWith(operand, spill_operand());
@@ -640,7 +653,7 @@ void VirtualRegisterData::EmitDeferredSpillOutputs(
}
void VirtualRegisterData::EmitGapMoveToInputFromSpillSlot(
- AllocatedOperand to_operand, int instr_index,
+ InstructionOperand to_operand, int instr_index,
MidTierRegisterAllocationData* data) {
AddSpillUse(instr_index, data);
DCHECK(!to_operand.IsPending());
@@ -656,7 +669,7 @@ void VirtualRegisterData::EmitGapMoveToInputFromSpillSlot(
}
void VirtualRegisterData::EmitGapMoveToSpillSlot(
- AllocatedOperand from_operand, int instr_index,
+ InstructionOperand from_operand, int instr_index,
MidTierRegisterAllocationData* data) {
AddSpillUse(instr_index, data);
if (HasAllocatedSpillOperand() || HasConstantSpillOperand()) {
@@ -671,7 +684,7 @@ void VirtualRegisterData::EmitGapMoveToSpillSlot(
}
void VirtualRegisterData::EmitGapMoveFromOutputToSpillSlot(
- AllocatedOperand from_operand, const InstructionBlock* current_block,
+ InstructionOperand from_operand, const InstructionBlock* current_block,
int instr_index, MidTierRegisterAllocationData* data) {
DCHECK_EQ(data->GetBlock(instr_index), current_block);
if (instr_index == current_block->last_instruction_index()) {
@@ -760,7 +773,8 @@ class RegisterState final : public ZoneObject {
// this register, then |operand| will be too, otherwise |operand| will be
// replaced with |virtual_register|'s spill operand.
void AllocatePendingUse(RegisterIndex reg, int virtual_register,
- InstructionOperand* operand, int instr_index);
+ InstructionOperand* operand, bool can_be_constant,
+ int instr_index);
// Mark that the register is holding a phi operand that is yet to be allocated
// by the source block in the gap just before the last instruction in the
@@ -816,7 +830,7 @@ class RegisterState final : public ZoneObject {
MidTierRegisterAllocationData* data);
void Use(int virtual_register, int instr_index);
void PendingUse(InstructionOperand* operand, int virtual_register,
- int instr_index);
+ bool can_be_constant, int instr_index);
void SpillForDeferred(AllocatedOperand allocated, int instr_index,
MidTierRegisterAllocationData* data);
void MoveToSpillSlotOnDeferred(int virtual_register, int instr_index,
@@ -881,6 +895,7 @@ class RegisterState final : public ZoneObject {
bool needs_gap_move_on_spill_;
bool is_shared_;
bool is_phi_gap_move_;
+ bool pending_uses_can_use_constant_;
int last_use_instr_index_;
int num_commits_required_;
@@ -910,6 +925,7 @@ void RegisterState::Register::Reset() {
is_shared_ = false;
is_phi_gap_move_ = false;
needs_gap_move_on_spill_ = false;
+ pending_uses_can_use_constant_ = true;
last_use_instr_index_ = -1;
num_commits_required_ = 0;
virtual_register_ = InstructionOperand::kInvalidVirtualRegister;
@@ -921,7 +937,9 @@ void RegisterState::Register::Use(int virtual_register, int instr_index) {
// A register can have many pending uses, but should only ever have a single
// non-pending use, since any subsiquent use will commit the preceeding use
// first.
- DCHECK(!is_allocated());
+ // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
+ CHECK(!is_allocated());
+ CHECK(!is_shared());
needs_gap_move_on_spill_ = true;
virtual_register_ = virtual_register;
last_use_instr_index_ = instr_index;
@@ -930,13 +948,17 @@ void RegisterState::Register::Use(int virtual_register, int instr_index) {
void RegisterState::Register::PendingUse(InstructionOperand* operand,
int virtual_register,
+ bool can_be_constant,
int instr_index) {
+ // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
+ CHECK(!was_spilled_while_shared());
if (!is_allocated()) {
virtual_register_ = virtual_register;
last_use_instr_index_ = instr_index;
num_commits_required_ = 1;
}
DCHECK_EQ(virtual_register_, virtual_register);
+ pending_uses_can_use_constant_ &= can_be_constant;
PendingOperand pending_op(pending_uses());
InstructionOperand::ReplaceWith(operand, &pending_op);
@@ -950,7 +972,8 @@ void RegisterState::Register::MarkAsPhiMove() {
void RegisterState::Register::AddDeferredBlockSpill(int instr_index,
bool on_exit, Zone* zone) {
- DCHECK(is_allocated());
+ // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
+ CHECK(is_allocated());
if (!deferred_block_spills_) {
deferred_block_spills_.emplace(zone);
}
@@ -958,23 +981,27 @@ void RegisterState::Register::AddDeferredBlockSpill(int instr_index,
}
void RegisterState::Register::AddSharedUses(int shared_use_count) {
+ // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
+ CHECK(!was_spilled_while_shared());
is_shared_ = true;
num_commits_required_ += shared_use_count;
}
void RegisterState::Register::CommitAtMerge() {
- DCHECK(is_shared());
- DCHECK(is_allocated());
+ // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
+ CHECK(is_shared());
+ CHECK(is_allocated());
--num_commits_required_;
// We should still have commits required that will be resolved in the merge
// block.
- DCHECK_GT(num_commits_required_, 0);
+ CHECK_GT(num_commits_required_, 0);
}
void RegisterState::Register::Commit(AllocatedOperand allocated_op,
MidTierRegisterAllocationData* data) {
- DCHECK(is_allocated());
- DCHECK_GT(num_commits_required_, 0);
+ // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
+ CHECK(is_allocated());
+ CHECK_GT(num_commits_required_, 0);
if (--num_commits_required_ == 0) {
// Allocate all pending uses to |allocated_op| if this commit is non-shared,
@@ -1011,7 +1038,8 @@ void RegisterState::Register::Commit(AllocatedOperand allocated_op,
vreg_data.EmitDeferredSpillOutputs(data);
}
}
- DCHECK_IMPLIES(num_commits_required_ > 0, is_shared());
+ // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
+ CHECK_IMPLIES(num_commits_required_ > 0, is_shared());
}
void RegisterState::Register::Spill(AllocatedOperand allocated_op,
@@ -1030,7 +1058,12 @@ void RegisterState::Register::Spill(AllocatedOperand allocated_op,
if (has_deferred_block_spills() || !current_block->IsDeferred()) {
vreg_data.MarkAsNeedsSpillAtOutput();
}
- virtual_register_ = InstructionOperand::kInvalidVirtualRegister;
+ // TODO(1180335): Doing a full reset here shouldn't be necessary, but
+ // investigate if it fixes crbug.com/1180335.
+ bool is_shared = is_shared_;
+ Reset();
+ is_shared_ = is_shared;
+ CHECK_IMPLIES(is_shared_, was_spilled_while_shared());
}
void RegisterState::Register::SpillPhiGapMove(
@@ -1063,7 +1096,8 @@ void RegisterState::Register::SpillPendingUses(
while (pending_use) {
// Spill all the pending operands associated with this register.
PendingOperand* next = pending_use->next();
- vreg_data.SpillOperand(pending_use, last_use_instr_index(), data);
+ vreg_data.SpillOperand(pending_use, last_use_instr_index(),
+ pending_uses_can_use_constant_, data);
pending_use = next;
}
pending_uses_ = nullptr;
@@ -1072,8 +1106,9 @@ void RegisterState::Register::SpillPendingUses(
void RegisterState::Register::SpillForDeferred(
AllocatedOperand allocated, int instr_index,
MidTierRegisterAllocationData* data) {
- DCHECK(is_allocated());
- DCHECK(is_shared());
+ // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
+ CHECK(is_allocated());
+ CHECK(is_shared());
// Add a pending deferred spill, then commit the register (with the commit
   // being fulfilled by the deferred spill if the register is fully committed).
data->VirtualRegisterDataFor(virtual_register())
@@ -1085,6 +1120,8 @@ void RegisterState::Register::SpillForDeferred(
void RegisterState::Register::MoveToSpillSlotOnDeferred(
int virtual_register, int instr_index,
MidTierRegisterAllocationData* data) {
+ // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
+ CHECK(!was_spilled_while_shared());
if (!is_allocated()) {
virtual_register_ = virtual_register;
last_use_instr_index_ = instr_index;
@@ -1158,9 +1195,10 @@ void RegisterState::AllocateUse(RegisterIndex reg, int virtual_register,
void RegisterState::AllocatePendingUse(RegisterIndex reg, int virtual_register,
InstructionOperand* operand,
- int instr_index) {
+ bool can_be_constant, int instr_index) {
EnsureRegisterData(reg);
- reg_data(reg).PendingUse(operand, virtual_register, instr_index);
+ reg_data(reg).PendingUse(operand, virtual_register, can_be_constant,
+ instr_index);
}
void RegisterState::UseForPhiGapMove(RegisterIndex reg) {
@@ -1297,7 +1335,7 @@ class SinglePassRegisterAllocator final {
// Allocation routines used to allocate a particular operand to either a
// register or a spill slot.
- void AllocateConstantOutput(ConstantOperand* operand);
+ void AllocateConstantOutput(ConstantOperand* operand, int instr_index);
void AllocateOutput(UnallocatedOperand* operand, int instr_index);
void AllocateInput(UnallocatedOperand* operand, int instr_index);
void AllocateSameInputOutput(UnallocatedOperand* output,
@@ -1387,7 +1425,8 @@ class SinglePassRegisterAllocator final {
// register is not subsequently spilled) for |operand| of the instruction at
// |instr_index|.
void AllocatePendingUse(RegisterIndex reg, int virtual_register,
- InstructionOperand* operand, int instr_index);
+ InstructionOperand* operand, bool can_be_constant,
+ int instr_index);
// Allocate |operand| to |reg| and add a gap move to move |virtual_register|
// to this register for the instruction at |instr_index|. |reg| will be
@@ -1498,6 +1537,7 @@ class SinglePassRegisterAllocator final {
RegisterBitVector in_use_at_instr_start_bits_;
RegisterBitVector in_use_at_instr_end_bits_;
RegisterBitVector allocated_registers_bits_;
+ RegisterBitVector same_input_output_registers_bits_;
// These fields are only used when kSimpleFPAliasing == false.
base::Optional<ZoneVector<RegisterIndex>> float32_reg_code_to_index_;
@@ -1523,7 +1563,8 @@ SinglePassRegisterAllocator::SinglePassRegisterAllocator(
data_(data),
in_use_at_instr_start_bits_(),
in_use_at_instr_end_bits_(),
- allocated_registers_bits_() {
+ allocated_registers_bits_(),
+ same_input_output_registers_bits_() {
for (int i = 0; i < num_allocatable_registers_; i++) {
int reg_code = index_to_reg_code_[i];
reg_code_to_index_[reg_code] = RegisterIndex(i);
@@ -1591,6 +1632,7 @@ void SinglePassRegisterAllocator::UpdateForDeferredBlock(int instr_index) {
void SinglePassRegisterAllocator::EndInstruction() {
in_use_at_instr_end_bits_.Reset();
in_use_at_instr_start_bits_.Reset();
+ same_input_output_registers_bits_.Reset();
}
void SinglePassRegisterAllocator::StartBlock(const InstructionBlock* block) {
@@ -1599,6 +1641,7 @@ void SinglePassRegisterAllocator::StartBlock(const InstructionBlock* block) {
DCHECK(in_use_at_instr_start_bits_.IsEmpty());
DCHECK(in_use_at_instr_end_bits_.IsEmpty());
DCHECK(allocated_registers_bits_.IsEmpty());
+ DCHECK(same_input_output_registers_bits_.IsEmpty());
// Update the current block we are processing.
current_block_ = block;
@@ -1617,6 +1660,7 @@ void SinglePassRegisterAllocator::StartBlock(const InstructionBlock* block) {
void SinglePassRegisterAllocator::EndBlock(const InstructionBlock* block) {
DCHECK(in_use_at_instr_start_bits_.IsEmpty());
DCHECK(in_use_at_instr_end_bits_.IsEmpty());
+ DCHECK(same_input_output_registers_bits_.IsEmpty());
// If we didn't allocate any registers of this kind, or we have reached the
// start, nothing to do here.
@@ -1766,7 +1810,8 @@ void SinglePassRegisterAllocator::MoveRegisterOnMerge(
data()->AddPendingOperandGapMove(instr_index, Instruction::START);
succ_state->Commit(to, AllocatedOperandForReg(to, virtual_register),
&move->destination(), data());
- AllocatePendingUse(from, virtual_register, &move->source(), instr_index);
+ AllocatePendingUse(from, virtual_register, &move->source(), true,
+ instr_index);
}
void SinglePassRegisterAllocator::UpdateVirtualRegisterState() {
@@ -1903,6 +1948,9 @@ void SinglePassRegisterAllocator::FreeRegister(RegisterIndex reg,
RegisterIndex SinglePassRegisterAllocator::ChooseRegisterFor(
VirtualRegisterData& virtual_register, int instr_index, UsePosition pos,
bool must_use_register) {
+ DCHECK_NE(pos, UsePosition::kNone);
+ MachineRepresentation rep = RepresentationFor(virtual_register.vreg());
+
// If register is already allocated to the virtual register, use that.
RegisterIndex reg = RegisterForVirtualRegister(virtual_register.vreg());
@@ -1910,14 +1958,24 @@ RegisterIndex SinglePassRegisterAllocator::ChooseRegisterFor(
// register hasn't yet been spilled, to try to avoid spilling it.
if (!reg.is_valid() && (must_use_register ||
!virtual_register.IsSpilledAt(instr_index, data()))) {
- reg = ChooseRegisterFor(RepresentationFor(virtual_register.vreg()), pos,
- must_use_register);
+ reg = ChooseRegisterFor(rep, pos, must_use_register);
+ } else if (reg.is_valid() &&
+ same_input_output_registers_bits_.Contains(reg, rep) &&
+ pos != UsePosition::kStart) {
+ // If we are trying to allocate a register that was used as a
+ // same_input_output operand, then we can't use it for an input that expands
+ // past UsePosition::kStart. This should only happen for REGISTER_OR_SLOT
+ // operands that are used for the deopt state, so we can just use a spill
+ // slot.
+ CHECK(!must_use_register);
+ return RegisterIndex::Invalid();
}
return reg;
}
RegisterIndex SinglePassRegisterAllocator::ChooseRegisterFor(
MachineRepresentation rep, UsePosition pos, bool must_use_register) {
+ DCHECK_NE(pos, UsePosition::kNone);
RegisterIndex reg = ChooseFreeRegister(rep, pos);
if (!reg.is_valid() && must_use_register) {
reg = ChooseRegisterToSpill(rep, pos);
@@ -2082,6 +2140,8 @@ void SinglePassRegisterAllocator::AllocateDeferredBlockSpillOutput(
DCHECK(data()->GetBlock(deferred_block)->IsDeferred());
VirtualRegisterData& vreg_data =
data()->VirtualRegisterDataFor(virtual_register);
+ // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
+ CHECK(vreg_data.HasSpillRange());
if (!vreg_data.NeedsSpillAtOutput() &&
!DefinedAfter(virtual_register, instr_index, UsePosition::kEnd)) {
// If a register has been assigned to the virtual register, and the virtual
@@ -2127,12 +2187,12 @@ void SinglePassRegisterAllocator::AllocateUse(RegisterIndex reg,
void SinglePassRegisterAllocator::AllocatePendingUse(
RegisterIndex reg, int virtual_register, InstructionOperand* operand,
- int instr_index) {
+ bool can_be_constant, int instr_index) {
DCHECK_NE(virtual_register, InstructionOperand::kInvalidVirtualRegister);
DCHECK(IsFreeOrSameVirtualRegister(reg, virtual_register));
register_state()->AllocatePendingUse(reg, virtual_register, operand,
- instr_index);
+ can_be_constant, instr_index);
// Since this is a pending use and the operand doesn't need to use a register,
   // allocate with UsePosition::kNone to avoid blocking its use by other
// operands in this instruction.
@@ -2145,7 +2205,7 @@ void SinglePassRegisterAllocator::AllocateUseWithMove(
int instr_index, UsePosition pos) {
AllocatedOperand to = AllocatedOperandForReg(reg, virtual_register);
UnallocatedOperand from = UnallocatedOperand(
- UnallocatedOperand::REGISTER_OR_SLOT, virtual_register);
+ UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT, virtual_register);
data()->AddGapMove(instr_index, Instruction::END, from, to);
InstructionOperand::ReplaceWith(operand, &to);
MarkRegisterUse(reg, RepresentationFor(virtual_register), pos);
@@ -2169,17 +2229,17 @@ void SinglePassRegisterAllocator::AllocateInput(UnallocatedOperand* operand,
// instruction since the allocation needs to reflect the state before
// the instruction (at the gap move). For now spilling is fine since
// fixed slot inputs are uncommon.
- UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
- virtual_register);
+ UnallocatedOperand input_copy(
+ UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT, virtual_register);
AllocatedOperand allocated = AllocatedOperand(
AllocatedOperand::STACK_SLOT, rep, operand->fixed_slot_index());
InstructionOperand::ReplaceWith(operand, &allocated);
MoveOperands* move_op =
data()->AddGapMove(instr_index, Instruction::END, input_copy, *operand);
- vreg_data.SpillOperand(&move_op->source(), instr_index, data());
+ vreg_data.SpillOperand(&move_op->source(), instr_index, true, data());
return;
} else if (operand->HasSlotPolicy()) {
- vreg_data.SpillOperand(operand, instr_index, data());
+ vreg_data.SpillOperand(operand, instr_index, false, data());
return;
}
@@ -2199,9 +2259,7 @@ void SinglePassRegisterAllocator::AllocateInput(UnallocatedOperand* operand,
AllocateUse(reg, virtual_register, operand, instr_index, pos);
}
} else {
- bool must_use_register = operand->HasRegisterPolicy() ||
- (vreg_data.is_constant() &&
- !operand->HasRegisterOrSlotOrConstantPolicy());
+ bool must_use_register = operand->HasRegisterPolicy();
RegisterIndex reg =
ChooseRegisterFor(vreg_data, instr_index, pos, must_use_register);
@@ -2209,10 +2267,14 @@ void SinglePassRegisterAllocator::AllocateInput(UnallocatedOperand* operand,
if (must_use_register) {
AllocateUse(reg, virtual_register, operand, instr_index, pos);
} else {
- AllocatePendingUse(reg, virtual_register, operand, instr_index);
+ AllocatePendingUse(reg, virtual_register, operand,
+ operand->HasRegisterOrSlotOrConstantPolicy(),
+ instr_index);
}
} else {
- vreg_data.SpillOperand(operand, instr_index, data());
+ vreg_data.SpillOperand(operand, instr_index,
+ operand->HasRegisterOrSlotOrConstantPolicy(),
+ data());
}
}
}
@@ -2224,23 +2286,28 @@ void SinglePassRegisterAllocator::AllocateGapMoveInput(
VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register);
// Gap move inputs should be unconstrained.
- DCHECK(operand->HasRegisterOrSlotPolicy());
+ DCHECK(operand->HasRegisterOrSlotOrConstantPolicy());
RegisterIndex reg =
ChooseRegisterFor(vreg_data, instr_index, UsePosition::kStart, false);
if (reg.is_valid()) {
- AllocatePendingUse(reg, virtual_register, operand, instr_index);
+ AllocatePendingUse(reg, virtual_register, operand, true, instr_index);
} else {
- vreg_data.SpillOperand(operand, instr_index, data());
+ vreg_data.SpillOperand(operand, instr_index, true, data());
}
}
void SinglePassRegisterAllocator::AllocateConstantOutput(
- ConstantOperand* operand) {
+ ConstantOperand* operand, int instr_index) {
EnsureRegisterState();
// If the constant is allocated to a register, spill it now to add the
// necessary gap moves from the constant operand to the register.
int virtual_register = operand->virtual_register();
+ VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register);
SpillRegisterForVirtualRegister(virtual_register);
+ if (vreg_data.NeedsSpillAtOutput()) {
+ vreg_data.EmitGapMoveFromOutputToSpillSlot(*operand, current_block(),
+ instr_index, data());
+ }
}
void SinglePassRegisterAllocator::AllocateOutput(UnallocatedOperand* operand,
@@ -2270,7 +2337,7 @@ RegisterIndex SinglePassRegisterAllocator::AllocateOutput(
// TODO(rmcilroy): support secondary storage.
if (!reg.is_valid()) {
- vreg_data.SpillOperand(operand, instr_index, data());
+ vreg_data.SpillOperand(operand, instr_index, false, data());
} else {
InstructionOperand move_output_to;
if (!VirtualRegisterIsUnallocatedOrInReg(virtual_register, reg)) {
@@ -2323,6 +2390,7 @@ void SinglePassRegisterAllocator::AllocateSameInputOutput(
MachineRepresentation rep = RepresentationFor(input_vreg);
UnallocatedOperand fixed_input(policy, ToRegCode(reg, rep), input_vreg);
InstructionOperand::ReplaceWith(input, &fixed_input);
+ same_input_output_registers_bits_.Add(reg, rep);
} else {
// Output was spilled. Due to the SameAsInput allocation policy, we need to
// make the input operand the same as the output, i.e., the output virtual
@@ -2330,14 +2398,14 @@ void SinglePassRegisterAllocator::AllocateSameInputOutput(
// virtual register's spill slot, then add a gap-move to move the input
// value into this spill slot.
VirtualRegisterData& output_vreg_data = VirtualRegisterDataFor(output_vreg);
- output_vreg_data.SpillOperand(input, instr_index, data());
+ output_vreg_data.SpillOperand(input, instr_index, false, data());
// Add an unconstrained gap move for the input virtual register.
- UnallocatedOperand unconstrained_input(UnallocatedOperand::REGISTER_OR_SLOT,
- input_vreg);
+ UnallocatedOperand unconstrained_input(
+ UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT, input_vreg);
MoveOperands* move_ops = data()->AddGapMove(
instr_index, Instruction::END, unconstrained_input, PendingOperand());
- output_vreg_data.SpillOperand(&move_ops->destination(), instr_index,
+ output_vreg_data.SpillOperand(&move_ops->destination(), instr_index, true,
data());
}
}
@@ -2365,7 +2433,9 @@ void SinglePassRegisterAllocator::AllocateTemp(UnallocatedOperand* operand,
CommitRegister(reg, virtual_register, operand, UsePosition::kAll);
} else {
VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register);
- vreg_data.SpillOperand(operand, instr_index, data());
+ vreg_data.SpillOperand(operand, instr_index,
+ operand->HasRegisterOrSlotOrConstantPolicy(),
+ data());
}
}
@@ -2444,12 +2514,12 @@ void SinglePassRegisterAllocator::AllocatePhiGapMove(int to_vreg, int from_vreg,
CommitRegister(to_register, to_vreg, to_operand, UsePosition::kAll);
} else {
VirtualRegisterDataFor(to_vreg).SpillOperand(to_operand, instr_index,
- data());
+ true, data());
}
// The from side is unconstrained.
- UnallocatedOperand unconstrained_input(UnallocatedOperand::REGISTER_OR_SLOT,
- from_vreg);
+ UnallocatedOperand unconstrained_input(
+ UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT, from_vreg);
InstructionOperand::ReplaceWith(from_operand, &unconstrained_input);
}
}
@@ -2729,9 +2799,8 @@ void MidTierRegisterAllocator::AllocateRegisters(
for (RpoNumber successor : block->successors()) {
if (!data()->GetBlock(successor)->IsDeferred()) continue;
DCHECK_GT(successor, block_rpo);
- for (int virtual_register :
+ for (const int virtual_register :
*data()->block_state(successor).deferred_blocks_region()) {
- USE(virtual_register);
AllocatorFor(RepresentationFor(virtual_register))
.AllocateDeferredBlockSpillOutput(block->last_instruction_index(),
successor, virtual_register);
@@ -2756,7 +2825,8 @@ void MidTierRegisterAllocator::AllocateRegisters(
DCHECK(!output->IsAllocated());
if (output->IsConstant()) {
ConstantOperand* constant_operand = ConstantOperand::cast(output);
- AllocatorFor(constant_operand).AllocateConstantOutput(constant_operand);
+ AllocatorFor(constant_operand)
+ .AllocateConstantOutput(constant_operand, instr_index);
} else {
UnallocatedOperand* unallocated_output =
UnallocatedOperand::cast(output);
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index 08f8ef7d07..79e8836bd0 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -305,8 +305,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
MipsOperandConverter const& i) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
@@ -546,30 +545,6 @@ void CodeGenerator::AssemblePrepareTailCall() {
}
frame_access_state()->SetFrameAccessToSP();
}
-
-void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Label done;
-
- // Check if current frame is an arguments adaptor frame.
- __ lw(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ Branch(&done, ne, scratch1,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Load arguments count from current arguments adaptor frame (note, it
- // does not include receiver).
- Register caller_args_count_reg = scratch1;
- __ lw(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
-
- __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3);
- __ bind(&done);
-}
-
namespace {
void AdjustStackPointerForTailCall(TurboAssembler* tasm,
@@ -694,13 +669,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
@@ -821,7 +790,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
if (isWasmCapiFunction) {
CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
- RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
+ RecordSafepoint(instr->reference_map());
}
frame_access_state()->SetFrameAccessToDefault();
@@ -2075,6 +2044,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kMipsI32x4ExtAddPairwiseI16x8S: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ hadd_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI32x4ExtAddPairwiseI16x8U: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ hadd_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(0));
+ break;
+ }
case kMipsF64x2Abs: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ bclri_d(i.OutputSimd128Register(), i.InputSimd128Register(0), 63);
@@ -2233,6 +2214,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ctcmsa(MSACSR, kScratchReg);
break;
}
+ case kMipsF64x2ConvertLowI32x4S: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0));
+ __ slli_d(kSimd128RegZero, kSimd128RegZero, 32);
+ __ srai_d(kSimd128RegZero, kSimd128RegZero, 32);
+ __ ffint_s_d(i.OutputSimd128Register(), kSimd128RegZero);
+ break;
+ }
+ case kMipsF64x2ConvertLowI32x4U: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0));
+ __ ffint_u_d(i.OutputSimd128Register(), kSimd128RegZero);
+ break;
+ }
+ case kMipsF64x2PromoteLowF32x4: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ fexupr_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kMipsI64x2Add: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ addv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -2295,6 +2297,65 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kMipsI64x2Ne: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ ceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ nor_v(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ i.OutputSimd128Register());
+ break;
+ }
+ case kMipsI64x2GtS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI64x2GeS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI64x2Abs: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ adds_a_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kSimd128RegZero);
+ break;
+ }
+ case kMipsI64x2SConvertI32x4Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvr_w(kSimd128ScratchReg, src, src);
+ __ slli_d(dst, kSimd128ScratchReg, 32);
+ __ srai_d(dst, dst, 32);
+ break;
+ }
+ case kMipsI64x2SConvertI32x4High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvl_w(kSimd128ScratchReg, src, src);
+ __ slli_d(dst, kSimd128ScratchReg, 32);
+ __ srai_d(dst, dst, 32);
+ break;
+ }
+ case kMipsI64x2UConvertI32x4Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_w(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI64x2UConvertI32x4High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvl_w(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
case kMipsI64x2ExtMulLowI32x4S:
ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvr_w, dotp_s_d);
break;
@@ -2364,6 +2425,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
+ case kMipsF32x4DemoteF64x2Zero: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ fexdo_w(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
case kMipsI32x4Mul: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -2648,6 +2716,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kMipsI32x4TruncSatF64x2SZero: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ftrunc_s_d(kSimd128ScratchReg, i.InputSimd128Register(0));
+ __ sat_s_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
+ __ pckev_w(i.OutputSimd128Register(), kSimd128RegZero,
+ kSimd128ScratchReg);
+ break;
+ }
+ case kMipsI32x4TruncSatF64x2UZero: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ftrunc_u_d(kSimd128ScratchReg, i.InputSimd128Register(0));
+ __ sat_u_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
+ __ pckev_w(i.OutputSimd128Register(), kSimd128RegZero,
+ kSimd128ScratchReg);
+ break;
+ }
case kMipsI16x8Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
@@ -2838,6 +2924,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kMipsI16x8ExtAddPairwiseI8x16S: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ hadd_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI16x8ExtAddPairwiseI8x16U: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ hadd_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(0));
+ break;
+ }
case kMipsI8x16Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
@@ -3005,6 +3103,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kSimd128RegZero);
break;
}
+ case kMipsI8x16Popcnt: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ pcnt_b(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kMipsI8x16BitMask: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
@@ -3047,9 +3150,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
- case kMipsV32x4AnyTrue:
- case kMipsV16x8AnyTrue:
- case kMipsV8x16AnyTrue: {
+ case kMipsV128AnyTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_false;
@@ -3061,6 +3162,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_false);
break;
}
+ case kMipsV64x2AllTrue: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ BranchMSA(&all_true, MSA_BRANCH_D, all_not_zero,
+ i.InputSimd128Register(0), USE_DELAY_SLOT);
+ __ li(dst, -1); // branch delay slot
+ __ li(dst, 0);
+ __ bind(&all_true);
+ break;
+ }
case kMipsV32x4AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
@@ -3769,7 +3881,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
gen_->zone()->New<ReferenceMap>(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ gen_->RecordSafepoint(reference_map);
if (FLAG_debug_code) {
__ stop();
}
@@ -4059,7 +4171,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
- RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ RecordSafepoint(reference_map);
if (FLAG_debug_code) {
__ stop();
}
@@ -4130,7 +4242,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
Operand(static_cast<int64_t>(0)));
}
}
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// Functions with JS linkage have at least one parameter (the receiver).
// If {parameter_count} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
@@ -4138,9 +4249,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
parameter_count != 0;
-#else
- const bool drop_jsargs = false;
-#endif
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
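
A pattern that recurs throughout this change is decoding the memory-access mode from a dedicated AccessModeField of the InstructionCode (and, later in the patch, packing MSA lane sizes into MiscField) instead of casting a raw MiscField::decode result. The sketch below is a minimal, self-contained illustration of that bitfield encode/decode idea only; the Field template, the 9/2-bit layout, and OpcodeField are invented for the example and are not V8's actual BitField definitions.

    // Illustrative sketch of packing an access mode into an opcode word.
    // Field layout and names are assumptions for this example, not V8 code.
    #include <cassert>
    #include <cstdint>

    enum MemoryAccessMode : uint32_t { kMemoryAccessDirect = 0, kMemoryAccessPoisoned = 1 };

    template <typename T, int kShift, int kBits>
    struct Field {
      static constexpr uint32_t kMask = ((1u << kBits) - 1) << kShift;
      static uint32_t encode(T value) { return static_cast<uint32_t>(value) << kShift; }
      static T decode(uint32_t word) { return static_cast<T>((word & kMask) >> kShift); }
    };

    using OpcodeField = Field<uint32_t, 0, 9>;         // low bits: the opcode itself
    using AccessModeField = Field<MemoryAccessMode, 9, 2>;  // next bits: access mode

    int main() {
      uint32_t code = OpcodeField::encode(42) | AccessModeField::encode(kMemoryAccessPoisoned);
      assert(AccessModeField::decode(code) == kMemoryAccessPoisoned);
      assert(OpcodeField::decode(code) == 42u);
      return 0;
    }
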
diff --git a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
index 6bc14ca317..2048cbfe40 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
@@ -160,6 +160,9 @@ namespace compiler {
V(MipsF64x2Floor) \
V(MipsF64x2Trunc) \
V(MipsF64x2NearestInt) \
+ V(MipsF64x2ConvertLowI32x4S) \
+ V(MipsF64x2ConvertLowI32x4U) \
+ V(MipsF64x2PromoteLowF32x4) \
V(MipsI64x2Add) \
V(MipsI64x2Sub) \
V(MipsI64x2Mul) \
@@ -169,6 +172,14 @@ namespace compiler {
V(MipsI64x2ShrU) \
V(MipsI64x2BitMask) \
V(MipsI64x2Eq) \
+ V(MipsI64x2Ne) \
+ V(MipsI64x2GtS) \
+ V(MipsI64x2GeS) \
+ V(MipsI64x2Abs) \
+ V(MipsI64x2SConvertI32x4Low) \
+ V(MipsI64x2SConvertI32x4High) \
+ V(MipsI64x2UConvertI32x4Low) \
+ V(MipsI64x2UConvertI32x4High) \
V(MipsI64x2ExtMulLowI32x4S) \
V(MipsI64x2ExtMulHighI32x4S) \
V(MipsI64x2ExtMulLowI32x4U) \
@@ -178,6 +189,7 @@ namespace compiler {
V(MipsF32x4ReplaceLane) \
V(MipsF32x4SConvertI32x4) \
V(MipsF32x4UConvertI32x4) \
+ V(MipsF32x4DemoteF64x2Zero) \
V(MipsI32x4Mul) \
V(MipsI32x4MaxS) \
V(MipsI32x4MinS) \
@@ -227,6 +239,10 @@ namespace compiler {
V(MipsI32x4ExtMulHighI16x8S) \
V(MipsI32x4ExtMulLowI16x8U) \
V(MipsI32x4ExtMulHighI16x8U) \
+ V(MipsI32x4TruncSatF64x2SZero) \
+ V(MipsI32x4TruncSatF64x2UZero) \
+ V(MipsI32x4ExtAddPairwiseI16x8S) \
+ V(MipsI32x4ExtAddPairwiseI16x8U) \
V(MipsI16x8Splat) \
V(MipsI16x8ExtractLaneU) \
V(MipsI16x8ExtractLaneS) \
@@ -261,6 +277,8 @@ namespace compiler {
V(MipsI16x8ExtMulHighI8x16S) \
V(MipsI16x8ExtMulLowI8x16U) \
V(MipsI16x8ExtMulHighI8x16U) \
+ V(MipsI16x8ExtAddPairwiseI8x16S) \
+ V(MipsI16x8ExtAddPairwiseI8x16U) \
V(MipsI8x16Splat) \
V(MipsI8x16ExtractLaneU) \
V(MipsI8x16ExtractLaneS) \
@@ -288,6 +306,7 @@ namespace compiler {
V(MipsI8x16GeU) \
V(MipsI8x16RoundingAverageU) \
V(MipsI8x16Abs) \
+ V(MipsI8x16Popcnt) \
V(MipsI8x16BitMask) \
V(MipsS128And) \
V(MipsS128Or) \
@@ -295,12 +314,11 @@ namespace compiler {
V(MipsS128Not) \
V(MipsS128Select) \
V(MipsS128AndNot) \
- V(MipsV32x4AnyTrue) \
+ V(MipsV64x2AllTrue) \
V(MipsV32x4AllTrue) \
- V(MipsV16x8AnyTrue) \
V(MipsV16x8AllTrue) \
- V(MipsV8x16AnyTrue) \
V(MipsV8x16AllTrue) \
+ V(MipsV128AnyTrue) \
V(MipsS32x4InterleaveRight) \
V(MipsS32x4InterleaveLeft) \
V(MipsS32x4PackEven) \
diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index 404f9e4951..291f063053 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -63,6 +63,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsF64x2Floor:
case kMipsF64x2Trunc:
case kMipsF64x2NearestInt:
+ case kMipsF64x2ConvertLowI32x4S:
+ case kMipsF64x2ConvertLowI32x4U:
+ case kMipsF64x2PromoteLowF32x4:
case kMipsI64x2Add:
case kMipsI64x2Sub:
case kMipsI64x2Mul:
@@ -72,6 +75,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI64x2ShrU:
case kMipsI64x2BitMask:
case kMipsI64x2Eq:
+ case kMipsI64x2Ne:
+ case kMipsI64x2GtS:
+ case kMipsI64x2GeS:
+ case kMipsI64x2Abs:
+ case kMipsI64x2SConvertI32x4Low:
+ case kMipsI64x2SConvertI32x4High:
+ case kMipsI64x2UConvertI32x4Low:
+ case kMipsI64x2UConvertI32x4High:
case kMipsI64x2ExtMulLowI32x4S:
case kMipsI64x2ExtMulHighI32x4S:
case kMipsI64x2ExtMulLowI32x4U:
@@ -103,6 +114,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsF32x4Floor:
case kMipsF32x4Trunc:
case kMipsF32x4NearestInt:
+ case kMipsF32x4DemoteF64x2Zero:
case kMipsFloat32Max:
case kMipsFloat32Min:
case kMipsFloat32RoundDown:
@@ -162,6 +174,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI16x8ExtMulHighI8x16S:
case kMipsI16x8ExtMulLowI8x16U:
case kMipsI16x8ExtMulHighI8x16U:
+ case kMipsI16x8ExtAddPairwiseI8x16S:
+ case kMipsI16x8ExtAddPairwiseI8x16U:
+ case kMipsI32x4ExtAddPairwiseI16x8S:
+ case kMipsI32x4ExtAddPairwiseI16x8U:
case kMipsI32x4Add:
case kMipsI32x4AddHoriz:
case kMipsI32x4Eq:
@@ -196,6 +212,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI32x4ExtMulHighI16x8S:
case kMipsI32x4ExtMulLowI16x8U:
case kMipsI32x4ExtMulHighI16x8U:
+ case kMipsI32x4TruncSatF64x2SZero:
+ case kMipsI32x4TruncSatF64x2UZero:
case kMipsI8x16Add:
case kMipsI8x16AddSatS:
case kMipsI8x16AddSatU:
@@ -225,6 +243,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI8x16SubSatU:
case kMipsI8x16UConvertI16x8:
case kMipsI8x16Abs:
+ case kMipsI8x16Popcnt:
case kMipsI8x16BitMask:
case kMipsIns:
case kMipsLsa:
@@ -269,12 +288,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsS16x8InterleaveRight:
case kMipsS16x8PackEven:
case kMipsS16x8PackOdd:
- case kMipsV8x16AllTrue:
- case kMipsV8x16AnyTrue:
+ case kMipsV64x2AllTrue:
case kMipsV32x4AllTrue:
- case kMipsV32x4AnyTrue:
case kMipsV16x8AllTrue:
- case kMipsV16x8AnyTrue:
+ case kMipsV8x16AllTrue:
+ case kMipsV128AnyTrue:
case kMipsS32x4InterleaveEven:
case kMipsS32x4InterleaveLeft:
case kMipsS32x4InterleaveOdd:
@@ -1263,11 +1281,6 @@ int PrepareForTailCallLatency() {
Latency::BRANCH + 2 * SubuLatency(false) + 2 + Latency::BRANCH + 1;
}
-int AssemblePopArgumentsAdaptorFrameLatency() {
- return 1 + Latency::BRANCH + 1 + SmiUntagLatency() +
- PrepareForTailCallLatency();
-}
-
int JumpLatency() {
// Estimated max.
return 1 + AdduLatency(false) + Latency::BRANCH + 2;
@@ -1380,14 +1393,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kArchCallCodeObject:
case kArchCallWasmFunction:
return CallLatency();
- case kArchTailCallCodeObjectFromJSFunction:
- case kArchTailCallCodeObject: {
- int latency = 0;
- if (instr->arch_opcode() == kArchTailCallCodeObjectFromJSFunction) {
- latency = AssemblePopArgumentsAdaptorFrameLatency();
- }
- return latency + JumpLatency();
- }
+ case kArchTailCallCodeObject:
case kArchTailCallWasm:
case kArchTailCallAddress:
return JumpLatency();
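
In this change the per-width any-true opcodes (kMipsV32x4AnyTrue, kMipsV16x8AnyTrue, kMipsV8x16AnyTrue) collapse into a single kMipsV128AnyTrue, while the all-true opcodes remain per lane width and gain kMipsV64x2AllTrue. The scalar sketch below illustrates why: "any lane non-zero" over a 128-bit vector does not depend on how the lanes are sliced, whereas "all lanes non-zero" does. It is a plain reference model, not the MSA branch sequences used by the code generator.

    // Illustrative scalar model of V128AnyTrue vs. per-width AllTrue.
    #include <array>
    #include <cassert>
    #include <cstdint>
    #include <cstring>

    using V128 = std::array<uint8_t, 16>;

    bool AnyTrue(const V128& v) {            // lane width is irrelevant here
      for (uint8_t b : v) if (b != 0) return true;
      return false;
    }

    template <typename Lane>
    bool AllTrue(const V128& v) {            // depends on the lane type
      for (size_t i = 0; i < 16; i += sizeof(Lane)) {
        Lane lane;
        std::memcpy(&lane, v.data() + i, sizeof(Lane));
        if (lane == 0) return false;
      }
      return true;
    }

    int main() {
      V128 v{};            // all zero
      v[3] = 1;            // one non-zero byte
      assert(AnyTrue(v));  // true regardless of lane slicing
      assert(!AllTrue<uint8_t>(v) && !AllTrue<uint32_t>(v));
      return 0;
    }
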
diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index 423540b455..be8c17ad9c 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -377,7 +377,7 @@ void InstructionSelector::VisitLoad(Node* node) {
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
}
if (g.CanBeImmediate(index, opcode)) {
@@ -481,6 +481,10 @@ void InstructionSelector::VisitProtectedStore(Node* node) {
UNIMPLEMENTED();
}
+void InstructionSelector::VisitLoadLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitStoreLane(Node* node) { UNIMPLEMENTED(); }
+
void InstructionSelector::VisitWord32And(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -1395,8 +1399,6 @@ void InstructionSelector::EmitPrepareResults(
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
-int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
-
void InstructionSelector::VisitUnalignedLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
@@ -2113,50 +2115,65 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8) \
V(I8x16)
-#define SIMD_UNOP_LIST(V) \
- V(F64x2Abs, kMipsF64x2Abs) \
- V(F64x2Neg, kMipsF64x2Neg) \
- V(F64x2Sqrt, kMipsF64x2Sqrt) \
- V(F64x2Ceil, kMipsF64x2Ceil) \
- V(F64x2Floor, kMipsF64x2Floor) \
- V(F64x2Trunc, kMipsF64x2Trunc) \
- V(F64x2NearestInt, kMipsF64x2NearestInt) \
- V(I64x2Neg, kMipsI64x2Neg) \
- V(I64x2BitMask, kMipsI64x2BitMask) \
- V(F32x4SConvertI32x4, kMipsF32x4SConvertI32x4) \
- V(F32x4UConvertI32x4, kMipsF32x4UConvertI32x4) \
- V(F32x4Abs, kMipsF32x4Abs) \
- V(F32x4Neg, kMipsF32x4Neg) \
- V(F32x4Sqrt, kMipsF32x4Sqrt) \
- V(F32x4RecipApprox, kMipsF32x4RecipApprox) \
- V(F32x4RecipSqrtApprox, kMipsF32x4RecipSqrtApprox) \
- V(F32x4Ceil, kMipsF32x4Ceil) \
- V(F32x4Floor, kMipsF32x4Floor) \
- V(F32x4Trunc, kMipsF32x4Trunc) \
- V(F32x4NearestInt, kMipsF32x4NearestInt) \
- V(I32x4SConvertF32x4, kMipsI32x4SConvertF32x4) \
- V(I32x4UConvertF32x4, kMipsI32x4UConvertF32x4) \
- V(I32x4Neg, kMipsI32x4Neg) \
- V(I32x4BitMask, kMipsI32x4BitMask) \
- V(I32x4SConvertI16x8Low, kMipsI32x4SConvertI16x8Low) \
- V(I32x4SConvertI16x8High, kMipsI32x4SConvertI16x8High) \
- V(I32x4UConvertI16x8Low, kMipsI32x4UConvertI16x8Low) \
- V(I32x4UConvertI16x8High, kMipsI32x4UConvertI16x8High) \
- V(I16x8Neg, kMipsI16x8Neg) \
- V(I16x8BitMask, kMipsI16x8BitMask) \
- V(I16x8SConvertI8x16Low, kMipsI16x8SConvertI8x16Low) \
- V(I16x8SConvertI8x16High, kMipsI16x8SConvertI8x16High) \
- V(I16x8UConvertI8x16Low, kMipsI16x8UConvertI8x16Low) \
- V(I16x8UConvertI8x16High, kMipsI16x8UConvertI8x16High) \
- V(I8x16Neg, kMipsI8x16Neg) \
- V(I8x16BitMask, kMipsI8x16BitMask) \
- V(S128Not, kMipsS128Not) \
- V(V32x4AnyTrue, kMipsV32x4AnyTrue) \
- V(V32x4AllTrue, kMipsV32x4AllTrue) \
- V(V16x8AnyTrue, kMipsV16x8AnyTrue) \
- V(V16x8AllTrue, kMipsV16x8AllTrue) \
- V(V8x16AnyTrue, kMipsV8x16AnyTrue) \
- V(V8x16AllTrue, kMipsV8x16AllTrue)
+#define SIMD_UNOP_LIST(V) \
+ V(F64x2Abs, kMipsF64x2Abs) \
+ V(F64x2Neg, kMipsF64x2Neg) \
+ V(F64x2Sqrt, kMipsF64x2Sqrt) \
+ V(F64x2Ceil, kMipsF64x2Ceil) \
+ V(F64x2Floor, kMipsF64x2Floor) \
+ V(F64x2Trunc, kMipsF64x2Trunc) \
+ V(F64x2NearestInt, kMipsF64x2NearestInt) \
+ V(F64x2ConvertLowI32x4S, kMipsF64x2ConvertLowI32x4S) \
+ V(F64x2ConvertLowI32x4U, kMipsF64x2ConvertLowI32x4U) \
+ V(F64x2PromoteLowF32x4, kMipsF64x2PromoteLowF32x4) \
+ V(I64x2Neg, kMipsI64x2Neg) \
+ V(I64x2BitMask, kMipsI64x2BitMask) \
+ V(I64x2Abs, kMipsI64x2Abs) \
+ V(I64x2SConvertI32x4Low, kMipsI64x2SConvertI32x4Low) \
+ V(I64x2SConvertI32x4High, kMipsI64x2SConvertI32x4High) \
+ V(I64x2UConvertI32x4Low, kMipsI64x2UConvertI32x4Low) \
+ V(I64x2UConvertI32x4High, kMipsI64x2UConvertI32x4High) \
+ V(F32x4SConvertI32x4, kMipsF32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4, kMipsF32x4UConvertI32x4) \
+ V(F32x4Abs, kMipsF32x4Abs) \
+ V(F32x4Neg, kMipsF32x4Neg) \
+ V(F32x4Sqrt, kMipsF32x4Sqrt) \
+ V(F32x4RecipApprox, kMipsF32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox, kMipsF32x4RecipSqrtApprox) \
+ V(F32x4Ceil, kMipsF32x4Ceil) \
+ V(F32x4Floor, kMipsF32x4Floor) \
+ V(F32x4Trunc, kMipsF32x4Trunc) \
+ V(F32x4NearestInt, kMipsF32x4NearestInt) \
+ V(F32x4DemoteF64x2Zero, kMipsF32x4DemoteF64x2Zero) \
+ V(I32x4SConvertF32x4, kMipsI32x4SConvertF32x4) \
+ V(I32x4UConvertF32x4, kMipsI32x4UConvertF32x4) \
+ V(I32x4Neg, kMipsI32x4Neg) \
+ V(I32x4BitMask, kMipsI32x4BitMask) \
+ V(I32x4SConvertI16x8Low, kMipsI32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High, kMipsI32x4SConvertI16x8High) \
+ V(I32x4UConvertI16x8Low, kMipsI32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High, kMipsI32x4UConvertI16x8High) \
+ V(I32x4ExtAddPairwiseI16x8S, kMipsI32x4ExtAddPairwiseI16x8S) \
+ V(I32x4ExtAddPairwiseI16x8U, kMipsI32x4ExtAddPairwiseI16x8U) \
+ V(I32x4TruncSatF64x2SZero, kMipsI32x4TruncSatF64x2SZero) \
+ V(I32x4TruncSatF64x2UZero, kMipsI32x4TruncSatF64x2UZero) \
+ V(I16x8Neg, kMipsI16x8Neg) \
+ V(I16x8BitMask, kMipsI16x8BitMask) \
+ V(I16x8SConvertI8x16Low, kMipsI16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High, kMipsI16x8SConvertI8x16High) \
+ V(I16x8UConvertI8x16Low, kMipsI16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High, kMipsI16x8UConvertI8x16High) \
+ V(I16x8ExtAddPairwiseI8x16S, kMipsI16x8ExtAddPairwiseI8x16S) \
+ V(I16x8ExtAddPairwiseI8x16U, kMipsI16x8ExtAddPairwiseI8x16U) \
+ V(I8x16Neg, kMipsI8x16Neg) \
+ V(I8x16Popcnt, kMipsI8x16Popcnt) \
+ V(I8x16BitMask, kMipsI8x16BitMask) \
+ V(S128Not, kMipsS128Not) \
+ V(V64x2AllTrue, kMipsV64x2AllTrue) \
+ V(V32x4AllTrue, kMipsV32x4AllTrue) \
+ V(V16x8AllTrue, kMipsV16x8AllTrue) \
+ V(V8x16AllTrue, kMipsV8x16AllTrue) \
+ V(V128AnyTrue, kMipsV128AnyTrue)
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl) \
@@ -2184,9 +2201,12 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F64x2Lt, kMipsF64x2Lt) \
V(F64x2Le, kMipsF64x2Le) \
V(I64x2Eq, kMipsI64x2Eq) \
+ V(I64x2Ne, kMipsI64x2Ne) \
V(I64x2Add, kMipsI64x2Add) \
V(I64x2Sub, kMipsI64x2Sub) \
V(I64x2Mul, kMipsI64x2Mul) \
+ V(I64x2GtS, kMipsI64x2GtS) \
+ V(I64x2GeS, kMipsI64x2GeS) \
V(I64x2ExtMulLowI32x4S, kMipsI64x2ExtMulLowI32x4S) \
V(I64x2ExtMulHighI32x4S, kMipsI64x2ExtMulHighI32x4S) \
V(I64x2ExtMulLowI32x4U, kMipsI64x2ExtMulLowI32x4U) \
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index 868134ff04..d6e720b6de 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -313,8 +313,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
MipsOperandConverter const& i) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
@@ -509,29 +508,6 @@ void CodeGenerator::AssemblePrepareTailCall() {
frame_access_state()->SetFrameAccessToSP();
}
-void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Label done;
-
- // Check if current frame is an arguments adaptor frame.
- __ Ld(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ Branch(&done, ne, scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Load arguments count from current arguments adaptor frame (note, it
- // does not include receiver).
- Register caller_args_count_reg = scratch1;
- __ Ld(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
-
- __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3);
- __ bind(&done);
-}
-
namespace {
void AdjustStackPointerForTailCall(TurboAssembler* tasm,
@@ -646,11 +622,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchCallWasmFunction: {
- if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
if (instr->InputAt(0)->IsImmediate()) {
Constant constant = i.ToConstant(instr->InputAt(0));
Address wasm_code = static_cast<Address>(constant.ToInt64());
@@ -663,13 +634,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
@@ -790,7 +755,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
if (isWasmCapiFunction) {
CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
- RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
+ RecordSafepoint(instr->reference_map());
}
frame_access_state()->SetFrameAccessToDefault();
@@ -1852,28 +1817,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
break;
}
- case kMips64S128Load8Splat: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ Lb(kScratchReg, i.MemoryOperand());
- __ fill_b(i.OutputSimd128Register(), kScratchReg);
- break;
- }
- case kMips64S128Load16Splat: {
+ case kMips64S128LoadSplat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ Lh(kScratchReg, i.MemoryOperand());
- __ fill_h(i.OutputSimd128Register(), kScratchReg);
- break;
- }
- case kMips64S128Load32Splat: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ Lw(kScratchReg, i.MemoryOperand());
- __ fill_w(i.OutputSimd128Register(), kScratchReg);
- break;
- }
- case kMips64S128Load64Splat: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ Ld(kScratchReg, i.MemoryOperand());
- __ fill_d(i.OutputSimd128Register(), kScratchReg);
+ auto sz = static_cast<MSASize>(MiscField::decode(instr->opcode()));
+ __ LoadSplat(sz, i.OutputSimd128Register(), i.MemoryOperand());
break;
}
case kMips64S128Load8x8S: {
@@ -2351,6 +2298,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kRoundToNearest);
break;
}
+ case kMips64F64x2ConvertLowI32x4S: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0));
+ __ slli_d(kSimd128RegZero, kSimd128RegZero, 32);
+ __ srai_d(kSimd128RegZero, kSimd128RegZero, 32);
+ __ ffint_s_d(i.OutputSimd128Register(), kSimd128RegZero);
+ break;
+ }
+ case kMips64F64x2ConvertLowI32x4U: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0));
+ __ ffint_u_d(i.OutputSimd128Register(), kSimd128RegZero);
+ break;
+ }
+ case kMips64F64x2PromoteLowF32x4: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ fexupr_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kMips64I64x2ReplaceLane: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
@@ -2441,6 +2409,65 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kMips64I64x2Ne: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ ceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ nor_v(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ i.OutputSimd128Register());
+ break;
+ }
+ case kMips64I64x2GtS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I64x2GeS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I64x2Abs: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ add_a_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kSimd128RegZero);
+ break;
+ }
+ case kMips64I64x2SConvertI32x4Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvr_w(kSimd128ScratchReg, src, src);
+ __ slli_d(dst, kSimd128ScratchReg, 32);
+ __ srai_d(dst, dst, 32);
+ break;
+ }
+ case kMips64I64x2SConvertI32x4High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvl_w(kSimd128ScratchReg, src, src);
+ __ slli_d(dst, kSimd128ScratchReg, 32);
+ __ srai_d(dst, dst, 32);
+ break;
+ }
+ case kMips64I64x2UConvertI32x4Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_w(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I64x2UConvertI32x4High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvl_w(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
case kMips64ExtMulLow: {
auto dt = static_cast<MSADataType>(MiscField::decode(instr->opcode()));
__ ExtMulLow(dt, i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -2453,6 +2480,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kMips64ExtAddPairwise: {
+ auto dt = static_cast<MSADataType>(MiscField::decode(instr->opcode()));
+ __ ExtAddPairwise(dt, i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
+ break;
+ }
case kMips64F32x4Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ FmoveLow(kScratchReg, i.InputSingleRegister(0));
@@ -2740,6 +2773,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kRoundToNearest);
break;
}
+ case kMips64F32x4DemoteF64x2Zero: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ fexdo_w(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
case kMips64I32x4SConvertF32x4: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
@@ -2814,6 +2854,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kMips64I32x4TruncSatF64x2SZero: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ftrunc_s_d(kSimd128ScratchReg, i.InputSimd128Register(0));
+ __ sat_s_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
+ __ pckev_w(i.OutputSimd128Register(), kSimd128RegZero,
+ kSimd128ScratchReg);
+ break;
+ }
+ case kMips64I32x4TruncSatF64x2UZero: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ftrunc_u_d(kSimd128ScratchReg, i.InputSimd128Register(0));
+ __ sat_u_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
+ __ pckev_w(i.OutputSimd128Register(), kSimd128RegZero,
+ kSimd128ScratchReg);
+ break;
+ }
case kMips64I16x8Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
@@ -3209,6 +3267,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kSimd128RegZero);
break;
}
+ case kMips64I8x16Popcnt: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ pcnt_b(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kMips64I8x16BitMask: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
@@ -3251,9 +3314,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
- case kMips64V32x4AnyTrue:
- case kMips64V16x8AnyTrue:
- case kMips64V8x16AnyTrue: {
+ case kMips64V128AnyTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_false;
@@ -3264,6 +3325,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_false);
break;
}
+ case kMips64V64x2AllTrue: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ BranchMSA(&all_true, MSA_BRANCH_D, all_not_zero,
+ i.InputSimd128Register(0), USE_DELAY_SLOT);
+ __ li(dst, 1); // branch delay slot
+ __ li(dst, 0l);
+ __ bind(&all_true);
+ break;
+ }
case kMips64V32x4AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
@@ -4011,7 +4083,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
gen_->zone()->New<ReferenceMap>(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ gen_->RecordSafepoint(reference_map);
if (FLAG_debug_code) {
__ stop();
}
@@ -4313,7 +4385,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
- RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ RecordSafepoint(reference_map);
if (FLAG_debug_code) {
__ stop();
}
@@ -4386,7 +4458,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
Operand(static_cast<int64_t>(0)));
}
}
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
+
// Functions with JS linkage have at least one parameter (the receiver).
// If {parameter_count} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
@@ -4394,9 +4466,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
parameter_count != 0;
-#else
- const bool drop_jsargs = false;
-#endif
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
@@ -4430,7 +4499,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
__ dsll(t0, t0, kSystemPointerSizeLog2);
__ Daddu(sp, sp, t0);
} else if (additional_pop_count->IsImmediate()) {
- DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type());
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
__ Drop(parameter_count + additional_count);
} else {
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index 5a162d90f7..a6bed82ea8 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -210,6 +210,9 @@ namespace compiler {
V(Mips64F64x2Floor) \
V(Mips64F64x2Trunc) \
V(Mips64F64x2NearestInt) \
+ V(Mips64F64x2ConvertLowI32x4S) \
+ V(Mips64F64x2ConvertLowI32x4U) \
+ V(Mips64F64x2PromoteLowF32x4) \
V(Mips64I64x2Splat) \
V(Mips64I64x2ExtractLane) \
V(Mips64I64x2ReplaceLane) \
@@ -222,8 +225,17 @@ namespace compiler {
V(Mips64I64x2ShrU) \
V(Mips64I64x2BitMask) \
V(Mips64I64x2Eq) \
+ V(Mips64I64x2Ne) \
+ V(Mips64I64x2GtS) \
+ V(Mips64I64x2GeS) \
+ V(Mips64I64x2Abs) \
+ V(Mips64I64x2SConvertI32x4Low) \
+ V(Mips64I64x2SConvertI32x4High) \
+ V(Mips64I64x2UConvertI32x4Low) \
+ V(Mips64I64x2UConvertI32x4High) \
V(Mips64ExtMulLow) \
V(Mips64ExtMulHigh) \
+ V(Mips64ExtAddPairwise) \
V(Mips64F32x4Abs) \
V(Mips64F32x4Neg) \
V(Mips64F32x4Sqrt) \
@@ -246,6 +258,7 @@ namespace compiler {
V(Mips64F32x4Floor) \
V(Mips64F32x4Trunc) \
V(Mips64F32x4NearestInt) \
+ V(Mips64F32x4DemoteF64x2Zero) \
V(Mips64I32x4SConvertF32x4) \
V(Mips64I32x4UConvertF32x4) \
V(Mips64I32x4Neg) \
@@ -256,6 +269,8 @@ namespace compiler {
V(Mips64I32x4Abs) \
V(Mips64I32x4BitMask) \
V(Mips64I32x4DotI16x8S) \
+ V(Mips64I32x4TruncSatF64x2SZero) \
+ V(Mips64I32x4TruncSatF64x2UZero) \
V(Mips64I16x8Splat) \
V(Mips64I16x8ExtractLaneU) \
V(Mips64I16x8ExtractLaneS) \
@@ -313,6 +328,7 @@ namespace compiler {
V(Mips64I8x16GeU) \
V(Mips64I8x16RoundingAverageU) \
V(Mips64I8x16Abs) \
+ V(Mips64I8x16Popcnt) \
V(Mips64I8x16BitMask) \
V(Mips64S128And) \
V(Mips64S128Or) \
@@ -320,12 +336,11 @@ namespace compiler {
V(Mips64S128Not) \
V(Mips64S128Select) \
V(Mips64S128AndNot) \
- V(Mips64V32x4AnyTrue) \
+ V(Mips64V64x2AllTrue) \
V(Mips64V32x4AllTrue) \
- V(Mips64V16x8AnyTrue) \
V(Mips64V16x8AllTrue) \
- V(Mips64V8x16AnyTrue) \
V(Mips64V8x16AllTrue) \
+ V(Mips64V128AnyTrue) \
V(Mips64S32x4InterleaveRight) \
V(Mips64S32x4InterleaveLeft) \
V(Mips64S32x4PackEven) \
@@ -353,10 +368,7 @@ namespace compiler {
V(Mips64S8x8Reverse) \
V(Mips64S8x4Reverse) \
V(Mips64S8x2Reverse) \
- V(Mips64S128Load8Splat) \
- V(Mips64S128Load16Splat) \
- V(Mips64S128Load32Splat) \
- V(Mips64S128Load64Splat) \
+ V(Mips64S128LoadSplat) \
V(Mips64S128Load8x8S) \
V(Mips64S128Load8x8U) \
V(Mips64S128Load16x4S) \
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index 3c7a7738a7..6baff2905e 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -88,6 +88,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64F64x2Floor:
case kMips64F64x2Trunc:
case kMips64F64x2NearestInt:
+ case kMips64F64x2ConvertLowI32x4S:
+ case kMips64F64x2ConvertLowI32x4U:
+ case kMips64F64x2PromoteLowF32x4:
case kMips64I64x2Splat:
case kMips64I64x2ExtractLane:
case kMips64I64x2ReplaceLane:
@@ -100,8 +103,17 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I64x2ShrU:
case kMips64I64x2BitMask:
case kMips64I64x2Eq:
+ case kMips64I64x2Ne:
+ case kMips64I64x2GtS:
+ case kMips64I64x2GeS:
+ case kMips64I64x2Abs:
+ case kMips64I64x2SConvertI32x4Low:
+ case kMips64I64x2SConvertI32x4High:
+ case kMips64I64x2UConvertI32x4Low:
+ case kMips64I64x2UConvertI32x4High:
case kMips64ExtMulLow:
case kMips64ExtMulHigh:
+ case kMips64ExtAddPairwise:
case kMips64F32x4Abs:
case kMips64F32x4Add:
case kMips64F32x4AddHoriz:
@@ -129,6 +141,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64F32x4Floor:
case kMips64F32x4Trunc:
case kMips64F32x4NearestInt:
+ case kMips64F32x4DemoteF64x2Zero:
case kMips64F64x2Splat:
case kMips64F64x2ExtractLane:
case kMips64F64x2ReplaceLane:
@@ -219,6 +232,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I32x4Abs:
case kMips64I32x4BitMask:
case kMips64I32x4DotI16x8S:
+ case kMips64I32x4TruncSatF64x2SZero:
+ case kMips64I32x4TruncSatF64x2UZero:
case kMips64I8x16Add:
case kMips64I8x16AddSatS:
case kMips64I8x16AddSatU:
@@ -246,6 +261,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I8x16SubSatU:
case kMips64I8x16RoundingAverageU:
case kMips64I8x16Abs:
+ case kMips64I8x16Popcnt:
case kMips64I8x16BitMask:
case kMips64Ins:
case kMips64Lsa:
@@ -288,12 +304,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64S16x8PackOdd:
case kMips64S16x2Reverse:
case kMips64S16x4Reverse:
- case kMips64V8x16AllTrue:
- case kMips64V8x16AnyTrue:
+ case kMips64V64x2AllTrue:
case kMips64V32x4AllTrue:
- case kMips64V32x4AnyTrue:
case kMips64V16x8AllTrue:
- case kMips64V16x8AnyTrue:
+ case kMips64V8x16AllTrue:
+ case kMips64V128AnyTrue:
case kMips64S32x4InterleaveEven:
case kMips64S32x4InterleaveOdd:
case kMips64S32x4InterleaveLeft:
@@ -354,10 +369,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64Ulw:
case kMips64Ulwu:
case kMips64Ulwc1:
- case kMips64S128Load8Splat:
- case kMips64S128Load16Splat:
- case kMips64S128Load32Splat:
- case kMips64S128Load64Splat:
+ case kMips64S128LoadSplat:
case kMips64S128Load8x8S:
case kMips64S128Load8x8U:
case kMips64S128Load16x4S:
@@ -790,11 +802,6 @@ int PrepareForTailCallLatency() {
Latency::BRANCH + 2 * DsubuLatency(false) + 2 + Latency::BRANCH + 1;
}
-int AssemblePopArgumentsAdoptFrameLatency() {
- return 1 + Latency::BRANCH + 1 + SmiUntagLatency() +
- PrepareForTailCallLatency();
-}
-
int AssertLatency() { return 1; }
int PrepareCallCFunctionLatency() {
@@ -1289,14 +1296,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kArchCallCodeObject:
case kArchCallWasmFunction:
return CallLatency();
- case kArchTailCallCodeObjectFromJSFunction:
- case kArchTailCallCodeObject: {
- int latency = 0;
- if (instr->arch_opcode() == kArchTailCallCodeObjectFromJSFunction) {
- latency = AssemblePopArgumentsAdoptFrameLatency();
- }
- return latency + JumpLatency();
- }
+ case kArchTailCallCodeObject:
case kArchTailCallWasm:
case kArchTailCallAddress:
return JumpLatency();
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 8bb2f5fc03..f704a03af8 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -397,36 +397,11 @@ InstructionOperand EmitAddBeforeS128LoadStore(InstructionSelector* selector,
return addr_reg;
}
-// Helper struct for load lane and store lane to indicate what memory size
-// to be encoded in the opcode, and the new lane index.
-struct LoadStoreLaneParams {
- MSASize sz;
- uint8_t laneidx;
- LoadStoreLaneParams(uint8_t laneidx, MSASize sz, int lanes)
- : sz(sz), laneidx(laneidx % lanes) {}
-};
-
-LoadStoreLaneParams GetLoadStoreLaneParams(MachineRepresentation rep,
- uint8_t laneidx) {
- switch (rep) {
- case MachineRepresentation::kWord8:
- return LoadStoreLaneParams(laneidx, MSA_B, 16);
- case MachineRepresentation::kWord16:
- return LoadStoreLaneParams(laneidx, MSA_H, 8);
- case MachineRepresentation::kWord32:
- return LoadStoreLaneParams(laneidx, MSA_W, 4);
- case MachineRepresentation::kWord64:
- return LoadStoreLaneParams(laneidx, MSA_D, 2);
- default:
- break;
- }
- UNREACHABLE();
-}
} // namespace
void InstructionSelector::VisitStoreLane(Node* node) {
StoreLaneParameters params = StoreLaneParametersOf(node->op());
- LoadStoreLaneParams f = GetLoadStoreLaneParams(params.rep, params.laneidx);
+ LoadStoreLaneParams f(params.rep, params.laneidx);
InstructionCode opcode = kMips64S128StoreLane;
opcode |= MiscField::encode(f.sz);
@@ -443,8 +418,7 @@ void InstructionSelector::VisitStoreLane(Node* node) {
void InstructionSelector::VisitLoadLane(Node* node) {
LoadLaneParameters params = LoadLaneParametersOf(node->op());
- LoadStoreLaneParams f =
- GetLoadStoreLaneParams(params.rep.representation(), params.laneidx);
+ LoadStoreLaneParams f(params.rep.representation(), params.laneidx);
InstructionCode opcode = kMips64S128LoadLane;
opcode |= MiscField::encode(f.sz);
@@ -460,16 +434,20 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
InstructionCode opcode = kArchNop;
switch (params.transformation) {
case LoadTransformation::kS128Load8Splat:
- opcode = kMips64S128Load8Splat;
+ opcode = kMips64S128LoadSplat;
+ opcode |= MiscField::encode(MSASize::MSA_B);
break;
case LoadTransformation::kS128Load16Splat:
- opcode = kMips64S128Load16Splat;
+ opcode = kMips64S128LoadSplat;
+ opcode |= MiscField::encode(MSASize::MSA_H);
break;
case LoadTransformation::kS128Load32Splat:
- opcode = kMips64S128Load32Splat;
+ opcode = kMips64S128LoadSplat;
+ opcode |= MiscField::encode(MSASize::MSA_W);
break;
case LoadTransformation::kS128Load64Splat:
- opcode = kMips64S128Load64Splat;
+ opcode = kMips64S128LoadSplat;
+ opcode |= MiscField::encode(MSASize::MSA_D);
break;
case LoadTransformation::kS128Load8x8S:
opcode = kMips64S128Load8x8S;
@@ -539,7 +517,7 @@ void InstructionSelector::VisitLoad(Node* node) {
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
}
EmitLoad(this, node, opcode);
@@ -1839,8 +1817,6 @@ void InstructionSelector::EmitPrepareResults(
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
-int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
-
void InstructionSelector::VisitUnalignedLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
Mips64OperandGenerator g(this);
@@ -2874,53 +2850,64 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8) \
V(I8x16)
-#define SIMD_UNOP_LIST(V) \
- V(F64x2Abs, kMips64F64x2Abs) \
- V(F64x2Neg, kMips64F64x2Neg) \
- V(F64x2Sqrt, kMips64F64x2Sqrt) \
- V(F64x2Ceil, kMips64F64x2Ceil) \
- V(F64x2Floor, kMips64F64x2Floor) \
- V(F64x2Trunc, kMips64F64x2Trunc) \
- V(F64x2NearestInt, kMips64F64x2NearestInt) \
- V(I64x2Neg, kMips64I64x2Neg) \
- V(I64x2BitMask, kMips64I64x2BitMask) \
- V(F32x4SConvertI32x4, kMips64F32x4SConvertI32x4) \
- V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4) \
- V(F32x4Abs, kMips64F32x4Abs) \
- V(F32x4Neg, kMips64F32x4Neg) \
- V(F32x4Sqrt, kMips64F32x4Sqrt) \
- V(F32x4RecipApprox, kMips64F32x4RecipApprox) \
- V(F32x4RecipSqrtApprox, kMips64F32x4RecipSqrtApprox) \
- V(F32x4Ceil, kMips64F32x4Ceil) \
- V(F32x4Floor, kMips64F32x4Floor) \
- V(F32x4Trunc, kMips64F32x4Trunc) \
- V(F32x4NearestInt, kMips64F32x4NearestInt) \
- V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4) \
- V(I32x4UConvertF32x4, kMips64I32x4UConvertF32x4) \
- V(I32x4Neg, kMips64I32x4Neg) \
- V(I32x4SConvertI16x8Low, kMips64I32x4SConvertI16x8Low) \
- V(I32x4SConvertI16x8High, kMips64I32x4SConvertI16x8High) \
- V(I32x4UConvertI16x8Low, kMips64I32x4UConvertI16x8Low) \
- V(I32x4UConvertI16x8High, kMips64I32x4UConvertI16x8High) \
- V(I32x4Abs, kMips64I32x4Abs) \
- V(I32x4BitMask, kMips64I32x4BitMask) \
- V(I16x8Neg, kMips64I16x8Neg) \
- V(I16x8SConvertI8x16Low, kMips64I16x8SConvertI8x16Low) \
- V(I16x8SConvertI8x16High, kMips64I16x8SConvertI8x16High) \
- V(I16x8UConvertI8x16Low, kMips64I16x8UConvertI8x16Low) \
- V(I16x8UConvertI8x16High, kMips64I16x8UConvertI8x16High) \
- V(I16x8Abs, kMips64I16x8Abs) \
- V(I16x8BitMask, kMips64I16x8BitMask) \
- V(I8x16Neg, kMips64I8x16Neg) \
- V(I8x16Abs, kMips64I8x16Abs) \
- V(I8x16BitMask, kMips64I8x16BitMask) \
- V(S128Not, kMips64S128Not) \
- V(V32x4AnyTrue, kMips64V32x4AnyTrue) \
- V(V32x4AllTrue, kMips64V32x4AllTrue) \
- V(V16x8AnyTrue, kMips64V16x8AnyTrue) \
- V(V16x8AllTrue, kMips64V16x8AllTrue) \
- V(V8x16AnyTrue, kMips64V8x16AnyTrue) \
- V(V8x16AllTrue, kMips64V8x16AllTrue)
+#define SIMD_UNOP_LIST(V) \
+ V(F64x2Abs, kMips64F64x2Abs) \
+ V(F64x2Neg, kMips64F64x2Neg) \
+ V(F64x2Sqrt, kMips64F64x2Sqrt) \
+ V(F64x2Ceil, kMips64F64x2Ceil) \
+ V(F64x2Floor, kMips64F64x2Floor) \
+ V(F64x2Trunc, kMips64F64x2Trunc) \
+ V(F64x2NearestInt, kMips64F64x2NearestInt) \
+ V(I64x2Neg, kMips64I64x2Neg) \
+ V(I64x2BitMask, kMips64I64x2BitMask) \
+ V(F64x2ConvertLowI32x4S, kMips64F64x2ConvertLowI32x4S) \
+ V(F64x2ConvertLowI32x4U, kMips64F64x2ConvertLowI32x4U) \
+ V(F64x2PromoteLowF32x4, kMips64F64x2PromoteLowF32x4) \
+ V(F32x4SConvertI32x4, kMips64F32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4) \
+ V(F32x4Abs, kMips64F32x4Abs) \
+ V(F32x4Neg, kMips64F32x4Neg) \
+ V(F32x4Sqrt, kMips64F32x4Sqrt) \
+ V(F32x4RecipApprox, kMips64F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox, kMips64F32x4RecipSqrtApprox) \
+ V(F32x4Ceil, kMips64F32x4Ceil) \
+ V(F32x4Floor, kMips64F32x4Floor) \
+ V(F32x4Trunc, kMips64F32x4Trunc) \
+ V(F32x4NearestInt, kMips64F32x4NearestInt) \
+ V(F32x4DemoteF64x2Zero, kMips64F32x4DemoteF64x2Zero) \
+ V(I64x2Abs, kMips64I64x2Abs) \
+ V(I64x2SConvertI32x4Low, kMips64I64x2SConvertI32x4Low) \
+ V(I64x2SConvertI32x4High, kMips64I64x2SConvertI32x4High) \
+ V(I64x2UConvertI32x4Low, kMips64I64x2UConvertI32x4Low) \
+ V(I64x2UConvertI32x4High, kMips64I64x2UConvertI32x4High) \
+ V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4) \
+ V(I32x4UConvertF32x4, kMips64I32x4UConvertF32x4) \
+ V(I32x4Neg, kMips64I32x4Neg) \
+ V(I32x4SConvertI16x8Low, kMips64I32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High, kMips64I32x4SConvertI16x8High) \
+ V(I32x4UConvertI16x8Low, kMips64I32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High, kMips64I32x4UConvertI16x8High) \
+ V(I32x4Abs, kMips64I32x4Abs) \
+ V(I32x4BitMask, kMips64I32x4BitMask) \
+ V(I32x4TruncSatF64x2SZero, kMips64I32x4TruncSatF64x2SZero) \
+ V(I32x4TruncSatF64x2UZero, kMips64I32x4TruncSatF64x2UZero) \
+ V(I16x8Neg, kMips64I16x8Neg) \
+ V(I16x8SConvertI8x16Low, kMips64I16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High, kMips64I16x8SConvertI8x16High) \
+ V(I16x8UConvertI8x16Low, kMips64I16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High, kMips64I16x8UConvertI8x16High) \
+ V(I16x8Abs, kMips64I16x8Abs) \
+ V(I16x8BitMask, kMips64I16x8BitMask) \
+ V(I8x16Neg, kMips64I8x16Neg) \
+ V(I8x16Abs, kMips64I8x16Abs) \
+ V(I8x16Popcnt, kMips64I8x16Popcnt) \
+ V(I8x16BitMask, kMips64I8x16BitMask) \
+ V(S128Not, kMips64S128Not) \
+ V(V64x2AllTrue, kMips64V64x2AllTrue) \
+ V(V32x4AllTrue, kMips64V32x4AllTrue) \
+ V(V16x8AllTrue, kMips64V16x8AllTrue) \
+ V(V8x16AllTrue, kMips64V8x16AllTrue) \
+ V(V128AnyTrue, kMips64V128AnyTrue)
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl) \
@@ -2948,9 +2935,12 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F64x2Lt, kMips64F64x2Lt) \
V(F64x2Le, kMips64F64x2Le) \
V(I64x2Eq, kMips64I64x2Eq) \
+ V(I64x2Ne, kMips64I64x2Ne) \
V(I64x2Add, kMips64I64x2Add) \
V(I64x2Sub, kMips64I64x2Sub) \
V(I64x2Mul, kMips64I64x2Mul) \
+ V(I64x2GtS, kMips64I64x2GtS) \
+ V(I64x2GeS, kMips64I64x2GeS) \
V(F32x4Add, kMips64F32x4Add) \
V(F32x4AddHoriz, kMips64F32x4AddHoriz) \
V(F32x4Sub, kMips64F32x4Sub) \
@@ -3288,6 +3278,18 @@ VISIT_EXT_MUL(I16x8, I8x16S, MSAS8)
VISIT_EXT_MUL(I16x8, I8x16U, MSAU8)
#undef VISIT_EXT_MUL
+#define VISIT_EXTADD_PAIRWISE(OPCODE, TYPE) \
+ void InstructionSelector::Visit##OPCODE(Node* node) { \
+ Mips64OperandGenerator g(this); \
+ Emit(kMips64ExtAddPairwise | MiscField::encode(TYPE), \
+ g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); \
+ }
+VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16S, MSAS8)
+VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16U, MSAU8)
+VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8S, MSAS16)
+VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8U, MSAU16)
+#undef VISIT_EXTADD_PAIRWISE
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
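
The mips64 selector now routes the extended pairwise additions through a single kMips64ExtAddPairwise opcode with the MSA data type packed into MiscField (see VISIT_EXTADD_PAIRWISE above). The sketch below gives scalar reference semantics for one of those operations, i16x8.extadd_pairwise_i8x16_s: each 16-bit output lane is the widening sum of two adjacent signed 8-bit input lanes. The function name and test values are illustrative, not code from this patch.

    // Illustrative scalar semantics for i16x8.extadd_pairwise_i8x16_s.
    #include <array>
    #include <cassert>
    #include <cstdint>

    std::array<int16_t, 8> ExtAddPairwiseI8x16S(const std::array<int8_t, 16>& src) {
      std::array<int16_t, 8> dst{};
      for (int i = 0; i < 8; ++i) {
        // Each output lane is the widening sum of two adjacent input lanes.
        dst[i] = int16_t(src[2 * i]) + int16_t(src[2 * i + 1]);
      }
      return dst;
    }

    int main() {
      std::array<int8_t, 16> src{};
      src[0] = 127;
      src[1] = 127;                    // would overflow 8 bits, fits in 16
      auto dst = ExtAddPairwiseI8x16S(src);
      assert(dst[0] == 254 && dst[1] == 0);
      return 0;
    }
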
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index 3d2e9d9364..4e5393bd22 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -276,8 +276,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
PPCOperandConverter const& i) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
+ const MemoryAccessMode access_mode = AccessModeField::decode(instr->opcode());
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
codegen->tasm()->and_(value, value, kSpeculationPoisonRegister);
@@ -697,30 +696,6 @@ void CodeGenerator::AssemblePrepareTailCall() {
frame_access_state()->SetFrameAccessToSP();
}
-void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Label done;
-
- // Check if current frame is an arguments adaptor frame.
- __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ cmpi(scratch1,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ bne(&done);
-
- // Load arguments count from current arguments adaptor frame (note, it
- // does not include receiver).
- Register caller_args_count_reg = scratch1;
- __ LoadP(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
-
- __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3);
- __ bind(&done);
-}
-
namespace {
void FlushPendingPushRegisters(TurboAssembler* tasm,
@@ -915,13 +890,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- if (opcode == kArchTailCallCodeObjectFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
if (HasRegisterInput(instr, 0)) {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
@@ -1076,7 +1045,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// f5ab7d3.
if (isWasmCapiFunction) {
CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
- RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
+ RecordSafepoint(instr->reference_map());
}
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
@@ -1741,39 +1710,42 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CanonicalizeNaN(result, value);
break;
}
- case kPPC_Push:
- if (instr->InputAt(0)->IsFPRegister()) {
- LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
- switch (op->representation()) {
- case MachineRepresentation::kFloat32:
- __ StoreSingleU(i.InputDoubleRegister(0),
- MemOperand(sp, -kSystemPointerSize), r0);
- frame_access_state()->IncreaseSPDelta(1);
- break;
- case MachineRepresentation::kFloat64:
- __ StoreDoubleU(i.InputDoubleRegister(0),
- MemOperand(sp, -kDoubleSize), r0);
- frame_access_state()->IncreaseSPDelta(kDoubleSize /
- kSystemPointerSize);
- break;
- case MachineRepresentation::kSimd128: {
- __ addi(sp, sp, Operand(-kSimd128Size));
- __ StoreSimd128(i.InputSimd128Register(0), MemOperand(r0, sp), r0,
- kScratchSimd128Reg);
- frame_access_state()->IncreaseSPDelta(kSimd128Size /
- kSystemPointerSize);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- } else {
- __ StorePU(i.InputRegister(0), MemOperand(sp, -kSystemPointerSize), r0);
- frame_access_state()->IncreaseSPDelta(1);
+ case kPPC_Push: {
+ int stack_decrement = i.InputInt32(0);
+ int slots = stack_decrement / kSystemPointerSize;
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(1));
+ MachineRepresentation rep = op->representation();
+ int pushed_slots = ElementSizeInPointers(rep);
+ // Slot-sized arguments are never padded but there may be a gap if
+ // the slot allocator reclaimed other padding slots. Adjust the stack
+ // here to skip any gap.
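+      // For example, on 64-bit targets a kFloat64 value occupies a single slot;
+      // if the selector requested a two-slot decrement, the reclaimed padding
+      // slot is skipped here before the value itself is pushed.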
+ if (slots > pushed_slots) {
+ __ addi(sp, sp,
+ Operand(-((slots - pushed_slots) * kSystemPointerSize)));
+ }
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ __ StoreSingleU(i.InputDoubleRegister(1),
+ MemOperand(sp, -kSystemPointerSize), r0);
+ break;
+ case MachineRepresentation::kFloat64:
+ __ StoreDoubleU(i.InputDoubleRegister(1),
+ MemOperand(sp, -kDoubleSize), r0);
+ break;
+ case MachineRepresentation::kSimd128:
+ __ addi(sp, sp, Operand(-kSimd128Size));
+ __ StoreSimd128(i.InputSimd128Register(1), MemOperand(r0, sp), r0,
+ kScratchSimd128Reg);
+ break;
+ default:
+ __ StorePU(i.InputRegister(1), MemOperand(sp, -kSystemPointerSize),
+ r0);
+ break;
}
+ frame_access_state()->IncreaseSPDelta(slots);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+ }
case kPPC_PushFrame: {
int num_slots = i.InputInt32(1);
if (instr->InputAt(0)->IsFPRegister()) {
@@ -3046,10 +3018,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vsububm(i.OutputSimd128Register(), tempFPReg1, kScratchSimd128Reg);
break;
}
- case kPPC_V64x2AnyTrue:
- case kPPC_V32x4AnyTrue:
- case kPPC_V16x8AnyTrue:
- case kPPC_V8x16AnyTrue: {
+ case kPPC_V128AnyTrue: {
Simd128Register src = i.InputSimd128Register(0);
Register dst = i.OutputRegister();
constexpr int bit_number = 24;
@@ -3111,6 +3080,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ xvcvuxwsp(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
+
+ case kPPC_I64x2SConvertI32x4Low: {
+ __ vupklsw(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_I64x2SConvertI32x4High: {
+ __ vupkhsw(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_I64x2UConvertI32x4Low: {
+ constexpr int lane_width_in_bytes = 8;
+ __ vupklsw(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ // Zero extend.
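+      // Build a 0x00000000FFFFFFFF mask in both doubleword lanes and clear the
+      // sign-extended upper words produced by the signed unpack above.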
+ __ mov(ip, Operand(0xFFFFFFFF));
+ __ mtvsrd(kScratchSimd128Reg, ip);
+ __ vinsertd(kScratchSimd128Reg, kScratchSimd128Reg,
+ Operand(1 * lane_width_in_bytes));
+ __ vand(i.OutputSimd128Register(), kScratchSimd128Reg,
+ i.OutputSimd128Register());
+ break;
+ }
+ case kPPC_I64x2UConvertI32x4High: {
+ constexpr int lane_width_in_bytes = 8;
+ __ vupkhsw(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ // Zero extend.
+ __ mov(ip, Operand(0xFFFFFFFF));
+ __ mtvsrd(kScratchSimd128Reg, ip);
+ __ vinsertd(kScratchSimd128Reg, kScratchSimd128Reg,
+ Operand(1 * lane_width_in_bytes));
+ __ vand(i.OutputSimd128Register(), kScratchSimd128Reg,
+ i.OutputSimd128Register());
+ break;
+ }
+
case kPPC_I32x4SConvertI16x8Low: {
__ vupklsh(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
@@ -3720,6 +3723,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1), kScratchSimd128Reg);
break;
}
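+// SIGN_SELECT builds an all-ones lane mask wherever the corresponding lane of
+// src2 is negative (0 > src2) and uses vsel to blend src0 and src1 under that
+// mask.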
+#define SIGN_SELECT(compare_gt) \
+ Simd128Register src0 = i.InputSimd128Register(0); \
+ Simd128Register src1 = i.InputSimd128Register(1); \
+ Simd128Register src2 = i.InputSimd128Register(2); \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ __ vxor(kScratchSimd128Reg, kScratchSimd128Reg, kScratchSimd128Reg); \
+ __ compare_gt(kScratchSimd128Reg, kScratchSimd128Reg, src2); \
+ __ vsel(dst, src1, src0, kScratchSimd128Reg);
+ case kPPC_I8x16SignSelect: {
+ SIGN_SELECT(vcmpgtsb)
+ break;
+ }
+ case kPPC_I16x8SignSelect: {
+ SIGN_SELECT(vcmpgtsh)
+ break;
+ }
+ case kPPC_I32x4SignSelect: {
+ SIGN_SELECT(vcmpgtsw)
+ break;
+ }
+ case kPPC_I64x2SignSelect: {
+ SIGN_SELECT(vcmpgtsd)
+ break;
+ }
+#undef SIGN_SELECT
case kPPC_StoreCompressTagged: {
ASSEMBLE_STORE_INTEGER(StoreTaggedField, StoreTaggedFieldX);
break;
@@ -3833,7 +3861,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
gen_->zone()->New<ReferenceMap>(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ gen_->RecordSafepoint(reference_map);
if (FLAG_debug_code) {
__ stop();
}
@@ -4073,7 +4101,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
- RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ RecordSafepoint(reference_map);
if (FLAG_debug_code) {
__ stop();
}
@@ -4152,7 +4180,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
Register argc_reg = r6;
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// Functions with JS linkage have at least one parameter (the receiver).
// If {parameter_count} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
@@ -4160,9 +4187,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
parameter_count != 0;
-#else
- const bool drop_jsargs = false;
-#endif
+
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
index b0aa6529c7..2ef553a4f5 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -264,6 +264,11 @@ namespace compiler {
V(PPC_I64x2ShrU) \
V(PPC_I64x2Neg) \
V(PPC_I64x2BitMask) \
+ V(PPC_I64x2SConvertI32x4Low) \
+ V(PPC_I64x2SConvertI32x4High) \
+ V(PPC_I64x2UConvertI32x4Low) \
+ V(PPC_I64x2UConvertI32x4High) \
+ V(PPC_I64x2SignSelect) \
V(PPC_I32x4Splat) \
V(PPC_I32x4ExtractLane) \
V(PPC_I32x4ReplaceLane) \
@@ -296,6 +301,7 @@ namespace compiler {
V(PPC_I32x4DotI16x8S) \
V(PPC_I32x4ExtAddPairwiseI16x8S) \
V(PPC_I32x4ExtAddPairwiseI16x8U) \
+ V(PPC_I32x4SignSelect) \
V(PPC_F32x4Qfma) \
V(PPC_F32x4Qfms) \
V(PPC_I16x8Splat) \
@@ -336,6 +342,7 @@ namespace compiler {
V(PPC_I16x8ExtAddPairwiseI8x16S) \
V(PPC_I16x8ExtAddPairwiseI8x16U) \
V(PPC_I16x8Q15MulRSatS) \
+ V(PPC_I16x8SignSelect) \
V(PPC_I8x16Splat) \
V(PPC_I8x16ExtractLaneU) \
V(PPC_I8x16ExtractLaneS) \
@@ -368,14 +375,12 @@ namespace compiler {
V(PPC_I8x16Shuffle) \
V(PPC_I8x16Swizzle) \
V(PPC_I8x16BitMask) \
- V(PPC_V64x2AnyTrue) \
- V(PPC_V32x4AnyTrue) \
- V(PPC_V16x8AnyTrue) \
- V(PPC_V8x16AnyTrue) \
+ V(PPC_I8x16SignSelect) \
V(PPC_V64x2AllTrue) \
V(PPC_V32x4AllTrue) \
V(PPC_V16x8AllTrue) \
V(PPC_V8x16AllTrue) \
+ V(PPC_V128AnyTrue) \
V(PPC_S128And) \
V(PPC_S128Or) \
V(PPC_S128Xor) \
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
index a737d23e9a..90025c5a82 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
@@ -189,6 +189,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I64x2ShrU:
case kPPC_I64x2Neg:
case kPPC_I64x2BitMask:
+ case kPPC_I64x2SConvertI32x4Low:
+ case kPPC_I64x2SConvertI32x4High:
+ case kPPC_I64x2UConvertI32x4Low:
+ case kPPC_I64x2UConvertI32x4High:
+ case kPPC_I64x2SignSelect:
case kPPC_I32x4Splat:
case kPPC_I32x4ExtractLane:
case kPPC_I32x4ReplaceLane:
@@ -221,6 +226,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I32x4DotI16x8S:
case kPPC_I32x4ExtAddPairwiseI16x8S:
case kPPC_I32x4ExtAddPairwiseI16x8U:
+ case kPPC_I32x4SignSelect:
case kPPC_I16x8Splat:
case kPPC_I16x8ExtractLaneU:
case kPPC_I16x8ExtractLaneS:
@@ -259,6 +265,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I16x8ExtAddPairwiseI8x16S:
case kPPC_I16x8ExtAddPairwiseI8x16U:
case kPPC_I16x8Q15MulRSatS:
+ case kPPC_I16x8SignSelect:
case kPPC_I8x16Splat:
case kPPC_I8x16ExtractLaneU:
case kPPC_I8x16ExtractLaneS:
@@ -291,14 +298,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I8x16Shuffle:
case kPPC_I8x16Swizzle:
case kPPC_I8x16BitMask:
- case kPPC_V64x2AnyTrue:
- case kPPC_V32x4AnyTrue:
- case kPPC_V16x8AnyTrue:
- case kPPC_V8x16AnyTrue:
+ case kPPC_I8x16SignSelect:
case kPPC_V64x2AllTrue:
case kPPC_V32x4AllTrue:
case kPPC_V16x8AllTrue:
case kPPC_V8x16AllTrue:
+ case kPPC_V128AnyTrue:
case kPPC_S128And:
case kPPC_S128Or:
case kPPC_S128Xor:
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index 3d4697b380..05fa443b41 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -230,7 +230,7 @@ void InstructionSelector::VisitLoad(Node* node) {
if (node->opcode() == IrOpcode::kPoisonedLoad &&
poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
- opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
}
bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicLoad ||
@@ -1862,18 +1862,20 @@ void InstructionSelector::EmitPrepareArguments(
}
} else {
// Push any stack arguments.
+ int stack_decrement = 0;
for (PushParameter input : base::Reversed(*arguments)) {
+ stack_decrement += kSystemPointerSize;
// Skip any alignment holes in pushed nodes.
if (input.node == nullptr) continue;
- Emit(kPPC_Push, g.NoOutput(), g.UseRegister(input.node));
+ InstructionOperand decrement = g.UseImmediate(stack_decrement);
+ stack_decrement = 0;
+ Emit(kPPC_Push, g.NoOutput(), decrement, g.UseRegister(input.node));
}
}
}
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
-int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
-
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
PPCOperandGenerator g(this);
Emit(kPPC_DoubleExtractLowWord32, g.DefineAsRegister(node),
@@ -2174,6 +2176,8 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I64x2Add) \
V(I64x2Sub) \
V(I64x2Mul) \
+ V(I64x2Eq) \
+ V(I64x2Ne) \
V(I32x4Add) \
V(I32x4AddHoriz) \
V(I32x4Sub) \
@@ -2237,42 +2241,50 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(S128Xor) \
V(S128AndNot)
-#define SIMD_UNOP_LIST(V) \
- V(F64x2Abs) \
- V(F64x2Neg) \
- V(F64x2Sqrt) \
- V(F64x2Ceil) \
- V(F64x2Floor) \
- V(F64x2Trunc) \
- V(F64x2NearestInt) \
- V(F32x4Abs) \
- V(F32x4Neg) \
- V(F32x4RecipApprox) \
- V(F32x4RecipSqrtApprox) \
- V(F32x4Sqrt) \
- V(F32x4SConvertI32x4) \
- V(F32x4UConvertI32x4) \
- V(F32x4Ceil) \
- V(F32x4Floor) \
- V(F32x4Trunc) \
- V(F32x4NearestInt) \
- V(I64x2Neg) \
- V(I32x4Neg) \
- V(I32x4Abs) \
- V(I32x4SConvertF32x4) \
- V(I32x4UConvertF32x4) \
- V(I32x4SConvertI16x8Low) \
- V(I32x4SConvertI16x8High) \
- V(I32x4UConvertI16x8Low) \
- V(I32x4UConvertI16x8High) \
- V(I16x8Neg) \
- V(I16x8Abs) \
- V(I8x16Neg) \
- V(I8x16Abs) \
- V(I16x8SConvertI8x16Low) \
- V(I16x8SConvertI8x16High) \
- V(I16x8UConvertI8x16Low) \
- V(I16x8UConvertI8x16High) \
+#define SIMD_UNOP_LIST(V) \
+ V(F64x2Abs) \
+ V(F64x2Neg) \
+ V(F64x2Sqrt) \
+ V(F64x2Ceil) \
+ V(F64x2Floor) \
+ V(F64x2Trunc) \
+ V(F64x2NearestInt) \
+ V(F32x4Abs) \
+ V(F32x4Neg) \
+ V(F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox) \
+ V(F32x4Sqrt) \
+ V(F32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4) \
+ V(F32x4Ceil) \
+ V(F32x4Floor) \
+ V(F32x4Trunc) \
+ V(F32x4NearestInt) \
+ V(I64x2Neg) \
+ V(I64x2SConvertI32x4Low) \
+ V(I64x2SConvertI32x4High) \
+ V(I64x2UConvertI32x4Low) \
+ V(I64x2UConvertI32x4High) \
+ V(I32x4Neg) \
+ V(I32x4Abs) \
+ V(I32x4SConvertF32x4) \
+ V(I32x4UConvertF32x4) \
+ V(I32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High) \
+ V(I32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High) \
+ V(I32x4ExtAddPairwiseI16x8S) \
+ V(I32x4ExtAddPairwiseI16x8U) \
+ V(I16x8Neg) \
+ V(I16x8Abs) \
+ V(I8x16Neg) \
+ V(I8x16Abs) \
+ V(I16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High) \
+ V(I16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High) \
+ V(I16x8ExtAddPairwiseI8x16S) \
+ V(I16x8ExtAddPairwiseI8x16U) \
V(S128Not)
#define SIMD_SHIFT_LIST(V) \
@@ -2290,9 +2302,8 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16ShrU)
#define SIMD_BOOL_LIST(V) \
- V(V32x4AnyTrue) \
- V(V16x8AnyTrue) \
- V(V8x16AnyTrue) \
+ V(V128AnyTrue) \
+ V(V64x2AllTrue) \
V(V32x4AllTrue) \
V(V16x8AllTrue) \
V(V8x16AllTrue)
@@ -2496,6 +2507,28 @@ void InstructionSelector::VisitI16x8ExtMulLowI8x16U(Node* node) {
void InstructionSelector::VisitI16x8ExtMulHighI8x16U(Node* node) {
UNIMPLEMENTED();
}
+void InstructionSelector::VisitI8x16Popcnt(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2ConvertLowI32x4S(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitF64x2ConvertLowI32x4U(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitF64x2PromoteLowF32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitF32x4DemoteF64x2Zero(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI32x4TruncSatF64x2SZero(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI32x4TruncSatF64x2UZero(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI64x2GtS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2GeS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Abs(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::EmitPrepareResults(
ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
@@ -2522,6 +2555,28 @@ void InstructionSelector::EmitPrepareResults(
}
}
+void InstructionSelector::VisitLoadLane(Node* node) {
+ LoadLaneParameters params = LoadLaneParametersOf(node->op());
+ InstructionCode opcode = kArchNop;
+ if (params.rep == MachineType::Int8()) {
+ opcode = kPPC_S128Load8Lane;
+ } else if (params.rep == MachineType::Int16()) {
+ opcode = kPPC_S128Load16Lane;
+ } else if (params.rep == MachineType::Int32()) {
+ opcode = kPPC_S128Load32Lane;
+ } else if (params.rep == MachineType::Int64()) {
+ opcode = kPPC_S128Load64Lane;
+ } else {
+ UNREACHABLE();
+ }
+
+ PPCOperandGenerator g(this);
+ Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(2)),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseImmediate(params.laneidx));
+}
+
void InstructionSelector::VisitLoadTransform(Node* node) {
LoadTransformParameters params = LoadTransformParametersOf(node->op());
PPCOperandGenerator g(this);
@@ -2573,6 +2628,32 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
+void InstructionSelector::VisitStoreLane(Node* node) {
+ PPCOperandGenerator g(this);
+
+ StoreLaneParameters params = StoreLaneParametersOf(node->op());
+ InstructionCode opcode = kArchNop;
+ if (params.rep == MachineRepresentation::kWord8) {
+ opcode = kPPC_S128Store8Lane;
+ } else if (params.rep == MachineRepresentation::kWord16) {
+ opcode = kPPC_S128Store16Lane;
+ } else if (params.rep == MachineRepresentation::kWord32) {
+ opcode = kPPC_S128Store32Lane;
+ } else if (params.rep == MachineRepresentation::kWord64) {
+ opcode = kPPC_S128Store64Lane;
+ } else {
+ UNREACHABLE();
+ }
+
+ InstructionOperand inputs[4];
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(2));
+ inputs[0] = value_operand;
+ inputs[1] = g.UseRegister(node->InputAt(0));
+ inputs[2] = g.UseRegister(node->InputAt(1));
+ inputs[3] = g.UseImmediate(params.laneidx);
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), 0, nullptr, 4, inputs);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc
index 8280665c90..84145c8779 100644
--- a/deps/v8/src/compiler/backend/register-allocator.cc
+++ b/deps/v8/src/compiler/backend/register-allocator.cc
@@ -2650,7 +2650,7 @@ bool LiveRangeBundle::TryAddRange(LiveRange* range) {
LiveRangeBundle* LiveRangeBundle::TryMerge(LiveRangeBundle* lhs,
LiveRangeBundle* rhs,
bool trace_alloc) {
- if (rhs == lhs) return nullptr;
+ if (rhs == lhs) return lhs;
auto iter1 = lhs->uses_.begin();
auto iter2 = rhs->uses_.begin();
diff --git a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
new file mode 100644
index 0000000000..cc83f22c65
--- /dev/null
+++ b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
@@ -0,0 +1,2775 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/optimized-compilation-info.h"
+#include "src/codegen/riscv64/constants-riscv64.h"
+#include "src/compiler/backend/code-generator-impl.h"
+#include "src/compiler/backend/code-generator.h"
+#include "src/compiler/backend/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/osr.h"
+#include "src/heap/memory-chunk.h"
+#include "src/wasm/wasm-code-manager.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ tasm()->
+
+// TODO(plind): consider renaming these macros.
+#define TRACE_MSG(msg) \
+ PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
+ __LINE__)
+
+#define TRACE_UNIMPL() \
+ PrintF("UNIMPLEMENTED code_generator_riscv64: %s at line %d\n", \
+ __FUNCTION__, __LINE__)
+
+// Adds RISC-V-specific methods to convert InstructionOperands.
+class RiscvOperandConverter final : public InstructionOperandConverter {
+ public:
+ RiscvOperandConverter(CodeGenerator* gen, Instruction* instr)
+ : InstructionOperandConverter(gen, instr) {}
+
+ FloatRegister OutputSingleRegister(size_t index = 0) {
+ return ToSingleRegister(instr_->OutputAt(index));
+ }
+
+ FloatRegister InputSingleRegister(size_t index) {
+ return ToSingleRegister(instr_->InputAt(index));
+ }
+
+ FloatRegister ToSingleRegister(InstructionOperand* op) {
+    // The Single (float) and Double register namespaces are the same on
+    // RISC-V; both are typedefs of FPURegister.
+ return ToDoubleRegister(op);
+ }
+
+ Register InputOrZeroRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) {
+ DCHECK_EQ(0, InputInt32(index));
+ return zero_reg;
+ }
+ return InputRegister(index);
+ }
+
+ DoubleRegister InputOrZeroDoubleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputDoubleRegister(index);
+ }
+
+ DoubleRegister InputOrZeroSingleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputSingleRegister(index);
+ }
+
+ Operand InputImmediate(size_t index) {
+ Constant constant = ToConstant(instr_->InputAt(index));
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return Operand(constant.ToInt32());
+ case Constant::kInt64:
+ return Operand(constant.ToInt64());
+ case Constant::kFloat32:
+ return Operand::EmbeddedNumber(constant.ToFloat32());
+ case Constant::kFloat64:
+ return Operand::EmbeddedNumber(constant.ToFloat64().value());
+ case Constant::kExternalReference:
+ case Constant::kCompressedHeapObject:
+ case Constant::kHeapObject:
+ // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
+ // maybe not done on arm due to const pool ??
+ break;
+ case Constant::kDelayedStringConstant:
+ return Operand::EmbeddedStringConstant(
+ constant.ToDelayedStringConstant());
+ case Constant::kRpoNumber:
+ UNREACHABLE(); // TODO(titzer): RPO immediates
+ break;
+ }
+ UNREACHABLE();
+ }
+
+ Operand InputOperand(size_t index) {
+ InstructionOperand* op = instr_->InputAt(index);
+ if (op->IsRegister()) {
+ return Operand(ToRegister(op));
+ }
+ return InputImmediate(index);
+ }
+
+ MemOperand MemoryOperand(size_t* first_index) {
+ const size_t index = *first_index;
+ switch (AddressingModeField::decode(instr_->opcode())) {
+ case kMode_None:
+ break;
+ case kMode_MRI:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+ case kMode_MRR:
+ // TODO(plind): r6 address mode, to be implemented ...
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ }
+
+ MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
+
+ MemOperand ToMemOperand(InstructionOperand* op) const {
+ DCHECK_NOT_NULL(op);
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
+ return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+ }
+
+ MemOperand SlotToMemOperand(int slot) const {
+ FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
+ return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+ }
+};
+
+static inline bool HasRegisterInput(Instruction* instr, size_t index) {
+ return instr->InputAt(index)->IsRegister();
+}
+
+namespace {
+
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode, StubCallMode stub_mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ index_(index),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode),
+ stub_mode_(stub_mode),
+ must_save_lr_(!gen->frame_access_state()->has_frame()),
+ zone_(gen->zone()) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ __ Add64(scratch1_, object_, index_);
+ RememberedSetAction const remembered_set_action =
+ mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+ : OMIT_REMEMBERED_SET;
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ if (must_save_lr_) {
+ // We need to save and restore ra if the frame was elided.
+ __ Push(ra);
+ }
+ if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
+ __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
+ } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode, wasm::WasmCode::kRecordWrite);
+ } else {
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode);
+ }
+ if (must_save_lr_) {
+ __ Pop(ra);
+ }
+ }
+
+ private:
+ Register const object_;
+ Register const index_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+ StubCallMode const stub_mode_;
+ bool must_save_lr_;
+ Zone* zone_;
+};
+
+Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ return eq;
+ case kNotEqual:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedGreaterThanOrEqual:
+ return ge;
+ case kSignedLessThanOrEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kUnsignedLessThan:
+ return Uless;
+ case kUnsignedGreaterThanOrEqual:
+ return Ugreater_equal;
+ case kUnsignedLessThanOrEqual:
+ return Uless_equal;
+ case kUnsignedGreaterThan:
+ return Ugreater;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ break;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
+Condition FlagsConditionToConditionTst(FlagsCondition condition) {
+ switch (condition) {
+ case kNotEqual:
+ return ne;
+ case kEqual:
+ return eq;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
+Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
+ switch (condition) {
+ case kOverflow:
+ return ne;
+ case kNotOverflow:
+ return eq;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
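+// Returns the FPU condition to evaluate; *predicate tells the caller whether to
+// act on the comparison result directly (true) or on its negation (false),
+// e.g. kNotEqual is implemented as EQ with a negated predicate.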
+FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
+ FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ *predicate = true;
+ return EQ;
+ case kNotEqual:
+ *predicate = false;
+ return EQ;
+ case kUnsignedLessThan:
+ *predicate = true;
+ return LT;
+ case kUnsignedGreaterThanOrEqual:
+ *predicate = false;
+ return LT;
+ case kUnsignedLessThanOrEqual:
+ *predicate = true;
+ return LE;
+ case kUnsignedGreaterThan:
+ *predicate = false;
+ return LE;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ *predicate = true;
+ break;
+ default:
+ *predicate = true;
+ break;
+ }
+ UNREACHABLE();
+}
+
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr,
+ RiscvOperandConverter const& i) {
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ if (access_mode == kMemoryAccessPoisoned) {
+ Register value = i.OutputRegister();
+ codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
+ }
+}
+
+} // namespace
+
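+// Atomic loads and stores are lowered to ordinary memory accesses combined
+// with full fences (sync) to enforce ordering.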
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
+ do { \
+ __ sync(); \
+ __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \
+ do { \
+ Label binop; \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ sync(); \
+ __ bind(&binop); \
+ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ bin_instr(i.TempRegister(1), i.OutputRegister(0), \
+ Operand(i.InputRegister(2))); \
+ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&binop, ne, i.TempRegister(1), Operand(zero_reg)); \
+ __ sync(); \
+ } while (0)
+
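+// Sub-word variant: LL/SC operates on the containing aligned word, so the
+// element is extracted, updated, and inserted back into that word on every
+// retry of the store-conditional loop.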
+#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend, \
+ size, bin_instr, representation) \
+ do { \
+ Label binop; \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (representation == 32) { \
+ __ And(i.TempRegister(3), i.TempRegister(0), 0x3); \
+ } else { \
+ DCHECK_EQ(representation, 64); \
+ __ And(i.TempRegister(3), i.TempRegister(0), 0x7); \
+ } \
+ __ Sub64(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(3))); \
+ __ Sll32(i.TempRegister(3), i.TempRegister(3), 3); \
+ __ sync(); \
+ __ bind(&binop); \
+ __ load_linked(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \
+ size, sign_extend); \
+ __ bin_instr(i.TempRegister(2), i.OutputRegister(0), \
+ Operand(i.InputRegister(2))); \
+ __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3), \
+ size); \
+ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&binop, ne, i.TempRegister(1), Operand(zero_reg)); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_linked, store_conditional) \
+ do { \
+ Label exchange; \
+ __ sync(); \
+ __ bind(&exchange); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ Move(i.TempRegister(1), i.InputRegister(2)); \
+ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exchange, ne, i.TempRegister(1), Operand(zero_reg)); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT( \
+ load_linked, store_conditional, sign_extend, size, representation) \
+ do { \
+ Label exchange; \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (representation == 32) { \
+ __ And(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ } else { \
+ DCHECK_EQ(representation, 64); \
+ __ And(i.TempRegister(1), i.TempRegister(0), 0x7); \
+ } \
+ __ Sub64(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(1))); \
+ __ Sll32(i.TempRegister(1), i.TempRegister(1), 3); \
+ __ sync(); \
+ __ bind(&exchange); \
+ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
+ size, sign_extend); \
+ __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \
+ size); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exchange, ne, i.TempRegister(2), Operand(zero_reg)); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \
+ store_conditional) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ sync(); \
+ __ bind(&compareExchange); \
+ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exit, ne, i.InputRegister(2), \
+ Operand(i.OutputRegister(0))); \
+ __ Move(i.TempRegister(2), i.InputRegister(3)); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&compareExchange, ne, i.TempRegister(2), \
+ Operand(zero_reg)); \
+ __ bind(&exit); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \
+ load_linked, store_conditional, sign_extend, size, representation) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (representation == 32) { \
+ __ And(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ } else { \
+ DCHECK_EQ(representation, 64); \
+ __ And(i.TempRegister(1), i.TempRegister(0), 0x7); \
+ } \
+ __ Sub64(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(1))); \
+ __ Sll32(i.TempRegister(1), i.TempRegister(1), 3); \
+ __ sync(); \
+ __ bind(&compareExchange); \
+ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
+ size, sign_extend); \
+ __ ExtractBits(i.InputRegister(2), i.InputRegister(2), i.TempRegister(1), \
+ size, sign_extend); \
+ __ BranchShort(&exit, ne, i.InputRegister(2), \
+ Operand(i.OutputRegister(0))); \
+ __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
+ size); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&compareExchange, ne, i.TempRegister(2), \
+ Operand(zero_reg)); \
+ __ bind(&exit); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2, kScratchReg); \
+ __ MovToFloatParameters(i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
+ /* Move the result in the double result register. */ \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ } while (0)
+
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 1, kScratchReg); \
+ __ MovToFloatParameter(i.InputDoubleRegister(0)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
+ /* Move the result in the double result register. */ \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ } while (0)
+
+#define ASSEMBLE_F64X2_ARITHMETIC_BINOP(op) \
+ do { \
+ __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ i.InputSimd128Register(1)); \
+ } while (0)
+
+void CodeGenerator::AssembleDeconstructFrame() {
+ __ Move(sp, fp);
+ __ Pop(ra, fp);
+}
+
+void CodeGenerator::AssemblePrepareTailCall() {
+ if (frame_access_state()->has_frame()) {
+ __ Ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ }
+ frame_access_state()->SetFrameAccessToSP();
+}
+
+namespace {
+
+void AdjustStackPointerForTailCall(TurboAssembler* tasm,
+ FrameAccessState* state,
+ int new_slot_above_sp,
+ bool allow_shrinkage = true) {
+ int current_sp_offset = state->GetSPToFPSlotCount() +
+ StandardFrameConstants::kFixedSlotCountAboveFp;
+ int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+ if (stack_slot_delta > 0) {
+ tasm->Sub64(sp, sp, stack_slot_delta * kSystemPointerSize);
+ state->IncreaseSPDelta(stack_slot_delta);
+ } else if (allow_shrinkage && stack_slot_delta < 0) {
+ tasm->Add64(sp, sp, -stack_slot_delta * kSystemPointerSize);
+ state->IncreaseSPDelta(stack_slot_delta);
+ }
+}
+
+} // namespace
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ first_unused_stack_slot, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ first_unused_stack_slot);
+}
+
+// Check that {kJavaScriptCallCodeStartRegister} is correct.
+void CodeGenerator::AssembleCodeStartRegisterCheck() {
+ __ ComputeCodeStartAddress(kScratchReg);
+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
+ kJavaScriptCallCodeStartRegister, Operand(kScratchReg));
+}
+
+// Check if the code object is marked for deoptimization. If it is, then it
+// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
+// to:
+// 1. read from memory the word that contains that bit, which can be found in
+// the flags in the referenced {CodeDataContainer} object;
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if it is not zero then it jumps to the builtin.
+void CodeGenerator::BailoutIfDeoptimized() {
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ __ Ld(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
+ __ Lw(kScratchReg,
+ FieldMemOperand(kScratchReg,
+ CodeDataContainer::kKindSpecificFlagsOffset));
+ __ And(kScratchReg, kScratchReg,
+ Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
+ RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
+}
+
+void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
+ // Calculate a mask which has all bits set in the normal case, but has all
+ // bits cleared if we are speculatively executing the wrong PC.
+ // difference = (current - expected) | (expected - current)
+ // poison = ~(difference >> (kBitsPerSystemPointer - 1))
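+  // If current == expected the difference is zero, the arithmetic shift keeps
+  // it zero, and the final nor yields all ones; otherwise the sign bit is set,
+  // the shift smears it across the register, and the nor clears the mask.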
+ __ ComputeCodeStartAddress(kScratchReg);
+ __ Move(kSpeculationPoisonRegister, kScratchReg);
+ __ Sub32(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kJavaScriptCallCodeStartRegister);
+ __ Sub32(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
+ kScratchReg);
+ __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kJavaScriptCallCodeStartRegister);
+ __ Sra64(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kBitsPerSystemPointer - 1);
+ __ Nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kSpeculationPoisonRegister);
+}
+
+void CodeGenerator::AssembleRegisterArgumentPoisoning() {
+ __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
+ __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
+ __ And(sp, sp, kSpeculationPoisonRegister);
+}
+
+// Assembles an instruction after register allocation, producing machine code.
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
+ RiscvOperandConverter i(this, instr);
+ InstructionCode opcode = instr->opcode();
+ ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+ switch (arch_opcode) {
+ case kArchCallCodeObject: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ CallCodeObject(reg);
+ }
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchCallBuiltinPointer: {
+ DCHECK(!instr->InputAt(0)->IsImmediate());
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchCallWasmFunction: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+ __ Call(wasm_code, constant.rmode());
+ } else {
+ __ Add64(kScratchReg, i.InputRegister(0), 0);
+ __ Call(kScratchReg);
+ }
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallCodeObject: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ JumpCodeObject(reg);
+ }
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchTailCallWasm: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+ __ Jump(wasm_code, constant.rmode());
+ } else {
+ __ Add64(kScratchReg, i.InputRegister(0), 0);
+ __ Jump(kScratchReg);
+ }
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchTailCallAddress: {
+ CHECK(!instr->InputAt(0)->IsImmediate());
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ Jump(reg);
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchCallJSFunction: {
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ Ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
+ Operand(kScratchReg));
+ }
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ CallCodeObject(a2);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchPrepareCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, kScratchReg);
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
+ break;
+ }
+ case kArchSaveCallerRegisters: {
+ fp_mode_ =
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
+ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ // kReturnRegister0 should have been saved before entering the stub.
+ int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
+ DCHECK(IsAligned(bytes, kSystemPointerSize));
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
+ DCHECK(!caller_registers_saved_);
+ caller_registers_saved_ = true;
+ break;
+ }
+ case kArchRestoreCallerRegisters: {
+ DCHECK(fp_mode_ ==
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
+ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ // Don't overwrite the returned value.
+ int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ DCHECK(caller_registers_saved_);
+ caller_registers_saved_ = false;
+ break;
+ }
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall();
+ break;
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ Label after_call;
+ bool isWasmCapiFunction =
+ linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
+ if (isWasmCapiFunction) {
+ // Put the return address in a stack slot.
+ __ LoadAddress(kScratchReg, &after_call, RelocInfo::EXTERNAL_REFERENCE);
+ __ Sd(kScratchReg,
+ MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ }
+ if (instr->InputAt(0)->IsImmediate()) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ __ bind(&after_call);
+ if (isWasmCapiFunction) {
+ RecordSafepoint(instr->reference_map());
+ }
+
+ frame_access_state()->SetFrameAccessToDefault();
+      // Ideally, we should decrement the SP delta to match the change of the
+      // stack pointer in CallCFunction. However, certain architectures (e.g.
+      // ARM) may have a stricter alignment requirement, causing the old SP to
+      // be saved on the stack. In those cases, we cannot calculate the SP
+      // delta statically.
+ frame_access_state()->ClearSPDelta();
+ if (caller_registers_saved_) {
+ // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
+ // Here, we assume the sequence to be:
+ // kArchSaveCallerRegisters;
+ // kArchCallCFunction;
+ // kArchRestoreCallerRegisters;
+ int bytes =
+ __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
+ }
+ break;
+ }
+ case kArchJmp:
+ AssembleArchJump(i.InputRpo(0));
+ break;
+ case kArchBinarySearchSwitch:
+ AssembleArchBinarySearchSwitch(instr);
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ break;
+ case kArchAbortCSAAssert:
+ DCHECK(i.InputRegister(0) == a0);
+ {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(tasm(), StackFrame::NONE);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
+ }
+ __ stop();
+ break;
+ case kArchDebugBreak:
+ __ DebugBreak();
+ break;
+ case kArchComment:
+ __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
+ break;
+ case kArchNop:
+ case kArchThrowTerminator:
+ // don't emit code for nops.
+ break;
+ case kArchDeoptimize: {
+ DeoptimizationExit* exit =
+ BuildTranslation(instr, -1, 0, 0, OutputFrameStateCombine::Ignore());
+ __ Branch(exit->label());
+ break;
+ }
+ case kArchRet:
+ AssembleReturn(instr->InputAt(0));
+ break;
+ case kArchStackPointerGreaterThan:
+ // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+ break;
+ case kArchStackCheckOffset:
+ __ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset()));
+ break;
+ case kArchFramePointer:
+ __ Move(i.OutputRegister(), fp);
+ break;
+ case kArchParentFramePointer:
+ if (frame_access_state()->has_frame()) {
+ __ Ld(i.OutputRegister(), MemOperand(fp, 0));
+ } else {
+ __ Move(i.OutputRegister(), fp);
+ }
+ break;
+ case kArchTruncateDoubleToI:
+ __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
+ i.InputDoubleRegister(0), DetermineStubCallMode());
+ break;
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = zone()->New<OutOfLineRecordWrite>(this, object, index, value,
+ scratch0, scratch1, mode,
+ DetermineStubCallMode());
+ __ Add64(kScratchReg, object, index);
+ __ Sd(value, MemOperand(kScratchReg));
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kArchStackSlot: {
+ FrameOffset offset =
+ frame_access_state()->GetFrameOffset(i.InputInt32(0));
+ Register base_reg = offset.from_stack_pointer() ? sp : fp;
+ __ Add64(i.OutputRegister(), base_reg, Operand(offset.offset()));
+ int alignment = i.InputInt32(1);
+ DCHECK(alignment == 0 || alignment == 4 || alignment == 8 ||
+ alignment == 16);
+ if (FLAG_debug_code && alignment > 0) {
+ // Verify that the output_register is properly aligned
+ __ And(kScratchReg, i.OutputRegister(),
+ Operand(kSystemPointerSize - 1));
+ __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg,
+ Operand(zero_reg));
+ }
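+      // If the computed slot address is not aligned, round the result up: by
+      // one slot for 16-byte alignment, or up to the next multiple of the
+      // requested alignment for larger values.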
+ if (alignment == 2 * kSystemPointerSize) {
+ Label done;
+ __ Add64(kScratchReg, base_reg, Operand(offset.offset()));
+ __ And(kScratchReg, kScratchReg, Operand(alignment - 1));
+ __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
+ __ Add64(i.OutputRegister(), i.OutputRegister(), kSystemPointerSize);
+ __ bind(&done);
+ } else if (alignment > 2 * kSystemPointerSize) {
+ Label done;
+ __ Add64(kScratchReg, base_reg, Operand(offset.offset()));
+ __ And(kScratchReg, kScratchReg, Operand(alignment - 1));
+ __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
+ __ li(kScratchReg2, alignment);
+ __ Sub64(kScratchReg2, kScratchReg2, Operand(kScratchReg));
+ __ Add64(i.OutputRegister(), i.OutputRegister(), kScratchReg2);
+ __ bind(&done);
+ }
+
+ break;
+ }
+ case kArchWordPoisonOnSpeculation:
+ __ And(i.OutputRegister(), i.InputRegister(0),
+ kSpeculationPoisonRegister);
+ break;
+ case kIeee754Float64Acos:
+ ASSEMBLE_IEEE754_UNOP(acos);
+ break;
+ case kIeee754Float64Acosh:
+ ASSEMBLE_IEEE754_UNOP(acosh);
+ break;
+ case kIeee754Float64Asin:
+ ASSEMBLE_IEEE754_UNOP(asin);
+ break;
+ case kIeee754Float64Asinh:
+ ASSEMBLE_IEEE754_UNOP(asinh);
+ break;
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Cos:
+ ASSEMBLE_IEEE754_UNOP(cos);
+ break;
+ case kIeee754Float64Cosh:
+ ASSEMBLE_IEEE754_UNOP(cosh);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Expm1:
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Pow:
+ ASSEMBLE_IEEE754_BINOP(pow);
+ break;
+ case kIeee754Float64Sin:
+ ASSEMBLE_IEEE754_UNOP(sin);
+ break;
+ case kIeee754Float64Sinh:
+ ASSEMBLE_IEEE754_UNOP(sinh);
+ break;
+ case kIeee754Float64Tan:
+ ASSEMBLE_IEEE754_UNOP(tan);
+ break;
+ case kIeee754Float64Tanh:
+ ASSEMBLE_IEEE754_UNOP(tanh);
+ break;
+ case kRiscvAdd32:
+ __ Add32(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvAdd64:
+ __ Add64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvAddOvf64:
+ __ AddOverflow64(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), kScratchReg);
+ break;
+ case kRiscvSub32:
+ __ Sub32(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvSub64:
+ __ Sub64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvSubOvf64:
+ __ SubOverflow64(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), kScratchReg);
+ break;
+ case kRiscvMul32:
+ __ Mul32(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvMulOvf32:
+ __ MulOverflow32(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), kScratchReg);
+ break;
+ case kRiscvMulHigh32:
+ __ Mulh32(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvMulHighU32:
+ __ Mulhu32(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
+ kScratchReg, kScratchReg2);
+ break;
+ case kRiscvMulHigh64:
+ __ Mulh64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvDiv32: {
+ __ Div32(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      // Set output to zero if divisor == 0
+ __ LoadZeroIfConditionZero(i.OutputRegister(), i.InputRegister(1));
+ break;
+ }
+ case kRiscvDivU32: {
+ __ Divu32(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      // Set output to zero if divisor == 0
+ __ LoadZeroIfConditionZero(i.OutputRegister(), i.InputRegister(1));
+ break;
+ }
+ case kRiscvMod32:
+ __ Mod32(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvModU32:
+ __ Modu32(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvMul64:
+ __ Mul64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvDiv64: {
+ __ Div64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      // Set output to zero if divisor == 0
+ __ LoadZeroIfConditionZero(i.OutputRegister(), i.InputRegister(1));
+ break;
+ }
+ case kRiscvDivU64: {
+ __ Divu64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      // Set output to zero if divisor == 0
+ __ LoadZeroIfConditionZero(i.OutputRegister(), i.InputRegister(1));
+ break;
+ }
+ case kRiscvMod64:
+ __ Mod64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvModU64:
+ __ Modu64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvAnd:
+ __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvAnd32:
+ __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ Sll32(i.OutputRegister(), i.OutputRegister(), 0x0);
+ break;
+ case kRiscvOr:
+ __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvOr32:
+ __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ Sll32(i.OutputRegister(), i.OutputRegister(), 0x0);
+ break;
+ case kRiscvNor:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ } else {
+ DCHECK_EQ(0, i.InputOperand(1).immediate());
+ __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
+ }
+ break;
+ case kRiscvNor32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ Sll32(i.OutputRegister(), i.OutputRegister(), 0x0);
+ } else {
+ DCHECK_EQ(0, i.InputOperand(1).immediate());
+ __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
+ __ Sll32(i.OutputRegister(), i.OutputRegister(), 0x0);
+ }
+ break;
+ case kRiscvXor:
+ __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvXor32:
+ __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ Sll32(i.OutputRegister(), i.OutputRegister(), 0x0);
+ break;
+ case kRiscvClz32:
+ __ Clz32(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kRiscvClz64:
+ __ Clz64(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kRiscvCtz32: {
+ Register src = i.InputRegister(0);
+ Register dst = i.OutputRegister();
+ __ Ctz32(dst, src);
+ } break;
+ case kRiscvCtz64: {
+ Register src = i.InputRegister(0);
+ Register dst = i.OutputRegister();
+ __ Ctz64(dst, src);
+ } break;
+ case kRiscvPopcnt32: {
+ Register src = i.InputRegister(0);
+ Register dst = i.OutputRegister();
+ __ Popcnt32(dst, src);
+ } break;
+ case kRiscvPopcnt64: {
+ Register src = i.InputRegister(0);
+ Register dst = i.OutputRegister();
+ __ Popcnt64(dst, src);
+ } break;
+ case kRiscvShl32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Sll32(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ Sll32(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kRiscvShr32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Srl32(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ Srl32(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kRiscvSar32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Sra32(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ Sra32(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kRiscvZeroExtendWord: {
+ __ ZeroExtendWord(i.OutputRegister(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvSignExtendWord: {
+ __ SignExtendWord(i.OutputRegister(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvShl64:
+ __ Sll64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvShr64:
+ __ Srl64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvSar64:
+ __ Sra64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvRor32:
+ __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvRor64:
+ __ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvTst:
+ __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
+ // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+ break;
+ case kRiscvCmp:
+ // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+ break;
+ case kRiscvMov:
+ // TODO(plind): Should we combine mov/li like this, or use separate instr?
+ // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
+ if (HasRegisterInput(instr, 0)) {
+ __ Move(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ li(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+
+ case kRiscvCmpS: {
+ FPURegister left = i.InputOrZeroSingleRegister(0);
+ FPURegister right = i.InputOrZeroSingleRegister(1);
+ bool predicate;
+ FPUCondition cc =
+ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
+
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsSingleZeroRegSet()) {
+ __ LoadFPRImmediate(kDoubleRegZero, 0.0f);
+ }
+      // The comparison result is written to kScratchReg.
+ __ CompareF32(kScratchReg, cc, left, right);
+ } break;
+ case kRiscvAddS:
+ // TODO(plind): add special case: combine mult & add.
+ __ fadd_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvSubS:
+ __ fsub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvMulS:
+ // TODO(plind): add special case: right op is -1.0, see arm port.
+ __ fmul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvDivS:
+ __ fdiv_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvModS: {
+ // TODO(bmeurer): We should really get rid of this special instruction,
+ // and generate a CallAddress instruction instead.
+ FrameScope scope(tasm(), StackFrame::MANUAL);
+ __ PrepareCallCFunction(0, 2, kScratchReg);
+ __ MovToFloatParameters(i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate())
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
+      // Move the result into the double result register.
+ __ MovFromFloatResult(i.OutputSingleRegister());
+ break;
+ }
+ case kRiscvAbsS:
+ __ fabs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ case kRiscvNegS:
+ __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ case kRiscvSqrtS: {
+ __ fsqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kRiscvMaxS:
+ __ fmax_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvMinS:
+ __ fmin_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvCmpD: {
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ bool predicate;
+ FPUCondition cc =
+ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ LoadFPRImmediate(kDoubleRegZero, 0.0);
+ }
+      // The comparison result is written to kScratchReg.
+ __ CompareF64(kScratchReg, cc, left, right);
+ } break;
+ case kRiscvAddD:
+ // TODO(plind): add special case: combine mult & add.
+ __ fadd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvSubD:
+ __ fsub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvMulD:
+ // TODO(plind): add special case: right op is -1.0, see arm port.
+ __ fmul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvDivD:
+ __ fdiv_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvModD: {
+ // TODO(bmeurer): We should really get rid of this special instruction,
+ // and generate a CallAddress instruction instead.
+ FrameScope scope(tasm(), StackFrame::MANUAL);
+ __ PrepareCallCFunction(0, 2, kScratchReg);
+ __ MovToFloatParameters(i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
+ // Move the result in the double result register.
+ __ MovFromFloatResult(i.OutputDoubleRegister());
+ break;
+ }
+ case kRiscvAbsD:
+ __ fabs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kRiscvNegD:
+ __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kRiscvSqrtD: {
+ __ fsqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kRiscvMaxD:
+ __ fmax_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvMinD:
+ __ fmin_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvFloat64RoundDown: {
+ __ Floor_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ kScratchDoubleReg);
+ break;
+ }
+ case kRiscvFloat32RoundDown: {
+ __ Floor_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
+ kScratchDoubleReg);
+ break;
+ }
+ case kRiscvFloat64RoundTruncate: {
+ __ Trunc_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ kScratchDoubleReg);
+ break;
+ }
+ case kRiscvFloat32RoundTruncate: {
+ __ Trunc_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
+ kScratchDoubleReg);
+ break;
+ }
+ case kRiscvFloat64RoundUp: {
+ __ Ceil_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ kScratchDoubleReg);
+ break;
+ }
+ case kRiscvFloat32RoundUp: {
+ __ Ceil_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
+ kScratchDoubleReg);
+ break;
+ }
+ case kRiscvFloat64RoundTiesEven: {
+ __ Round_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ kScratchDoubleReg);
+ break;
+ }
+ case kRiscvFloat32RoundTiesEven: {
+ __ Round_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
+ kScratchDoubleReg);
+ break;
+ }
+ case kRiscvFloat32Max: {
+ __ Float32Max(i.OutputSingleRegister(), i.InputSingleRegister(0),
+ i.InputSingleRegister(1));
+ break;
+ }
+ case kRiscvFloat64Max: {
+ __ Float64Max(i.OutputSingleRegister(), i.InputSingleRegister(0),
+ i.InputSingleRegister(1));
+ break;
+ }
+ case kRiscvFloat32Min: {
+ __ Float32Min(i.OutputSingleRegister(), i.InputSingleRegister(0),
+ i.InputSingleRegister(1));
+ break;
+ }
+ case kRiscvFloat64Min: {
+ __ Float64Min(i.OutputSingleRegister(), i.InputSingleRegister(0),
+ i.InputSingleRegister(1));
+ break;
+ }
+ case kRiscvFloat64SilenceNaN:
+ __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kRiscvCvtSD:
+ __ fcvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kRiscvCvtDS:
+ __ fcvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
+ break;
+ case kRiscvCvtDW: {
+ __ fcvt_d_w(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvCvtSW: {
+ __ fcvt_s_w(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvCvtSUw: {
+ __ Cvt_s_uw(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvCvtSL: {
+ __ fcvt_s_l(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvCvtDL: {
+ __ fcvt_d_l(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvCvtDUw: {
+ __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvCvtDUl: {
+ __ Cvt_d_ul(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvCvtSUl: {
+ __ Cvt_s_ul(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvFloorWD: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Floor_w_d(i.OutputRegister(), i.InputDoubleRegister(0), result);
+ break;
+ }
+ case kRiscvCeilWD: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Ceil_w_d(i.OutputRegister(), i.InputDoubleRegister(0), result);
+ break;
+ }
+ case kRiscvRoundWD: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Round_w_d(i.OutputRegister(), i.InputDoubleRegister(0), result);
+ break;
+ }
+ case kRiscvTruncWD: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Trunc_w_d(i.OutputRegister(), i.InputDoubleRegister(0), result);
+ break;
+ }
+ case kRiscvFloorWS: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Floor_w_s(i.OutputRegister(), i.InputDoubleRegister(0), result);
+ break;
+ }
+ case kRiscvCeilWS: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Ceil_w_s(i.OutputRegister(), i.InputDoubleRegister(0), result);
+ break;
+ }
+ case kRiscvRoundWS: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Round_w_s(i.OutputRegister(), i.InputDoubleRegister(0), result);
+ break;
+ }
+ case kRiscvTruncWS: {
+ Label done;
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
+ __ Trunc_w_s(i.OutputRegister(), i.InputDoubleRegister(0), result);
+
+      // On RISC-V, if the input value exceeds INT32_MAX, the result of fcvt
+      // is INT32_MAX. Note that, since INT32_MAX means the lower 31 bits are
+      // all 1s, INT32_MAX cannot be represented precisely as a float, so an
+      // fcvt result of INT32_MAX always indicates overflow.
+      //
+      // In wasm_compiler, to detect overflow in converting an FP value, fval,
+      // to integer, V8 checks whether I2F(F2I(fval)) equals fval. However, if
+      // fval == INT32_MAX+1, the value of I2F(F2I(fval)) happens to be fval.
+      // So, INT32_MAX is not a good value to indicate overflow. Instead, we
+      // use INT32_MIN as the converted result of an out-of-range FP value,
+      // exploiting the fact that INT32_MAX+1 is INT32_MIN.
+      //
+      // If the conversion overflows, the result is set to INT32_MIN. Here we
+      // detect overflow by testing whether output + 1 < output (i.e.,
+      // kScratchReg < output).
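+      // For example, if the conversion result is INT32_MAX (0x7FFFFFFF),
+      // output + 1 wraps around to INT32_MIN, so kScratchReg < output, the
+      // branch below is not taken, and the output is replaced by kScratchReg
+      // (INT32_MIN). For any in-range result, output + 1 > output and the
+      // output is left unchanged.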
+ if (set_overflow_to_min_i32) {
+ __ Add32(kScratchReg, i.OutputRegister(), 1);
+ __ Branch(&done, lt, i.OutputRegister(), Operand(kScratchReg));
+ __ Move(i.OutputRegister(), kScratchReg);
+ __ bind(&done);
+ }
+ break;
+ }
+ case kRiscvTruncLS: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Trunc_l_s(i.OutputRegister(), i.InputDoubleRegister(0), result);
+ break;
+ }
+ case kRiscvTruncLD: {
+ Label done;
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ bool set_overflow_to_min_i64 = MiscField::decode(instr->opcode());
+ __ Trunc_l_d(i.OutputRegister(), i.InputDoubleRegister(0), result);
+ if (set_overflow_to_min_i64) {
+ __ Add64(kScratchReg, i.OutputRegister(), 1);
+ __ Branch(&done, lt, i.OutputRegister(), Operand(kScratchReg));
+ __ Move(i.OutputRegister(), kScratchReg);
+ __ bind(&done);
+ }
+ break;
+ }
+ case kRiscvTruncUwD: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Trunc_uw_d(i.OutputRegister(), i.InputDoubleRegister(0), result);
+ break;
+ }
+ case kRiscvTruncUwS: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ bool set_overflow_to_min_u32 = MiscField::decode(instr->opcode());
+ __ Trunc_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), result);
+
+      // On RISC-V, if the input value exceeds UINT32_MAX, the result of fcvt
+      // is UINT32_MAX. Note that, since UINT32_MAX means all 32 bits are 1s,
+      // UINT32_MAX cannot be represented precisely as a float, so an fcvt
+      // result of UINT32_MAX always indicates overflow.
+      //
+      // In wasm_compiler.cc, to detect overflow in converting an FP value,
+      // fval, to integer, V8 checks whether I2F(F2I(fval)) equals fval.
+      // However, if fval == UINT32_MAX+1, the value of I2F(F2I(fval)) happens
+      // to be fval. So, UINT32_MAX is not a good value to indicate overflow.
+      // Instead, we use 0 as the converted result of an out-of-range FP
+      // value, exploiting the fact that UINT32_MAX+1 is 0.
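+      // For example, if the conversion result is UINT32_MAX, output + 1
+      // wraps around to 0, so kScratchReg is zero and LoadZeroIfConditionZero
+      // below clears the output; for any in-range result kScratchReg is
+      // non-zero and the output is left unchanged.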
+ if (set_overflow_to_min_u32) {
+ __ Add32(kScratchReg, i.OutputRegister(), 1);
+        // Set output to zero if the result overflowed (i.e., is UINT32_MAX).
+ __ LoadZeroIfConditionZero(i.OutputRegister(), kScratchReg);
+ }
+ break;
+ }
+ case kRiscvTruncUlS: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Trunc_ul_s(i.OutputRegister(), i.InputDoubleRegister(0), result);
+ break;
+ }
+ case kRiscvTruncUlD: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Trunc_ul_d(i.OutputRegister(0), i.InputDoubleRegister(0), result);
+ break;
+ }
+ case kRiscvBitcastDL:
+ __ fmv_x_d(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kRiscvBitcastLD:
+ __ fmv_d_x(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ case kRiscvBitcastInt32ToFloat32:
+ __ fmv_w_x(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ case kRiscvBitcastFloat32ToInt32:
+ __ fmv_x_w(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kRiscvFloat64ExtractLowWord32:
+ __ ExtractLowWordFromF64(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kRiscvFloat64ExtractHighWord32:
+ __ ExtractHighWordFromF64(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kRiscvFloat64InsertLowWord32:
+ __ InsertLowWordF64(i.OutputDoubleRegister(), i.InputRegister(1));
+ break;
+ case kRiscvFloat64InsertHighWord32:
+ __ InsertHighWordF64(i.OutputDoubleRegister(), i.InputRegister(1));
+ break;
+ // ... more basic instructions ...
+
+ case kRiscvSignExtendByte:
+ __ SignExtendByte(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kRiscvSignExtendShort:
+ __ SignExtendShort(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kRiscvLbu:
+ __ Lbu(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvLb:
+ __ Lb(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvSb:
+ __ Sb(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kRiscvLhu:
+ __ Lhu(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvUlhu:
+ __ Ulhu(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvLh:
+ __ Lh(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvUlh:
+ __ Ulh(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvSh:
+ __ Sh(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kRiscvUsh:
+ __ Ush(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kRiscvLw:
+ __ Lw(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvUlw:
+ __ Ulw(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvLwu:
+ __ Lwu(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvUlwu:
+ __ Ulwu(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvLd:
+ __ Ld(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvUld:
+ __ Uld(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvSw:
+ __ Sw(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kRiscvUsw:
+ __ Usw(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kRiscvSd:
+ __ Sd(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kRiscvUsd:
+ __ Usd(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kRiscvLoadFloat: {
+ __ LoadFloat(i.OutputSingleRegister(), i.MemoryOperand());
+ break;
+ }
+ case kRiscvULoadFloat: {
+ __ ULoadFloat(i.OutputSingleRegister(), i.MemoryOperand());
+ break;
+ }
+ case kRiscvStoreFloat: {
+ size_t index = 0;
+ MemOperand operand = i.MemoryOperand(&index);
+ FPURegister ft = i.InputOrZeroSingleRegister(index);
+ if (ft == kDoubleRegZero && !__ IsSingleZeroRegSet()) {
+ __ LoadFPRImmediate(kDoubleRegZero, 0.0f);
+ }
+ __ StoreFloat(ft, operand);
+ break;
+ }
+ case kRiscvUStoreFloat: {
+ size_t index = 0;
+ MemOperand operand = i.MemoryOperand(&index);
+ FPURegister ft = i.InputOrZeroSingleRegister(index);
+ if (ft == kDoubleRegZero && !__ IsSingleZeroRegSet()) {
+ __ LoadFPRImmediate(kDoubleRegZero, 0.0f);
+ }
+ __ UStoreFloat(ft, operand);
+ break;
+ }
+ case kRiscvLoadDouble:
+ __ LoadDouble(i.OutputDoubleRegister(), i.MemoryOperand());
+ break;
+ case kRiscvULoadDouble:
+ __ ULoadDouble(i.OutputDoubleRegister(), i.MemoryOperand());
+ break;
+ case kRiscvStoreDouble: {
+ FPURegister ft = i.InputOrZeroDoubleRegister(2);
+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
+ __ LoadFPRImmediate(kDoubleRegZero, 0.0);
+ }
+ __ StoreDouble(ft, i.MemoryOperand());
+ break;
+ }
+ case kRiscvUStoreDouble: {
+ FPURegister ft = i.InputOrZeroDoubleRegister(2);
+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
+ __ LoadFPRImmediate(kDoubleRegZero, 0.0);
+ }
+ __ UStoreDouble(ft, i.MemoryOperand());
+ break;
+ }
+ case kRiscvSync: {
+ __ sync();
+ break;
+ }
+ case kRiscvPush:
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+ __ Sub32(sp, sp, Operand(kDoubleSize));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
+ } else {
+ __ Push(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ }
+ break;
+ case kRiscvPeek: {
+ int reverse_slot = i.InputInt32(0);
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ LoadDouble(i.OutputDoubleRegister(), MemOperand(fp, offset));
+ } else {
+ DCHECK_EQ(op->representation(), MachineRepresentation::kFloat32);
+ __ LoadFloat(
+ i.OutputSingleRegister(0),
+ MemOperand(fp, offset + kLessSignificantWordInDoublewordOffset));
+ }
+ } else {
+ __ Ld(i.OutputRegister(0), MemOperand(fp, offset));
+ }
+ break;
+ }
+ case kRiscvStackClaim: {
+ __ Sub64(sp, sp, Operand(i.InputInt32(0)));
+ frame_access_state()->IncreaseSPDelta(i.InputInt32(0) /
+ kSystemPointerSize);
+ break;
+ }
+ case kRiscvStoreToStackSlot: {
+ if (instr->InputAt(0)->IsFPRegister()) {
+ if (instr->InputAt(0)->IsSimd128Register()) {
+ UNREACHABLE();
+ } else {
+ __ StoreDouble(i.InputDoubleRegister(0),
+ MemOperand(sp, i.InputInt32(1)));
+ }
+ } else {
+ __ Sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
+ }
+ break;
+ }
+ case kRiscvByteSwap64: {
+ __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 8);
+ break;
+ }
+ case kRiscvByteSwap32: {
+ __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 4);
+ break;
+ }
+ case kWord32AtomicLoadInt8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb);
+ break;
+ case kWord32AtomicLoadUint8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
+ break;
+ case kWord32AtomicLoadInt16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lh);
+ break;
+ case kWord32AtomicLoadUint16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
+ break;
+ case kWord32AtomicLoadWord32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
+ break;
+ case kRiscvWord64AtomicLoadUint8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
+ break;
+ case kRiscvWord64AtomicLoadUint16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
+ break;
+ case kRiscvWord64AtomicLoadUint32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lwu);
+ break;
+ case kRiscvWord64AtomicLoadUint64:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld);
+ break;
+ case kWord32AtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
+ break;
+ case kWord32AtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
+ break;
+ case kWord32AtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
+ break;
+ case kRiscvWord64AtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
+ break;
+ case kRiscvWord64AtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
+ break;
+ case kRiscvWord64AtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
+ break;
+ case kRiscvWord64AtomicStoreWord64:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Sd);
+ break;
+ case kWord32AtomicExchangeInt8:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
+ break;
+ case kWord32AtomicExchangeUint8:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ break;
+ case kWord32AtomicExchangeInt16:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
+ break;
+ case kWord32AtomicExchangeUint16:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
+ break;
+ case kWord32AtomicExchangeWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case kRiscvWord64AtomicExchangeUint8:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
+ break;
+ case kRiscvWord64AtomicExchangeUint16:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ break;
+ case kRiscvWord64AtomicExchangeUint32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ break;
+ case kRiscvWord64AtomicExchangeUint64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Lld, Scd);
+ break;
+ case kWord32AtomicCompareExchangeInt8:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
+ break;
+ case kWord32AtomicCompareExchangeUint8:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ break;
+ case kWord32AtomicCompareExchangeInt16:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
+ break;
+ case kWord32AtomicCompareExchangeUint16:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
+ break;
+ case kWord32AtomicCompareExchangeWord32:
+ __ Sll32(i.InputRegister(2), i.InputRegister(2), 0);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case kRiscvWord64AtomicCompareExchangeUint8:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
+ break;
+ case kRiscvWord64AtomicCompareExchangeUint16:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ break;
+ case kRiscvWord64AtomicCompareExchangeUint32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ break;
+ case kRiscvWord64AtomicCompareExchangeUint64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd);
+ break;
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kWord32Atomic##op##Int8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst, 32); \
+ break; \
+ case kWord32Atomic##op##Uint8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst, 32); \
+ break; \
+ case kWord32Atomic##op##Int16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst, 32); \
+ break; \
+ case kWord32Atomic##op##Uint16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst, 32); \
+ break; \
+ case kWord32Atomic##op##Word32: \
+ ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst); \
+ break;
+ ATOMIC_BINOP_CASE(Add, Add32)
+ ATOMIC_BINOP_CASE(Sub, Sub32)
+ ATOMIC_BINOP_CASE(And, And)
+ ATOMIC_BINOP_CASE(Or, Or)
+ ATOMIC_BINOP_CASE(Xor, Xor)
+#undef ATOMIC_BINOP_CASE
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kRiscvWord64Atomic##op##Uint8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst, 64); \
+ break; \
+ case kRiscvWord64Atomic##op##Uint16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst, 64); \
+ break; \
+ case kRiscvWord64Atomic##op##Uint32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst, 64); \
+ break; \
+ case kRiscvWord64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst); \
+ break;
+ ATOMIC_BINOP_CASE(Add, Add64)
+ ATOMIC_BINOP_CASE(Sub, Sub64)
+ ATOMIC_BINOP_CASE(And, And)
+ ATOMIC_BINOP_CASE(Or, Or)
+ ATOMIC_BINOP_CASE(Xor, Xor)
+#undef ATOMIC_BINOP_CASE
+ case kRiscvAssertEqual:
+ __ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()),
+ i.InputRegister(0), Operand(i.InputRegister(1)));
+ break;
+
+ default:
+ UNIMPLEMENTED();
+ }
+ return kSuccess;
+} // NOLINT(readability/fn_size)
+
+#define UNSUPPORTED_COND(opcode, condition) \
+ StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \
+ << "\""; \
+ UNIMPLEMENTED();
+
+void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
+ Instruction* instr, FlagsCondition condition,
+ Label* tlabel, Label* flabel, bool fallthru) {
+#undef __
+#define __ tasm->
+ RiscvOperandConverter i(gen, instr);
+
+ Condition cc = kNoCondition;
+  // RISC-V does not have condition code flags, so compare and branch are
+  // implemented differently than on other architectures. The compare
+  // operations emit riscv64 pseudo-instructions, which are handled here by
+  // branch instructions that do the actual comparison. It is essential that
+  // the input registers to the compare pseudo-op are not modified before this
+  // branch op, as they are tested here.
+
+ if (instr->arch_opcode() == kRiscvTst) {
+ cc = FlagsConditionToConditionTst(condition);
+ __ Branch(tlabel, cc, kScratchReg, Operand(zero_reg));
+ } else if (instr->arch_opcode() == kRiscvAdd64 ||
+ instr->arch_opcode() == kRiscvSub64) {
+ cc = FlagsConditionToConditionOvf(condition);
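+    // These opcodes are used for 32-bit add/sub with overflow: the output
+    // register holds the full 64-bit result of the sign-extended operands.
+    // Overflow is detected by comparing the result shifted arithmetically
+    // right by 32 with the result shifted right by 31; the two are equal iff
+    // the result fits in an int32, i.e. iff no overflow occurred.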
+ __ Sra64(kScratchReg, i.OutputRegister(), 32);
+ __ Sra64(kScratchReg2, i.OutputRegister(), 31);
+ __ Branch(tlabel, cc, kScratchReg2, Operand(kScratchReg));
+ } else if (instr->arch_opcode() == kRiscvAddOvf64 ||
+ instr->arch_opcode() == kRiscvSubOvf64) {
+ switch (condition) {
+ // Overflow occurs if overflow register is negative
+ case kOverflow:
+ __ Branch(tlabel, lt, kScratchReg, Operand(zero_reg));
+ break;
+ case kNotOverflow:
+ __ Branch(tlabel, ge, kScratchReg, Operand(zero_reg));
+ break;
+ default:
+ UNSUPPORTED_COND(instr->arch_opcode(), condition);
+ break;
+ }
+ } else if (instr->arch_opcode() == kRiscvMulOvf32) {
+ // Overflow occurs if overflow register is not zero
+ switch (condition) {
+ case kOverflow:
+ __ Branch(tlabel, ne, kScratchReg, Operand(zero_reg));
+ break;
+ case kNotOverflow:
+ __ Branch(tlabel, eq, kScratchReg, Operand(zero_reg));
+ break;
+ default:
+ UNSUPPORTED_COND(kRiscvMulOvf32, condition);
+ break;
+ }
+ } else if (instr->arch_opcode() == kRiscvCmp) {
+ cc = FlagsConditionToConditionCmp(condition);
+ __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
+ } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
+ cc = FlagsConditionToConditionCmp(condition);
+ Register lhs_register = sp;
+ uint32_t offset;
+ if (gen->ShouldApplyOffsetToStackCheck(instr, &offset)) {
+ lhs_register = i.TempRegister(0);
+ __ Sub64(lhs_register, sp, offset);
+ }
+ __ Branch(tlabel, cc, lhs_register, Operand(i.InputRegister(0)));
+ } else if (instr->arch_opcode() == kRiscvCmpS ||
+ instr->arch_opcode() == kRiscvCmpD) {
+ bool predicate;
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
+ // floating-point compare result is set in kScratchReg
+ if (predicate) {
+ __ BranchTrueF(kScratchReg, tlabel);
+ } else {
+ __ BranchFalseF(kScratchReg, tlabel);
+ }
+ } else {
+ PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
+ instr->arch_opcode());
+ UNIMPLEMENTED();
+ }
+ if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
+#undef __
+#define __ tasm()->
+}
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+
+ AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
+ branch->fallthru);
+}
+
+void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
+ Instruction* instr) {
+ // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
+ if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
+ return;
+ }
+
+ RiscvOperandConverter i(this, instr);
+ condition = NegateFlagsCondition(condition);
+
+ switch (instr->arch_opcode()) {
+ case kRiscvCmp: {
+ __ CompareI(kScratchReg, i.InputRegister(0), i.InputOperand(1),
+ FlagsConditionToConditionCmp(condition));
+ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg);
+ }
+ return;
+ case kRiscvTst: {
+ switch (condition) {
+ case kEqual:
+ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
+ break;
+ case kNotEqual:
+ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
+ kScratchReg);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ return;
+ case kRiscvAdd64:
+ case kRiscvSub64: {
+      // The check below leaves 1 in kScratchReg2 if the operation overflowed
+      // and 0 otherwise.
+ __ Srl64(kScratchReg, i.OutputRegister(), 63);
+ __ Srl32(kScratchReg2, i.OutputRegister(), 31);
+ __ Xor(kScratchReg2, kScratchReg, kScratchReg2);
+ switch (condition) {
+ case kOverflow:
+ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
+ kScratchReg2);
+ break;
+ case kNotOverflow:
+ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
+ break;
+ default:
+ UNSUPPORTED_COND(instr->arch_opcode(), condition);
+ }
+ }
+ return;
+ case kRiscvAddOvf64:
+ case kRiscvSubOvf64: {
+ // Overflow occurs if overflow register is negative
+ __ Slt(kScratchReg2, kScratchReg, zero_reg);
+ switch (condition) {
+ case kOverflow:
+ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
+ kScratchReg2);
+ break;
+ case kNotOverflow:
+ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
+ break;
+ default:
+ UNSUPPORTED_COND(instr->arch_opcode(), condition);
+ }
+ }
+ return;
+ case kRiscvMulOvf32: {
+ // Overflow occurs if overflow register is not zero
+ switch (condition) {
+ case kOverflow:
+ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
+ kScratchReg);
+ break;
+ case kNotOverflow:
+ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
+ break;
+ default:
+ UNSUPPORTED_COND(instr->arch_opcode(), condition);
+ }
+ }
+ return;
+ case kRiscvCmpS:
+ case kRiscvCmpD: {
+ bool predicate;
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
+ if (predicate) {
+ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg);
+ } else {
+ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
+ }
+ }
+ return;
+ default:
+ UNREACHABLE();
+ }
+}
+
+#undef UNSUPPORTED_COND
+
+void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
+ BranchInfo* branch) {
+ AssembleArchBranch(instr, branch);
+}
+
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
+ if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
+}
+
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
+ : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
+ void Generate() final {
+ RiscvOperandConverter i(gen_, instr_);
+ TrapId trap_id =
+ static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
+ GenerateCallToTrap(trap_id);
+ }
+
+ private:
+ void GenerateCallToTrap(TrapId trap_id) {
+ if (trap_id == TrapId::kInvalid) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ // We use the context register as the scratch register, because we do
+ // not have a context here.
+ __ PrepareCallCFunction(0, 0, cp);
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(), 0);
+ __ LeaveFrame(StackFrame::WASM);
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
+ pop_count += (pop_count & 1); // align
+ __ Drop(pop_count);
+ __ Ret();
+ } else {
+ gen_->AssembleSourcePosition(instr_);
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
+ __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
+ ReferenceMap* reference_map =
+ gen_->zone()->New<ReferenceMap>(gen_->zone());
+ gen_->RecordSafepoint(reference_map);
+ if (FLAG_debug_code) {
+ __ stop();
+ }
+ }
+ }
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ auto ool = zone()->New<OutOfLineTrap>(this, instr);
+ Label* tlabel = ool->entry();
+ AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
+}
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+ FlagsCondition condition) {
+ RiscvOperandConverter i(this, instr);
+
+ // Materialize a full 32-bit 1 or 0 value. The result register is always the
+ // last output of the instruction.
+ DCHECK_NE(0u, instr->OutputCount());
+ Register result = i.OutputRegister(instr->OutputCount() - 1);
+ Condition cc = kNoCondition;
+  // RISC-V does not have condition code flags, so compare and branch are
+  // implemented differently than on other architectures. The compare
+  // operations emit riscv64 pseudo-instructions, which are checked and
+  // handled here.
+
+ if (instr->arch_opcode() == kRiscvTst) {
+ cc = FlagsConditionToConditionTst(condition);
+ if (cc == eq) {
+ __ Sltu(result, kScratchReg, 1);
+ } else {
+ __ Sltu(result, zero_reg, kScratchReg);
+ }
+ return;
+ } else if (instr->arch_opcode() == kRiscvAdd64 ||
+ instr->arch_opcode() == kRiscvSub64) {
+ cc = FlagsConditionToConditionOvf(condition);
+    // The overflow check below produces 1 or 0 in the result register.
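+    // Bit 63 is the sign of the full 64-bit result and bit 31 is the sign of
+    // the truncated 32-bit result; they differ exactly when the 32-bit
+    // operation overflowed, so XOR-ing them yields 1 on overflow.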
+ __ Srl64(kScratchReg, i.OutputRegister(), 63);
+ __ Srl32(kScratchReg2, i.OutputRegister(), 31);
+ __ Xor(result, kScratchReg, kScratchReg2);
+ if (cc == eq) // Toggle result for not overflow.
+ __ Xor(result, result, 1);
+ return;
+ } else if (instr->arch_opcode() == kRiscvAddOvf64 ||
+ instr->arch_opcode() == kRiscvSubOvf64) {
+ // Overflow occurs if overflow register is negative
+ __ Slt(result, kScratchReg, zero_reg);
+ } else if (instr->arch_opcode() == kRiscvMulOvf32) {
+ // Overflow occurs if overflow register is not zero
+ __ Sgtu(result, kScratchReg, zero_reg);
+ } else if (instr->arch_opcode() == kRiscvCmp) {
+ cc = FlagsConditionToConditionCmp(condition);
+ switch (cc) {
+ case eq:
+ case ne: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
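+        // Materialize the eq/ne result without branching: reduce the
+        // comparison to a value that is zero iff the operands are equal
+        // (via a subtract or XOR), then map it to 0/1 with Sltu against 1
+        // or against zero_reg.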
+ if (instr->InputAt(1)->IsImmediate()) {
+ if (is_int12(-right.immediate())) {
+ if (right.immediate() == 0) {
+ if (cc == eq) {
+ __ Sltu(result, left, 1);
+ } else {
+ __ Sltu(result, zero_reg, left);
+ }
+ } else {
+ __ Add64(result, left, Operand(-right.immediate()));
+ if (cc == eq) {
+ __ Sltu(result, result, 1);
+ } else {
+ __ Sltu(result, zero_reg, result);
+ }
+ }
+ } else {
+ if (is_uint12(right.immediate())) {
+ __ Xor(result, left, right);
+ } else {
+ __ li(kScratchReg, right);
+ __ Xor(result, left, kScratchReg);
+ }
+ if (cc == eq) {
+ __ Sltu(result, result, 1);
+ } else {
+ __ Sltu(result, zero_reg, result);
+ }
+ }
+ } else {
+ __ Xor(result, left, right);
+ if (cc == eq) {
+ __ Sltu(result, result, 1);
+ } else {
+ __ Sltu(result, zero_reg, result);
+ }
+ }
+ } break;
+ case lt:
+ case ge: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Slt(result, left, right);
+ if (cc == ge) {
+ __ Xor(result, result, 1);
+ }
+ } break;
+ case gt:
+ case le: {
+ Register left = i.InputRegister(1);
+ Operand right = i.InputOperand(0);
+ __ Slt(result, left, right);
+ if (cc == le) {
+ __ Xor(result, result, 1);
+ }
+ } break;
+ case Uless:
+ case Ugreater_equal: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Sltu(result, left, right);
+ if (cc == Ugreater_equal) {
+ __ Xor(result, result, 1);
+ }
+ } break;
+ case Ugreater:
+ case Uless_equal: {
+ Register left = i.InputRegister(1);
+ Operand right = i.InputOperand(0);
+ __ Sltu(result, left, right);
+ if (cc == Uless_equal) {
+ __ Xor(result, result, 1);
+ }
+ } break;
+ default:
+ UNREACHABLE();
+ }
+ return;
+ } else if (instr->arch_opcode() == kRiscvCmpD ||
+ instr->arch_opcode() == kRiscvCmpS) {
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ if ((instr->arch_opcode() == kRiscvCmpD) &&
+ (left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ LoadFPRImmediate(kDoubleRegZero, 0.0);
+ } else if ((instr->arch_opcode() == kRiscvCmpS) &&
+ (left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsSingleZeroRegSet()) {
+ __ LoadFPRImmediate(kDoubleRegZero, 0.0f);
+ }
+ bool predicate;
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
+    // The RISC-V FP compare leaves 0 or 1 in kScratchReg. If the predicate
+    // holds, use that value directly; otherwise toggle it (i.e., 0 -> 1,
+    // 1 -> 0).
+ if (predicate) {
+ __ Move(result, kScratchReg);
+ } else {
+ __ Xor(result, kScratchReg, 1);
+ }
+ return;
+ } else {
+ PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
+ instr->arch_opcode());
+ TRACE_UNIMPL();
+ UNIMPLEMENTED();
+ }
+}
+
+void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
+ RiscvOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ std::vector<std::pair<int32_t, Label*>> cases;
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
+ }
+ AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
+ cases.data() + cases.size());
+}
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ RiscvOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ size_t const case_count = instr->InputCount() - 2;
+
+ __ Branch(GetLabel(i.InputRpo(1)), Ugreater_equal, input,
+ Operand(case_count));
+ __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
+ return GetLabel(i.InputRpo(index + 2));
+ });
+}
+
+void CodeGenerator::FinishFrame(Frame* frame) {
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ int count = base::bits::CountPopulation(saves_fpu);
+ DCHECK_EQ(kNumCalleeSavedFPU, count);
+ frame->AllocateSavedCalleeRegisterSlots(count *
+ (kDoubleSize / kSystemPointerSize));
+ }
+
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ int count = base::bits::CountPopulation(saves);
+ DCHECK_EQ(kNumCalleeSaved, count + 1);
+ frame->AllocateSavedCalleeRegisterSlots(count);
+ }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+
+ if (frame_access_state()->has_frame()) {
+ if (call_descriptor->IsCFunctionCall()) {
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ StubPrologue(StackFrame::C_WASM_ENTRY);
+ // Reserve stack space for saving the c_entry_fp later.
+ __ Sub64(sp, sp, Operand(kSystemPointerSize));
+ } else {
+ __ Push(ra, fp);
+ __ Move(fp, sp);
+ }
+ } else if (call_descriptor->IsJSFunctionCall()) {
+ __ Prologue();
+ } else {
+ __ StubPrologue(info()->GetOutputStackFrameType());
+ if (call_descriptor->IsWasmFunctionCall()) {
+ __ Push(kWasmInstanceRegister);
+ } else if (call_descriptor->IsWasmImportWrapper() ||
+ call_descriptor->IsWasmCapiFunction()) {
+ // Wasm import wrappers are passed a tuple in the place of the instance.
+ // Unpack the tuple into the instance and the target callable.
+ // This must be done here in the codegen because it cannot be expressed
+ // properly in the graph.
+ __ Ld(kJSFunctionRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
+ __ Ld(kWasmInstanceRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
+ __ Push(kWasmInstanceRegister);
+ if (call_descriptor->IsWasmCapiFunction()) {
+ // Reserve space for saving the PC later.
+ __ Sub64(sp, sp, Operand(kSystemPointerSize));
+ }
+ }
+ }
+ }
+
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
+
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ required_slots -= osr_helper()->UnoptimizedFrameSlots();
+ ResetSpeculationPoison();
+ }
+
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+
+ if (required_slots > 0) {
+ DCHECK(frame_access_state()->has_frame());
+ if (info()->IsWasm() && required_slots > 128) {
+ // For WebAssembly functions with big frames we have to do the stack
+ // overflow check before we construct the frame. Otherwise we may not
+ // have enough space on the stack to call the runtime for the stack
+ // overflow.
+ Label done;
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
+ __ Ld(
+ kScratchReg,
+ FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
+ __ Ld(kScratchReg, MemOperand(kScratchReg));
+ __ Add64(kScratchReg, kScratchReg,
+ Operand(required_slots * kSystemPointerSize));
+ __ Branch(&done, uge, sp, Operand(kScratchReg));
+ }
+
+ __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // We come from WebAssembly, there are no references for the GC.
+ ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
+ RecordSafepoint(reference_map);
+ if (FLAG_debug_code) {
+ __ stop();
+ }
+
+ __ bind(&done);
+ }
+ }
+
+ const int returns = frame()->GetReturnSlotCount();
+
+ // Skip callee-saved and return slots, which are pushed below.
+ required_slots -= base::bits::CountPopulation(saves);
+ required_slots -= base::bits::CountPopulation(saves_fpu);
+ required_slots -= returns;
+ if (required_slots > 0) {
+ __ Sub64(sp, sp, Operand(required_slots * kSystemPointerSize));
+ }
+
+ if (saves_fpu != 0) {
+ // Save callee-saved FPU registers.
+ __ MultiPushFPU(saves_fpu);
+ DCHECK_EQ(kNumCalleeSavedFPU, base::bits::CountPopulation(saves_fpu));
+ }
+
+ if (saves != 0) {
+ // Save callee-saved registers.
+ __ MultiPush(saves);
+ DCHECK_EQ(kNumCalleeSaved, base::bits::CountPopulation(saves) + 1);
+ }
+
+ if (returns != 0) {
+ // Create space for returns.
+ __ Sub64(sp, sp, Operand(returns * kSystemPointerSize));
+ }
+}
+
+void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ __ Add64(sp, sp, Operand(returns * kSystemPointerSize));
+ }
+
+ // Restore GP registers.
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ MultiPop(saves);
+ }
+
+ // Restore FPU registers.
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ __ MultiPopFPU(saves_fpu);
+ }
+
+ RiscvOperandConverter g(this, nullptr);
+
+ const int parameter_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
+
+  // {additional_pop_count} is only greater than zero if {parameter_count} is
+  // 0. Check RawMachineAssembler::PopAndReturn.
+ if (parameter_count != 0) {
+ if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
+ } else if (__ emit_debug_code()) {
+ __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue,
+ g.ToRegister(additional_pop_count),
+ Operand(static_cast<int64_t>(0)));
+ }
+ }
+
+ // Functions with JS linkage have at least one parameter (the receiver).
+ // If {parameter_count} == 0, it means it is a builtin with
+ // kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
+ // itself.
+ const bool drop_jsargs = frame_access_state()->has_frame() &&
+ call_descriptor->IsJSFunctionCall() &&
+ parameter_count != 0;
+
+ if (call_descriptor->IsCFunctionCall()) {
+ AssembleDeconstructFrame();
+ } else if (frame_access_state()->has_frame()) {
+    // Canonicalize JSFunction return sites for now unless they have a
+    // variable number of stack slot pops.
+ if (additional_pop_count->IsImmediate() &&
+ g.ToConstant(additional_pop_count).ToInt32() == 0) {
+ if (return_label_.is_bound()) {
+ __ Branch(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ }
+ }
+ if (drop_jsargs) {
+ // Get the actual argument count
+ __ Ld(t0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ }
+ AssembleDeconstructFrame();
+ }
+ if (drop_jsargs) {
+ // We must pop all arguments from the stack (including the receiver). This
+ // number of arguments is given by max(1 + argc_reg, parameter_count).
+ __ Add64(t0, t0, Operand(1)); // Also pop the receiver.
+ if (parameter_count > 1) {
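+      // Compute t0 = max(t0, parameter_count) with a compare and branch.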
+ Label done;
+ __ li(kScratchReg, parameter_count);
+ __ Branch(&done, ge, t0, Operand(kScratchReg));
+ __ Move(t0, kScratchReg);
+ __ bind(&done);
+ }
+ __ Sll64(t0, t0, kSystemPointerSizeLog2);
+ __ Add64(sp, sp, t0);
+ } else if (additional_pop_count->IsImmediate()) {
+ // it should be a kInt32 or a kInt64
+ DCHECK_LE(g.ToConstant(additional_pop_count).type(), Constant::kInt64);
+ int additional_count = g.ToConstant(additional_pop_count).ToInt32();
+ __ Drop(parameter_count + additional_count);
+ } else {
+ Register pop_reg = g.ToRegister(additional_pop_count);
+ __ Drop(parameter_count);
+ __ Sll64(pop_reg, pop_reg, kSystemPointerSizeLog2);
+ __ Add64(sp, sp, pop_reg);
+ }
+ __ Ret();
+}
+
+void CodeGenerator::FinishCode() {}
+
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {}
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ RiscvOperandConverter g(this, nullptr);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ __ Move(g.ToRegister(destination), src);
+ } else {
+ __ Sd(src, g.ToMemOperand(destination));
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ Ld(g.ToRegister(destination), src);
+ } else {
+ Register temp = kScratchReg;
+ __ Ld(temp, src);
+ __ Sd(temp, g.ToMemOperand(destination));
+ }
+ } else if (source->IsConstant()) {
+ Constant src = g.ToConstant(source);
+ if (destination->IsRegister() || destination->IsStackSlot()) {
+ Register dst =
+ destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
+ switch (src.type()) {
+ case Constant::kInt32:
+ __ li(dst, Operand(src.ToInt32()));
+ break;
+ case Constant::kFloat32:
+ __ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
+ break;
+ case Constant::kInt64:
+ if (RelocInfo::IsWasmReference(src.rmode())) {
+ __ li(dst, Operand(src.ToInt64(), src.rmode()));
+ } else {
+ __ li(dst, Operand(src.ToInt64()));
+ }
+ break;
+ case Constant::kFloat64:
+ __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
+ break;
+ case Constant::kExternalReference:
+ __ li(dst, src.ToExternalReference());
+ break;
+ case Constant::kDelayedStringConstant:
+ __ li(dst, src.ToDelayedStringConstant());
+ break;
+ case Constant::kHeapObject: {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ RootIndex index;
+ if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ li(dst, src_object);
+ }
+ break;
+ }
+ case Constant::kCompressedHeapObject:
+ UNREACHABLE();
+ case Constant::kRpoNumber:
+ UNREACHABLE(); // TODO(titzer): loading RPO numbers
+ break;
+ }
+ if (destination->IsStackSlot()) __ Sd(dst, g.ToMemOperand(destination));
+ } else if (src.type() == Constant::kFloat32) {
+ if (destination->IsFPStackSlot()) {
+ MemOperand dst = g.ToMemOperand(destination);
+ if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
+ __ Sw(zero_reg, dst);
+ } else {
+ __ li(kScratchReg, Operand(bit_cast<int32_t>(src.ToFloat32())));
+ __ Sw(kScratchReg, dst);
+ }
+ } else {
+ DCHECK(destination->IsFPRegister());
+ FloatRegister dst = g.ToSingleRegister(destination);
+ __ LoadFPRImmediate(dst, src.ToFloat32());
+ }
+ } else {
+ DCHECK_EQ(Constant::kFloat64, src.type());
+ DoubleRegister dst = destination->IsFPRegister()
+ ? g.ToDoubleRegister(destination)
+ : kScratchDoubleReg;
+ __ LoadFPRImmediate(dst, src.ToFloat64().value());
+ if (destination->IsFPStackSlot()) {
+ __ StoreDouble(dst, g.ToMemOperand(destination));
+ }
+ }
+ } else if (source->IsFPRegister()) {
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd128) {
+ UNIMPLEMENTED();
+ } else {
+ FPURegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, src);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ if (rep == MachineRepresentation::kFloat32) {
+ __ StoreFloat(src, g.ToMemOperand(destination));
+ } else {
+ DCHECK_EQ(rep, MachineRepresentation::kFloat64);
+ __ StoreDouble(src, g.ToMemOperand(destination));
+ }
+ }
+ }
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd128) {
+ UNIMPLEMENTED();
+ } else {
+ if (destination->IsFPRegister()) {
+ if (rep == MachineRepresentation::kFloat32) {
+ __ LoadFloat(g.ToDoubleRegister(destination), src);
+ } else {
+ DCHECK_EQ(rep, MachineRepresentation::kFloat64);
+ __ LoadDouble(g.ToDoubleRegister(destination), src);
+ }
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ FPURegister temp = kScratchDoubleReg;
+ if (rep == MachineRepresentation::kFloat32) {
+ __ LoadFloat(temp, src);
+ __ StoreFloat(temp, g.ToMemOperand(destination));
+ } else {
+ DCHECK_EQ(rep, MachineRepresentation::kFloat64);
+ __ LoadDouble(temp, src);
+ __ StoreDouble(temp, g.ToMemOperand(destination));
+ }
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ RiscvOperandConverter g(this, nullptr);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ // Register-register.
+ Register temp = kScratchReg;
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ Move(temp, src);
+ __ Move(src, dst);
+ __ Move(dst, temp);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Move(temp, src);
+ __ Ld(src, dst);
+ __ Sd(temp, dst);
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsStackSlot());
+ Register temp_0 = kScratchReg;
+ Register temp_1 = kScratchReg2;
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Ld(temp_0, src);
+ __ Ld(temp_1, dst);
+ __ Sd(temp_0, dst);
+ __ Sd(temp_1, src);
+ } else if (source->IsFPRegister()) {
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd128) {
+ UNIMPLEMENTED();
+ } else {
+ FPURegister temp = kScratchDoubleReg;
+ FPURegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(temp, src);
+ __ Move(src, dst);
+ __ Move(dst, temp);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ if (rep == MachineRepresentation::kFloat32) {
+ __ MoveFloat(temp, src);
+ __ LoadFloat(src, dst);
+ __ StoreFloat(temp, dst);
+ } else {
+ DCHECK_EQ(rep, MachineRepresentation::kFloat64);
+ __ MoveDouble(temp, src);
+ __ LoadDouble(src, dst);
+ __ StoreDouble(temp, dst);
+ }
+ }
+ }
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPStackSlot());
+ Register temp_0 = kScratchReg;
+ MemOperand src0 = g.ToMemOperand(source);
+ MemOperand src1(src0.rm(), src0.offset() + kIntSize);
+ MemOperand dst0 = g.ToMemOperand(destination);
+ MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd128) {
+ UNIMPLEMENTED();
+ } else {
+ FPURegister temp_1 = kScratchDoubleReg;
+ if (rep == MachineRepresentation::kFloat32) {
+ __ LoadFloat(temp_1, dst0); // Save destination in temp_1.
+ __ Lw(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ Sw(temp_0, dst0);
+ __ StoreFloat(temp_1, src0);
+ } else {
+ DCHECK_EQ(rep, MachineRepresentation::kFloat64);
+ __ LoadDouble(temp_1, dst0); // Save destination in temp_1.
+ __ Lw(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ Sw(temp_0, dst0);
+ __ Lw(temp_0, src1);
+ __ Sw(temp_0, dst1);
+ __ StoreDouble(temp_1, src0);
+ }
+ }
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+}
+
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ // On 64-bit RISC-V we emit the jump tables inline.
+ UNREACHABLE();
+}
+
+#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
+#undef ASSEMBLE_ATOMIC_STORE_INTEGER
+#undef ASSEMBLE_ATOMIC_BINOP
+#undef ASSEMBLE_ATOMIC_BINOP_EXT
+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
+#undef ASSEMBLE_IEEE754_BINOP
+#undef ASSEMBLE_IEEE754_UNOP
+
+#undef TRACE_MSG
+#undef TRACE_UNIMPL
+#undef __
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h b/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
new file mode 100644
index 0000000000..fae854ec02
--- /dev/null
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
@@ -0,0 +1,447 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BACKEND_RISCV64_INSTRUCTION_CODES_RISCV64_H_
+#define V8_COMPILER_BACKEND_RISCV64_INSTRUCTION_CODES_RISCV64_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// RISC-V-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(RiscvAdd32) \
+ V(RiscvAdd64) \
+ V(RiscvAddOvf64) \
+ V(RiscvSub32) \
+ V(RiscvSub64) \
+ V(RiscvSubOvf64) \
+ V(RiscvMul32) \
+ V(RiscvMulOvf32) \
+ V(RiscvMulHigh32) \
+ V(RiscvMulHigh64) \
+ V(RiscvMulHighU32) \
+ V(RiscvMul64) \
+ V(RiscvDiv32) \
+ V(RiscvDiv64) \
+ V(RiscvDivU32) \
+ V(RiscvDivU64) \
+ V(RiscvMod32) \
+ V(RiscvMod64) \
+ V(RiscvModU32) \
+ V(RiscvModU64) \
+ V(RiscvAnd) \
+ V(RiscvAnd32) \
+ V(RiscvOr) \
+ V(RiscvOr32) \
+ V(RiscvNor) \
+ V(RiscvNor32) \
+ V(RiscvXor) \
+ V(RiscvXor32) \
+ V(RiscvClz32) \
+ V(RiscvShl32) \
+ V(RiscvShr32) \
+ V(RiscvSar32) \
+ V(RiscvZeroExtendWord) \
+ V(RiscvSignExtendWord) \
+ V(RiscvClz64) \
+ V(RiscvCtz32) \
+ V(RiscvCtz64) \
+ V(RiscvPopcnt32) \
+ V(RiscvPopcnt64) \
+ V(RiscvShl64) \
+ V(RiscvShr64) \
+ V(RiscvSar64) \
+ V(RiscvRor32) \
+ V(RiscvRor64) \
+ V(RiscvMov) \
+ V(RiscvTst) \
+ V(RiscvCmp) \
+ V(RiscvCmpS) \
+ V(RiscvAddS) \
+ V(RiscvSubS) \
+ V(RiscvMulS) \
+ V(RiscvDivS) \
+ V(RiscvModS) \
+ V(RiscvAbsS) \
+ V(RiscvNegS) \
+ V(RiscvSqrtS) \
+ V(RiscvMaxS) \
+ V(RiscvMinS) \
+ V(RiscvCmpD) \
+ V(RiscvAddD) \
+ V(RiscvSubD) \
+ V(RiscvMulD) \
+ V(RiscvDivD) \
+ V(RiscvModD) \
+ V(RiscvAbsD) \
+ V(RiscvNegD) \
+ V(RiscvSqrtD) \
+ V(RiscvMaxD) \
+ V(RiscvMinD) \
+ V(RiscvFloat64RoundDown) \
+ V(RiscvFloat64RoundTruncate) \
+ V(RiscvFloat64RoundUp) \
+ V(RiscvFloat64RoundTiesEven) \
+ V(RiscvFloat32RoundDown) \
+ V(RiscvFloat32RoundTruncate) \
+ V(RiscvFloat32RoundUp) \
+ V(RiscvFloat32RoundTiesEven) \
+ V(RiscvCvtSD) \
+ V(RiscvCvtDS) \
+ V(RiscvTruncWD) \
+ V(RiscvRoundWD) \
+ V(RiscvFloorWD) \
+ V(RiscvCeilWD) \
+ V(RiscvTruncWS) \
+ V(RiscvRoundWS) \
+ V(RiscvFloorWS) \
+ V(RiscvCeilWS) \
+ V(RiscvTruncLS) \
+ V(RiscvTruncLD) \
+ V(RiscvTruncUwD) \
+ V(RiscvTruncUwS) \
+ V(RiscvTruncUlS) \
+ V(RiscvTruncUlD) \
+ V(RiscvCvtDW) \
+ V(RiscvCvtSL) \
+ V(RiscvCvtSW) \
+ V(RiscvCvtSUw) \
+ V(RiscvCvtSUl) \
+ V(RiscvCvtDL) \
+ V(RiscvCvtDUw) \
+ V(RiscvCvtDUl) \
+ V(RiscvLb) \
+ V(RiscvLbu) \
+ V(RiscvSb) \
+ V(RiscvLh) \
+ V(RiscvUlh) \
+ V(RiscvLhu) \
+ V(RiscvUlhu) \
+ V(RiscvSh) \
+ V(RiscvUsh) \
+ V(RiscvLd) \
+ V(RiscvUld) \
+ V(RiscvLw) \
+ V(RiscvUlw) \
+ V(RiscvLwu) \
+ V(RiscvUlwu) \
+ V(RiscvSw) \
+ V(RiscvUsw) \
+ V(RiscvSd) \
+ V(RiscvUsd) \
+ V(RiscvLoadFloat) \
+ V(RiscvULoadFloat) \
+ V(RiscvStoreFloat) \
+ V(RiscvUStoreFloat) \
+ V(RiscvLoadDouble) \
+ V(RiscvULoadDouble) \
+ V(RiscvStoreDouble) \
+ V(RiscvUStoreDouble) \
+ V(RiscvBitcastDL) \
+ V(RiscvBitcastLD) \
+ V(RiscvBitcastInt32ToFloat32) \
+ V(RiscvBitcastFloat32ToInt32) \
+ V(RiscvFloat64ExtractLowWord32) \
+ V(RiscvFloat64ExtractHighWord32) \
+ V(RiscvFloat64InsertLowWord32) \
+ V(RiscvFloat64InsertHighWord32) \
+ V(RiscvFloat32Max) \
+ V(RiscvFloat64Max) \
+ V(RiscvFloat32Min) \
+ V(RiscvFloat64Min) \
+ V(RiscvFloat64SilenceNaN) \
+ V(RiscvPush) \
+ V(RiscvPeek) \
+ V(RiscvByteSwap64) \
+ V(RiscvByteSwap32) \
+ V(RiscvStoreToStackSlot) \
+ V(RiscvStackClaim) \
+ V(RiscvSignExtendByte) \
+ V(RiscvSignExtendShort) \
+ V(RiscvSync) \
+ V(RiscvAssertEqual) \
+ V(RiscvS128Const) \
+ V(RiscvS128Zero) \
+ V(RiscvS128AllOnes) \
+ V(RiscvI32x4Splat) \
+ V(RiscvI32x4ExtractLane) \
+ V(RiscvI32x4ReplaceLane) \
+ V(RiscvI32x4Add) \
+ V(RiscvI32x4AddHoriz) \
+ V(RiscvI32x4Sub) \
+ V(RiscvF64x2Abs) \
+ V(RiscvF64x2Neg) \
+ V(RiscvF32x4Splat) \
+ V(RiscvF32x4ExtractLane) \
+ V(RiscvF32x4ReplaceLane) \
+ V(RiscvF32x4SConvertI32x4) \
+ V(RiscvF32x4UConvertI32x4) \
+ V(RiscvI64x2SConvertI32x4Low) \
+ V(RiscvI64x2SConvertI32x4High) \
+ V(RiscvI64x2UConvertI32x4Low) \
+ V(RiscvI64x2UConvertI32x4High) \
+ V(RiscvI32x4Mul) \
+ V(RiscvI32x4MaxS) \
+ V(RiscvI32x4MinS) \
+ V(RiscvI32x4Eq) \
+ V(RiscvI32x4Ne) \
+ V(RiscvI32x4Shl) \
+ V(RiscvI32x4ShrS) \
+ V(RiscvI32x4ShrU) \
+ V(RiscvI32x4MaxU) \
+ V(RiscvI32x4MinU) \
+ V(RiscvI64x2Eq) \
+ V(RiscvF64x2Sqrt) \
+ V(RiscvF64x2Add) \
+ V(RiscvF64x2Sub) \
+ V(RiscvF64x2Mul) \
+ V(RiscvF64x2Div) \
+ V(RiscvF64x2Min) \
+ V(RiscvF64x2Max) \
+ V(RiscvF64x2ConvertLowI32x4S) \
+ V(RiscvF64x2ConvertLowI32x4U) \
+ V(RiscvF64x2PromoteLowF32x4) \
+ V(RiscvF64x2Eq) \
+ V(RiscvF64x2Ne) \
+ V(RiscvF64x2Lt) \
+ V(RiscvF64x2Le) \
+ V(RiscvF64x2Splat) \
+ V(RiscvF64x2ExtractLane) \
+ V(RiscvF64x2ReplaceLane) \
+ V(RiscvF64x2Pmin) \
+ V(RiscvF64x2Pmax) \
+ V(RiscvF64x2Ceil) \
+ V(RiscvF64x2Floor) \
+ V(RiscvF64x2Trunc) \
+ V(RiscvF64x2NearestInt) \
+ V(RiscvI64x2Splat) \
+ V(RiscvI64x2ExtractLane) \
+ V(RiscvI64x2ReplaceLane) \
+ V(RiscvI64x2Add) \
+ V(RiscvI64x2Sub) \
+ V(RiscvI64x2Mul) \
+ V(RiscvI64x2Neg) \
+ V(RiscvI64x2Shl) \
+ V(RiscvI64x2ShrS) \
+ V(RiscvI64x2ShrU) \
+ V(RiscvI64x2BitMask) \
+ V(RiscvF32x4Abs) \
+ V(RiscvF32x4Neg) \
+ V(RiscvF32x4Sqrt) \
+ V(RiscvF32x4RecipApprox) \
+ V(RiscvF32x4RecipSqrtApprox) \
+ V(RiscvF32x4Add) \
+ V(RiscvF32x4AddHoriz) \
+ V(RiscvF32x4Sub) \
+ V(RiscvF32x4Mul) \
+ V(RiscvF32x4Div) \
+ V(RiscvF32x4Max) \
+ V(RiscvF32x4Min) \
+ V(RiscvF32x4Eq) \
+ V(RiscvF32x4Ne) \
+ V(RiscvF32x4Lt) \
+ V(RiscvF32x4Le) \
+ V(RiscvF32x4Pmin) \
+ V(RiscvF32x4Pmax) \
+ V(RiscvF32x4DemoteF64x2Zero) \
+ V(RiscvF32x4Ceil) \
+ V(RiscvF32x4Floor) \
+ V(RiscvF32x4Trunc) \
+ V(RiscvF32x4NearestInt) \
+ V(RiscvI32x4SConvertF32x4) \
+ V(RiscvI32x4UConvertF32x4) \
+ V(RiscvI32x4Neg) \
+ V(RiscvI32x4GtS) \
+ V(RiscvI32x4GeS) \
+ V(RiscvI32x4GtU) \
+ V(RiscvI32x4GeU) \
+ V(RiscvI32x4Abs) \
+ V(RiscvI32x4BitMask) \
+ V(RiscvI32x4DotI16x8S) \
+ V(RiscvI32x4TruncSatF64x2SZero) \
+ V(RiscvI32x4TruncSatF64x2UZero) \
+ V(RiscvI16x8Splat) \
+ V(RiscvI16x8ExtractLaneU) \
+ V(RiscvI16x8ExtractLaneS) \
+ V(RiscvI16x8ReplaceLane) \
+ V(RiscvI16x8Neg) \
+ V(RiscvI16x8Shl) \
+ V(RiscvI16x8ShrS) \
+ V(RiscvI16x8ShrU) \
+ V(RiscvI16x8Add) \
+ V(RiscvI16x8AddSatS) \
+ V(RiscvI16x8AddHoriz) \
+ V(RiscvI16x8Sub) \
+ V(RiscvI16x8SubSatS) \
+ V(RiscvI16x8Mul) \
+ V(RiscvI16x8MaxS) \
+ V(RiscvI16x8MinS) \
+ V(RiscvI16x8Eq) \
+ V(RiscvI16x8Ne) \
+ V(RiscvI16x8GtS) \
+ V(RiscvI16x8GeS) \
+ V(RiscvI16x8AddSatU) \
+ V(RiscvI16x8SubSatU) \
+ V(RiscvI16x8MaxU) \
+ V(RiscvI16x8MinU) \
+ V(RiscvI16x8GtU) \
+ V(RiscvI16x8GeU) \
+ V(RiscvI16x8RoundingAverageU) \
+ V(RiscvI16x8Q15MulRSatS) \
+ V(RiscvI16x8Abs) \
+ V(RiscvI16x8BitMask) \
+ V(RiscvI8x16Splat) \
+ V(RiscvI8x16ExtractLaneU) \
+ V(RiscvI8x16ExtractLaneS) \
+ V(RiscvI8x16ReplaceLane) \
+ V(RiscvI8x16Neg) \
+ V(RiscvI8x16Shl) \
+ V(RiscvI8x16ShrS) \
+ V(RiscvI8x16Add) \
+ V(RiscvI8x16AddSatS) \
+ V(RiscvI8x16Sub) \
+ V(RiscvI8x16SubSatS) \
+ V(RiscvI8x16Mul) \
+ V(RiscvI8x16MaxS) \
+ V(RiscvI8x16MinS) \
+ V(RiscvI8x16Eq) \
+ V(RiscvI8x16Ne) \
+ V(RiscvI8x16GtS) \
+ V(RiscvI8x16GeS) \
+ V(RiscvI8x16ShrU) \
+ V(RiscvI8x16AddSatU) \
+ V(RiscvI8x16SubSatU) \
+ V(RiscvI8x16MaxU) \
+ V(RiscvI8x16MinU) \
+ V(RiscvI8x16GtU) \
+ V(RiscvI8x16GeU) \
+ V(RiscvI8x16RoundingAverageU) \
+ V(RiscvI8x16Abs) \
+ V(RiscvI8x16BitMask) \
+ V(RiscvI8x16Popcnt) \
+ V(RiscvS128And) \
+ V(RiscvS128Or) \
+ V(RiscvS128Xor) \
+ V(RiscvS128Not) \
+ V(RiscvS128Select) \
+ V(RiscvS128AndNot) \
+ V(RiscvV32x4AllTrue) \
+ V(RiscvV16x8AllTrue) \
+ V(RiscvV128AnyTrue) \
+ V(RiscvV8x16AllTrue) \
+ V(RiscvS32x4InterleaveRight) \
+ V(RiscvS32x4InterleaveLeft) \
+ V(RiscvS32x4PackEven) \
+ V(RiscvS32x4PackOdd) \
+ V(RiscvS32x4InterleaveEven) \
+ V(RiscvS32x4InterleaveOdd) \
+ V(RiscvS32x4Shuffle) \
+ V(RiscvS16x8InterleaveRight) \
+ V(RiscvS16x8InterleaveLeft) \
+ V(RiscvS16x8PackEven) \
+ V(RiscvS16x8PackOdd) \
+ V(RiscvS16x8InterleaveEven) \
+ V(RiscvS16x8InterleaveOdd) \
+ V(RiscvS16x4Reverse) \
+ V(RiscvS16x2Reverse) \
+ V(RiscvS8x16InterleaveRight) \
+ V(RiscvS8x16InterleaveLeft) \
+ V(RiscvS8x16PackEven) \
+ V(RiscvS8x16PackOdd) \
+ V(RiscvS8x16InterleaveEven) \
+ V(RiscvS8x16InterleaveOdd) \
+ V(RiscvS8x16Shuffle) \
+ V(RiscvI8x16Swizzle) \
+ V(RiscvS8x16Concat) \
+ V(RiscvS8x8Reverse) \
+ V(RiscvS8x4Reverse) \
+ V(RiscvS8x2Reverse) \
+ V(RiscvS128Load8Splat) \
+ V(RiscvS128Load16Splat) \
+ V(RiscvS128Load32Splat) \
+ V(RiscvS128Load64Splat) \
+ V(RiscvS128Load8x8S) \
+ V(RiscvS128Load8x8U) \
+ V(RiscvS128Load16x4S) \
+ V(RiscvS128Load16x4U) \
+ V(RiscvS128Load32x2S) \
+ V(RiscvS128Load32x2U) \
+ V(RiscvS128LoadLane) \
+ V(RiscvS128StoreLane) \
+ V(RiscvMsaLd) \
+ V(RiscvMsaSt) \
+ V(RiscvI32x4SConvertI16x8Low) \
+ V(RiscvI32x4SConvertI16x8High) \
+ V(RiscvI32x4UConvertI16x8Low) \
+ V(RiscvI32x4UConvertI16x8High) \
+ V(RiscvI16x8SConvertI8x16Low) \
+ V(RiscvI16x8SConvertI8x16High) \
+ V(RiscvI16x8SConvertI32x4) \
+ V(RiscvI16x8UConvertI32x4) \
+ V(RiscvI16x8UConvertI8x16Low) \
+ V(RiscvI16x8UConvertI8x16High) \
+ V(RiscvI8x16SConvertI16x8) \
+ V(RiscvI8x16UConvertI16x8) \
+ V(RiscvWord64AtomicLoadUint8) \
+ V(RiscvWord64AtomicLoadUint16) \
+ V(RiscvWord64AtomicLoadUint32) \
+ V(RiscvWord64AtomicLoadUint64) \
+ V(RiscvWord64AtomicStoreWord8) \
+ V(RiscvWord64AtomicStoreWord16) \
+ V(RiscvWord64AtomicStoreWord32) \
+ V(RiscvWord64AtomicStoreWord64) \
+ V(RiscvWord64AtomicAddUint8) \
+ V(RiscvWord64AtomicAddUint16) \
+ V(RiscvWord64AtomicAddUint32) \
+ V(RiscvWord64AtomicAddUint64) \
+ V(RiscvWord64AtomicSubUint8) \
+ V(RiscvWord64AtomicSubUint16) \
+ V(RiscvWord64AtomicSubUint32) \
+ V(RiscvWord64AtomicSubUint64) \
+ V(RiscvWord64AtomicAndUint8) \
+ V(RiscvWord64AtomicAndUint16) \
+ V(RiscvWord64AtomicAndUint32) \
+ V(RiscvWord64AtomicAndUint64) \
+ V(RiscvWord64AtomicOrUint8) \
+ V(RiscvWord64AtomicOrUint16) \
+ V(RiscvWord64AtomicOrUint32) \
+ V(RiscvWord64AtomicOrUint64) \
+ V(RiscvWord64AtomicXorUint8) \
+ V(RiscvWord64AtomicXorUint16) \
+ V(RiscvWord64AtomicXorUint32) \
+ V(RiscvWord64AtomicXorUint64) \
+ V(RiscvWord64AtomicExchangeUint8) \
+ V(RiscvWord64AtomicExchangeUint16) \
+ V(RiscvWord64AtomicExchangeUint32) \
+ V(RiscvWord64AtomicExchangeUint64) \
+ V(RiscvWord64AtomicCompareExchangeUint8) \
+ V(RiscvWord64AtomicCompareExchangeUint16) \
+ V(RiscvWord64AtomicCompareExchangeUint32) \
+ V(RiscvWord64AtomicCompareExchangeUint64)
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+// TODO(plind): Add the new r6 address modes.
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MRI) /* [%r0 + K] */ \
+ V(MRR) /* [%r0 + %r1] */
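+//
+// For example, a load whose offset fits in an immediate is emitted by the
+// instruction selector roughly as
+//   Emit(kRiscvLd | AddressingModeField::encode(kMode_MRI), dst, base, offset)
+// and the code generator decodes the addressing mode after register
+// allocation to pick the matching assembler call.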
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BACKEND_RISCV64_INSTRUCTION_CODES_RISCV64_H_
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
new file mode 100644
index 0000000000..fdc1346902
--- /dev/null
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
@@ -0,0 +1,1579 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/macro-assembler.h"
+#include "src/compiler/backend/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return true; }
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ switch (instr->arch_opcode()) {
+ case kRiscvAbsD:
+ case kRiscvAbsS:
+ case kRiscvAdd32:
+ case kRiscvAddD:
+ case kRiscvAddS:
+ case kRiscvAnd:
+ case kRiscvAnd32:
+ case kRiscvAssertEqual:
+ case kRiscvBitcastDL:
+ case kRiscvBitcastLD:
+ case kRiscvBitcastInt32ToFloat32:
+ case kRiscvBitcastFloat32ToInt32:
+ case kRiscvByteSwap32:
+ case kRiscvByteSwap64:
+ case kRiscvCeilWD:
+ case kRiscvCeilWS:
+ case kRiscvClz32:
+ case kRiscvCmp:
+ case kRiscvCmpD:
+ case kRiscvCmpS:
+ case kRiscvCtz32:
+ case kRiscvCvtDL:
+ case kRiscvCvtDS:
+ case kRiscvCvtDUl:
+ case kRiscvCvtDUw:
+ case kRiscvCvtDW:
+ case kRiscvCvtSD:
+ case kRiscvCvtSL:
+ case kRiscvCvtSUl:
+ case kRiscvCvtSUw:
+ case kRiscvCvtSW:
+ case kRiscvMulHigh64:
+ case kRiscvMulHighU32:
+ case kRiscvAdd64:
+ case kRiscvAddOvf64:
+ case kRiscvClz64:
+ case kRiscvCtz64:
+ case kRiscvDiv64:
+ case kRiscvDivU64:
+ case kRiscvZeroExtendWord:
+ case kRiscvSignExtendWord:
+ case kRiscvDiv32:
+ case kRiscvDivD:
+ case kRiscvDivS:
+ case kRiscvDivU32:
+ case kRiscvMod64:
+ case kRiscvModU64:
+ case kRiscvMul64:
+ case kRiscvPopcnt64:
+ case kRiscvRor64:
+ case kRiscvSar64:
+ case kRiscvShl64:
+ case kRiscvShr64:
+ case kRiscvSub64:
+ case kRiscvSubOvf64:
+ case kRiscvF64x2Abs:
+ case kRiscvF64x2Neg:
+ case kRiscvF64x2Sqrt:
+ case kRiscvF64x2Add:
+ case kRiscvF64x2Sub:
+ case kRiscvF64x2Mul:
+ case kRiscvF64x2Div:
+ case kRiscvF64x2Min:
+ case kRiscvF64x2Max:
+ case kRiscvF64x2Eq:
+ case kRiscvF64x2Ne:
+ case kRiscvF64x2Lt:
+ case kRiscvF64x2Le:
+ case kRiscvF64x2Pmin:
+ case kRiscvF64x2Pmax:
+ case kRiscvF64x2ConvertLowI32x4S:
+ case kRiscvF64x2ConvertLowI32x4U:
+ case kRiscvF64x2PromoteLowF32x4:
+ case kRiscvF64x2Ceil:
+ case kRiscvF64x2Floor:
+ case kRiscvF64x2Trunc:
+ case kRiscvF64x2NearestInt:
+ case kRiscvI64x2Splat:
+ case kRiscvI64x2ExtractLane:
+ case kRiscvI64x2ReplaceLane:
+ case kRiscvI64x2Add:
+ case kRiscvI64x2Sub:
+ case kRiscvI64x2Mul:
+ case kRiscvI64x2Neg:
+ case kRiscvI64x2Shl:
+ case kRiscvI64x2ShrS:
+ case kRiscvI64x2ShrU:
+ case kRiscvI64x2BitMask:
+ case kRiscvF32x4Abs:
+ case kRiscvF32x4Add:
+ case kRiscvF32x4AddHoriz:
+ case kRiscvF32x4Eq:
+ case kRiscvF32x4ExtractLane:
+ case kRiscvF32x4Lt:
+ case kRiscvF32x4Le:
+ case kRiscvF32x4Max:
+ case kRiscvF32x4Min:
+ case kRiscvF32x4Mul:
+ case kRiscvF32x4Div:
+ case kRiscvF32x4Ne:
+ case kRiscvF32x4Neg:
+ case kRiscvF32x4Sqrt:
+ case kRiscvF32x4RecipApprox:
+ case kRiscvF32x4RecipSqrtApprox:
+ case kRiscvF32x4ReplaceLane:
+ case kRiscvF32x4SConvertI32x4:
+ case kRiscvF32x4Splat:
+ case kRiscvF32x4Sub:
+ case kRiscvF32x4UConvertI32x4:
+ case kRiscvF32x4Pmin:
+ case kRiscvF32x4Pmax:
+ case kRiscvF32x4DemoteF64x2Zero:
+ case kRiscvF32x4Ceil:
+ case kRiscvF32x4Floor:
+ case kRiscvF32x4Trunc:
+ case kRiscvF32x4NearestInt:
+ case kRiscvI64x2Eq:
+ case kRiscvF64x2Splat:
+ case kRiscvF64x2ExtractLane:
+ case kRiscvF64x2ReplaceLane:
+ case kRiscvFloat32Max:
+ case kRiscvFloat32Min:
+ case kRiscvFloat32RoundDown:
+ case kRiscvFloat32RoundTiesEven:
+ case kRiscvFloat32RoundTruncate:
+ case kRiscvFloat32RoundUp:
+ case kRiscvFloat64ExtractLowWord32:
+ case kRiscvFloat64ExtractHighWord32:
+ case kRiscvFloat64InsertLowWord32:
+ case kRiscvFloat64InsertHighWord32:
+ case kRiscvFloat64Max:
+ case kRiscvFloat64Min:
+ case kRiscvFloat64RoundDown:
+ case kRiscvFloat64RoundTiesEven:
+ case kRiscvFloat64RoundTruncate:
+ case kRiscvFloat64RoundUp:
+ case kRiscvFloat64SilenceNaN:
+ case kRiscvFloorWD:
+ case kRiscvFloorWS:
+ case kRiscvI64x2SConvertI32x4Low:
+ case kRiscvI64x2SConvertI32x4High:
+ case kRiscvI64x2UConvertI32x4Low:
+ case kRiscvI64x2UConvertI32x4High:
+ case kRiscvI16x8Add:
+ case kRiscvI16x8AddHoriz:
+ case kRiscvI16x8AddSatS:
+ case kRiscvI16x8AddSatU:
+ case kRiscvI16x8Eq:
+ case kRiscvI16x8ExtractLaneU:
+ case kRiscvI16x8ExtractLaneS:
+ case kRiscvI16x8GeS:
+ case kRiscvI16x8GeU:
+ case kRiscvI16x8GtS:
+ case kRiscvI16x8GtU:
+ case kRiscvI16x8MaxS:
+ case kRiscvI16x8MaxU:
+ case kRiscvI16x8MinS:
+ case kRiscvI16x8MinU:
+ case kRiscvI16x8Mul:
+ case kRiscvI16x8Ne:
+ case kRiscvI16x8Neg:
+ case kRiscvI16x8ReplaceLane:
+ case kRiscvI8x16SConvertI16x8:
+ case kRiscvI16x8SConvertI32x4:
+ case kRiscvI16x8SConvertI8x16High:
+ case kRiscvI16x8SConvertI8x16Low:
+ case kRiscvI16x8Shl:
+ case kRiscvI16x8ShrS:
+ case kRiscvI16x8ShrU:
+ case kRiscvI32x4TruncSatF64x2SZero:
+ case kRiscvI32x4TruncSatF64x2UZero:
+ case kRiscvI16x8Splat:
+ case kRiscvI16x8Sub:
+ case kRiscvI16x8SubSatS:
+ case kRiscvI16x8SubSatU:
+ case kRiscvI8x16UConvertI16x8:
+ case kRiscvI16x8UConvertI32x4:
+ case kRiscvI16x8UConvertI8x16High:
+ case kRiscvI16x8UConvertI8x16Low:
+ case kRiscvI16x8RoundingAverageU:
+ case kRiscvI16x8Q15MulRSatS:
+ case kRiscvI16x8Abs:
+ case kRiscvI16x8BitMask:
+ case kRiscvI32x4Add:
+ case kRiscvI32x4AddHoriz:
+ case kRiscvI32x4Eq:
+ case kRiscvI32x4ExtractLane:
+ case kRiscvI32x4GeS:
+ case kRiscvI32x4GeU:
+ case kRiscvI32x4GtS:
+ case kRiscvI32x4GtU:
+ case kRiscvI32x4MaxS:
+ case kRiscvI32x4MaxU:
+ case kRiscvI32x4MinS:
+ case kRiscvI32x4MinU:
+ case kRiscvI32x4Mul:
+ case kRiscvI32x4Ne:
+ case kRiscvI32x4Neg:
+ case kRiscvI32x4ReplaceLane:
+ case kRiscvI32x4SConvertF32x4:
+ case kRiscvI32x4SConvertI16x8High:
+ case kRiscvI32x4SConvertI16x8Low:
+ case kRiscvI32x4Shl:
+ case kRiscvI32x4ShrS:
+ case kRiscvI32x4ShrU:
+ case kRiscvI32x4Splat:
+ case kRiscvI32x4Sub:
+ case kRiscvI32x4UConvertF32x4:
+ case kRiscvI32x4UConvertI16x8High:
+ case kRiscvI32x4UConvertI16x8Low:
+ case kRiscvI32x4Abs:
+ case kRiscvI32x4BitMask:
+ case kRiscvI32x4DotI16x8S:
+ case kRiscvI8x16Add:
+ case kRiscvI8x16AddSatS:
+ case kRiscvI8x16AddSatU:
+ case kRiscvI8x16Eq:
+ case kRiscvI8x16ExtractLaneU:
+ case kRiscvI8x16ExtractLaneS:
+ case kRiscvI8x16GeS:
+ case kRiscvI8x16GeU:
+ case kRiscvI8x16GtS:
+ case kRiscvI8x16GtU:
+ case kRiscvI8x16MaxS:
+ case kRiscvI8x16MaxU:
+ case kRiscvI8x16MinS:
+ case kRiscvI8x16MinU:
+ case kRiscvI8x16Mul:
+ case kRiscvI8x16Ne:
+ case kRiscvI8x16Neg:
+ case kRiscvI8x16ReplaceLane:
+ case kRiscvI8x16Shl:
+ case kRiscvI8x16ShrS:
+ case kRiscvI8x16ShrU:
+ case kRiscvI8x16Splat:
+ case kRiscvI8x16Sub:
+ case kRiscvI8x16SubSatS:
+ case kRiscvI8x16SubSatU:
+ case kRiscvI8x16RoundingAverageU:
+ case kRiscvI8x16Abs:
+ case kRiscvI8x16BitMask:
+ case kRiscvI8x16Popcnt:
+ case kRiscvMaxD:
+ case kRiscvMaxS:
+ case kRiscvMinD:
+ case kRiscvMinS:
+ case kRiscvMod32:
+ case kRiscvModU32:
+ case kRiscvMov:
+ case kRiscvMul32:
+ case kRiscvMulD:
+ case kRiscvMulHigh32:
+ case kRiscvMulOvf32:
+ case kRiscvMulS:
+ case kRiscvNegD:
+ case kRiscvNegS:
+ case kRiscvNor:
+ case kRiscvNor32:
+ case kRiscvOr:
+ case kRiscvOr32:
+ case kRiscvPopcnt32:
+ case kRiscvRor32:
+ case kRiscvRoundWD:
+ case kRiscvRoundWS:
+ case kRiscvS128And:
+ case kRiscvS128Or:
+ case kRiscvS128Not:
+ case kRiscvS128Select:
+ case kRiscvS128AndNot:
+ case kRiscvS128Xor:
+ case kRiscvS128Const:
+ case kRiscvS128Zero:
+ case kRiscvS128AllOnes:
+ case kRiscvS16x8InterleaveEven:
+ case kRiscvS16x8InterleaveOdd:
+ case kRiscvS16x8InterleaveLeft:
+ case kRiscvS16x8InterleaveRight:
+ case kRiscvS16x8PackEven:
+ case kRiscvS16x8PackOdd:
+ case kRiscvS16x2Reverse:
+ case kRiscvS16x4Reverse:
+ case kRiscvV8x16AllTrue:
+ case kRiscvV32x4AllTrue:
+ case kRiscvV16x8AllTrue:
+ case kRiscvV128AnyTrue:
+ case kRiscvS32x4InterleaveEven:
+ case kRiscvS32x4InterleaveOdd:
+ case kRiscvS32x4InterleaveLeft:
+ case kRiscvS32x4InterleaveRight:
+ case kRiscvS32x4PackEven:
+ case kRiscvS32x4PackOdd:
+ case kRiscvS32x4Shuffle:
+ case kRiscvS8x16Concat:
+ case kRiscvS8x16InterleaveEven:
+ case kRiscvS8x16InterleaveOdd:
+ case kRiscvS8x16InterleaveLeft:
+ case kRiscvS8x16InterleaveRight:
+ case kRiscvS8x16PackEven:
+ case kRiscvS8x16PackOdd:
+ case kRiscvS8x2Reverse:
+ case kRiscvS8x4Reverse:
+ case kRiscvS8x8Reverse:
+ case kRiscvS8x16Shuffle:
+ case kRiscvI8x16Swizzle:
+ case kRiscvSar32:
+ case kRiscvSignExtendByte:
+ case kRiscvSignExtendShort:
+ case kRiscvShl32:
+ case kRiscvShr32:
+ case kRiscvSqrtD:
+ case kRiscvSqrtS:
+ case kRiscvSub32:
+ case kRiscvSubD:
+ case kRiscvSubS:
+ case kRiscvTruncLD:
+ case kRiscvTruncLS:
+ case kRiscvTruncUlD:
+ case kRiscvTruncUlS:
+ case kRiscvTruncUwD:
+ case kRiscvTruncUwS:
+ case kRiscvTruncWD:
+ case kRiscvTruncWS:
+ case kRiscvTst:
+ case kRiscvXor:
+ case kRiscvXor32:
+ return kNoOpcodeFlags;
+
+ case kRiscvLb:
+ case kRiscvLbu:
+ case kRiscvLd:
+ case kRiscvLoadDouble:
+ case kRiscvLh:
+ case kRiscvLhu:
+ case kRiscvLw:
+ case kRiscvLoadFloat:
+ case kRiscvLwu:
+ case kRiscvMsaLd:
+ case kRiscvPeek:
+ case kRiscvUld:
+ case kRiscvULoadDouble:
+ case kRiscvUlh:
+ case kRiscvUlhu:
+ case kRiscvUlw:
+ case kRiscvUlwu:
+ case kRiscvULoadFloat:
+ case kRiscvS128Load8Splat:
+ case kRiscvS128Load16Splat:
+ case kRiscvS128Load32Splat:
+ case kRiscvS128Load64Splat:
+ case kRiscvS128Load8x8S:
+ case kRiscvS128Load8x8U:
+ case kRiscvS128Load16x4S:
+ case kRiscvS128Load16x4U:
+ case kRiscvS128Load32x2S:
+ case kRiscvS128Load32x2U:
+ case kRiscvS128LoadLane:
+ case kRiscvS128StoreLane:
+ case kRiscvWord64AtomicLoadUint8:
+ case kRiscvWord64AtomicLoadUint16:
+ case kRiscvWord64AtomicLoadUint32:
+ case kRiscvWord64AtomicLoadUint64:
+ return kIsLoadOperation;
+
+ case kRiscvModD:
+ case kRiscvModS:
+ case kRiscvMsaSt:
+ case kRiscvPush:
+ case kRiscvSb:
+ case kRiscvSd:
+ case kRiscvStoreDouble:
+ case kRiscvSh:
+ case kRiscvStackClaim:
+ case kRiscvStoreToStackSlot:
+ case kRiscvSw:
+ case kRiscvStoreFloat:
+ case kRiscvUsd:
+ case kRiscvUStoreDouble:
+ case kRiscvUsh:
+ case kRiscvUsw:
+ case kRiscvUStoreFloat:
+ case kRiscvSync:
+ case kRiscvWord64AtomicStoreWord8:
+ case kRiscvWord64AtomicStoreWord16:
+ case kRiscvWord64AtomicStoreWord32:
+ case kRiscvWord64AtomicStoreWord64:
+ case kRiscvWord64AtomicAddUint8:
+ case kRiscvWord64AtomicAddUint16:
+ case kRiscvWord64AtomicAddUint32:
+ case kRiscvWord64AtomicAddUint64:
+ case kRiscvWord64AtomicSubUint8:
+ case kRiscvWord64AtomicSubUint16:
+ case kRiscvWord64AtomicSubUint32:
+ case kRiscvWord64AtomicSubUint64:
+ case kRiscvWord64AtomicAndUint8:
+ case kRiscvWord64AtomicAndUint16:
+ case kRiscvWord64AtomicAndUint32:
+ case kRiscvWord64AtomicAndUint64:
+ case kRiscvWord64AtomicOrUint8:
+ case kRiscvWord64AtomicOrUint16:
+ case kRiscvWord64AtomicOrUint32:
+ case kRiscvWord64AtomicOrUint64:
+ case kRiscvWord64AtomicXorUint8:
+ case kRiscvWord64AtomicXorUint16:
+ case kRiscvWord64AtomicXorUint32:
+ case kRiscvWord64AtomicXorUint64:
+ case kRiscvWord64AtomicExchangeUint8:
+ case kRiscvWord64AtomicExchangeUint16:
+ case kRiscvWord64AtomicExchangeUint32:
+ case kRiscvWord64AtomicExchangeUint64:
+ case kRiscvWord64AtomicCompareExchangeUint8:
+ case kRiscvWord64AtomicCompareExchangeUint16:
+ case kRiscvWord64AtomicCompareExchangeUint32:
+ case kRiscvWord64AtomicCompareExchangeUint64:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ // Already covered in architecture independent code.
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+}
+
+enum Latency {
+ BRANCH = 4, // Estimated max.
+ RINT_S = 4, // Estimated.
+ RINT_D = 4, // Estimated.
+
+ // TODO(RISCV): remove MULT instructions (MIPS legacy).
+ MULT = 4,
+ MULTU = 4,
+ DMULT = 4,
+
+ MUL32 = 7,
+
+ DIV32 = 50, // Min:11 Max:50
+ DIV64 = 50,
+ DIVU32 = 50,
+ DIVU64 = 50,
+
+ ABS_S = 4,
+ ABS_D = 4,
+ NEG_S = 4,
+ NEG_D = 4,
+ ADD_S = 4,
+ ADD_D = 4,
+ SUB_S = 4,
+ SUB_D = 4,
+ MAX_S = 4, // Estimated.
+ MIN_S = 4,
+ MAX_D = 4, // Estimated.
+ MIN_D = 4,
+ C_cond_S = 4,
+ C_cond_D = 4,
+ MUL_S = 4,
+
+ MADD_S = 4,
+ MSUB_S = 4,
+ NMADD_S = 4,
+ NMSUB_S = 4,
+
+ CABS_cond_S = 4,
+ CABS_cond_D = 4,
+
+ CVT_D_S = 4,
+ CVT_PS_PW = 4,
+
+ CVT_S_W = 4,
+ CVT_S_L = 4,
+ CVT_D_W = 4,
+ CVT_D_L = 4,
+
+ CVT_S_D = 4,
+
+ CVT_W_S = 4,
+ CVT_W_D = 4,
+ CVT_L_S = 4,
+ CVT_L_D = 4,
+
+ CEIL_W_S = 4,
+ CEIL_W_D = 4,
+ CEIL_L_S = 4,
+ CEIL_L_D = 4,
+
+ FLOOR_W_S = 4,
+ FLOOR_W_D = 4,
+ FLOOR_L_S = 4,
+ FLOOR_L_D = 4,
+
+ ROUND_W_S = 4,
+ ROUND_W_D = 4,
+ ROUND_L_S = 4,
+ ROUND_L_D = 4,
+
+ TRUNC_W_S = 4,
+ TRUNC_W_D = 4,
+ TRUNC_L_S = 4,
+ TRUNC_L_D = 4,
+
+ MOV_S = 4,
+ MOV_D = 4,
+
+ MOVF_S = 4,
+ MOVF_D = 4,
+
+ MOVN_S = 4,
+ MOVN_D = 4,
+
+ MOVT_S = 4,
+ MOVT_D = 4,
+
+ MOVZ_S = 4,
+ MOVZ_D = 4,
+
+ MUL_D = 5,
+ MADD_D = 5,
+ MSUB_D = 5,
+ NMADD_D = 5,
+ NMSUB_D = 5,
+
+ RECIP_S = 13,
+ RECIP_D = 26,
+
+ RSQRT_S = 17,
+ RSQRT_D = 36,
+
+ DIV_S = 17,
+ SQRT_S = 17,
+
+ DIV_D = 32,
+ SQRT_D = 32,
+
+ MOVT_FREG = 4,
+ MOVT_HIGH_FREG = 4,
+ MOVT_DREG = 4,
+ LOAD_FLOAT = 4,
+ LOAD_DOUBLE = 4,
+
+ MOVF_FREG = 1,
+ MOVF_HIGH_FREG = 1,
+ MOVF_HIGH_DREG = 1,
+ MOVF_HIGH = 1,
+ MOVF_LOW = 1,
+ STORE_FLOAT = 1,
+ STORE_DOUBLE = 1,
+};
+
+int Add64Latency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ return 1;
+ } else {
+ return 2; // Estimated max.
+ }
+}
+
+int Sub64Latency(bool is_operand_register = true) {
+ return Add64Latency(is_operand_register);
+}
+
+int AndLatency(bool is_operand_register = true) {
+ return Add64Latency(is_operand_register);
+}
+
+int OrLatency(bool is_operand_register = true) {
+ return Add64Latency(is_operand_register);
+}
+
+int NorLatency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ return 1;
+ } else {
+ return 2; // Estimated max.
+ }
+}
+
+int XorLatency(bool is_operand_register = true) {
+ return Add64Latency(is_operand_register);
+}
+
+int Mul32Latency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ return Latency::MUL32;
+ } else {
+ return Latency::MUL32 + 1;
+ }
+}
+
+int Mul64Latency(bool is_operand_register = true) {
+ int latency = Latency::DMULT + Latency::MOVF_LOW;
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int Mulh32Latency(bool is_operand_register = true) {
+ int latency = Latency::MULT + Latency::MOVF_HIGH;
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int Mulhu32Latency(bool is_operand_register = true) {
+ int latency = Latency::MULTU + Latency::MOVF_HIGH;
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int Mulh64Latency(bool is_operand_register = true) {
+ int latency = Latency::DMULT + Latency::MOVF_HIGH;
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int Div32Latency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ return Latency::DIV32;
+ } else {
+ return Latency::DIV32 + 1;
+ }
+}
+
+int Divu32Latency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ return Latency::DIVU32;
+ } else {
+ return Latency::DIVU32 + 1;
+ }
+}
+
+int Div64Latency(bool is_operand_register = true) {
+ int latency = Latency::DIV64 + Latency::MOVF_LOW;
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int Divu64Latency(bool is_operand_register = true) {
+ int latency = Latency::DIVU64 + Latency::MOVF_LOW;
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int Mod32Latency(bool is_operand_register = true) {
+ int latency = Latency::DIV32 + Latency::MOVF_HIGH;
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int Modu32Latency(bool is_operand_register = true) {
+ int latency = Latency::DIVU32 + Latency::MOVF_HIGH;
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int Mod64Latency(bool is_operand_register = true) {
+ int latency = Latency::DIV64 + Latency::MOVF_HIGH;
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int Modu64Latency(bool is_operand_register = true) {
+ int latency = Latency::DIVU64 + Latency::MOVF_HIGH;
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int MovzLatency() { return 1; }
+
+int MovnLatency() { return 1; }
+
+int CallLatency() {
+ // Estimated.
+ return Add64Latency(false) + Latency::BRANCH + 5;
+}
+
+int JumpLatency() {
+ // Estimated max.
+ return 1 + Add64Latency() + Latency::BRANCH + 2;
+}
+
+int SmiUntagLatency() { return 1; }
+
+int PrepareForTailCallLatency() {
+ // Estimated max.
+ return 2 * (Add64Latency() + 1 + Add64Latency(false)) + 2 + Latency::BRANCH +
+ Latency::BRANCH + 2 * Sub64Latency(false) + 2 + Latency::BRANCH + 1;
+}
+
+int AssemblePopArgumentsAdoptFrameLatency() {
+ return 1 + Latency::BRANCH + 1 + SmiUntagLatency() +
+ PrepareForTailCallLatency();
+}
+
+int AssertLatency() { return 1; }
+
+int PrepareCallCFunctionLatency() {
+ int frame_alignment = TurboAssembler::ActivationFrameAlignment();
+ if (frame_alignment > kSystemPointerSize) {
+ return 1 + Sub64Latency(false) + AndLatency(false) + 1;
+ } else {
+ return Sub64Latency(false);
+ }
+}
+
+int AdjustBaseAndOffsetLatency() {
+ return 3; // Estimated max.
+}
+
+int AlignedMemoryLatency() { return AdjustBaseAndOffsetLatency() + 1; }
+
+int UlhuLatency() {
+ return AdjustBaseAndOffsetLatency() + 2 * AlignedMemoryLatency() + 2;
+}
+
+int UlwLatency() {
+ // Estimated max.
+ return AdjustBaseAndOffsetLatency() + 3;
+}
+
+int UlwuLatency() { return UlwLatency() + 1; }
+
+int UldLatency() {
+ // Estimated max.
+ return AdjustBaseAndOffsetLatency() + 3;
+}
+
+int ULoadFloatLatency() { return UlwLatency() + Latency::MOVT_FREG; }
+
+int ULoadDoubleLatency() { return UldLatency() + Latency::MOVT_DREG; }
+
+int UshLatency() {
+ // Estimated max.
+ return AdjustBaseAndOffsetLatency() + 2 + 2 * AlignedMemoryLatency();
+}
+
+int UswLatency() { return AdjustBaseAndOffsetLatency() + 2; }
+
+int UsdLatency() { return AdjustBaseAndOffsetLatency() + 2; }
+
+int UStoreFloatLatency() { return Latency::MOVF_FREG + UswLatency(); }
+
+int UStoreDoubleLatency() { return Latency::MOVF_HIGH_DREG + UsdLatency(); }
+
+int LoadFloatLatency() {
+ return AdjustBaseAndOffsetLatency() + Latency::LOAD_FLOAT;
+}
+
+int StoreFloatLatency() {
+ return AdjustBaseAndOffsetLatency() + Latency::STORE_FLOAT;
+}
+
+int StoreDoubleLatency() {
+ return AdjustBaseAndOffsetLatency() + Latency::STORE_DOUBLE;
+}
+
+int LoadDoubleLatency() {
+ return AdjustBaseAndOffsetLatency() + Latency::LOAD_DOUBLE;
+}
+
+int MultiPushLatency() {
+ int latency = Sub64Latency(false);
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ latency++;
+ }
+ return latency;
+}
+
+int MultiPushFPULatency() {
+ int latency = Sub64Latency(false);
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ latency += StoreDoubleLatency();
+ }
+ return latency;
+}
+
+int PushCallerSavedLatency(SaveFPRegsMode fp_mode) {
+ int latency = MultiPushLatency();
+ if (fp_mode == kSaveFPRegs) {
+ latency += MultiPushFPULatency();
+ }
+ return latency;
+}
+
+int MultiPopLatency() {
+ int latency = Add64Latency(false);
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ latency++;
+ }
+ return latency;
+}
+
+int MultiPopFPULatency() {
+ int latency = Add64Latency(false);
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ latency += LoadDoubleLatency();
+ }
+ return latency;
+}
+
+int PopCallerSavedLatency(SaveFPRegsMode fp_mode) {
+ int latency = MultiPopLatency();
+ if (fp_mode == kSaveFPRegs) {
+ latency += MultiPopFPULatency();
+ }
+ return latency;
+}
+
+int CallCFunctionHelperLatency() {
+ // Estimated.
+ int latency = AndLatency(false) + Latency::BRANCH + 2 + CallLatency();
+ if (base::OS::ActivationFrameAlignment() > kSystemPointerSize) {
+ latency++;
+ } else {
+ latency += Add64Latency(false);
+ }
+ return latency;
+}
+
+int CallCFunctionLatency() { return 1 + CallCFunctionHelperLatency(); }
+
+int AssembleArchJumpLatency() {
+ // Estimated max.
+ return Latency::BRANCH;
+}
+
+int GenerateSwitchTableLatency() {
+ int latency = 6;
+ latency += 2;
+ return latency;
+}
+
+int AssembleArchTableSwitchLatency() {
+ return Latency::BRANCH + GenerateSwitchTableLatency();
+}
+
+int DropAndRetLatency() {
+ // Estimated max.
+ return Add64Latency(false) + JumpLatency();
+}
+
+int AssemblerReturnLatency() {
+ // Estimated max.
+ return Add64Latency(false) + MultiPopLatency() + MultiPopFPULatency() +
+ Latency::BRANCH + Add64Latency() + 1 + DropAndRetLatency();
+}
+
+int TryInlineTruncateDoubleToILatency() {
+ return 2 + Latency::TRUNC_W_D + Latency::MOVF_FREG + 2 + AndLatency(false) +
+ Latency::BRANCH;
+}
+
+int CallStubDelayedLatency() { return 1 + CallLatency(); }
+
+int TruncateDoubleToIDelayedLatency() {
+ // TODO(riscv): This no longer reflects how TruncateDoubleToI is called.
+ return TryInlineTruncateDoubleToILatency() + 1 + Sub64Latency(false) +
+ StoreDoubleLatency() + CallStubDelayedLatency() + Add64Latency(false) +
+ 1;
+}
+
+int CheckPageFlagLatency() {
+ return AndLatency(false) + AlignedMemoryLatency() + AndLatency(false) +
+ Latency::BRANCH;
+}
+
+int SltuLatency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ return 1;
+ } else {
+ return 2; // Estimated max.
+ }
+}
+
+int BranchShortHelperLatency() {
+ return SltuLatency() + 2; // Estimated max.
+}
+
+int BranchShortLatency() { return BranchShortHelperLatency(); }
+
+int MoveLatency() { return 1; }
+
+int MovToFloatParametersLatency() { return 2 * MoveLatency(); }
+
+int MovFromFloatResultLatency() { return MoveLatency(); }
+
+int AddOverflow64Latency() {
+ // Estimated max.
+ return 6;
+}
+
+int SubOverflow64Latency() {
+ // Estimated max.
+ return 6;
+}
+
+int MulOverflow32Latency() {
+ // Estimated max.
+ return Mul32Latency() + Mulh32Latency() + 2;
+}
+
+// TODO(RISCV): This is incorrect for RISC-V.
+int Clz64Latency() { return 1; }
+
+int Ctz32Latency() {
+ return Add64Latency(false) + XorLatency() + AndLatency() + Clz64Latency() +
+ 1 + Sub64Latency();
+}
+
+int Ctz64Latency() {
+ return Add64Latency(false) + XorLatency() + AndLatency() + 1 + Sub64Latency();
+}
+
+int Popcnt32Latency() {
+ return 2 + AndLatency() + Sub64Latency() + 1 + AndLatency() + 1 +
+ AndLatency() + Add64Latency() + 1 + Add64Latency() + 1 + AndLatency() +
+ 1 + Mul32Latency() + 1;
+}
+
+int Popcnt64Latency() {
+ return 2 + AndLatency() + Sub64Latency() + 1 + AndLatency() + 1 +
+ AndLatency() + Add64Latency() + 1 + Add64Latency() + 1 + AndLatency() +
+ 1 + Mul64Latency() + 1;
+}
+
+int CompareFLatency() { return Latency::C_cond_S; }
+
+int CompareF32Latency() { return CompareFLatency(); }
+
+int CompareF64Latency() { return CompareFLatency(); }
+
+int CompareIsNanFLatency() { return CompareFLatency(); }
+
+int CompareIsNanF32Latency() { return CompareIsNanFLatency(); }
+
+int CompareIsNanF64Latency() { return CompareIsNanFLatency(); }
+
+int NegsLatency() {
+ // Estimated.
+ return CompareIsNanF32Latency() + 2 * Latency::BRANCH + Latency::NEG_S +
+ Latency::MOVF_FREG + 1 + XorLatency() + Latency::MOVT_FREG;
+}
+
+int NegdLatency() {
+ // Estimated.
+ return CompareIsNanF64Latency() + 2 * Latency::BRANCH + Latency::NEG_D +
+ Latency::MOVF_HIGH_DREG + 1 + XorLatency() + Latency::MOVT_DREG;
+}
+
+int Float64RoundLatency() {
+ // For ceil_l_d, floor_l_d, round_l_d, and trunc_l_d, the latency is 4.
+ return Latency::MOVF_HIGH_DREG + 1 + Latency::BRANCH + Latency::MOV_D + 4 +
+ Latency::MOVF_HIGH_DREG + Latency::BRANCH + Latency::CVT_D_L + 2 +
+ Latency::MOVT_HIGH_FREG;
+}
+
+int Float32RoundLatency() {
+ // For ceil_w_s, floor_w_s, round_w_s, and trunc_w_s, the latency is 4.
+ return Latency::MOVF_FREG + 1 + Latency::BRANCH + Latency::MOV_S + 4 +
+ Latency::MOVF_FREG + Latency::BRANCH + Latency::CVT_S_W + 2 +
+ Latency::MOVT_FREG;
+}
+
+int Float32MaxLatency() {
+ // Estimated max.
+ int latency = CompareIsNanF32Latency() + Latency::BRANCH;
+ return latency + 5 * Latency::BRANCH + 2 * CompareF32Latency() +
+ Latency::MOVF_FREG + 1 + Latency::MOV_S;
+}
+
+int Float64MaxLatency() {
+ // Estimated max.
+ int latency = CompareIsNanF64Latency() + Latency::BRANCH;
+ return latency + 5 * Latency::BRANCH + 2 * CompareF64Latency() +
+ Latency::MOVF_HIGH_DREG + Latency::MOV_D;
+}
+
+int Float32MinLatency() {
+ // Estimated max.
+ int latency = CompareIsNanF32Latency() + Latency::BRANCH;
+ return latency + 5 * Latency::BRANCH + 2 * CompareF32Latency() +
+ Latency::MOVF_FREG + 1 + Latency::MOV_S;
+}
+
+int Float64MinLatency() {
+ // Estimated max.
+ int latency = CompareIsNanF64Latency() + Latency::BRANCH;
+ return latency + 5 * Latency::BRANCH + 2 * CompareF64Latency() +
+ Latency::MOVF_HIGH_DREG + Latency::MOV_D;
+}
+
+int TruncLSLatency(bool load_status) {
+ int latency = Latency::TRUNC_L_S + Latency::MOVF_HIGH_DREG;
+ if (load_status) {
+ latency += SltuLatency() + 7;
+ }
+ return latency;
+}
+
+int TruncLDLatency(bool load_status) {
+ int latency = Latency::TRUNC_L_D + Latency::MOVF_HIGH_DREG;
+ if (load_status) {
+ latency += SltuLatency() + 7;
+ }
+ return latency;
+}
+
+int TruncUlSLatency() {
+ // Estimated max.
+ return 2 * CompareF32Latency() + CompareIsNanF32Latency() +
+ 4 * Latency::BRANCH + Latency::SUB_S + 2 * Latency::TRUNC_L_S +
+ 3 * Latency::MOVF_HIGH_DREG + OrLatency() + Latency::MOVT_FREG +
+ Latency::MOV_S + SltuLatency() + 4;
+}
+
+int TruncUlDLatency() {
+ // Estimated max.
+ return 2 * CompareF64Latency() + CompareIsNanF64Latency() +
+ 4 * Latency::BRANCH + Latency::SUB_D + 2 * Latency::TRUNC_L_D +
+ 3 * Latency::MOVF_HIGH_DREG + OrLatency() + Latency::MOVT_DREG +
+ Latency::MOV_D + SltuLatency() + 4;
+}
+
+int PushLatency() { return Add64Latency() + AlignedMemoryLatency(); }
+
+int ByteSwapSignedLatency() { return 2; }
+
+int LlLatency(int offset) {
+ bool is_one_instruction = is_int12(offset);
+ if (is_one_instruction) {
+ return 1;
+ } else {
+ return 3;
+ }
+}
+
+int ExtractBitsLatency(bool sign_extend, int size) {
+ int latency = 2;
+ if (sign_extend) {
+ switch (size) {
+ case 8:
+ case 16:
+ case 32:
+ latency += 1;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ return latency;
+}
+
+int InsertBitsLatency() { return 2 + Sub64Latency(false) + 2; }
+
+int ScLatency(int offset) { return 3; }
+
+int Word32AtomicExchangeLatency(bool sign_extend, int size) {
+ return Add64Latency(false) + 1 + Sub64Latency() + 2 + LlLatency(0) +
+ ExtractBitsLatency(sign_extend, size) + InsertBitsLatency() +
+ ScLatency(0) + BranchShortLatency() + 1;
+}
+
+int Word32AtomicCompareExchangeLatency(bool sign_extend, int size) {
+ return 2 + Sub64Latency() + 2 + LlLatency(0) +
+ ExtractBitsLatency(sign_extend, size) + InsertBitsLatency() +
+ ScLatency(0) + BranchShortLatency() + 1;
+}
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ // TODO(RISCV): Verify these latencies for RISC-V (currently using MIPS
+ // numbers).
+ switch (instr->arch_opcode()) {
+ case kArchCallCodeObject:
+ case kArchCallWasmFunction:
+ return CallLatency();
+ case kArchTailCallCodeObject:
+ case kArchTailCallWasm:
+ case kArchTailCallAddress:
+ return JumpLatency();
+ case kArchCallJSFunction: {
+ int latency = 0;
+ if (FLAG_debug_code) {
+ latency = 1 + AssertLatency();
+ }
+ return latency + 1 + Add64Latency(false) + CallLatency();
+ }
+ case kArchPrepareCallCFunction:
+ return PrepareCallCFunctionLatency();
+ case kArchSaveCallerRegisters: {
+ auto fp_mode =
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
+ return PushCallerSavedLatency(fp_mode);
+ }
+ case kArchRestoreCallerRegisters: {
+ auto fp_mode =
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
+ return PopCallerSavedLatency(fp_mode);
+ }
+ case kArchPrepareTailCall:
+ return 2;
+ case kArchCallCFunction:
+ return CallCFunctionLatency();
+ case kArchJmp:
+ return AssembleArchJumpLatency();
+ case kArchTableSwitch:
+ return AssembleArchTableSwitchLatency();
+ case kArchAbortCSAAssert:
+ return CallLatency() + 1;
+ case kArchDebugBreak:
+ return 1;
+ case kArchComment:
+ case kArchNop:
+ case kArchThrowTerminator:
+ case kArchDeoptimize:
+ return 0;
+ case kArchRet:
+ return AssemblerReturnLatency();
+ case kArchFramePointer:
+ return 1;
+ case kArchParentFramePointer:
+ // Estimated max.
+ return AlignedMemoryLatency();
+ case kArchTruncateDoubleToI:
+ return TruncateDoubleToIDelayedLatency();
+ case kArchStoreWithWriteBarrier:
+ return Add64Latency() + 1 + CheckPageFlagLatency();
+ case kArchStackSlot:
+ // Estimated max.
+ return Add64Latency(false) + AndLatency(false) + AssertLatency() +
+ Add64Latency(false) + AndLatency(false) + BranchShortLatency() +
+ 1 + Sub64Latency() + Add64Latency();
+ case kArchWordPoisonOnSpeculation:
+ return AndLatency();
+ case kIeee754Float64Acos:
+ case kIeee754Float64Acosh:
+ case kIeee754Float64Asin:
+ case kIeee754Float64Asinh:
+ case kIeee754Float64Atan:
+ case kIeee754Float64Atanh:
+ case kIeee754Float64Atan2:
+ case kIeee754Float64Cos:
+ case kIeee754Float64Cosh:
+ case kIeee754Float64Cbrt:
+ case kIeee754Float64Exp:
+ case kIeee754Float64Expm1:
+ case kIeee754Float64Log:
+ case kIeee754Float64Log1p:
+ case kIeee754Float64Log10:
+ case kIeee754Float64Log2:
+ case kIeee754Float64Pow:
+ case kIeee754Float64Sin:
+ case kIeee754Float64Sinh:
+ case kIeee754Float64Tan:
+ case kIeee754Float64Tanh:
+ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() +
+ CallCFunctionLatency() + MovFromFloatResultLatency();
+ case kRiscvAdd32:
+ case kRiscvAdd64:
+ return Add64Latency(instr->InputAt(1)->IsRegister());
+ case kRiscvAddOvf64:
+ return AddOverflow64Latency();
+ case kRiscvSub32:
+ case kRiscvSub64:
+ return Sub64Latency(instr->InputAt(1)->IsRegister());
+ case kRiscvSubOvf64:
+ return SubOverflow64Latency();
+ case kRiscvMul32:
+ return Mul32Latency();
+ case kRiscvMulOvf32:
+ return MulOverflow32Latency();
+ case kRiscvMulHigh32:
+ return Mulh32Latency();
+ case kRiscvMulHighU32:
+ return Mulhu32Latency();
+ case kRiscvMulHigh64:
+ return Mulh64Latency();
+ case kRiscvDiv32: {
+ int latency = Div32Latency(instr->InputAt(1)->IsRegister());
+ return latency + MovzLatency();
+ }
+ case kRiscvDivU32: {
+ int latency = Divu32Latency(instr->InputAt(1)->IsRegister());
+ return latency + MovzLatency();
+ }
+ case kRiscvMod32:
+ return Mod32Latency();
+ case kRiscvModU32:
+ return Modu32Latency();
+ case kRiscvMul64:
+ return Mul64Latency();
+ case kRiscvDiv64: {
+ int latency = Div64Latency();
+ return latency + MovzLatency();
+ }
+ case kRiscvDivU64: {
+ int latency = Divu64Latency();
+ return latency + MovzLatency();
+ }
+ case kRiscvMod64:
+ return Mod64Latency();
+ case kRiscvModU64:
+ return Modu64Latency();
+ case kRiscvAnd:
+ return AndLatency(instr->InputAt(1)->IsRegister());
+ case kRiscvAnd32: {
+ bool is_operand_register = instr->InputAt(1)->IsRegister();
+ int latency = AndLatency(is_operand_register);
+ if (is_operand_register) {
+ return latency + 2;
+ } else {
+ return latency + 1;
+ }
+ }
+ case kRiscvOr:
+ return OrLatency(instr->InputAt(1)->IsRegister());
+ case kRiscvOr32: {
+ bool is_operand_register = instr->InputAt(1)->IsRegister();
+ int latency = OrLatency(is_operand_register);
+ if (is_operand_register) {
+ return latency + 2;
+ } else {
+ return latency + 1;
+ }
+ }
+ case kRiscvNor:
+ return NorLatency(instr->InputAt(1)->IsRegister());
+ case kRiscvNor32: {
+ bool is_operand_register = instr->InputAt(1)->IsRegister();
+ int latency = NorLatency(is_operand_register);
+ if (is_operand_register) {
+ return latency + 2;
+ } else {
+ return latency + 1;
+ }
+ }
+ case kRiscvXor:
+ return XorLatency(instr->InputAt(1)->IsRegister());
+ case kRiscvXor32: {
+ bool is_operand_register = instr->InputAt(1)->IsRegister();
+ int latency = XorLatency(is_operand_register);
+ if (is_operand_register) {
+ return latency + 2;
+ } else {
+ return latency + 1;
+ }
+ }
+ case kRiscvClz32:
+ case kRiscvClz64:
+ return Clz64Latency();
+ case kRiscvCtz32:
+ return Ctz32Latency();
+ case kRiscvCtz64:
+ return Ctz64Latency();
+ case kRiscvPopcnt32:
+ return Popcnt32Latency();
+ case kRiscvPopcnt64:
+ return Popcnt64Latency();
+ case kRiscvShl32:
+ return 1;
+ case kRiscvShr32:
+ case kRiscvSar32:
+ case kRiscvZeroExtendWord:
+ return 2;
+ case kRiscvSignExtendWord:
+ case kRiscvShl64:
+ case kRiscvShr64:
+ case kRiscvSar64:
+ case kRiscvRor32:
+ case kRiscvRor64:
+ return 1;
+ case kRiscvTst:
+ return AndLatency(instr->InputAt(1)->IsRegister());
+ case kRiscvMov:
+ return 1;
+ case kRiscvCmpS:
+ return MoveLatency() + CompareF32Latency();
+ case kRiscvAddS:
+ return Latency::ADD_S;
+ case kRiscvSubS:
+ return Latency::SUB_S;
+ case kRiscvMulS:
+ return Latency::MUL_S;
+ case kRiscvDivS:
+ return Latency::DIV_S;
+ case kRiscvModS:
+ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() +
+ CallCFunctionLatency() + MovFromFloatResultLatency();
+ case kRiscvAbsS:
+ return Latency::ABS_S;
+ case kRiscvNegS:
+ return NegsLatency();
+ case kRiscvSqrtS:
+ return Latency::SQRT_S;
+ case kRiscvMaxS:
+ return Latency::MAX_S;
+ case kRiscvMinS:
+ return Latency::MIN_S;
+ case kRiscvCmpD:
+ return MoveLatency() + CompareF64Latency();
+ case kRiscvAddD:
+ return Latency::ADD_D;
+ case kRiscvSubD:
+ return Latency::SUB_D;
+ case kRiscvMulD:
+ return Latency::MUL_D;
+ case kRiscvDivD:
+ return Latency::DIV_D;
+ case kRiscvModD:
+ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() +
+ CallCFunctionLatency() + MovFromFloatResultLatency();
+ case kRiscvAbsD:
+ return Latency::ABS_D;
+ case kRiscvNegD:
+ return NegdLatency();
+ case kRiscvSqrtD:
+ return Latency::SQRT_D;
+ case kRiscvMaxD:
+ return Latency::MAX_D;
+ case kRiscvMinD:
+ return Latency::MIN_D;
+ case kRiscvFloat64RoundDown:
+ case kRiscvFloat64RoundTruncate:
+ case kRiscvFloat64RoundUp:
+ case kRiscvFloat64RoundTiesEven:
+ return Float64RoundLatency();
+ case kRiscvFloat32RoundDown:
+ case kRiscvFloat32RoundTruncate:
+ case kRiscvFloat32RoundUp:
+ case kRiscvFloat32RoundTiesEven:
+ return Float32RoundLatency();
+ case kRiscvFloat32Max:
+ return Float32MaxLatency();
+ case kRiscvFloat64Max:
+ return Float64MaxLatency();
+ case kRiscvFloat32Min:
+ return Float32MinLatency();
+ case kRiscvFloat64Min:
+ return Float64MinLatency();
+ case kRiscvFloat64SilenceNaN:
+ return Latency::SUB_D;
+ case kRiscvCvtSD:
+ return Latency::CVT_S_D;
+ case kRiscvCvtDS:
+ return Latency::CVT_D_S;
+ case kRiscvCvtDW:
+ return Latency::MOVT_FREG + Latency::CVT_D_W;
+ case kRiscvCvtSW:
+ return Latency::MOVT_FREG + Latency::CVT_S_W;
+ case kRiscvCvtSUw:
+ return 1 + Latency::MOVT_DREG + Latency::CVT_S_L;
+ case kRiscvCvtSL:
+ return Latency::MOVT_DREG + Latency::CVT_S_L;
+ case kRiscvCvtDL:
+ return Latency::MOVT_DREG + Latency::CVT_D_L;
+ case kRiscvCvtDUw:
+ return 1 + Latency::MOVT_DREG + Latency::CVT_D_L;
+ case kRiscvCvtDUl:
+ return 2 * Latency::BRANCH + 3 + 2 * Latency::MOVT_DREG +
+ 2 * Latency::CVT_D_L + Latency::ADD_D;
+ case kRiscvCvtSUl:
+ return 2 * Latency::BRANCH + 3 + 2 * Latency::MOVT_DREG +
+ 2 * Latency::CVT_S_L + Latency::ADD_S;
+ case kRiscvFloorWD:
+ return Latency::FLOOR_W_D + Latency::MOVF_FREG;
+ case kRiscvCeilWD:
+ return Latency::CEIL_W_D + Latency::MOVF_FREG;
+ case kRiscvRoundWD:
+ return Latency::ROUND_W_D + Latency::MOVF_FREG;
+ case kRiscvTruncWD:
+ return Latency::TRUNC_W_D + Latency::MOVF_FREG;
+ case kRiscvFloorWS:
+ return Latency::FLOOR_W_S + Latency::MOVF_FREG;
+ case kRiscvCeilWS:
+ return Latency::CEIL_W_S + Latency::MOVF_FREG;
+ case kRiscvRoundWS:
+ return Latency::ROUND_W_S + Latency::MOVF_FREG;
+ case kRiscvTruncWS:
+ return Latency::TRUNC_W_S + Latency::MOVF_FREG + 2 + MovnLatency();
+ case kRiscvTruncLS:
+ return TruncLSLatency(instr->OutputCount() > 1);
+ case kRiscvTruncLD:
+ return TruncLDLatency(instr->OutputCount() > 1);
+ case kRiscvTruncUwD:
+ // Estimated max.
+ return CompareF64Latency() + 2 * Latency::BRANCH +
+ 2 * Latency::TRUNC_W_D + Latency::SUB_D + OrLatency() +
+ Latency::MOVT_FREG + Latency::MOVF_FREG + Latency::MOVT_HIGH_FREG +
+ 1;
+ case kRiscvTruncUwS:
+ // Estimated max.
+ return CompareF32Latency() + 2 * Latency::BRANCH +
+ 2 * Latency::TRUNC_W_S + Latency::SUB_S + OrLatency() +
+ Latency::MOVT_FREG + 2 * Latency::MOVF_FREG + 2 + MovzLatency();
+ case kRiscvTruncUlS:
+ return TruncUlSLatency();
+ case kRiscvTruncUlD:
+ return TruncUlDLatency();
+ case kRiscvBitcastDL:
+ return Latency::MOVF_HIGH_DREG;
+ case kRiscvBitcastLD:
+ return Latency::MOVT_DREG;
+ case kRiscvFloat64ExtractLowWord32:
+ return Latency::MOVF_FREG;
+ case kRiscvFloat64InsertLowWord32:
+ return Latency::MOVF_HIGH_FREG + Latency::MOVT_FREG +
+ Latency::MOVT_HIGH_FREG;
+ case kRiscvFloat64ExtractHighWord32:
+ return Latency::MOVF_HIGH_FREG;
+ case kRiscvFloat64InsertHighWord32:
+ return Latency::MOVT_HIGH_FREG;
+ case kRiscvSignExtendByte:
+ case kRiscvSignExtendShort:
+ return 1;
+ case kRiscvLbu:
+ case kRiscvLb:
+ case kRiscvLhu:
+ case kRiscvLh:
+ case kRiscvLwu:
+ case kRiscvLw:
+ case kRiscvLd:
+ case kRiscvSb:
+ case kRiscvSh:
+ case kRiscvSw:
+ case kRiscvSd:
+ return AlignedMemoryLatency();
+ case kRiscvLoadFloat:
+ return LoadFloatLatency();
+ case kRiscvLoadDouble:
+ return LoadDoubleLatency();
+ case kRiscvStoreFloat:
+ return StoreFloatLatency();
+ case kRiscvStoreDouble:
+ return StoreDoubleLatency();
+ case kRiscvUlhu:
+ case kRiscvUlh:
+ return UlhuLatency();
+ case kRiscvUlwu:
+ return UlwuLatency();
+ case kRiscvUlw:
+ return UlwLatency();
+ case kRiscvUld:
+ return UldLatency();
+ case kRiscvULoadFloat:
+ return ULoadFloatLatency();
+ case kRiscvULoadDouble:
+ return ULoadDoubleLatency();
+ case kRiscvUsh:
+ return UshLatency();
+ case kRiscvUsw:
+ return UswLatency();
+ case kRiscvUsd:
+ return UsdLatency();
+ case kRiscvUStoreFloat:
+ return UStoreFloatLatency();
+ case kRiscvUStoreDouble:
+ return UStoreDoubleLatency();
+ case kRiscvPush: {
+ int latency = 0;
+ if (instr->InputAt(0)->IsFPRegister()) {
+ latency = StoreDoubleLatency() + Sub64Latency(false);
+ } else {
+ latency = PushLatency();
+ }
+ return latency;
+ }
+ case kRiscvPeek: {
+ int latency = 0;
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ auto op = LocationOperand::cast(instr->OutputAt(0));
+ switch (op->representation()) {
+ case MachineRepresentation::kFloat64:
+ latency = LoadDoubleLatency();
+ break;
+ case MachineRepresentation::kFloat32:
+ latency = Latency::LOAD_FLOAT;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ latency = AlignedMemoryLatency();
+ }
+ return latency;
+ }
+ case kRiscvStackClaim:
+ return Sub64Latency(false);
+ case kRiscvStoreToStackSlot: {
+ int latency = 0;
+ if (instr->InputAt(0)->IsFPRegister()) {
+ if (instr->InputAt(0)->IsSimd128Register()) {
+ latency = 1; // Estimated value.
+ } else {
+ latency = StoreDoubleLatency();
+ }
+ } else {
+ latency = AlignedMemoryLatency();
+ }
+ return latency;
+ }
+ case kRiscvByteSwap64:
+ return ByteSwapSignedLatency();
+ case kRiscvByteSwap32:
+ return ByteSwapSignedLatency();
+ case kWord32AtomicLoadInt8:
+ case kWord32AtomicLoadUint8:
+ case kWord32AtomicLoadInt16:
+ case kWord32AtomicLoadUint16:
+ case kWord32AtomicLoadWord32:
+ return 2;
+ case kWord32AtomicStoreWord8:
+ case kWord32AtomicStoreWord16:
+ case kWord32AtomicStoreWord32:
+ return 3;
+ case kWord32AtomicExchangeInt8:
+ return Word32AtomicExchangeLatency(true, 8);
+ case kWord32AtomicExchangeUint8:
+ return Word32AtomicExchangeLatency(false, 8);
+ case kWord32AtomicExchangeInt16:
+ return Word32AtomicExchangeLatency(true, 16);
+ case kWord32AtomicExchangeUint16:
+ return Word32AtomicExchangeLatency(false, 16);
+ case kWord32AtomicExchangeWord32:
+ return 2 + LlLatency(0) + 1 + ScLatency(0) + BranchShortLatency() + 1;
+ case kWord32AtomicCompareExchangeInt8:
+ return Word32AtomicCompareExchangeLatency(true, 8);
+ case kWord32AtomicCompareExchangeUint8:
+ return Word32AtomicCompareExchangeLatency(false, 8);
+ case kWord32AtomicCompareExchangeInt16:
+ return Word32AtomicCompareExchangeLatency(true, 16);
+ case kWord32AtomicCompareExchangeUint16:
+ return Word32AtomicCompareExchangeLatency(false, 16);
+ case kWord32AtomicCompareExchangeWord32:
+ return 3 + LlLatency(0) + BranchShortLatency() + 1 + ScLatency(0) +
+ BranchShortLatency() + 1;
+ case kRiscvAssertEqual:
+ return AssertLatency();
+ default:
+ return 1;
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
new file mode 100644
index 0000000000..4d86fd02a3
--- /dev/null
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
@@ -0,0 +1,3034 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/compiler/backend/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define TRACE_UNIMPL() \
+ PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+// Adds RISC-V-specific methods for generating InstructionOperands.
+class RiscvOperandGenerator final : public OperandGenerator {
+ public:
+ explicit RiscvOperandGenerator(InstructionSelector* selector)
+ : OperandGenerator(selector) {}
+
+ InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
+ if (CanBeImmediate(node, opcode)) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ // Use the zero register if the node has the immediate value zero, otherwise
+ // assign a register.
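+ // RISC-V hardwires x0/zero to 0, so an immediate-zero operand can be used
+ // directly instead of materializing the constant in a scratch register.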
+ InstructionOperand UseRegisterOrImmediateZero(Node* node) {
+ if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
+ (IsFloatConstant(node) &&
+ (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ bool IsIntegerConstant(Node* node) {
+ return (node->opcode() == IrOpcode::kInt32Constant) ||
+ (node->opcode() == IrOpcode::kInt64Constant);
+ }
+
+ int64_t GetIntegerConstantValue(Node* node) {
+ if (node->opcode() == IrOpcode::kInt32Constant) {
+ return OpParameter<int32_t>(node->op());
+ }
+ DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
+ return OpParameter<int64_t>(node->op());
+ }
+
+ bool IsFloatConstant(Node* node) {
+ return (node->opcode() == IrOpcode::kFloat32Constant) ||
+ (node->opcode() == IrOpcode::kFloat64Constant);
+ }
+
+ double GetFloatConstantValue(Node* node) {
+ if (node->opcode() == IrOpcode::kFloat32Constant) {
+ return OpParameter<float>(node->op());
+ }
+ DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
+ return OpParameter<double>(node->op());
+ }
+
+ bool CanBeImmediate(Node* node, InstructionCode mode) {
+ return IsIntegerConstant(node) &&
+ CanBeImmediate(GetIntegerConstantValue(node), mode);
+ }
+
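+ // The ranges below follow the RISC-V encodings: shift amounts use a 5-bit
+ // shamt for 32-bit shifts and a 6-bit shamt for 64-bit shifts, and I-type
+ // ALU immediates are 12-bit signed. Memory opcodes accept int32 offsets
+ // here since offsets that do not fit in 12 bits can be split later (see
+ // AdjustBaseAndOffsetLatency in the scheduler for the assumed cost).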
+ bool CanBeImmediate(int64_t value, InstructionCode opcode) {
+ switch (ArchOpcodeField::decode(opcode)) {
+ case kRiscvShl32:
+ case kRiscvSar32:
+ case kRiscvShr32:
+ return is_uint5(value);
+ case kRiscvShl64:
+ case kRiscvSar64:
+ case kRiscvShr64:
+ return is_uint6(value);
+ case kRiscvAdd32:
+ case kRiscvAnd32:
+ case kRiscvAnd:
+ case kRiscvAdd64:
+ case kRiscvOr32:
+ case kRiscvOr:
+ case kRiscvTst:
+ case kRiscvXor:
+ return is_int12(value);
+ case kRiscvLb:
+ case kRiscvLbu:
+ case kRiscvSb:
+ case kRiscvLh:
+ case kRiscvLhu:
+ case kRiscvSh:
+ case kRiscvLw:
+ case kRiscvSw:
+ case kRiscvLd:
+ case kRiscvSd:
+ case kRiscvLoadFloat:
+ case kRiscvStoreFloat:
+ case kRiscvLoadDouble:
+ case kRiscvStoreDouble:
+ return is_int32(value);
+ default:
+ return is_int12(value);
+ }
+ }
+
+ private:
+ bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
+ TRACE_UNIMPL();
+ return false;
+ }
+};
+
+static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ RiscvOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ RiscvOperandGenerator g(selector);
+ int32_t imm = OpParameter<int32_t>(node->op());
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
+}
+
+static void VisitSimdShift(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ RiscvOperandGenerator g(selector);
+ if (g.IsIntegerConstant(node->InputAt(1))) {
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)));
+ } else {
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+ }
+}
+
+static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ RiscvOperandGenerator g(selector);
+ int32_t imm = OpParameter<int32_t>(node->op());
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
+ g.UseRegister(node->InputAt(1)));
+}
+
+static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ RiscvOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
+static void VisitUniqueRRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ RiscvOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+
+void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+ RiscvOperandGenerator g(selector);
+ selector->Emit(
+ opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
+}
+
+static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ RiscvOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseOperand(node->InputAt(1), opcode));
+}
+
+struct ExtendingLoadMatcher {
+ ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
+ : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
+ Initialize(node);
+ }
+
+ bool Matches() const { return matches_; }
+
+ Node* base() const {
+ DCHECK(Matches());
+ return base_;
+ }
+ int64_t immediate() const {
+ DCHECK(Matches());
+ return immediate_;
+ }
+ ArchOpcode opcode() const {
+ DCHECK(Matches());
+ return opcode_;
+ }
+
+ private:
+ bool matches_;
+ InstructionSelector* selector_;
+ Node* base_;
+ int64_t immediate_;
+ ArchOpcode opcode_;
+
+ void Initialize(Node* node) {
+ Int64BinopMatcher m(node);
+ // When loading a 64-bit value and shifting by 32, we should
+ // just load and sign-extend the interesting 4 bytes instead.
+ // This happens, for example, when we're loading and untagging SMIs.
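+ // For example, on little-endian targets Word64Sar(Load[base + k], 32) can
+ // be selected as a single kRiscvLw from [base + k + 4]; the matching logic
+ // below implements exactly that pattern.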
+ DCHECK(m.IsWord64Sar());
+ if (m.left().IsLoad() && m.right().Is(32) &&
+ selector_->CanCover(m.node(), m.left().node())) {
+ DCHECK_EQ(selector_->GetEffectLevel(node),
+ selector_->GetEffectLevel(m.left().node()));
+ MachineRepresentation rep =
+ LoadRepresentationOf(m.left().node()->op()).representation();
+ DCHECK_EQ(3, ElementSizeLog2Of(rep));
+ if (rep != MachineRepresentation::kTaggedSigned &&
+ rep != MachineRepresentation::kTaggedPointer &&
+ rep != MachineRepresentation::kTagged &&
+ rep != MachineRepresentation::kWord64) {
+ return;
+ }
+
+ RiscvOperandGenerator g(selector_);
+ Node* load = m.left().node();
+ Node* offset = load->InputAt(1);
+ base_ = load->InputAt(0);
+ opcode_ = kRiscvLw;
+ if (g.CanBeImmediate(offset, opcode_)) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ immediate_ = g.GetIntegerConstantValue(offset) + 4;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ immediate_ = g.GetIntegerConstantValue(offset);
+#endif
+ matches_ = g.CanBeImmediate(immediate_, kRiscvLw);
+ }
+ }
+ }
+};
+
+bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node,
+ Node* output_node) {
+ ExtendingLoadMatcher m(node, selector);
+ RiscvOperandGenerator g(selector);
+ if (m.Matches()) {
+ InstructionOperand inputs[2];
+ inputs[0] = g.UseRegister(m.base());
+ InstructionCode opcode =
+ m.opcode() | AddressingModeField::encode(kMode_MRI);
+ DCHECK(is_int32(m.immediate()));
+ inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
+ InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
+ selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
+ inputs);
+ return true;
+ }
+ return false;
+}
+
+bool TryMatchImmediate(InstructionSelector* selector,
+ InstructionCode* opcode_return, Node* node,
+ size_t* input_count_return, InstructionOperand* inputs) {
+ RiscvOperandGenerator g(selector);
+ if (g.CanBeImmediate(node, *opcode_return)) {
+ *opcode_return |= AddressingModeField::encode(kMode_MRI);
+ inputs[0] = g.UseImmediate(node);
+ *input_count_return = 1;
+ return true;
+ }
+ return false;
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, bool has_reverse_opcode,
+ InstructionCode reverse_opcode,
+ FlagsContinuation* cont) {
+ RiscvOperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand inputs[2];
+ size_t input_count = 0;
+ InstructionOperand outputs[1];
+ size_t output_count = 0;
+
+ if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
+ &inputs[1])) {
+ inputs[0] = g.UseRegister(m.left().node());
+ input_count++;
+ } else if (has_reverse_opcode &&
+ TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
+ &input_count, &inputs[1])) {
+ inputs[0] = g.UseRegister(m.right().node());
+ opcode = reverse_opcode;
+ input_count++;
+ } else {
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
+ }
+
+ if (cont->IsDeoptimize()) {
+ // If we can deoptimize as a result of the binop, we need to make sure that
+ // the deopt inputs are not overwritten by the binop result. One way
+ // to achieve that is to declare the output register as same-as-first.
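+ // (DefineSameAsFirst below is what requests that same-as-first constraint
+ // from the register allocator.)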
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ } else {
+ outputs[output_count++] = g.DefineAsRegister(node);
+ }
+
+ DCHECK_NE(0u, input_count);
+ DCHECK_EQ(1u, output_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+ DCHECK_GE(arraysize(outputs), output_count);
+
+ selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
+ inputs, cont);
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, bool has_reverse_opcode,
+ InstructionCode reverse_opcode) {
+ FlagsContinuation cont;
+ VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ VisitBinop(selector, node, opcode, false, kArchNop, cont);
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ VisitBinop(selector, node, opcode, false, kArchNop);
+}
+
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int alignment = rep.alignment();
+ int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)),
+ sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
+}
+
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+}
+
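+// Emit a load with base + immediate-index addressing when the index fits the
+// opcode's immediate field; otherwise compute base + index into a temporary
+// register and load from it with a zero offset.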
+void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
+ Node* output = nullptr) {
+ RiscvOperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ if (g.CanBeImmediate(index, opcode)) {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(output == nullptr ? node : output),
+ g.UseRegister(base), g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(output == nullptr ? node : output),
+ addr_reg, g.TempImmediate(0));
+ }
+}
+
+void InstructionSelector::VisitStoreLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitLoadLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitLoadTransform(Node* node) {
+ LoadTransformParameters params = LoadTransformParametersOf(node->op());
+
+ InstructionCode opcode = kArchNop;
+ switch (params.transformation) {
+ case LoadTransformation::kS128Load8Splat:
+ opcode = kRiscvS128Load8Splat;
+ break;
+ case LoadTransformation::kS128Load16Splat:
+ opcode = kRiscvS128Load16Splat;
+ break;
+ case LoadTransformation::kS128Load32Splat:
+ opcode = kRiscvS128Load32Splat;
+ break;
+ case LoadTransformation::kS128Load64Splat:
+ opcode = kRiscvS128Load64Splat;
+ break;
+ case LoadTransformation::kS128Load8x8S:
+ opcode = kRiscvS128Load8x8S;
+ break;
+ case LoadTransformation::kS128Load8x8U:
+ opcode = kRiscvS128Load8x8U;
+ break;
+ case LoadTransformation::kS128Load16x4S:
+ opcode = kRiscvS128Load16x4S;
+ break;
+ case LoadTransformation::kS128Load16x4U:
+ opcode = kRiscvS128Load16x4U;
+ break;
+ case LoadTransformation::kS128Load32x2S:
+ opcode = kRiscvS128Load32x2S;
+ break;
+ case LoadTransformation::kS128Load32x2U:
+ opcode = kRiscvS128Load32x2U;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+
+ EmitLoad(this, node, opcode);
+}
+
+void InstructionSelector::VisitLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+
+ InstructionCode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
+ opcode = kRiscvLoadFloat;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kRiscvLoadDouble;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kRiscvLbu : kRiscvLb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kRiscvLhu : kRiscvLh;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = load_rep.IsUnsigned() ? kRiscvLwu : kRiscvLw;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kRiscvLd;
+ break;
+ case MachineRepresentation::kSimd128:
+ opcode = kRiscvMsaLd;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ }
+ if (node->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
+
+ EmitLoad(this, node, opcode);
+}
+
+void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitStore(Node* node) {
+ RiscvOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
+
+ // TODO(riscv): I guess this could be done in a better way.
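+ // Stores that require a write barrier are emitted as the generic
+ // kArchStoreWithWriteBarrier instruction; all other stores pick a plain
+ // RISC-V store opcode below.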
+ if (write_barrier_kind != kNoWriteBarrier &&
+ V8_LIKELY(!FLAG_disable_write_barriers)) {
+ DCHECK(CanBeTaggedPointer(rep));
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode =
+ WriteBarrierKindToRecordWriteMode(write_barrier_kind);
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ } else {
+ ArchOpcode opcode;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kRiscvStoreFloat;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kRiscvStoreDouble;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kRiscvSb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kRiscvSh;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kRiscvSw;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kRiscvSd;
+ break;
+ case MachineRepresentation::kSimd128:
+ opcode = kRiscvMsaSt;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
+ }
+ }
+}
+
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord32And(Node* node) {
+ VisitBinop(this, node, kRiscvAnd32, true, kRiscvAnd32);
+}
+
+void InstructionSelector::VisitWord64And(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasResolvedValue()) {
+ uint64_t mask = m.right().ResolvedValue();
+ uint32_t mask_width = base::bits::CountPopulation(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
+
+ // Match And(Shr(x, imm), mask), i.e. a bit-field extract where the mask
+ // occupies the least significant bits.
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue()) {
+ // Any shift value can match; int64 shifts use `value % 64`.
+ uint32_t lsb =
+ static_cast<uint32_t>(mleft.right().ResolvedValue() & 0x3F);
+
+ // An extract cannot reach past the register size; however, since the
+ // shift would have introduced zeros in those positions anyway, a smaller
+ // mask gives the same result and the remaining bits are zero.
+ if (lsb + mask_width > 64) mask_width = 64 - lsb;
+
+ if (lsb == 0 && mask_width == 64) {
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
+ return;
+ }
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
+ VisitBinop(this, node, kRiscvAnd, true, kRiscvAnd);
+}
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+ VisitBinop(this, node, kRiscvOr32, true, kRiscvOr32);
+}
+
+void InstructionSelector::VisitWord64Or(Node* node) {
+ VisitBinop(this, node, kRiscvOr, true, kRiscvOr);
+}
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasResolvedValue()) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvNor32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+ // Use Nor for bit negation and eliminate constant loading for xori.
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvNor32, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0));
+ return;
+ }
+ VisitBinop(this, node, kRiscvXor32, true, kRiscvXor32);
+}
+
+void InstructionSelector::VisitWord64Xor(Node* node) {
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int64BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasResolvedValue()) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvNor, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+ // Use Nor for bit negation and eliminate constant loading for xori.
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvNor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0));
+ return;
+ }
+ VisitBinop(this, node, kRiscvXor, true, kRiscvXor);
+}
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 31)) {
+ RiscvOperandGenerator g(this);
+ Int32BinopMatcher mleft(m.left().node());
+ // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
+ // contiguous, and the shift immediate non-zero.
+ if (mleft.right().HasResolvedValue()) {
+ uint32_t mask = mleft.right().ResolvedValue();
+ uint32_t mask_width = base::bits::CountPopulation(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ uint32_t shift = m.right().ResolvedValue();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+ DCHECK_NE(0u, shift);
+ if ((shift + mask_width) >= 32) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kRiscvShl32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
+ VisitRRO(this, kRiscvShl32, node);
+}
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+ VisitRRO(this, kRiscvShr32, node);
+}
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
+ RiscvOperandGenerator g(this);
+ uint32_t sar = m.right().ResolvedValue();
+ uint32_t shl = mleft.right().ResolvedValue();
+ if ((sar == shl) && (sar == 16)) {
+ Emit(kRiscvSignExtendShort, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ } else if ((sar == shl) && (sar == 24)) {
+ Emit(kRiscvSignExtendByte, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ } else if ((sar == shl) && (sar == 32)) {
+ Emit(kRiscvShl32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, kRiscvSar32, node);
+}
+
+void InstructionSelector::VisitWord64Shl(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
+ m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
+ // There's no need to sign/zero-extend to 64-bit if we shift out the upper
+ // 32 bits anyway.
+ Emit(kRiscvShl64, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()->InputAt(0)),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ if (m.left().IsWord64And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 63)) {
+ // Match Word64Shl(Word64And(x, mask), imm) to Shl64 where the mask is
+ // contiguous, and the shift immediate non-zero.
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue()) {
+ uint64_t mask = mleft.right().ResolvedValue();
+ uint32_t mask_width = base::bits::CountPopulation(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+ uint64_t shift = m.right().ResolvedValue();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
+ DCHECK_NE(0u, shift);
+
+ if ((shift + mask_width) >= 64) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kRiscvShl64, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
+ VisitRRO(this, kRiscvShl64, node);
+}
+
+void InstructionSelector::VisitWord64Shr(Node* node) {
+ VisitRRO(this, kRiscvShr64, node);
+}
+
+void InstructionSelector::VisitWord64Sar(Node* node) {
+ if (TryEmitExtendingLoad(this, node, node)) return;
+ VisitRRO(this, kRiscvSar64, node);
+}
+
+void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64Rol(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+ VisitRRO(this, kRiscvRor32, node);
+}
+
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ VisitRR(this, kRiscvClz32, node);
+}
+
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvByteSwap64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvByteSwap32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitWord32Ctz(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvCtz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitWord64Ctz(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvCtz64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvPopcnt32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvPopcnt64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitWord64Ror(Node* node) {
+ VisitRRO(this, kRiscvRor64, node);
+}
+
+void InstructionSelector::VisitWord64Clz(Node* node) {
+ VisitRR(this, kRiscvClz64, node);
+}
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+ VisitBinop(this, node, kRiscvAdd32, true, kRiscvAdd32);
+}
+
+void InstructionSelector::VisitInt64Add(Node* node) {
+ VisitBinop(this, node, kRiscvAdd64, true, kRiscvAdd64);
+}
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ VisitBinop(this, node, kRiscvSub32);
+}
+
+void InstructionSelector::VisitInt64Sub(Node* node) {
+ VisitBinop(this, node, kRiscvSub64);
+}
+
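+// Strength-reduce multiplication by a constant: a power of two becomes a
+// shift, and (2^k - 1) becomes a shift followed by a subtraction.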
+void InstructionSelector::VisitInt32Mul(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
+ uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
+ if (base::bits::IsPowerOfTwo(value)) {
+ Emit(kRiscvShl32 | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo(value + 1)) {
+ InstructionOperand temp = g.TempRegister();
+ Emit(kRiscvShl32 | AddressingModeField::encode(kMode_None), temp,
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
+ Emit(kRiscvSub32 | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
+ return;
+ }
+ }
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher leftInput(left), rightInput(right);
+ if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
+ // Combine the untagging shifts with a 64-bit multiply-high
+ // (kRiscvMulHigh64).
+ Emit(kRiscvMulHigh64, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ VisitRRR(this, kRiscvMul32, node);
+}
+
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+ VisitRRR(this, kRiscvMulHigh32, node);
+}
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+ VisitRRR(this, kRiscvMulHighU32, node);
+}
+
+void InstructionSelector::VisitInt64Mul(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ // TODO(dusmil): Add optimization for shifts larger than 32.
+ if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
+ uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
+ if (base::bits::IsPowerOfTwo(value)) {
+ Emit(kRiscvShl64 | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo(value + 1)) {
+ InstructionOperand temp = g.TempRegister();
+ Emit(kRiscvShl64 | AddressingModeField::encode(kMode_None), temp,
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
+ Emit(kRiscvSub64 | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
+ return;
+ }
+ }
+ Emit(kRiscvMul64, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher rightInput(right), leftInput(left);
+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
+ // Combine both shifted operands into a 64-bit divide (kRiscvDiv64).
+ Emit(kRiscvDiv64, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ Emit(kRiscvDiv32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint32Div(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Emit(kRiscvDivU32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher rightInput(right), leftInput(left);
+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
+ // Combine both shifted operands into a 64-bit modulus (kRiscvMod64).
+ Emit(kRiscvMod64, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ Emit(kRiscvMod32, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint32Mod(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Emit(kRiscvModU32, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt64Div(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kRiscvDiv64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint64Div(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kRiscvDivU64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt64Mod(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kRiscvMod64, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint64Mod(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kRiscvModU64, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+ VisitRR(this, kRiscvCvtDS, node);
+}
+
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+ VisitRR(this, kRiscvCvtSW, node);
+}
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+ VisitRR(this, kRiscvCvtSUw, node);
+}
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+ VisitRR(this, kRiscvCvtDW, node);
+}
+
+void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) {
+ VisitRR(this, kRiscvCvtDL, node);
+}
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+ VisitRR(this, kRiscvCvtDUw, node);
+}
+
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+ RiscvOperandGenerator g(this);
+ InstructionCode opcode = kRiscvTruncWS;
+ TruncateKind kind = OpParameter<TruncateKind>(node->op());
+ if (kind == TruncateKind::kSetOverflowToMin) {
+ opcode |= MiscField::encode(true);
+ }
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+ RiscvOperandGenerator g(this);
+ InstructionCode opcode = kRiscvTruncUwS;
+ TruncateKind kind = OpParameter<TruncateKind>(node->op());
+ if (kind == TruncateKind::kSetOverflowToMin) {
+ opcode |= MiscField::encode(true);
+ }
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+ RiscvOperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
+ // which does rounding and conversion to integer format.
+ if (CanCover(node, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kFloat64RoundDown:
+ Emit(kRiscvFloorWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundUp:
+ Emit(kRiscvCeilWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundTiesEven:
+ Emit(kRiscvRoundWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundTruncate:
+ Emit(kRiscvTruncWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ default:
+ break;
+ }
+ if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
+ Node* next = value->InputAt(0);
+ if (CanCover(value, next)) {
+ // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
+ switch (next->opcode()) {
+ case IrOpcode::kFloat32RoundDown:
+ Emit(kRiscvFloorWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundUp:
+ Emit(kRiscvCeilWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundTiesEven:
+ Emit(kRiscvRoundWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundTruncate:
+ Emit(kRiscvTruncWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ default:
+ Emit(kRiscvTruncWS, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ } else {
+ // Match float32 -> float64 -> int32 representation change path.
+ Emit(kRiscvTruncWS, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ }
+ }
+ VisitRR(this, kRiscvTruncWD, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
+ VisitRR(this, kRiscvTruncLD, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+ VisitRR(this, kRiscvTruncUwD, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
+ VisitRR(this, kRiscvTruncUlD, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+ VisitRR(this, kRiscvTruncUwD, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
+ RiscvOperandGenerator g(this);
+ InstructionCode opcode = kRiscvTruncLD;
+ TruncateKind kind = OpParameter<TruncateKind>(node->op());
+ if (kind == TruncateKind::kSetOverflowToMin) {
+ opcode |= MiscField::encode(true);
+ }
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+ RiscvOperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ this->Emit(kRiscvTruncLS, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ RiscvOperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kRiscvTruncLD, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ RiscvOperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kRiscvTruncUlS, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ RiscvOperandGenerator g(this);
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kRiscvTruncUlD, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
+ UNIMPLEMENTED();
+}
+
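+// Fold the sign extension into a preceding load where possible; otherwise a
+// 32-bit shift by zero sign-extends the value on RV64.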
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+ Node* value = node->InputAt(0);
+ if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
+ // Generate sign-extending load.
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ InstructionCode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kRiscvLbu : kRiscvLb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kRiscvLhu : kRiscvLh;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kRiscvLw;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ EmitLoad(this, value, opcode, node);
+ } else {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvShl32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
+ }
+}
+
+bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
+ DCHECK_NE(node->opcode(), IrOpcode::kPhi);
+ if (node->opcode() == IrOpcode::kLoad) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ if (load_rep.IsUnsigned()) {
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ return true;
+ default:
+ return false;
+ }
+ }
+ }
+
+ // All other 32-bit operations sign-extend into the upper 32 bits.
+ return false;
+}
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+ RiscvOperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ if (ZeroExtendsWord32ToWord64(value)) {
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ return;
+ }
+ Emit(kRiscvZeroExtendWord, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+ RiscvOperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ if (CanCover(node, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord64Sar: {
+ if (CanCoverTransitively(node, value, value->InputAt(0)) &&
+ TryEmitExtendingLoad(this, value, node)) {
+ return;
+ } else {
+ Int64BinopMatcher m(value);
+ if (m.right().IsInRange(32, 63)) {
+ // After Smi untagging there is no need to truncate; combine the
+ // sequence.
+ Emit(kRiscvSar64, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ // The semantics of this machine IR are not clear: x86 zero-extends the
+ // truncated value, while ARM treats it as a nop and leaves the upper 32 bits
+ // undefined. On RISC-V we sign-extend the truncated value.
+ Emit(kRiscvSignExtendWord, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ RiscvOperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
+ // instruction.
+ if (CanCover(node, value) &&
+ value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
+ Emit(kRiscvCvtSW, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ VisitRR(this, kRiscvCvtSD, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, kArchTruncateDoubleToI, node);
+}
+
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRR(this, kRiscvTruncWD, node);
+}
+
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ VisitRR(this, kRiscvCvtSL, node);
+}
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ VisitRR(this, kRiscvCvtDL, node);
+}
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ VisitRR(this, kRiscvCvtSUl, node);
+}
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ VisitRR(this, kRiscvCvtDUl, node);
+}
+
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kRiscvBitcastFloat32ToInt32, node);
+}
+
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ VisitRR(this, kRiscvBitcastDL, node);
+}
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ VisitRR(this, kRiscvBitcastInt32ToFloat32, node);
+}
+
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ VisitRR(this, kRiscvBitcastLD, node);
+}
+
+void InstructionSelector::VisitFloat32Add(Node* node) {
+ VisitRRR(this, kRiscvAddS, node);
+}
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+ VisitRRR(this, kRiscvAddD, node);
+}
+
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+ VisitRRR(this, kRiscvSubS, node);
+}
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+ VisitRRR(this, kRiscvSubD, node);
+}
+
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+ VisitRRR(this, kRiscvMulS, node);
+}
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+ VisitRRR(this, kRiscvMulD, node);
+}
+
+void InstructionSelector::VisitFloat32Div(Node* node) {
+ VisitRRR(this, kRiscvDivS, node);
+}
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+ VisitRRR(this, kRiscvDivD, node);
+}
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvModD, g.DefineAsFixed(node, fa0),
+ g.UseFixed(node->InputAt(0), fa0), g.UseFixed(node->InputAt(1), fa1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvFloat32Max, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvFloat64Max, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvFloat32Min, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvFloat64Min, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ VisitRR(this, kRiscvAbsS, node);
+}
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ VisitRR(this, kRiscvAbsD, node);
+}
+
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+ VisitRR(this, kRiscvSqrtS, node);
+}
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ VisitRR(this, kRiscvSqrtD, node);
+}
+
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kRiscvFloat32RoundDown, node);
+}
+
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRR(this, kRiscvFloat64RoundDown, node);
+}
+
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kRiscvFloat32RoundUp, node);
+}
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kRiscvFloat64RoundUp, node);
+}
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kRiscvFloat32RoundTruncate, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+ VisitRR(this, kRiscvFloat64RoundTruncate, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, kRiscvFloat32RoundTiesEven, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, kRiscvFloat64RoundTiesEven, node);
+}
+
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+ VisitRR(this, kRiscvNegS, node);
+}
+
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+ VisitRR(this, kRiscvNegD, node);
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ RiscvOperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, fa0), g.UseFixed(node->InputAt(0), fa0),
+ g.UseFixed(node->InputAt(1), fa1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ RiscvOperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, fa0), g.UseFixed(node->InputAt(0), fa1))
+ ->MarkAsCall();
+}
+
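+// For C calls, arguments are poked into the fixed argument slots; for other
+// calls, stack space is claimed first and the arguments are then stored at
+// their pointer-sized slot offsets.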
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
+ Node* node) {
+ RiscvOperandGenerator g(this);
+
+ // Prepare for C function call.
+ if (call_descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
+ call_descriptor->ParameterCount())),
+ 0, nullptr, 0, nullptr);
+
+ // Poke any stack arguments.
+ int slot = kCArgSlotCount;
+ for (PushParameter input : (*arguments)) {
+ Emit(kRiscvStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
+ g.TempImmediate(slot << kSystemPointerSizeLog2));
+ ++slot;
+ }
+ } else {
+ int push_count = static_cast<int>(call_descriptor->StackParameterCount());
+ if (push_count > 0) {
+ // Calculate the stack space needed for the pushed parameters.
+ int stack_size = 0;
+ for (PushParameter input : (*arguments)) {
+ if (input.node) {
+ stack_size += input.location.GetSizeInPointers();
+ }
+ }
+ Emit(kRiscvStackClaim, g.NoOutput(),
+ g.TempImmediate(stack_size << kSystemPointerSizeLog2));
+ }
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node) {
+ Emit(kRiscvStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
+ g.TempImmediate(static_cast<int>(n << kSystemPointerSizeLog2)));
+ }
+ }
+ }
+}
+
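+// Results returned in caller frame slots are read back with kRiscvPeek at
+// their reverse-slot offsets.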
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
+ RiscvOperandGenerator g(this);
+
+ int reverse_slot = 1;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ // Skip any alignment holes in nodes.
+ if (output.node != nullptr) {
+ DCHECK(!call_descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+ Emit(kRiscvPeek, g.DefineAsRegister(output.node),
+ g.UseImmediate(reverse_slot));
+ }
+ reverse_slot += output.location.GetSizeInPointers();
+ }
+}
+
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
+
+void InstructionSelector::VisitUnalignedLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ RiscvOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ ArchOpcode opcode;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
+ opcode = kRiscvULoadFloat;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kRiscvULoadDouble;
+ break;
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kRiscvLbu : kRiscvLb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kRiscvUlhu : kRiscvUlh;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = load_rep.IsUnsigned() ? kRiscvUlwu : kRiscvUlw;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kRiscvUld;
+ break;
+ case MachineRepresentation::kSimd128:
+ opcode = kRiscvMsaLd;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+ }
+}
+
+void InstructionSelector::VisitUnalignedStore(Node* node) {
+ RiscvOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
+ ArchOpcode opcode;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kRiscvUStoreFloat;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kRiscvUStoreDouble;
+ break;
+ case MachineRepresentation::kWord8:
+ opcode = kRiscvSb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kRiscvUsh;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kRiscvUsw;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kRiscvUsd;
+ break;
+ case MachineRepresentation::kSimd128:
+ opcode = kRiscvMsaSt;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
+ }
+}
+
+namespace {
+
+// Shared routine for multiple compare operations.
+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand left, InstructionOperand right,
+ FlagsContinuation* cont) {
+ selector->EmitWithContinuation(opcode, left, right, cont);
+}
+
+// Shared routine for multiple float32 compare operations.
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ RiscvOperandGenerator g(selector);
+ Float32BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kRiscvCmpS, lhs, rhs, cont);
+}
+
+// Shared routine for multiple float64 compare operations.
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ RiscvOperandGenerator g(selector);
+ Float64BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kRiscvCmpD, lhs, rhs, cont);
+}
+
+// Shared routine for multiple word compare operations.
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont,
+ bool commutative) {
+ RiscvOperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // Match immediates on left or right side of comparison.
+ if (g.CanBeImmediate(right, opcode)) {
+ if (opcode == kRiscvTst) {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+ cont);
+ } else {
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ }
+ } else if (g.CanBeImmediate(left, opcode)) {
+ if (!commutative) cont->Commute();
+ if (opcode == kRiscvTst) {
+ VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+ cont);
+ } else {
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
+ }
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+ cont);
+ }
+}
+
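+// Returns true if the node is known to produce a zero-extended (unsigned)
+// 32-bit value.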
+bool IsNodeUnsigned(Node* n) {
+ NodeMatcher m(n);
+
+ if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() ||
+ m.IsProtectedLoad() || m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
+ LoadRepresentation load_rep = LoadRepresentationOf(n->op());
+ return load_rep.IsUnsigned();
+ } else {
+ return m.IsUint32Div() || m.IsUint32LessThan() ||
+ m.IsUint32LessThanOrEqual() || m.IsUint32Mod() ||
+ m.IsUint32MulHigh() || m.IsChangeFloat64ToUint32() ||
+ m.IsTruncateFloat64ToUint32() || m.IsTruncateFloat32ToUint32();
+ }
+}
+
+// Performs a full Word32 comparison by shifting both operands left by 32 so
+// that only the low 32 bits take part in the comparison.
+void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ RiscvOperandGenerator g(selector);
+ InstructionOperand leftOp = g.TempRegister();
+ InstructionOperand rightOp = g.TempRegister();
+
+ selector->Emit(kRiscvShl64, leftOp, g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(32));
+ selector->Emit(kRiscvShl64, rightOp, g.UseRegister(node->InputAt(1)),
+ g.TempImmediate(32));
+
+ VisitCompare(selector, opcode, leftOp, rightOp, cont);
+}
+
+void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode,
+ FlagsContinuation* cont) {
+ if (FLAG_debug_code) {
+ RiscvOperandGenerator g(selector);
+ InstructionOperand leftOp = g.TempRegister();
+ InstructionOperand rightOp = g.TempRegister();
+ InstructionOperand optimizedResult = g.TempRegister();
+ InstructionOperand fullResult = g.TempRegister();
+ FlagsCondition condition = cont->condition();
+ InstructionCode testOpcode = opcode |
+ FlagsConditionField::encode(condition) |
+ FlagsModeField::encode(kFlags_set);
+
+ selector->Emit(testOpcode, optimizedResult, g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+
+ selector->Emit(kRiscvShl64, leftOp, g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(32));
+ selector->Emit(kRiscvShl64, rightOp, g.UseRegister(node->InputAt(1)),
+ g.TempImmediate(32));
+ selector->Emit(testOpcode, fullResult, leftOp, rightOp);
+
+ selector->Emit(kRiscvAssertEqual, g.NoOutput(), optimizedResult, fullResult,
+ g.TempImmediate(static_cast<int>(
+ AbortReason::kUnsupportedNonPrimitiveCompare)));
+ }
+
+ VisitWordCompare(selector, node, opcode, cont, false);
+}
+
+void VisitWord32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ // RISC-V has no dedicated Word32 compare instructions. Instead it relies on
+ // the register values being correctly sign-extended and uses Word64
+ // comparisons. This is correct in most cases, but breaks when comparing a
+ // signed operand with an unsigned one. We could simulate a full Word32
+ // compare everywhere, but that would add unnecessary overhead, since
+ // unsigned integers are rarely used in JavaScript. The approach taken here
+ // is to detect comparisons that mix signed and unsigned operands and to
+ // perform a full Word32 compare only in those cases. It is not complete,
+ // because it may miss cases where a full Word32 compare is needed, so it is
+ // essentially a hack. When calling a host function in the simulator, an
+ // int32 return value is not sign-extended to int64, because the simulator
+ // cannot tell whether the function returns an int32 or an int64; in that
+ // case we also need a full Word32 compare.
+#ifndef USE_SIMULATOR
+ if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) {
+#else
+ if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1)) ||
+ node->InputAt(0)->opcode() == IrOpcode::kCall ||
+ node->InputAt(1)->opcode() == IrOpcode::kCall) {
+#endif
+ VisitFullWord32Compare(selector, node, kRiscvCmp, cont);
+ } else {
+ VisitOptimizedWord32Compare(selector, node, kRiscvCmp, cont);
+ }
+}
+
+void VisitWord64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ VisitWordCompare(selector, node, kRiscvCmp, cont, false);
+}
+
+void EmitWordCompareZero(InstructionSelector* selector, Node* value,
+ FlagsContinuation* cont) {
+ RiscvOperandGenerator g(selector);
+ selector->EmitWithContinuation(kRiscvCmp, g.UseRegister(value),
+ g.TempImmediate(0), cont);
+}
+
+void VisitAtomicLoad(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ RiscvOperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ if (g.CanBeImmediate(index, opcode)) {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base),
+ g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+ }
+}
+
+void VisitAtomicStore(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ RiscvOperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ if (g.CanBeImmediate(index, opcode)) {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.NoOutput(), addr_reg, g.TempImmediate(0),
+ g.UseRegisterOrImmediateZero(value));
+ }
+}
+
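+// The atomic exchange, compare-exchange and binop helpers pass base, index
+// and value(s) in unique registers and reserve scratch registers for the
+// code generator.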
+void VisitAtomicExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ RiscvOperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[3];
+ temp[0] = g.TempRegister();
+ temp[1] = g.TempRegister();
+ temp[2] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
+}
+
+void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ RiscvOperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(old_value);
+ inputs[input_count++] = g.UseUniqueRegister(new_value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[3];
+ temp[0] = g.TempRegister();
+ temp[1] = g.TempRegister();
+ temp[2] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
+}
+
+void VisitAtomicBinop(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ RiscvOperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temps[4];
+ temps[0] = g.TempRegister();
+ temps[1] = g.TempRegister();
+ temps[2] = g.TempRegister();
+ temps[3] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
+}
+
+} // namespace
+
+void InstructionSelector::VisitStackPointerGreaterThan(
+ Node* node, FlagsContinuation* cont) {
+ StackCheckKind kind = StackCheckKindOf(node->op());
+ InstructionCode opcode =
+ kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
+
+ RiscvOperandGenerator g(this);
+
+ // No outputs.
+ InstructionOperand* const outputs = nullptr;
+ const int output_count = 0;
+
+ // Applying an offset to this stack check requires a temp register. Offsets
+ // are only applied to the first stack check. If applying an offset, we must
+ // ensure the input and temp registers do not alias, thus kUniqueRegister.
+ InstructionOperand temps[] = {g.TempRegister()};
+ const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 1 : 0);
+ const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
+ ? OperandGenerator::kUniqueRegister
+ : OperandGenerator::kRegister;
+
+ Node* const value = node->InputAt(0);
+ InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
+ static constexpr int input_count = arraysize(inputs);
+
+ EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
+ temp_count, temps, cont);
+}
+
+// Shared routine for word comparisons against zero.
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
+ // Try to combine with comparisons against 0 by simply inverting the branch.
+ while (CanCover(user, value)) {
+ if (value->opcode() == IrOpcode::kWord32Equal) {
+ Int32BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+ user = value;
+ value = m.left().node();
+ } else if (value->opcode() == IrOpcode::kWord64Equal) {
+ Int64BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+ user = value;
+ value = m.left().node();
+ } else {
+ break;
+ }
+
+ cont->Negate();
+ }
+
+ if (CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord32Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kInt32LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kInt32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kUint32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kUint32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kWord64Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kInt64LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kInt64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kUint64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kUint64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kFloat32Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat32Compare(this, value, cont);
+ case IrOpcode::kFloat32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitFloat32Compare(this, value, cont);
+ case IrOpcode::kFloat32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitFloat32Compare(this, value, cont);
+ case IrOpcode::kFloat64Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat64Compare(this, value, cont);
+ case IrOpcode::kFloat64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitFloat64Compare(this, value, cont);
+ case IrOpcode::kFloat64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitFloat64Compare(this, value, cont);
+ case IrOpcode::kProjection:
+ // Check if this is the overflow output projection of an
+ // <Operation>WithOverflow node.
+ if (ProjectionIndexOf(value->op()) == 1u) {
+          // We cannot combine the <Operation>WithOverflow with this branch
+          // unless the 0th projection (the use of the actual value of the
+          // <Operation>) is either nullptr, which means there's no use of the
+          // actual value, or was already defined, which means it is scheduled
+          // *AFTER* this branch.
+ Node* const node = value->InputAt(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
+ if (result == nullptr || IsDefined(result)) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kRiscvAdd64, cont);
+ case IrOpcode::kInt32SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kRiscvSub64, cont);
+ case IrOpcode::kInt32MulWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kRiscvMulOvf32, cont);
+ case IrOpcode::kInt64AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kRiscvAddOvf64, cont);
+ case IrOpcode::kInt64SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kRiscvSubOvf64, cont);
+ default:
+ break;
+ }
+ }
+ }
+ break;
+ case IrOpcode::kWord32And:
+ case IrOpcode::kWord64And:
+ return VisitWordCompare(this, value, kRiscvTst, cont, true);
+ case IrOpcode::kStackPointerGreaterThan:
+ cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
+ return VisitStackPointerGreaterThan(value, cont);
+ default:
+ break;
+ }
+ }
+
+  // Continuation could not be combined with a compare; emit compare against 0.
+ EmitWordCompareZero(this, value, cont);
+}
+
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+ RiscvOperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+ // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
+ size_t table_space_cost = 10 + 2 * sw.value_range();
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 2 + 2 * sw.case_count();
+ size_t lookup_time_cost = sw.case_count();
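+    // Prefer a jump table when its weighted cost (space plus three times the
+    // dispatch time) is no worse than that of a binary-search switch.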
+ if (sw.case_count() > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value() > std::numeric_limits<int32_t>::min() &&
+ sw.value_range() <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value()) {
+ index_operand = g.TempRegister();
+ Emit(kRiscvSub32, index_operand, value_operand,
+ g.TempImmediate(sw.min_value()));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
+ }
+ }
+
+ // Generate a tree of conditional jumps.
+ return EmitBinarySearchSwitch(sw, value_operand);
+}
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
+ }
+
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kRiscvAdd64, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kRiscvAdd64, &cont);
+}
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kRiscvSub64, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kRiscvSub64, &cont);
+}
+
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kRiscvMulOvf32, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kRiscvMulOvf32, &cont);
+}
+
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kRiscvAddOvf64, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kRiscvAddOvf64, &cont);
+}
+
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kRiscvSubOvf64, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kRiscvSubOvf64, &cont);
+}
+
+void InstructionSelector::VisitWord64Equal(Node* const node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ Int64BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
+ }
+
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint64LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ VisitRR(this, kRiscvFloat64ExtractLowWord32, node);
+}
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ VisitRR(this, kRiscvFloat64ExtractHighWord32, node);
+}
+
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ VisitRR(this, kRiscvFloat64SilenceNaN, node);
+}
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ RiscvOperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kRiscvFloat64InsertLowWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ RiscvOperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kRiscvFloat64InsertHighWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvSync, g.NoOutput());
+}
+
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ ArchOpcode opcode;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode =
+ load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
+ : kWord32AtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kWord32AtomicLoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ VisitAtomicLoad(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ ArchOpcode opcode;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kWord32AtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kWord32AtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kWord32AtomicStoreWord32;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ VisitAtomicStore(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ ArchOpcode opcode;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = kRiscvWord64AtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kRiscvWord64AtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kRiscvWord64AtomicLoadUint32;
+ break;
+ case MachineRepresentation::kWord64:
+ opcode = kRiscvWord64AtomicLoadUint64;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ VisitAtomicLoad(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord64AtomicStore(Node* node) {
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ ArchOpcode opcode;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kRiscvWord64AtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kRiscvWord64AtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kRiscvWord64AtomicStoreWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ opcode = kRiscvWord64AtomicStoreWord64;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ VisitAtomicStore(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kWord32AtomicExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kWord32AtomicExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kWord32AtomicExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kWord32AtomicExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kWord32AtomicExchangeWord32;
+ } else {
+ UNREACHABLE();
+ }
+
+ VisitAtomicExchange(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = kRiscvWord64AtomicExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kRiscvWord64AtomicExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kRiscvWord64AtomicExchangeUint32;
+ } else if (type == MachineType::Uint64()) {
+ opcode = kRiscvWord64AtomicExchangeUint64;
+ } else {
+ UNREACHABLE();
+ }
+ VisitAtomicExchange(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kWord32AtomicCompareExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kWord32AtomicCompareExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kWord32AtomicCompareExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kWord32AtomicCompareExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kWord32AtomicCompareExchangeWord32;
+ } else {
+ UNREACHABLE();
+ }
+
+ VisitAtomicCompareExchange(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = kRiscvWord64AtomicCompareExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kRiscvWord64AtomicCompareExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kRiscvWord64AtomicCompareExchangeUint32;
+ } else if (type == MachineType::Uint64()) {
+ opcode = kRiscvWord64AtomicCompareExchangeUint64;
+ } else {
+ UNREACHABLE();
+ }
+ VisitAtomicCompareExchange(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord32AtomicBinaryOperation(
+ Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
+ ArchOpcode uint16_op, ArchOpcode word32_op) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = int8_op;
+ } else if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Int16()) {
+ opcode = int16_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = word32_op;
+ } else {
+ UNREACHABLE();
+ }
+
+ VisitAtomicBinop(this, node, opcode);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
+ kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
+ kWord32Atomic##op##Word32); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
+void InstructionSelector::VisitWord64AtomicBinaryOperation(
+ Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
+ ArchOpcode uint64_op) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Uint32()) {
+ opcode = uint32_op;
+ } else if (type == MachineType::Uint64()) {
+ opcode = uint64_op;
+ } else {
+ UNREACHABLE();
+ }
+ VisitAtomicBinop(this, node, opcode);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation( \
+ node, kRiscvWord64Atomic##op##Uint8, kRiscvWord64Atomic##op##Uint16, \
+ kRiscvWord64Atomic##op##Uint32, kRiscvWord64Atomic##op##Uint64); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
+void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
+#define SIMD_TYPE_LIST(V) \
+ V(F32x4) \
+ V(I32x4) \
+ V(I16x8) \
+ V(I8x16)
+
+#define SIMD_UNOP_LIST(V) \
+ V(F64x2Abs, kRiscvF64x2Abs) \
+ V(F64x2Neg, kRiscvF64x2Neg) \
+ V(F64x2Sqrt, kRiscvF64x2Sqrt) \
+ V(F64x2ConvertLowI32x4S, kRiscvF64x2ConvertLowI32x4S) \
+ V(F64x2ConvertLowI32x4U, kRiscvF64x2ConvertLowI32x4U) \
+ V(F64x2PromoteLowF32x4, kRiscvF64x2PromoteLowF32x4) \
+ V(F64x2Ceil, kRiscvF64x2Ceil) \
+ V(F64x2Floor, kRiscvF64x2Floor) \
+ V(F64x2Trunc, kRiscvF64x2Trunc) \
+ V(F64x2NearestInt, kRiscvF64x2NearestInt) \
+ V(I64x2Neg, kRiscvI64x2Neg) \
+ V(I64x2BitMask, kRiscvI64x2BitMask) \
+ V(I64x2Eq, kRiscvI64x2Eq) \
+ V(F32x4SConvertI32x4, kRiscvF32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4, kRiscvF32x4UConvertI32x4) \
+ V(F32x4Abs, kRiscvF32x4Abs) \
+ V(F32x4Neg, kRiscvF32x4Neg) \
+ V(F32x4Sqrt, kRiscvF32x4Sqrt) \
+ V(F32x4RecipApprox, kRiscvF32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox, kRiscvF32x4RecipSqrtApprox) \
+ V(F32x4DemoteF64x2Zero, kRiscvF32x4DemoteF64x2Zero) \
+ V(F32x4Ceil, kRiscvF32x4Ceil) \
+ V(F32x4Floor, kRiscvF32x4Floor) \
+ V(F32x4Trunc, kRiscvF32x4Trunc) \
+ V(F32x4NearestInt, kRiscvF32x4NearestInt) \
+ V(I64x2SConvertI32x4Low, kRiscvI64x2SConvertI32x4Low) \
+ V(I64x2SConvertI32x4High, kRiscvI64x2SConvertI32x4High) \
+ V(I64x2UConvertI32x4Low, kRiscvI64x2UConvertI32x4Low) \
+ V(I64x2UConvertI32x4High, kRiscvI64x2UConvertI32x4High) \
+ V(I32x4SConvertF32x4, kRiscvI32x4SConvertF32x4) \
+ V(I32x4UConvertF32x4, kRiscvI32x4UConvertF32x4) \
+ V(I32x4Neg, kRiscvI32x4Neg) \
+ V(I32x4SConvertI16x8Low, kRiscvI32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High, kRiscvI32x4SConvertI16x8High) \
+ V(I32x4UConvertI16x8Low, kRiscvI32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High, kRiscvI32x4UConvertI16x8High) \
+ V(I32x4Abs, kRiscvI32x4Abs) \
+ V(I32x4BitMask, kRiscvI32x4BitMask) \
+ V(I32x4TruncSatF64x2SZero, kRiscvI32x4TruncSatF64x2SZero) \
+ V(I32x4TruncSatF64x2UZero, kRiscvI32x4TruncSatF64x2UZero) \
+ V(I16x8Neg, kRiscvI16x8Neg) \
+ V(I16x8SConvertI8x16Low, kRiscvI16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High, kRiscvI16x8SConvertI8x16High) \
+ V(I16x8UConvertI8x16Low, kRiscvI16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High, kRiscvI16x8UConvertI8x16High) \
+ V(I16x8Abs, kRiscvI16x8Abs) \
+ V(I16x8BitMask, kRiscvI16x8BitMask) \
+ V(I8x16Neg, kRiscvI8x16Neg) \
+ V(I8x16Abs, kRiscvI8x16Abs) \
+ V(I8x16BitMask, kRiscvI8x16BitMask) \
+ V(I8x16Popcnt, kRiscvI8x16Popcnt) \
+ V(S128Not, kRiscvS128Not) \
+ V(V128AnyTrue, kRiscvV128AnyTrue) \
+ V(V32x4AllTrue, kRiscvV32x4AllTrue) \
+ V(V16x8AllTrue, kRiscvV16x8AllTrue) \
+ V(V8x16AllTrue, kRiscvV8x16AllTrue)
+
+#define SIMD_SHIFT_OP_LIST(V) \
+ V(I64x2Shl) \
+ V(I64x2ShrS) \
+ V(I64x2ShrU) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16ShrU)
+
+#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add, kRiscvF64x2Add) \
+ V(F64x2Sub, kRiscvF64x2Sub) \
+ V(F64x2Mul, kRiscvF64x2Mul) \
+ V(F64x2Div, kRiscvF64x2Div) \
+ V(F64x2Min, kRiscvF64x2Min) \
+ V(F64x2Max, kRiscvF64x2Max) \
+ V(F64x2Eq, kRiscvF64x2Eq) \
+ V(F64x2Ne, kRiscvF64x2Ne) \
+ V(F64x2Lt, kRiscvF64x2Lt) \
+ V(F64x2Le, kRiscvF64x2Le) \
+ V(I64x2Add, kRiscvI64x2Add) \
+ V(I64x2Sub, kRiscvI64x2Sub) \
+ V(I64x2Mul, kRiscvI64x2Mul) \
+ V(F32x4Add, kRiscvF32x4Add) \
+ V(F32x4AddHoriz, kRiscvF32x4AddHoriz) \
+ V(F32x4Sub, kRiscvF32x4Sub) \
+ V(F32x4Mul, kRiscvF32x4Mul) \
+ V(F32x4Div, kRiscvF32x4Div) \
+ V(F32x4Max, kRiscvF32x4Max) \
+ V(F32x4Min, kRiscvF32x4Min) \
+ V(F32x4Eq, kRiscvF32x4Eq) \
+ V(F32x4Ne, kRiscvF32x4Ne) \
+ V(F32x4Lt, kRiscvF32x4Lt) \
+ V(F32x4Le, kRiscvF32x4Le) \
+ V(I32x4Add, kRiscvI32x4Add) \
+ V(I32x4AddHoriz, kRiscvI32x4AddHoriz) \
+ V(I32x4Sub, kRiscvI32x4Sub) \
+ V(I32x4Mul, kRiscvI32x4Mul) \
+ V(I32x4MaxS, kRiscvI32x4MaxS) \
+ V(I32x4MinS, kRiscvI32x4MinS) \
+ V(I32x4MaxU, kRiscvI32x4MaxU) \
+ V(I32x4MinU, kRiscvI32x4MinU) \
+ V(I32x4Eq, kRiscvI32x4Eq) \
+ V(I32x4Ne, kRiscvI32x4Ne) \
+ V(I32x4GtS, kRiscvI32x4GtS) \
+ V(I32x4GeS, kRiscvI32x4GeS) \
+ V(I32x4GtU, kRiscvI32x4GtU) \
+ V(I32x4GeU, kRiscvI32x4GeU) \
+ V(I32x4DotI16x8S, kRiscvI32x4DotI16x8S) \
+ V(I16x8Add, kRiscvI16x8Add) \
+ V(I16x8AddSatS, kRiscvI16x8AddSatS) \
+ V(I16x8AddSatU, kRiscvI16x8AddSatU) \
+ V(I16x8AddHoriz, kRiscvI16x8AddHoriz) \
+ V(I16x8Sub, kRiscvI16x8Sub) \
+ V(I16x8SubSatS, kRiscvI16x8SubSatS) \
+ V(I16x8SubSatU, kRiscvI16x8SubSatU) \
+ V(I16x8Mul, kRiscvI16x8Mul) \
+ V(I16x8MaxS, kRiscvI16x8MaxS) \
+ V(I16x8MinS, kRiscvI16x8MinS) \
+ V(I16x8MaxU, kRiscvI16x8MaxU) \
+ V(I16x8MinU, kRiscvI16x8MinU) \
+ V(I16x8Eq, kRiscvI16x8Eq) \
+ V(I16x8Ne, kRiscvI16x8Ne) \
+ V(I16x8GtS, kRiscvI16x8GtS) \
+ V(I16x8GeS, kRiscvI16x8GeS) \
+ V(I16x8GtU, kRiscvI16x8GtU) \
+ V(I16x8GeU, kRiscvI16x8GeU) \
+ V(I16x8RoundingAverageU, kRiscvI16x8RoundingAverageU) \
+ V(I16x8Q15MulRSatS, kRiscvI16x8Q15MulRSatS) \
+ V(I16x8SConvertI32x4, kRiscvI16x8SConvertI32x4) \
+ V(I16x8UConvertI32x4, kRiscvI16x8UConvertI32x4) \
+ V(I8x16Add, kRiscvI8x16Add) \
+ V(I8x16AddSatS, kRiscvI8x16AddSatS) \
+ V(I8x16AddSatU, kRiscvI8x16AddSatU) \
+ V(I8x16Sub, kRiscvI8x16Sub) \
+ V(I8x16SubSatS, kRiscvI8x16SubSatS) \
+ V(I8x16SubSatU, kRiscvI8x16SubSatU) \
+ V(I8x16Mul, kRiscvI8x16Mul) \
+ V(I8x16MaxS, kRiscvI8x16MaxS) \
+ V(I8x16MinS, kRiscvI8x16MinS) \
+ V(I8x16MaxU, kRiscvI8x16MaxU) \
+ V(I8x16MinU, kRiscvI8x16MinU) \
+ V(I8x16Eq, kRiscvI8x16Eq) \
+ V(I8x16Ne, kRiscvI8x16Ne) \
+ V(I8x16GtS, kRiscvI8x16GtS) \
+ V(I8x16GeS, kRiscvI8x16GeS) \
+ V(I8x16GtU, kRiscvI8x16GtU) \
+ V(I8x16GeU, kRiscvI8x16GeU) \
+ V(I8x16RoundingAverageU, kRiscvI8x16RoundingAverageU) \
+ V(I8x16SConvertI16x8, kRiscvI8x16SConvertI16x8) \
+ V(I8x16UConvertI16x8, kRiscvI8x16UConvertI16x8) \
+ V(S128And, kRiscvS128And) \
+ V(S128Or, kRiscvS128Or) \
+ V(S128Xor, kRiscvS128Xor) \
+ V(S128AndNot, kRiscvS128AndNot)
+
+void InstructionSelector::VisitS128Const(Node* node) {
+ RiscvOperandGenerator g(this);
+ static const int kUint32Immediates = kSimd128Size / sizeof(uint32_t);
+ uint32_t val[kUint32Immediates];
+ memcpy(val, S128ImmediateParameterOf(node->op()).data(), kSimd128Size);
+  // If the value is all zeros or all ones, avoid the generic constant load.
+ bool all_zeros = !(val[0] || val[1] || val[2] || val[3]);
+ bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX &&
+ val[2] == UINT32_MAX && val[3] == UINT32_MAX;
+ InstructionOperand dst = g.DefineAsRegister(node);
+ if (all_zeros) {
+ Emit(kRiscvS128Zero, dst);
+ } else if (all_ones) {
+ Emit(kRiscvS128AllOnes, dst);
+ } else {
+ Emit(kRiscvS128Const, dst, g.UseImmediate(val[0]), g.UseImmediate(val[1]),
+ g.UseImmediate(val[2]), g.UseImmediate(val[3]));
+ }
+}
+
+void InstructionSelector::VisitS128Zero(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvS128Zero, g.DefineAsRegister(node));
+}
+
+#define SIMD_VISIT_SPLAT(Type) \
+ void InstructionSelector::Visit##Type##Splat(Node* node) { \
+ VisitRR(this, kRiscv##Type##Splat, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
+SIMD_VISIT_SPLAT(F64x2)
+#undef SIMD_VISIT_SPLAT
+
+#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
+ void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
+ VisitRRI(this, kRiscv##Type##ExtractLane##Sign, node); \
+ }
+SIMD_VISIT_EXTRACT_LANE(F64x2, )
+SIMD_VISIT_EXTRACT_LANE(F32x4, )
+SIMD_VISIT_EXTRACT_LANE(I32x4, )
+SIMD_VISIT_EXTRACT_LANE(I16x8, U)
+SIMD_VISIT_EXTRACT_LANE(I16x8, S)
+SIMD_VISIT_EXTRACT_LANE(I8x16, U)
+SIMD_VISIT_EXTRACT_LANE(I8x16, S)
+#undef SIMD_VISIT_EXTRACT_LANE
+
+#define SIMD_VISIT_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ VisitRRIR(this, kRiscv##Type##ReplaceLane, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
+SIMD_VISIT_REPLACE_LANE(F64x2)
+#undef SIMD_VISIT_REPLACE_LANE
+
+#define SIMD_VISIT_UNOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRR(this, instruction, node); \
+ }
+SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
+#undef SIMD_VISIT_UNOP
+
+#define SIMD_VISIT_SHIFT_OP(Name) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitSimdShift(this, kRiscv##Name, node); \
+ }
+SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
+#undef SIMD_VISIT_SHIFT_OP
+
+#define SIMD_VISIT_BINOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRR(this, instruction, node); \
+ }
+SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
+#undef SIMD_VISIT_BINOP
+
+void InstructionSelector::VisitS128Select(Node* node) {
+ VisitRRRR(this, kRiscvS128Select, node);
+}
+
+namespace {
+
+struct ShuffleEntry {
+ uint8_t shuffle[kSimd128Size];
+ ArchOpcode opcode;
+};
+
+static const ShuffleEntry arch_shuffles[] = {
+ {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
+ kRiscvS32x4InterleaveRight},
+ {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
+ kRiscvS32x4InterleaveLeft},
+ {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
+ kRiscvS32x4PackEven},
+ {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
+ kRiscvS32x4PackOdd},
+ {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
+ kRiscvS32x4InterleaveEven},
+ {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
+ kRiscvS32x4InterleaveOdd},
+
+ {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
+ kRiscvS16x8InterleaveRight},
+ {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
+ kRiscvS16x8InterleaveLeft},
+ {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
+ kRiscvS16x8PackEven},
+ {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
+ kRiscvS16x8PackOdd},
+ {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
+ kRiscvS16x8InterleaveEven},
+ {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
+ kRiscvS16x8InterleaveOdd},
+ {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
+ kRiscvS16x4Reverse},
+ {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
+ kRiscvS16x2Reverse},
+
+ {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
+ kRiscvS8x16InterleaveRight},
+ {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
+ kRiscvS8x16InterleaveLeft},
+ {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+ kRiscvS8x16PackEven},
+ {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
+ kRiscvS8x16PackOdd},
+ {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
+ kRiscvS8x16InterleaveEven},
+ {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
+ kRiscvS8x16InterleaveOdd},
+ {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kRiscvS8x8Reverse},
+ {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kRiscvS8x4Reverse},
+ {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
+ kRiscvS8x2Reverse}};
+
+bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
+ size_t num_entries, bool is_swizzle,
+ ArchOpcode* opcode) {
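+  // A swizzle reads lanes from a single input, so shuffle indices are
+  // compared modulo 16; a general shuffle may index both inputs (0-31).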
+ uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
+ for (size_t i = 0; i < num_entries; ++i) {
+ const ShuffleEntry& entry = table[i];
+ int j = 0;
+ for (; j < kSimd128Size; ++j) {
+ if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
+ break;
+ }
+ }
+ if (j == kSimd128Size) {
+ *opcode = entry.opcode;
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace
+
+void InstructionSelector::VisitI8x16Shuffle(Node* node) {
+ uint8_t shuffle[kSimd128Size];
+ bool is_swizzle;
+ CanonicalizeShuffle(node, shuffle, &is_swizzle);
+ uint8_t shuffle32x4[4];
+ ArchOpcode opcode;
+ if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
+ is_swizzle, &opcode)) {
+ VisitRRR(this, opcode, node);
+ return;
+ }
+ Node* input0 = node->InputAt(0);
+ Node* input1 = node->InputAt(1);
+ uint8_t offset;
+ RiscvOperandGenerator g(this);
+ if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) {
+ Emit(kRiscvS8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
+ g.UseRegister(input0), g.UseImmediate(offset));
+ return;
+ }
+ if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
+ Emit(kRiscvS32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
+ return;
+ }
+ Emit(kRiscvS8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12)));
+}
+
+void InstructionSelector::VisitI8x16Swizzle(Node* node) {
+ RiscvOperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+  // We don't want input 0 or input 1 to be the same as the output, since we
+  // will modify the output before doing the calculation.
+ Emit(kRiscvI8x16Swizzle, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvSignExtendByte, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvSignExtendShort, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvSignExtendByte, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvSignExtendShort, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
+ RiscvOperandGenerator g(this);
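+  // On RV64, 32-bit arithmetic sign-extends its result to 64 bits, so a
+  // 32-bit shift by zero yields the sign-extended word.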
+ Emit(kRiscvShl32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
+}
+
+void InstructionSelector::VisitF32x4Pmin(Node* node) {
+ VisitUniqueRRR(this, kRiscvF32x4Pmin, node);
+}
+
+void InstructionSelector::VisitF32x4Pmax(Node* node) {
+ VisitUniqueRRR(this, kRiscvF32x4Pmax, node);
+}
+
+void InstructionSelector::VisitF64x2Pmin(Node* node) {
+ VisitUniqueRRR(this, kRiscvF64x2Pmin, node);
+}
+
+void InstructionSelector::VisitF64x2Pmax(Node* node) {
+ VisitUniqueRRR(this, kRiscvF64x2Pmax, node);
+}
+
+#define VISIT_EXT_MUL(OPCODE1, OPCODE2) \
+ void InstructionSelector::Visit##OPCODE1##ExtMulLow##OPCODE2(Node* node) { \
+ UNREACHABLE(); \
+ } \
+ void InstructionSelector::Visit##OPCODE1##ExtMulHigh##OPCODE2(Node* node) { \
+ UNREACHABLE(); \
+ }
+
+VISIT_EXT_MUL(I64x2, I32x4S)
+VISIT_EXT_MUL(I64x2, I32x4U)
+VISIT_EXT_MUL(I32x4, I16x8S)
+VISIT_EXT_MUL(I32x4, I16x8U)
+VISIT_EXT_MUL(I16x8, I8x16S)
+VISIT_EXT_MUL(I16x8, I8x16U)
+#undef VISIT_EXT_MUL
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+ MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
+ return flags | MachineOperatorBuilder::kWord32ShiftIsSafe |
+ MachineOperatorBuilder::kInt32DivIsSafe |
+ MachineOperatorBuilder::kUint32DivIsSafe |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTiesEven |
+ MachineOperatorBuilder::kFloat32RoundTiesEven;
+}
+
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ NoUnalignedAccessSupport();
+}
+
+#undef SIMD_BINOP_LIST
+#undef SIMD_SHIFT_OP_LIST
+#undef SIMD_UNOP_LIST
+#undef SIMD_TYPE_LIST
+#undef TRACE_UNIMPL
+#undef TRACE
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 9750b0d538..f7c5498e07 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -1004,30 +1004,6 @@ void CodeGenerator::AssemblePrepareTailCall() {
frame_access_state()->SetFrameAccessToSP();
}
-void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Label done;
-
- // Check if current frame is an arguments adaptor frame.
- __ LoadU64(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CmpS64(scratch1,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ bne(&done);
-
- // Load arguments count from current arguments adaptor frame (note, it
- // does not include receiver).
- Register caller_args_count_reg = scratch1;
- __ LoadU64(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
-
- __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3);
- __ bind(&done);
-}
-
namespace {
void FlushPendingPushRegisters(TurboAssembler* tasm,
@@ -1077,8 +1053,7 @@ void AdjustStackPointerForTailCall(
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
S390OperandConverter const& i) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
+ const MemoryAccessMode access_mode = AccessModeField::decode(instr->opcode());
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
codegen->tasm()->AndP(value, kSpeculationPoisonRegister);
@@ -1235,13 +1210,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- if (opcode == kArchTailCallCodeObjectFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
if (HasRegisterInput(instr, 0)) {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
@@ -1358,7 +1327,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
__ bind(&return_location);
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
- RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
+ RecordSafepoint(instr->reference_map());
}
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
@@ -1922,16 +1891,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_UNARY_OP(D_DInstr(lcdbr), nullInstr, nullInstr);
break;
case kS390_Cntlz32: {
- __ llgfr(i.OutputRegister(), i.InputRegister(0));
- __ flogr(r0, i.OutputRegister());
- __ AddS32(i.OutputRegister(), r0, Operand(-32));
- // No need to zero-ext b/c llgfr is done already
+ __ CountLeadingZerosU32(i.OutputRegister(), i.InputRegister(0), r0);
break;
}
#if V8_TARGET_ARCH_S390X
case kS390_Cntlz64: {
- __ flogr(r0, i.InputRegister(0));
- __ mov(i.OutputRegister(), r0);
+ __ CountLeadingZerosU64(i.OutputRegister(), i.InputRegister(0), r0);
break;
}
#endif
@@ -1991,42 +1956,39 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CanonicalizeNaN(result, value);
break;
}
- case kS390_StackClaim: {
- int num_slots = i.InputInt32(0);
- __ lay(sp, MemOperand(sp, -num_slots * kSystemPointerSize));
- frame_access_state()->IncreaseSPDelta(num_slots);
- break;
- }
- case kS390_Push:
- if (instr->InputAt(0)->IsFPRegister()) {
- LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
- switch (op->representation()) {
- case MachineRepresentation::kFloat32:
- __ lay(sp, MemOperand(sp, -kSystemPointerSize));
- __ StoreF32(i.InputDoubleRegister(0), MemOperand(sp));
- break;
- case MachineRepresentation::kFloat64:
- __ lay(sp, MemOperand(sp, -kDoubleSize));
- __ StoreF64(i.InputDoubleRegister(0), MemOperand(sp));
- frame_access_state()->IncreaseSPDelta(kDoubleSize /
- kSystemPointerSize);
- break;
- case MachineRepresentation::kSimd128: {
- __ lay(sp, MemOperand(sp, -kSimd128Size));
- __ StoreV128(i.InputDoubleRegister(0), MemOperand(sp), kScratchReg);
- frame_access_state()->IncreaseSPDelta(kSimd128Size /
- kSystemPointerSize);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- } else {
- __ Push(i.InputRegister(0));
- frame_access_state()->IncreaseSPDelta(1);
+ case kS390_Push: {
+ int stack_decrement = i.InputInt32(0);
+ int slots = stack_decrement / kSystemPointerSize;
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(1));
+ MachineRepresentation rep = op->representation();
+ int pushed_slots = ElementSizeInPointers(rep);
+ // Slot-sized arguments are never padded but there may be a gap if
+ // the slot allocator reclaimed other padding slots. Adjust the stack
+ // here to skip any gap.
+ if (slots > pushed_slots) {
+ __ lay(sp,
+ MemOperand(sp, -((slots - pushed_slots) * kSystemPointerSize)));
}
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ __ lay(sp, MemOperand(sp, -kSystemPointerSize));
+ __ StoreF32(i.InputDoubleRegister(1), MemOperand(sp));
+ break;
+ case MachineRepresentation::kFloat64:
+ __ lay(sp, MemOperand(sp, -kDoubleSize));
+ __ StoreF64(i.InputDoubleRegister(1), MemOperand(sp));
+ break;
+ case MachineRepresentation::kSimd128:
+ __ lay(sp, MemOperand(sp, -kSimd128Size));
+ __ StoreV128(i.InputDoubleRegister(1), MemOperand(sp), kScratchReg);
+ break;
+ default:
+ __ Push(i.InputRegister(1));
+ break;
+ }
+ frame_access_state()->IncreaseSPDelta(slots);
break;
+ }
case kS390_PushFrame: {
int num_slots = i.InputInt32(1);
__ lay(sp, MemOperand(sp, -num_slots * kSystemPointerSize));
@@ -2335,7 +2297,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ lrvg(r0, operand);
__ lrvg(r1, MemOperand(operand.rx(), operand.rb(),
- operand.offset() + kBitsPerByte));
+ operand.offset() + kSystemPointerSize));
__ vlvgp(i.OutputSimd128Register(), r1, r0);
}
break;
@@ -2402,7 +2364,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(3));
__ strvg(r0, operand);
__ strvg(r1, MemOperand(operand.rx(), operand.rb(),
- operand.offset() + kBitsPerByte));
+ operand.offset() + kSystemPointerSize));
}
break;
}
@@ -2672,13 +2634,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kS390_F32x4Splat: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ vrep(i.OutputSimd128Register(), i.InputDoubleRegister(0), Operand(0),
Condition(2));
-#else
- __ vrep(i.OutputSimd128Register(), i.InputDoubleRegister(0), Operand(1),
- Condition(2));
-#endif
break;
}
case kS390_I64x2Splat: {
@@ -2707,84 +2664,44 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
// vector extract element
case kS390_F64x2ExtractLane: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ vrep(i.OutputDoubleRegister(), i.InputSimd128Register(0),
Operand(1 - i.InputInt8(1)), Condition(3));
-#else
- __ vrep(i.OutputDoubleRegister(), i.InputSimd128Register(0),
- Operand(i.InputInt8(1)), Condition(3));
-#endif
break;
}
case kS390_F32x4ExtractLane: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ vrep(i.OutputDoubleRegister(), i.InputSimd128Register(0),
Operand(3 - i.InputInt8(1)), Condition(2));
-#else
- __ vrep(i.OutputDoubleRegister(), i.InputSimd128Register(0),
- Operand(i.InputInt8(1)), Condition(2));
-#endif
break;
}
case kS390_I64x2ExtractLane: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
MemOperand(r0, 1 - i.InputInt8(1)), Condition(3));
-#else
- __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
- MemOperand(r0, i.InputInt8(1)), Condition(3));
-#endif
break;
}
case kS390_I32x4ExtractLane: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
MemOperand(r0, 3 - i.InputInt8(1)), Condition(2));
-#else
- __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
- MemOperand(r0, i.InputInt8(1)), Condition(2));
-#endif
break;
}
case kS390_I16x8ExtractLaneU: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
MemOperand(r0, 7 - i.InputInt8(1)), Condition(1));
-#else
- __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
- MemOperand(r0, i.InputInt8(1)), Condition(1));
-#endif
break;
}
case kS390_I16x8ExtractLaneS: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlgv(kScratchReg, i.InputSimd128Register(0),
MemOperand(r0, 7 - i.InputInt8(1)), Condition(1));
-#else
- __ vlgv(kScratchReg, i.InputSimd128Register(0),
- MemOperand(r0, i.InputInt8(1)), Condition(1));
-#endif
__ lghr(i.OutputRegister(), kScratchReg);
break;
}
case kS390_I8x16ExtractLaneU: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
MemOperand(r0, 15 - i.InputInt8(1)), Condition(0));
-#else
- __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
- MemOperand(r0, i.InputInt8(1)), Condition(0));
-#endif
break;
}
case kS390_I8x16ExtractLaneS: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlgv(kScratchReg, i.InputSimd128Register(0),
MemOperand(r0, 15 - i.InputInt8(1)), Condition(0));
-#else
- __ vlgv(kScratchReg, i.InputSimd128Register(0),
- MemOperand(r0, i.InputInt8(1)), Condition(0));
-#endif
__ lgbr(i.OutputRegister(), kScratchReg);
break;
}
@@ -2795,13 +2712,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vlr(kScratchDoubleReg, src, Condition(0), Condition(0), Condition(0));
__ vlgv(kScratchReg, i.InputDoubleRegister(2), MemOperand(r0, 0),
Condition(3));
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlvg(kScratchDoubleReg, kScratchReg,
MemOperand(r0, 1 - i.InputInt8(1)), Condition(3));
-#else
- __ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, i.InputInt8(1)),
- Condition(3));
-#endif
__ vlr(dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(0));
break;
}
@@ -2809,17 +2721,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
__ vlr(kScratchDoubleReg, src, Condition(0), Condition(0), Condition(0));
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlgv(kScratchReg, i.InputDoubleRegister(2), MemOperand(r0, 0),
Condition(2));
__ vlvg(kScratchDoubleReg, kScratchReg,
MemOperand(r0, 3 - i.InputInt8(1)), Condition(2));
-#else
- __ vlgv(kScratchReg, i.InputDoubleRegister(2), MemOperand(r0, 1),
- Condition(2));
- __ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, i.InputInt8(1)),
- Condition(2));
-#endif
__ vlr(dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(0));
break;
}
@@ -2829,13 +2734,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (src != dst) {
__ vlr(dst, src, Condition(0), Condition(0), Condition(0));
}
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
MemOperand(r0, 1 - i.InputInt8(1)), Condition(3));
-#else
- __ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
- MemOperand(r0, i.InputInt8(1)), Condition(3));
-#endif
break;
}
case kS390_I32x4ReplaceLane: {
@@ -2844,13 +2744,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (src != dst) {
__ vlr(dst, src, Condition(0), Condition(0), Condition(0));
}
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
MemOperand(r0, 3 - i.InputInt8(1)), Condition(2));
-#else
- __ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
- MemOperand(r0, i.InputInt8(1)), Condition(2));
-#endif
break;
}
case kS390_I16x8ReplaceLane: {
@@ -2859,13 +2754,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (src != dst) {
__ vlr(dst, src, Condition(0), Condition(0), Condition(0));
}
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
MemOperand(r0, 7 - i.InputInt8(1)), Condition(1));
-#else
- __ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
- MemOperand(r0, i.InputInt8(1)), Condition(1));
-#endif
break;
}
case kS390_I8x16ReplaceLane: {
@@ -2874,13 +2764,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (src != dst) {
__ vlr(dst, src, Condition(0), Condition(0), Condition(0));
}
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
MemOperand(r0, 15 - i.InputInt8(1)), Condition(0));
-#else
- __ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
- MemOperand(r0, i.InputInt8(1)), Condition(0));
-#endif
break;
}
// vector binops
@@ -2942,13 +2827,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(2));
break;
}
-#define FLOAT_ADD_HORIZ(src0, src1, scratch0, scratch1, add0, add1) \
- __ vpk(dst, src0, src1, Condition(0), Condition(0), Condition(3)); \
- __ vesrl(scratch0, src0, MemOperand(r0, shift_bits), Condition(3)); \
- __ vesrl(scratch1, src1, MemOperand(r0, shift_bits), Condition(3)); \
- __ vpk(kScratchDoubleReg, scratch0, scratch1, Condition(0), Condition(0), \
- Condition(3)); \
- __ vfa(dst, add0, add1, Condition(0), Condition(0), Condition(2));
case kS390_F32x4AddHoriz: {
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
@@ -2956,14 +2834,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DoubleRegister tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
DoubleRegister tempFPReg2 = i.ToSimd128Register(instr->TempAt(1));
constexpr int shift_bits = 32;
-#ifdef V8_TARGET_BIG_ENDIAN
- FLOAT_ADD_HORIZ(src1, src0, tempFPReg2, tempFPReg1, kScratchDoubleReg,
- dst)
-#else
- FLOAT_ADD_HORIZ(src0, src1, tempFPReg1, tempFPReg2, dst,
- kScratchDoubleReg)
-#endif
-#undef FLOAT_ADD_HORIZ
+ __ vpk(dst, src1, src0, Condition(0), Condition(0), Condition(3));
+ __ vesrl(tempFPReg2, src1, MemOperand(r0, shift_bits), Condition(3));
+ __ vesrl(tempFPReg1, src0, MemOperand(r0, shift_bits), Condition(3));
+ __ vpk(kScratchDoubleReg, tempFPReg2, tempFPReg1, Condition(0),
+ Condition(0), Condition(3));
+ __ vfa(dst, kScratchDoubleReg, dst, Condition(0), Condition(0),
+ Condition(2));
break;
}
case kS390_F32x4Sub: {
@@ -3055,13 +2932,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(2));
__ vsumg(kScratchDoubleReg, src1, kScratchDoubleReg, Condition(0),
Condition(0), Condition(2));
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpk(dst, kScratchDoubleReg, dst, Condition(0), Condition(0),
Condition(3));
-#else
- __ vpk(dst, dst, kScratchDoubleReg, Condition(0), Condition(0),
- Condition(3));
-#endif
break;
}
case kS390_I32x4Sub: {
@@ -3092,13 +2964,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(1));
__ vsum(kScratchDoubleReg, src1, kScratchDoubleReg, Condition(0),
Condition(0), Condition(1));
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpk(dst, kScratchDoubleReg, dst, Condition(0), Condition(0),
Condition(2));
-#else
- __ vpk(dst, dst, kScratchDoubleReg, Condition(0), Condition(0),
- Condition(2));
-#endif
break;
}
case kS390_I16x8Sub: {
@@ -3276,6 +3143,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0), Condition(0), Condition(2));
break;
}
+ case kS390_I64x2Ne: {
+ __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(3));
+ __ vno(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ i.OutputSimd128Register(), Condition(0), Condition(0),
+ Condition(3));
+ break;
+ }
case kS390_I32x4Ne: {
__ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(2));
@@ -3312,6 +3187,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(2));
break;
}
+ case kS390_I64x2GtS: {
+ __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(3));
+ break;
+ }
+ case kS390_I64x2GeS: {
+ // Compute !(B > A) which is equal to A >= B.
+ __ vch(kScratchDoubleReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(0), Condition(0), Condition(3));
+ __ vno(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg,
+ Condition(0), Condition(0), Condition(3));
+ break;
+ }
case kS390_I32x4GtS: {
__ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(2));
@@ -3503,11 +3391,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_F32x4RecipApprox: {
__ mov(kScratchReg, Operand(1));
__ ConvertIntToFloat(kScratchDoubleReg, kScratchReg);
-#ifdef V8_TARGET_BIG_ENDIAN
__ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(2));
-#else
- __ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(1), Condition(2));
-#endif
__ vfd(i.OutputSimd128Register(), kScratchDoubleReg,
i.InputSimd128Register(0), Condition(0), Condition(0),
Condition(2));
@@ -3519,11 +3403,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(2));
__ mov(kScratchReg, Operand(1));
__ ConvertIntToFloat(kScratchDoubleReg, kScratchReg);
-#ifdef V8_TARGET_BIG_ENDIAN
__ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(2));
-#else
- __ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(1), Condition(2));
-#endif
__ vfd(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(0), Condition(2));
break;
@@ -3554,10 +3434,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0), Condition(2));
break;
}
+ case kS390_I64x2Abs: {
+ __ vlp(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
+ Condition(0), Condition(3));
+ break;
+ }
// vector boolean unops
- case kS390_V32x4AnyTrue:
- case kS390_V16x8AnyTrue:
- case kS390_V8x16AnyTrue: {
+ case kS390_V128AnyTrue: {
Simd128Register src = i.InputSimd128Register(0);
Register dst = i.OutputRegister();
Register temp = i.TempRegister(0);
@@ -3580,6 +3463,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vtm(kScratchDoubleReg, kScratchDoubleReg, Condition(0), Condition(0), \
Condition(0)); \
__ locgr(Condition(8), dst, temp);
+ case kS390_V64x2AllTrue: {
+ SIMD_ALL_TRUE(3)
+ break;
+ }
case kS390_V32x4AllTrue: {
SIMD_ALL_TRUE(2)
break;
@@ -3616,17 +3503,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kS390_S128Const: {
-#ifdef V8_TARGET_BIG_ENDIAN
for (int index = 0, j = 0; index < 2; index++, j = +2) {
__ mov(index < 1 ? ip : r0, Operand(i.InputInt32(j)));
__ iihf(index < 1 ? ip : r0, Operand(i.InputInt32(j + 1)));
}
-#else
- for (int index = 0, j = 0; index < 2; index++, j = +2) {
- __ mov(index < 1 ? r0 : ip, Operand(i.InputInt32(j)));
- __ iihf(index < 1 ? r0 : ip, Operand(i.InputInt32(j + 1)));
- }
-#endif
__ vlvgp(i.OutputSimd128Register(), r0, ip);
break;
}
@@ -3655,70 +3535,58 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0));
break;
}
- // vector conversions
-#define CONVERT_FLOAT_TO_INT32(convert) \
- for (int index = 0; index < 4; index++) { \
- __ vlgv(kScratchReg, kScratchDoubleReg, MemOperand(r0, index), \
- Condition(2)); \
- __ MovIntToFloat(tempFPReg1, kScratchReg); \
- __ convert(kScratchReg, tempFPReg1, kRoundToZero); \
- __ vlvg(dst, kScratchReg, MemOperand(r0, index), Condition(2)); \
- }
case kS390_I32x4SConvertF32x4: {
Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
// NaN to 0
__ vlr(kScratchDoubleReg, src, Condition(0), Condition(0), Condition(0));
__ vfce(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
Condition(0), Condition(0), Condition(2));
__ vn(kScratchDoubleReg, src, kScratchDoubleReg, Condition(0),
Condition(0), Condition(0));
- CONVERT_FLOAT_TO_INT32(ConvertFloat32ToInt32)
+ __ vcgd(i.OutputSimd128Register(), kScratchDoubleReg, Condition(5),
+ Condition(0), Condition(2));
break;
}
case kS390_I32x4UConvertF32x4: {
Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
// NaN to 0, negative to 0
__ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
Condition(0), Condition(0), Condition(0));
__ vfmax(kScratchDoubleReg, src, kScratchDoubleReg, Condition(1),
Condition(0), Condition(2));
- CONVERT_FLOAT_TO_INT32(ConvertFloat32ToUnsignedInt32)
- break;
- }
-#undef CONVERT_FLOAT_TO_INT32
-#define CONVERT_INT32_TO_FLOAT(convert, double_index) \
- Simd128Register src = i.InputSimd128Register(0); \
- Simd128Register dst = i.OutputSimd128Register(); \
- for (int index = 0; index < 4; index++) { \
- __ vlgv(kScratchReg, src, MemOperand(r0, index), Condition(2)); \
- __ convert(kScratchDoubleReg, kScratchReg); \
- __ MovFloatToInt(kScratchReg, kScratchDoubleReg); \
- __ vlvg(dst, kScratchReg, MemOperand(r0, index), Condition(2)); \
- }
+ __ vclgd(i.OutputSimd128Register(), kScratchDoubleReg, Condition(5),
+ Condition(0), Condition(2));
+ break;
+ }
case kS390_F32x4SConvertI32x4: {
-#ifdef V8_TARGET_BIG_ENDIAN
- CONVERT_INT32_TO_FLOAT(ConvertIntToFloat, 0)
-#else
- CONVERT_INT32_TO_FLOAT(ConvertIntToFloat, 1)
-#endif
+ __ vcdg(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ Condition(4), Condition(0), Condition(2));
break;
}
case kS390_F32x4UConvertI32x4: {
-#ifdef V8_TARGET_BIG_ENDIAN
- CONVERT_INT32_TO_FLOAT(ConvertUnsignedIntToFloat, 0)
-#else
- CONVERT_INT32_TO_FLOAT(ConvertUnsignedIntToFloat, 1)
-#endif
+ __ vcdlg(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ Condition(4), Condition(0), Condition(2));
break;
}
-#undef CONVERT_INT32_TO_FLOAT
#define VECTOR_UNPACK(op, mode) \
__ op(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0), \
Condition(0), Condition(mode));
+ case kS390_I64x2SConvertI32x4Low: {
+ VECTOR_UNPACK(vupl, 2)
+ break;
+ }
+ case kS390_I64x2SConvertI32x4High: {
+ VECTOR_UNPACK(vuph, 2)
+ break;
+ }
+ case kS390_I64x2UConvertI32x4Low: {
+ VECTOR_UNPACK(vupll, 2)
+ break;
+ }
+ case kS390_I64x2UConvertI32x4High: {
+ VECTOR_UNPACK(vuplh, 2)
+ break;
+ }
case kS390_I32x4SConvertI16x8Low: {
VECTOR_UNPACK(vupl, 1)
break;
@@ -3753,22 +3621,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
#undef VECTOR_UNPACK
case kS390_I16x8SConvertI32x4:
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpks(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(0), Condition(0), Condition(2));
-#else
- __ vpks(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(2));
-#endif
break;
case kS390_I8x16SConvertI16x8:
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpks(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(0), Condition(0), Condition(1));
-#else
- __ vpks(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(1));
-#endif
break;
#define VECTOR_PACK_UNSIGNED(mode) \
Simd128Register tempFPReg = i.ToSimd128Register(instr->TempAt(0)); \
@@ -3781,25 +3639,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_I16x8UConvertI32x4: {
// treat inputs as signed, and saturate to unsigned (negative to 0)
VECTOR_PACK_UNSIGNED(2)
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg,
Condition(0), Condition(2));
-#else
- __ vpkls(i.OutputSimd128Register(), tempFPReg, kScratchDoubleReg,
- Condition(0), Condition(2));
-#endif
break;
}
case kS390_I8x16UConvertI16x8: {
// treat inputs as signed, and saturate to unsigned (negative to 0)
VECTOR_PACK_UNSIGNED(1)
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg,
Condition(0), Condition(1));
-#else
- __ vpkls(i.OutputSimd128Register(), tempFPReg, kScratchDoubleReg,
- Condition(0), Condition(1));
-#endif
break;
}
#undef VECTOR_PACK_UNSIGNED
@@ -3822,35 +3670,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(mode + 1));
case kS390_I16x8AddSatS: {
BINOP_EXTRACT(va, vuph, vupl, 1)
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(2));
-#else
- __ vpks(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
- Condition(0), Condition(2));
-#endif
break;
}
case kS390_I16x8SubSatS: {
BINOP_EXTRACT(vs, vuph, vupl, 1)
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(2));
-#else
- __ vpks(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
- Condition(0), Condition(2));
-#endif
break;
}
case kS390_I16x8AddSatU: {
BINOP_EXTRACT(va, vuplh, vupll, 1)
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(2));
-#else
- __ vpkls(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
- Condition(0), Condition(2));
-#endif
break;
}
case kS390_I16x8SubSatU: {
@@ -3862,46 +3695,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0), Condition(2));
__ vmx(tempFPReg1, tempFPReg2, tempFPReg1, Condition(0), Condition(0),
Condition(2));
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(2));
-#else
- __ vpkls(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
- Condition(0), Condition(2));
-#endif
break;
}
case kS390_I8x16AddSatS: {
BINOP_EXTRACT(va, vuph, vupl, 0)
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(1));
-#else
- __ vpks(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
- Condition(0), Condition(1));
-#endif
break;
}
case kS390_I8x16SubSatS: {
BINOP_EXTRACT(vs, vuph, vupl, 0)
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(1));
-#else
- __ vpks(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
- Condition(0), Condition(1));
-#endif
break;
}
case kS390_I8x16AddSatU: {
BINOP_EXTRACT(va, vuplh, vupll, 0)
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(1));
-#else
- __ vpkls(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
- Condition(0), Condition(1));
-#endif
break;
}
case kS390_I8x16SubSatU: {
@@ -3913,14 +3726,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0), Condition(1));
__ vmx(tempFPReg1, tempFPReg2, tempFPReg1, Condition(0), Condition(0),
Condition(1));
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(1));
-#else
- __ vpkls(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
- Condition(0), Condition(1));
-
-#endif
break;
}
#undef BINOP_EXTRACT
@@ -3932,13 +3739,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputInt32(4), i.InputInt32(5)};
// create 2 * 8 byte inputs indicating new indices
for (int i = 0, j = 0; i < 2; i++, j = +2) {
-#ifdef V8_TARGET_BIG_ENDIAN
__ mov(i < 1 ? ip : r0, Operand(k8x16_indices[j]));
__ iihf(i < 1 ? ip : r0, Operand(k8x16_indices[j + 1]));
-#else
- __ mov(i < 1 ? r0 : ip, Operand(k8x16_indices[j]));
- __ iihf(i < 1 ? r0 : ip, Operand(k8x16_indices[j + 1]));
-#endif
}
__ vlvgp(kScratchDoubleReg, r0, ip);
__ vperm(dst, src0, src1, kScratchDoubleReg, Condition(0), Condition(0));
@@ -3954,7 +3756,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vrepi(kScratchDoubleReg, Operand(31), Condition(0));
__ vmnl(tempFPReg1, src1, kScratchDoubleReg, Condition(0), Condition(0),
Condition(0));
-#ifdef V8_TARGET_BIG_ENDIAN
// input needs to be reversed
__ vlgv(r0, src0, MemOperand(r0, 0), Condition(3));
__ vlgv(r1, src0, MemOperand(r0, 1), Condition(3));
@@ -3966,22 +3767,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0), Condition(0), Condition(0));
__ vperm(dst, dst, kScratchDoubleReg, tempFPReg1, Condition(0),
Condition(0));
-#else
- __ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
- Condition(0), Condition(0), Condition(0));
- __ vperm(dst, src0, kScratchDoubleReg, tempFPReg1, Condition(0),
- Condition(0));
-#endif
break;
}
case kS390_I64x2BitMask: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ mov(kScratchReg, Operand(0x80800040));
__ iihf(kScratchReg, Operand(0x80808080)); // Zeroing the high bits.
-#else
- __ mov(kScratchReg, Operand(0x80808080));
- __ iihf(kScratchReg, Operand(0x40008080));
-#endif
__ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, 1), Condition(3));
__ vbperm(kScratchDoubleReg, i.InputSimd128Register(0), kScratchDoubleReg,
Condition(0), Condition(0), Condition(0));
@@ -3990,13 +3780,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kS390_I32x4BitMask: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ mov(kScratchReg, Operand(0x204060));
__ iihf(kScratchReg, Operand(0x80808080)); // Zeroing the high bits.
-#else
- __ mov(kScratchReg, Operand(0x80808080));
- __ iihf(kScratchReg, Operand(0x60402000));
-#endif
__ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, 1), Condition(3));
__ vbperm(kScratchDoubleReg, i.InputSimd128Register(0), kScratchDoubleReg,
Condition(0), Condition(0), Condition(0));
@@ -4005,13 +3790,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kS390_I16x8BitMask: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ mov(kScratchReg, Operand(0x40506070));
__ iihf(kScratchReg, Operand(0x102030));
-#else
- __ mov(kScratchReg, Operand(0x30201000));
- __ iihf(kScratchReg, Operand(0x70605040));
-#endif
__ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, 1), Condition(3));
__ vbperm(kScratchDoubleReg, i.InputSimd128Register(0), kScratchDoubleReg,
Condition(0), Condition(0), Condition(0));
@@ -4020,17 +3800,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kS390_I8x16BitMask: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ mov(r0, Operand(0x60687078));
__ iihf(r0, Operand(0x40485058));
__ mov(ip, Operand(0x20283038));
__ iihf(ip, Operand(0x81018));
-#else
- __ mov(ip, Operand(0x58504840));
- __ iihf(ip, Operand(0x78706860));
- __ mov(r0, Operand(0x18100800));
- __ iihf(r0, Operand(0x38302820));
-#endif
__ vlvgp(kScratchDoubleReg, ip, r0);
__ vbperm(kScratchDoubleReg, i.InputSimd128Register(0), kScratchDoubleReg,
Condition(0), Condition(0), Condition(0));
@@ -4240,14 +4013,108 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vrepi(tempFPReg2, Operand(0x4000), Condition(2));
Q15_MUL_ROAUND(kScratchDoubleReg, vupl)
Q15_MUL_ROAUND(dst, vuph)
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpks(dst, dst, kScratchDoubleReg, Condition(0), Condition(2));
-#else
- __ vpks(dst, kScratchDoubleReg, dst, Condition(0), Condition(2));
-#endif
break;
}
#undef Q15_MUL_ROAUND
+#define SIGN_SELECT(mode) \
+ Simd128Register src0 = i.InputSimd128Register(0); \
+ Simd128Register src1 = i.InputSimd128Register(1); \
+ Simd128Register src2 = i.InputSimd128Register(2); \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ __ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg, Condition(0), \
+ Condition(0), Condition(3)); \
+ __ vch(kScratchDoubleReg, kScratchDoubleReg, src2, Condition(0), \
+ Condition(mode)); \
+ __ vsel(dst, src0, src1, kScratchDoubleReg, Condition(0), Condition(0));
+ case kS390_I8x16SignSelect: {
+ SIGN_SELECT(0)
+ break;
+ }
+ case kS390_I16x8SignSelect: {
+ SIGN_SELECT(1)
+ break;
+ }
+ case kS390_I32x4SignSelect: {
+ SIGN_SELECT(2)
+ break;
+ }
+ case kS390_I64x2SignSelect: {
+ SIGN_SELECT(3)
+ break;
+ }
+#undef SIGN_SELECT
+ case kS390_I8x16Popcnt: {
+ __ vpopct(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ Condition(0), Condition(0), Condition(0));
+ break;
+ }
+ case kS390_F64x2ConvertLowI32x4S: {
+ __ vupl(kScratchDoubleReg, i.InputSimd128Register(0), Condition(0),
+ Condition(0), Condition(2));
+ __ vcdg(i.OutputSimd128Register(), kScratchDoubleReg, Condition(5),
+ Condition(0), Condition(3));
+ break;
+ }
+ case kS390_F64x2ConvertLowI32x4U: {
+ __ vupll(kScratchDoubleReg, i.InputSimd128Register(0), Condition(0),
+ Condition(0), Condition(2));
+ __ vcdlg(i.OutputSimd128Register(), kScratchDoubleReg, Condition(5),
+ Condition(0), Condition(3));
+ break;
+ }
+ case kS390_F64x2PromoteLowF32x4: {
+ Register holder = r1;
+ for (int index = 0; index < 2; ++index) {
+ __ vlgv(r0, i.InputSimd128Register(0), MemOperand(r0, index + 2),
+ Condition(2));
+ __ MovIntToFloat(kScratchDoubleReg, r0);
+ __ ldebr(kScratchDoubleReg, kScratchDoubleReg);
+ __ MovDoubleToInt64(holder, kScratchDoubleReg);
+ holder = ip;
+ }
+ __ vlvgp(i.OutputSimd128Register(), r1, ip);
+ break;
+ }
+ case kS390_F32x4DemoteF64x2Zero: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Register holder = r1;
+ for (int index = 0; index < 2; ++index) {
+ __ vlgv(r0, i.InputSimd128Register(0), MemOperand(r0, index),
+ Condition(3));
+ __ MovInt64ToDouble(kScratchDoubleReg, r0);
+ __ ledbr(kScratchDoubleReg, kScratchDoubleReg);
+ __ MovFloatToInt(holder, kScratchDoubleReg);
+ holder = ip;
+ }
+ __ vx(dst, dst, dst, Condition(0), Condition(0), Condition(2));
+ __ vlvg(dst, r1, MemOperand(r0, 2), Condition(2));
+ __ vlvg(dst, ip, MemOperand(r0, 3), Condition(2));
+ break;
+ }
+ case kS390_I32x4TruncSatF64x2SZero: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ // NaN to 0
+ __ vlr(kScratchDoubleReg, src, Condition(0), Condition(0), Condition(0));
+ __ vfce(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
+ Condition(0), Condition(0), Condition(3));
+ __ vn(kScratchDoubleReg, src, kScratchDoubleReg, Condition(0),
+ Condition(0), Condition(0));
+ __ vcgd(kScratchDoubleReg, kScratchDoubleReg, Condition(5), Condition(0),
+ Condition(3));
+ __ vx(dst, dst, dst, Condition(0), Condition(0), Condition(2));
+ __ vpks(dst, dst, kScratchDoubleReg, Condition(0), Condition(3));
+ break;
+ }
+ case kS390_I32x4TruncSatF64x2UZero: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vclgd(kScratchDoubleReg, i.InputSimd128Register(0), Condition(5),
+ Condition(0), Condition(3));
+ __ vx(dst, dst, dst, Condition(0), Condition(0), Condition(2));
+ __ vpkls(dst, dst, kScratchDoubleReg, Condition(0), Condition(3));
+ break;
+ }
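    // The two TruncSat cases above implement the wasm
    // i32x4.trunc_sat_f64x2_*_zero semantics: both f64 lanes are converted
    // with round-toward-zero (vcgd/vclgd, Condition(3) = doubleword elements),
    // the destination is zeroed, and vpks/vpkls packs the saturated results
    // next to the zeros, i.e. roughly
    //   out[0] = sat_trunc(in[0]); out[1] = sat_trunc(in[1]); out[2] = out[3] = 0;
    // Saturation follows the same pattern as the f32 helper sketched earlier,
    // just with double inputs and, in the unsigned case, negative inputs
    // clamped to 0.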
case kS390_StoreCompressTagged: {
CHECK(!instr->HasOutput());
size_t index = 0;
@@ -4361,7 +4228,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
gen_->zone()->New<ReferenceMap>(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ gen_->RecordSafepoint(reference_map);
if (FLAG_debug_code) {
__ stop();
}
@@ -4561,7 +4428,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
- RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ RecordSafepoint(reference_map);
if (FLAG_debug_code) {
__ stop();
}
@@ -4637,7 +4504,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
Register argc_reg = r5;
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// Functions with JS linkage have at least one parameter (the receiver).
// If {parameter_count} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
@@ -4645,9 +4511,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
parameter_count != 0;
-#else
- const bool drop_jsargs = false;
-#endif
+
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
diff --git a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
index bed16450be..8068894b6b 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
@@ -92,7 +92,6 @@ namespace compiler {
V(S390_Tst64) \
V(S390_Push) \
V(S390_PushFrame) \
- V(S390_StackClaim) \
V(S390_StoreToStackSlot) \
V(S390_SignExtendWord8ToInt32) \
V(S390_SignExtendWord16ToInt32) \
@@ -215,6 +214,9 @@ namespace compiler {
V(S390_F64x2Floor) \
V(S390_F64x2Trunc) \
V(S390_F64x2NearestInt) \
+ V(S390_F64x2ConvertLowI32x4S) \
+ V(S390_F64x2ConvertLowI32x4U) \
+ V(S390_F64x2PromoteLowF32x4) \
V(S390_F32x4Splat) \
V(S390_F32x4ExtractLane) \
V(S390_F32x4ReplaceLane) \
@@ -244,6 +246,7 @@ namespace compiler {
V(S390_F32x4Floor) \
V(S390_F32x4Trunc) \
V(S390_F32x4NearestInt) \
+ V(S390_F32x4DemoteF64x2Zero) \
V(S390_I64x2Neg) \
V(S390_I64x2Add) \
V(S390_I64x2Sub) \
@@ -260,6 +263,15 @@ namespace compiler {
V(S390_I64x2ExtMulHighI32x4S) \
V(S390_I64x2ExtMulLowI32x4U) \
V(S390_I64x2ExtMulHighI32x4U) \
+ V(S390_I64x2SConvertI32x4Low) \
+ V(S390_I64x2SConvertI32x4High) \
+ V(S390_I64x2UConvertI32x4Low) \
+ V(S390_I64x2UConvertI32x4High) \
+ V(S390_I64x2SignSelect) \
+ V(S390_I64x2Ne) \
+ V(S390_I64x2GtS) \
+ V(S390_I64x2GeS) \
+ V(S390_I64x2Abs) \
V(S390_I32x4Splat) \
V(S390_I32x4ExtractLane) \
V(S390_I32x4ReplaceLane) \
@@ -296,6 +308,9 @@ namespace compiler {
V(S390_I32x4ExtMulHighI16x8U) \
V(S390_I32x4ExtAddPairwiseI16x8S) \
V(S390_I32x4ExtAddPairwiseI16x8U) \
+ V(S390_I32x4SignSelect) \
+ V(S390_I32x4TruncSatF64x2SZero) \
+ V(S390_I32x4TruncSatF64x2UZero) \
V(S390_I16x8Splat) \
V(S390_I16x8ExtractLaneU) \
V(S390_I16x8ExtractLaneS) \
@@ -338,6 +353,7 @@ namespace compiler {
V(S390_I16x8ExtAddPairwiseI8x16S) \
V(S390_I16x8ExtAddPairwiseI8x16U) \
V(S390_I16x8Q15MulRSatS) \
+ V(S390_I16x8SignSelect) \
V(S390_I8x16Splat) \
V(S390_I8x16ExtractLaneU) \
V(S390_I8x16ExtractLaneS) \
@@ -370,12 +386,13 @@ namespace compiler {
V(S390_I8x16BitMask) \
V(S390_I8x16Shuffle) \
V(S390_I8x16Swizzle) \
- V(S390_V32x4AnyTrue) \
- V(S390_V16x8AnyTrue) \
- V(S390_V8x16AnyTrue) \
+ V(S390_I8x16SignSelect) \
+ V(S390_I8x16Popcnt) \
+ V(S390_V64x2AllTrue) \
V(S390_V32x4AllTrue) \
V(S390_V16x8AllTrue) \
V(S390_V8x16AllTrue) \
+ V(S390_V128AnyTrue) \
V(S390_S128And) \
V(S390_S128Or) \
V(S390_S128Xor) \
diff --git a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
index 8c1c804760..de6abc56a3 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
@@ -161,6 +161,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_F64x2Floor:
case kS390_F64x2Trunc:
case kS390_F64x2NearestInt:
+ case kS390_F64x2ConvertLowI32x4S:
+ case kS390_F64x2ConvertLowI32x4U:
+ case kS390_F64x2PromoteLowF32x4:
case kS390_F32x4Splat:
case kS390_F32x4ExtractLane:
case kS390_F32x4ReplaceLane:
@@ -190,6 +193,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_F32x4Floor:
case kS390_F32x4Trunc:
case kS390_F32x4NearestInt:
+ case kS390_F32x4DemoteF64x2Zero:
case kS390_I64x2Neg:
case kS390_I64x2Add:
case kS390_I64x2Sub:
@@ -206,6 +210,15 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I64x2ExtMulHighI32x4S:
case kS390_I64x2ExtMulLowI32x4U:
case kS390_I64x2ExtMulHighI32x4U:
+ case kS390_I64x2SConvertI32x4Low:
+ case kS390_I64x2SConvertI32x4High:
+ case kS390_I64x2UConvertI32x4Low:
+ case kS390_I64x2UConvertI32x4High:
+ case kS390_I64x2SignSelect:
+ case kS390_I64x2Ne:
+ case kS390_I64x2GtS:
+ case kS390_I64x2GeS:
+ case kS390_I64x2Abs:
case kS390_I32x4Splat:
case kS390_I32x4ExtractLane:
case kS390_I32x4ReplaceLane:
@@ -242,6 +255,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I32x4ExtMulHighI16x8U:
case kS390_I32x4ExtAddPairwiseI16x8S:
case kS390_I32x4ExtAddPairwiseI16x8U:
+ case kS390_I32x4SignSelect:
+ case kS390_I32x4TruncSatF64x2SZero:
+ case kS390_I32x4TruncSatF64x2UZero:
case kS390_I16x8Splat:
case kS390_I16x8ExtractLaneU:
case kS390_I16x8ExtractLaneS:
@@ -284,6 +300,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I16x8ExtAddPairwiseI8x16S:
case kS390_I16x8ExtAddPairwiseI8x16U:
case kS390_I16x8Q15MulRSatS:
+ case kS390_I16x8SignSelect:
case kS390_I8x16Splat:
case kS390_I8x16ExtractLaneU:
case kS390_I8x16ExtractLaneS:
@@ -316,12 +333,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I8x16BitMask:
case kS390_I8x16Shuffle:
case kS390_I8x16Swizzle:
- case kS390_V32x4AnyTrue:
- case kS390_V16x8AnyTrue:
- case kS390_V8x16AnyTrue:
+ case kS390_I8x16SignSelect:
+ case kS390_I8x16Popcnt:
+ case kS390_V64x2AllTrue:
case kS390_V32x4AllTrue:
case kS390_V16x8AllTrue:
case kS390_V8x16AllTrue:
+ case kS390_V128AnyTrue:
case kS390_S128And:
case kS390_S128Or:
case kS390_S128Xor:
@@ -367,7 +385,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_Push:
case kS390_PushFrame:
case kS390_StoreToStackSlot:
- case kS390_StackClaim:
return kHasSideEffect;
case kS390_Word64AtomicExchangeUint8:
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index c2dd218fd6..972d268014 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -703,7 +703,7 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode |= AddressingModeField::encode(mode);
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
}
Emit(opcode, 1, outputs, input_count, inputs);
}
@@ -2119,36 +2119,15 @@ void InstructionSelector::EmitPrepareArguments(
}
} else {
// Push any stack arguments.
- int num_slots = 0;
- int slot = 0;
-
-#define INPUT_SWITCH(param) \
- switch (input.location.GetType().representation()) { \
- case MachineRepresentation::kSimd128: \
- param += kSimd128Size / kSystemPointerSize; \
- break; \
- case MachineRepresentation::kFloat64: \
- param += kDoubleSize / kSystemPointerSize; \
- break; \
- default: \
- param += 1; \
- break; \
- }
- for (PushParameter input : *arguments) {
- if (input.node == nullptr) continue;
- INPUT_SWITCH(num_slots)
- }
- Emit(kS390_StackClaim, g.NoOutput(), g.TempImmediate(num_slots));
- for (PushParameter input : *arguments) {
+ int stack_decrement = 0;
+ for (PushParameter input : base::Reversed(*arguments)) {
+ stack_decrement += kSystemPointerSize;
// Skip any alignment holes in pushed nodes.
- if (input.node) {
- Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
- g.TempImmediate(slot));
- INPUT_SWITCH(slot)
- }
+ if (input.node == nullptr) continue;
+ InstructionOperand decrement = g.UseImmediate(stack_decrement);
+ stack_decrement = 0;
+ Emit(kS390_Push, g.NoOutput(), decrement, g.UseRegister(input.node));
}
-#undef INPUT_SWITCH
- DCHECK(num_slots == slot);
}
}
@@ -2159,8 +2138,6 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
-int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
-
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
@@ -2451,6 +2428,9 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I64x2ExtMulLowI32x4U) \
V(I64x2ExtMulHighI32x4U) \
V(I16x8Q15MulRSatS) \
+ V(I64x2Ne) \
+ V(I64x2GtS) \
+ V(I64x2GeS) \
V(I32x4Add) \
V(I32x4AddHoriz) \
V(I32x4Sub) \
@@ -2520,38 +2500,54 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(S128Xor) \
V(S128AndNot)
-#define SIMD_UNOP_LIST(V) \
- V(F64x2Abs) \
- V(F64x2Neg) \
- V(F64x2Sqrt) \
- V(F64x2Ceil) \
- V(F64x2Floor) \
- V(F64x2Trunc) \
- V(F64x2NearestInt) \
- V(F32x4Abs) \
- V(F32x4Neg) \
- V(F32x4RecipApprox) \
- V(F32x4RecipSqrtApprox) \
- V(F32x4Sqrt) \
- V(F32x4Ceil) \
- V(F32x4Floor) \
- V(F32x4Trunc) \
- V(F32x4NearestInt) \
- V(I64x2Neg) \
- V(I16x8Abs) \
- V(I32x4Neg) \
- V(I32x4SConvertI16x8Low) \
- V(I32x4SConvertI16x8High) \
- V(I32x4UConvertI16x8Low) \
- V(I32x4UConvertI16x8High) \
- V(I32x4Abs) \
- V(I16x8Neg) \
- V(I16x8SConvertI8x16Low) \
- V(I16x8SConvertI8x16High) \
- V(I16x8UConvertI8x16Low) \
- V(I16x8UConvertI8x16High) \
- V(I8x16Neg) \
- V(I8x16Abs) \
+#define SIMD_UNOP_LIST(V) \
+ V(F64x2Abs) \
+ V(F64x2Neg) \
+ V(F64x2Sqrt) \
+ V(F64x2Ceil) \
+ V(F64x2Floor) \
+ V(F64x2Trunc) \
+ V(F64x2NearestInt) \
+ V(F64x2ConvertLowI32x4S) \
+ V(F64x2ConvertLowI32x4U) \
+ V(F64x2PromoteLowF32x4) \
+ V(F32x4Abs) \
+ V(F32x4Neg) \
+ V(F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox) \
+ V(F32x4Sqrt) \
+ V(F32x4Ceil) \
+ V(F32x4Floor) \
+ V(F32x4Trunc) \
+ V(F32x4NearestInt) \
+ V(F32x4DemoteF64x2Zero) \
+ V(I64x2Neg) \
+ V(I64x2SConvertI32x4Low) \
+ V(I64x2SConvertI32x4High) \
+ V(I64x2UConvertI32x4Low) \
+ V(I64x2UConvertI32x4High) \
+ V(I64x2Abs) \
+ V(I32x4Neg) \
+ V(I32x4Abs) \
+ V(I32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High) \
+ V(I32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High) \
+ V(I32x4TruncSatF64x2SZero) \
+ V(I32x4TruncSatF64x2UZero) \
+ V(I32x4ExtAddPairwiseI16x8S) \
+ V(I32x4ExtAddPairwiseI16x8U) \
+ V(I16x8Neg) \
+ V(I16x8Abs) \
+ V(I16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High) \
+ V(I16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High) \
+ V(I16x8ExtAddPairwiseI8x16S) \
+ V(I16x8ExtAddPairwiseI8x16U) \
+ V(I8x16Neg) \
+ V(I8x16Abs) \
+ V(I8x16Popcnt) \
V(S128Not)
#define SIMD_SHIFT_LIST(V) \
@@ -2569,9 +2565,8 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I8x16ShrU)
#define SIMD_BOOL_LIST(V) \
- V(V32x4AnyTrue) \
- V(V16x8AnyTrue) \
- V(V8x16AnyTrue) \
+ V(V128AnyTrue) \
+ V(V64x2AllTrue) \
V(V32x4AllTrue) \
V(V16x8AllTrue) \
V(V8x16AllTrue)
@@ -2723,7 +2718,6 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) {
S390OperandGenerator g(this);
Node* input0 = node->InputAt(0);
Node* input1 = node->InputAt(1);
-#ifdef V8_TARGET_BIG_ENDIAN
// Remap the shuffle indices to match IBM lane numbering.
int max_index = 15;
int total_lane_count = 2 * kSimd128Size;
@@ -2735,7 +2729,6 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) {
: total_lane_count - current_index + max_index);
}
shuffle_p = &shuffle_remapped[0];
-#endif
Emit(kS390_I8x16Shuffle, g.DefineAsRegister(node),
g.UseUniqueRegister(input0), g.UseUniqueRegister(input1),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_p)),
@@ -2817,11 +2810,21 @@ void InstructionSelector::EmitPrepareResults(
}
}
+void InstructionSelector::VisitLoadLane(Node* node) {
+ // We should never reach here, see http://crrev.com/c/2577820
+ UNIMPLEMENTED();
+}
+
void InstructionSelector::VisitLoadTransform(Node* node) {
// We should never reach here, see http://crrev.com/c/2050811
UNREACHABLE();
}
+void InstructionSelector::VisitStoreLane(Node* node) {
+ // We should never reach here, see http://crrev.com/c/2577820
+ UNIMPLEMENTED();
+}
+
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
S390OperandGenerator g(this);
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index e905c7194f..0a3e065bbe 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -7,6 +7,7 @@
#include "src/base/overflowing-math.h"
#include "src/codegen/assembler.h"
#include "src/codegen/cpu-features.h"
+#include "src/codegen/external-reference.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/x64/assembler-x64.h"
@@ -339,7 +340,7 @@ class WasmOutOfLineTrap : public OutOfLineCode {
__ near_call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
gen_->zone()->New<ReferenceMap>(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ gen_->RecordSafepoint(reference_map);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
}
@@ -364,8 +365,7 @@ class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
int pc) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessProtected) {
zone->New<WasmProtectedInstructionTrap>(codegen, pc, instr);
}
@@ -374,8 +374,7 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
X64OperandConverter const& i) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
codegen->tasm()->andq(value, kSpeculationPoisonRegister);
@@ -710,28 +709,6 @@ void CodeGenerator::AssemblePrepareTailCall() {
frame_access_state()->SetFrameAccessToSP();
}
-void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Label done;
-
- // Check if current frame is an arguments adaptor frame.
- __ cmpq(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &done, Label::kNear);
-
- // Load arguments count from current arguments adaptor frame (note, it
- // does not include receiver).
- Register caller_args_count_reg = scratch1;
- __ SmiUntag(caller_args_count_reg,
- Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3);
- __ bind(&done);
-}
-
namespace {
void AdjustStackPointerForTailCall(Instruction* instr,
@@ -923,13 +900,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallCodeObjectFromJSFunction:
- if (!instr->HasCallDescriptorFlag(CallDescriptor::kIsTailCallForTierUp)) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
- V8_FALLTHROUGH;
case kArchTailCallCodeObject: {
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = i.InputCode(0);
@@ -1058,7 +1028,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
__ bind(&return_location);
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
- RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
+ RecordSafepoint(instr->reference_map());
}
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
@@ -1077,7 +1047,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
}
- // TODO(tebbi): Do we need an lfence here?
+ // TODO(turbofan): Do we need an lfence here?
break;
}
case kArchJmp:
@@ -2154,8 +2124,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64Movsd: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
if (instr->HasOutput()) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
// If we have to poison the loaded value, we load into a general
// purpose register first, mask it with the poison, and move the
@@ -2174,7 +2143,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64Movdqu: {
- CpuFeatureScope sse_scope(tasm(), SSSE3);
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
if (instr->HasOutput()) {
__ Movdqu(i.OutputSimd128Register(), i.MemoryOperand());
@@ -2294,59 +2262,51 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64Inc32:
__ incl(i.OutputRegister());
break;
- case kX64Push:
- if (HasAddressingMode(instr)) {
- size_t index = 0;
+ case kX64Push: {
+ int stack_decrement = i.InputInt32(0);
+ int slots = stack_decrement / kSystemPointerSize;
+ // Whenever codegen uses pushq, we need to check if stack_decrement
+ // contains any extra padding and adjust the stack before the pushq.
+ if (HasImmediateInput(instr, 1)) {
+ __ AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ __ pushq(i.InputImmediate(1));
+ } else if (HasAddressingMode(instr)) {
+ __ AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ size_t index = 1;
Operand operand = i.MemoryOperand(&index);
__ pushq(operand);
- frame_access_state()->IncreaseSPDelta(1);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kSystemPointerSize);
- } else if (HasImmediateInput(instr, 0)) {
- __ pushq(i.InputImmediate(0));
- frame_access_state()->IncreaseSPDelta(1);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kSystemPointerSize);
- } else if (HasRegisterInput(instr, 0)) {
- __ pushq(i.InputRegister(0));
- frame_access_state()->IncreaseSPDelta(1);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kSystemPointerSize);
- } else if (instr->InputAt(0)->IsFloatRegister() ||
- instr->InputAt(0)->IsDoubleRegister()) {
- // TODO(titzer): use another machine instruction?
- __ AllocateStackSpace(kDoubleSize);
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kDoubleSize);
- __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
- } else if (instr->InputAt(0)->IsSimd128Register()) {
- // TODO(titzer): use another machine instruction?
- __ AllocateStackSpace(kSimd128Size);
- frame_access_state()->IncreaseSPDelta(kSimd128Size /
- kSystemPointerSize);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kSimd128Size);
- __ Movups(Operand(rsp, 0), i.InputSimd128Register(0));
- } else if (instr->InputAt(0)->IsStackSlot() ||
- instr->InputAt(0)->IsFloatStackSlot() ||
- instr->InputAt(0)->IsDoubleStackSlot()) {
- __ pushq(i.InputOperand(0));
- frame_access_state()->IncreaseSPDelta(1);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kSystemPointerSize);
} else {
- DCHECK(instr->InputAt(0)->IsSimd128StackSlot());
- __ Movups(kScratchDoubleReg, i.InputOperand(0));
- // TODO(titzer): use another machine instruction?
- __ AllocateStackSpace(kSimd128Size);
- frame_access_state()->IncreaseSPDelta(kSimd128Size /
- kSystemPointerSize);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kSimd128Size);
- __ Movups(Operand(rsp, 0), kScratchDoubleReg);
+ InstructionOperand* input = instr->InputAt(1);
+ if (input->IsRegister()) {
+ __ AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ __ pushq(i.InputRegister(1));
+ } else if (input->IsFloatRegister() || input->IsDoubleRegister()) {
+ DCHECK_GE(stack_decrement, kSystemPointerSize);
+ __ AllocateStackSpace(stack_decrement);
+ __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
+ } else if (input->IsSimd128Register()) {
+ DCHECK_GE(stack_decrement, kSimd128Size);
+ __ AllocateStackSpace(stack_decrement);
+ // TODO(bbudge) Use Movaps when slots are aligned.
+ __ Movups(Operand(rsp, 0), i.InputSimd128Register(1));
+ } else if (input->IsStackSlot() || input->IsFloatStackSlot() ||
+ input->IsDoubleStackSlot()) {
+ __ AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ __ pushq(i.InputOperand(1));
+ } else {
+ DCHECK(input->IsSimd128StackSlot());
+ DCHECK_GE(stack_decrement, kSimd128Size);
+ // TODO(bbudge) Use Movaps when slots are aligned.
+ __ Movups(kScratchDoubleReg, i.InputOperand(1));
+ __ AllocateStackSpace(stack_decrement);
+ __ Movups(Operand(rsp, 0), kScratchDoubleReg);
+ }
}
+ frame_access_state()->IncreaseSPDelta(slots);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ stack_decrement);
break;
+ }
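    // kX64Push now carries the stack decrement (in bytes) as input 0: the
    // instruction selector accumulates one kSystemPointerSize (8 bytes on x64)
    // per argument slot, including padding and multi-slot holes, and resets the
    // counter after each emitted push. Codegen then pre-allocates
    // stack_decrement - 8 before a single pushq, or the full decrement before a
    // Movsd/Movups store. A rough worked example (values are illustrative, not
    // taken from the patch): a Simd128 argument covering two slots makes the
    // selector emit kX64Push with decrement 16, and codegen lowers that to
    //   AllocateStackSpace(16); Movups(Operand(rsp, 0), <simd register>);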
case kX64Poke: {
int slot = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
@@ -2513,6 +2473,33 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64F64x2ConvertLowI32x4S: {
+ __ Cvtdq2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kX64F64x2ConvertLowI32x4U: {
+ __ F64x2ConvertLowI32x4U(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kX64F64x2PromoteLowF32x4: {
+ __ Cvtps2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kX64F32x4DemoteF64x2Zero: {
+ __ Cvtpd2ps(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kX64I32x4TruncSatF64x2SZero: {
+ __ I32x4TruncSatF64x2SZero(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kX64I32x4TruncSatF64x2UZero: {
+ __ I32x4TruncSatF64x2UZero(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
+ break;
+ }
case kX64F32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputDoubleRegister(0);
@@ -2589,11 +2576,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (dst == src) {
__ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ Psrld(kScratchDoubleReg, byte{1});
- __ Andps(i.OutputSimd128Register(), kScratchDoubleReg);
+ __ Andps(dst, kScratchDoubleReg);
} else {
__ Pcmpeqd(dst, dst);
__ Psrld(dst, byte{1});
- __ Andps(dst, i.InputSimd128Register(0));
+ __ Andps(dst, src);
}
break;
}
@@ -2603,11 +2590,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (dst == src) {
__ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ Pslld(kScratchDoubleReg, byte{31});
- __ Xorps(i.OutputSimd128Register(), kScratchDoubleReg);
+ __ Xorps(dst, kScratchDoubleReg);
} else {
__ Pcmpeqd(dst, dst);
__ Pslld(dst, byte{31});
- __ Xorps(dst, i.InputSimd128Register(0));
+ __ Xorps(dst, src);
}
break;
}
@@ -2775,6 +2762,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pextrq(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
break;
}
+ case kX64I64x2Abs: {
+ __ I64x2Abs(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kX64I64x2Neg: {
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
@@ -2848,9 +2839,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I64x2Eq: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
ASSEMBLE_SIMD_BINOP(pcmpeqq);
break;
}
+ case kX64I64x2Ne: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ Pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pcmpeqq(tmp, tmp);
+ __ Pxor(i.OutputSimd128Register(), tmp);
+ break;
+ }
+ case kX64I64x2GtS: {
+ __ I64x2GtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I64x2GeS: {
+ __ I64x2GeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
case kX64I64x2ShrU: {
// Take shift value modulo 2^6.
ASSEMBLE_SIMD_SHIFT(psrlq, 6);
@@ -2885,15 +2895,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I64x2SConvertI32x4High: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpunpckhqdq(dst, src, src);
- } else {
- __ pshufd(dst, src, 0xEE);
- }
- __ Pmovsxdq(dst, dst);
+ __ I64x2SConvertI32x4High(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
break;
}
case kX64I64x2UConvertI32x4Low: {
@@ -2901,17 +2904,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I64x2UConvertI32x4High: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- __ vpunpckhdq(dst, src, kScratchDoubleReg);
- } else {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pshufd(dst, src, 0xEE);
- __ pmovzxdq(dst, dst);
- }
+ __ I64x2UConvertI32x4High(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
break;
}
case kX64I32x4Splat: {
@@ -3106,31 +3100,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I32x4ExtAddPairwiseI16x8S: {
XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- // kScratchDoubleReg = |1|1|1|1|1|1|1|1|
- __ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- __ Psrlw(kScratchDoubleReg, byte{15});
- // pmaddwd multiplies signed words in kScratchDoubleReg and src, producing
- // signed doublewords, then adds pairwise.
- // src = |a|b|c|d|e|f|g|h|
+ XMMRegister src1 = i.InputSimd128Register(0);
+ // pmaddwd multiplies signed words in src1 and src2, producing signed
+ // doublewords, then adds pairwise.
+ // src1 = |a|b|c|d|e|f|g|h|
+ // src2 = |1|1|1|1|1|1|1|1|
// dst = | a*1 + b*1 | c*1 + d*1 | e*1 + f*1 | g*1 + h*1 |
- __ Pmaddwd(dst, src, kScratchDoubleReg);
+ Operand src2 = __ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i16x8_splat_0x0001());
+ __ Pmaddwd(dst, src1, src2);
break;
}
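      // The rewrite above drops the materialized all-ones splat: pmaddwd with a
      // constant i16x8 splat of 1 (now read via
      // ExternalReference::address_of_wasm_i16x8_splat_0x0001) multiplies each
      // signed 16-bit lane by 1 and adds adjacent products, which is exactly
      // the pairwise widening add. For example:
      //   src = | -3 |  7 | 100 | -100 | 1 | 2 | 3 | 4 |   (i16 lanes)
      //   dst = |    4    |      0     |    3  |    7  |   (i32 lanes)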
case kX64I32x4ExtAddPairwiseI16x8U: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
-
- // src = |a|b|c|d|e|f|g|h|
- // kScratchDoubleReg = i32x4.splat(0x0000FFFF)
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psrld(kScratchDoubleReg, byte{16});
- // kScratchDoubleReg =|0|b|0|d|0|f|0|h|
- __ Pand(kScratchDoubleReg, src);
- // dst = |0|a|0|c|0|e|0|g|
- __ Psrld(dst, src, byte{16});
- // dst = |a+b|c+d|e+f|g+h|
- __ Paddd(dst, kScratchDoubleReg);
+ __ I32x4ExtAddPairwiseI16x8U(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
break;
}
case kX64S128Const: {
@@ -3244,9 +3227,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I16x8Ne: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ Pcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Pcmpeqw(dst, i.InputSimd128Register(1));
__ Pcmpeqw(tmp, tmp);
- __ Pxor(i.OutputSimd128Register(), tmp);
+ __ Pxor(dst, tmp);
break;
}
case kX64I16x8GtS: {
@@ -3352,35 +3336,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I16x8ExtAddPairwiseI8x16S: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- DCHECK_NE(dst, src);
- // dst = i8x16.splat(1)
- __ Move(dst, uint32_t{0x01010101});
- __ Pshufd(dst, dst, byte{0});
- __ Pmaddubsw(dst, dst, src);
+ __ I16x8ExtAddPairwiseI8x16S(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
break;
}
case kX64I16x8ExtAddPairwiseI8x16U: {
XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- // dst = i8x16.splat(1)
- __ Move(kScratchDoubleReg, uint32_t{0x01010101});
- __ Pshufd(kScratchDoubleReg, kScratchDoubleReg, byte{0});
- __ Pmaddubsw(dst, src, kScratchDoubleReg);
+ XMMRegister src1 = i.InputSimd128Register(0);
+ Operand src2 = __ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x01());
+ __ Pmaddubsw(dst, src1, src2);
break;
}
case kX64I16x8Q15MulRSatS: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src0 = i.InputSimd128Register(0);
- XMMRegister src1 = i.InputSimd128Register(1);
- // k = i16x8.splat(0x8000)
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psllw(kScratchDoubleReg, byte{15});
-
- __ Pmulhrsw(dst, src0, src1);
- __ Pcmpeqw(kScratchDoubleReg, dst);
- __ Pxor(dst, kScratchDoubleReg);
+ __ I16x8Q15MulRSatS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kX64I8x16Splat: {
@@ -3586,9 +3556,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I8x16Ne: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ Pcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Pcmpeqb(dst, i.InputSimd128Register(1));
__ Pcmpeqb(tmp, tmp);
- __ Pxor(i.OutputSimd128Register(), tmp);
+ __ Pxor(dst, tmp);
break;
}
case kX64I8x16GtS: {
@@ -3758,14 +3729,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
if (dst == src) {
- __ Movdqa(kScratchDoubleReg, dst);
- __ Pcmpeqd(dst, dst);
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ Pxor(dst, kScratchDoubleReg);
} else {
__ Pcmpeqd(dst, dst);
__ Pxor(dst, src);
}
-
break;
}
case kX64S128Select: {
@@ -3782,16 +3751,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16Swizzle: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister mask = i.TempSimd128Register(0);
-
- // Out-of-range indices should return 0, add 112 so that any value > 15
- // saturates to 128 (top bit set), so pshufb will zero that lane.
- __ Move(mask, uint32_t{0x70707070});
- __ Pshufd(mask, mask, uint8_t{0x0});
- __ Paddusb(mask, i.InputSimd128Register(1));
- __ Pshufb(dst, mask);
+ __ I8x16Swizzle(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kX64I8x16Shuffle: {
@@ -3841,6 +3802,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64I8x16Popcnt: {
+ __ I8x16Popcnt(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.TempSimd128Register(0));
+ break;
+ }
case kX64S128Load8Splat: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
XMMRegister dst = i.OutputSimd128Register();
@@ -3919,12 +3885,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
uint8_t lane = i.InputUint8(index + 1);
- if (lane == 0) {
- __ Movss(operand, i.InputSimd128Register(index));
- } else {
- DCHECK_GE(3, lane);
- __ Extractps(operand, i.InputSimd128Register(index), lane);
- }
+ __ S128Store32Lane(operand, i.InputSimd128Register(index), lane);
break;
}
case kX64S128Store64Lane: {
@@ -3932,12 +3893,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
uint8_t lane = i.InputUint8(index + 1);
- if (lane == 0) {
- __ Movlps(operand, i.InputSimd128Register(index));
- } else {
- DCHECK_EQ(1, lane);
- __ Movhps(operand, i.InputSimd128Register(index));
- }
+ __ S128Store64Lane(operand, i.InputSimd128Register(index), lane);
break;
}
case kX64Shufps: {
@@ -4156,9 +4112,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Por(dst, kScratchDoubleReg);
break;
}
- case kX64V32x4AnyTrue:
- case kX64V16x8AnyTrue:
- case kX64V8x16AnyTrue: {
+ case kX64V128AnyTrue: {
Register dst = i.OutputRegister();
XMMRegister src = i.InputSimd128Register(0);
@@ -4171,6 +4125,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// comparison instruction used matters, e.g. given 0xff00, pcmpeqb returns
// 0x0011, pcmpeqw returns 0x0000, ptest will set ZF to 0 and 1
// respectively.
+ case kX64V64x2AllTrue: {
+ ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqq);
+ break;
+ }
case kX64V32x4AllTrue: {
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqd);
break;
@@ -4183,6 +4141,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqb);
break;
}
+ case kX64Prefetch:
+ __ prefetch(i.MemoryOperand(), 1);
+ break;
+ case kX64PrefetchNta:
+ __ prefetch(i.MemoryOperand(), 0);
+ break;
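      // Assumption: the second argument is the locality hint passed to the x64
      // prefetch encoding, with hint 0 expected to emit prefetchnta and a
      // non-zero hint one of the temporal prefetcht* forms, so kX64Prefetch
      // maps to the temporal variant and kX64PrefetchNta to the non-temporal
      // one.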
case kWord32AtomicExchangeInt8: {
__ xchgb(i.InputRegister(0), i.MemoryOperand(1));
__ movsxbl(i.InputRegister(0), i.InputRegister(0));
@@ -4333,14 +4297,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kWord32AtomicStoreWord8:
case kWord32AtomicStoreWord16:
case kWord32AtomicStoreWord32:
- case kX64Word64AtomicLoadUint8:
- case kX64Word64AtomicLoadUint16:
- case kX64Word64AtomicLoadUint32:
- case kX64Word64AtomicLoadUint64:
- case kX64Word64AtomicStoreWord8:
- case kX64Word64AtomicStoreWord16:
- case kX64Word64AtomicStoreWord32:
- case kX64Word64AtomicStoreWord64:
UNREACHABLE(); // Won't be generated by instruction selector.
break;
}
@@ -4663,7 +4619,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ near_call(wasm::WasmCode::kWasmStackOverflow,
RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
- RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ RecordSafepoint(reference_map);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
__ bind(&done);
}
@@ -4758,7 +4714,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
Register argc_reg = rcx;
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// Functions with JS linkage have at least one parameter (the receiver).
// If {parameter_count} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
@@ -4766,9 +4721,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
parameter_count != 0;
-#else
- const bool drop_jsargs = false;
-#endif
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index 375a81d096..6c48a04ea1 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -174,6 +174,9 @@ namespace compiler {
V(X64F64x2Pmin) \
V(X64F64x2Pmax) \
V(X64F64x2Round) \
+ V(X64F64x2ConvertLowI32x4S) \
+ V(X64F64x2ConvertLowI32x4U) \
+ V(X64F64x2PromoteLowF32x4) \
V(X64F32x4Splat) \
V(X64F32x4ExtractLane) \
V(X64F32x4ReplaceLane) \
@@ -200,8 +203,10 @@ namespace compiler {
V(X64F32x4Pmin) \
V(X64F32x4Pmax) \
V(X64F32x4Round) \
+ V(X64F32x4DemoteF64x2Zero) \
V(X64I64x2Splat) \
V(X64I64x2ExtractLane) \
+ V(X64I64x2Abs) \
V(X64I64x2Neg) \
V(X64I64x2BitMask) \
V(X64I64x2Shl) \
@@ -210,6 +215,9 @@ namespace compiler {
V(X64I64x2Sub) \
V(X64I64x2Mul) \
V(X64I64x2Eq) \
+ V(X64I64x2GtS) \
+ V(X64I64x2GeS) \
+ V(X64I64x2Ne) \
V(X64I64x2ShrU) \
V(X64I64x2SignSelect) \
V(X64I64x2ExtMulLowI32x4S) \
@@ -256,6 +264,8 @@ namespace compiler {
V(X64I32x4ExtMulHighI16x8U) \
V(X64I32x4ExtAddPairwiseI16x8S) \
V(X64I32x4ExtAddPairwiseI16x8U) \
+ V(X64I32x4TruncSatF64x2SZero) \
+ V(X64I32x4TruncSatF64x2UZero) \
V(X64I16x8Splat) \
V(X64I16x8ExtractLaneS) \
V(X64I16x8SConvertI8x16Low) \
@@ -343,6 +353,7 @@ namespace compiler {
V(X64S128AndNot) \
V(X64I8x16Swizzle) \
V(X64I8x16Shuffle) \
+ V(X64I8x16Popcnt) \
V(X64S128Load8Splat) \
V(X64S128Load16Splat) \
V(X64S128Load32Splat) \
@@ -382,20 +393,13 @@ namespace compiler {
V(X64S8x8Reverse) \
V(X64S8x4Reverse) \
V(X64S8x2Reverse) \
- V(X64V32x4AnyTrue) \
+ V(X64V128AnyTrue) \
+ V(X64V64x2AllTrue) \
V(X64V32x4AllTrue) \
- V(X64V16x8AnyTrue) \
V(X64V16x8AllTrue) \
- V(X64V8x16AnyTrue) \
V(X64V8x16AllTrue) \
- V(X64Word64AtomicLoadUint8) \
- V(X64Word64AtomicLoadUint16) \
- V(X64Word64AtomicLoadUint32) \
- V(X64Word64AtomicLoadUint64) \
- V(X64Word64AtomicStoreWord8) \
- V(X64Word64AtomicStoreWord16) \
- V(X64Word64AtomicStoreWord32) \
- V(X64Word64AtomicStoreWord64) \
+ V(X64Prefetch) \
+ V(X64PrefetchNta) \
V(X64Word64AtomicAddUint8) \
V(X64Word64AtomicAddUint16) \
V(X64Word64AtomicAddUint32) \
diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index e9ed7c9e85..2ecbab8f50 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -150,6 +150,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F64x2Pmin:
case kX64F64x2Pmax:
case kX64F64x2Round:
+ case kX64F64x2ConvertLowI32x4S:
+ case kX64F64x2ConvertLowI32x4U:
+ case kX64F64x2PromoteLowF32x4:
case kX64F32x4Splat:
case kX64F32x4ExtractLane:
case kX64F32x4ReplaceLane:
@@ -176,8 +179,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F32x4Pmin:
case kX64F32x4Pmax:
case kX64F32x4Round:
+ case kX64F32x4DemoteF64x2Zero:
case kX64I64x2Splat:
case kX64I64x2ExtractLane:
+ case kX64I64x2Abs:
case kX64I64x2Neg:
case kX64I64x2BitMask:
case kX64I64x2Shl:
@@ -186,6 +191,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I64x2Sub:
case kX64I64x2Mul:
case kX64I64x2Eq:
+ case kX64I64x2GtS:
+ case kX64I64x2GeS:
+ case kX64I64x2Ne:
case kX64I64x2ShrU:
case kX64I64x2SignSelect:
case kX64I64x2ExtMulLowI32x4S:
@@ -232,6 +240,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4ExtMulHighI16x8U:
case kX64I32x4ExtAddPairwiseI16x8S:
case kX64I32x4ExtAddPairwiseI16x8U:
+ case kX64I32x4TruncSatF64x2SZero:
+ case kX64I32x4TruncSatF64x2UZero:
case kX64I16x8Splat:
case kX64I16x8ExtractLaneS:
case kX64I16x8SConvertI8x16Low:
@@ -311,12 +321,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64S128Zero:
case kX64S128AllOnes:
case kX64S128AndNot:
- case kX64V32x4AnyTrue:
+ case kX64V64x2AllTrue:
case kX64V32x4AllTrue:
- case kX64V16x8AnyTrue:
case kX64V16x8AllTrue:
case kX64I8x16Swizzle:
case kX64I8x16Shuffle:
+ case kX64I8x16Popcnt:
case kX64Shufps:
case kX64S32x4Rotate:
case kX64S32x4Swizzle:
@@ -344,7 +354,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64S8x8Reverse:
case kX64S8x4Reverse:
case kX64S8x2Reverse:
- case kX64V8x16AnyTrue:
+ case kX64V128AnyTrue:
case kX64V8x16AllTrue:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
@@ -417,18 +427,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64MFence:
case kX64LFence:
+ case kX64Prefetch:
+ case kX64PrefetchNta:
return kHasSideEffect;
- case kX64Word64AtomicLoadUint8:
- case kX64Word64AtomicLoadUint16:
- case kX64Word64AtomicLoadUint32:
- case kX64Word64AtomicLoadUint64:
- return kIsLoadOperation;
-
- case kX64Word64AtomicStoreWord8:
- case kX64Word64AtomicStoreWord16:
- case kX64Word64AtomicStoreWord32:
- case kX64Word64AtomicStoreWord64:
case kX64Word64AtomicAddUint8:
case kX64Word64AtomicAddUint16:
case kX64Word64AtomicAddUint32:
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index e2d8cf27bf..5508357675 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -10,7 +10,9 @@
#include "src/base/platform/wrappers.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/machine-type.h"
+#include "src/compiler/backend/instruction-codes.h"
#include "src/compiler/backend/instruction-selector-impl.h"
+#include "src/compiler/backend/instruction.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -383,7 +385,7 @@ void InstructionSelector::VisitLoadLane(Node* node) {
// x64 supports unaligned loads.
DCHECK_NE(params.kind, MemoryAccessKind::kUnaligned);
if (params.kind == MemoryAccessKind::kProtected) {
- opcode |= MiscField::encode(kMemoryAccessProtected);
+ opcode |= AccessModeField::encode(kMemoryAccessProtected);
}
Emit(opcode, 1, outputs, input_count, inputs);
}
@@ -435,7 +437,7 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
DCHECK_NE(params.kind, MemoryAccessKind::kUnaligned);
InstructionCode code = opcode;
if (params.kind == MemoryAccessKind::kProtected) {
- code |= MiscField::encode(kMemoryAccessProtected);
+ code |= AccessModeField::encode(kMemoryAccessProtected);
}
VisitLoad(node, node, code);
}
@@ -450,10 +452,10 @@ void InstructionSelector::VisitLoad(Node* node, Node* value,
g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
if (node->opcode() == IrOpcode::kProtectedLoad) {
- code |= MiscField::encode(kMemoryAccessProtected);
+ code |= AccessModeField::encode(kMemoryAccessProtected);
} else if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- code |= MiscField::encode(kMemoryAccessPoisoned);
+ code |= AccessModeField::encode(kMemoryAccessPoisoned);
}
Emit(code, 1, outputs, input_count, inputs);
}
@@ -528,7 +530,7 @@ void InstructionSelector::VisitProtectedStore(Node* node) {
AddressingMode addressing_mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
- MiscField::encode(kMemoryAccessProtected);
+ AccessModeField::encode(kMemoryAccessProtected);
InstructionOperand value_operand =
g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
inputs[input_count++] = value_operand;
@@ -565,7 +567,7 @@ void InstructionSelector::VisitStoreLane(Node* node) {
opcode |= AddressingModeField::encode(addressing_mode);
if (params.kind == MemoryAccessKind::kProtected) {
- opcode |= MiscField::encode(kMemoryAccessProtected);
+ opcode |= AccessModeField::encode(kMemoryAccessProtected);
}
InstructionOperand value_operand = g.UseRegister(node->InputAt(2));
@@ -575,6 +577,30 @@ void InstructionSelector::VisitStoreLane(Node* node) {
Emit(opcode, 0, nullptr, input_count, inputs);
}
+void InstructionSelector::VisitPrefetchTemporal(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand inputs[2];
+ size_t input_count = 0;
+ InstructionCode opcode = kX64Prefetch;
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ DCHECK_LE(input_count, 2);
+ opcode |= AddressingModeField::encode(addressing_mode);
+ Emit(opcode, 0, nullptr, input_count, inputs);
+}
+
+void InstructionSelector::VisitPrefetchNonTemporal(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand inputs[2];
+ size_t input_count = 0;
+ InstructionCode opcode = kX64PrefetchNta;
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ DCHECK_LE(input_count, 2);
+ opcode |= AddressingModeField::encode(addressing_mode);
+ Emit(opcode, 0, nullptr, input_count, inputs);
+}
+
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
@@ -1795,29 +1821,33 @@ void InstructionSelector::EmitPrepareArguments(
} else {
// Push any stack arguments.
int effect_level = GetEffectLevel(node);
+ int stack_decrement = 0;
for (PushParameter input : base::Reversed(*arguments)) {
- // Skip any alignment holes in pushed nodes. We may have one in case of a
- // Simd128 stack argument.
+ stack_decrement += kSystemPointerSize;
+ // Skip holes in the param array. These represent both extra slots for
+ // multi-slot values and padding slots for alignment.
if (input.node == nullptr) continue;
+ InstructionOperand decrement = g.UseImmediate(stack_decrement);
+ stack_decrement = 0;
if (g.CanBeImmediate(input.node)) {
- Emit(kX64Push, g.NoOutput(), g.UseImmediate(input.node));
+ Emit(kX64Push, g.NoOutput(), decrement, g.UseImmediate(input.node));
} else if (IsSupported(ATOM) ||
sequence()->IsFP(GetVirtualRegister(input.node))) {
// TODO(titzer): X64Push cannot handle stack->stack double moves
// because there is no way to encode fixed double slots.
- Emit(kX64Push, g.NoOutput(), g.UseRegister(input.node));
+ Emit(kX64Push, g.NoOutput(), decrement, g.UseRegister(input.node));
} else if (g.CanBeMemoryOperand(kX64Push, node, input.node,
effect_level)) {
InstructionOperand outputs[1];
- InstructionOperand inputs[4];
+ InstructionOperand inputs[5];
size_t input_count = 0;
- InstructionCode opcode = kX64Push;
+ inputs[input_count++] = decrement;
AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
input.node, inputs, &input_count);
- opcode |= AddressingModeField::encode(mode);
+ InstructionCode opcode = kX64Push | AddressingModeField::encode(mode);
Emit(opcode, 0, outputs, input_count, inputs);
} else {
- Emit(kX64Push, g.NoOutput(), g.UseAny(input.node));
+ Emit(kX64Push, g.NoOutput(), decrement, g.UseAny(input.node));
}
}
}
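
The rewritten push loop above folds the slots of skipped holes into the decrement that is attached to the next real push. Below is a minimal standalone sketch of that accounting, illustrative only: PushParameter and kSystemPointerSize here are made-up stand-ins, not V8's types.

#include <cstdint>
#include <iostream>
#include <vector>

namespace {

constexpr int kSystemPointerSize = 8;  // assumed 64-bit target

struct PushParameter {
  bool is_hole;  // padding slot or extra slot of a multi-slot value
  int value;
};

}  // namespace

int main() {
  // Two real arguments separated by one alignment hole, pushed in reverse.
  std::vector<PushParameter> args = {{false, 1}, {true, 0}, {false, 2}};
  int stack_decrement = 0;
  for (auto it = args.rbegin(); it != args.rend(); ++it) {
    stack_decrement += kSystemPointerSize;
    if (it->is_hole) continue;  // fold this slot into the next real push
    std::cout << "push " << it->value << " with stack decrement "
              << stack_decrement << "\n";
    stack_decrement = 0;  // the emitted push consumed the pending decrement
  }
  return 0;
}

Run on the example input, the hole's 8 bytes are absorbed into the second emitted push (decrement 16), which is exactly the bookkeeping the new immediate operand carries.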
@@ -1850,8 +1880,6 @@ void InstructionSelector::EmitPrepareResults(
bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
-int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
-
namespace {
void VisitCompareWithMemoryOperand(InstructionSelector* selector,
@@ -2894,6 +2922,7 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16GeU)
#define SIMD_BINOP_ONE_TEMP_LIST(V) \
+ V(I64x2Ne) \
V(I32x4Ne) \
V(I32x4GtU) \
V(I16x8Ne) \
@@ -2903,12 +2932,15 @@ VISIT_ATOMIC_BINOP(Xor)
#define SIMD_UNOP_LIST(V) \
V(F64x2Sqrt) \
+ V(F64x2ConvertLowI32x4S) \
+ V(F64x2PromoteLowF32x4) \
V(F32x4SConvertI32x4) \
V(F32x4Abs) \
V(F32x4Neg) \
V(F32x4Sqrt) \
V(F32x4RecipApprox) \
V(F32x4RecipSqrtApprox) \
+ V(F32x4DemoteF64x2Zero) \
V(I64x2Neg) \
V(I64x2BitMask) \
V(I64x2SConvertI32x4Low) \
@@ -2947,12 +2979,8 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16Shl) \
V(I8x16ShrU)
-#define SIMD_ANYTRUE_LIST(V) \
- V(V32x4AnyTrue) \
- V(V16x8AnyTrue) \
- V(V8x16AnyTrue)
-
#define SIMD_ALLTRUE_LIST(V) \
+ V(V64x2AllTrue) \
V(V32x4AllTrue) \
V(V16x8AllTrue) \
V(V8x16AllTrue)
@@ -3142,15 +3170,11 @@ SIMD_BINOP_ONE_TEMP_LIST(VISIT_SIMD_BINOP_ONE_TEMP)
#undef VISIT_SIMD_BINOP_ONE_TEMP
#undef SIMD_BINOP_ONE_TEMP_LIST
-#define VISIT_SIMD_ANYTRUE(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- X64OperandGenerator g(this); \
- Emit(kX64##Opcode, g.DefineAsRegister(node), \
- g.UseUniqueRegister(node->InputAt(0))); \
- }
-SIMD_ANYTRUE_LIST(VISIT_SIMD_ANYTRUE)
-#undef VISIT_SIMD_ANYTRUE
-#undef SIMD_ANYTRUE_LIST
+void InstructionSelector::VisitV128AnyTrue(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64V128AnyTrue, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)));
+}
#define VISIT_SIMD_ALLTRUE(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -3628,10 +3652,9 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) {
void InstructionSelector::VisitI8x16Swizzle(Node* node) {
X64OperandGenerator g(this);
- InstructionOperand temps[] = {g.TempSimd128Register()};
- Emit(kX64I8x16Swizzle, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
- arraysize(temps), temps);
+ Emit(kX64I8x16Swizzle,
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
namespace {
@@ -3693,6 +3716,85 @@ void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16U(Node* node) {
Emit(kX64I16x8ExtAddPairwiseI8x16U, dst, g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitI8x16Popcnt(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand dst = CpuFeatures::IsSupported(AVX)
+ ? g.DefineAsRegister(node)
+ : g.DefineAsRegister(node);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ Emit(kX64I8x16Popcnt, dst, g.UseUniqueRegister(node->InputAt(0)),
+ arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitF64x2ConvertLowI32x4U(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand dst =
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
+ Emit(kX64F64x2ConvertLowI32x4U, dst, g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitI32x4TruncSatF64x2SZero(Node* node) {
+ X64OperandGenerator g(this);
+ if (CpuFeatures::IsSupported(AVX)) {
+ // Requires dst != src.
+ Emit(kX64I32x4TruncSatF64x2SZero, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)));
+ } else {
+ Emit(kX64I32x4TruncSatF64x2SZero, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)));
+ }
+}
+
+void InstructionSelector::VisitI32x4TruncSatF64x2UZero(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand dst = CpuFeatures::IsSupported(AVX)
+ ? g.DefineAsRegister(node)
+ : g.DefineSameAsFirst(node);
+ Emit(kX64I32x4TruncSatF64x2UZero, dst, g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitI64x2GtS(Node* node) {
+ X64OperandGenerator g(this);
+ if (CpuFeatures::IsSupported(AVX)) {
+ Emit(kX64I64x2GtS, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+ } else if (CpuFeatures::IsSupported(SSE4_2)) {
+ Emit(kX64I64x2GtS, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+ } else {
+ Emit(kX64I64x2GtS, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+ }
+}
+
+void InstructionSelector::VisitI64x2GeS(Node* node) {
+ X64OperandGenerator g(this);
+ if (CpuFeatures::IsSupported(AVX)) {
+ Emit(kX64I64x2GeS, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+ } else if (CpuFeatures::IsSupported(SSE4_2)) {
+ Emit(kX64I64x2GeS, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+ } else {
+ Emit(kX64I64x2GeS, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+ }
+}
+
+void InstructionSelector::VisitI64x2Abs(Node* node) {
+ X64OperandGenerator g(this);
+ if (CpuFeatures::IsSupported(AVX)) {
+ Emit(kX64I64x2Abs, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)));
+ } else {
+ Emit(kX64I64x2Abs, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)));
+ }
+}
+
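+
+Several of the new SIMD visitors above choose the output constraint based on AVX support: AVX encodings take a separate destination register, while the SSE forms are destructive and must write over their first source. A tiny hedged sketch of that decision follows; the enum is a stand-in, not V8's OperandGenerator API.
+
+#include <cassert>
+
+enum class DstConstraint {
+  kAnyRegister,       // AVX: non-destructive three-operand encoding
+  kSameAsFirstInput,  // SSE: destructive, dst must alias the first source
+};
+
+DstConstraint PickDstConstraint(bool has_avx) {
+  return has_avx ? DstConstraint::kAnyRegister
+                 : DstConstraint::kSameAsFirstInput;
+}
+
+int main() {
+  assert(PickDstConstraint(true) == DstConstraint::kAnyRegister);
+  assert(PickDstConstraint(false) == DstConstraint::kSameAsFirstInput);
+  return 0;
+}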
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.cc b/deps/v8/src/compiler/basic-block-instrumentor.cc
index 2d9b026dfa..86ec47979f 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.cc
+++ b/deps/v8/src/compiler/basic-block-instrumentor.cc
@@ -92,7 +92,8 @@ BasicBlockProfilerData* BasicBlockInstrumentor::Instrument(
} else {
counters_array = graph->NewNode(PointerConstant(&common, data->counts()));
}
- Node* one = graph->NewNode(common.Float64Constant(1));
+ Node* zero = graph->NewNode(common.Int32Constant(0));
+ Node* one = graph->NewNode(common.Int32Constant(1));
BasicBlockVector* blocks = schedule->rpo_order();
size_t block_number = 0;
for (BasicBlockVector::iterator it = blocks->begin(); block_number < n_blocks;
@@ -104,26 +105,37 @@ BasicBlockProfilerData* BasicBlockInstrumentor::Instrument(
// It is unnecessary to wire effect and control deps for load and store
// since this happens after scheduling.
// Construct increment operation.
- int offset_to_counter_value = static_cast<int>(block_number) * kDoubleSize;
+ int offset_to_counter_value = static_cast<int>(block_number) * kInt32Size;
if (on_heap_counters) {
offset_to_counter_value += ByteArray::kHeaderSize - kHeapObjectTag;
}
Node* offset_to_counter =
graph->NewNode(IntPtrConstant(&common, offset_to_counter_value));
Node* load =
- graph->NewNode(machine.Load(MachineType::Float64()), counters_array,
+ graph->NewNode(machine.Load(MachineType::Uint32()), counters_array,
offset_to_counter, graph->start(), graph->start());
- Node* inc = graph->NewNode(machine.Float64Add(), load, one);
- Node* store = graph->NewNode(
- machine.Store(StoreRepresentation(MachineRepresentation::kFloat64,
- kNoWriteBarrier)),
- counters_array, offset_to_counter, inc, graph->start(), graph->start());
+ Node* inc = graph->NewNode(machine.Int32Add(), load, one);
+
+ // Branchless saturation, because we've already run the scheduler, so
+ // introducing extra control flow here would be surprising.
+ Node* overflow = graph->NewNode(machine.Uint32LessThan(), inc, load);
+ Node* overflow_mask = graph->NewNode(machine.Int32Sub(), zero, overflow);
+ Node* saturated_inc =
+ graph->NewNode(machine.Word32Or(), inc, overflow_mask);
+
+ Node* store =
+ graph->NewNode(machine.Store(StoreRepresentation(
+ MachineRepresentation::kWord32, kNoWriteBarrier)),
+ counters_array, offset_to_counter, saturated_inc,
+ graph->start(), graph->start());
// Insert the new nodes.
- static const int kArraySize = 6;
- Node* to_insert[kArraySize] = {counters_array, one, offset_to_counter,
- load, inc, store};
- // The first two Nodes are constant across all blocks.
- int insertion_start = block_number == 0 ? 0 : 2;
+ static const int kArraySize = 10;
+ Node* to_insert[kArraySize] = {
+ counters_array, zero, one, offset_to_counter,
+ load, inc, overflow, overflow_mask,
+ saturated_inc, store};
+ // The first three Nodes are constant across all blocks.
+ int insertion_start = block_number == 0 ? 0 : 3;
NodeVector::iterator insertion_point = FindInsertionPoint(block);
block->InsertNodes(insertion_point, &to_insert[insertion_start],
&to_insert[kArraySize]);
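
The instrumentation now stores block counters as 32-bit integers and saturates them without branches; the three machine nodes added above compute inc | -(inc < old). A standalone sketch of the same arithmetic, in plain C++ rather than TurboFan nodes:

#include <cassert>
#include <cstdint>

uint32_t SaturatingIncrement(uint32_t counter) {
  uint32_t inc = counter + 1;                   // may wrap to 0
  uint32_t overflow = inc < counter ? 1u : 0u;  // Uint32LessThan(inc, load)
  uint32_t overflow_mask = 0u - overflow;       // Int32Sub(zero, overflow)
  return inc | overflow_mask;                   // Word32Or(inc, overflow_mask)
}

int main() {
  assert(SaturatingIncrement(41) == 42);
  assert(SaturatingIncrement(0xFFFFFFFEu) == 0xFFFFFFFFu);
  assert(SaturatingIncrement(0xFFFFFFFFu) == 0xFFFFFFFFu);  // stays saturated
  return 0;
}

Once the counter reaches UINT32_MAX the mask is all ones, so further increments leave it pinned there instead of wrapping back to zero.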
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index e76189a31f..8489a72658 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -79,7 +79,7 @@ ResumeJumpTarget ResumeJumpTarget::AtLoopHeader(int loop_header_offset,
}
BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array,
- Zone* zone, BailoutId osr_bailout_id,
+ Zone* zone, BytecodeOffset osr_bailout_id,
bool analyze_liveness)
: bytecode_array_(bytecode_array),
zone_(zone),
@@ -166,6 +166,11 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState* in_liveness,
}
}
+ if (Bytecodes::WritesImplicitRegister(bytecode)) {
+ in_liveness->MarkRegisterDead(
+ interpreter::Register::FromShortStar(bytecode).index());
+ }
+
if (Bytecodes::ReadsAccumulator(bytecode)) {
in_liveness->MarkAccumulatorLive();
}
@@ -308,6 +313,10 @@ void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments* assignments,
break;
}
}
+
+ if (Bytecodes::WritesImplicitRegister(bytecode)) {
+ assignments->Add(interpreter::Register::FromShortStar(bytecode));
+ }
}
} // namespace
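
The liveness and assignment updates above account for short Star bytecodes, which name their destination register implicitly in the opcode rather than in an operand. A hedged standalone sketch of how such an implicit write feeds backwards liveness; the toy enum stands in for V8's Bytecode and interpreter::Register types.

#include <bitset>
#include <iostream>

enum class Bytecode { kStar0, kStar1, kStar2, kLdar };

bool WritesImplicitRegister(Bytecode b) {
  return b == Bytecode::kStar0 || b == Bytecode::kStar1 ||
         b == Bytecode::kStar2;
}

int RegisterFromShortStar(Bytecode b) {
  // Star<N> writes register r<N>; the index is recovered from the opcode.
  return static_cast<int>(b) - static_cast<int>(Bytecode::kStar0);
}

int main() {
  std::bitset<8> live_in;
  live_in.set(1);  // suppose r1 is live after the current bytecode
  Bytecode current = Bytecode::kStar1;
  if (WritesImplicitRegister(current)) {
    // Walking backwards: a write kills the register in the in-liveness.
    live_in.reset(RegisterFromShortStar(current));
  }
  std::cout << "r1 live before Star1: " << live_in.test(1) << "\n";  // 0
  return 0;
}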
diff --git a/deps/v8/src/compiler/bytecode-analysis.h b/deps/v8/src/compiler/bytecode-analysis.h
index a05194f832..0e9043a16a 100644
--- a/deps/v8/src/compiler/bytecode-analysis.h
+++ b/deps/v8/src/compiler/bytecode-analysis.h
@@ -99,7 +99,7 @@ struct V8_EXPORT_PRIVATE LoopInfo {
class V8_EXPORT_PRIVATE BytecodeAnalysis : public ZoneObject {
public:
BytecodeAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone,
- BailoutId osr_bailout_id, bool analyze_liveness);
+ BytecodeOffset osr_bailout_id, bool analyze_liveness);
BytecodeAnalysis(const BytecodeAnalysis&) = delete;
BytecodeAnalysis& operator=(const BytecodeAnalysis&) = delete;
@@ -128,7 +128,7 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis : public ZoneObject {
return osr_entry_point_;
}
// Return the osr_bailout_id (for verification purposes).
- BailoutId osr_bailout_id() const { return osr_bailout_id_; }
+ BytecodeOffset osr_bailout_id() const { return osr_bailout_id_; }
// Return whether liveness analysis was performed (for verification purposes).
bool liveness_analyzed() const { return analyze_liveness_; }
@@ -167,7 +167,7 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis : public ZoneObject {
Handle<BytecodeArray> const bytecode_array_;
Zone* const zone_;
- BailoutId const osr_bailout_id_;
+ BytecodeOffset const osr_bailout_id_;
bool const analyze_liveness_;
ZoneStack<LoopStackEntry> loop_stack_;
ZoneVector<int> loop_end_index_queue_;
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 52acfc847e..54996bb475 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -15,6 +15,7 @@
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-observer.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/state-values-utils.h"
@@ -38,11 +39,12 @@ class BytecodeGraphBuilder {
NativeContextRef const& native_context,
SharedFunctionInfoRef const& shared_info,
FeedbackCellRef const& feedback_cell,
- BailoutId osr_offset, JSGraph* jsgraph,
+ BytecodeOffset osr_offset, JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions, int inlining_id,
CodeKind code_kind, BytecodeGraphBuilderFlags flags,
- TickCounter* tick_counter);
+ TickCounter* tick_counter,
+ ObserveNodeInfo const& observe_node_info);
BytecodeGraphBuilder(const BytecodeGraphBuilder&) = delete;
BytecodeGraphBuilder& operator=(const BytecodeGraphBuilder&) = delete;
@@ -207,10 +209,54 @@ class BytecodeGraphBuilder {
// Prepare information for lazy deoptimization. This information is attached
// to the given node and the output value produced by the node is combined.
- // Conceptually this frame state is "after" a given operation.
- void PrepareFrameState(Node* node, OutputFrameStateCombine combine);
+ //
+ // The low-level chokepoint - use the variants below instead.
void PrepareFrameState(Node* node, OutputFrameStateCombine combine,
- BailoutId bailout_id);
+ BytecodeOffset bailout_id,
+ const BytecodeLivenessState* liveness);
+
+ // In the common case, frame states are conceptually "after" a given
+ // operation and at the current bytecode offset.
+ void PrepareFrameState(Node* node, OutputFrameStateCombine combine) {
+ if (!OperatorProperties::HasFrameStateInput(node->op())) return;
+ const int offset = bytecode_iterator().current_offset();
+ return PrepareFrameState(node, combine, BytecodeOffset(offset),
+ bytecode_analysis().GetOutLivenessFor(offset));
+ }
+
+ // For function-entry stack checks, they're conceptually "before" the first
+ // bytecode and at a special marker bytecode offset.
+ // In the case of FE stack checks, the current bytecode is also the first
+ // bytecode, so we use a special marker bytecode offset to signify a virtual
+ // bytecode before the first physical bytecode.
+ void PrepareFrameStateForFunctionEntryStackCheck(Node* node) {
+ DCHECK_EQ(bytecode_iterator().current_offset(), 0);
+ DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
+ DCHECK(node->opcode() == IrOpcode::kJSStackCheck);
+ return PrepareFrameState(node, OutputFrameStateCombine::Ignore(),
+ BytecodeOffset(kFunctionEntryBytecodeOffset),
+ bytecode_analysis().GetInLivenessFor(0));
+ }
+
+ // For OSR-entry stack checks, they're conceptually "before" the first
+ // bytecode of the current loop. We implement this in a similar manner to
+ // function-entry (FE) stack checks above, i.e. we deopt at the predecessor
+ // of the current bytecode.
+ // In the case of OSR-entry stack checks, a physical predecessor bytecode
+ // exists: the JumpLoop bytecode. We attach to JumpLoop by using
+ // `bytecode_analysis().osr_bailout_id()` instead of current_offset (the
+ // former points at JumpLoop, the latter at the loop header, i.e. the target
+ // of JumpLoop).
+ void PrepareFrameStateForOSREntryStackCheck(Node* node) {
+ DCHECK_EQ(bytecode_iterator().current_offset(),
+ bytecode_analysis().osr_entry_point());
+ DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
+ DCHECK(node->opcode() == IrOpcode::kJSStackCheck);
+ const int offset = bytecode_analysis().osr_bailout_id().ToInt();
+ return PrepareFrameState(node, OutputFrameStateCombine::Ignore(),
+ BytecodeOffset(offset),
+ bytecode_analysis().GetOutLivenessFor(offset));
+ }
void BuildCreateArguments(CreateArgumentsType type);
Node* BuildLoadGlobal(NameRef name, uint32_t feedback_slot_index,
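
The three PrepareFrameState variants above differ only in which bailout offset and which liveness they attach: ordinary bytecodes deopt "after" themselves at the current offset, the function-entry stack check deopts to a virtual pre-first-bytecode marker with the in-liveness of offset 0, and the OSR-entry stack check reuses the JumpLoop offset recorded by the bytecode analysis. A hedged sketch of that selection; the marker constant below is a stand-in for illustration, not V8's actual value.

#include <iostream>

enum class StackCheckKind { kNone, kFunctionEntry, kOsrEntry };

// Stand-in for the function-entry marker; only assumed to be a reserved
// sentinel that cannot collide with a real bytecode offset.
constexpr int kFunctionEntryMarker = -1;

int BailoutOffsetFor(StackCheckKind kind, int current_offset,
                     int osr_jump_loop_offset) {
  switch (kind) {
    case StackCheckKind::kFunctionEntry:
      return kFunctionEntryMarker;  // "before" the first bytecode
    case StackCheckKind::kOsrEntry:
      return osr_jump_loop_offset;  // the JumpLoop that triggered OSR
    case StackCheckKind::kNone:
    default:
      return current_offset;        // "after" the current bytecode
  }
}

int main() {
  std::cout << BailoutOffsetFor(StackCheckKind::kNone, 12, 40) << "\n";          // 12
  std::cout << BailoutOffsetFor(StackCheckKind::kFunctionEntry, 0, 40) << "\n";  // -1
  std::cout << BailoutOffsetFor(StackCheckKind::kOsrEntry, 44, 40) << "\n";      // 40
  return 0;
}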
@@ -304,6 +350,7 @@ class BytecodeGraphBuilder {
// StackChecks.
void BuildFunctionEntryStackCheck();
void BuildIterationBodyStackCheck();
+ void MaybeBuildOSREntryStackCheck();
// Control flow plumbing.
void BuildJump();
@@ -365,6 +412,12 @@ class BytecodeGraphBuilder {
int context_register_; // Index of register holding handler context.
};
+ Handle<Object> GetConstantForIndexOperand(int operand_index) const {
+ return broker_->CanonicalPersistentHandle(
+ bytecode_iterator().GetConstantForIndexOperand(operand_index,
+ local_isolate_));
+ }
+
Graph* graph() const { return jsgraph_->graph(); }
CommonOperatorBuilder* common() const { return jsgraph_->common(); }
Zone* graph_zone() const { return graph()->zone(); }
@@ -388,6 +441,9 @@ class BytecodeGraphBuilder {
SourcePositionTableIterator& source_position_iterator() {
return *source_position_iterator_.get();
}
+ interpreter::BytecodeArrayIterator const& bytecode_iterator() const {
+ return bytecode_iterator_;
+ }
interpreter::BytecodeArrayIterator& bytecode_iterator() {
return bytecode_iterator_;
}
@@ -418,6 +474,7 @@ class BytecodeGraphBuilder {
#undef DECLARE_VISIT_BYTECODE
JSHeapBroker* const broker_;
+ LocalIsolate* const local_isolate_;
Zone* const local_zone_;
JSGraph* const jsgraph_;
// The native context for which we optimize.
@@ -434,6 +491,7 @@ class BytecodeGraphBuilder {
Environment* environment_;
bool const osr_;
int currently_peeled_loop_offset_;
+ bool is_osr_entry_stack_check_pending_;
const bool skip_first_stack_check_;
@@ -484,6 +542,8 @@ class BytecodeGraphBuilder {
TickCounter* const tick_counter_;
+ ObserveNodeInfo const observe_node_info_;
+
static constexpr int kBinaryOperationHintIndex = 1;
static constexpr int kBinaryOperationSmiHintIndex = 1;
static constexpr int kCompareOperationHintIndex = 1;
@@ -532,7 +592,8 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
// Preserve a checkpoint of the environment for the IR graph. Any
// further mutation of the environment will not affect checkpoints.
- Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine,
+ Node* Checkpoint(BytecodeOffset bytecode_offset,
+ OutputFrameStateCombine combine,
const BytecodeLivenessState* liveness);
// Control dependency tracked by this environment.
@@ -955,7 +1016,7 @@ Node* BytecodeGraphBuilder::Environment::GetStateValuesFromCache(
}
Node* BytecodeGraphBuilder::Environment::Checkpoint(
- BailoutId bailout_id, OutputFrameStateCombine combine,
+ BytecodeOffset bailout_id, OutputFrameStateCombine combine,
const BytecodeLivenessState* liveness) {
if (parameter_count() == register_count()) {
// Re-use the state-value cache if the number of local registers happens
@@ -991,17 +1052,21 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
JSHeapBroker* broker, Zone* local_zone,
NativeContextRef const& native_context,
SharedFunctionInfoRef const& shared_info,
- FeedbackCellRef const& feedback_cell, BailoutId osr_offset,
+ FeedbackCellRef const& feedback_cell, BytecodeOffset osr_offset,
JSGraph* jsgraph, CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions, int inlining_id, CodeKind code_kind,
- BytecodeGraphBuilderFlags flags, TickCounter* tick_counter)
+ BytecodeGraphBuilderFlags flags, TickCounter* tick_counter,
+ ObserveNodeInfo const& observe_node_info)
: broker_(broker),
+ local_isolate_(broker_->local_isolate()
+ ? broker_->local_isolate()
+ : broker_->isolate()->AsLocalIsolate()),
local_zone_(local_zone),
jsgraph_(jsgraph),
native_context_(native_context),
shared_info_(shared_info),
feedback_cell_(feedback_cell),
- feedback_vector_(feedback_cell.value().AsFeedbackVector()),
+ feedback_vector_(feedback_cell.value()->AsFeedbackVector()),
invocation_frequency_(invocation_frequency),
type_hint_lowering_(
broker, jsgraph, feedback_vector_,
@@ -1009,19 +1074,19 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
? JSTypeHintLowering::kBailoutOnUninitialized
: JSTypeHintLowering::kNoFlags),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
- FrameStateType::kInterpretedFunction,
+ FrameStateType::kUnoptimizedFunction,
bytecode_array().parameter_count(), bytecode_array().register_count(),
shared_info.object())),
source_position_iterator_(std::make_unique<SourcePositionTableIterator>(
bytecode_array().SourcePositionTable())),
- bytecode_iterator_(
- std::make_unique<OffHeapBytecodeArray>(bytecode_array())),
+ bytecode_iterator_(bytecode_array().object()),
bytecode_analysis_(
bytecode_array().object(), local_zone, osr_offset,
flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness),
environment_(nullptr),
osr_(!osr_offset.IsNone()),
currently_peeled_loop_offset_(-1),
+ is_osr_entry_stack_check_pending_(osr_),
skip_first_stack_check_(flags &
BytecodeGraphBuilderFlag::kSkipFirstStackCheck),
merge_environments_(local_zone),
@@ -1039,7 +1104,8 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
state_values_cache_(jsgraph),
source_positions_(source_positions),
start_position_(shared_info.StartPosition(), inlining_id),
- tick_counter_(tick_counter) {}
+ tick_counter_(tick_counter),
+ observe_node_info_(observe_node_info) {}
Node* BytecodeGraphBuilder::GetFunctionClosure() {
if (!function_closure_.is_set()) {
@@ -1213,7 +1279,7 @@ void BytecodeGraphBuilder::PrepareEagerCheckpoint() {
DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
DCHECK_EQ(IrOpcode::kDead,
NodeProperties::GetFrameStateInput(node)->opcode());
- BailoutId bailout_id(bytecode_iterator().current_offset());
+ BytecodeOffset bailout_id(bytecode_iterator().current_offset());
const BytecodeLivenessState* liveness_before =
bytecode_analysis().GetInLivenessFor(
@@ -1239,36 +1305,18 @@ void BytecodeGraphBuilder::PrepareEagerCheckpoint() {
#endif // DEBUG
}
-void BytecodeGraphBuilder::PrepareFrameState(Node* node,
- OutputFrameStateCombine combine) {
- if (OperatorProperties::HasFrameStateInput(node->op())) {
- PrepareFrameState(node, combine,
- BailoutId(bytecode_iterator().current_offset()));
- }
-}
-
-void BytecodeGraphBuilder::PrepareFrameState(Node* node,
- OutputFrameStateCombine combine,
- BailoutId bailout_id) {
+void BytecodeGraphBuilder::PrepareFrameState(
+ Node* node, OutputFrameStateCombine combine, BytecodeOffset bailout_id,
+ const BytecodeLivenessState* liveness) {
if (OperatorProperties::HasFrameStateInput(node->op())) {
// Add the frame state for after the operation. The node in question has
// already been created and had a {Dead} frame state input up until now.
DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
DCHECK_EQ(IrOpcode::kDead,
NodeProperties::GetFrameStateInput(node)->opcode());
- DCHECK_IMPLIES(bailout_id.ToInt() == kFunctionEntryBytecodeOffset,
- bytecode_iterator().current_offset() == 0);
-
- // If we have kFunctionEntryBytecodeOffset as the bailout_id, we want to get
- // the liveness at the moment of function entry. This is the same as the IN
- // liveness of the first actual bytecode.
- const BytecodeLivenessState* liveness_after =
- bailout_id.ToInt() == kFunctionEntryBytecodeOffset
- ? bytecode_analysis().GetInLivenessFor(0)
- : bytecode_analysis().GetOutLivenessFor(bailout_id.ToInt());
Node* frame_state_after =
- environment()->Checkpoint(bailout_id, combine, liveness_after);
+ environment()->Checkpoint(bailout_id, combine, liveness);
NodeProperties::ReplaceFrameStateInput(node, frame_state_after);
}
}
@@ -1378,8 +1426,7 @@ void BytecodeGraphBuilder::BuildFunctionEntryStackCheck() {
if (!skip_first_stack_check()) {
Node* node =
NewNode(javascript()->StackCheck(StackCheckKind::kJSFunctionEntry));
- PrepareFrameState(node, OutputFrameStateCombine::Ignore(),
- BailoutId(kFunctionEntryBytecodeOffset));
+ PrepareFrameStateForFunctionEntryStackCheck(node);
}
}
@@ -1389,6 +1436,15 @@ void BytecodeGraphBuilder::BuildIterationBodyStackCheck() {
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
+void BytecodeGraphBuilder::MaybeBuildOSREntryStackCheck() {
+ if (V8_UNLIKELY(is_osr_entry_stack_check_pending_)) {
+ is_osr_entry_stack_check_pending_ = false;
+ Node* node =
+ NewNode(javascript()->StackCheck(StackCheckKind::kJSFunctionEntry));
+ PrepareFrameStateForOSREntryStackCheck(node);
+ }
+}
+
// We will iterate through the OSR loop, then its parent, and so on
// until we have reached the outermost loop containing the OSR loop. We do
// not generate nodes for anything before the outermost loop.
@@ -1469,6 +1525,13 @@ void BytecodeGraphBuilder::VisitSingleBytecode() {
if (environment() != nullptr) {
BuildLoopHeaderEnvironment(current_offset);
+
+ // The OSR-entry stack check must be emitted during the first call to
+ // VisitSingleBytecode in an OSR'd function. We don't know if that call
+ // will be made from AdvanceToOsrEntryAndPeelLoops or from VisitBytecodes,
+ // therefore we insert the logic here inside VisitSingleBytecode itself.
+ MaybeBuildOSREntryStackCheck();
+
switch (bytecode_iterator().current_bytecode()) {
#define BYTECODE_CASE(name, ...) \
case interpreter::Bytecode::k##name: \
@@ -1526,8 +1589,8 @@ void BytecodeGraphBuilder::VisitLdaSmi() {
}
void BytecodeGraphBuilder::VisitLdaConstant() {
- ObjectRef object(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ ObjectRef object(broker(), GetConstantForIndexOperand(0),
+ ObjectRef::BackgroundSerialization::kAllowed);
Node* node = jsgraph()->Constant(object);
environment()->BindAccumulator(node);
}
@@ -1568,6 +1631,16 @@ void BytecodeGraphBuilder::VisitStar() {
environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), value);
}
+#define SHORT_STAR_VISITOR(Name, ...) \
+ void BytecodeGraphBuilder::Visit##Name() { \
+ Node* value = environment()->LookupAccumulator(); \
+ environment()->BindRegister( \
+ interpreter::Register::FromShortStar(interpreter::Bytecode::k##Name), \
+ value); \
+ }
+SHORT_STAR_BYTECODE_LIST(SHORT_STAR_VISITOR)
+#undef SHORT_STAR_VISITOR
+
void BytecodeGraphBuilder::VisitMov() {
Node* value =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
@@ -1587,8 +1660,7 @@ Node* BytecodeGraphBuilder::BuildLoadGlobal(NameRef name,
void BytecodeGraphBuilder::VisitLdaGlobal() {
PrepareEagerCheckpoint();
- NameRef name(broker(),
- bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ NameRef name(broker(), GetConstantForIndexOperand(0));
uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
Node* node =
BuildLoadGlobal(name, feedback_slot_index, TypeofMode::NOT_INSIDE_TYPEOF);
@@ -1597,8 +1669,7 @@ void BytecodeGraphBuilder::VisitLdaGlobal() {
void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
PrepareEagerCheckpoint();
- NameRef name(broker(),
- bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ NameRef name(broker(), GetConstantForIndexOperand(0));
uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
Node* node =
BuildLoadGlobal(name, feedback_slot_index, TypeofMode::INSIDE_TYPEOF);
@@ -1607,8 +1678,7 @@ void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
void BytecodeGraphBuilder::VisitStaGlobal() {
PrepareEagerCheckpoint();
- NameRef name(broker(),
- bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ NameRef name(broker(), GetConstantForIndexOperand(0));
FeedbackSource feedback =
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1));
Node* value = environment()->LookupAccumulator();
@@ -1749,8 +1819,8 @@ void BytecodeGraphBuilder::VisitStaCurrentContextSlot() {
void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
PrepareEagerCheckpoint();
- Node* name = jsgraph()->Constant(ObjectRef(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate())));
+ Node* name =
+ jsgraph()->Constant(ObjectRef(broker(), GetConstantForIndexOperand(0)));
const Operator* op =
javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
@@ -1902,9 +1972,8 @@ void BytecodeGraphBuilder::BuildLdaLookupContextSlot(TypeofMode typeof_mode) {
// Slow path, do a runtime load lookup.
set_environment(slow_environment);
{
- Node* name = jsgraph()->Constant(ObjectRef(
- broker(),
- bytecode_iterator().GetConstantForIndexOperand(0, isolate())));
+ Node* name = jsgraph()->Constant(
+ ObjectRef(broker(), GetConstantForIndexOperand(0)));
const Operator* op =
javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
@@ -1939,8 +2008,7 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
// Fast path, do a global load.
{
PrepareEagerCheckpoint();
- NameRef name(broker(),
- bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ NameRef name(broker(), GetConstantForIndexOperand(0));
uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
Node* node = BuildLoadGlobal(name, feedback_slot_index, typeof_mode);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
@@ -1955,9 +2023,8 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
// Slow path, do a runtime load lookup.
set_environment(slow_environment);
{
- Node* name = jsgraph()->Constant(NameRef(
- broker(),
- bytecode_iterator().GetConstantForIndexOperand(0, isolate())));
+ Node* name =
+ jsgraph()->Constant(NameRef(broker(), GetConstantForIndexOperand(0)));
const Operator* op =
javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
@@ -1986,8 +2053,8 @@ void BytecodeGraphBuilder::VisitLdaLookupGlobalSlotInsideTypeof() {
void BytecodeGraphBuilder::VisitStaLookupSlot() {
PrepareEagerCheckpoint();
Node* value = environment()->LookupAccumulator();
- Node* name = jsgraph()->Constant(ObjectRef(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate())));
+ Node* name =
+ jsgraph()->Constant(ObjectRef(broker(), GetConstantForIndexOperand(0)));
int bytecode_flags = bytecode_iterator().GetFlagOperand(1);
LanguageMode language_mode = static_cast<LanguageMode>(
interpreter::StoreLookupSlotFlags::LanguageModeBit::decode(
@@ -2011,8 +2078,7 @@ void BytecodeGraphBuilder::VisitLdaNamedProperty() {
PrepareEagerCheckpoint();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- NameRef name(broker(),
- bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
+ NameRef name(broker(), GetConstantForIndexOperand(1));
FeedbackSource feedback =
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2));
const Operator* op = javascript()->LoadNamed(name.object(), feedback);
@@ -2036,8 +2102,7 @@ void BytecodeGraphBuilder::VisitLdaNamedPropertyNoFeedback() {
PrepareEagerCheckpoint();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- NameRef name(broker(),
- bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
+ NameRef name(broker(), GetConstantForIndexOperand(1));
const Operator* op = javascript()->LoadNamed(name.object(), FeedbackSource());
DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
Node* node = NewNode(op, object, feedback_vector_node());
@@ -2049,8 +2114,7 @@ void BytecodeGraphBuilder::VisitLdaNamedPropertyFromSuper() {
Node* receiver =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* home_object = environment()->LookupAccumulator();
- NameRef name(broker(),
- bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
+ NameRef name(broker(), GetConstantForIndexOperand(1));
FeedbackSource feedback =
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2));
@@ -2104,8 +2168,7 @@ void BytecodeGraphBuilder::BuildNamedStore(StoreMode store_mode) {
Node* value = environment()->LookupAccumulator();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- NameRef name(broker(),
- bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
+ NameRef name(broker(), GetConstantForIndexOperand(1));
FeedbackSource feedback =
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2));
@@ -2146,8 +2209,7 @@ void BytecodeGraphBuilder::VisitStaNamedPropertyNoFeedback() {
Node* value = environment()->LookupAccumulator();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- NameRef name(broker(),
- bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
+ NameRef name(broker(), GetConstantForIndexOperand(1));
LanguageMode language_mode =
static_cast<LanguageMode>(bytecode_iterator().GetFlagOperand(2));
const Operator* op =
@@ -2226,8 +2288,7 @@ void BytecodeGraphBuilder::VisitPopContext() {
}
void BytecodeGraphBuilder::VisitCreateClosure() {
- SharedFunctionInfoRef shared_info(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ SharedFunctionInfoRef shared_info(broker(), GetConstantForIndexOperand(0));
AllocationType allocation =
interpreter::CreateClosureFlags::PretenuredBit::decode(
bytecode_iterator().GetFlagOperand(2))
@@ -2244,16 +2305,14 @@ void BytecodeGraphBuilder::VisitCreateClosure() {
}
void BytecodeGraphBuilder::VisitCreateBlockContext() {
- ScopeInfoRef scope_info(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ ScopeInfoRef scope_info(broker(), GetConstantForIndexOperand(0));
const Operator* op = javascript()->CreateBlockContext(scope_info.object());
Node* context = NewNode(op);
environment()->BindAccumulator(context);
}
void BytecodeGraphBuilder::VisitCreateFunctionContext() {
- ScopeInfoRef scope_info(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ ScopeInfoRef scope_info(broker(), GetConstantForIndexOperand(0));
uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
const Operator* op = javascript()->CreateFunctionContext(
scope_info.object(), slots, FUNCTION_SCOPE);
@@ -2262,8 +2321,7 @@ void BytecodeGraphBuilder::VisitCreateFunctionContext() {
}
void BytecodeGraphBuilder::VisitCreateEvalContext() {
- ScopeInfoRef scope_info(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ ScopeInfoRef scope_info(broker(), GetConstantForIndexOperand(0));
uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
const Operator* op = javascript()->CreateFunctionContext(scope_info.object(),
slots, EVAL_SCOPE);
@@ -2274,8 +2332,7 @@ void BytecodeGraphBuilder::VisitCreateEvalContext() {
void BytecodeGraphBuilder::VisitCreateCatchContext() {
interpreter::Register reg = bytecode_iterator().GetRegisterOperand(0);
Node* exception = environment()->LookupRegister(reg);
- ScopeInfoRef scope_info(
- broker(), bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
+ ScopeInfoRef scope_info(broker(), GetConstantForIndexOperand(1));
const Operator* op = javascript()->CreateCatchContext(scope_info.object());
Node* context = NewNode(op, exception);
@@ -2285,8 +2342,7 @@ void BytecodeGraphBuilder::VisitCreateCatchContext() {
void BytecodeGraphBuilder::VisitCreateWithContext() {
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- ScopeInfoRef scope_info(
- broker(), bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
+ ScopeInfoRef scope_info(broker(), GetConstantForIndexOperand(1));
const Operator* op = javascript()->CreateWithContext(scope_info.object());
Node* context = NewNode(op, object);
@@ -2312,8 +2368,7 @@ void BytecodeGraphBuilder::VisitCreateRestParameter() {
}
void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
- StringRef constant_pattern(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ StringRef constant_pattern(broker(), GetConstantForIndexOperand(0));
int const slot_id = bytecode_iterator().GetIndexOperand(1);
FeedbackSource pair = CreateFeedbackSource(slot_id);
int literal_flags = bytecode_iterator().GetFlagOperand(2);
@@ -2327,7 +2382,7 @@ void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
ArrayBoilerplateDescriptionRef array_boilerplate_description(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ broker(), GetConstantForIndexOperand(0));
int const slot_id = bytecode_iterator().GetIndexOperand(1);
FeedbackSource pair = CreateFeedbackSource(slot_id);
int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
@@ -2366,7 +2421,7 @@ void BytecodeGraphBuilder::VisitCreateArrayFromIterable() {
void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
ObjectBoilerplateDescriptionRef constant_properties(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ broker(), GetConstantForIndexOperand(0));
int const slot_id = bytecode_iterator().GetIndexOperand(1);
FeedbackSource pair = CreateFeedbackSource(slot_id);
int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
@@ -2404,8 +2459,8 @@ void BytecodeGraphBuilder::VisitCloneObject() {
void BytecodeGraphBuilder::VisitGetTemplateObject() {
FeedbackSource source =
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1));
- TemplateObjectDescriptionRef description(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ TemplateObjectDescriptionRef description(broker(),
+ GetConstantForIndexOperand(0));
STATIC_ASSERT(JSGetTemplateObjectNode::FeedbackVectorIndex() == 0);
const Operator* op = javascript()->GetTemplateObject(
description.object(), shared_info().object(), source);
@@ -2696,16 +2751,25 @@ void BytecodeGraphBuilder::VisitCallRuntime() {
interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
- // Create node to perform the runtime call.
- const Operator* call = javascript()->CallRuntime(function_id, reg_count);
- Node* value = ProcessCallRuntimeArguments(call, receiver, reg_count);
- environment()->BindAccumulator(value, Environment::kAttachFrameState);
-
- // Connect to the end if {function_id} is non-returning.
- if (Runtime::IsNonReturning(function_id)) {
- // TODO(7099): Investigate if we need LoopExit node here.
- Node* control = NewNode(common()->Throw());
- MergeControlToLeaveFunction(control);
+ // Handle %ObserveNode here (rather than in JSIntrinsicLowering) to observe
+ // the node as early as possible.
+ if (function_id == Runtime::FunctionId::kObserveNode) {
+ DCHECK_EQ(1, reg_count);
+ Node* value = environment()->LookupRegister(receiver);
+ observe_node_info_.StartObserving(value);
+ environment()->BindAccumulator(value);
+ } else {
+ // Create node to perform the runtime call.
+ const Operator* call = javascript()->CallRuntime(function_id, reg_count);
+ Node* value = ProcessCallRuntimeArguments(call, receiver, reg_count);
+ environment()->BindAccumulator(value, Environment::kAttachFrameState);
+
+ // Connect to the end if {function_id} is non-returning.
+ if (Runtime::IsNonReturning(function_id)) {
+ // TODO(7099): Investigate if we need LoopExit node here.
+ Node* control = NewNode(common()->Throw());
+ MergeControlToLeaveFunction(control);
+ }
}
}
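
The new branch above intercepts the %ObserveNode intrinsic in the graph builder itself, so the wrapped value's node is registered with the observer as early as possible instead of being lowered to a runtime call, and the value flows through unchanged. A generic, hedged sketch of that interception shape with stand-in types; this is not the NodeObserver API.

#include <functional>
#include <iostream>
#include <string>

struct Node { std::string description; };

// Stand-in for the "start observing this node" hook carried by the builder.
using StartObserving = std::function<void(Node*)>;

Node* BuildIntrinsicCall(const std::string& name, Node* argument,
                         const StartObserving& start_observing) {
  if (name == "ObserveNode") {
    // Do not emit a call: register the argument's node and pass it through.
    start_observing(argument);
    return argument;
  }
  // Other intrinsics would be lowered to a real runtime call here (omitted).
  return nullptr;
}

int main() {
  Node arg{"the observed value"};
  Node* result = BuildIntrinsicCall(
      "ObserveNode", &arg,
      [](Node* n) { std::cout << "observing: " << n->description << "\n"; });
  std::cout << (result == &arg) << "\n";  // 1: the value passes through
  return 0;
}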
@@ -2891,8 +2955,8 @@ void BytecodeGraphBuilder::VisitThrowReferenceErrorIfHole() {
Node* accumulator = environment()->LookupAccumulator();
Node* check_for_hole = NewNode(simplified()->ReferenceEqual(), accumulator,
jsgraph()->TheHoleConstant());
- Node* name = jsgraph()->Constant(ObjectRef(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate())));
+ Node* name =
+ jsgraph()->Constant(ObjectRef(broker(), GetConstantForIndexOperand(0)));
BuildHoleCheckAndThrow(check_for_hole,
Runtime::kThrowAccessedUninitializedVariable, name);
}
@@ -4508,18 +4572,21 @@ void BytecodeGraphBuilder::UpdateSourcePosition(int offset) {
void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
SharedFunctionInfoRef const& shared_info,
FeedbackCellRef const& feedback_cell,
- BailoutId osr_offset, JSGraph* jsgraph,
+ BytecodeOffset osr_offset, JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions,
int inlining_id, CodeKind code_kind,
BytecodeGraphBuilderFlags flags,
- TickCounter* tick_counter) {
+ TickCounter* tick_counter,
+ ObserveNodeInfo const& observe_node_info) {
DCHECK(broker->IsSerializedForCompilation(
- shared_info, feedback_cell.value().AsFeedbackVector()));
+ shared_info, feedback_cell.value()->AsFeedbackVector()));
+ DCHECK(feedback_cell.value()->AsFeedbackVector().serialized());
BytecodeGraphBuilder builder(
broker, local_zone, broker->target_native_context(), shared_info,
feedback_cell, osr_offset, jsgraph, invocation_frequency,
- source_positions, inlining_id, code_kind, flags, tick_counter);
+ source_positions, inlining_id, code_kind, flags, tick_counter,
+ observe_node_info);
builder.CreateGraph();
}
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 501451ec55..6870f266be 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -7,6 +7,7 @@
#include "src/compiler/js-operator.h"
#include "src/compiler/js-type-hint-lowering.h"
+#include "src/compiler/node-observer.h"
#include "src/handles/handles.h"
#include "src/objects/code-kind.h"
#include "src/utils/utils.h"
@@ -25,6 +26,7 @@ class Zone;
namespace compiler {
class JSGraph;
+class NodeObserver;
class SourcePositionTable;
enum class BytecodeGraphBuilderFlag : uint8_t {
@@ -42,12 +44,13 @@ using BytecodeGraphBuilderFlags = base::Flags<BytecodeGraphBuilderFlag>;
void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
SharedFunctionInfoRef const& shared_info,
FeedbackCellRef const& feedback_cell,
- BailoutId osr_offset, JSGraph* jsgraph,
+ BytecodeOffset osr_offset, JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions,
int inlining_id, CodeKind code_kind,
BytecodeGraphBuilderFlags flags,
- TickCounter* tick_counter);
+ TickCounter* tick_counter,
+ ObserveNodeInfo const& observe_node_info = {});
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index 2c5338b0d7..5950541111 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -31,6 +31,7 @@ namespace {
#define STACK_SHADOW_WORDS 4
#define PARAM_REGISTERS rcx, rdx, r8, r9
#define FP_PARAM_REGISTERS xmm0, xmm1, xmm2, xmm3
+#define FP_RETURN_REGISTER xmm0
#define CALLEE_SAVE_REGISTERS \
rbx.bit() | rdi.bit() | rsi.bit() | r12.bit() | r13.bit() | r14.bit() | \
r15.bit()
@@ -43,6 +44,7 @@ namespace {
// == x64 other ==============================================================
#define PARAM_REGISTERS rdi, rsi, rdx, rcx, r8, r9
#define FP_PARAM_REGISTERS xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7
+#define FP_RETURN_REGISTER xmm0
#define CALLEE_SAVE_REGISTERS \
rbx.bit() | r12.bit() | r13.bit() | r14.bit() | r15.bit()
#endif // V8_TARGET_OS_WIN
@@ -59,7 +61,6 @@ namespace {
(1 << d11.code()) | (1 << d12.code()) | (1 << d13.code()) | \
(1 << d14.code()) | (1 << d15.code())
-
#elif V8_TARGET_ARCH_ARM64
// ===========================================================================
// == arm64 ====================================================================
@@ -130,6 +131,19 @@ namespace {
d8.bit() | d9.bit() | d10.bit() | d11.bit() | d12.bit() | d13.bit() | \
d14.bit() | d15.bit()
+#elif V8_TARGET_ARCH_RISCV64
+// ===========================================================================
+// == riscv64 =================================================================
+// ===========================================================================
+#define PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
+// fp is not part of CALLEE_SAVE_REGISTERS (similar to how MIPS64 or PPC defines
+// it)
+#define CALLEE_SAVE_REGISTERS \
+ s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | s7.bit() | \
+ s8.bit() | s9.bit() | s10.bit() | s11.bit()
+#define CALLEE_SAVE_FP_REGISTERS \
+ fs0.bit() | fs1.bit() | fs2.bit() | fs3.bit() | fs4.bit() | fs5.bit() | \
+ fs6.bit() | fs7.bit() | fs8.bit() | fs9.bit() | fs10.bit() | fs11.bit()
#else
// ===========================================================================
// == unknown ================================================================
@@ -236,24 +250,36 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
#ifndef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
// Check the types of the signature.
for (size_t i = 0; i < msig->parameter_count(); i++) {
- MachineRepresentation rep = msig->GetParam(i).representation();
- CHECK_NE(MachineRepresentation::kFloat32, rep);
- CHECK_NE(MachineRepresentation::kFloat64, rep);
+ MachineType type = msig->GetParam(i);
+ CHECK(!IsFloatingPoint(type.representation()));
}
-#endif
- // Add return location(s). We don't support FP returns for now.
+ // Check the return types.
for (size_t i = 0; i < locations.return_count_; i++) {
MachineType type = msig->GetReturn(i);
CHECK(!IsFloatingPoint(type.representation()));
}
+#endif
CHECK_GE(2, locations.return_count_);
if (locations.return_count_ > 0) {
- locations.AddReturn(LinkageLocation::ForRegister(kReturnRegister0.code(),
- msig->GetReturn(0)));
+#ifdef FP_RETURN_REGISTER
+ const v8::internal::DoubleRegister kFPReturnRegister = FP_RETURN_REGISTER;
+ auto reg = IsFloatingPoint(msig->GetReturn(0).representation())
+ ? kFPReturnRegister.code()
+ : kReturnRegister0.code();
+#else
+ auto reg = kReturnRegister0.code();
+#endif
+ // TODO(chromium:1052746): Use the correctly sized register here (e.g. "al"
+ // if the return type is kBit), so we don't have to use a hacky bitwise AND
+ // elsewhere.
+ locations.AddReturn(LinkageLocation::ForRegister(reg, msig->GetReturn(0)));
}
+
if (locations.return_count_ > 1) {
+ DCHECK(!IsFloatingPoint(msig->GetReturn(0).representation()));
+
locations.AddReturn(LinkageLocation::ForRegister(kReturnRegister1.code(),
msig->GetReturn(1)));
}
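
The c-linkage change above lets the simplified C call descriptor return floating-point values in an FP register on targets that define FP_RETURN_REGISTER, rather than rejecting them outright. A small hedged sketch of the register choice, with string stand-ins for register codes; per the hunk, x64 uses rax for integer returns and xmm0 for FP returns.

#include <iostream>
#include <string>

enum class Rep { kWord64, kFloat64 };

std::string ReturnRegisterFor(Rep rep, bool target_has_fp_return_register) {
  // Targets without FP_RETURN_REGISTER keep using the integer return path.
  if (target_has_fp_return_register && rep == Rep::kFloat64) return "xmm0";
  return "rax";
}

int main() {
  std::cout << ReturnRegisterFor(Rep::kWord64, true) << "\n";   // rax
  std::cout << ReturnRegisterFor(Rep::kFloat64, true) << "\n";  // xmm0
  return 0;
}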
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 0344a1916f..0361a2ada0 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -8,7 +8,6 @@
#include "src/base/bits.h"
#include "src/codegen/code-factory.h"
-#include "src/codegen/code-stub-assembler.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/macro-assembler.h"
@@ -562,23 +561,23 @@ TNode<WordT> CodeAssembler::WordPoisonOnSpeculation(TNode<WordT> value) {
CODE_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_ASSEMBLER_BINARY_OP)
#undef DEFINE_CODE_ASSEMBLER_BINARY_OP
-TNode<WordT> CodeAssembler::WordShl(SloppyTNode<WordT> value, int shift) {
+TNode<WordT> CodeAssembler::WordShl(TNode<WordT> value, int shift) {
return (shift != 0) ? WordShl(value, IntPtrConstant(shift)) : value;
}
-TNode<WordT> CodeAssembler::WordShr(SloppyTNode<WordT> value, int shift) {
+TNode<WordT> CodeAssembler::WordShr(TNode<WordT> value, int shift) {
return (shift != 0) ? WordShr(value, IntPtrConstant(shift)) : value;
}
-TNode<WordT> CodeAssembler::WordSar(SloppyTNode<WordT> value, int shift) {
+TNode<WordT> CodeAssembler::WordSar(TNode<WordT> value, int shift) {
return (shift != 0) ? WordSar(value, IntPtrConstant(shift)) : value;
}
-TNode<Word32T> CodeAssembler::Word32Shr(SloppyTNode<Word32T> value, int shift) {
+TNode<Word32T> CodeAssembler::Word32Shr(TNode<Word32T> value, int shift) {
return (shift != 0) ? Word32Shr(value, Int32Constant(shift)) : value;
}
-TNode<Word32T> CodeAssembler::Word32Sar(SloppyTNode<Word32T> value, int shift) {
+TNode<Word32T> CodeAssembler::Word32Sar(TNode<Word32T> value, int shift) {
return (shift != 0) ? Word32Sar(value, Int32Constant(shift)) : value;
}
@@ -647,8 +646,7 @@ TNode<Float64T> CodeAssembler::RoundIntPtrToFloat64(Node* value) {
return UncheckedCast<Float64T>(raw_assembler()->ChangeInt32ToFloat64(value));
}
-TNode<Int32T> CodeAssembler::TruncateFloat32ToInt32(
- SloppyTNode<Float32T> value) {
+TNode<Int32T> CodeAssembler::TruncateFloat32ToInt32(TNode<Float32T> value) {
return UncheckedCast<Int32T>(raw_assembler()->TruncateFloat32ToInt32(
value, TruncateKind::kSetOverflowToMin));
}
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 70991d8c7b..263ed37536 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -742,7 +742,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Load(MachineTypeOf<Type>::value, base, needs_poisoning));
}
template <class Type>
- TNode<Type> Load(Node* base, SloppyTNode<WordT> offset,
+ TNode<Type> Load(Node* base, TNode<WordT> offset,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
return UncheckedCast<Type>(
Load(MachineTypeOf<Type>::value, base, offset, needs_poisoning));
@@ -996,17 +996,17 @@ class V8_EXPORT_PRIVATE CodeAssembler {
IntPtrSub(static_cast<Node*>(left), static_cast<Node*>(right)));
}
- TNode<WordT> WordShl(SloppyTNode<WordT> value, int shift);
- TNode<WordT> WordShr(SloppyTNode<WordT> value, int shift);
- TNode<WordT> WordSar(SloppyTNode<WordT> value, int shift);
+ TNode<WordT> WordShl(TNode<WordT> value, int shift);
+ TNode<WordT> WordShr(TNode<WordT> value, int shift);
+ TNode<WordT> WordSar(TNode<WordT> value, int shift);
TNode<IntPtrT> WordShr(TNode<IntPtrT> value, int shift) {
- return UncheckedCast<IntPtrT>(WordShr(static_cast<Node*>(value), shift));
+ return UncheckedCast<IntPtrT>(WordShr(TNode<WordT>(value), shift));
}
TNode<IntPtrT> WordSar(TNode<IntPtrT> value, int shift) {
- return UncheckedCast<IntPtrT>(WordSar(static_cast<Node*>(value), shift));
+ return UncheckedCast<IntPtrT>(WordSar(TNode<WordT>(value), shift));
}
- TNode<Word32T> Word32Shr(SloppyTNode<Word32T> value, int shift);
- TNode<Word32T> Word32Sar(SloppyTNode<Word32T> value, int shift);
+ TNode<Word32T> Word32Shr(TNode<Word32T> value, int shift);
+ TNode<Word32T> Word32Sar(TNode<Word32T> value, int shift);
// Unary
#define DECLARE_CODE_ASSEMBLER_UNARY_OP(name, ResType, ArgType) \
@@ -1040,7 +1040,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// range, make sure that overflow detection is easy. In particular, return
// int_min instead of int_max on arm platforms by using parameter
// kSetOverflowToMin.
- TNode<Int32T> TruncateFloat32ToInt32(SloppyTNode<Float32T> value);
+ TNode<Int32T> TruncateFloat32ToInt32(TNode<Float32T> value);
// Projections
template <int index, class T1, class T2>
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 8b3826424f..73aca646ce 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -1528,7 +1528,7 @@ MachineRepresentation DeadValueRepresentationOf(Operator const* op) {
}
const Operator* CommonOperatorBuilder::FrameState(
- BailoutId bailout_id, OutputFrameStateCombine state_combine,
+ BytecodeOffset bailout_id, OutputFrameStateCombine state_combine,
const FrameStateFunctionInfo* function_info) {
FrameStateInfo state_info(bailout_id, state_combine, function_info);
return zone()->New<Operator1<FrameStateInfo>>( // --
@@ -1625,6 +1625,17 @@ CommonOperatorBuilder::CreateFrameStateFunctionInfo(
shared_info);
}
+const FrameStateFunctionInfo*
+CommonOperatorBuilder::CreateJSToWasmFrameStateFunctionInfo(
+ FrameStateType type, int parameter_count, int local_count,
+ Handle<SharedFunctionInfo> shared_info,
+ const wasm::FunctionSig* signature) {
+ DCHECK_EQ(type, FrameStateType::kJSToWasmBuiltinContinuation);
+ DCHECK_NOT_NULL(signature);
+ return zone()->New<JSToWasmFrameStateFunctionInfo>(
+ type, parameter_count, local_count, shared_info, signature);
+}
+
const Operator* CommonOperatorBuilder::DeadValue(MachineRepresentation rep) {
return zone()->New<Operator1<MachineRepresentation>>( // --
IrOpcode::kDeadValue, Operator::kPure, // opcode
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index d2768a9cf4..bf0e3a7bab 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -543,7 +543,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* ObjectState(uint32_t object_id, int pointer_slots);
const Operator* TypedObjectState(uint32_t object_id,
const ZoneVector<MachineType>* types);
- const Operator* FrameState(BailoutId bailout_id,
+ const Operator* FrameState(BytecodeOffset bailout_id,
OutputFrameStateCombine state_combine,
const FrameStateFunctionInfo* function_info);
const Operator* Call(const CallDescriptor* call_descriptor);
@@ -561,6 +561,10 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const FrameStateFunctionInfo* CreateFrameStateFunctionInfo(
FrameStateType type, int parameter_count, int local_count,
Handle<SharedFunctionInfo> shared_info);
+ const FrameStateFunctionInfo* CreateJSToWasmFrameStateFunctionInfo(
+ FrameStateType type, int parameter_count, int local_count,
+ Handle<SharedFunctionInfo> shared_info,
+ const wasm::FunctionSig* signature);
const Operator* MarkAsSafetyCheck(const Operator* op,
IsSafetyCheck safety_check);
@@ -600,6 +604,65 @@ class CommonNodeWrapperBase : public NodeWrapper {
NodeProperties::GetValueInput(node(), TheIndex)); \
}
+// TODO(jgruber): This class doesn't match the usual OpcodeNode naming
+// convention for historical reasons (it was originally a very basic typed node
+// wrapper similar to Effect and Control). Consider updating the name, with low
+// priority.
+class FrameState : public CommonNodeWrapperBase {
+ public:
+ explicit constexpr FrameState(Node* node) : CommonNodeWrapperBase(node) {
+ // TODO(jgruber): Disallow kStart (needed for PromiseConstructorBasic unit
+ // test, among others). Also, outer_frame_state points at the start node
+ // for non-inlined functions. This could be avoided by checking
+ // has_outer_frame_state() before casting to FrameState.
+ CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kFrameState ||
+ node->opcode() == IrOpcode::kStart);
+ }
+
+ FrameStateInfo frame_state_info() const {
+ return FrameStateInfoOf(node()->op());
+ }
+
+ static constexpr int kFrameStateParametersInput = 0;
+ static constexpr int kFrameStateLocalsInput = 1;
+ static constexpr int kFrameStateStackInput = 2;
+ static constexpr int kFrameStateContextInput = 3;
+ static constexpr int kFrameStateFunctionInput = 4;
+ static constexpr int kFrameStateOuterStateInput = 5;
+ static constexpr int kFrameStateInputCount = 6;
+
+ // Note: The parameters should be accessed through StateValuesAccess.
+ Node* parameters() const {
+ Node* n = node()->InputAt(kFrameStateParametersInput);
+ DCHECK(n->opcode() == IrOpcode::kStateValues ||
+ n->opcode() == IrOpcode::kTypedStateValues);
+ return n;
+ }
+ Node* locals() const {
+ Node* n = node()->InputAt(kFrameStateLocalsInput);
+ DCHECK(n->opcode() == IrOpcode::kStateValues ||
+ n->opcode() == IrOpcode::kTypedStateValues);
+ return n;
+ }
+ // TODO(jgruber): Consider renaming this to the more meaningful
+ // 'accumulator'.
+ Node* stack() const { return node()->InputAt(kFrameStateStackInput); }
+ Node* context() const { return node()->InputAt(kFrameStateContextInput); }
+ Node* function() const { return node()->InputAt(kFrameStateFunctionInput); }
+
+ // An outer frame state exists for inlined functions; otherwise it points at
+ // the start node.
+ bool has_outer_frame_state() const {
+ Node* maybe_outer_frame_state = node()->InputAt(kFrameStateOuterStateInput);
+ DCHECK(maybe_outer_frame_state->opcode() == IrOpcode::kFrameState ||
+ maybe_outer_frame_state->opcode() == IrOpcode::kStart);
+ return maybe_outer_frame_state->opcode() == IrOpcode::kFrameState;
+ }
+ FrameState outer_frame_state() const {
+ return FrameState{node()->InputAt(kFrameStateOuterStateInput)};
+ }
+};
+
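+
+The FrameState wrapper just added exposes the fixed input slots of a frame-state node and lets callers walk to the outer frame state of an inlined function; the chain terminates at the Start node. A minimal hedged sketch of walking such a chain, using stand-in types rather than TurboFan nodes:
+
+#include <iostream>
+
+struct FrameStateSketch {
+  int bytecode_offset;
+  const FrameStateSketch* outer;  // nullptr stands in for the Start node
+};
+
+void PrintChain(const FrameStateSketch* fs) {
+  // Innermost (deepest inlined callee) first, outermost caller last.
+  for (; fs != nullptr; fs = fs->outer) {
+    std::cout << "frame state at bytecode offset " << fs->bytecode_offset
+              << "\n";
+  }
+}
+
+int main() {
+  FrameStateSketch outer_function{7, nullptr};
+  FrameStateSketch inlined_callee{42, &outer_function};
+  PrintChain(&inlined_callee);
+  return 0;
+}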
class StartNode final : public CommonNodeWrapperBase {
public:
explicit constexpr StartNode(Node* node) : CommonNodeWrapperBase(node) {
@@ -641,6 +704,67 @@ class StartNode final : public CommonNodeWrapperBase {
return node()->op()->ValueOutputCount() - kExtraOutputCount -
kReceiverOutputCount;
}
+
+ // Note these functions don't return the index of the Start output; instead
+ // they return the index assigned to the Parameter node.
+ // TODO(jgruber): Consider unifying the two.
+ int NewTargetParameterIndex() const {
+ return Linkage::GetJSCallNewTargetParamIndex(FormalParameterCount());
+ }
+ int ArgCountParameterIndex() const {
+ return Linkage::GetJSCallArgCountParamIndex(FormalParameterCount());
+ }
+ int ContextParameterIndex() const {
+ return Linkage::GetJSCallContextParamIndex(FormalParameterCount());
+ }
+
+ // TODO(jgruber): Remove this function and use
+ // Linkage::GetJSCallContextParamIndex instead. This currently doesn't work
+ // because tests don't create valid Start nodes - for example, they may add
+ // only two context outputs (and not the closure, new target, argc). Once
+ // tests are fixed, remove this function.
+ int ContextParameterIndex_MaybeNonStandardLayout() const {
+ // The context is always the last parameter to a JavaScript function, and
+ // {Parameter} indices start at -1, so value outputs of {Start} look like
+ // this: closure, receiver, param0, ..., paramN, context.
+ //
+ // TODO(jgruber): This function is called from spots that operate on
+ // CSA/Torque graphs; Start node layout appears to be different there.
+ // These should be unified to avoid confusion. Once done, enable this
+ // DCHECK: DCHECK_EQ(LastOutputIndex(), ContextOutputIndex());
+ return node()->op()->ValueOutputCount() - 2;
+ }
+ int LastParameterIndex_MaybeNonStandardLayout() const {
+ return ContextParameterIndex_MaybeNonStandardLayout();
+ }
+
+ // Unlike ContextParameterIndex_MaybeNonStandardLayout above, these return
+ // output indices (and not the index assigned to a Parameter).
+ int NewTargetOutputIndex() const {
+ // Indices assigned to parameters are off-by-one (Parameter indices start
+ // at -1).
+ // TODO(jgruber): Consider starting at 0.
+ DCHECK_EQ(Linkage::GetJSCallNewTargetParamIndex(FormalParameterCount()) + 1,
+ node()->op()->ValueOutputCount() - 3);
+ return node()->op()->ValueOutputCount() - 3;
+ }
+ int ArgCountOutputIndex() const {
+ // Indices assigned to parameters are off-by-one (Parameter indices start
+ // at -1).
+ // TODO(jgruber): Consider starting at 0.
+ DCHECK_EQ(Linkage::GetJSCallArgCountParamIndex(FormalParameterCount()) + 1,
+ node()->op()->ValueOutputCount() - 2);
+ return node()->op()->ValueOutputCount() - 2;
+ }
+ int ContextOutputIndex() const {
+ // Indices assigned to parameters are off-by-one (Parameter indices start
+ // at -1).
+ // TODO(jgruber): Consider starting at 0.
+ DCHECK_EQ(Linkage::GetJSCallContextParamIndex(FormalParameterCount()) + 1,
+ node()->op()->ValueOutputCount() - 1);
+ return node()->op()->ValueOutputCount() - 1;
+ }
+ int LastOutputIndex() const { return ContextOutputIndex(); }
};
class DynamicCheckMapsWithDeoptUnlessNode final : public CommonNodeWrapperBase {
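The FrameState wrapper introduced above replaces raw input indices with named constants and accessors. As a quick orientation, a minimal sketch of how a pass can walk the chain of outer frame states with it; the helper function is hypothetical, while has_outer_frame_state() and outer_frame_state() are the accessors added in this hunk:

  // Hypothetical helper: length of the frame-state chain reachable through
  // kFrameStateOuterStateInput, stopping once the input is the start node.
  int FrameStateChainLength(FrameState frame_state) {
    int length = 1;
    while (frame_state.has_outer_frame_state()) {
      frame_state = frame_state.outer_frame_state();
      ++length;
    }
    return length;
  }

Since an outer frame state exists only for inlined functions, the count corresponds to the inlining depth plus one.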
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index be503aa73f..2628575e4d 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -256,8 +256,6 @@ class FieldConstnessDependency final : public CompilationDependency {
class GlobalPropertyDependency final : public CompilationDependency {
public:
- // TODO(neis): Once the concurrent compiler frontend is always-on, we no
- // longer need to explicitly store the type and the read_only flag.
GlobalPropertyDependency(const PropertyCellRef& cell, PropertyCellType type,
bool read_only)
: cell_(cell), type_(type), read_only_(read_only) {
@@ -404,10 +402,6 @@ void CompilationDependencies::DependOnStableMap(const MapRef& map) {
}
}
-void CompilationDependencies::DependOnTransition(const MapRef& target_map) {
- RecordDependency(TransitionDependencyOffTheRecord(target_map));
-}
-
AllocationType CompilationDependencies::DependOnPretenureMode(
const AllocationSiteRef& site) {
DCHECK(!site.IsNeverSerializedHeapObject());
@@ -441,26 +435,15 @@ PropertyConstness CompilationDependencies::DependOnFieldConstness(
return PropertyConstness::kConst;
}
-void CompilationDependencies::DependOnFieldRepresentation(
- const MapRef& map, InternalIndex descriptor) {
- RecordDependency(FieldRepresentationDependencyOffTheRecord(map, descriptor));
-}
-
-void CompilationDependencies::DependOnFieldType(const MapRef& map,
- InternalIndex descriptor) {
- RecordDependency(FieldTypeDependencyOffTheRecord(map, descriptor));
-}
-
void CompilationDependencies::DependOnGlobalProperty(
const PropertyCellRef& cell) {
- DCHECK(!cell.IsNeverSerializedHeapObject());
PropertyCellType type = cell.property_details().cell_type();
bool read_only = cell.property_details().IsReadOnly();
RecordDependency(zone_->New<GlobalPropertyDependency>(cell, type, read_only));
}
bool CompilationDependencies::DependOnProtector(const PropertyCellRef& cell) {
- DCHECK(!cell.IsNeverSerializedHeapObject());
+ cell.SerializeAsProtector();
if (cell.value().AsSmi() != Protectors::kProtectorValid) return false;
RecordDependency(zone_->New<ProtectorDependency>(cell));
return true;
@@ -514,13 +497,6 @@ void CompilationDependencies::DependOnElementsKind(
}
}
-bool CompilationDependencies::AreValid() const {
- for (auto dep : dependencies_) {
- if (!dep->IsValid()) return false;
- }
- return true;
-}
-
bool CompilationDependencies::Commit(Handle<Code> code) {
// Dependencies are context-dependent. In the future it may be possible to
// restore them in the consumer native context, but for now they are
diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h
index 0b1612487e..bcf619ea09 100644
--- a/deps/v8/src/compiler/compilation-dependencies.h
+++ b/deps/v8/src/compiler/compilation-dependencies.h
@@ -45,22 +45,10 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
// Record the assumption that {map} stays stable.
void DependOnStableMap(const MapRef& map);
- // Record the assumption that {target_map} can be transitioned to, i.e., that
- // it does not become deprecated.
- void DependOnTransition(const MapRef& target_map);
-
// Return the pretenure mode of {site} and record the assumption that it does
// not change.
AllocationType DependOnPretenureMode(const AllocationSiteRef& site);
- // Record the assumption that the field representation of a field does not
- // change. The field is identified by the arguments.
- void DependOnFieldRepresentation(const MapRef& map, InternalIndex descriptor);
-
- // Record the assumption that the field type of a field does not change. The
- // field is identified by the arguments.
- void DependOnFieldType(const MapRef& map, InternalIndex descriptor);
-
// Return a field's constness and, if kConst, record the assumption that it
// remains kConst. The field is identified by the arguments.
//
@@ -110,23 +98,28 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
SlackTrackingPrediction DependOnInitialMapInstanceSizePrediction(
const JSFunctionRef& function);
- // The methods below allow for gathering dependencies without actually
- // recording them. They can be recorded at a later time (or they can be
- // ignored). For example,
- // DependOnTransition(map);
- // is equivalent to:
- // RecordDependency(TransitionDependencyOffTheRecord(map));
+ // Records {dependency} if not null.
void RecordDependency(CompilationDependency const* dependency);
+
+ // The methods below allow for gathering dependencies without actually
+ // recording them. They can be recorded at a later time via RecordDependency
+ // (or they can be ignored).
+
+ // Gather the assumption that {target_map} can be transitioned to, i.e., that
+ // it does not become deprecated.
CompilationDependency const* TransitionDependencyOffTheRecord(
const MapRef& target_map) const;
+
+ // Gather the assumption that the field representation of a field does not
+ // change. The field is identified by the arguments.
CompilationDependency const* FieldRepresentationDependencyOffTheRecord(
const MapRef& map, InternalIndex descriptor) const;
+
+ // Gather the assumption that the field type of a field does not change. The
+ // field is identified by the arguments.
CompilationDependency const* FieldTypeDependencyOffTheRecord(
const MapRef& map, InternalIndex descriptor) const;
- // Exposed only for testing purposes.
- bool AreValid() const;
-
private:
Zone* const zone_;
JSHeapBroker* const broker_;
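The header now separates gathering a dependency from recording it: the *OffTheRecord methods build a CompilationDependency without registering it, and RecordDependency (which ignores nullptr) installs it later or not at all. A minimal sketch of the pattern the removed comment described, assuming a CompilationDependencies* deps, a MapRef map, and an InternalIndex descriptor are in scope:

  // Immediate recording, equivalent to the removed DependOnTransition helper:
  deps->RecordDependency(deps->TransitionDependencyOffTheRecord(map));

  // Deferred recording: gather now, decide later whether to commit.
  CompilationDependency const* field_rep =
      deps->FieldRepresentationDependencyOffTheRecord(map, descriptor);
  if (field_rep != nullptr) {  // e.g. only if the guarded access is emitted
    deps->RecordDependency(field_rep);
  }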
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 5fe983213c..d64c3c80e5 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -148,7 +148,6 @@ class EffectControlLinearizer {
Node* LowerObjectIsInteger(Node* node);
Node* LowerNumberIsSafeInteger(Node* node);
Node* LowerObjectIsSafeInteger(Node* node);
- Node* LowerArgumentsFrame(Node* node);
Node* LowerArgumentsLength(Node* node);
Node* LowerRestLength(Node* node);
Node* LowerNewDoubleElements(Node* node);
@@ -251,6 +250,8 @@ class EffectControlLinearizer {
Node* CallBuiltin(Builtins::Name builtin, Operator::Properties properties,
Args...);
+ Node* ChangeBitToTagged(Node* value);
+ Node* ChangeFloat64ToTagged(Node* value, CheckForMinusZeroMode mode);
Node* ChangeInt32ToSmi(Node* value);
// In pointer compression, we smi-corrupt. This means the upper bits of a Smi
// are not important. ChangeTaggedInt32ToSmi has a known tagged int32 as input
@@ -259,11 +260,13 @@ class EffectControlLinearizer {
// In non pointer compression, it behaves like ChangeInt32ToSmi.
Node* ChangeTaggedInt32ToSmi(Node* value);
Node* ChangeInt32ToIntPtr(Node* value);
+ Node* ChangeInt32ToTagged(Node* value);
Node* ChangeInt64ToSmi(Node* value);
Node* ChangeIntPtrToInt32(Node* value);
Node* ChangeIntPtrToSmi(Node* value);
Node* ChangeUint32ToUintPtr(Node* value);
Node* ChangeUint32ToSmi(Node* value);
+ Node* ChangeUint32ToTagged(Node* value);
Node* ChangeSmiToIntPtr(Node* value);
Node* ChangeSmiToInt32(Node* value);
Node* ChangeSmiToInt64(Node* value);
@@ -1125,9 +1128,6 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kObjectIsUndetectable:
result = LowerObjectIsUndetectable(node);
break;
- case IrOpcode::kArgumentsFrame:
- result = LowerArgumentsFrame(node);
- break;
case IrOpcode::kArgumentsLength:
result = LowerArgumentsLength(node);
break;
@@ -1382,7 +1382,11 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
Node* EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node) {
CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
Node* value = node->InputAt(0);
+ return ChangeFloat64ToTagged(value, mode);
+}
+Node* EffectControlLinearizer::ChangeFloat64ToTagged(
+ Node* value, CheckForMinusZeroMode mode) {
auto done = __ MakeLabel(MachineRepresentation::kTagged);
auto if_heapnumber = __ MakeDeferredLabel();
auto if_int32 = __ MakeLabel();
@@ -1438,7 +1442,10 @@ Node* EffectControlLinearizer::LowerChangeFloat64ToTaggedPointer(Node* node) {
Node* EffectControlLinearizer::LowerChangeBitToTagged(Node* node) {
Node* value = node->InputAt(0);
+ return ChangeBitToTagged(value);
+}
+Node* EffectControlLinearizer::ChangeBitToTagged(Node* value) {
auto if_true = __ MakeLabel();
auto done = __ MakeLabel(MachineRepresentation::kTagged);
@@ -1459,7 +1466,10 @@ Node* EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node) {
Node* EffectControlLinearizer::LowerChangeInt32ToTagged(Node* node) {
Node* value = node->InputAt(0);
+ return ChangeInt32ToTagged(value);
+}
+Node* EffectControlLinearizer::ChangeInt32ToTagged(Node* value) {
if (SmiValuesAre32Bits()) {
return ChangeInt32ToSmi(value);
}
@@ -1505,7 +1515,10 @@ Node* EffectControlLinearizer::LowerChangeInt64ToTagged(Node* node) {
Node* EffectControlLinearizer::LowerChangeUint32ToTagged(Node* node) {
Node* value = node->InputAt(0);
+ return ChangeUint32ToTagged(value);
+}
+Node* EffectControlLinearizer::ChangeUint32ToTagged(Node* value) {
auto if_not_in_smi_range = __ MakeDeferredLabel();
auto done = __ MakeLabel(MachineRepresentation::kTagged);
@@ -1773,8 +1786,10 @@ Node* EffectControlLinearizer::LowerCheckClosure(Node* node,
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* value_instance_type =
__ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
- Node* check_instance_type =
- __ Word32Equal(value_instance_type, __ Int32Constant(JS_FUNCTION_TYPE));
+ Node* check_instance_type = __ Uint32LessThanOrEqual(
+ __ Int32Sub(value_instance_type,
+ __ Int32Constant(FIRST_JS_FUNCTION_TYPE)),
+ __ Int32Constant(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
__ DeoptimizeIfNot(DeoptimizeReason::kWrongCallTarget, FeedbackSource(),
check_instance_type, frame_state);
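The CheckClosure lowering above now accepts any instance type in the JSFunction range instead of exactly JS_FUNCTION_TYPE, using the usual subtract-then-unsigned-compare trick so a single comparison covers [FIRST_JS_FUNCTION_TYPE, LAST_JS_FUNCTION_TYPE]. The same trick in plain C++ for reference, a sketch rather than V8 code:

  #include <cstdint>

  // True iff first <= type <= last, assuming first <= last. Values below
  // `first` wrap around to a large unsigned number, so one unsigned compare
  // replaces a pair of signed comparisons.
  bool InInstanceTypeRange(uint32_t type, uint32_t first, uint32_t last) {
    return (type - first) <= (last - first);
  }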
@@ -3676,34 +3691,9 @@ Node* EffectControlLinearizer::LowerToBoolean(Node* node) {
}
Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) {
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
return ChangeIntPtrToSmi(
__ Load(MachineType::Pointer(), __ LoadFramePointer(),
__ IntPtrConstant(StandardFrameConstants::kArgCOffset)));
-#else
- auto done = __ MakeLabel(MachineRepresentation::kTaggedSigned);
- Node* frame = __ LoadFramePointer();
-
- Node* arguments_frame = NodeProperties::GetValueInput(node, 0);
- int formal_parameter_count = FormalParameterCountOf(node->op());
- DCHECK_LE(0, formal_parameter_count);
-
- // The ArgumentsLength node is computing the actual number of arguments.
- // We have to distinguish the case when there is an arguments adaptor frame
- // (i.e., arguments_frame != LoadFramePointer()).
- auto if_adaptor_frame = __ MakeLabel();
- __ GotoIf(__ TaggedEqual(arguments_frame, frame), &done,
- __ SmiConstant(formal_parameter_count));
- __ Goto(&if_adaptor_frame);
-
- __ Bind(&if_adaptor_frame);
- Node* arguments_length = __ BitcastWordToTaggedSigned(__ Load(
- MachineType::Pointer(), arguments_frame,
- __ IntPtrConstant(ArgumentsAdaptorFrameConstants::kLengthOffset)));
- __ Goto(&done, arguments_length);
- __ Bind(&done);
- return done.PhiAt(0);
-#endif
}
Node* EffectControlLinearizer::LowerRestLength(Node* node) {
@@ -3713,27 +3703,9 @@ Node* EffectControlLinearizer::LowerRestLength(Node* node) {
auto done = __ MakeLabel(MachineRepresentation::kTaggedSigned);
Node* frame = __ LoadFramePointer();
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
Node* arguments_length = ChangeIntPtrToSmi(
__ Load(MachineType::Pointer(), frame,
__ IntPtrConstant(StandardFrameConstants::kArgCOffset)));
-#else
- Node* arguments_frame = NodeProperties::GetValueInput(node, 0);
-
- // The RestLength node is computing the number of rest parameters,
- // which is max(0, actual_parameter_count - formal_parameter_count).
- // We have to distinguish the case, when there is an arguments adaptor frame
- // (i.e., arguments_frame != LoadFramePointer()).
- auto if_adaptor_frame = __ MakeLabel();
- __ GotoIf(__ TaggedEqual(arguments_frame, frame), &done, __ SmiConstant(0));
- __ Goto(&if_adaptor_frame);
-
- __ Bind(&if_adaptor_frame);
- Node* arguments_length = __ BitcastWordToTaggedSigned(__ Load(
- MachineType::Pointer(), arguments_frame,
- __ IntPtrConstant(ArgumentsAdaptorFrameConstants::kLengthOffset)));
-#endif
-
Node* rest_length =
__ SmiSub(arguments_length, __ SmiConstant(formal_parameter_count));
__ GotoIf(__ SmiLessThan(rest_length, __ SmiConstant(0)), &done,
@@ -3744,27 +3716,6 @@ Node* EffectControlLinearizer::LowerRestLength(Node* node) {
return done.PhiAt(0);
}
-Node* EffectControlLinearizer::LowerArgumentsFrame(Node* node) {
- auto done = __ MakeLabel(MachineType::PointerRepresentation());
-
- Node* frame = __ LoadFramePointer();
- Node* parent_frame =
- __ Load(MachineType::Pointer(), frame,
- __ IntPtrConstant(StandardFrameConstants::kCallerFPOffset));
- Node* parent_frame_type = __ Load(
- MachineType::IntPtr(), parent_frame,
- __ IntPtrConstant(CommonFrameConstants::kContextOrFrameTypeOffset));
-
- __ GotoIf(__ IntPtrEqual(parent_frame_type,
- __ IntPtrConstant(StackFrame::TypeToMarker(
- StackFrame::ARGUMENTS_ADAPTOR))),
- &done, parent_frame);
- __ Goto(&done, frame);
-
- __ Bind(&done);
- return done.PhiAt(0);
-}
-
Node* EffectControlLinearizer::LowerNewDoubleElements(Node* node) {
AllocationType const allocation = AllocationTypeOf(node->op());
Node* length = node->InputAt(0);
@@ -3864,8 +3815,8 @@ Node* EffectControlLinearizer::LowerNewArgumentsElements(Node* node) {
CreateArgumentsType type = parameters.arguments_type();
Operator::Properties const properties = node->op()->properties();
CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
- Node* frame = NodeProperties::GetValueInput(node, 0);
- Node* arguments_count = NodeProperties::GetValueInput(node, 1);
+ Node* frame = __ LoadFramePointer();
+ Node* arguments_count = NodeProperties::GetValueInput(node, 0);
Builtins::Name builtin_name;
switch (type) {
case CreateArgumentsType::kMappedArguments:
@@ -5026,7 +4977,6 @@ void EffectControlLinearizer::LowerStoreMessage(Node* node) {
__ StoreField(AccessBuilder::ForExternalIntPtr(), offset, object_pattern);
}
-// TODO(mslekova): Avoid code duplication with simplified lowering.
static MachineType MachineTypeFor(CTypeInfo::Type type) {
switch (type) {
case CTypeInfo::Type::kVoid:
@@ -5062,23 +5012,33 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
value_input_count);
if (fast_api_call_stack_slot_ == nullptr) {
- // Add the { fallback } output parameter.
- int kAlign = 4;
+ int kAlign = alignof(v8::FastApiCallbackOptions);
int kSize = sizeof(v8::FastApiCallbackOptions);
- // If this check fails, probably you've added new fields to
+ // If this check fails, you've probably added new fields to
// v8::FastApiCallbackOptions, which means you'll need to write code
// that initializes and reads from them too (see the Store and Load to
// fast_api_call_stack_slot_ below).
- CHECK_EQ(kSize, 1);
+ CHECK_EQ(kSize, sizeof(uintptr_t) * 2);
fast_api_call_stack_slot_ = __ StackSlot(kSize, kAlign);
}
- // Generate the store to `fast_api_call_stack_slot_`.
- __ Store(StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
- fast_api_call_stack_slot_, 0, jsgraph()->ZeroConstant());
+ // Leave the slot uninitialized if the callback doesn't use it.
+ if (c_signature->HasOptions()) {
+ // Generate the stores to `fast_api_call_stack_slot_`.
+ __ Store(
+ StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
+ fast_api_call_stack_slot_,
+ static_cast<int>(offsetof(v8::FastApiCallbackOptions, fallback)),
+ jsgraph()->ZeroConstant());
+ __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ fast_api_call_stack_slot_,
+ static_cast<int>(offsetof(v8::FastApiCallbackOptions, data)),
+ n.SlowCallArgument(FastApiCallNode::kSlowCallDataArgumentIndex));
+ }
MachineSignature::Builder builder(
- graph()->zone(), 1, c_arg_count + FastApiCallNode::kHasErrorInputCount);
+ graph()->zone(), 1, c_arg_count + (c_signature->HasOptions() ? 1 : 0));
MachineType return_type = MachineTypeFor(c_signature->ReturnInfo().GetType());
builder.AddReturn(return_type);
for (int i = 0; i < c_arg_count; ++i) {
@@ -5086,7 +5046,9 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
MachineTypeFor(c_signature->ArgumentInfo(i).GetType());
builder.AddParam(machine_type);
}
- builder.AddParam(MachineType::Pointer()); // fast_api_call_stack_slot_
+ if (c_signature->HasOptions()) {
+ builder.AddParam(MachineType::Pointer()); // fast_api_call_stack_slot_
+ }
CallDescriptor* call_descriptor =
Linkage::GetSimplifiedCDescriptor(graph()->zone(), builder.Build());
@@ -5101,7 +5063,7 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
target_address, 0, n.target());
Node** const inputs = graph()->zone()->NewArray<Node*>(
- c_arg_count + FastApiCallNode::kFastCallExtraInputCount);
+ c_arg_count + n.FastCallExtraInputCount());
inputs[0] = n.target();
for (int i = FastApiCallNode::kFastTargetInputCount;
i < c_arg_count + FastApiCallNode::kFastTargetInputCount; ++i) {
@@ -5113,37 +5075,74 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
inputs[i] = NodeProperties::GetValueInput(node, i);
}
}
- inputs[c_arg_count + 1] = fast_api_call_stack_slot_;
-
- inputs[c_arg_count + 2] = __ effect();
- inputs[c_arg_count + 3] = __ control();
+ if (c_signature->HasOptions()) {
+ inputs[c_arg_count + 1] = fast_api_call_stack_slot_;
+ inputs[c_arg_count + 2] = __ effect();
+ inputs[c_arg_count + 3] = __ control();
+ } else {
+ inputs[c_arg_count + 1] = __ effect();
+ inputs[c_arg_count + 2] = __ control();
+ }
- __ Call(call_descriptor,
- c_arg_count + FastApiCallNode::kFastCallExtraInputCount, inputs);
+ Node* c_call_result = __ Call(
+ call_descriptor, c_arg_count + n.FastCallExtraInputCount(), inputs);
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
target_address, 0, __ IntPtrConstant(0));
+ Node* fast_call_result;
+ switch (c_signature->ReturnInfo().GetType()) {
+ case CTypeInfo::Type::kVoid:
+ fast_call_result = __ UndefinedConstant();
+ break;
+ case CTypeInfo::Type::kBool:
+ static_assert(sizeof(bool) == 1, "unsupported bool size");
+ fast_call_result = ChangeBitToTagged(
+ __ Word32And(c_call_result, __ Int32Constant(0xFF)));
+ break;
+ case CTypeInfo::Type::kInt32:
+ fast_call_result = ChangeInt32ToTagged(c_call_result);
+ break;
+ case CTypeInfo::Type::kUint32:
+ fast_call_result = ChangeUint32ToTagged(c_call_result);
+ break;
+ case CTypeInfo::Type::kInt64:
+ case CTypeInfo::Type::kUint64:
+ UNREACHABLE();
+ case CTypeInfo::Type::kFloat32:
+ fast_call_result =
+ ChangeFloat64ToTagged(__ ChangeFloat32ToFloat64(c_call_result),
+ CheckForMinusZeroMode::kCheckForMinusZero);
+ break;
+ case CTypeInfo::Type::kFloat64:
+ fast_call_result = ChangeFloat64ToTagged(
+ c_call_result, CheckForMinusZeroMode::kCheckForMinusZero);
+ break;
+ case CTypeInfo::Type::kV8Value:
+ UNREACHABLE();
+ }
+
+ if (!c_signature->HasOptions()) return fast_call_result;
+
// Generate the load from `fast_api_call_stack_slot_`.
- Node* load = __ Load(MachineType::Int32(), fast_api_call_stack_slot_, 0);
+ Node* load =
+ __ Load(MachineType::Int32(), fast_api_call_stack_slot_,
+ static_cast<int>(offsetof(v8::FastApiCallbackOptions, fallback)));
- TNode<Boolean> cond =
- TNode<Boolean>::UncheckedCast(__ Word32Equal(load, __ Int32Constant(0)));
+ Node* is_zero = __ Word32Equal(load, __ Int32Constant(0));
// Hint to true.
auto if_success = __ MakeLabel();
auto if_error = __ MakeDeferredLabel();
auto merge = __ MakeLabel(MachineRepresentation::kTagged);
- __ Branch(cond, &if_success, &if_error);
+ __ Branch(is_zero, &if_success, &if_error);
- // Generate fast call.
__ Bind(&if_success);
- Node* then_result = [&]() { return __ UndefinedConstant(); }();
- __ Goto(&merge, then_result);
+ __ Goto(&merge, fast_call_result);
// Generate direct slow call.
__ Bind(&if_error);
- Node* else_result = [&]() {
+ {
Node** const slow_inputs = graph()->zone()->NewArray<Node*>(
n.SlowCallArgumentCount() +
FastApiCallNode::kEffectAndControlInputCount);
@@ -5157,12 +5156,11 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
slow_inputs[index] = __ effect();
slow_inputs[index + 1] = __ control();
- Node* slow_call = __ Call(
+ Node* slow_call_result = __ Call(
params.descriptor(),
index + FastApiCallNode::kEffectAndControlInputCount, slow_inputs);
- return slow_call;
- }();
- __ Goto(&merge, else_result);
+ __ Goto(&merge, slow_call_result);
+ }
__ Bind(&merge);
return merge.PhiAt(0);
@@ -5235,13 +5233,8 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
Node* offset =
__ IntAdd(__ WordShl(index, __ IntPtrConstant(kTaggedSizeLog2)),
__ IntPtrConstant(JSObject::kHeaderSize - kHeapObjectTag));
- if (FLAG_unbox_double_fields) {
- Node* result = __ Load(MachineType::Float64(), object, offset);
- __ Goto(&done_double, result);
- } else {
- Node* field = __ Load(MachineType::AnyTagged(), object, offset);
- __ Goto(&loaded_field, field);
- }
+ Node* field = __ Load(MachineType::AnyTagged(), object, offset);
+ __ Goto(&loaded_field, field);
}
__ Bind(&if_outofobject);
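In the fast API call lowering above, the FastApiCallbackOptions stack slot now exists only when the C signature declares options: fallback is seeded with zero and data with the slow call's data argument, and after the C call the fallback word selects between keeping the already tagged fast result and issuing the regular (slow) API call. For orientation, a sketch of the embedder-side half of that protocol; the helper is hypothetical, and only the fallback field, which the stores and load above address via offsetof, is assumed:

  #include "v8-fast-api-calls.h"  // include path as seen by embedders

  // Hypothetical helper a fast API callback could call when it cannot handle
  // its input: writing fallback is what makes the Word32 load in the lowering
  // observe a non-zero value and branch to the slow call.
  inline void RequestSlowPath(v8::FastApiCallbackOptions& options) {
    options.fallback = 1;
  }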
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index f4ab1c9709..97b22d8875 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -159,9 +159,12 @@ Node* EscapeAnalysisReducer::ReduceDeoptState(Node* node, Node* effect,
// This input order is important to match the DFS traversal used in the
// instruction selector. Otherwise, the instruction selector might find a
// duplicate node before the original one.
- for (int input_id : {kFrameStateOuterStateInput, kFrameStateFunctionInput,
- kFrameStateParametersInput, kFrameStateContextInput,
- kFrameStateLocalsInput, kFrameStateStackInput}) {
+ for (int input_id : {FrameState::kFrameStateOuterStateInput,
+ FrameState::kFrameStateFunctionInput,
+ FrameState::kFrameStateParametersInput,
+ FrameState::kFrameStateContextInput,
+ FrameState::kFrameStateLocalsInput,
+ FrameState::kFrameStateStackInput}) {
Node* input = node->InputAt(input_id);
new_node.ReplaceInput(ReduceDeoptState(input, effect, deduplicator),
input_id);
@@ -226,9 +229,7 @@ void EscapeAnalysisReducer::Finalize() {
? params.formal_parameter_count()
: 0;
- Node* arguments_frame = NodeProperties::GetValueInput(node, 0);
- if (arguments_frame->opcode() != IrOpcode::kArgumentsFrame) continue;
- Node* arguments_length = NodeProperties::GetValueInput(node, 1);
+ Node* arguments_length = NodeProperties::GetValueInput(node, 0);
if (arguments_length->opcode() != IrOpcode::kArgumentsLength) continue;
Node* arguments_length_state = nullptr;
@@ -328,7 +329,10 @@ void EscapeAnalysisReducer::Finalize() {
}
NodeProperties::SetType(offset,
TypeCache::Get()->kArgumentsLengthType);
- NodeProperties::ReplaceValueInput(load, arguments_frame, 0);
+ Node* frame = jsgraph()->graph()->NewNode(
+ jsgraph()->machine()->LoadFramePointer());
+ NodeProperties::SetType(frame, Type::ExternalPointer());
+ NodeProperties::ReplaceValueInput(load, frame, 0);
NodeProperties::ReplaceValueInput(load, offset, 1);
NodeProperties::ChangeOp(
load, jsgraph()->simplified()->LoadStackArgument());
@@ -337,7 +341,7 @@ void EscapeAnalysisReducer::Finalize() {
case IrOpcode::kLoadField: {
DCHECK_EQ(FieldAccessOf(load->op()).offset,
FixedArray::kLengthOffset);
- Node* length = NodeProperties::GetValueInput(node, 1);
+ Node* length = NodeProperties::GetValueInput(node, 0);
ReplaceWithValue(load, length);
break;
}
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index c27bf8551c..7ff6ab684f 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -133,7 +133,7 @@ class VariableTracker {
Maybe<Node*> Get(Variable var) {
Node* node = current_state_.Get(var);
if (node && node->opcode() == IrOpcode::kDead) {
- // TODO(tebbi): We use {Dead} as a sentinel for uninitialized memory.
+ // TODO(turbofan): We use {Dead} as a sentinel for uninitialized memory.
// Reading uninitialized memory can only happen in unreachable code. In
// this case, we have to mark the object as escaping to avoid dead nodes
// in the graph. This is a workaround that should be removed once we can
@@ -479,8 +479,8 @@ VariableTracker::State VariableTracker::MergeInputs(Node* effect_phi) {
Node* phi = graph_->graph()->NewNode(
graph_->common()->Phi(MachineRepresentation::kTagged, arity),
arity + 1, &buffer_.front());
- // TODO(tebbi): Computing precise types here is tricky, because of
- // the necessary revisitations. If we really need this, we should
+ // TODO(turbofan): Computing precise types here is tricky, because
+ // of the necessary revisitations. If we really need this, we should
// probably do it afterwards.
NodeProperties::SetType(phi, Type::Any());
reducer_->AddRoot(phi);
@@ -711,7 +711,7 @@ void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current,
} else if (right_object && !right_object->HasEscaped()) {
replacement = jsgraph->FalseConstant();
}
- // TODO(tebbi) This is a workaround for uninhabited types. If we
+ // TODO(turbofan): This is a workaround for uninhabited types. If we
// replaced a value of uninhabited type with a constant, we would
// widen the type of the node. This could produce inconsistent
// types (which might confuse representation selection). We get
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index 0fbc7d0bdd..907c7cc087 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -131,7 +131,7 @@ class VirtualObject : public Dependable {
CHECK(IsAligned(offset, kTaggedSize));
CHECK(!HasEscaped());
if (offset >= size()) {
- // TODO(tebbi): Reading out-of-bounds can only happen in unreachable
+ // TODO(turbofan): Reading out-of-bounds can only happen in unreachable
// code. In this case, we have to mark the object as escaping to avoid
// dead nodes in the graph. This is a workaround that should be removed
// once we can handle dead nodes everywhere.
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index 5598a0fe59..b7c4588e36 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -11,16 +11,12 @@
#include "src/compiler/node.h"
#include "src/handles/handles-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/wasm/value-type.h"
namespace v8 {
namespace internal {
namespace compiler {
-// Guard equality of these constants. Ideally they should be merged at
-// some point.
-STATIC_ASSERT(kFrameStateOuterStateInput ==
- FrameState::kFrameStateOuterStateInput);
-
size_t hash_value(OutputFrameStateCombine const& sc) {
return base::hash_value(sc.parameter_);
}
@@ -53,8 +49,8 @@ size_t hash_value(FrameStateInfo const& info) {
std::ostream& operator<<(std::ostream& os, FrameStateType type) {
switch (type) {
- case FrameStateType::kInterpretedFunction:
- os << "INTERPRETED_FRAME";
+ case FrameStateType::kUnoptimizedFunction:
+ os << "UNOPTIMIZED_FRAME";
break;
case FrameStateType::kArgumentsAdaptor:
os << "ARGUMENTS_ADAPTOR";
@@ -65,6 +61,9 @@ std::ostream& operator<<(std::ostream& os, FrameStateType type) {
case FrameStateType::kBuiltinContinuation:
os << "BUILTIN_CONTINUATION_FRAME";
break;
+ case FrameStateType::kJSToWasmBuiltinContinuation:
+ os << "JS_TO_WASM_BUILTIN_CONTINUATION_FRAME";
+ break;
case FrameStateType::kJavaScriptBuiltinContinuation:
os << "JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME";
break;
@@ -88,7 +87,7 @@ std::ostream& operator<<(std::ostream& os, FrameStateInfo const& info) {
namespace {
-// Lazy deopt points where the frame state is assocated with a call get an
+// Lazy deopt points where the frame state is associated with a call get an
// additional parameter for the return result from the call. The return result
// is added by the deoptimizer and not explicitly specified in the frame state.
// Lazy deopt points which can catch exceptions further get an additional
@@ -110,7 +109,8 @@ FrameState CreateBuiltinContinuationFrameStateCommon(
JSGraph* jsgraph, FrameStateType frame_type, Builtins::Name name,
Node* closure, Node* context, Node** parameters, int parameter_count,
Node* outer_frame_state,
- Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>()) {
+ Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>(),
+ const wasm::FunctionSig* signature = nullptr) {
Graph* const graph = jsgraph->graph();
CommonOperatorBuilder* const common = jsgraph->common();
@@ -118,10 +118,13 @@ FrameState CreateBuiltinContinuationFrameStateCommon(
common->StateValues(parameter_count, SparseInputMask::Dense());
Node* params_node = graph->NewNode(op_param, parameter_count, parameters);
- BailoutId bailout_id = Builtins::GetContinuationBailoutId(name);
+ BytecodeOffset bailout_id = Builtins::GetContinuationBytecodeOffset(name);
const FrameStateFunctionInfo* state_info =
- common->CreateFrameStateFunctionInfo(frame_type, parameter_count, 0,
- shared);
+ signature ? common->CreateJSToWasmFrameStateFunctionInfo(
+ frame_type, parameter_count, 0, shared, signature)
+ : common->CreateFrameStateFunctionInfo(
+ frame_type, parameter_count, 0, shared);
+
const Operator* op = common->FrameState(
bailout_id, OutputFrameStateCombine::Ignore(), state_info);
return FrameState(graph->NewNode(op, params_node, jsgraph->EmptyStateValues(),
@@ -134,7 +137,7 @@ FrameState CreateBuiltinContinuationFrameStateCommon(
FrameState CreateStubBuiltinContinuationFrameState(
JSGraph* jsgraph, Builtins::Name name, Node* context,
Node* const* parameters, int parameter_count, Node* outer_frame_state,
- ContinuationFrameStateMode mode) {
+ ContinuationFrameStateMode mode, const wasm::FunctionSig* signature) {
Callable callable = Builtins::CallableFor(jsgraph->isolate(), name);
CallInterfaceDescriptor descriptor = callable.descriptor();
@@ -163,10 +166,29 @@ FrameState CreateStubBuiltinContinuationFrameState(
actual_parameters.push_back(parameters[i]);
}
+ FrameStateType frame_state_type = FrameStateType::kBuiltinContinuation;
+ if (name == Builtins::kJSToWasmLazyDeoptContinuation) {
+ CHECK_NOT_NULL(signature);
+ frame_state_type = FrameStateType::kJSToWasmBuiltinContinuation;
+ }
return CreateBuiltinContinuationFrameStateCommon(
- jsgraph, FrameStateType::kBuiltinContinuation, name,
- jsgraph->UndefinedConstant(), context, actual_parameters.data(),
- static_cast<int>(actual_parameters.size()), outer_frame_state);
+ jsgraph, frame_state_type, name, jsgraph->UndefinedConstant(), context,
+ actual_parameters.data(), static_cast<int>(actual_parameters.size()),
+ outer_frame_state, Handle<SharedFunctionInfo>(), signature);
+}
+
+FrameState CreateJSWasmCallBuiltinContinuationFrameState(
+ JSGraph* jsgraph, Node* context, Node* outer_frame_state,
+ const wasm::FunctionSig* signature) {
+ base::Optional<wasm::ValueKind> wasm_return_type =
+ wasm::WasmReturnTypeFromSignature(signature);
+ Node* node_return_type =
+ jsgraph->SmiConstant(wasm_return_type ? wasm_return_type.value() : -1);
+ Node* lazy_deopt_parameters[] = {node_return_type};
+ return CreateStubBuiltinContinuationFrameState(
+ jsgraph, Builtins::kJSToWasmLazyDeoptContinuation, context,
+ lazy_deopt_parameters, arraysize(lazy_deopt_parameters),
+ outer_frame_state, ContinuationFrameStateMode::LAZY, signature);
}
FrameState CreateJavaScriptBuiltinContinuationFrameState(
@@ -185,6 +207,7 @@ FrameState CreateJavaScriptBuiltinContinuationFrameState(
// to be the second value in the translation when creating stack crawls
// (e.g. Error.stack) of optimized JavaScript frames.
std::vector<Node*> actual_parameters;
+ actual_parameters.reserve(stack_parameter_count);
for (int i = 0; i < stack_parameter_count; ++i) {
actual_parameters.push_back(stack_parameters[i]);
}
diff --git a/deps/v8/src/compiler/frame-states.h b/deps/v8/src/compiler/frame-states.h
index 1dc54c0fdb..32586264e7 100644
--- a/deps/v8/src/compiler/frame-states.h
+++ b/deps/v8/src/compiler/frame-states.h
@@ -62,10 +62,12 @@ class OutputFrameStateCombine {
// The type of stack frame that a FrameState node represents.
enum class FrameStateType {
- kInterpretedFunction, // Represents an InterpretedFrame.
+ kUnoptimizedFunction, // Represents an UnoptimizedFrame.
kArgumentsAdaptor, // Represents an ArgumentsAdaptorFrame.
kConstructStub, // Represents a ConstructStubFrame.
kBuiltinContinuation, // Represents a continuation to a stub.
+ kJSToWasmBuiltinContinuation, // Represents a lazy deopt continuation for a
+ // JS to Wasm call.
 kJavaScriptBuiltinContinuation, // Represents a continuation to a JavaScript
// builtin.
kJavaScriptBuiltinContinuationWithCatch // Represents a continuation to a
@@ -89,7 +91,7 @@ class FrameStateFunctionInfo {
FrameStateType type() const { return type_; }
static bool IsJSFunctionType(FrameStateType type) {
- return type == FrameStateType::kInterpretedFunction ||
+ return type == FrameStateType::kUnoptimizedFunction ||
type == FrameStateType::kJavaScriptBuiltinContinuation ||
type == FrameStateType::kJavaScriptBuiltinContinuationWithCatch;
}
@@ -101,20 +103,37 @@ class FrameStateFunctionInfo {
Handle<SharedFunctionInfo> const shared_info_;
};
+class JSToWasmFrameStateFunctionInfo : public FrameStateFunctionInfo {
+ public:
+ JSToWasmFrameStateFunctionInfo(FrameStateType type, int parameter_count,
+ int local_count,
+ Handle<SharedFunctionInfo> shared_info,
+ const wasm::FunctionSig* signature)
+ : FrameStateFunctionInfo(type, parameter_count, local_count, shared_info),
+ signature_(signature) {
+ DCHECK_NOT_NULL(signature);
+ }
+
+ const wasm::FunctionSig* signature() const { return signature_; }
+
+ private:
+ const wasm::FunctionSig* const signature_;
+};
class FrameStateInfo final {
public:
- FrameStateInfo(BailoutId bailout_id, OutputFrameStateCombine state_combine,
+ FrameStateInfo(BytecodeOffset bailout_id,
+ OutputFrameStateCombine state_combine,
const FrameStateFunctionInfo* info)
: bailout_id_(bailout_id),
frame_state_combine_(state_combine),
info_(info) {}
FrameStateType type() const {
- return info_ == nullptr ? FrameStateType::kInterpretedFunction
+ return info_ == nullptr ? FrameStateType::kUnoptimizedFunction
: info_->type();
}
- BailoutId bailout_id() const { return bailout_id_; }
+ BytecodeOffset bailout_id() const { return bailout_id_; }
OutputFrameStateCombine state_combine() const { return frame_state_combine_; }
MaybeHandle<SharedFunctionInfo> shared_info() const {
return info_ == nullptr ? MaybeHandle<SharedFunctionInfo>()
@@ -129,7 +148,7 @@ class FrameStateInfo final {
const FrameStateFunctionInfo* function_info() const { return info_; }
private:
- BailoutId const bailout_id_;
+ BytecodeOffset const bailout_id_;
OutputFrameStateCombine const frame_state_combine_;
const FrameStateFunctionInfo* const info_;
};
@@ -141,20 +160,19 @@ size_t hash_value(FrameStateInfo const&);
std::ostream& operator<<(std::ostream&, FrameStateInfo const&);
-static constexpr int kFrameStateParametersInput = 0;
-static constexpr int kFrameStateLocalsInput = 1;
-static constexpr int kFrameStateStackInput = 2;
-static constexpr int kFrameStateContextInput = 3;
-static constexpr int kFrameStateFunctionInput = 4;
-static constexpr int kFrameStateOuterStateInput = 5;
-static constexpr int kFrameStateInputCount = kFrameStateOuterStateInput + 1;
-
enum class ContinuationFrameStateMode { EAGER, LAZY, LAZY_WITH_CATCH };
+class FrameState;
+
FrameState CreateStubBuiltinContinuationFrameState(
JSGraph* graph, Builtins::Name name, Node* context, Node* const* parameters,
int parameter_count, Node* outer_frame_state,
- ContinuationFrameStateMode mode);
+ ContinuationFrameStateMode mode,
+ const wasm::FunctionSig* signature = nullptr);
+
+FrameState CreateJSWasmCallBuiltinContinuationFrameState(
+ JSGraph* jsgraph, Node* context, Node* outer_frame_state,
+ const wasm::FunctionSig* signature);
FrameState CreateJavaScriptBuiltinContinuationFrameState(
JSGraph* graph, const SharedFunctionInfoRef& shared, Builtins::Name name,
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index 1b03a22968..7fc0c27b84 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -51,13 +51,13 @@ class CallDescriptor;
//
// slot JS frame
// +-----------------+--------------------------------
-// -n-1 | parameter 0 | ^
+// -n-1 | parameter n | ^
// |- - - - - - - - -| |
-// -n | | Caller
+// -n | parameter n-1 | Caller
// ... | ... | frame slots
-// -2 | parameter n-1 | (slot < 0)
+// -2 | parameter 1 | (slot < 0)
// |- - - - - - - - -| |
-// -1 | parameter n | v
+// -1 | parameter 0 | v
// -----+-----------------+--------------------------------
// 0 | return addr | ^ ^
// |- - - - - - - - -| | |
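With the corrected diagram, parameter 0 occupies the caller-frame slot closest to the return address and parameter n the farthest one, so the i-th parameter lives in slot -(i + 1). A one-line sketch of that mapping; the helper name is illustrative and not part of this header:

  // Caller-frame slot of the i-th JS parameter, per the diagram above:
  // parameter 0 -> slot -1, ..., parameter n -> slot -n-1.
  constexpr int CallerSlotForParameter(int i) { return -(i + 1); }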
diff --git a/deps/v8/src/compiler/functional-list.h b/deps/v8/src/compiler/functional-list.h
index b3d7a5571a..465bdf133b 100644
--- a/deps/v8/src/compiler/functional-list.h
+++ b/deps/v8/src/compiler/functional-list.h
@@ -16,7 +16,7 @@ namespace compiler {
// results in an O(1) copy operation. It is the equivalent of functional lists
// in ML-like languages, with the only difference that it also caches the length
// of the list in each node.
-// TODO(tebbi): Use this implementation also for RedundancyElimination.
+// TODO(turbofan): Use this implementation also for RedundancyElimination.
template <class A>
class FunctionalList {
private:
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index a2de1b7c09..bb3bc34a58 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -34,6 +34,7 @@ class BasicBlock;
V(BitcastInt32ToFloat32) \
V(BitcastWord32ToWord64) \
V(BitcastInt64ToFloat64) \
+ V(ChangeFloat32ToFloat64) \
V(ChangeFloat64ToInt32) \
V(ChangeFloat64ToInt64) \
V(ChangeFloat64ToUint32) \
@@ -47,6 +48,7 @@ class BasicBlock;
V(Float64ExtractLowWord32) \
V(Float64SilenceNaN) \
V(RoundFloat64ToInt32) \
+ V(RoundInt32ToFloat32) \
V(TruncateFloat64ToFloat32) \
V(TruncateFloat64ToWord32) \
V(TruncateInt64ToInt32) \
@@ -89,6 +91,9 @@ class BasicBlock;
V(Word64And) \
V(Word64Equal) \
V(Word64Or) \
+ V(Word64Sar) \
+ V(Word64SarShiftOutZeros) \
+ V(Word64Shl) \
V(Word64Shr) \
V(WordAnd) \
V(WordEqual) \
@@ -105,8 +110,12 @@ class BasicBlock;
V(Int32Mod) \
V(Int32MulWithOverflow) \
V(Int32SubWithOverflow) \
+ V(Int64Div) \
+ V(Int64Mod) \
V(Uint32Div) \
- V(Uint32Mod)
+ V(Uint32Mod) \
+ V(Uint64Div) \
+ V(Uint64Mod)
#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \
V(AllocateInOldGenerationStub, Code) \
@@ -369,6 +378,22 @@ class V8_EXPORT_PRIVATE GraphAssembler {
BranchHint hint, Vars...);
// Control helpers.
+
+ // {GotoIf(c, l, h)} is equivalent to {BranchWithHint(c, l, templ, h);
+ // Bind(templ)}.
+ template <typename... Vars>
+ void GotoIf(Node* condition, GraphAssemblerLabel<sizeof...(Vars)>* label,
+ BranchHint hint, Vars...);
+
+ // {GotoIfNot(c, l, h)} is equivalent to {BranchWithHint(c, templ, l, h);
+ // Bind(templ)}.
+ // The branch hint refers to the expected outcome of the provided condition,
+ // so {GotoIfNot(..., BranchHint::kTrue)} means "optimize for the case where
+ // the branch is *not* taken".
+ template <typename... Vars>
+ void GotoIfNot(Node* condition, GraphAssemblerLabel<sizeof...(Vars)>* label,
+ BranchHint hint, Vars...);
+
// {GotoIf(c, l)} is equivalent to {Branch(c, l, templ);Bind(templ)}.
template <typename... Vars>
void GotoIf(Node* condition, GraphAssemblerLabel<sizeof...(Vars)>* label,
@@ -747,9 +772,7 @@ void GraphAssembler::Goto(GraphAssemblerLabel<sizeof...(Vars)>* label,
template <typename... Vars>
void GraphAssembler::GotoIf(Node* condition,
GraphAssemblerLabel<sizeof...(Vars)>* label,
- Vars... vars) {
- BranchHint hint =
- label->IsDeferred() ? BranchHint::kFalse : BranchHint::kNone;
+ BranchHint hint, Vars... vars) {
Node* branch = graph()->NewNode(common()->Branch(hint), condition, control());
control_ = graph()->NewNode(common()->IfTrue(), branch);
@@ -762,8 +785,7 @@ void GraphAssembler::GotoIf(Node* condition,
template <typename... Vars>
void GraphAssembler::GotoIfNot(Node* condition,
GraphAssemblerLabel<sizeof...(Vars)>* label,
- Vars... vars) {
- BranchHint hint = label->IsDeferred() ? BranchHint::kTrue : BranchHint::kNone;
+ BranchHint hint, Vars... vars) {
Node* branch = graph()->NewNode(common()->Branch(hint), condition, control());
control_ = graph()->NewNode(common()->IfFalse(), branch);
@@ -773,6 +795,23 @@ void GraphAssembler::GotoIfNot(Node* condition,
control_ = AddNode(graph()->NewNode(common()->IfTrue(), branch));
}
+template <typename... Vars>
+void GraphAssembler::GotoIf(Node* condition,
+ GraphAssemblerLabel<sizeof...(Vars)>* label,
+ Vars... vars) {
+ BranchHint hint =
+ label->IsDeferred() ? BranchHint::kFalse : BranchHint::kNone;
+ return GotoIf(condition, label, hint, vars...);
+}
+
+template <typename... Vars>
+void GraphAssembler::GotoIfNot(Node* condition,
+ GraphAssemblerLabel<sizeof...(Vars)>* label,
+ Vars... vars) {
+ BranchHint hint = label->IsDeferred() ? BranchHint::kTrue : BranchHint::kNone;
+ return GotoIfNot(condition, label, hint, vars...);
+}
+
template <typename... Args>
TNode<Object> GraphAssembler::Call(const CallDescriptor* call_descriptor,
Node* first_arg, Args... args) {
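The new overloads let a lowering pass an explicit BranchHint instead of having GotoIf/GotoIfNot derive one from whether the target label is deferred. A minimal usage sketch in the style of the graph-assembler based lowerings elsewhere in this patch; the function and its condition node are hypothetical:

  // Returns Undefined when `condition` holds, otherwise `value`. The branch is
  // hinted as taken even though `if_true` is not a deferred label (the
  // label-based overload would have picked BranchHint::kNone here).
  Node* SelectUndefinedIf(Node* condition, Node* value) {
    auto if_true = __ MakeLabel();
    auto done = __ MakeLabel(MachineRepresentation::kTagged);

    __ GotoIf(condition, &if_true, BranchHint::kTrue);
    __ Goto(&done, value);

    __ Bind(&if_true);
    __ Goto(&done, __ UndefinedConstant());

    __ Bind(&done);
    return done.PhiAt(0);
  }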
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
index 155d6fa8ef..998f37eea8 100644
--- a/deps/v8/src/compiler/graph-reducer.cc
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -10,6 +10,7 @@
#include "src/codegen/tick-counter.h"
#include "src/compiler/graph.h"
#include "src/compiler/js-heap-broker.h"
+#include "src/compiler/node-observer.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/verifier.h"
@@ -28,8 +29,19 @@ enum class GraphReducer::State : uint8_t {
void Reducer::Finalize() {}
+Reduction Reducer::Reduce(Node* node,
+ ObserveNodeManager* observe_node_manager) {
+ Reduction reduction = Reduce(node);
+ if (V8_UNLIKELY(observe_node_manager && reduction.Changed())) {
+ observe_node_manager->OnNodeChanged(reducer_name(), node,
+ reduction.replacement());
+ }
+ return reduction;
+}
+
GraphReducer::GraphReducer(Zone* zone, Graph* graph, TickCounter* tick_counter,
- JSHeapBroker* broker, Node* dead)
+ JSHeapBroker* broker, Node* dead,
+ ObserveNodeManager* observe_node_manager)
: graph_(graph),
dead_(dead),
state_(graph, 4),
@@ -37,7 +49,8 @@ GraphReducer::GraphReducer(Zone* zone, Graph* graph, TickCounter* tick_counter,
revisit_(zone),
stack_(zone),
tick_counter_(tick_counter),
- broker_(broker) {
+ broker_(broker),
+ observe_node_manager_(observe_node_manager) {
if (dead != nullptr) {
NodeProperties::SetType(dead_, Type::None());
}
@@ -89,7 +102,7 @@ Reduction GraphReducer::Reduce(Node* const node) {
for (auto i = reducers_.begin(); i != reducers_.end();) {
if (i != skip) {
tick_counter_->TickAndMaybeEnterSafepoint();
- Reduction reduction = (*i)->Reduce(node);
+ Reduction reduction = (*i)->Reduce(node, observe_node_manager_);
if (!reduction.Changed()) {
// No change from this reducer.
} else if (reduction.replacement() == node) {
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index 171033fe53..6a6eab5ebb 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -20,6 +20,7 @@ namespace compiler {
class Graph;
class JSHeapBroker;
class Node;
+class ObserveNodeManager;
// NodeIds are identifying numbers for nodes that can be used to index auxiliary
// out-of-line data associated with each node.
@@ -58,7 +59,7 @@ class V8_EXPORT_PRIVATE Reducer {
virtual const char* reducer_name() const = 0;
// Try to reduce a node if possible.
- virtual Reduction Reduce(Node* node) = 0;
+ Reduction Reduce(Node* node, ObserveNodeManager* observe_node_manager);
// Invoked by the {GraphReducer} when all nodes are done. Can be used to
// do additional reductions at the end, which in turn can cause a new round
@@ -69,6 +70,9 @@ class V8_EXPORT_PRIVATE Reducer {
static Reduction NoChange() { return Reduction(); }
static Reduction Replace(Node* node) { return Reduction(node); }
static Reduction Changed(Node* node) { return Reduction(node); }
+
+ private:
+ virtual Reduction Reduce(Node* node) = 0;
};
@@ -136,7 +140,8 @@ class V8_EXPORT_PRIVATE GraphReducer
: public NON_EXPORTED_BASE(AdvancedReducer::Editor) {
public:
GraphReducer(Zone* zone, Graph* graph, TickCounter* tick_counter,
- JSHeapBroker* broker, Node* dead = nullptr);
+ JSHeapBroker* broker, Node* dead = nullptr,
+ ObserveNodeManager* observe_node_manager = nullptr);
~GraphReducer() override;
GraphReducer(const GraphReducer&) = delete;
@@ -193,6 +198,7 @@ class V8_EXPORT_PRIVATE GraphReducer
ZoneStack<NodeState> stack_;
TickCounter* const tick_counter_;
JSHeapBroker* const broker_;
+ ObserveNodeManager* const observe_node_manager_;
};
} // namespace compiler
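Concrete reducers are unaffected by the new entry point: they still override the single-argument Reduce, now private in the base class, while GraphReducer calls the public two-argument Reduce that forwards to it and reports any change to the ObserveNodeManager. A minimal sketch of a reducer under the new shape; the reducer itself is hypothetical:

  // Hypothetical reducer: only the override shape matters here.
  class NoOpSketchReducer final : public Reducer {
   public:
    const char* reducer_name() const override { return "NoOpSketchReducer"; }

   private:
    // Still the only method a reducer implements. The public
    // Reduce(node, observe_node_manager) wrapper calls it and then notifies
    // the ObserveNodeManager whenever the reduction changed something.
    Reduction Reduce(Node* node) override { return NoChange(); }
  };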
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index c6f58152bf..1208d0f4f6 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -230,6 +230,8 @@ std::unique_ptr<char[]> GetVisualizerLogFileName(OptimizedCompilationInfo* info,
}
std::replace(filename.begin(), filename.begin() + filename.length(), ' ',
'_');
+ std::replace(filename.begin(), filename.begin() + filename.length(), ':',
+ '-');
EmbeddedVector<char, 256> base_dir;
if (optional_base_dir != nullptr) {
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index e5bfa3e34e..e41bb6d748 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -27,10 +27,16 @@ class InternalizedString;
class JSBoundFunction;
class JSDataView;
class JSGlobalProxy;
-class JSRegExp;
class JSTypedArray;
class NativeContext;
class ScriptContextTable;
+template <typename>
+class Signature;
+
+namespace wasm {
+class ValueType;
+struct WasmModule;
+} // namespace wasm
namespace compiler {
@@ -38,6 +44,10 @@ namespace compiler {
// For a store during literal creation, do not walk up the prototype chain.
enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas };
+inline bool IsAnyStore(AccessMode mode) {
+ return mode == AccessMode::kStore || mode == AccessMode::kStoreInLiteral;
+}
+
enum class SerializationPolicy { kAssumeSerialized, kSerializeIfNeeded };
enum class OddballType : uint8_t {
@@ -63,19 +73,44 @@ enum class OddballType : uint8_t {
V(ScopeInfo) \
/* Subtypes of String */ \
V(InternalizedString) \
+ /* Subtypes of FixedArrayBase */ \
+ V(BytecodeArray) \
/* Subtypes of Name */ \
- V(String) \
V(Symbol) \
/* Subtypes of HeapObject */ \
V(AccessorInfo) \
V(ArrayBoilerplateDescription) \
V(CallHandlerInfo) \
V(Cell) \
- V(Name) \
+ V(Code) \
+ V(FeedbackCell) \
+ V(FeedbackVector) \
+ V(RegExpBoilerplateDescription) \
+ V(SharedFunctionInfo) \
V(TemplateObjectDescription)
// This list is sorted such that subtypes appear before their supertypes.
// DO NOT VIOLATE THIS PROPERTY!
+// Classes in this list behave like serialized classes, but they allow lazy
+// serialization from background threads where this is safe (e.g. for objects
+// that are immutable and fully initialized once visible). Pass
+// ObjectRef::BackgroundSerialization::kAllowed to the ObjectRef constructor
+// for objects where serialization from the background thread is safe.
+#define HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(V) \
+ /* Subtypes of HeapObject */ \
+ V(BigInt) \
+ V(HeapNumber) \
+ V(Map)
+
+// This list is sorted such that subtypes appear before their supertypes.
+// DO NOT VIOLATE THIS PROPERTY!
+// Types in this list can be serialized on demand from the background thread.
+#define HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(V) \
+ /* Subtypes of HeapObject */ \
+ V(PropertyCell)
+
+// This list is sorted such that subtypes appear before their supertypes.
+// DO NOT VIOLATE THIS PROPERTY!
#define HEAP_BROKER_SERIALIZED_OBJECT_LIST(V) \
/* Subtypes of JSObject */ \
V(JSArray) \
@@ -84,7 +119,6 @@ enum class OddballType : uint8_t {
V(JSFunction) \
V(JSGlobalObject) \
V(JSGlobalProxy) \
- V(JSRegExp) \
V(JSTypedArray) \
/* Subtypes of Context */ \
V(NativeContext) \
@@ -92,25 +126,19 @@ enum class OddballType : uint8_t {
V(Context) \
V(ScriptContextTable) \
/* Subtypes of FixedArrayBase */ \
- V(BytecodeArray) \
V(FixedArray) \
V(FixedDoubleArray) \
+ /* Subtypes of Name */ \
+ V(String) \
/* Subtypes of JSReceiver */ \
V(JSObject) \
/* Subtypes of HeapObject */ \
V(AllocationSite) \
- V(BigInt) \
- V(Code) \
V(DescriptorArray) \
- V(FeedbackCell) \
- V(FeedbackVector) \
V(FixedArrayBase) \
V(FunctionTemplateInfo) \
- V(HeapNumber) \
V(JSReceiver) \
- V(Map) \
- V(PropertyCell) \
- V(SharedFunctionInfo) \
+ V(Name) \
V(SourceTextModule) \
/* Subtypes of Object */ \
V(HeapObject)
@@ -123,18 +151,26 @@ class PerIsolateCompilerCache;
class PropertyAccessInfo;
#define FORWARD_DECL(Name) class Name##Ref;
HEAP_BROKER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
+HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
+HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
#undef FORWARD_DECL
class V8_EXPORT_PRIVATE ObjectRef {
public:
+ enum class BackgroundSerialization {
+ kDisallowed,
+ kAllowed,
+ };
+
ObjectRef(JSHeapBroker* broker, Handle<Object> object,
+ BackgroundSerialization background_serialization =
+ BackgroundSerialization::kDisallowed,
bool check_type = true);
ObjectRef(JSHeapBroker* broker, ObjectData* data, bool check_type = true)
: data_(data), broker_(broker) {
CHECK_NOT_NULL(data_);
}
-
Handle<Object> object() const;
bool equals(const ObjectRef& other) const;
@@ -145,11 +181,15 @@ class V8_EXPORT_PRIVATE ObjectRef {
#define HEAP_IS_METHOD_DECL(Name) bool Is##Name() const;
HEAP_BROKER_SERIALIZED_OBJECT_LIST(HEAP_IS_METHOD_DECL)
+ HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(HEAP_IS_METHOD_DECL)
+ HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(HEAP_IS_METHOD_DECL)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(HEAP_IS_METHOD_DECL)
#undef HEAP_IS_METHOD_DECL
#define HEAP_AS_METHOD_DECL(Name) Name##Ref As##Name() const;
HEAP_BROKER_SERIALIZED_OBJECT_LIST(HEAP_AS_METHOD_DECL)
+ HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(HEAP_AS_METHOD_DECL)
+ HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(HEAP_AS_METHOD_DECL)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(HEAP_AS_METHOD_DECL)
#undef HEAP_AS_METHOD_DECL
@@ -159,12 +199,6 @@ class V8_EXPORT_PRIVATE ObjectRef {
bool BooleanValue() const;
Maybe<double> OddballToNumber() const;
- // Return the element at key {index} if {index} is known to be an own data
- // property of the object that is non-writable and non-configurable.
- base::Optional<ObjectRef> GetOwnConstantElement(
- uint32_t index, SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
-
Isolate* isolate() const;
struct Hash {
@@ -240,8 +274,10 @@ class HeapObjectType {
// the outermost Ref class in the inheritance chain only.
#define DEFINE_REF_CONSTRUCTOR(name, base) \
name##Ref(JSHeapBroker* broker, Handle<Object> object, \
+ BackgroundSerialization background_serialization = \
+ BackgroundSerialization::kDisallowed, \
bool check_type = true) \
- : base(broker, object, false) { \
+ : base(broker, object, background_serialization, false) { \
if (check_type) { \
CHECK(Is##name()); \
} \
@@ -271,9 +307,16 @@ class PropertyCellRef : public HeapObjectRef {
Handle<PropertyCell> object() const;
- PropertyDetails property_details() const;
+ // Can be called from a background thread.
+ V8_WARN_UNUSED_RESULT bool Serialize() const;
+ void SerializeAsProtector() const {
+ bool serialized = Serialize();
+ // A protector always holds a Smi value and its cell type never changes, so
+ // Serialize can't fail.
+ CHECK(serialized);
+ }
- void Serialize();
+ PropertyDetails property_details() const;
ObjectRef value() const;
};
@@ -290,17 +333,21 @@ class JSObjectRef : public JSReceiverRef {
Handle<JSObject> object() const;
- uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index) const;
- double RawFastDoublePropertyAt(FieldIndex index) const;
ObjectRef RawFastPropertyAt(FieldIndex index) const;
+ // Return the element at key {index} if {index} is known to be an own data
+ // property of the object that is non-writable and non-configurable.
+ base::Optional<ObjectRef> GetOwnConstantElement(
+ uint32_t index, SerializationPolicy policy =
+ SerializationPolicy::kAssumeSerialized) const;
+
// Return the value of the property identified by the field {index}
// if {index} is known to be an own data property of the object.
base::Optional<ObjectRef> GetOwnDataProperty(
Representation field_representation, FieldIndex index,
SerializationPolicy policy =
SerializationPolicy::kAssumeSerialized) const;
- FixedArrayBaseRef elements() const;
+ base::Optional<FixedArrayBaseRef> elements() const;
void SerializeElements();
void EnsureElementsTenured();
ElementsKind GetElementsKind() const;
@@ -342,7 +389,6 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
bool has_feedback_vector() const;
bool has_initial_map() const;
bool has_prototype() const;
- bool HasAttachedOptimizedCode() const;
bool PrototypeRequiresRuntimeLookup() const;
void Serialize();
@@ -354,25 +400,32 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
ContextRef context() const;
NativeContextRef native_context() const;
SharedFunctionInfoRef shared() const;
+ int InitialMapInstanceSizeWithMinSlack() const;
+
+ void SerializeCodeAndFeedback();
+ bool serialized_code_and_feedback() const;
+
+ // The following are available only after calling SerializeCodeAndFeedback().
+ // TODO(mvstanton): Once we allow inlining of functions we didn't see
+ // during serialization, we do need to ensure that any feedback vector
+ // we read here has been fully initialized (ie, store-ordered into the
+ // cell).
FeedbackVectorRef feedback_vector() const;
FeedbackCellRef raw_feedback_cell() const;
CodeRef code() const;
- int InitialMapInstanceSizeWithMinSlack() const;
};
-class JSRegExpRef : public JSObjectRef {
+class RegExpBoilerplateDescriptionRef : public HeapObjectRef {
public:
- DEFINE_REF_CONSTRUCTOR(JSRegExp, JSObjectRef)
+ DEFINE_REF_CONSTRUCTOR(RegExpBoilerplateDescription, HeapObjectRef)
- Handle<JSRegExp> object() const;
+ Handle<RegExpBoilerplateDescription> object() const;
- ObjectRef raw_properties_or_hash() const;
- ObjectRef data() const;
- ObjectRef source() const;
- ObjectRef flags() const;
- ObjectRef last_index() const;
+ void Serialize();
- void SerializeAsRegExpBoilerplate();
+ FixedArrayRef data() const;
+ StringRef source() const;
+ int flags() const;
};
class HeapNumberRef : public HeapObjectRef {
@@ -425,42 +478,48 @@ class ContextRef : public HeapObjectRef {
V(JSGlobalObject, global_object) \
V(JSGlobalProxy, global_proxy_object) \
V(JSObject, promise_prototype) \
- V(Map, block_context_map) \
V(Map, bound_function_with_constructor_map) \
V(Map, bound_function_without_constructor_map) \
- V(Map, catch_context_map) \
- V(Map, eval_context_map) \
- V(Map, fast_aliased_arguments_map) \
- V(Map, function_context_map) \
- V(Map, initial_array_iterator_map) \
- V(Map, initial_string_iterator_map) \
- V(Map, iterator_result_map) \
V(Map, js_array_holey_double_elements_map) \
V(Map, js_array_holey_elements_map) \
V(Map, js_array_holey_smi_elements_map) \
V(Map, js_array_packed_double_elements_map) \
V(Map, js_array_packed_elements_map) \
V(Map, js_array_packed_smi_elements_map) \
- V(Map, sloppy_arguments_map) \
- V(Map, slow_object_with_null_prototype_map) \
- V(Map, strict_arguments_map) \
- V(Map, with_context_map) \
V(ScriptContextTable, script_context_table)
+#define BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V) \
+ V(JSFunction, regexp_exec_function)
+
+#define BROKER_COMPULSORY_BACKGROUND_NATIVE_CONTEXT_FIELDS(V) \
+ V(Map, block_context_map) \
+ V(Map, catch_context_map) \
+ V(Map, eval_context_map) \
+ V(Map, fast_aliased_arguments_map) \
+ V(Map, function_context_map) \
+ V(Map, initial_array_iterator_map) \
+ V(Map, initial_string_iterator_map) \
+ V(Map, iterator_result_map) \
+ V(Map, sloppy_arguments_map) \
+ V(Map, slow_object_with_null_prototype_map) \
+ V(Map, strict_arguments_map) \
+ V(Map, with_context_map)
+
// Those are set by Bootstrapper::ExportFromRuntime, which may not yet have
// happened when Turbofan is invoked via --always-opt.
-#define BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V) \
- V(Map, async_function_object_map) \
- V(Map, map_key_iterator_map) \
- V(Map, map_key_value_iterator_map) \
- V(Map, map_value_iterator_map) \
- V(JSFunction, regexp_exec_function) \
- V(Map, set_key_value_iterator_map) \
+#define BROKER_OPTIONAL_BACKGROUND_NATIVE_CONTEXT_FIELDS(V) \
+ V(Map, async_function_object_map) \
+ V(Map, map_key_iterator_map) \
+ V(Map, map_key_value_iterator_map) \
+ V(Map, map_value_iterator_map) \
+ V(Map, set_key_value_iterator_map) \
V(Map, set_value_iterator_map)
-#define BROKER_NATIVE_CONTEXT_FIELDS(V) \
- BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
- BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V)
+#define BROKER_NATIVE_CONTEXT_FIELDS(V) \
+ BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
+ BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V) \
+ BROKER_COMPULSORY_BACKGROUND_NATIVE_CONTEXT_FIELDS(V) \
+ BROKER_OPTIONAL_BACKGROUND_NATIVE_CONTEXT_FIELDS(V)
class NativeContextRef : public ContextRef {
public:
@@ -469,6 +528,7 @@ class NativeContextRef : public ContextRef {
Handle<NativeContext> object() const;
void Serialize();
+ void SerializeOnBackground();
#define DECL_ACCESSOR(type, name) type##Ref name() const;
BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR)
@@ -501,6 +561,11 @@ class DescriptorArrayRef : public HeapObjectRef {
DEFINE_REF_CONSTRUCTOR(DescriptorArray, HeapObjectRef)
Handle<DescriptorArray> object() const;
+
+ PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const;
+ NameRef GetPropertyKey(InternalIndex descriptor_index) const;
+ base::Optional<ObjectRef> GetStrongValue(
+ InternalIndex descriptor_index) const;
};
class FeedbackCellRef : public HeapObjectRef {
@@ -509,7 +574,12 @@ class FeedbackCellRef : public HeapObjectRef {
Handle<FeedbackCell> object() const;
base::Optional<SharedFunctionInfoRef> shared_function_info() const;
- HeapObjectRef value() const;
+
+ // TODO(mvstanton): Once we allow inlining of functions we didn't see
+ // during serialization, we do need to ensure that any feedback vector
+ // we read here has been fully initialized (ie, store-ordered into the
+ // cell).
+ base::Optional<FeedbackVectorRef> value() const;
};
class FeedbackVectorRef : public HeapObjectRef {
@@ -642,10 +712,11 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
NameRef GetPropertyKey(InternalIndex descriptor_index) const;
FieldIndex GetFieldIndexFor(InternalIndex descriptor_index) const;
ObjectRef GetFieldType(InternalIndex descriptor_index) const;
- bool IsUnboxedDoubleField(InternalIndex descriptor_index) const;
base::Optional<ObjectRef> GetStrongValue(
InternalIndex descriptor_number) const;
+ DescriptorArrayRef instance_descriptors() const;
+
void SerializeRootMap();
base::Optional<MapRef> FindRootMap() const;
@@ -734,26 +805,19 @@ class BytecodeArrayRef : public FixedArrayBaseRef {
Handle<BytecodeArray> object() const;
+ // NOTE: Concurrent reads of the actual bytecodes as well as the constant pool
+ // (both immutable) do not go through BytecodeArrayRef but are performed
+ // directly through the handle by BytecodeArrayAccessor.
+
int register_count() const;
int parameter_count() const;
interpreter::Register incoming_new_target_or_generator_register() const;
- // Bytecode access methods.
- uint8_t get(int index) const;
- Address GetFirstBytecodeAddress() const;
-
Handle<ByteArray> SourcePositionTable() const;
- // Constant pool access.
- Handle<Object> GetConstantAtIndex(int index) const;
- bool IsConstantAtIndexSmi(int index) const;
- Smi GetConstantAtIndexAsSmi(int index) const;
-
// Exception handler table.
Address handler_table_address() const;
int handler_table_size() const;
-
- void SerializeForCompilation();
};
class JSArrayRef : public JSObjectRef {
@@ -762,13 +826,24 @@ class JSArrayRef : public JSObjectRef {
Handle<JSArray> object() const;
- ObjectRef length() const;
+ // The `length` property of boilerplate JSArray objects. Boilerplates are
+ // immutable after initialization. Must not be used for non-boilerplate
+ // JSArrays.
+ ObjectRef GetBoilerplateLength() const;
// Return the element at key {index} if the array has a copy-on-write elements
// storage and {index} is known to be an own data property.
+ // Note the value returned by this function is only valid if we ensure at
+ // runtime that the backing store has not changed.
base::Optional<ObjectRef> GetOwnCowElement(
- uint32_t index, SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ FixedArrayBaseRef elements_ref, uint32_t index,
+ SerializationPolicy policy =
+ SerializationPolicy::kAssumeSerialized) const;
+
+ // The `JSArray::length` property; not safe to use in general, but can be
+ // used in some special cases that guarantee a valid `length` value despite
+ // concurrent reads.
+ ObjectRef length_unsafe() const;
};
class ScopeInfoRef : public HeapObjectRef {
@@ -786,20 +861,22 @@ class ScopeInfoRef : public HeapObjectRef {
void SerializeScopeInfoChain();
};
-#define BROKER_SFI_FIELDS(V) \
- V(int, internal_formal_parameter_count) \
- V(bool, has_duplicate_parameters) \
- V(int, function_map_index) \
- V(FunctionKind, kind) \
- V(LanguageMode, language_mode) \
- V(bool, native) \
- V(bool, HasBreakInfo) \
- V(bool, HasBuiltinId) \
- V(bool, construct_as_builtin) \
- V(bool, HasBytecodeArray) \
- V(int, StartPosition) \
- V(bool, is_compiled) \
- V(bool, IsUserJavaScript)
+#define BROKER_SFI_FIELDS(V) \
+ V(int, internal_formal_parameter_count) \
+ V(bool, has_duplicate_parameters) \
+ V(int, function_map_index) \
+ V(FunctionKind, kind) \
+ V(LanguageMode, language_mode) \
+ V(bool, native) \
+ V(bool, HasBreakInfo) \
+ V(bool, HasBuiltinId) \
+ V(bool, construct_as_builtin) \
+ V(bool, HasBytecodeArray) \
+ V(int, StartPosition) \
+ V(bool, is_compiled) \
+ V(bool, IsUserJavaScript) \
+ V(const wasm::WasmModule*, wasm_module) \
+ V(const wasm::FunctionSig*, wasm_function_signature)
class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
public:
@@ -833,6 +910,10 @@ class StringRef : public NameRef {
Handle<String> object() const;
+ base::Optional<ObjectRef> GetCharAsStringOrUndefined(
+ uint32_t index, SerializationPolicy policy =
+ SerializationPolicy::kAssumeSerialized) const;
+
base::Optional<int> length() const;
base::Optional<uint16_t> GetFirstChar();
base::Optional<double> ToNumber();
@@ -859,6 +940,7 @@ class JSTypedArrayRef : public JSObjectRef {
void Serialize();
bool serialized() const;
+ bool ShouldHaveBeenSerialized() const;
HeapObjectRef buffer() const;
};
@@ -922,7 +1004,7 @@ class CodeRef : public HeapObjectRef {
Handle<Code> object() const;
- unsigned inlined_bytecode_size() const;
+ unsigned GetInlinedBytecodeSize() const;
};
class InternalizedStringRef : public StringRef {
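Editor's note on the heap-refs change above: DEFINE_REF_CONSTRUCTOR now threads a defaulted BackgroundSerialization argument through every Ref constructor ahead of the check_type flag, so existing call sites keep compiling while background serialization can be requested explicitly. A minimal standalone sketch of the shape the macro generates, using stand-in types instead of the real JSHeapBroker/Handle machinery (all names below are illustrative, not V8 code):

    #include <cassert>
    #include <iostream>

    // Stand-ins for the real broker/handle types.
    struct JSHeapBroker {};
    struct Handle {};

    enum class BackgroundSerialization { kDisallowed, kAllowed };

    struct HeapObjectRef {
      HeapObjectRef(JSHeapBroker*, Handle,
                    BackgroundSerialization serialization, bool /*check_type*/)
          : serialization_(serialization) {}
      BackgroundSerialization serialization_;
    };

    // Roughly what DEFINE_REF_CONSTRUCTOR(PropertyCell, HeapObjectRef) expands
    // to: the new parameter defaults to kDisallowed, and the type check only
    // runs when check_type is true (CHECK(Is##name()) in the real macro).
    struct PropertyCellRef : HeapObjectRef {
      PropertyCellRef(JSHeapBroker* broker, Handle object,
                      BackgroundSerialization background_serialization =
                          BackgroundSerialization::kDisallowed,
                      bool check_type = true)
          : HeapObjectRef(broker, object, background_serialization, false) {
        if (check_type) {
          assert(IsPropertyCell());
        }
      }
      bool IsPropertyCell() const { return true; }  // Placeholder type check.
    };

    int main() {
      JSHeapBroker broker;
      PropertyCellRef default_ref(&broker, Handle{});  // kDisallowed by default
      PropertyCellRef bg_ref(&broker, Handle{},
                             BackgroundSerialization::kAllowed);
      std::cout << (bg_ref.serialization_ == BackgroundSerialization::kAllowed)
                << "\n";
      (void)default_ref;
      return 0;
    }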
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 9684086a5d..251ce6ee5a 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -2049,13 +2049,11 @@ struct PromiseCtorFrameStateParams {
// Remnant of old-style JSCallReducer code. Could be ported to graph assembler,
// but probably not worth the effort.
-FrameState CreateArtificialFrameState(Node* node, Node* outer_frame_state,
- int parameter_count, BailoutId bailout_id,
- FrameStateType frame_state_type,
- const SharedFunctionInfoRef& shared,
- Node* context,
- CommonOperatorBuilder* common,
- Graph* graph) {
+FrameState CreateArtificialFrameState(
+ Node* node, Node* outer_frame_state, int parameter_count,
+ BytecodeOffset bailout_id, FrameStateType frame_state_type,
+ const SharedFunctionInfoRef& shared, Node* context,
+ CommonOperatorBuilder* common, Graph* graph) {
const FrameStateFunctionInfo* state_info =
common->CreateFrameStateFunctionInfo(
frame_state_type, parameter_count + 1, 0, shared.object());
@@ -2089,7 +2087,7 @@ FrameState PromiseConstructorFrameState(
DCHECK_EQ(1, params.shared.internal_formal_parameter_count());
return CreateArtificialFrameState(
params.node_ptr, params.outer_frame_state, 1,
- BailoutId::ConstructStubInvoke(), FrameStateType::kConstructStub,
+ BytecodeOffset::ConstructStubInvoke(), FrameStateType::kConstructStub,
params.shared, params.context, common, graph);
}
@@ -3244,7 +3242,8 @@ class IteratingArrayBuiltinHelper {
}
// TODO(jgruber): May only be needed for holey elements kinds.
- if (!dependencies->DependOnNoElementsProtector()) UNREACHABLE();
+ if (!dependencies->DependOnNoElementsProtector()) return;
+
has_stability_dependency_ = inference_.RelyOnMapsPreferStability(
dependencies, jsgraph, &effect_, control_, p.feedback());
@@ -3430,9 +3429,90 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
return ReplaceWithSubgraph(&a, subgraph);
}
+namespace {
+
+bool CanInlineJSToWasmCall(const wasm::FunctionSig* wasm_signature) {
+ DCHECK(FLAG_turbo_inline_js_wasm_calls);
+ if (wasm_signature->return_count() > 1) {
+ return false;
+ }
+
+ for (auto type : wasm_signature->all()) {
+#if defined(V8_TARGET_ARCH_32_BIT)
+ if (type == wasm::kWasmI64) return false;
+#endif
+ if (type != wasm::kWasmI32 && type != wasm::kWasmI64 &&
+ type != wasm::kWasmF32 && type != wasm::kWasmF64) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+} // namespace
+
+Reduction JSCallReducer::ReduceCallWasmFunction(
+ Node* node, const SharedFunctionInfoRef& shared) {
+ JSCallNode n(node);
+ const CallParameters& p = n.Parameters();
+
+ // Avoid deoptimization loops
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ // TODO(paolosev@microsoft.com): Enable inlining for calls in try/catch.
+ if (NodeProperties::IsExceptionalCall(node)) {
+ return NoChange();
+ }
+
+ const wasm::FunctionSig* wasm_signature = shared.wasm_function_signature();
+ if (!CanInlineJSToWasmCall(wasm_signature)) {
+ return NoChange();
+ }
+
+ // Signal TurboFan that it should run the 'wasm-inlining' phase.
+ has_wasm_calls_ = true;
+
+ const wasm::WasmModule* wasm_module = shared.wasm_module();
+ const Operator* op =
+ javascript()->CallWasm(wasm_module, wasm_signature, p.feedback());
+
+ // Remove additional inputs
+ size_t actual_arity = n.ArgumentCount();
+ DCHECK(JSCallNode::kFeedbackVectorIsLastInput);
+ DCHECK_EQ(actual_arity + JSWasmCallNode::kExtraInputCount - 1,
+ n.FeedbackVectorIndex());
+ size_t expected_arity = wasm_signature->parameter_count();
+
+ while (actual_arity > expected_arity) {
+ int removal_index =
+ static_cast<int>(n.FirstArgumentIndex() + expected_arity);
+ DCHECK_LT(removal_index, static_cast<int>(node->InputCount()));
+ node->RemoveInput(removal_index);
+ actual_arity--;
+ }
+
+ // Add missing inputs
+ while (actual_arity < expected_arity) {
+ int insertion_index = n.ArgumentIndex(n.ArgumentCount());
+ node->InsertInput(graph()->zone(), insertion_index,
+ jsgraph()->UndefinedConstant());
+ actual_arity++;
+ }
+
+ NodeProperties::ChangeOp(node, op);
+ return Changed(node);
+}
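Editor's note: the input rewriting in ReduceCallWasmFunction above trims surplus JS arguments and pads missing ones with undefined so the call node matches the wasm signature's parameter count. A simplified standalone illustration of that adjustment on a plain argument vector (the Node/graph plumbing and the trailing feedback-vector input are elided; names are illustrative):

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <vector>

    // Stand-in for jsgraph()->UndefinedConstant().
    const std::string kUndefined = "undefined";

    // Drop extra arguments or append undefined until the actual arity matches
    // the arity expected by the wasm signature, a simplified mirror of the
    // remove/insert loops in ReduceCallWasmFunction.
    void AdaptArity(std::vector<std::string>& args, size_t expected_arity) {
      while (args.size() > expected_arity) {
        args.pop_back();             // node->RemoveInput(removal_index)
      }
      while (args.size() < expected_arity) {
        args.push_back(kUndefined);  // node->InsertInput(..., UndefinedConstant())
      }
    }

    int main() {
      std::vector<std::string> too_many = {"a", "b", "c"};
      AdaptArity(too_many, 2);       // -> {"a", "b"}

      std::vector<std::string> too_few = {"a"};
      AdaptArity(too_few, 3);        // -> {"a", "undefined", "undefined"}

      for (const auto& v : too_few) std::cout << v << " ";
      std::cout << "\n";
      return 0;
    }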
+
#ifndef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
namespace {
bool HasFPParamsInSignature(const CFunctionInfo* c_signature) {
+ if (c_signature->ReturnInfo().GetType() == CTypeInfo::Type::kFloat32 ||
+ c_signature->ReturnInfo().GetType() == CTypeInfo::Type::kFloat64) {
+ return true;
+ }
for (unsigned int i = 0; i < c_signature->ArgumentCount(); ++i) {
if (c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kFloat32 ||
c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kFloat64) {
@@ -3447,6 +3527,10 @@ bool HasFPParamsInSignature(const CFunctionInfo* c_signature) {
#ifndef V8_TARGET_ARCH_64_BIT
namespace {
bool Has64BitIntegerParamsInSignature(const CFunctionInfo* c_signature) {
+ if (c_signature->ReturnInfo().GetType() == CTypeInfo::Type::kInt64 ||
+ c_signature->ReturnInfo().GetType() == CTypeInfo::Type::kUint64) {
+ return true;
+ }
for (unsigned int i = 0; i < c_signature->ArgumentCount(); ++i) {
if (c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kInt64 ||
c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kUint64) {
@@ -3804,13 +3888,13 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
// we can only optimize this in case the {node} was already inlined into
// some other function (and same for the {arguments_list}).
CreateArgumentsType const type = CreateArgumentsTypeOf(arguments_list->op());
- Node* frame_state = NodeProperties::GetFrameStateInput(arguments_list);
- int start_index = 0;
+ FrameState frame_state =
+ FrameState{NodeProperties::GetFrameStateInput(arguments_list)};
int formal_parameter_count;
{
Handle<SharedFunctionInfo> shared;
- if (!FrameStateInfoOf(frame_state->op()).shared_info().ToHandle(&shared)) {
+ if (!frame_state.frame_state_info().shared_info().ToHandle(&shared)) {
return NoChange();
}
formal_parameter_count = SharedFunctionInfoRef(broker(), shared)
@@ -3828,8 +3912,6 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
return NoChange();
}
}
- } else if (type == CreateArgumentsType::kRestParameter) {
- start_index = formal_parameter_count;
}
// TODO(jgruber,v8:8888): Attempt to remove this restriction. The reason it
@@ -3846,13 +3928,19 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
// Remove the {arguments_list} input from the {node}.
node->RemoveInput(arraylike_or_spread_index);
+ // The index of the first relevant parameter. Only non-zero when looking at
+ // rest parameters, in which case it is set to the index of the first rest
+ // parameter.
+ const int start_index = (type == CreateArgumentsType::kRestParameter)
+ ? formal_parameter_count
+ : 0;
+
// After removing the arraylike or spread object, the argument count is:
int argc =
arraylike_or_spread_index - JSCallOrConstructNode::FirstArgumentIndex();
// Check if are spreading to inlined arguments or to the arguments of
// the outermost function.
- Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
- if (outer_state->opcode() != IrOpcode::kFrameState) {
+ if (!frame_state.has_outer_frame_state()) {
Operator const* op;
if (IsCallWithArrayLikeOrSpread(node)) {
static constexpr int kTargetAndReceiver = 2;
@@ -3867,40 +3955,22 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
NodeProperties::ChangeOp(node, op);
return Changed(node);
}
+ FrameState outer_state = frame_state.outer_frame_state();
// Get to the actual frame state from which to extract the arguments;
// we can only optimize this in case the {node} was already inlined into
// some other function (and same for the {arg_array}).
- FrameStateInfo outer_info = FrameStateInfoOf(outer_state->op());
+ FrameStateInfo outer_info = outer_state.frame_state_info();
if (outer_info.type() == FrameStateType::kArgumentsAdaptor) {
// Need to take the parameters from the arguments adaptor.
frame_state = outer_state;
}
// Add the actual parameters to the {node}, skipping the receiver.
- const int argument_count =
- FrameStateInfoOf(frame_state->op()).parameter_count() -
- 1; // Minus receiver.
- if (start_index < argument_count) {
- Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
- StateValuesAccess parameters_access(parameters);
- auto parameters_it = ++parameters_access.begin(); // Skip the receiver.
- for (int i = 0; i < start_index; i++) {
- // A non-zero start_index implies that there are rest arguments. Skip
- // them.
- ++parameters_it;
- }
- for (int i = start_index; i < argument_count; ++i, ++parameters_it) {
- Node* parameter_node = parameters_it.node();
- DCHECK_NOT_NULL(parameter_node);
- node->InsertInput(graph()->zone(),
- JSCallOrConstructNode::ArgumentIndex(argc++),
- parameter_node);
- }
- // TODO(jgruber): Currently, each use-site does the awkward dance above,
- // iterating based on the FrameStateInfo's parameter count minus one, and
- // manually advancing the iterator past the receiver. Consider wrapping all
- // this in an understandable iterator s.t. one only needs to iterate from
- // the beginning until done().
- DCHECK(parameters_it.done());
+ StateValuesAccess parameters_access(frame_state.parameters());
+ for (auto it = parameters_access.begin_without_receiver_and_skip(start_index);
+ !it.done(); ++it) {
+ DCHECK_NOT_NULL(it.node());
+ node->InsertInput(graph()->zone(),
+ JSCallOrConstructNode::ArgumentIndex(argc++), it.node());
}
if (IsCallWithArrayLikeOrSpread(node)) {
@@ -4089,8 +4159,13 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceJSCall(node, SharedFunctionInfoRef(broker(), p.shared_info()));
} else if (target->opcode() == IrOpcode::kCheckClosure) {
FeedbackCellRef cell(broker(), FeedbackCellOf(target->op()));
- return ReduceJSCall(node,
- cell.value().AsFeedbackVector().shared_function_info());
+ if (cell.shared_function_info().has_value()) {
+ return ReduceJSCall(node, *cell.shared_function_info());
+ } else {
+ TRACE_BROKER_MISSING(broker(), "Unable to reduce JSCall. FeedbackCell "
+ << cell << " has no FeedbackVector");
+ return NoChange();
+ }
}
// If {target} is the result of a JSCreateBoundFunction operation,
@@ -4169,11 +4244,10 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
} else if (feedback_target.has_value() && feedback_target->IsFeedbackCell()) {
FeedbackCellRef feedback_cell(
broker(), feedback_target.value().AsFeedbackCell().object());
- if (feedback_cell.value().IsFeedbackVector()) {
+ if (feedback_cell.value().has_value()) {
// Check that {target} is a closure with given {feedback_cell},
// which uniquely identifies a given function inside a native context.
- FeedbackVectorRef feedback_vector =
- feedback_cell.value().AsFeedbackVector();
+ FeedbackVectorRef feedback_vector = *feedback_cell.value();
if (!feedback_vector.serialized()) {
TRACE_BROKER_MISSING(
broker(), "feedback vector, not serialized: " << feedback_vector);
@@ -4555,6 +4629,11 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
if (shared.function_template_info().has_value()) {
return ReduceCallApiFunction(node, shared);
}
+
+ if ((flags() & kInlineJSToWasmCalls) && shared.wasm_function_signature()) {
+ return ReduceCallWasmFunction(node, shared);
+ }
+
return NoChange();
}
@@ -5094,7 +5173,9 @@ Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kinds, true)) {
return inference.NoChange();
}
- if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
+ if (!dependencies()->DependOnNoElementsProtector()) {
+ return inference.NoChange();
+ }
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
@@ -5229,7 +5310,9 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kinds)) {
return inference.NoChange();
}
- if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
+ if (!dependencies()->DependOnNoElementsProtector()) {
+ return inference.NoChange();
+ }
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
@@ -5273,7 +5356,7 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
Node* efalse = effect;
Node* vfalse;
{
- // TODO(tebbi): We should trim the backing store if the capacity is too
+ // TODO(turbofan): We should trim the backing store if the capacity is too
// big, as implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
// Load the elements backing store from the {receiver}.
@@ -5367,7 +5450,9 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kinds)) {
return inference.NoChange();
}
- if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
+ if (!dependencies()->DependOnNoElementsProtector()) {
+ return inference.NoChange();
+ }
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
@@ -5605,8 +5690,8 @@ Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) {
if (!dependencies()->DependOnArraySpeciesProtector())
return inference.NoChange();
- if (can_be_holey) {
- if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
+ if (can_be_holey && !dependencies()->DependOnNoElementsProtector()) {
+ return inference.NoChange();
}
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
@@ -5765,9 +5850,11 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
}
}
- if (IsHoleyElementsKind(elements_kind)) {
- if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
+ if (IsHoleyElementsKind(elements_kind) &&
+ !dependencies()->DependOnNoElementsProtector()) {
+ return inference.NoChange();
}
+
// Since the map inference was done relative to {iterator_effect} rather than
// {effect}, we need to guard the use of the map(s) even when the inference
// was reliable.
@@ -6663,7 +6750,7 @@ Reduction JSCallReducer::ReduceTypedArrayConstructor(
// Insert a construct stub frame into the chain of frame states. This will
// reconstruct the proper frame when deoptimizing within the constructor.
frame_state = CreateArtificialFrameState(
- node, frame_state, arity, BailoutId::ConstructStubInvoke(),
+ node, frame_state, arity, BytecodeOffset::ConstructStubInvoke(),
FrameStateType::kConstructStub, shared, context, common(), graph());
// This continuation just returns the newly created JSTypedArray. We
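Editor's note on the js-call-reducer.cc changes above: FeedbackCellRef::value() now returns a base::Optional<FeedbackVectorRef>, so call sites such as ReduceJSCall branch on has_value() instead of reading a feedback vector unconditionally. A minimal sketch of that calling pattern, using std::optional and stand-in types rather than the real broker classes:

    #include <iostream>
    #include <optional>
    #include <string>

    struct FeedbackVectorRef {
      std::string name;
    };

    struct FeedbackCellRef {
      // Mirrors the new accessor: empty when the cell holds no feedback vector.
      std::optional<FeedbackVectorRef> value() const { return vector_; }
      std::optional<FeedbackVectorRef> vector_;
    };

    // Mirrors the reducer's shape: bail out ("NoChange") when the vector is
    // absent instead of asserting that it exists.
    std::string ReduceCall(const FeedbackCellRef& cell) {
      if (!cell.value().has_value()) {
        return "NoChange: cell has no FeedbackVector";
      }
      return "reduced using vector " + cell.value()->name;
    }

    int main() {
      FeedbackCellRef empty_cell;
      FeedbackCellRef full_cell{FeedbackVectorRef{"fv0"}};
      std::cout << ReduceCall(empty_cell) << "\n";
      std::cout << ReduceCall(full_cell) << "\n";
      return 0;
    }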
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 0a2050b200..8d24175d4b 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -43,6 +43,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
enum Flag {
kNoFlags = 0u,
kBailoutOnUninitialized = 1u << 0,
+ kInlineJSToWasmCalls = 1u << 1,
};
using Flags = base::Flags<Flag>;
@@ -69,10 +70,14 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Zone* ZoneForGraphAssembler() const { return temp_zone(); }
JSGraph* JSGraphForGraphAssembler() const { return jsgraph(); }
+ bool has_wasm_calls() const { return has_wasm_calls_; }
+
private:
Reduction ReduceBooleanConstructor(Node* node);
Reduction ReduceCallApiFunction(Node* node,
const SharedFunctionInfoRef& shared);
+ Reduction ReduceCallWasmFunction(Node* node,
+ const SharedFunctionInfoRef& shared);
Reduction ReduceFunctionPrototypeApply(Node* node);
Reduction ReduceFunctionPrototypeBind(Node* node);
Reduction ReduceFunctionPrototypeCall(Node* node);
@@ -245,6 +250,8 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Flags const flags_;
CompilationDependencies* const dependencies_;
std::set<Node*> waitlist_;
+
+ bool has_wasm_calls_ = false;
};
} // namespace compiler
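Editor's note on the header change above: reducer flags form a bitmask, which is why the new kInlineJSToWasmCalls value is defined as 1u << 1 and tested with a bitwise AND (flags() & kInlineJSToWasmCalls in the .cc). A small standalone sketch of that flag arithmetic (base::Flags is approximated by a plain integer mask here):

    #include <cstdint>
    #include <iostream>

    enum Flag : uint32_t {
      kNoFlags = 0u,
      kBailoutOnUninitialized = 1u << 0,
      kInlineJSToWasmCalls = 1u << 1,
    };
    using Flags = uint32_t;  // base::Flags<Flag> behaves like a typed bitmask.

    int main() {
      Flags flags = kBailoutOnUninitialized | kInlineJSToWasmCalls;
      // Mirrors the guard in ReduceJSCall: only attempt wasm-call inlining
      // when the corresponding flag bit is set.
      if (flags & kInlineJSToWasmCalls) {
        std::cout << "would call ReduceCallWasmFunction\n";
      }
      return 0;
    }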
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index 21f6c887c0..448652ad8d 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -89,13 +89,9 @@ namespace {
bool IsContextParameter(Node* node) {
DCHECK_EQ(IrOpcode::kParameter, node->opcode());
- Node* const start = NodeProperties::GetValueInput(node, 0);
- DCHECK_EQ(IrOpcode::kStart, start->opcode());
- int const index = ParameterIndexOf(node->op());
- // The context is always the last parameter to a JavaScript function, and
- // {Parameter} indices start at -1, so value outputs of {Start} look like
- // this: closure, receiver, param0, ..., paramN, context.
- return index == start->op()->ValueOutputCount() - 2;
+ return ParameterIndexOf(node->op()) ==
+ StartNode{NodeProperties::GetValueInput(node, 0)}
+ .ContextParameterIndex_MaybeNonStandardLayout();
}
// Given a context {node} and the {distance} from that context to the target
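Editor's note on IsContextParameter above: the rewrite delegates the index computation to StartNode, but the layout described by the removed comment is unchanged: the Start node's value outputs are closure, receiver, param0..paramN, context, and Parameter indices start at -1, so the context parameter index works out to ValueOutputCount() - 2. A standalone arithmetic check of that relationship (the node classes are stand-ins, assuming the standard layout only):

    #include <cassert>
    #include <iostream>

    // For a JS function with P declared parameters, the Start node's value
    // outputs are: closure, receiver, param0, ..., paramP-1, context.
    int ValueOutputCount(int declared_parameter_count) {
      return declared_parameter_count + 3;  // closure + receiver + params + context
    }

    // Parameter indices start at -1 (the closure), so the context, being the
    // last value output, ends up at ValueOutputCount() - 2.
    int ContextParameterIndex(int declared_parameter_count) {
      return ValueOutputCount(declared_parameter_count) - 2;
    }

    int main() {
      // Two declared parameters: closure=-1, receiver=0, p0=1, p1=2, context=3,
      // and ValueOutputCount() is 5, so 5 - 2 == 3.
      assert(ContextParameterIndex(2) == 3);
      std::cout << ContextParameterIndex(2) << "\n";
      return 0;
    }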
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 74cb7937fa..899922a27f 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -36,10 +36,10 @@ namespace compiler {
namespace {
// Retrieves the frame state holding actual argument values.
-Node* GetArgumentsFrameState(Node* frame_state) {
- Node* const outer_state = NodeProperties::GetFrameStateInput(frame_state);
- FrameStateInfo outer_state_info = FrameStateInfoOf(outer_state->op());
- return outer_state_info.type() == FrameStateType::kArgumentsAdaptor
+FrameState GetArgumentsFrameState(FrameState frame_state) {
+ FrameState outer_state{NodeProperties::GetFrameStateInput(frame_state)};
+ return outer_state.frame_state_info().type() ==
+ FrameStateType::kArgumentsAdaptor
? outer_state
: frame_state;
}
@@ -148,16 +148,15 @@ Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateArguments, node->opcode());
CreateArgumentsType type = CreateArgumentsTypeOf(node->op());
- Node* const frame_state = NodeProperties::GetFrameStateInput(node);
- Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+ FrameState frame_state{NodeProperties::GetFrameStateInput(node)};
Node* const control = graph()->start();
- FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
+ FrameStateInfo state_info = frame_state.frame_state_info();
SharedFunctionInfoRef shared(broker(),
state_info.shared_info().ToHandleChecked());
// Use the ArgumentsAccessStub for materializing both mapped and unmapped
// arguments object, but only for non-inlined (i.e. outermost) frames.
- if (outer_state->opcode() != IrOpcode::kFrameState) {
+ if (!frame_state.has_outer_frame_state()) {
switch (type) {
case CreateArgumentsType::kMappedArguments: {
// TODO(turbofan): Duplicate parameters are not handled yet.
@@ -165,17 +164,15 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
Node* const callee = NodeProperties::GetValueInput(node, 0);
Node* const context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
- Node* const arguments_frame =
- graph()->NewNode(simplified()->ArgumentsFrame());
Node* const arguments_length =
- graph()->NewNode(simplified()->ArgumentsLength(
- shared.internal_formal_parameter_count()),
- arguments_frame);
+ graph()->NewNode(simplified()->ArgumentsLength());
// Allocate the elements backing store.
bool has_aliased_arguments = false;
- Node* const elements = effect = AllocateAliasedArguments(
- effect, control, context, arguments_frame, arguments_length, shared,
+ Node* const elements = effect = TryAllocateAliasedArguments(
+ effect, control, context, arguments_length, shared,
&has_aliased_arguments);
+ if (elements == nullptr) return NoChange();
+
// Load the arguments object map.
Node* const arguments_map = jsgraph()->Constant(
has_aliased_arguments
@@ -197,18 +194,14 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
}
case CreateArgumentsType::kUnmappedArguments: {
Node* effect = NodeProperties::GetEffectInput(node);
- Node* const arguments_frame =
- graph()->NewNode(simplified()->ArgumentsFrame());
Node* const arguments_length =
- graph()->NewNode(simplified()->ArgumentsLength(
- shared.internal_formal_parameter_count()),
- arguments_frame);
+ graph()->NewNode(simplified()->ArgumentsLength());
// Allocate the elements backing store.
Node* const elements = effect =
graph()->NewNode(simplified()->NewArgumentsElements(
CreateArgumentsType::kUnmappedArguments,
shared.internal_formal_parameter_count()),
- arguments_frame, arguments_length, effect);
+ arguments_length, effect);
// Load the arguments object map.
Node* const arguments_map =
jsgraph()->Constant(native_context().strict_arguments_map());
@@ -227,21 +220,16 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
}
case CreateArgumentsType::kRestParameter: {
Node* effect = NodeProperties::GetEffectInput(node);
- Node* const arguments_frame =
- graph()->NewNode(simplified()->ArgumentsFrame());
Node* const arguments_length =
- graph()->NewNode(simplified()->ArgumentsLength(
- shared.internal_formal_parameter_count()),
- arguments_frame);
+ graph()->NewNode(simplified()->ArgumentsLength());
Node* const rest_length = graph()->NewNode(
- simplified()->RestLength(shared.internal_formal_parameter_count()),
- arguments_frame);
+ simplified()->RestLength(shared.internal_formal_parameter_count()));
// Allocate the elements backing store.
Node* const elements = effect =
graph()->NewNode(simplified()->NewArgumentsElements(
CreateArgumentsType::kRestParameter,
shared.internal_formal_parameter_count()),
- arguments_frame, arguments_length, effect);
+ arguments_length, effect);
// Load the JSArray object map.
Node* const jsarray_map = jsgraph()->Constant(
native_context().js_array_packed_elements_map());
@@ -263,7 +251,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
}
// Use inline allocation for all mapped arguments objects within inlined
// (i.e. non-outermost) frames, independent of the object size.
- DCHECK_EQ(outer_state->opcode(), IrOpcode::kFrameState);
+ DCHECK_EQ(frame_state.outer_frame_state()->opcode(), IrOpcode::kFrameState);
switch (type) {
case CreateArgumentsType::kMappedArguments: {
Node* const callee = NodeProperties::GetValueInput(node, 0);
@@ -274,25 +262,20 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// Choose the correct frame state and frame state info depending on
// whether there conceptually is an arguments adaptor frame in the call
// chain.
- Node* const args_state = GetArgumentsFrameState(frame_state);
- if (args_state->InputAt(kFrameStateParametersInput)->opcode() ==
- IrOpcode::kDeadValue) {
+ FrameState args_state = GetArgumentsFrameState(frame_state);
+ if (args_state.parameters()->opcode() == IrOpcode::kDeadValue) {
// This protects against an incompletely propagated DeadValue node.
// If the FrameState has a DeadValue input, then this node will be
// pruned anyway.
return NoChange();
}
- FrameStateInfo args_state_info = FrameStateInfoOf(args_state->op());
+ FrameStateInfo args_state_info = args_state.frame_state_info();
int length = args_state_info.parameter_count() - 1; // Minus receiver.
- // Check that the array allocated for arguments is not "large".
- {
- const int alloc_size = FixedArray::SizeFor(length);
- if (alloc_size > kMaxRegularHeapObjectSize) return NoChange();
- }
// Prepare element backing store to be used by arguments object.
bool has_aliased_arguments = false;
- Node* const elements = AllocateAliasedArguments(
+ Node* const elements = TryAllocateAliasedArguments(
effect, control, args_state, context, shared, &has_aliased_arguments);
+ if (elements == nullptr) return NoChange();
effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
// Load the arguments object map.
Node* const arguments_map = jsgraph()->Constant(
@@ -319,23 +302,18 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// Choose the correct frame state and frame state info depending on
// whether there conceptually is an arguments adaptor frame in the call
// chain.
- Node* const args_state = GetArgumentsFrameState(frame_state);
- if (args_state->InputAt(kFrameStateParametersInput)->opcode() ==
- IrOpcode::kDeadValue) {
+ FrameState args_state = GetArgumentsFrameState(frame_state);
+ if (args_state.parameters()->opcode() == IrOpcode::kDeadValue) {
// This protects against an incompletely propagated DeadValue node.
// If the FrameState has a DeadValue input, then this node will be
// pruned anyway.
return NoChange();
}
- FrameStateInfo args_state_info = FrameStateInfoOf(args_state->op());
+ FrameStateInfo args_state_info = args_state.frame_state_info();
int length = args_state_info.parameter_count() - 1; // Minus receiver.
- // Check that the array allocated for arguments is not "large".
- {
- const int alloc_size = FixedArray::SizeFor(length);
- if (alloc_size > kMaxRegularHeapObjectSize) return NoChange();
- }
// Prepare element backing store to be used by arguments object.
- Node* const elements = AllocateArguments(effect, control, args_state);
+ Node* const elements = TryAllocateArguments(effect, control, args_state);
+ if (elements == nullptr) return NoChange();
effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
// Load the arguments object map.
Node* const arguments_map =
@@ -361,18 +339,18 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// Choose the correct frame state and frame state info depending on
// whether there conceptually is an arguments adaptor frame in the call
// chain.
- Node* const args_state = GetArgumentsFrameState(frame_state);
- if (args_state->InputAt(kFrameStateParametersInput)->opcode() ==
- IrOpcode::kDeadValue) {
+ FrameState args_state = GetArgumentsFrameState(frame_state);
+ if (args_state.parameters()->opcode() == IrOpcode::kDeadValue) {
// This protects against an incompletely propagated DeadValue node.
// If the FrameState has a DeadValue input, then this node will be
// pruned anyway.
return NoChange();
}
- FrameStateInfo args_state_info = FrameStateInfoOf(args_state->op());
+ FrameStateInfo args_state_info = args_state.frame_state_info();
// Prepare element backing store to be used by the rest array.
Node* const elements =
- AllocateRestArguments(effect, control, args_state, start_index);
+ TryAllocateRestArguments(effect, control, args_state, start_index);
+ if (elements == nullptr) return NoChange();
effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
// Load the JSArray object map.
Node* const jsarray_map =
@@ -424,11 +402,15 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
SharedFunctionInfoRef shared = js_function.shared();
DCHECK(shared.HasBytecodeArray());
int parameter_count_no_receiver = shared.internal_formal_parameter_count();
- int size = parameter_count_no_receiver +
- shared.GetBytecodeArray().register_count();
+ int length = parameter_count_no_receiver +
+ shared.GetBytecodeArray().register_count();
+ MapRef fixed_array_map(broker(), factory()->fixed_array_map());
AllocationBuilder ab(jsgraph(), effect, control);
- ab.AllocateArray(size, MapRef(broker(), factory()->fixed_array_map()));
- for (int i = 0; i < size; ++i) {
+ if (!ab.CanAllocateArray(length, fixed_array_map)) {
+ return NoChange();
+ }
+ ab.AllocateArray(length, fixed_array_map);
+ for (int i = 0; i < length; ++i) {
ab.Store(AccessBuilder::ForFixedArraySlot(i),
jsgraph()->UndefinedConstant());
}
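Editor's note: ReduceJSCreateGeneratorObject now asks CanAllocateArray before AllocateArray and returns NoChange() when the register file is too large for inline allocation; the same guard-then-allocate pattern recurs in the TryAllocate* helpers further down. A standalone sketch of the pattern, with a made-up size limit standing in for the real heap limit (the constant and helper names below are illustrative only):

    #include <iostream>
    #include <memory>
    #include <vector>

    // Stand-in for V8's kMaxRegularHeapObjectSize-derived limit; the value is
    // arbitrary and only for illustration.
    constexpr int kMaxInlineAllocationLength = 1024;

    // Mirrors AllocationBuilder::CanAllocateArray: a cheap predicate checked
    // before committing to an inline allocation.
    bool CanAllocateArray(int length) {
      return length >= 0 && length <= kMaxInlineAllocationLength;
    }

    // Mirrors the Try* helpers: return nullptr instead of allocating when the
    // requested backing store is too large, so the caller can fall back
    // (NoChange() in the reducer).
    std::unique_ptr<std::vector<int>> TryAllocateArray(int length) {
      if (!CanAllocateArray(length)) return nullptr;
      return std::make_unique<std::vector<int>>(length, 0);
    }

    int main() {
      auto small = TryAllocateArray(16);
      auto huge = TryAllocateArray(1 << 20);
      std::cout << "small ok: " << (small != nullptr) << "\n";  // 1
      std::cout << "huge ok:  " << (huge != nullptr) << "\n";   // 0 -> caller bails
      return 0;
    }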
@@ -670,6 +652,7 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
} else {
PropertyCellRef array_constructor_protector(
broker(), factory()->array_constructor_protector());
+ array_constructor_protector.SerializeAsProtector();
can_inline_call = array_constructor_protector.value().AsSmi() ==
Protectors::kProtectorValid;
}
@@ -792,9 +775,12 @@ Reduction JSCreateLowering::ReduceJSCreateAsyncFunctionObject(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
// Create the register file.
+ MapRef fixed_array_map(broker(), factory()->fixed_array_map());
AllocationBuilder ab(jsgraph(), effect, control);
- ab.AllocateArray(register_count,
- MapRef(broker(), factory()->fixed_array_map()));
+ if (!ab.CanAllocateArray(register_count, fixed_array_map)) {
+ return NoChange();
+ }
+ ab.AllocateArray(register_count, fixed_array_map);
for (int i = 0; i < register_count; ++i) {
ab.Store(AccessBuilder::ForFixedArraySlot(i),
jsgraph()->UndefinedConstant());
@@ -904,13 +890,17 @@ Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
// Create the [[BoundArguments]] for the result.
Node* bound_arguments = jsgraph()->EmptyFixedArrayConstant();
if (arity > 0) {
- AllocationBuilder a(jsgraph(), effect, control);
- a.AllocateArray(arity, MapRef(broker(), factory()->fixed_array_map()));
+ MapRef fixed_array_map(broker(), factory()->fixed_array_map());
+ AllocationBuilder ab(jsgraph(), effect, control);
+ if (!ab.CanAllocateArray(arity, fixed_array_map)) {
+ return NoChange();
+ }
+ ab.AllocateArray(arity, fixed_array_map);
for (int i = 0; i < arity; ++i) {
- a.Store(AccessBuilder::ForFixedArraySlot(i),
- NodeProperties::GetValueInput(node, 2 + i));
+ ab.Store(AccessBuilder::ForFixedArraySlot(i),
+ NodeProperties::GetValueInput(node, 2 + i));
}
- bound_arguments = effect = a.Finish();
+ bound_arguments = effect = ab.Finish();
}
// Create the JSBoundFunction result.
@@ -1189,7 +1179,8 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralRegExp(Node* node) {
ProcessedFeedback const& feedback =
broker()->GetFeedbackForRegExpLiteral(p.feedback());
if (!feedback.IsInsufficient()) {
- JSRegExpRef literal = feedback.AsRegExpLiteral().value();
+ RegExpBoilerplateDescriptionRef literal =
+ feedback.AsRegExpLiteral().value();
Node* value = effect = AllocateLiteralRegExp(effect, control, literal);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
@@ -1427,67 +1418,70 @@ Reduction JSCreateLowering::ReduceJSCreateObject(Node* node) {
// Helper that allocates a FixedArray holding argument values recorded in the
// given {frame_state}. Serves as backing store for JSCreateArguments nodes.
-Node* JSCreateLowering::AllocateArguments(Node* effect, Node* control,
- Node* frame_state) {
- FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
+Node* JSCreateLowering::TryAllocateArguments(Node* effect, Node* control,
+ FrameState frame_state) {
+ FrameStateInfo state_info = frame_state.frame_state_info();
int argument_count = state_info.parameter_count() - 1; // Minus receiver.
if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
// Prepare an iterator over argument values recorded in the frame state.
- Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+ Node* const parameters = frame_state.parameters();
StateValuesAccess parameters_access(parameters);
- auto parameters_it = ++parameters_access.begin();
+ auto parameters_it = parameters_access.begin_without_receiver();
// Actually allocate the backing store.
- AllocationBuilder a(jsgraph(), effect, control);
- a.AllocateArray(argument_count,
- MapRef(broker(), factory()->fixed_array_map()));
+ MapRef fixed_array_map(broker(), factory()->fixed_array_map());
+ AllocationBuilder ab(jsgraph(), effect, control);
+ if (!ab.CanAllocateArray(argument_count, fixed_array_map)) {
+ return nullptr;
+ }
+ ab.AllocateArray(argument_count, fixed_array_map);
for (int i = 0; i < argument_count; ++i, ++parameters_it) {
DCHECK_NOT_NULL(parameters_it.node());
- a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
- parameters_it.node());
+ ab.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
+ parameters_it.node());
}
- return a.Finish();
+ return ab.Finish();
}
// Helper that allocates a FixedArray holding argument values recorded in the
// given {frame_state}. Serves as backing store for JSCreateArguments nodes.
-Node* JSCreateLowering::AllocateRestArguments(Node* effect, Node* control,
- Node* frame_state,
- int start_index) {
- FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
+Node* JSCreateLowering::TryAllocateRestArguments(Node* effect, Node* control,
+ FrameState frame_state,
+ int start_index) {
+ FrameStateInfo state_info = frame_state.frame_state_info();
int argument_count = state_info.parameter_count() - 1; // Minus receiver.
int num_elements = std::max(0, argument_count - start_index);
if (num_elements == 0) return jsgraph()->EmptyFixedArrayConstant();
// Prepare an iterator over argument values recorded in the frame state.
- Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+ Node* const parameters = frame_state.parameters();
StateValuesAccess parameters_access(parameters);
- auto parameters_it = ++parameters_access.begin();
-
- // Skip unused arguments.
- for (int i = 0; i < start_index; i++) {
- ++parameters_it;
- }
+ auto parameters_it =
+ parameters_access.begin_without_receiver_and_skip(start_index);
// Actually allocate the backing store.
- AllocationBuilder a(jsgraph(), effect, control);
- a.AllocateArray(num_elements, MapRef(broker(), factory()->fixed_array_map()));
+ MapRef fixed_array_map(broker(), factory()->fixed_array_map());
+ AllocationBuilder ab(jsgraph(), effect, control);
+ if (!ab.CanAllocateArray(num_elements, fixed_array_map)) {
+ return nullptr;
+ }
+ ab.AllocateArray(num_elements, fixed_array_map);
for (int i = 0; i < num_elements; ++i, ++parameters_it) {
DCHECK_NOT_NULL(parameters_it.node());
- a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
- parameters_it.node());
+ ab.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
+ parameters_it.node());
}
- return a.Finish();
+ return ab.Finish();
}
// Helper that allocates a FixedArray serving as a parameter map for values
// recorded in the given {frame_state}. Some elements map to slots within the
// given {context}. Serves as backing store for JSCreateArguments nodes.
-Node* JSCreateLowering::AllocateAliasedArguments(
- Node* effect, Node* control, Node* frame_state, Node* context,
+Node* JSCreateLowering::TryAllocateAliasedArguments(
+ Node* effect, Node* control, FrameState frame_state, Node* context,
const SharedFunctionInfoRef& shared, bool* has_aliased_arguments) {
- FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
+ FrameStateInfo state_info = frame_state.frame_state_info();
int argument_count = state_info.parameter_count() - 1; // Minus receiver.
if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
@@ -1495,40 +1489,50 @@ Node* JSCreateLowering::AllocateAliasedArguments(
// any way, we can just return an unmapped backing store instead.
int parameter_count = shared.internal_formal_parameter_count();
if (parameter_count == 0) {
- return AllocateArguments(effect, control, frame_state);
+ return TryAllocateArguments(effect, control, frame_state);
}
// Calculate number of argument values being aliased/mapped.
int mapped_count = std::min(argument_count, parameter_count);
*has_aliased_arguments = true;
+ MapRef sloppy_arguments_elements_map(
+ broker(), factory()->sloppy_arguments_elements_map());
+ if (!AllocationBuilder::CanAllocateSloppyArgumentElements(
+ mapped_count, sloppy_arguments_elements_map)) {
+ return nullptr;
+ }
+
+ MapRef fixed_array_map(broker(), factory()->fixed_array_map());
+ if (!AllocationBuilder::CanAllocateArray(argument_count, fixed_array_map)) {
+ return nullptr;
+ }
+
// Prepare an iterator over argument values recorded in the frame state.
- Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+ Node* const parameters = frame_state.parameters();
StateValuesAccess parameters_access(parameters);
- auto parameters_it = ++parameters_access.begin();
+ auto parameters_it =
+ parameters_access.begin_without_receiver_and_skip(mapped_count);
// The unmapped argument values recorded in the frame state are stored yet
// another indirection away and then linked into the parameter map below,
// whereas mapped argument values are replaced with a hole instead.
- AllocationBuilder aa(jsgraph(), effect, control);
- aa.AllocateArray(argument_count,
- MapRef(broker(), factory()->fixed_array_map()));
- for (int i = 0; i < mapped_count; ++i, ++parameters_it) {
- aa.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
+ AllocationBuilder ab(jsgraph(), effect, control);
+ ab.AllocateArray(argument_count, fixed_array_map);
+ for (int i = 0; i < mapped_count; ++i) {
+ ab.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
jsgraph()->TheHoleConstant());
}
for (int i = mapped_count; i < argument_count; ++i, ++parameters_it) {
DCHECK_NOT_NULL(parameters_it.node());
- aa.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
+ ab.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
parameters_it.node());
}
- Node* arguments = aa.Finish();
+ Node* arguments = ab.Finish();
// Actually allocate the backing store.
AllocationBuilder a(jsgraph(), arguments, control);
- a.AllocateSloppyArgumentElements(
- mapped_count,
- MapRef(broker(), factory()->sloppy_arguments_elements_map()));
+ a.AllocateSloppyArgumentElements(mapped_count, sloppy_arguments_elements_map);
a.Store(AccessBuilder::ForSloppyArgumentsElementsContext(), context);
a.Store(AccessBuilder::ForSloppyArgumentsElementsArguments(), arguments);
for (int i = 0; i < mapped_count; ++i) {
@@ -1543,10 +1547,9 @@ Node* JSCreateLowering::AllocateAliasedArguments(
// unknown at compile-time, the true {arguments_length} and {arguments_frame}
// values can only be determined dynamically at run-time and are provided.
// Serves as backing store for JSCreateArguments nodes.
-Node* JSCreateLowering::AllocateAliasedArguments(
- Node* effect, Node* control, Node* context, Node* arguments_frame,
- Node* arguments_length, const SharedFunctionInfoRef& shared,
- bool* has_aliased_arguments) {
+Node* JSCreateLowering::TryAllocateAliasedArguments(
+ Node* effect, Node* control, Node* context, Node* arguments_length,
+ const SharedFunctionInfoRef& shared, bool* has_aliased_arguments) {
// If there is no aliasing, the arguments object elements are not
// special in any way, we can just return an unmapped backing store.
int parameter_count = shared.internal_formal_parameter_count();
@@ -1554,14 +1557,21 @@ Node* JSCreateLowering::AllocateAliasedArguments(
return graph()->NewNode(
simplified()->NewArgumentsElements(
CreateArgumentsType::kUnmappedArguments, parameter_count),
- arguments_frame, arguments_length, effect);
+ arguments_length, effect);
+ }
+
+ int mapped_count = parameter_count;
+ MapRef sloppy_arguments_elements_map(
+ broker(), factory()->sloppy_arguments_elements_map());
+ if (!AllocationBuilder::CanAllocateSloppyArgumentElements(
+ mapped_count, sloppy_arguments_elements_map)) {
+ return nullptr;
}
// From here on we are going to allocate a mapped (aka. aliased) elements
// backing store. We do not statically know how many arguments exist, but
// dynamically selecting the hole for some of the "mapped" elements allows
// using a static shape for the parameter map.
- int mapped_count = parameter_count;
*has_aliased_arguments = true;
// The unmapped argument values are stored yet another indirection away and
@@ -1570,13 +1580,11 @@ Node* JSCreateLowering::AllocateAliasedArguments(
Node* arguments = effect =
graph()->NewNode(simplified()->NewArgumentsElements(
CreateArgumentsType::kMappedArguments, mapped_count),
- arguments_frame, arguments_length, effect);
+ arguments_length, effect);
// Actually allocate the backing store.
AllocationBuilder a(jsgraph(), effect, control);
- a.AllocateSloppyArgumentElements(
- mapped_count,
- MapRef(broker(), factory()->sloppy_arguments_elements_map()));
+ a.AllocateSloppyArgumentElements(mapped_count, sloppy_arguments_elements_map);
a.Store(AccessBuilder::ForSloppyArgumentsElementsContext(), context);
a.Store(AccessBuilder::ForSloppyArgumentsElementsArguments(), arguments);
for (int i = 0; i < mapped_count; ++i) {
@@ -1667,52 +1675,35 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
kFullWriteBarrier,
LoadSensitivity::kUnsafe,
const_field_info};
+ ObjectRef boilerplate_value = boilerplate.RawFastPropertyAt(index);
+ bool is_uninitialized =
+ boilerplate_value.IsHeapObject() &&
+ boilerplate_value.AsHeapObject().map().oddball_type() ==
+ OddballType::kUninitialized;
+ if (is_uninitialized) {
+ access.const_field_info = ConstFieldInfo::None();
+ }
Node* value;
- if (boilerplate_map.IsUnboxedDoubleField(i)) {
- access.machine_type = MachineType::Float64();
- access.type = Type::Number();
- uint64_t value_bits = boilerplate.RawFastDoublePropertyAsBitsAt(index);
- if (value_bits == kHoleNanInt64) {
- // This special case is analogous to is_uninitialized being true in the
- // non-unboxed-double case below. The store of the hole NaN value here
- // will always be followed by another store that actually initializes
- // the field. The hole NaN should therefore be unobservable.
- // Load elimination expects there to be at most one const store to any
- // given field, so we always mark the unobservable ones as mutable.
- access.const_field_info = ConstFieldInfo::None();
- }
- value = jsgraph()->Constant(bit_cast<double>(value_bits));
+ if (boilerplate_value.IsJSObject()) {
+ JSObjectRef boilerplate_object = boilerplate_value.AsJSObject();
+ value = effect =
+ AllocateFastLiteral(effect, control, boilerplate_object, allocation);
+ } else if (property_details.representation().IsDouble()) {
+ double number = boilerplate_value.AsHeapNumber().value();
+ // Allocate a mutable HeapNumber box and store the value into it.
+ AllocationBuilder builder(jsgraph(), effect, control);
+ builder.Allocate(HeapNumber::kSize, allocation);
+ builder.Store(AccessBuilder::ForMap(),
+ MapRef(broker(), factory()->heap_number_map()));
+ builder.Store(AccessBuilder::ForHeapNumberValue(),
+ jsgraph()->Constant(number));
+ value = effect = builder.Finish();
+ } else if (property_details.representation().IsSmi()) {
+ // Ensure that value is stored as smi.
+ value = is_uninitialized ? jsgraph()->ZeroConstant()
+ : jsgraph()->Constant(boilerplate_value.AsSmi());
} else {
- ObjectRef boilerplate_value = boilerplate.RawFastPropertyAt(index);
- bool is_uninitialized =
- boilerplate_value.IsHeapObject() &&
- boilerplate_value.AsHeapObject().map().oddball_type() ==
- OddballType::kUninitialized;
- if (is_uninitialized) {
- access.const_field_info = ConstFieldInfo::None();
- }
- if (boilerplate_value.IsJSObject()) {
- JSObjectRef boilerplate_object = boilerplate_value.AsJSObject();
- value = effect = AllocateFastLiteral(effect, control,
- boilerplate_object, allocation);
- } else if (property_details.representation().IsDouble()) {
- double number = boilerplate_value.AsHeapNumber().value();
- // Allocate a mutable HeapNumber box and store the value into it.
- AllocationBuilder builder(jsgraph(), effect, control);
- builder.Allocate(HeapNumber::kSize, allocation);
- builder.Store(AccessBuilder::ForMap(),
- MapRef(broker(), factory()->heap_number_map()));
- builder.Store(AccessBuilder::ForHeapNumberValue(),
- jsgraph()->Constant(number));
- value = effect = builder.Finish();
- } else if (property_details.representation().IsSmi()) {
- // Ensure that value is stored as smi.
- value = is_uninitialized
- ? jsgraph()->ZeroConstant()
- : jsgraph()->Constant(boilerplate_value.AsSmi());
- } else {
- value = jsgraph()->Constant(boilerplate_value);
- }
+ value = jsgraph()->Constant(boilerplate_value);
}
inobject_fields.push_back(std::make_pair(access, value));
}
@@ -1744,7 +1735,7 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
JSArrayRef boilerplate_array = boilerplate.AsJSArray();
builder.Store(
AccessBuilder::ForJSArrayLength(boilerplate_array.GetElementsKind()),
- boilerplate_array.length());
+ boilerplate_array.GetBoilerplateLength());
}
for (auto const& inobject_field : inobject_fields) {
builder.Store(inobject_field.first, inobject_field.second);
@@ -1755,7 +1746,7 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
Node* JSCreateLowering::AllocateFastLiteralElements(Node* effect, Node* control,
JSObjectRef boilerplate,
AllocationType allocation) {
- FixedArrayBaseRef boilerplate_elements = boilerplate.elements();
+ FixedArrayBaseRef boilerplate_elements = boilerplate.elements().value();
// Empty or copy-on-write elements just store a constant.
int const elements_length = boilerplate_elements.length();
@@ -1763,7 +1754,7 @@ Node* JSCreateLowering::AllocateFastLiteralElements(Node* effect, Node* control,
if (boilerplate_elements.length() == 0 || elements_map.IsFixedCowArrayMap()) {
if (allocation == AllocationType::kOld) {
boilerplate.EnsureElementsTenured();
- boilerplate_elements = boilerplate.elements();
+ boilerplate_elements = boilerplate.elements().value();
}
return jsgraph()->HeapConstant(boilerplate_elements.object());
}
@@ -1794,48 +1785,47 @@ Node* JSCreateLowering::AllocateFastLiteralElements(Node* effect, Node* control,
}
// Allocate the backing store array and store the elements.
- AllocationBuilder builder(jsgraph(), effect, control);
- builder.AllocateArray(elements_length, elements_map, allocation);
+ AllocationBuilder ab(jsgraph(), effect, control);
+ CHECK(ab.CanAllocateArray(elements_length, elements_map, allocation));
+ ab.AllocateArray(elements_length, elements_map, allocation);
ElementAccess const access =
(elements_map.instance_type() == FIXED_DOUBLE_ARRAY_TYPE)
? AccessBuilder::ForFixedDoubleArrayElement()
: AccessBuilder::ForFixedArrayElement();
for (int i = 0; i < elements_length; ++i) {
- builder.Store(access, jsgraph()->Constant(i), elements_values[i]);
+ ab.Store(access, jsgraph()->Constant(i), elements_values[i]);
}
- return builder.Finish();
+ return ab.Finish();
}
-Node* JSCreateLowering::AllocateLiteralRegExp(Node* effect, Node* control,
- JSRegExpRef boilerplate) {
- MapRef boilerplate_map = boilerplate.map();
+Node* JSCreateLowering::AllocateLiteralRegExp(
+ Node* effect, Node* control, RegExpBoilerplateDescriptionRef boilerplate) {
+ MapRef initial_map = native_context().regexp_function().initial_map();
// Sanity check that JSRegExp object layout hasn't changed.
- STATIC_ASSERT(static_cast<int>(JSRegExp::kDataOffset) ==
- static_cast<int>(JSObject::kHeaderSize));
+ STATIC_ASSERT(JSRegExp::kDataOffset == JSObject::kHeaderSize);
STATIC_ASSERT(JSRegExp::kSourceOffset == JSRegExp::kDataOffset + kTaggedSize);
STATIC_ASSERT(JSRegExp::kFlagsOffset ==
JSRegExp::kSourceOffset + kTaggedSize);
STATIC_ASSERT(JSRegExp::kHeaderSize == JSRegExp::kFlagsOffset + kTaggedSize);
STATIC_ASSERT(JSRegExp::kLastIndexOffset == JSRegExp::kHeaderSize);
- STATIC_ASSERT(JSRegExp::kInObjectFieldCount == 1); // LastIndex.
-
- const AllocationType allocation = AllocationType::kYoung;
- const int size =
- JSRegExp::kHeaderSize + JSRegExp::kInObjectFieldCount * kTaggedSize;
+ DCHECK_EQ(JSRegExp::Size(), JSRegExp::kLastIndexOffset + kTaggedSize);
AllocationBuilder builder(jsgraph(), effect, control);
- builder.Allocate(size, allocation, Type::For(boilerplate_map));
- builder.Store(AccessBuilder::ForMap(), boilerplate_map);
+ builder.Allocate(JSRegExp::Size(), AllocationType::kYoung,
+ Type::For(initial_map));
+ builder.Store(AccessBuilder::ForMap(), initial_map);
builder.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
- boilerplate.raw_properties_or_hash());
- builder.Store(AccessBuilder::ForJSObjectElements(), boilerplate.elements());
+ jsgraph()->EmptyFixedArrayConstant());
+ builder.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
builder.Store(AccessBuilder::ForJSRegExpData(), boilerplate.data());
builder.Store(AccessBuilder::ForJSRegExpSource(), boilerplate.source());
- builder.Store(AccessBuilder::ForJSRegExpFlags(), boilerplate.flags());
+ builder.Store(AccessBuilder::ForJSRegExpFlags(),
+ jsgraph()->SmiConstant(boilerplate.flags()));
builder.Store(AccessBuilder::ForJSRegExpLastIndex(),
- boilerplate.last_index());
+ jsgraph()->SmiConstant(JSRegExp::kInitialLastIndexValue));
return builder.Finish();
}
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index 2fb28ebfd4..0edbda79a0 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -22,6 +22,7 @@ namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
class CompilationDependencies;
+class FrameState;
class JSGraph;
class JSOperatorBuilder;
class MachineOperatorBuilder;
@@ -82,17 +83,21 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
const SlackTrackingPrediction& slack_tracking_prediction);
Reduction ReduceJSCreateObject(Node* node);
- Node* AllocateArguments(Node* effect, Node* control, Node* frame_state);
- Node* AllocateRestArguments(Node* effect, Node* control, Node* frame_state,
- int start_index);
- Node* AllocateAliasedArguments(Node* effect, Node* control, Node* frame_state,
- Node* context,
- const SharedFunctionInfoRef& shared,
- bool* has_aliased_arguments);
- Node* AllocateAliasedArguments(Node* effect, Node* control, Node* context,
- Node* arguments_frame, Node* arguments_length,
- const SharedFunctionInfoRef& shared,
- bool* has_aliased_arguments);
+ // The following functions all return nullptr iff there are too many arguments
+ // for inline allocation.
+ Node* TryAllocateArguments(Node* effect, Node* control,
+ FrameState frame_state);
+ Node* TryAllocateRestArguments(Node* effect, Node* control,
+ FrameState frame_state, int start_index);
+ Node* TryAllocateAliasedArguments(Node* effect, Node* control,
+ FrameState frame_state, Node* context,
+ const SharedFunctionInfoRef& shared,
+ bool* has_aliased_arguments);
+ Node* TryAllocateAliasedArguments(Node* effect, Node* control, Node* context,
+ Node* arguments_length,
+ const SharedFunctionInfoRef& shared,
+ bool* has_aliased_arguments);
+
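// Caller-side sketch (hypothetical; not taken verbatim from this patch) of
// how the nullptr contract above is consumed by the reducers:
//
//   Node* const arguments = TryAllocateArguments(effect, control, frame_state);
//   if (arguments == nullptr) return NoChange();  // too many args; use runtime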
Node* AllocateElements(Node* effect, Node* control,
ElementsKind elements_kind, int capacity,
AllocationType allocation);
@@ -108,7 +113,7 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
JSObjectRef boilerplate,
AllocationType allocation);
Node* AllocateLiteralRegExp(Node* effect, Node* control,
- JSRegExpRef boilerplate);
+ RegExpBoilerplateDescriptionRef boilerplate);
Factory* factory() const;
Graph* graph() const;
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index c8cce37ad9..33f2f742b0 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -873,7 +873,7 @@ void JSGenericLowering::LowerJSConstruct(Node* node) {
zone(), callable.descriptor(), stack_argument_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* stub_arity = jsgraph()->Int32Constant(arg_count);
- Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+ Node* slot = jsgraph()->UintPtrConstant(p.feedback().index());
Node* receiver = jsgraph()->UndefinedConstant();
Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex());
// Register argument inputs are followed by stack argument inputs (such as
@@ -935,7 +935,7 @@ void JSGenericLowering::LowerJSConstructWithArrayLike(Node* node) {
zone(), callable.descriptor(), stack_argument_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* receiver = jsgraph()->UndefinedConstant();
- Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+ Node* slot = jsgraph()->UintPtrConstant(p.feedback().index());
Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex());
// Register argument inputs are followed by stack argument inputs (such as
// feedback_vector). Both are listed in ascending order. Note that
@@ -997,7 +997,7 @@ void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), stack_argument_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+ Node* slot = jsgraph()->UintPtrConstant(p.feedback().index());
// The single available register is needed for `slot`, thus `spread` remains
// on the stack here.
@@ -1088,7 +1088,7 @@ void JSGenericLowering::LowerJSCall(Node* node) {
zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* stub_arity = jsgraph()->Int32Constant(arg_count);
- Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+ Node* slot = jsgraph()->UintPtrConstant(p.feedback().index());
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 2, stub_arity);
node->InsertInput(zone(), 3, slot);
@@ -1128,7 +1128,7 @@ void JSGenericLowering::LowerJSCallWithArrayLike(Node* node) {
Node* receiver = n.receiver();
Node* arguments_list = n.Argument(0);
Node* feedback_vector = n.feedback_vector();
- Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+ Node* slot = jsgraph()->UintPtrConstant(p.feedback().index());
// Shuffling inputs.
// Before: {target, receiver, arguments_list, vector}.
@@ -1193,7 +1193,7 @@ void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), stack_argument_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+ Node* slot = jsgraph()->UintPtrConstant(p.feedback().index());
// We pass the spread in a register, not on the stack.
Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
@@ -1251,6 +1251,9 @@ void JSGenericLowering::LowerJSCallRuntime(Node* node) {
ReplaceWithRuntimeCall(node, p.id(), static_cast<int>(p.arity()));
}
+// Will be lowered in SimplifiedLowering.
+void JSGenericLowering::LowerJSWasmCall(Node* node) {}
+
void JSGenericLowering::LowerJSForInPrepare(Node* node) {
JSForInPrepareNode n(node);
Effect effect(node); // {node} is kept in the effect chain.
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 120f8ee21d..aca12b4cb5 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -48,11 +48,12 @@ Node* JSGraph::CEntryStubConstant(int result_size, SaveFPRegsMode save_doubles,
Node* JSGraph::Constant(const ObjectRef& ref) {
if (ref.IsSmi()) return Constant(ref.AsSmi());
- OddballType oddball_type =
- ref.AsHeapObject().GetHeapObjectType().oddball_type();
if (ref.IsHeapNumber()) {
return Constant(ref.AsHeapNumber().value());
- } else if (oddball_type == OddballType::kUndefined) {
+ }
+ OddballType oddball_type =
+ ref.AsHeapObject().GetHeapObjectType().oddball_type();
+ if (oddball_type == OddballType::kUndefined) {
DCHECK(ref.object().equals(isolate()->factory()->undefined_value()));
return UndefinedConstant();
} else if (oddball_type == OddballType::kNull) {
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index cd7e3df3e0..0d428995a1 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -32,6 +32,7 @@
#include "src/objects/literal-objects-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/property-details.h"
#include "src/objects/template-objects-inl.h"
#include "src/objects/templates.h"
#include "src/utils/utils.h"
@@ -45,12 +46,12 @@ namespace compiler {
#define FORWARD_DECL(Name) class Name##Data;
HEAP_BROKER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
-// TODO(solanes, v8:10866): Remove once FLAG_turbo_direct_heap_access is
-// removed.
+HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
+HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
#undef FORWARD_DECL
-// There are five kinds of ObjectData values.
+// There are several kinds of ObjectData values.
//
// kSmi: The underlying V8 object is a Smi and the data is an instance of the
// base class (ObjectData), i.e. it's basically just the handle. Because the
@@ -61,6 +62,9 @@ HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
// data is an instance of the corresponding (most-specific) subclass, e.g.
// JSFunctionData, which provides serialized information about the object.
//
+// kBackgroundSerializedHeapObject: Like kSerializedHeapObject, but
+// allows serialization from the background thread.
+//
// kUnserializedHeapObject: The underlying V8 object is a HeapObject and the
// data is an instance of the base class (ObjectData), i.e. it basically
// carries no information other than the handle.
@@ -78,6 +82,7 @@ HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
enum ObjectDataKind {
kSmi,
kSerializedHeapObject,
+ kBackgroundSerializedHeapObject,
kUnserializedHeapObject,
kNeverSerializedHeapObject,
kUnserializedReadOnlyHeapObject
@@ -90,6 +95,20 @@ bool IsReadOnlyHeapObject(Object object) {
(object.IsHeapObject() &&
ReadOnlyHeap::Contains(HeapObject::cast(object)));
}
+
+template <class T>
+constexpr bool IsSerializedHeapObject() {
+ return false;
+}
+
+#define DEFINE_MARKER(Name) \
+ template <> \
+ constexpr bool IsSerializedHeapObject<Name>() { \
+ return true; \
+ } \
+ STATIC_ASSERT(IsSerializedHeapObject<Name>());
+HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEFINE_MARKER)
+#undef DEFINE_MARKER
} // namespace
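// Illustration (hypothetical inputs; kinds as defined above) of how objects
// handled by TryGetOrCreateData further below map onto ObjectDataKind:
//   a Smi                                -> kSmi
//   a HeapObject in read-only space      -> kUnserializedReadOnlyHeapObject
//   an object on the never-serialized
//     list (with direct heap access)     -> kNeverSerializedHeapObject
//   an object on the serialized lists    -> kSerializedHeapObject, or
//     kBackgroundSerializedHeapObject if background serialization is allowed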
class ObjectData : public ZoneObject {
@@ -116,20 +135,24 @@ class ObjectData : public ZoneObject {
broker->mode() == JSHeapBroker::kSerializing,
broker->isolate()->handle_scope_data()->canonical_scope != nullptr);
CHECK_IMPLIES(broker->mode() == JSHeapBroker::kSerialized,
- (kind == kUnserializedReadOnlyHeapObject &&
- IsReadOnlyHeapObject(*object)) ||
- kind == kNeverSerializedHeapObject);
+ kind == kUnserializedReadOnlyHeapObject || kind == kSmi ||
+ kind == kNeverSerializedHeapObject ||
+ kind == kBackgroundSerializedHeapObject);
+ CHECK_IMPLIES(kind == kUnserializedReadOnlyHeapObject,
+ IsReadOnlyHeapObject(*object));
}
#define DECLARE_IS(Name) bool Is##Name() const;
HEAP_BROKER_SERIALIZED_OBJECT_LIST(DECLARE_IS)
+ HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(DECLARE_IS)
+ HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DECLARE_IS)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DECLARE_IS)
#undef DECLARE_IS
#define DECLARE_AS(Name) Name##Data* As##Name();
HEAP_BROKER_SERIALIZED_OBJECT_LIST(DECLARE_AS)
- // TODO(solanes, v8:10866): Remove once FLAG_turbo_direct_heap_access is
- // removed.
+ HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(DECLARE_AS)
+ HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DECLARE_AS)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DECLARE_AS)
#undef DECLARE_AS
@@ -155,7 +178,8 @@ class ObjectData : public ZoneObject {
class HeapObjectData : public ObjectData {
public:
HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
- Handle<HeapObject> object);
+ Handle<HeapObject> object,
+ ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject);
bool boolean_value() const { return boolean_value_; }
ObjectData* map() const { return map_; }
@@ -172,17 +196,26 @@ class HeapObjectData : public ObjectData {
class PropertyCellData : public HeapObjectData {
public:
PropertyCellData(JSHeapBroker* broker, ObjectData** storage,
- Handle<PropertyCell> object);
+ Handle<PropertyCell> object,
+ ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject);
- PropertyDetails property_details() const { return property_details_; }
+ bool Serialize(JSHeapBroker* broker);
- void Serialize(JSHeapBroker* broker);
- ObjectData* value() const { return value_; }
+ PropertyDetails property_details() const {
+ CHECK(serialized());
+ return property_details_;
+ }
- private:
- PropertyDetails const property_details_;
+ ObjectData* value() const {
+ DCHECK(serialized());
+ return value_;
+ }
+ private:
+ PropertyDetails property_details_ = PropertyDetails::Empty();
ObjectData* value_ = nullptr;
+
+ bool serialized() const { return value_ != nullptr; }
};
// TODO(mslekova): Once we have real-world usage data, we might want to
@@ -265,16 +298,65 @@ void JSHeapBroker::IncrementTracingIndentation() { ++trace_indentation_; }
void JSHeapBroker::DecrementTracingIndentation() { --trace_indentation_; }
PropertyCellData::PropertyCellData(JSHeapBroker* broker, ObjectData** storage,
- Handle<PropertyCell> object)
- : HeapObjectData(broker, storage, object),
- property_details_(object->property_details()) {}
+ Handle<PropertyCell> object,
+ ObjectDataKind kind)
+ : HeapObjectData(broker, storage, object, kind) {}
-void PropertyCellData::Serialize(JSHeapBroker* broker) {
- if (value_ != nullptr) return;
+bool PropertyCellData::Serialize(JSHeapBroker* broker) {
+ if (serialized()) return true;
TraceScope tracer(broker, this, "PropertyCellData::Serialize");
auto cell = Handle<PropertyCell>::cast(object());
- value_ = broker->GetOrCreateData(cell->value());
+
+ // While this code runs on a background thread, the property cell might
+ // undergo state transitions via calls to PropertyCell::Transition. These
+ // transitions follow a certain protocol on which we rely here to ensure that
+ // we only report success when we can guarantee consistent data. A key
+ // property is that after transitioning from cell type A to B (A != B), there
+ // will never be a transition back to A, unless A is kConstant and the new
+ // value is the hole (i.e. the property cell was invalidated, which is a final
+ // state).
+
+ PropertyDetails property_details = cell->property_details(kAcquireLoad);
+
+ Handle<Object> value =
+ broker->CanonicalPersistentHandle(cell->value(kAcquireLoad));
+ if (broker->ObjectMayBeUninitialized(value)) {
+ DCHECK(!broker->IsMainThread());
+ return false;
+ }
+
+ {
+ PropertyDetails property_details_again =
+ cell->property_details(kAcquireLoad);
+ if (property_details != property_details_again) {
+ DCHECK(!broker->IsMainThread());
+ return false;
+ }
+ }
+
+ if (property_details.cell_type() == PropertyCellType::kConstant) {
+ Handle<Object> value_again =
+ broker->CanonicalPersistentHandle(cell->value(kAcquireLoad));
+ if (*value != *value_again) {
+ DCHECK(!broker->IsMainThread());
+ return false;
+ }
+ }
+
+ ObjectData* value_data = broker->TryGetOrCreateData(value, false);
+ if (value_data == nullptr) {
+ DCHECK(!broker->IsMainThread());
+ return false;
+ }
+
+ PropertyCell::CheckDataIsCompatible(property_details, *value);
+
+ DCHECK(!serialized());
+ property_details_ = property_details;
+ value_ = value_data;
+ DCHECK(serialized());
+ return true;
}
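// Condensed view of the consistency protocol implemented above (sketch only;
// helpers and acquire loads as used in this function):
//   PropertyDetails d1 = cell->property_details(kAcquireLoad);
//   Handle<Object> v =
//       broker->CanonicalPersistentHandle(cell->value(kAcquireLoad));
//   if (broker->ObjectMayBeUninitialized(v)) return false;         // not ready
//   if (cell->property_details(kAcquireLoad) != d1) return false;  // raced
//   if (d1.cell_type() == PropertyCellType::kConstant &&
//       *v != *cell->value(kAcquireLoad)) return false;            // raced
//   // Only now is (d1, v) published as the serialized state.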
void FunctionTemplateInfoData::SerializeCallCode(JSHeapBroker* broker) {
@@ -303,32 +385,6 @@ void CallHandlerInfoData::Serialize(JSHeapBroker* broker) {
data_ = broker->GetOrCreateData(call_handler_info->data());
}
-class JSObjectField {
- public:
- bool IsDouble() const { return object_ == nullptr; }
- uint64_t AsBitsOfDouble() const {
- CHECK(IsDouble());
- return number_bits_;
- }
- double AsDouble() const {
- CHECK(IsDouble());
- return bit_cast<double>(number_bits_);
- }
-
- bool IsObject() const { return object_ != nullptr; }
- ObjectData* AsObject() const {
- CHECK(IsObject());
- return object_;
- }
-
- explicit JSObjectField(uint64_t value_bits) : number_bits_(value_bits) {}
- explicit JSObjectField(ObjectData* value) : object_(value) {}
-
- private:
- ObjectData* object_ = nullptr;
- uint64_t number_bits_ = 0;
-};
-
class JSReceiverData : public HeapObjectData {
public:
JSReceiverData(JSHeapBroker* broker, ObjectData** storage,
@@ -343,7 +399,7 @@ class JSObjectData : public JSReceiverData {
// Recursive serialization of all reachable JSObjects.
void SerializeAsBoilerplate(JSHeapBroker* broker);
- const JSObjectField& GetInobjectField(int property_index) const;
+ ObjectData* GetInobjectField(int property_index) const;
// Shallow serialization of {elements}.
void SerializeElements(JSHeapBroker* broker);
@@ -382,7 +438,7 @@ class JSObjectData : public JSReceiverData {
bool serialized_as_boilerplate_ = false;
bool serialized_elements_ = false;
- ZoneVector<JSObjectField> inobject_fields_;
+ ZoneVector<ObjectData*> inobject_fields_;
bool serialized_object_create_map_ = false;
ObjectData* object_create_map_ = nullptr;
@@ -424,6 +480,7 @@ void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker) {
}
namespace {
+
base::Optional<ObjectRef> GetOwnElementFromHeap(JSHeapBroker* broker,
Handle<Object> receiver,
uint32_t index,
@@ -491,33 +548,30 @@ ObjectData* JSObjectData::GetOwnDataProperty(JSHeapBroker* broker,
class JSTypedArrayData : public JSObjectData {
public:
JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSTypedArray> object);
+ Handle<JSTypedArray> object)
+ : JSObjectData(broker, storage, object) {}
- bool is_on_heap() const { return is_on_heap_; }
- size_t length() const { return length_; }
- void* data_ptr() const { return data_ptr_; }
+ // TODO(v8:7790): Once JSObject is no longer serialized, also make
+ // JSTypedArrayRef never-serialized.
+ STATIC_ASSERT(IsSerializedHeapObject<JSObject>());
void Serialize(JSHeapBroker* broker);
bool serialized() const { return serialized_; }
+ bool is_on_heap() const { return is_on_heap_; }
+ size_t length() const { return length_; }
+ void* data_ptr() const { return data_ptr_; }
+
ObjectData* buffer() const { return buffer_; }
private:
- bool const is_on_heap_;
- size_t const length_;
- void* const data_ptr_;
-
bool serialized_ = false;
+ bool is_on_heap_ = false;
+ size_t length_ = 0;
+ void* data_ptr_ = nullptr;
ObjectData* buffer_ = nullptr;
};
-JSTypedArrayData::JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSTypedArray> object)
- : JSObjectData(broker, storage, object),
- is_on_heap_(object->is_on_heap()),
- length_(object->length()),
- data_ptr_(object->DataPtr()) {}
-
void JSTypedArrayData::Serialize(JSHeapBroker* broker) {
if (serialized_) return;
serialized_ = true;
@@ -525,6 +579,10 @@ void JSTypedArrayData::Serialize(JSHeapBroker* broker) {
TraceScope tracer(broker, this, "JSTypedArrayData::Serialize");
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(object());
+ is_on_heap_ = typed_array->is_on_heap();
+ length_ = typed_array->length();
+ data_ptr_ = typed_array->DataPtr();
+
if (!is_on_heap()) {
DCHECK_NULL(buffer_);
buffer_ = broker->GetOrCreateData(typed_array->buffer());
@@ -599,7 +657,6 @@ class JSFunctionData : public JSObjectData {
bool has_feedback_vector() const { return has_feedback_vector_; }
bool has_initial_map() const { return has_initial_map_; }
bool has_prototype() const { return has_prototype_; }
- bool HasAttachedOptimizedCode() const { return has_attached_optimized_code_; }
bool PrototypeRequiresRuntimeLookup() const {
return PrototypeRequiresRuntimeLookup_;
}
@@ -607,14 +664,29 @@ class JSFunctionData : public JSObjectData {
void Serialize(JSHeapBroker* broker);
bool serialized() const { return serialized_; }
+ void SerializeCodeAndFeedback(JSHeapBroker* broker);
+ bool serialized_code_and_feedback() const {
+ return serialized_code_and_feedback_;
+ }
+
ObjectData* context() const { return context_; }
ObjectData* native_context() const { return native_context_; }
ObjectData* initial_map() const { return initial_map_; }
ObjectData* prototype() const { return prototype_; }
ObjectData* shared() const { return shared_; }
- ObjectData* raw_feedback_cell() const { return feedback_cell_; }
- ObjectData* feedback_vector() const { return feedback_vector_; }
- ObjectData* code() const { return code_; }
+ ObjectData* raw_feedback_cell() const {
+ DCHECK(serialized_code_and_feedback());
+ return feedback_cell_;
+ }
+ ObjectData* feedback_vector() const {
+ DCHECK(serialized_code_and_feedback());
+ return feedback_vector_;
+ }
+ ObjectData* code() const {
+ DCHECK(serialized_code_and_feedback());
+ DCHECK(!FLAG_turbo_direct_heap_access);
+ return code_;
+ }
int initial_map_instance_size_with_min_slack() const {
CHECK(serialized_);
return initial_map_instance_size_with_min_slack_;
@@ -624,10 +696,10 @@ class JSFunctionData : public JSObjectData {
bool has_feedback_vector_;
bool has_initial_map_;
bool has_prototype_;
- bool has_attached_optimized_code_;
bool PrototypeRequiresRuntimeLookup_;
bool serialized_ = false;
+ bool serialized_code_and_feedback_ = false;
ObjectData* context_ = nullptr;
ObjectData* native_context_ = nullptr;
@@ -640,35 +712,40 @@ class JSFunctionData : public JSObjectData {
int initial_map_instance_size_with_min_slack_;
};
-class JSRegExpData : public JSObjectData {
+class RegExpBoilerplateDescriptionData : public HeapObjectData {
public:
- JSRegExpData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSRegExp> object)
- : JSObjectData(broker, storage, object) {}
-
- void SerializeAsRegExpBoilerplate(JSHeapBroker* broker);
+ RegExpBoilerplateDescriptionData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<RegExpBoilerplateDescription> object)
+ : HeapObjectData(broker, storage, object) {}
- ObjectData* raw_properties_or_hash() const { return raw_properties_or_hash_; }
- ObjectData* data() const { return data_; }
- ObjectData* source() const { return source_; }
- ObjectData* flags() const { return flags_; }
- ObjectData* last_index() const { return last_index_; }
+ void Serialize(JSHeapBroker* broker);
+ ObjectData* data() const {
+ CHECK(serialized_);
+ return data_;
+ }
+ ObjectData* source() const {
+ CHECK(serialized_);
+ return source_;
+ }
+ int flags() const {
+ CHECK(serialized_);
+ return flags_;
+ }
private:
- bool serialized_as_reg_exp_boilerplate_ = false;
-
- ObjectData* raw_properties_or_hash_ = nullptr;
+ bool serialized_ = false;
ObjectData* data_ = nullptr;
ObjectData* source_ = nullptr;
- ObjectData* flags_ = nullptr;
- ObjectData* last_index_ = nullptr;
+ int flags_;
};
class HeapNumberData : public HeapObjectData {
public:
HeapNumberData(JSHeapBroker* broker, ObjectData** storage,
- Handle<HeapNumber> object)
- : HeapObjectData(broker, storage, object), value_(object->value()) {}
+ Handle<HeapNumber> object,
+ ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject)
+ : HeapObjectData(broker, storage, object, kind),
+ value_(object->value()) {}
double value() const { return value_; }
@@ -741,21 +818,27 @@ class NativeContextData : public ContextData {
#undef DECL_ACCESSOR
const ZoneVector<ObjectData*>& function_maps() const {
- CHECK(serialized_);
+ CHECK_NE(state_, State::kUnserialized);
return function_maps_;
}
ObjectData* scope_info() const {
- CHECK(serialized_);
+ CHECK_NE(state_, State::kUnserialized);
return scope_info_;
}
NativeContextData(JSHeapBroker* broker, ObjectData** storage,
Handle<NativeContext> object);
void Serialize(JSHeapBroker* broker);
+ void SerializeOnBackground(JSHeapBroker* broker);
private:
- bool serialized_ = false;
+ // After Serialize is called the class is partially serialized and is in the
+ // kSerializedOnMainThread state. It then becomes kFullySerialized once
+ // SerializeOnBackground is called.
+ enum class State { kUnserialized, kSerializedOnMainThread, kFullySerialized };
+ State state_;
+
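// State-transition sketch implied by the comment above (illustration only):
//   kUnserialized           --Serialize()-------------> kSerializedOnMainThread
//   kSerializedOnMainThread --SerializeOnBackground()-> kFullySerialized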
#define DECL_MEMBER(type, name) ObjectData* name##_ = nullptr;
BROKER_NATIVE_CONTEXT_FIELDS(DECL_MEMBER)
#undef DECL_MEMBER
@@ -766,9 +849,7 @@ class NativeContextData : public ContextData {
class NameData : public HeapObjectData {
public:
NameData(JSHeapBroker* broker, ObjectData** storage, Handle<Name> object)
- : HeapObjectData(broker, storage, object) {
- DCHECK(!FLAG_turbo_direct_heap_access);
- }
+ : HeapObjectData(broker, storage, object) {}
};
class StringData : public NameData {
@@ -781,7 +862,7 @@ class StringData : public NameData {
bool is_external_string() const { return is_external_string_; }
bool is_seq_string() const { return is_seq_string_; }
- ObjectData* GetCharAsString(
+ ObjectData* GetCharAsStringOrUndefined(
JSHeapBroker* broker, uint32_t index,
SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
@@ -814,9 +895,7 @@ StringData::StringData(JSHeapBroker* broker, ObjectData** storage,
to_number_(TryStringToDouble(broker->local_isolate(), object)),
is_external_string_(object->IsExternalString()),
is_seq_string_(object->IsSeqString()),
- chars_as_strings_(broker->zone()) {
- DCHECK(!FLAG_turbo_direct_heap_access);
-}
+ chars_as_strings_(broker->zone()) {}
class InternalizedStringData : public StringData {
public:
@@ -827,8 +906,9 @@ class InternalizedStringData : public StringData {
}
};
-ObjectData* StringData::GetCharAsString(JSHeapBroker* broker, uint32_t index,
- SerializationPolicy policy) {
+ObjectData* StringData::GetCharAsStringOrUndefined(JSHeapBroker* broker,
+ uint32_t index,
+ SerializationPolicy policy) {
if (index >= static_cast<uint32_t>(length())) return nullptr;
for (auto const& p : chars_as_strings_) {
@@ -902,7 +982,6 @@ bool IsFastLiteralHelper(Handle<JSObject> boilerplate, int max_depth,
DCHECK_EQ(kData, details.kind());
if ((*max_properties)-- == 0) return false;
FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
- if (boilerplate->IsUnboxedDoubleField(field_index)) continue;
Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
@@ -968,8 +1047,9 @@ class AllocationSiteData : public HeapObjectData {
class BigIntData : public HeapObjectData {
public:
- BigIntData(JSHeapBroker* broker, ObjectData** storage, Handle<BigInt> object)
- : HeapObjectData(broker, storage, object),
+ BigIntData(JSHeapBroker* broker, ObjectData** storage, Handle<BigInt> object,
+ ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject)
+ : HeapObjectData(broker, storage, object, kind),
as_uint64_(object->AsUint64(nullptr)) {}
uint64_t AsUint64() const { return as_uint64_; }
@@ -993,12 +1073,12 @@ struct PropertyDescriptor {
FieldIndex field_index;
ObjectData* field_owner = nullptr;
ObjectData* field_type = nullptr;
- bool is_unboxed_double_field = false;
};
class MapData : public HeapObjectData {
public:
- MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object);
+ MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object,
+ ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject);
InstanceType instance_type() const { return instance_type_; }
int instance_size() const { return instance_size_; }
@@ -1157,8 +1237,8 @@ void AllocationSiteData::SerializeBoilerplate(JSHeapBroker* broker) {
}
HeapObjectData::HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
- Handle<HeapObject> object)
- : ObjectData(broker, storage, object, kSerializedHeapObject),
+ Handle<HeapObject> object, ObjectDataKind kind)
+ : ObjectData(broker, storage, object, kind),
boolean_value_(object->BooleanValue(broker->isolate())),
// We have to use a raw cast below instead of AsMap() because of
// recursion. AsMap() would call IsMap(), which accesses the
@@ -1166,7 +1246,10 @@ HeapObjectData::HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
// meta map (whose map is itself), this member has not yet been
// initialized.
map_(broker->GetOrCreateData(object->map())) {
- CHECK_EQ(broker->mode(), JSHeapBroker::kSerializing);
+ CHECK_IMPLIES(kind == kSerializedHeapObject,
+ broker->mode() == JSHeapBroker::kSerializing);
+ CHECK_IMPLIES(broker->mode() == JSHeapBroker::kSerialized,
+ kind == kBackgroundSerializedHeapObject);
}
InstanceType HeapObjectData::GetMapInstanceType() const {
@@ -1192,8 +1275,7 @@ bool SupportsFastArrayIteration(Isolate* isolate, Handle<Map> map) {
return map->instance_type() == JS_ARRAY_TYPE &&
IsFastElementsKind(map->elements_kind()) &&
map->prototype().IsJSArray() &&
- isolate->IsAnyInitialArrayPrototype(
- handle(JSArray::cast(map->prototype()), isolate)) &&
+ isolate->IsAnyInitialArrayPrototype(JSArray::cast(map->prototype())) &&
Protectors::IsNoElementsIntact(isolate);
}
@@ -1203,8 +1285,9 @@ bool SupportsFastArrayResize(Isolate* isolate, Handle<Map> map) {
}
} // namespace
-MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object)
- : HeapObjectData(broker, storage, object),
+MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object,
+ ObjectDataKind kind)
+ : HeapObjectData(broker, storage, object, kind),
instance_type_(object->instance_type()),
instance_size_(object->instance_size()),
bit_field_(object->bit_field()),
@@ -1238,7 +1321,6 @@ JSFunctionData::JSFunctionData(JSHeapBroker* broker, ObjectData** storage,
has_initial_map_(object->has_prototype_slot() &&
object->has_initial_map()),
has_prototype_(object->has_prototype_slot() && object->has_prototype()),
- has_attached_optimized_code_(object->HasAttachedOptimizedCode()),
PrototypeRequiresRuntimeLookup_(
object->PrototypeRequiresRuntimeLookup()) {}
@@ -1254,18 +1336,11 @@ void JSFunctionData::Serialize(JSHeapBroker* broker) {
DCHECK_NULL(initial_map_);
DCHECK_NULL(prototype_);
DCHECK_NULL(shared_);
- DCHECK_NULL(feedback_cell_);
- DCHECK_NULL(feedback_vector_);
- DCHECK_NULL(code_);
context_ = broker->GetOrCreateData(function->context());
native_context_ = broker->GetOrCreateData(function->native_context());
shared_ = broker->GetOrCreateData(function->shared());
- feedback_cell_ = broker->GetOrCreateData(function->raw_feedback_cell());
- feedback_vector_ = has_feedback_vector()
- ? broker->GetOrCreateData(function->feedback_vector())
- : nullptr;
- code_ = broker->GetOrCreateData(function->code());
+
initial_map_ = has_initial_map()
? broker->GetOrCreateData(function->initial_map())
: nullptr;
@@ -1288,6 +1363,29 @@ void JSFunctionData::Serialize(JSHeapBroker* broker) {
}
}
+void JSFunctionData::SerializeCodeAndFeedback(JSHeapBroker* broker) {
+ DCHECK(serialized_);
+ if (serialized_code_and_feedback_) return;
+ serialized_code_and_feedback_ = true;
+
+ TraceScope tracer(broker, this, "JSFunctionData::SerializeCodeAndFeedback");
+ Handle<JSFunction> function = Handle<JSFunction>::cast(object());
+
+ DCHECK_NULL(feedback_cell_);
+ DCHECK_NULL(feedback_vector_);
+ DCHECK_NULL(code_);
+ if (!FLAG_turbo_direct_heap_access) {
+ // This is conditionalized because Code objects are never serialized now.
+ // We only need to represent the code object in serialized data when
+ // we're unable to perform direct heap accesses.
+ code_ = broker->GetOrCreateData(function->code(kAcquireLoad));
+ }
+ feedback_cell_ = broker->GetOrCreateData(function->raw_feedback_cell());
+ feedback_vector_ = has_feedback_vector()
+ ? broker->GetOrCreateData(function->feedback_vector())
+ : nullptr;
+}
+
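// Call-order sketch (hypothetical call site): Serialize() is a prerequisite,
// as the DCHECK above enforces, before the code/feedback accessors are usable:
//   function_data->Serialize(broker);
//   function_data->SerializeCodeAndFeedback(broker);
//   ObjectData* vector = function_data->feedback_vector();  // now DCHECK-safe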
void MapData::SerializeElementsKindGeneralizations(JSHeapBroker* broker) {
if (serialized_elements_kind_generalizations_) return;
serialized_elements_kind_generalizations_ = true;
@@ -1314,12 +1412,78 @@ class DescriptorArrayData : public HeapObjectData {
Handle<DescriptorArray> object)
: HeapObjectData(broker, storage, object), contents_(broker->zone()) {}
- ZoneMap<int, PropertyDescriptor>& contents() { return contents_; }
+ ObjectData* FindFieldOwner(InternalIndex descriptor_index) const {
+ return contents_.at(descriptor_index.as_int()).field_owner;
+ }
+
+ PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const {
+ return contents_.at(descriptor_index.as_int()).details;
+ }
+
+ ObjectData* GetPropertyKey(InternalIndex descriptor_index) const {
+ return contents_.at(descriptor_index.as_int()).key;
+ }
+
+ FieldIndex GetFieldIndexFor(InternalIndex descriptor_index) const {
+ return contents_.at(descriptor_index.as_int()).field_index;
+ }
+
+ ObjectData* GetFieldType(InternalIndex descriptor_index) const {
+ return contents_.at(descriptor_index.as_int()).field_type;
+ }
+
+ ObjectData* GetStrongValue(InternalIndex descriptor_index) const {
+ return contents_.at(descriptor_index.as_int()).value;
+ }
+
+ bool serialized_descriptor(InternalIndex descriptor_index) const {
+ return contents_.find(descriptor_index.as_int()) != contents_.end();
+ }
+
+ void SerializeDescriptor(JSHeapBroker* broker, Handle<Map> map,
+ InternalIndex descriptor_index);
private:
ZoneMap<int, PropertyDescriptor> contents_;
};
+void DescriptorArrayData::SerializeDescriptor(JSHeapBroker* broker,
+ Handle<Map> map,
+ InternalIndex descriptor_index) {
+ CHECK_LT(descriptor_index.as_int(), map->NumberOfOwnDescriptors());
+ if (contents_.find(descriptor_index.as_int()) != contents_.end()) return;
+
+ Isolate* const isolate = broker->isolate();
+ auto descriptors = Handle<DescriptorArray>::cast(object());
+ CHECK_EQ(*descriptors, map->instance_descriptors(kRelaxedLoad));
+
+ PropertyDescriptor d;
+ d.key = broker->GetOrCreateData(descriptors->GetKey(descriptor_index));
+ MaybeObject value = descriptors->GetValue(descriptor_index);
+ HeapObject obj;
+ if (value.GetHeapObjectIfStrong(&obj)) {
+ d.value = broker->GetOrCreateData(obj);
+ }
+ d.details = descriptors->GetDetails(descriptor_index);
+ if (d.details.location() == kField) {
+ d.field_index = FieldIndex::ForDescriptor(*map, descriptor_index);
+ d.field_owner =
+ broker->GetOrCreateData(map->FindFieldOwner(isolate, descriptor_index));
+ d.field_type =
+ broker->GetOrCreateData(descriptors->GetFieldType(descriptor_index));
+ }
+ contents_[descriptor_index.as_int()] = d;
+
+ if (d.details.location() == kField && !d.field_owner->should_access_heap()) {
+ // Recurse on the owner map.
+ d.field_owner->AsMap()->SerializeOwnDescriptor(broker, descriptor_index);
+ }
+
+ TRACE(broker, "Copied descriptor " << descriptor_index.as_int() << " into "
+ << this << " (" << contents_.size()
+ << " total)");
+}
+
class FeedbackCellData : public HeapObjectData {
public:
FeedbackCellData(JSHeapBroker* broker, ObjectData** storage,
@@ -1334,7 +1498,11 @@ class FeedbackCellData : public HeapObjectData {
FeedbackCellData::FeedbackCellData(JSHeapBroker* broker, ObjectData** storage,
Handle<FeedbackCell> object)
: HeapObjectData(broker, storage, object),
- value_(broker->GetOrCreateData(object->value())) {}
+ value_(object->value().IsFeedbackVector()
+ ? broker->GetOrCreateData(object->value())
+ : nullptr) {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+}
class FeedbackVectorData : public HeapObjectData {
public:
@@ -1365,7 +1533,9 @@ FeedbackVectorData::FeedbackVectorData(JSHeapBroker* broker,
Handle<FeedbackVector> object)
: HeapObjectData(broker, storage, object),
invocation_count_(object->invocation_count()),
- closure_feedback_cell_array_(broker->zone()) {}
+ closure_feedback_cell_array_(broker->zone()) {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+}
ObjectData* FeedbackVectorData::GetClosureFeedbackCell(JSHeapBroker* broker,
int index) const {
@@ -1555,51 +1725,18 @@ class BytecodeArrayData : public FixedArrayBaseData {
return incoming_new_target_or_generator_register_;
}
- Handle<Object> GetConstantAtIndex(int index, Isolate* isolate) const {
- return constant_pool_[index]->object();
- }
-
- bool IsConstantAtIndexSmi(int index) const {
- return constant_pool_[index]->is_smi();
- }
-
- Smi GetConstantAtIndexAsSmi(int index) const {
- return *(Handle<Smi>::cast(constant_pool_[index]->object()));
- }
-
- void SerializeForCompilation(JSHeapBroker* broker) {
- if (is_serialized_for_compilation_) return;
-
- // Convinience cast: object() is already a canonical persistent handle.
- Handle<BytecodeArray> bytecodes = Handle<BytecodeArray>::cast(object());
-
- DCHECK(constant_pool_.empty());
- Handle<FixedArray> constant_pool(bytecodes->constant_pool(),
- broker->isolate());
- constant_pool_.reserve(constant_pool->length());
- for (int i = 0; i < constant_pool->length(); i++) {
- constant_pool_.push_back(broker->GetOrCreateData(constant_pool->get(i)));
- }
-
- is_serialized_for_compilation_ = true;
- }
-
BytecodeArrayData(JSHeapBroker* broker, ObjectData** storage,
Handle<BytecodeArray> object)
: FixedArrayBaseData(broker, storage, object),
register_count_(object->register_count()),
parameter_count_(object->parameter_count()),
incoming_new_target_or_generator_register_(
- object->incoming_new_target_or_generator_register()),
- constant_pool_(broker->zone()) {}
+ object->incoming_new_target_or_generator_register()) {}
private:
int const register_count_;
int const parameter_count_;
interpreter::Register const incoming_new_target_or_generator_register_;
-
- bool is_serialized_for_compilation_ = false;
- ZoneVector<ObjectData*> constant_pool_;
};
class JSArrayData : public JSObjectData {
@@ -1608,7 +1745,10 @@ class JSArrayData : public JSObjectData {
Handle<JSArray> object);
void Serialize(JSHeapBroker* broker);
- ObjectData* length() const { return length_; }
+ ObjectData* length() const {
+ CHECK(serialized_);
+ return length_;
+ }
ObjectData* GetOwnElement(
JSHeapBroker* broker, uint32_t index,
@@ -1630,6 +1770,8 @@ JSArrayData::JSArrayData(JSHeapBroker* broker, ObjectData** storage,
: JSObjectData(broker, storage, object), own_elements_(broker->zone()) {}
void JSArrayData::Serialize(JSHeapBroker* broker) {
+ CHECK(!FLAG_turbo_direct_heap_access);
+
if (serialized_) return;
serialized_ = true;
@@ -1969,7 +2111,12 @@ class CodeData : public HeapObjectData {
public:
CodeData(JSHeapBroker* broker, ObjectData** storage, Handle<Code> object)
: HeapObjectData(broker, storage, object),
- inlined_bytecode_size_(object->inlined_bytecode_size()) {}
+ inlined_bytecode_size_(object->inlined_bytecode_size() > 0 &&
+ !object->marked_for_deoptimization()
+ ? object->inlined_bytecode_size()
+ : 0) {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+ }
unsigned inlined_bytecode_size() const { return inlined_bytecode_size_; }
@@ -1988,16 +2135,21 @@ class CodeData : public HeapObjectData {
return InstanceTypeChecker::Is##Name(instance_type); \
}
HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEFINE_IS)
+HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_IS)
+HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_IS)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DEFINE_IS)
#undef DEFINE_IS
-#define DEFINE_AS(Name) \
- Name##Data* ObjectData::As##Name() { \
- CHECK(Is##Name()); \
- CHECK_EQ(kind_, kSerializedHeapObject); \
- return static_cast<Name##Data*>(this); \
+#define DEFINE_AS(Name) \
+ Name##Data* ObjectData::As##Name() { \
+ CHECK(Is##Name()); \
+ CHECK(kind_ == kSerializedHeapObject || \
+ kind_ == kBackgroundSerializedHeapObject); \
+ return static_cast<Name##Data*>(this); \
}
HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEFINE_AS)
+HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_AS)
+HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_AS)
#undef DEFINE_AS
// TODO(solanes, v8:10866): Remove once FLAG_turbo_direct_heap_access is
@@ -2014,7 +2166,7 @@ HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEFINE_AS)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DEFINE_AS)
#undef DEFINE_AS
-const JSObjectField& JSObjectData::GetInobjectField(int property_index) const {
+ObjectData* JSObjectData::GetInobjectField(int property_index) const {
CHECK_LT(static_cast<size_t>(property_index), inobject_fields_.size());
return inobject_fields_[property_index];
}
@@ -2023,7 +2175,10 @@ bool JSObjectData::cow_or_empty_elements_tenured() const {
return cow_or_empty_elements_tenured_;
}
-ObjectData* JSObjectData::elements() const { return elements_; }
+ObjectData* JSObjectData::elements() const {
+ CHECK(serialized_elements_);
+ return elements_;
+}
void JSObjectData::SerializeAsBoilerplate(JSHeapBroker* broker) {
SerializeRecursiveAsBoilerplate(broker, kMaxFastLiteralDepth);
@@ -2086,14 +2241,6 @@ void MapData::SerializeOwnDescriptors(JSHeapBroker* broker) {
}
}
-ObjectData* MapData::GetStrongValue(InternalIndex descriptor_index) const {
- DescriptorArrayData* descriptor_array =
- instance_descriptors()->AsDescriptorArray();
- auto data = descriptor_array->contents().find(descriptor_index.as_int());
- if (data == descriptor_array->contents().end()) return nullptr;
- return data->second.value;
-}
-
void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
InternalIndex descriptor_index) {
TraceScope tracer(broker, this, "MapData::SerializeOwnDescriptor");
@@ -2104,42 +2251,11 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
broker->GetOrCreateData(map->instance_descriptors(kRelaxedLoad));
}
- ZoneMap<int, PropertyDescriptor>& contents =
- instance_descriptors()->AsDescriptorArray()->contents();
- CHECK_LT(descriptor_index.as_int(), map->NumberOfOwnDescriptors());
- if (contents.find(descriptor_index.as_int()) != contents.end()) return;
-
- Isolate* const isolate = broker->isolate();
- auto descriptors =
- Handle<DescriptorArray>::cast(instance_descriptors()->object());
- CHECK_EQ(*descriptors, map->instance_descriptors(kRelaxedLoad));
-
- PropertyDescriptor d;
- d.key = broker->GetOrCreateData(descriptors->GetKey(descriptor_index));
- MaybeObject value = descriptors->GetValue(descriptor_index);
- HeapObject obj;
- if (value.GetHeapObjectIfStrong(&obj)) {
- d.value = broker->GetOrCreateData(obj);
- }
- d.details = descriptors->GetDetails(descriptor_index);
- if (d.details.location() == kField) {
- d.field_index = FieldIndex::ForDescriptor(*map, descriptor_index);
- d.field_owner =
- broker->GetOrCreateData(map->FindFieldOwner(isolate, descriptor_index));
- d.field_type =
- broker->GetOrCreateData(descriptors->GetFieldType(descriptor_index));
- d.is_unboxed_double_field = map->IsUnboxedDoubleField(d.field_index);
- }
- contents[descriptor_index.as_int()] = d;
-
- if (d.details.location() == kField && !d.field_owner->should_access_heap()) {
- // Recurse on the owner map.
- d.field_owner->AsMap()->SerializeOwnDescriptor(broker, descriptor_index);
+ if (!instance_descriptors()->should_access_heap()) {
+ DescriptorArrayData* descriptors =
+ instance_descriptors()->AsDescriptorArray();
+ descriptors->SerializeDescriptor(broker, map, descriptor_index);
}
-
- TRACE(broker, "Copied descriptor " << descriptor_index.as_int() << " into "
- << instance_descriptors() << " ("
- << contents.size() << " total)");
}
void MapData::SerializeRootMap(JSHeapBroker* broker) {
@@ -2191,6 +2307,8 @@ void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
}
DCHECK_NULL(elements_);
+ DCHECK(!serialized_elements_);
+ serialized_elements_ = true;
elements_ = broker->GetOrCreateData(elements_object);
DCHECK(elements_->IsFixedArrayBase());
@@ -2238,33 +2356,24 @@ void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
// this field.
DCHECK_EQ(field_index.property_index(),
static_cast<int>(inobject_fields_.size()));
- if (boilerplate->IsUnboxedDoubleField(field_index)) {
- uint64_t value_bits =
- boilerplate->RawFastDoublePropertyAsBitsAt(field_index);
- inobject_fields_.push_back(JSObjectField{value_bits});
- } else {
- Handle<Object> value(boilerplate->RawFastPropertyAt(field_index),
- isolate);
- // In case of double fields we use a sentinel NaN value to mark
- // uninitialized fields. A boilerplate value with such a field may migrate
- // from its double to a tagged representation. If the double is unboxed,
- // the raw double is converted to a heap number, otherwise the (boxed)
- // double ceases to be mutable, and becomes a normal heap number. The
- // sentinel value carries no special meaning when it occurs in a heap
- // number, so we would like to recover the uninitialized value. We check
- // for the sentinel here, specifically, since migrations might have been
- // triggered as part of boilerplate serialization.
- if (!details.representation().IsDouble() && value->IsHeapNumber() &&
- HeapNumber::cast(*value).value_as_bits() == kHoleNanInt64) {
- value = isolate->factory()->uninitialized_value();
- }
- ObjectData* value_data = broker->GetOrCreateData(value);
- if (value_data->IsJSObject() && !value_data->should_access_heap()) {
- value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
- depth - 1);
- }
- inobject_fields_.push_back(JSObjectField{value_data});
+ Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate);
+ // In case of double fields we use a sentinel NaN value to mark
+ // uninitialized fields. A boilerplate value with such a field may migrate
+ // from its double to a tagged representation. The sentinel value carries
+ // no special meaning when it occurs in a heap number, so we would like to
+ // recover the uninitialized value. We check for the sentinel here,
+ // specifically, since migrations might have been triggered as part of
+ // boilerplate serialization.
+ if (!details.representation().IsDouble() && value->IsHeapNumber() &&
+ HeapNumber::cast(*value).value_as_bits() == kHoleNanInt64) {
+ value = isolate->factory()->uninitialized_value();
}
+ ObjectData* value_data = broker->GetOrCreateData(value);
+ if (value_data->IsJSObject() && !value_data->should_access_heap()) {
+ value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
+ depth - 1);
+ }
+ inobject_fields_.push_back(value_data);
}
TRACE(broker, "Copied " << inobject_fields_.size() << " in-object fields");
@@ -2272,24 +2381,22 @@ void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
map()->AsMap()->SerializeOwnDescriptors(broker);
}
- if (IsJSArray()) AsJSArray()->Serialize(broker);
+ if (IsJSArray() && !FLAG_turbo_direct_heap_access) {
+ AsJSArray()->Serialize(broker);
+ }
}
-void JSRegExpData::SerializeAsRegExpBoilerplate(JSHeapBroker* broker) {
- if (serialized_as_reg_exp_boilerplate_) return;
- serialized_as_reg_exp_boilerplate_ = true;
-
- TraceScope tracer(broker, this, "JSRegExpData::SerializeAsRegExpBoilerplate");
- Handle<JSRegExp> boilerplate = Handle<JSRegExp>::cast(object());
+void RegExpBoilerplateDescriptionData::Serialize(JSHeapBroker* broker) {
+ if (serialized_) return; // Only serialize once.
+ serialized_ = true;
- SerializeElements(broker);
+ TraceScope tracer(broker, this,
+ "RegExpBoilerplateDescriptionData::Serialize");
+ auto boilerplate = Handle<RegExpBoilerplateDescription>::cast(object());
- raw_properties_or_hash_ =
- broker->GetOrCreateData(boilerplate->raw_properties_or_hash());
data_ = broker->GetOrCreateData(boilerplate->data());
source_ = broker->GetOrCreateData(boilerplate->source());
- flags_ = broker->GetOrCreateData(boilerplate->flags());
- last_index_ = broker->GetOrCreateData(boilerplate->last_index());
+ flags_ = boilerplate->flags();
}
#ifdef DEBUG
@@ -2374,6 +2481,7 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
array_and_object_prototypes_(zone()),
tracing_enabled_(tracing_enabled),
is_concurrent_inlining_(is_concurrent_inlining),
+ is_isolate_bootstrapping_(isolate->bootstrapper()->IsActive()),
code_kind_(code_kind),
feedback_(zone()),
property_access_infos_(zone()),
@@ -2427,6 +2535,12 @@ void JSHeapBroker::AttachLocalIsolate(OptimizedCompilationInfo* info,
DCHECK_NOT_NULL(local_isolate_);
local_isolate_->heap()->AttachPersistentHandles(
info->DetachPersistentHandles());
+
+ if (FLAG_turbo_direct_heap_access) {
+ // Ensure any serialization that happens on the background has been
+ // performed.
+ target_native_context().SerializeOnBackground();
+ }
}
void JSHeapBroker::DetachLocalIsolate(OptimizedCompilationInfo* info) {
@@ -2547,7 +2661,8 @@ void JSHeapBroker::SetTargetNativeContextRef(
(mode() == kSerializing &&
target_native_context_->object().equals(native_context) &&
target_native_context_->data_->kind() == kUnserializedHeapObject));
- target_native_context_ = NativeContextRef(this, native_context);
+ target_native_context_ =
+ NativeContextRef(this, CanonicalPersistentHandle(*native_context));
}
void JSHeapBroker::CollectArrayAndObjectPrototypes() {
@@ -2637,18 +2752,21 @@ void JSHeapBroker::InitializeAndStartSerializing(
// Throw away the dummy data that we created while disabled.
refs_->Clear();
- refs_ = nullptr;
-
refs_ =
zone()->New<RefsMap>(kInitialRefsBucketCount, AddressMatcher(), zone());
SetTargetNativeContextRef(native_context);
target_native_context().Serialize();
+ if (!FLAG_turbo_direct_heap_access) {
+ // Perform full native context serialization now if we can't do it later on
+ // the background thread.
+ target_native_context().SerializeOnBackground();
+ }
CollectArrayAndObjectPrototypes();
Factory* const f = isolate()->factory();
- {
+ if (!FLAG_turbo_direct_heap_access) {
ObjectData* data;
data = GetOrCreateData(f->array_buffer_detaching_protector());
if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
@@ -2676,56 +2794,125 @@ void JSHeapBroker::InitializeAndStartSerializing(
TRACE(this, "Finished serializing standard objects");
}
+ObjectData* JSHeapBroker::GetOrCreateData(Handle<Object> object,
+ ObjectRef::BackgroundSerialization background_serialization) {
+ ObjectData* return_value =
+ TryGetOrCreateData(object, true, background_serialization);
+ DCHECK_NOT_NULL(return_value);
+ return return_value;
+}
+
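// Caller-side sketch (modelled on PropertyCellData::Serialize above): code
// that may run off the main thread uses the Try variant and bails out on
// nullptr, whereas GetOrCreateData asserts that data creation succeeded:
//   ObjectData* data = broker->TryGetOrCreateData(handle, false);
//   if (data == nullptr) return false;  // e.g. retry serialization elsewhere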
// clang-format off
-ObjectData* JSHeapBroker::GetOrCreateData(Handle<Object> object) {
- RefsMap::Entry* entry = refs_->LookupOrInsert(object.address());
- ObjectData* object_data = entry->value;
-
- if (object_data == nullptr) {
- ObjectData** data_storage = &(entry->value);
- // TODO(neis): Remove these Allow* once we serialize everything upfront.
- AllowHandleDereference handle_dereference;
- if (object->IsSmi()) {
- object_data = zone()->New<ObjectData>(this, data_storage, object, kSmi);
- } else if (IsReadOnlyHeapObject(*object)) {
- object_data = zone()->New<ObjectData>(this, data_storage, object,
- kUnserializedReadOnlyHeapObject);
+ObjectData* JSHeapBroker::TryGetOrCreateData(Handle<Object> object,
+ bool crash_on_error,
+ ObjectRef::BackgroundSerialization background_serialization) {
+ RefsMap::Entry* entry = refs_->Lookup(object.address());
+ if (entry != nullptr) return entry->value;
+
+ if (mode() == JSHeapBroker::kDisabled) {
+ entry = refs_->LookupOrInsert(object.address());
+ ObjectData** storage = &(entry->value);
+ if (*storage == nullptr) {
+ entry->value = zone()->New<ObjectData>(
+ this, storage, object,
+ object->IsSmi() ? kSmi : kUnserializedHeapObject);
+ }
+ return *storage;
+ }
+
+ CHECK(mode() == JSHeapBroker::kSerializing ||
+ mode() == JSHeapBroker::kSerialized);
+
+ ObjectData* object_data;
+ if (object->IsSmi()) {
+ entry = refs_->LookupOrInsert(object.address());
+ object_data = zone()->New<ObjectData>(this, &(entry->value), object, kSmi);
+ } else if (IsReadOnlyHeapObject(*object)) {
+ entry = refs_->LookupOrInsert(object.address());
+ object_data = zone()->New<ObjectData>(this, &(entry->value), object,
+ kUnserializedReadOnlyHeapObject);
// TODO(solanes, v8:10866): Remove the if/else in this macro once we remove the
// FLAG_turbo_direct_heap_access.
-#define CREATE_DATA_FOR_DIRECT_READ(name) \
- } else if (object->Is##name()) { \
- if (FLAG_turbo_direct_heap_access) { \
- object_data = zone()->New<ObjectData>( \
- this, data_storage, object, kNeverSerializedHeapObject); \
- } else { \
- CHECK_EQ(mode(), kSerializing); \
- AllowHandleAllocation handle_allocation; \
- object_data = zone()->New<name##Data>(this, data_storage, \
- Handle<name>::cast(object)); \
- }
- HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(CREATE_DATA_FOR_DIRECT_READ)
+#define CREATE_DATA_FOR_DIRECT_READ(name) \
+ } else if (object->Is##name()) { \
+ if (FLAG_turbo_direct_heap_access) { \
+ entry = refs_->LookupOrInsert(object.address()); \
+ object_data = zone()->New<ObjectData>( \
+ this, &(entry->value), object, kNeverSerializedHeapObject); \
+ } else if (mode() == kSerializing) { \
+ entry = refs_->LookupOrInsert(object.address()); \
+ object_data = zone()->New<name##Data>(this, &(entry->value), \
+ Handle<name>::cast(object)); \
+ } else { \
+ CHECK(!crash_on_error); \
+ return nullptr; \
+ }
+ HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(CREATE_DATA_FOR_DIRECT_READ)
#undef CREATE_DATA_FOR_DIRECT_READ
-#define CREATE_DATA_FOR_SERIALIZATION(name) \
- } else if (object->Is##name()) { \
- CHECK_EQ(mode(), kSerializing); \
- AllowHandleAllocation handle_allocation; \
- object_data = zone()->New<name##Data>(this, data_storage, \
- Handle<name>::cast(object));
- HEAP_BROKER_SERIALIZED_OBJECT_LIST(CREATE_DATA_FOR_SERIALIZATION)
+#define CREATE_DATA_FOR_POSSIBLE_SERIALIZATION(name) \
+ } else if (object->Is##name()) { \
+ if (mode() == kSerialized && \
+ background_serialization != \
+ ObjectRef::BackgroundSerialization::kAllowed) { \
+ CHECK(!crash_on_error); \
+ return nullptr; \
+ } \
+ entry = refs_->LookupOrInsert(object.address()); \
+ ObjectDataKind kind = (background_serialization == \
+ ObjectRef::BackgroundSerialization::kAllowed) \
+ ? kBackgroundSerializedHeapObject \
+ : kSerializedHeapObject; \
+ object_data = zone()->New<name##Data>(this, &(entry->value), \
+ Handle<name>::cast(object), \
+ kind);
+ HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(
+ CREATE_DATA_FOR_POSSIBLE_SERIALIZATION)
+#undef CREATE_DATA_FOR_POSSIBLE_SERIALIZATION
+#define CREATE_DATA_FOR_BACKGROUND_SERIALIZATION(name) \
+ } else if (object->Is##name()) { \
+ if (FLAG_turbo_direct_heap_access) { \
+ entry = refs_->LookupOrInsert(object.address()); \
+ object_data = zone()->New<name##Data>( \
+ this, &(entry->value), Handle<name>::cast(object), \
+ kBackgroundSerializedHeapObject); \
+ } else if (mode() == kSerializing) { \
+ entry = refs_->LookupOrInsert(object.address()); \
+ object_data = zone()->New<name##Data>(this, &(entry->value), \
+ Handle<name>::cast(object)); \
+ } else { \
+ CHECK(!crash_on_error); \
+ return nullptr; \
+ }
+ HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(
+ CREATE_DATA_FOR_BACKGROUND_SERIALIZATION)
#undef CREATE_DATA_FOR_SERIALIZATION
- } else {
- UNREACHABLE();
+#define CREATE_DATA_FOR_SERIALIZATION(name) \
+ } else if (object->Is##name()) { \
+ if (mode() == kSerializing) { \
+ entry = refs_->LookupOrInsert(object.address()); \
+ object_data = zone()->New<name##Data>(this, &(entry->value), \
+ Handle<name>::cast(object)); \
+ } else { \
+ CHECK(!crash_on_error); \
+ return nullptr; \
}
- // At this point the entry pointer is not guaranteed to be valid as
- // the refs_ hash hable could be resized by one of the constructors above.
- DCHECK_EQ(object_data, refs_->Lookup(object.address())->value);
+ HEAP_BROKER_SERIALIZED_OBJECT_LIST(CREATE_DATA_FOR_SERIALIZATION)
+#undef CREATE_DATA_FOR_SERIALIZATION
+ } else {
+ UNREACHABLE();
}
+ // At this point the entry pointer is not guaranteed to be valid as
+ // the refs_ hash table could be resized by one of the constructors above.
+ DCHECK_EQ(object_data, refs_->Lookup(object.address())->value);
return object_data;
}
// clang-format on
-ObjectData* JSHeapBroker::GetOrCreateData(Object object) {
- return GetOrCreateData(CanonicalPersistentHandle(object));
+ObjectData* JSHeapBroker::GetOrCreateData(
+ Object object,
+ ObjectRef::BackgroundSerialization background_serialization) {
+ return GetOrCreateData(CanonicalPersistentHandle(object),
+ background_serialization);
}
#define DEFINE_IS_AND_AS(Name) \
@@ -2735,9 +2922,24 @@ ObjectData* JSHeapBroker::GetOrCreateData(Object object) {
return Name##Ref(broker(), data()); \
}
HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEFINE_IS_AND_AS)
+HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_IS_AND_AS)
+HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_IS_AND_AS)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DEFINE_IS_AND_AS)
#undef DEFINE_IS_AND_AS
+bool JSHeapBroker::StackHasOverflowed() const {
+ DCHECK_IMPLIES(local_isolate_ == nullptr,
+ ThreadId::Current() == isolate_->thread_id());
+ return (local_isolate_ != nullptr)
+ ? StackLimitCheck::HasOverflowed(local_isolate_)
+ : StackLimitCheck(isolate_).HasOverflowed();
+}
+
+bool JSHeapBroker::ObjectMayBeUninitialized(Handle<Object> object) const {
+ return !IsMainThread() && object->IsHeapObject() &&
+ isolate()->heap()->IsPendingAllocation(HeapObject::cast(*object));
+}
+
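// Typical background-thread guard (sketch; mirrors the use in
// PropertyCellData::Serialize above):
//   if (broker->ObjectMayBeUninitialized(value)) {
//     DCHECK(!broker->IsMainThread());
//     return false;  // the object may still be under construction
//   }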
bool ObjectRef::IsSmi() const { return data()->is_smi(); }
int ObjectRef::AsSmi() const {
@@ -2900,7 +3102,13 @@ OddballType MapRef::oddball_type() const {
FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const {
if (data_->should_access_heap()) {
- return FeedbackCellRef(broker(), object()->GetClosureFeedbackCell(index));
+ FeedbackCell cell = object()->closure_feedback_cell(index);
+ Handle<FeedbackCell> cell_handle =
+ broker()->CanonicalPersistentHandle(cell);
+ // These should all be available because we request the cell for each
+ // CreateClosure bytecode.
+ ObjectData* cell_data = broker()->GetOrCreateData(cell_handle);
+ return FeedbackCellRef(broker(), cell_data);
}
return FeedbackCellRef(
@@ -2908,24 +3116,6 @@ FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const {
data()->AsFeedbackVector()->GetClosureFeedbackCell(broker(), index));
}
-double JSObjectRef::RawFastDoublePropertyAt(FieldIndex index) const {
- if (data_->should_access_heap()) {
- return object()->RawFastDoublePropertyAt(index);
- }
- JSObjectData* object_data = data()->AsJSObject();
- CHECK(index.is_inobject());
- return object_data->GetInobjectField(index.property_index()).AsDouble();
-}
-
-uint64_t JSObjectRef::RawFastDoublePropertyAsBitsAt(FieldIndex index) const {
- if (data_->should_access_heap()) {
- return object()->RawFastDoublePropertyAsBitsAt(index);
- }
- JSObjectData* object_data = data()->AsJSObject();
- CHECK(index.is_inobject());
- return object_data->GetInobjectField(index.property_index()).AsBitsOfDouble();
-}
-
ObjectRef JSObjectRef::RawFastPropertyAt(FieldIndex index) const {
if (data_->should_access_heap()) {
return ObjectRef(broker(), broker()->CanonicalPersistentHandle(
@@ -2933,9 +3123,8 @@ ObjectRef JSObjectRef::RawFastPropertyAt(FieldIndex index) const {
}
JSObjectData* object_data = data()->AsJSObject();
CHECK(index.is_inobject());
- return ObjectRef(
- broker(),
- object_data->GetInobjectField(index.property_index()).AsObject());
+ return ObjectRef(broker(),
+ object_data->GetInobjectField(index.property_index()));
}
bool AllocationSiteRef::IsFastLiteral() const {
@@ -2961,7 +3150,7 @@ void JSObjectRef::SerializeElements() {
void JSObjectRef::EnsureElementsTenured() {
if (data_->should_access_heap()) {
- Handle<FixedArrayBase> object_elements = elements().object();
+ Handle<FixedArrayBase> object_elements = elements().value().object();
if (ObjectInYoungGeneration(*object_elements)) {
// If we would like to pretenure a fixed cow array, we must ensure that
// the array is already in old space, otherwise we'll create too many
@@ -2977,12 +3166,13 @@ void JSObjectRef::EnsureElementsTenured() {
}
FieldIndex MapRef::GetFieldIndexFor(InternalIndex descriptor_index) const {
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
if (data_->should_access_heap()) {
return FieldIndex::ForDescriptor(*object(), descriptor_index);
}
DescriptorArrayData* descriptors =
data()->AsMap()->instance_descriptors()->AsDescriptorArray();
- return descriptors->contents().at(descriptor_index.as_int()).field_index;
+ return descriptors->GetFieldIndexFor(descriptor_index);
}
int MapRef::GetInObjectPropertyOffset(int i) const {
@@ -2994,27 +3184,13 @@ int MapRef::GetInObjectPropertyOffset(int i) const {
PropertyDetails MapRef::GetPropertyDetails(
InternalIndex descriptor_index) const {
- if (data_->should_access_heap()) {
- return object()
- ->instance_descriptors(kRelaxedLoad)
- .GetDetails(descriptor_index);
- }
- DescriptorArrayData* descriptors =
- data()->AsMap()->instance_descriptors()->AsDescriptorArray();
- return descriptors->contents().at(descriptor_index.as_int()).details;
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
+ return instance_descriptors().GetPropertyDetails(descriptor_index);
}
NameRef MapRef::GetPropertyKey(InternalIndex descriptor_index) const {
- if (data_->should_access_heap()) {
- return NameRef(broker(), broker()->CanonicalPersistentHandle(
- object()
- ->instance_descriptors(kRelaxedLoad)
- .GetKey(descriptor_index)));
- }
- DescriptorArrayData* descriptors =
- data()->AsMap()->instance_descriptors()->AsDescriptorArray();
- return NameRef(broker(),
- descriptors->contents().at(descriptor_index.as_int()).key);
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
+ return instance_descriptors().GetPropertyKey(descriptor_index);
}
bool MapRef::IsFixedCowArrayMap() const {
@@ -3028,20 +3204,21 @@ bool MapRef::IsPrimitiveMap() const {
}
MapRef MapRef::FindFieldOwner(InternalIndex descriptor_index) const {
- if (data_->should_access_heap()) {
- Handle<Map> owner(
- object()->FindFieldOwner(broker()->isolate(), descriptor_index),
- broker()->isolate());
- return MapRef(broker(), owner);
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ // TODO(solanes, v8:7790): Consider caching the result of the field owner on
+    // the descriptor array. It would be useful for the same map as well as any
+ // other map sharing that descriptor array.
+ return MapRef(broker(), broker()->GetOrCreateData(object()->FindFieldOwner(
+ broker()->isolate(), descriptor_index)));
}
DescriptorArrayData* descriptors =
data()->AsMap()->instance_descriptors()->AsDescriptorArray();
- return MapRef(
- broker(),
- descriptors->contents().at(descriptor_index.as_int()).field_owner);
+ return MapRef(broker(), descriptors->FindFieldOwner(descriptor_index));
}
ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const {
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
if (data_->should_access_heap()) {
Handle<FieldType> field_type(object()
->instance_descriptors(kRelaxedLoad)
@@ -3051,21 +3228,22 @@ ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const {
}
DescriptorArrayData* descriptors =
data()->AsMap()->instance_descriptors()->AsDescriptorArray();
- return ObjectRef(
- broker(),
- descriptors->contents().at(descriptor_index.as_int()).field_type);
+ return ObjectRef(broker(), descriptors->GetFieldType(descriptor_index));
}
-bool MapRef::IsUnboxedDoubleField(InternalIndex descriptor_index) const {
+base::Optional<ObjectRef> StringRef::GetCharAsStringOrUndefined(
+ uint32_t index, SerializationPolicy policy) const {
if (data_->should_access_heap()) {
- return object()->IsUnboxedDoubleField(
- FieldIndex::ForDescriptor(*object(), descriptor_index));
+ // TODO(solanes, neis, v8:7790, v8:11012): Re-enable this optimization for
+ // concurrent inlining when we have the infrastructure to safely do so.
+ if (broker()->is_concurrent_inlining()) return base::nullopt;
+ CHECK_EQ(data_->kind(), ObjectDataKind::kUnserializedHeapObject);
+ return GetOwnElementFromHeap(broker(), object(), index, true);
}
- DescriptorArrayData* descriptors =
- data()->AsMap()->instance_descriptors()->AsDescriptorArray();
- return descriptors->contents()
- .at(descriptor_index.as_int())
- .is_unboxed_double_field;
+ ObjectData* element =
+ data()->AsString()->GetCharAsStringOrUndefined(broker(), index, policy);
+ if (element == nullptr) return base::nullopt;
+ return ObjectRef(broker(), element);
}
base::Optional<int> StringRef::length() const {
@@ -3142,40 +3320,6 @@ Float64 FixedDoubleArrayRef::get(int i) const {
}
}
-uint8_t BytecodeArrayRef::get(int index) const { return object()->get(index); }
-
-Address BytecodeArrayRef::GetFirstBytecodeAddress() const {
- return object()->GetFirstBytecodeAddress();
-}
-
-Handle<Object> BytecodeArrayRef::GetConstantAtIndex(int index) const {
- if (data_->should_access_heap()) {
- return broker()->CanonicalPersistentHandle(
- object()->constant_pool().get(index));
- }
- return data()->AsBytecodeArray()->GetConstantAtIndex(index,
- broker()->isolate());
-}
-
-bool BytecodeArrayRef::IsConstantAtIndexSmi(int index) const {
- if (data_->should_access_heap()) {
- return object()->constant_pool().get(index).IsSmi();
- }
- return data()->AsBytecodeArray()->IsConstantAtIndexSmi(index);
-}
-
-Smi BytecodeArrayRef::GetConstantAtIndexAsSmi(int index) const {
- if (data_->should_access_heap()) {
- return Smi::cast(object()->constant_pool().get(index));
- }
- return data()->AsBytecodeArray()->GetConstantAtIndexAsSmi(index);
-}
-
-void BytecodeArrayRef::SerializeForCompilation() {
- if (data_->should_access_heap()) return;
- data()->AsBytecodeArray()->SerializeForCompilation(broker());
-}
-
Handle<ByteArray> BytecodeArrayRef::SourcePositionTable() const {
return broker()->CanonicalPersistentHandle(object()->SourcePositionTable());
}
@@ -3273,8 +3417,6 @@ BIMODAL_ACCESSOR(HeapObject, Map, map)
BIMODAL_ACCESSOR_C(HeapNumber, double, value)
-BIMODAL_ACCESSOR(JSArray, Object, length)
-
BIMODAL_ACCESSOR(JSBoundFunction, JSReceiver, bound_target_function)
BIMODAL_ACCESSOR(JSBoundFunction, Object, bound_this)
BIMODAL_ACCESSOR(JSBoundFunction, FixedArray, bound_arguments)
@@ -3284,7 +3426,6 @@ BIMODAL_ACCESSOR_C(JSDataView, size_t, byte_length)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_feedback_vector)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_initial_map)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_prototype)
-BIMODAL_ACCESSOR_C(JSFunction, bool, HasAttachedOptimizedCode)
BIMODAL_ACCESSOR_C(JSFunction, bool, PrototypeRequiresRuntimeLookup)
BIMODAL_ACCESSOR(JSFunction, Context, context)
BIMODAL_ACCESSOR(JSFunction, NativeContext, native_context)
@@ -3293,14 +3434,9 @@ BIMODAL_ACCESSOR(JSFunction, Object, prototype)
BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
BIMODAL_ACCESSOR(JSFunction, FeedbackCell, raw_feedback_cell)
BIMODAL_ACCESSOR(JSFunction, FeedbackVector, feedback_vector)
-BIMODAL_ACCESSOR(JSFunction, Code, code)
BIMODAL_ACCESSOR_C(JSGlobalObject, bool, IsDetached)
-BIMODAL_ACCESSOR_C(JSTypedArray, bool, is_on_heap)
-BIMODAL_ACCESSOR_C(JSTypedArray, size_t, length)
-BIMODAL_ACCESSOR(JSTypedArray, HeapObject, buffer)
-
BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field2, elements_kind,
Map::Bits2::ElementsKindBits)
BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field3, is_dictionary_map,
@@ -3329,11 +3465,9 @@ BIMODAL_ACCESSOR_C(Map, int, UnusedPropertyFields)
BIMODAL_ACCESSOR(Map, HeapObject, prototype)
BIMODAL_ACCESSOR_C(Map, InstanceType, instance_type)
BIMODAL_ACCESSOR(Map, Object, GetConstructor)
-BIMODAL_ACCESSOR(Map, HeapObject, GetBackPointer)
+BIMODAL_ACCESSOR_WITH_FLAG(Map, HeapObject, GetBackPointer)
BIMODAL_ACCESSOR_C(Map, bool, is_abandoned_prototype_map)
-BIMODAL_ACCESSOR_C(Code, unsigned, inlined_bytecode_size)
-
#define DEF_NATIVE_CONTEXT_ACCESSOR(type, name) \
BIMODAL_ACCESSOR(NativeContext, type, name)
BROKER_NATIVE_CONTEXT_FIELDS(DEF_NATIVE_CONTEXT_ACCESSOR)
@@ -3344,6 +3478,10 @@ BIMODAL_ACCESSOR_C(ObjectBoilerplateDescription, int, size)
BIMODAL_ACCESSOR(PropertyCell, Object, value)
BIMODAL_ACCESSOR_C(PropertyCell, PropertyDetails, property_details)
+BIMODAL_ACCESSOR(RegExpBoilerplateDescription, FixedArray, data)
+BIMODAL_ACCESSOR(RegExpBoilerplateDescription, String, source)
+BIMODAL_ACCESSOR_C(RegExpBoilerplateDescription, int, flags)
+
base::Optional<CallHandlerInfoRef> FunctionTemplateInfoRef::call_code() const {
if (data_->should_access_heap()) {
return CallHandlerInfoRef(broker(), broker()->CanonicalPersistentHandle(
@@ -3474,24 +3612,39 @@ SharedFunctionInfo::Inlineability SharedFunctionInfoRef::GetInlineability()
  return ObjectRef::data()->AsSharedFunctionInfo()->GetInlineability();
}
-BIMODAL_ACCESSOR(FeedbackCell, HeapObject, value)
-
-base::Optional<ObjectRef> MapRef::GetStrongValue(
- InternalIndex descriptor_index) const {
+base::Optional<FeedbackVectorRef> FeedbackCellRef::value() const {
if (data_->should_access_heap()) {
- MaybeObject value =
- object()->instance_descriptors(kRelaxedLoad).GetValue(descriptor_index);
- HeapObject object;
- if (value.GetHeapObjectIfStrong(&object)) {
- return ObjectRef(broker(), broker()->CanonicalPersistentHandle((object)));
+ // Note that we use the synchronized accessor.
+ Object value = object()->value(kAcquireLoad);
+ if (!value.IsFeedbackVector()) return base::nullopt;
+ auto vector_handle = broker()->CanonicalPersistentHandle(value);
+ ObjectData* vector = broker()->TryGetOrCreateData(vector_handle);
+ if (vector) {
+ return FeedbackVectorRef(broker(), vector);
}
+ TRACE_BROKER_MISSING(
+ broker(),
+ "Unable to retrieve FeedbackVector from FeedbackCellRef " << *this);
return base::nullopt;
}
- ObjectData* value = data()->AsMap()->GetStrongValue(descriptor_index);
- if (!value) {
- return base::nullopt;
+ ObjectData* vector = ObjectRef::data()->AsFeedbackCell()->value();
+ return FeedbackVectorRef(broker(), vector->AsFeedbackVector());
+}
+
+base::Optional<ObjectRef> MapRef::GetStrongValue(
+ InternalIndex descriptor_index) const {
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
+ return instance_descriptors().GetStrongValue(descriptor_index);
+}
+
+DescriptorArrayRef MapRef::instance_descriptors() const {
+ if (data_->should_access_heap()) {
+ return DescriptorArrayRef(
+ broker(), broker()->CanonicalPersistentHandle(
+ object()->instance_descriptors(kRelaxedLoad)));
}
- return ObjectRef(broker(), value);
+
+ return DescriptorArrayRef(broker(), data()->AsMap()->instance_descriptors());
}
void MapRef::SerializeRootMap() {
@@ -3513,8 +3666,51 @@ base::Optional<MapRef> MapRef::FindRootMap() const {
return base::nullopt;
}
+bool JSTypedArrayRef::is_on_heap() const {
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ // Safe to read concurrently because:
+ // - host object seen by serializer.
+ // - underlying field written 1. during initialization or 2. with
+ // release-store.
+ return object()->is_on_heap(kAcquireLoad);
+ }
+ return data()->AsJSTypedArray()->data_ptr();
+}
+
+size_t JSTypedArrayRef::length() const {
+ CHECK(!is_on_heap());
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ // Safe to read concurrently because:
+ // - immutable after initialization.
+ // - host object seen by serializer.
+ return object()->length();
+ }
+ return data()->AsJSTypedArray()->length();
+}
+
+HeapObjectRef JSTypedArrayRef::buffer() const {
+ CHECK(!is_on_heap());
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ // Safe to read concurrently because:
+ // - immutable after initialization.
+ // - host object seen by serializer.
+ Handle<JSArrayBuffer> value =
+ broker()->CanonicalPersistentHandle(object()->buffer());
+ return JSObjectRef{broker(), value};
+ }
+ return HeapObjectRef{broker(), data()->AsJSTypedArray()->buffer()};
+}
+
void* JSTypedArrayRef::data_ptr() const {
- if (data_->should_access_heap()) {
+ CHECK(!is_on_heap());
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ // Safe to read concurrently because:
+ // - host object seen by serializer.
+ // - underlying field written 1. during initialization or 2. protected by
+ // the is_on_heap release/acquire semantics (external_pointer store
+ // happens-before base_pointer store, and this external_pointer load
+ // happens-after base_pointer load).
+ STATIC_ASSERT(JSTypedArray::kOffHeapDataPtrEqualsExternalPointer);
return object()->DataPtr();
}
return data()->AsJSTypedArray()->data_ptr();
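
The comments in is_on_heap() and data_ptr() above rely on release/acquire publication: the off-heap pointer is written before the on-heap flag is cleared with a release store, and a reader orders its loads behind an acquire load of that flag. A self-contained sketch of the pattern (field names are illustrative, not the actual JSTypedArray layout):

#include <atomic>
#include <cstdint>

struct TypedArraySketch {
  std::uintptr_t external_pointer = 0;   // plain (non-atomic) payload field
  std::atomic<bool> is_on_heap{true};    // publication flag, release/acquire

  // Writer: publish the payload first, then flip the flag with release.
  void PublishOffHeap(std::uintptr_t ptr) {
    external_pointer = ptr;                              // 1. plain store
    is_on_heap.store(false, std::memory_order_release);  // 2. release store
  }

  // Reader (e.g. a background thread): the acquire load of is_on_heap
  // happens-before the subsequent read of external_pointer.
  bool TryReadDataPtr(std::uintptr_t* out) const {
    if (is_on_heap.load(std::memory_order_acquire)) return false;
    *out = external_pointer;  // ordered after the acquire load, so fully visible
    return true;
  }
};

If the acquire load still observes the array as on-heap, the reader must not touch the off-heap pointer at all.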
@@ -3707,23 +3903,14 @@ Maybe<double> ObjectRef::OddballToNumber() const {
}
}
-base::Optional<ObjectRef> ObjectRef::GetOwnConstantElement(
+base::Optional<ObjectRef> JSObjectRef::GetOwnConstantElement(
uint32_t index, SerializationPolicy policy) const {
- if (!(IsJSObject() || IsString())) return base::nullopt;
if (data_->should_access_heap()) {
- // TODO(solanes, neis, v8:7790, v8:11012): Re-enable this optmization for
- // concurrent inlining when we have the infrastructure to safely do so.
- if (broker()->is_concurrent_inlining() && IsString()) return base::nullopt;
CHECK_EQ(data_->kind(), ObjectDataKind::kUnserializedHeapObject);
return GetOwnElementFromHeap(broker(), object(), index, true);
}
- ObjectData* element = nullptr;
- if (IsJSObject()) {
- element =
- data()->AsJSObject()->GetOwnConstantElement(broker(), index, policy);
- } else if (IsString()) {
- element = data()->AsString()->GetCharAsString(broker(), index, policy);
- }
+ ObjectData* element =
+ data()->AsJSObject()->GetOwnConstantElement(broker(), index, policy);
if (element == nullptr) return base::nullopt;
return ObjectRef(broker(), element);
}
@@ -3742,25 +3929,81 @@ base::Optional<ObjectRef> JSObjectRef::GetOwnDataProperty(
return ObjectRef(broker(), property);
}
-base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
- uint32_t index, SerializationPolicy policy) const {
- if (data_->should_access_heap()) {
- if (!object()->elements().IsCowArray()) return base::nullopt;
- return GetOwnElementFromHeap(broker(), object(), index, false);
- }
+ObjectRef JSArrayRef::GetBoilerplateLength() const {
+ // Safe to read concurrently because:
+ // - boilerplates are immutable after initialization.
+ // - boilerplates are published into the feedback vector.
+ return length_unsafe();
+}
- if (policy == SerializationPolicy::kSerializeIfNeeded) {
- data()->AsJSObject()->SerializeElements(broker());
- } else if (!data()->AsJSObject()->serialized_elements()) {
- TRACE(broker(), "'elements' on " << this);
- return base::nullopt;
+ObjectRef JSArrayRef::length_unsafe() const {
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ Object o = object()->length(broker()->isolate(), kRelaxedLoad);
+ return ObjectRef{broker(), broker()->CanonicalPersistentHandle(o)};
+ } else {
+ return ObjectRef{broker(), data()->AsJSArray()->length()};
}
- if (!elements().map().IsFixedCowArrayMap()) return base::nullopt;
+}
- ObjectData* element =
- data()->AsJSArray()->GetOwnElement(broker(), index, policy);
- if (element == nullptr) return base::nullopt;
- return ObjectRef(broker(), element);
+base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
+ FixedArrayBaseRef elements_ref, uint32_t index,
+ SerializationPolicy policy) const {
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ // `elements` are currently still serialized as members of JSObjectRef.
+ // TODO(jgruber,v8:7790): Remove the elements equality DCHECK below once
+ // JSObject is no longer serialized.
+ static_assert(std::is_base_of<JSObject, JSArray>::value, "");
+ STATIC_ASSERT(IsSerializedHeapObject<JSObject>());
+
+ // The elements_ref is passed in by callers to make explicit that it is
+ // also used outside of this function, and must match the `elements` used
+ // inside this function.
+ DCHECK(elements_ref.equals(elements().value()));
+
+ // Due to concurrency, the kind read here may not be consistent with
+ // `elements_ref`. But consistency is guaranteed at runtime due to the
+ // `elements` equality check in the caller.
+ ElementsKind elements_kind = GetElementsKind();
+
+ // We only inspect fixed COW arrays, which may only occur for fast
+ // smi/objects elements kinds.
+ if (!IsSmiOrObjectElementsKind(elements_kind)) return {};
+ DCHECK(IsFastElementsKind(elements_kind));
+ if (!elements_ref.map().IsFixedCowArrayMap()) return {};
+
+ // As the name says, the `length` read here is unsafe and may not match
+ // `elements`. We rely on the invariant that any `length` change will
+ // also result in an `elements` change to make this safe. The `elements`
+ // equality check in the caller thus also guards the value of `length`.
+ ObjectRef length_ref = length_unsafe();
+
+ // Likewise we only deal with smi lengths.
+ if (!length_ref.IsSmi()) return {};
+
+ base::Optional<Object> result =
+ ConcurrentLookupIterator::TryGetOwnCowElement(
+ broker()->isolate(), *elements_ref.AsFixedArray().object(),
+ elements_kind, length_ref.AsSmi(), index);
+
+ if (!result.has_value()) return {};
+
+ return ObjectRef{broker(),
+ broker()->CanonicalPersistentHandle(result.value())};
+ } else {
+ DCHECK(!data_->should_access_heap());
+ DCHECK(!FLAG_turbo_direct_heap_access);
+
+ // Just to clarify that `elements_ref` is not used on this path.
+ // GetOwnElement accesses the serialized `elements` field on its own.
+ USE(elements_ref);
+
+ if (!elements().value().map().IsFixedCowArrayMap()) return base::nullopt;
+
+ ObjectData* element =
+ data()->AsJSArray()->GetOwnElement(broker(), index, policy);
+ if (element == nullptr) return base::nullopt;
+ return ObjectRef(broker(), element);
+ }
}
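
GetOwnCowElement above leans on an invariant: any change to `length` also replaces `elements`, so a caller that re-validates its `elements` snapshot implicitly validates everything read through it. A rough sketch of that snapshot-and-revalidate idea, assuming (as for GC-managed COW arrays) that a replaced backing store stays alive and immutable; names are illustrative:

#include <atomic>
#include <optional>

struct CowArraySketch { int values[4]; };  // immutable once published

std::atomic<const CowArraySketch*> g_elements{nullptr};

// Read an element, then re-check that `elements` has not been swapped out in
// the meantime; if it has, the read may be inconsistent and is discarded.
std::optional<int> TryGetOwnCowElement(int index) {
  const CowArraySketch* snapshot = g_elements.load(std::memory_order_acquire);
  if (snapshot == nullptr || index < 0 || index >= 4) return std::nullopt;
  int value = snapshot->values[index];
  if (g_elements.load(std::memory_order_acquire) != snapshot) return std::nullopt;
  return value;
}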
base::Optional<CellRef> SourceTextModuleRef::GetCell(int cell_index) const {
@@ -3784,30 +4027,12 @@ ObjectRef SourceTextModuleRef::import_meta() const {
}
ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object,
+ BackgroundSerialization background_serialization,
bool check_type)
: broker_(broker) {
- switch (broker->mode()) {
- // We may have to create data in JSHeapBroker::kSerialized as well since we
- // read the data from read only heap objects directly instead of serializing
- // them.
- case JSHeapBroker::kSerialized:
- case JSHeapBroker::kSerializing:
- data_ = broker->GetOrCreateData(object);
- break;
- case JSHeapBroker::kDisabled: {
- RefsMap::Entry* entry = broker->refs_->LookupOrInsert(object.address());
- ObjectData** storage = &(entry->value);
- if (*storage == nullptr) {
- entry->value = broker->zone()->New<ObjectData>(
- broker, storage, object,
- object->IsSmi() ? kSmi : kUnserializedHeapObject);
- }
- data_ = *storage;
- break;
- }
- case JSHeapBroker::kRetired:
- UNREACHABLE();
- }
+ CHECK_NE(broker->mode(), JSHeapBroker::kRetired);
+
+ data_ = broker->GetOrCreateData(object, background_serialization);
if (!data_) { // TODO(mslekova): Remove once we're on the background thread.
object->Print();
}
@@ -3873,12 +4098,17 @@ ElementsKind JSObjectRef::GetElementsKind() const {
return map().elements_kind();
}
-FixedArrayBaseRef JSObjectRef::elements() const {
+base::Optional<FixedArrayBaseRef> JSObjectRef::elements() const {
if (data_->should_access_heap()) {
return FixedArrayBaseRef(
broker(), broker()->CanonicalPersistentHandle(object()->elements()));
}
- return FixedArrayBaseRef(broker(), data()->AsJSObject()->elements());
+ const JSObjectData* d = data()->AsJSObject();
+ if (!d->serialized_elements()) {
+ TRACE(broker(), "'elements' on " << this);
+ return base::nullopt;
+ }
+ return FixedArrayBaseRef(broker(), d->elements());
}
int FixedArrayBaseRef::length() const {
@@ -3897,12 +4127,59 @@ Float64 FixedDoubleArrayData::Get(int i) const {
return contents_[i];
}
+PropertyDetails DescriptorArrayRef::GetPropertyDetails(
+ InternalIndex descriptor_index) const {
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ return object()->GetDetails(descriptor_index);
+ }
+ return data()->AsDescriptorArray()->GetPropertyDetails(descriptor_index);
+}
+
+NameRef DescriptorArrayRef::GetPropertyKey(
+ InternalIndex descriptor_index) const {
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ NameRef result(broker(), broker()->CanonicalPersistentHandle(
+ object()->GetKey(descriptor_index)));
+ CHECK(result.IsUniqueName());
+ return result;
+ }
+ return NameRef(broker(),
+ data()->AsDescriptorArray()->GetPropertyKey(descriptor_index));
+}
+
+base::Optional<ObjectRef> DescriptorArrayRef::GetStrongValue(
+ InternalIndex descriptor_index) const {
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ HeapObject heap_object;
+ if (object()
+ ->GetValue(descriptor_index)
+ .GetHeapObjectIfStrong(&heap_object)) {
+ // Since the descriptors in the descriptor array can be changed in-place
+ // via DescriptorArray::Replace, we might get a value that we haven't seen
+ // before.
+ ObjectData* data = broker()->TryGetOrCreateData(
+ broker()->CanonicalPersistentHandle(heap_object));
+ if (data) return ObjectRef(broker(), data);
+
+ TRACE_BROKER_MISSING(broker(), "strong value for descriptor array "
+ << *this << " at index "
+ << descriptor_index.as_int());
+ // Fall through to the base::nullopt below.
+ }
+ return base::nullopt;
+ }
+ ObjectData* value =
+ data()->AsDescriptorArray()->GetStrongValue(descriptor_index);
+ if (!value) return base::nullopt;
+ return ObjectRef(broker(), value);
+}
+
base::Optional<SharedFunctionInfoRef> FeedbackCellRef::shared_function_info()
const {
- if (value().IsFeedbackVector()) {
- FeedbackVectorRef vector = value().AsFeedbackVector();
+ if (value()) {
+ FeedbackVectorRef vector = *value();
if (vector.serialized()) {
- return value().AsFeedbackVector().shared_function_info();
+ return vector.shared_function_info();
}
}
return base::nullopt;
@@ -3924,36 +4201,23 @@ bool NameRef::IsUniqueName() const {
return IsInternalizedString() || IsSymbol();
}
-ObjectRef JSRegExpRef::data() const {
- IF_ACCESS_FROM_HEAP(Object, data);
- return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->data());
-}
-
-ObjectRef JSRegExpRef::flags() const {
- IF_ACCESS_FROM_HEAP(Object, flags);
- return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->flags());
-}
-
-ObjectRef JSRegExpRef::last_index() const {
- IF_ACCESS_FROM_HEAP(Object, last_index);
- return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->last_index());
-}
-
-ObjectRef JSRegExpRef::raw_properties_or_hash() const {
- IF_ACCESS_FROM_HEAP(Object, raw_properties_or_hash);
- return ObjectRef(broker(),
- ObjectRef::data()->AsJSRegExp()->raw_properties_or_hash());
-}
-
-ObjectRef JSRegExpRef::source() const {
- IF_ACCESS_FROM_HEAP(Object, source);
- return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->source());
-}
-
-void JSRegExpRef::SerializeAsRegExpBoilerplate() {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- JSObjectRef::data()->AsJSRegExp()->SerializeAsRegExpBoilerplate(broker());
+void RegExpBoilerplateDescriptionRef::Serialize() {
+ if (data_->should_access_heap()) {
+ // Even if the regexp boilerplate object itself is no longer serialized,
+ // both `data` and `source` fields still are and thus we need to make sure
+ // to visit them.
+ // TODO(jgruber,v8:7790): Remove once these are no longer serialized types.
+ STATIC_ASSERT(IsSerializedHeapObject<FixedArray>());
+ FixedArrayRef data_ref{
+ broker(), broker()->CanonicalPersistentHandle(object()->data())};
+ STATIC_ASSERT(IsSerializedHeapObject<String>());
+ StringRef source_ref{
+ broker(), broker()->CanonicalPersistentHandle(object()->source())};
+ } else {
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ HeapObjectRef::data()->AsRegExpBoilerplateDescription()->Serialize(
+ broker());
+ }
}
Handle<Object> ObjectRef::object() const {
@@ -3983,6 +4247,8 @@ Handle<Object> ObjectRef::object() const {
#endif // DEBUG
HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEF_OBJECT_GETTER)
+HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(DEF_OBJECT_GETTER)
+HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DEF_OBJECT_GETTER)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DEF_OBJECT_GETTER)
#undef DEF_OBJECT_GETTER
@@ -4015,11 +4281,13 @@ Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
NativeContextData::NativeContextData(JSHeapBroker* broker, ObjectData** storage,
Handle<NativeContext> object)
- : ContextData(broker, storage, object), function_maps_(broker->zone()) {}
+ : ContextData(broker, storage, object),
+ state_(State::kUnserialized),
+ function_maps_(broker->zone()) {}
void NativeContextData::Serialize(JSHeapBroker* broker) {
- if (serialized_) return;
- serialized_ = true;
+ if (state_ != State::kUnserialized) return;
+ state_ = State::kSerializedOnMainThread;
TraceScope tracer(broker, this, "NativeContextData::Serialize");
Handle<NativeContext> context = Handle<NativeContext>::cast(object());
@@ -4028,14 +4296,16 @@ void NativeContextData::Serialize(JSHeapBroker* broker) {
DCHECK_NULL(name##_); \
name##_ = broker->GetOrCreateData(context->name()); \
if (!name##_->should_access_heap()) { \
- if (name##_->IsJSFunction()) name##_->AsJSFunction()->Serialize(broker); \
if (name##_->IsMap() && \
!InstanceTypeChecker::IsContext(name##_->AsMap()->instance_type())) { \
name##_->AsMap()->SerializeConstructor(broker); \
} \
+ if (name##_->IsJSFunction()) { \
+ name##_->AsJSFunction()->Serialize(broker); \
+ } \
}
BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
- if (!broker->isolate()->bootstrapper()->IsActive()) {
+ if (!broker->is_isolate_bootstrapping()) {
BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
}
#undef SERIALIZE_MEMBER
@@ -4048,15 +4318,39 @@ void NativeContextData::Serialize(JSHeapBroker* broker) {
broker);
}
+ scope_info_ = broker->GetOrCreateData(context->scope_info());
+}
+
+void NativeContextData::SerializeOnBackground(JSHeapBroker* broker) {
+ if (state_ == State::kFullySerialized) return;
+ DCHECK_EQ(state_, State::kSerializedOnMainThread);
+  state_ = State::kFullySerialized;
+
+ UnparkedScopeIfNeeded unparked_scope(broker);
+ TraceScope tracer(broker, this, "NativeContextData::SerializeOnBackground");
+ Handle<NativeContext> context = Handle<NativeContext>::cast(object());
+
+ constexpr auto kAllowed = ObjectRef::BackgroundSerialization::kAllowed;
+#define SERIALIZE_MEMBER(type, name) \
+ DCHECK_NULL(name##_); \
+ name##_ = broker->GetOrCreateData(context->name(), kAllowed); \
+ if (!name##_->should_access_heap()) { \
+ DCHECK(!name##_->IsJSFunction()); \
+ }
+ BROKER_COMPULSORY_BACKGROUND_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
+ if (!broker->is_isolate_bootstrapping()) {
+ BROKER_OPTIONAL_BACKGROUND_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
+ }
+#undef SERIALIZE_MEMBER
+
DCHECK(function_maps_.empty());
int const first = Context::FIRST_FUNCTION_MAP_INDEX;
int const last = Context::LAST_FUNCTION_MAP_INDEX;
function_maps_.reserve(last + 1 - first);
for (int i = first; i <= last; ++i) {
- function_maps_.push_back(broker->GetOrCreateData(context->get(i)));
+ function_maps_.push_back(
+ broker->GetOrCreateData(context->get(i), kAllowed));
}
-
- scope_info_ = broker->GetOrCreateData(context->scope_info());
}
void JSFunctionRef::Serialize() {
@@ -4065,6 +4359,12 @@ void JSFunctionRef::Serialize() {
data()->AsJSFunction()->Serialize(broker());
}
+void JSFunctionRef::SerializeCodeAndFeedback() {
+ if (data_->should_access_heap()) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsJSFunction()->SerializeCodeAndFeedback(broker());
+}
+
bool JSBoundFunctionRef::serialized() const {
if (data_->should_access_heap()) return true;
return data()->AsJSBoundFunction()->serialized();
@@ -4075,6 +4375,20 @@ bool JSFunctionRef::serialized() const {
return data()->AsJSFunction()->serialized();
}
+bool JSFunctionRef::serialized_code_and_feedback() const {
+ if (data_->should_access_heap()) return true;
+ return data()->AsJSFunction()->serialized_code_and_feedback();
+}
+
+CodeRef JSFunctionRef::code() const {
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ return CodeRef(broker(), broker()->CanonicalPersistentHandle(
+ object()->code(kAcquireLoad)));
+ }
+
+ return CodeRef(broker(), ObjectRef::data()->AsJSFunction()->code());
+}
+
void SharedFunctionInfoRef::SerializeFunctionTemplateInfo() {
if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
@@ -4091,9 +4405,11 @@ base::Optional<FunctionTemplateInfoRef>
SharedFunctionInfoRef::function_template_info() const {
if (data_->should_access_heap()) {
if (object()->IsApiFunction()) {
- return FunctionTemplateInfoRef(
- broker(), broker()->CanonicalPersistentHandle(
- object()->function_data(kAcquireLoad)));
+ ObjectData* data =
+ broker()->TryGetOrCreateData(broker()->CanonicalPersistentHandle(
+ object()->function_data(kAcquireLoad)));
+ if (data == nullptr) return base::nullopt;
+ return FunctionTemplateInfoRef(broker(), data, true);
}
return base::nullopt;
}
@@ -4123,6 +4439,7 @@ void JSObjectRef::SerializeObjectCreateMap() {
}
void MapRef::SerializeOwnDescriptor(InternalIndex descriptor_index) {
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsMap()->SerializeOwnDescriptor(broker(), descriptor_index);
@@ -4133,10 +4450,10 @@ bool MapRef::serialized_own_descriptor(InternalIndex descriptor_index) const {
if (data_->should_access_heap()) return true;
ObjectData* maybe_desc_array_data = data()->AsMap()->instance_descriptors();
if (!maybe_desc_array_data) return false;
+ if (maybe_desc_array_data->should_access_heap()) return true;
DescriptorArrayData* desc_array_data =
maybe_desc_array_data->AsDescriptorArray();
- return desc_array_data->contents().find(descriptor_index.as_int()) !=
- desc_array_data->contents().end();
+ return desc_array_data->serialized_descriptor(descriptor_index);
}
void MapRef::SerializeBackPointer() {
@@ -4169,10 +4486,28 @@ void NativeContextRef::Serialize() {
data()->AsNativeContext()->Serialize(broker());
}
-void JSTypedArrayRef::Serialize() {
+void NativeContextRef::SerializeOnBackground() {
if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsJSTypedArray()->Serialize(broker());
+ CHECK(broker()->mode() == JSHeapBroker::kSerializing ||
+ broker()->mode() == JSHeapBroker::kSerialized);
+ data()->AsNativeContext()->SerializeOnBackground(broker());
+}
+
+void JSTypedArrayRef::Serialize() {
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ // Even if the typed array object itself is no longer serialized (besides
+ // the JSObject parts), the `buffer` field still is and thus we need to
+ // make sure to visit it.
+ // TODO(jgruber,v8:7790): Remove once JSObject is no longer serialized.
+ static_assert(
+ std::is_base_of<JSObject, decltype(object()->buffer())>::value, "");
+ STATIC_ASSERT(IsSerializedHeapObject<JSObject>());
+ JSObjectRef data_ref{
+ broker(), broker()->CanonicalPersistentHandle(object()->buffer())};
+ } else {
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsJSTypedArray()->Serialize(broker());
+ }
}
bool JSTypedArrayRef::serialized() const {
@@ -4180,16 +4515,22 @@ bool JSTypedArrayRef::serialized() const {
return data()->AsJSTypedArray()->serialized();
}
+bool JSTypedArrayRef::ShouldHaveBeenSerialized() const {
+ if (FLAG_turbo_direct_heap_access) return false;
+ return ObjectRef::ShouldHaveBeenSerialized();
+}
+
bool JSBoundFunctionRef::Serialize() {
if (data_->should_access_heap()) return true;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
return data()->AsJSBoundFunction()->Serialize(broker());
}
-void PropertyCellRef::Serialize() {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsPropertyCell()->Serialize(broker());
+bool PropertyCellRef::Serialize() const {
+ if (data_->should_access_heap()) return true;
+ CHECK(broker()->mode() == JSHeapBroker::kSerializing ||
+ broker()->mode() == JSHeapBroker::kSerialized);
+ return data()->AsPropertyCell()->Serialize(broker());
}
void FunctionTemplateInfoRef::SerializeCallCode() {
@@ -4285,7 +4626,6 @@ GlobalAccessFeedback::GlobalAccessFeedback(PropertyCellRef cell,
GlobalAccessFeedback::GlobalAccessFeedback(FeedbackSlotKind slot_kind)
: ProcessedFeedback(kGlobalAccess, slot_kind),
- cell_or_context_(base::nullopt),
index_and_immutable_(0 /* doesn't matter */) {
DCHECK(IsGlobalICKind(slot_kind));
}
@@ -4330,6 +4670,8 @@ bool GlobalAccessFeedback::immutable() const {
base::Optional<ObjectRef> GlobalAccessFeedback::GetConstantHint() const {
if (IsPropertyCell()) {
+ bool cell_serialized = property_cell().Serialize();
+ CHECK(cell_serialized); // Can't fail on the main thread.
return property_cell().value();
} else if (IsScriptContextSlot() && immutable()) {
return script_context().get(slot_index());
@@ -4652,7 +4994,10 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForGlobalAccess(
// The wanted name belongs (or did belong) to a property on the global
// object and the feedback is the cell holding its value.
PropertyCellRef cell(this, Handle<PropertyCell>::cast(feedback_value));
- cell.Serialize();
+ ObjectRef(
+ this,
+ CanonicalPersistentHandle(
+ Handle<PropertyCell>::cast(feedback_value)->value(kAcquireLoad)));
return *zone()->New<GlobalAccessFeedback>(cell, nexus.kind());
}
@@ -4727,9 +5072,9 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForRegExpLiteral(
return NewInsufficientFeedback(nexus.kind());
}
- JSRegExpRef regexp(this, handle(object, isolate()));
- regexp.SerializeAsRegExpBoilerplate();
- return *zone()->New<RegExpLiteralFeedback>(regexp, nexus.kind());
+ RegExpBoilerplateDescriptionRef boilerplate(this, handle(object, isolate()));
+ boilerplate.Serialize();
+ return *zone()->New<RegExpLiteralFeedback>(boilerplate, nexus.kind());
}
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForTemplateObject(
@@ -4753,6 +5098,12 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCall(
base::Optional<HeapObjectRef> target_ref;
{
+ // TODO(mvstanton): this read has a special danger when done on the
+ // background thread, because the CallIC has a site in generated code
+ // where a JSFunction is installed in this slot without store ordering.
+ // Therefore, we will need to check {maybe_target} to ensure that it
+ // has been store ordered by the heap's mechanism for store-ordering
+ // batches of new objects.
MaybeObject maybe_target = nexus.GetFeedback();
HeapObject target_object;
if (maybe_target->GetHeapObject(&target_object)) {
@@ -5123,42 +5474,18 @@ TemplateObjectFeedback const& ProcessedFeedback::AsTemplateObject() const {
return *static_cast<TemplateObjectFeedback const*>(this);
}
-bool JSHeapBroker::StackHasOverflowed() const {
- DCHECK_IMPLIES(local_isolate_ == nullptr,
- ThreadId::Current() == isolate_->thread_id());
- return (local_isolate_ != nullptr)
- ? StackLimitCheck::HasOverflowed(local_isolate_)
- : StackLimitCheck(isolate_).HasOverflowed();
-}
-
-OffHeapBytecodeArray::OffHeapBytecodeArray(BytecodeArrayRef bytecode_array)
- : array_(bytecode_array) {}
-
-int OffHeapBytecodeArray::length() const { return array_.length(); }
-
-int OffHeapBytecodeArray::parameter_count() const {
- return array_.parameter_count();
-}
-
-uint8_t OffHeapBytecodeArray::get(int index) const { return array_.get(index); }
-
-void OffHeapBytecodeArray::set(int index, uint8_t value) { UNREACHABLE(); }
-
-Address OffHeapBytecodeArray::GetFirstBytecodeAddress() const {
- return array_.GetFirstBytecodeAddress();
-}
-
-Handle<Object> OffHeapBytecodeArray::GetConstantAtIndex(
- int index, Isolate* isolate) const {
- return array_.GetConstantAtIndex(index);
-}
-
-bool OffHeapBytecodeArray::IsConstantAtIndexSmi(int index) const {
- return array_.IsConstantAtIndexSmi(index);
-}
+unsigned CodeRef::GetInlinedBytecodeSize() const {
+ if (data_->should_access_heap()) {
+ unsigned value = object()->inlined_bytecode_size();
+ if (value > 0) {
+ // Don't report inlined bytecode size if the code object was already
+ // deoptimized.
+ value = object()->marked_for_deoptimization() ? 0 : value;
+ }
+ return value;
+ }
-Smi OffHeapBytecodeArray::GetConstantAtIndexAsSmi(int index) const {
- return array_.GetConstantAtIndexAsSmi(index);
+ return ObjectRef::data()->AsCode()->inlined_bytecode_size();
}
#undef BIMODAL_ACCESSOR
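
Most getters in this file come from the BIMODAL_ACCESSOR macro family: a Ref either reads the live heap object directly or falls back to a snapshot captured during main-thread serialization. A minimal hand-written sketch of that shape (classes and fields are illustrative, not V8's):

#include <cassert>

struct HeapNumberObject {
  double value_ = 1.5;
  double value() const { return value_; }
};
struct HeapNumberData { double value_; };  // snapshot taken on the main thread

class HeapNumberRefSketch {
 public:
  HeapNumberRefSketch(const HeapNumberObject* object, const HeapNumberData* data,
                      bool should_access_heap)
      : object_(object), data_(data), should_access_heap_(should_access_heap) {}

  double value() const {
    if (should_access_heap_) return object_->value();  // direct heap read
    assert(data_ != nullptr);                          // serialized snapshot
    return data_->value_;
  }

 private:
  const HeapNumberObject* object_;
  const HeapNumberData* data_;
  bool should_access_heap_;
};

Moving an accessor off the macro, as this patch does for JSFunction::code or the JSTypedArray getters, is what makes room for per-field concurrency comments and acquire loads on the heap path.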
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index 97d54c39c1..cc86b1451c 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -104,6 +104,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
Zone* zone() const { return zone_; }
bool tracing_enabled() const { return tracing_enabled_; }
bool is_concurrent_inlining() const { return is_concurrent_inlining_; }
+ bool is_isolate_bootstrapping() const { return is_isolate_bootstrapping_; }
bool is_native_context_independent() const {
return code_kind_ == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
}
@@ -148,9 +149,21 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
Handle<Object> GetRootHandle(Object object);
// Never returns nullptr.
- ObjectData* GetOrCreateData(Handle<Object>);
+ ObjectData* GetOrCreateData(
+ Handle<Object>,
+ ObjectRef::BackgroundSerialization background_serialization =
+ ObjectRef::BackgroundSerialization::kDisallowed);
// Like the previous but wraps argument in handle first (for convenience).
- ObjectData* GetOrCreateData(Object);
+ ObjectData* GetOrCreateData(
+ Object, ObjectRef::BackgroundSerialization background_serialization =
+ ObjectRef::BackgroundSerialization::kDisallowed);
+
+ // Gets data only if we have it. However, thin wrappers will be created for
+ // smis, read-only objects and never-serialized objects.
+ ObjectData* TryGetOrCreateData(
+ Handle<Object>, bool crash_on_error = false,
+ ObjectRef::BackgroundSerialization background_serialization =
+ ObjectRef::BackgroundSerialization::kDisallowed);
// Check if {object} is any native context's %ArrayPrototype% or
// %ObjectPrototype%.
@@ -300,6 +313,16 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
friend class HeapObjectRef;
friend class ObjectRef;
friend class ObjectData;
+ friend class PropertyCellData;
+
+ bool IsMainThread() const {
+ return local_isolate() == nullptr || local_isolate()->is_main_thread();
+ }
+
+ // If this returns false, the object is guaranteed to be fully initialized and
+ // thus safe to read from a memory safety perspective. The converse does not
+ // necessarily hold.
+ bool ObjectMayBeUninitialized(Handle<Object> object) const;
bool CanUseFeedback(const FeedbackNexus& nexus) const;
const ProcessedFeedback& NewInsufficientFeedback(FeedbackSlotKind kind) const;
@@ -369,6 +392,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
BrokerMode mode_ = kDisabled;
bool const tracing_enabled_;
bool const is_concurrent_inlining_;
+ bool const is_isolate_bootstrapping_;
CodeKind const code_kind_;
std::unique_ptr<PersistentHandles> ph_;
LocalIsolate* local_isolate_ = nullptr;
@@ -443,23 +467,6 @@ Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
// compilation is finished.
bool CanInlineElementAccess(MapRef const& map);
-class OffHeapBytecodeArray final : public interpreter::AbstractBytecodeArray {
- public:
- explicit OffHeapBytecodeArray(BytecodeArrayRef bytecode_array);
-
- int length() const override;
- int parameter_count() const override;
- uint8_t get(int index) const override;
- void set(int index, uint8_t value) override;
- Address GetFirstBytecodeAddress() const override;
- Handle<Object> GetConstantAtIndex(int index, Isolate* isolate) const override;
- bool IsConstantAtIndexSmi(int index) const override;
- Smi GetConstantAtIndexAsSmi(int index) const override;
-
- private:
- BytecodeArrayRef array_;
-};
-
// Scope that unparks the LocalHeap, if:
// a) We have a JSHeapBroker,
// b) Said JSHeapBroker has a LocalIsolate and thus a LocalHeap,
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc
index 7ebc383ea5..3b45b9d82b 100644
--- a/deps/v8/src/compiler/js-heap-copy-reducer.cc
+++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc
@@ -29,8 +29,10 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kCheckClosure: {
FeedbackCellRef cell(broker(), FeedbackCellOf(node->op()));
- FeedbackVectorRef feedback_vector = cell.value().AsFeedbackVector();
- feedback_vector.Serialize();
+ base::Optional<FeedbackVectorRef> feedback_vector = cell.value();
+ if (feedback_vector.has_value()) {
+ feedback_vector->Serialize();
+ }
break;
}
case IrOpcode::kHeapConstant: {
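
The change above reflects FeedbackCellRef::value() now returning base::Optional<FeedbackVectorRef>, so call sites must handle the empty case explicitly instead of assuming a vector is present. A small sketch of the same caller-side pattern using std::optional (types are illustrative):

#include <iostream>
#include <optional>

struct FeedbackVectorSketch {
  void Serialize() { std::cout << "serialized\n"; }
};

std::optional<FeedbackVectorSketch> GetVector(bool has_vector) {
  if (!has_vector) return std::nullopt;  // cell not yet filled in
  return FeedbackVectorSketch{};
}

int main() {
  if (auto vector = GetVector(true)) vector->Serialize();  // only when present
}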
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index a09644ff9a..b38199bfff 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -44,7 +44,6 @@ bool CanConsiderForInlining(JSHeapBroker* broker,
<< feedback_vector << " (missing data)");
return false;
}
-
TRACE("Considering " << shared << " for inlining with " << feedback_vector);
return true;
}
@@ -57,12 +56,13 @@ bool CanConsiderForInlining(JSHeapBroker* broker,
return false;
}
- if (!function.serialized()) {
+ if (!function.serialized() || !function.serialized_code_and_feedback()) {
TRACE_BROKER_MISSING(
broker, "data for " << function << " (cannot consider for inlining)");
TRACE("Cannot consider " << function << " for inlining (missing data)");
return false;
}
+
return CanConsiderForInlining(broker, function.shared(),
function.feedback_vector());
}
@@ -111,12 +111,9 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
if (m.IsCheckClosure()) {
DCHECK(!out.functions[0].has_value());
FeedbackCellRef feedback_cell(broker(), FeedbackCellOf(m.op()));
- SharedFunctionInfoRef shared_info =
- feedback_cell.shared_function_info().value();
+ SharedFunctionInfoRef shared_info = *feedback_cell.shared_function_info();
out.shared_info = shared_info;
- if (feedback_cell.value().IsFeedbackVector() &&
- CanConsiderForInlining(broker(), shared_info,
- feedback_cell.value().AsFeedbackVector())) {
+ if (CanConsiderForInlining(broker(), shared_info, *feedback_cell.value())) {
out.bytecode[0] = shared_info.GetBytecodeArray();
}
out.num_functions = 1;
@@ -129,9 +126,8 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
FeedbackCellRef feedback_cell = n.GetFeedbackCellRefChecked(broker());
SharedFunctionInfoRef shared_info(broker(), p.shared_info());
out.shared_info = shared_info;
- if (feedback_cell.value().IsFeedbackVector() &&
- CanConsiderForInlining(broker(), shared_info,
- feedback_cell.value().AsFeedbackVector())) {
+ if (feedback_cell.value().has_value() &&
+ CanConsiderForInlining(broker(), shared_info, *feedback_cell.value())) {
out.bytecode[0] = shared_info.GetBytecodeArray();
}
out.num_functions = 1;
@@ -142,6 +138,12 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
}
Reduction JSInliningHeuristic::Reduce(Node* node) {
+ if (mode() == kWasmOnly) {
+ return (node->opcode() == IrOpcode::kJSWasmCall)
+ ? inliner_.ReduceJSWasmCall(node)
+ : NoChange();
+ }
+ DCHECK_EQ(mode(), kJSOnly);
if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
if (total_inlined_bytecode_size_ >= FLAG_max_inlined_bytecode_size_absolute) {
@@ -165,8 +167,8 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
bool can_inline_candidate = false, candidate_is_small = true;
candidate.total_size = 0;
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
- FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op());
+ FrameState frame_state{NodeProperties::GetFrameStateInput(node)};
+ FrameStateInfo const& frame_info = frame_state.frame_state_info();
Handle<SharedFunctionInfo> frame_shared_info;
for (int i = 0; i < candidate.num_functions; ++i) {
if (!candidate.bytecode[i].has_value()) {
@@ -202,10 +204,8 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
unsigned inlined_bytecode_size = 0;
if (candidate.functions[i].has_value()) {
JSFunctionRef function = candidate.functions[i].value();
- if (function.HasAttachedOptimizedCode()) {
- inlined_bytecode_size = function.code().inlined_bytecode_size();
- candidate.total_size += inlined_bytecode_size;
- }
+ inlined_bytecode_size = function.code().GetInlinedBytecodeSize();
+ candidate.total_size += inlined_bytecode_size;
}
candidate_is_small = candidate_is_small &&
IsSmall(bytecode.length() + inlined_bytecode_size);
@@ -335,19 +335,18 @@ Node* JSInliningHeuristic::DuplicateStateValuesAndRename(Node* state_values,
namespace {
-bool CollectFrameStateUniqueUses(Node* node, Node* frame_state,
+bool CollectFrameStateUniqueUses(Node* node, FrameState frame_state,
NodeAndIndex* uses_buffer, size_t* use_count,
size_t max_uses) {
// Only accumulate states that are not shared with other users.
if (frame_state->UseCount() > 1) return true;
- if (frame_state->InputAt(kFrameStateStackInput) == node) {
+ if (frame_state.stack() == node) {
if (*use_count >= max_uses) return false;
- uses_buffer[*use_count] = {frame_state, kFrameStateStackInput};
+ uses_buffer[*use_count] = {frame_state, FrameState::kFrameStateStackInput};
(*use_count)++;
}
- if (!CollectStateValuesOwnedUses(node,
- frame_state->InputAt(kFrameStateLocalsInput),
- uses_buffer, use_count, max_uses)) {
+ if (!CollectStateValuesOwnedUses(node, frame_state.locals(), uses_buffer,
+ use_count, max_uses)) {
return false;
}
return true;
@@ -355,28 +354,28 @@ bool CollectFrameStateUniqueUses(Node* node, Node* frame_state,
} // namespace
-Node* JSInliningHeuristic::DuplicateFrameStateAndRename(Node* frame_state,
- Node* from, Node* to,
- StateCloneMode mode) {
+FrameState JSInliningHeuristic::DuplicateFrameStateAndRename(
+ FrameState frame_state, Node* from, Node* to, StateCloneMode mode) {
// Only rename in states that are not shared with other users. This needs to
// be in sync with the condition in {DuplicateFrameStateAndRename}.
if (frame_state->UseCount() > 1) return frame_state;
- Node* copy = mode == kChangeInPlace ? frame_state : nullptr;
- if (frame_state->InputAt(kFrameStateStackInput) == from) {
+ Node* copy =
+ mode == kChangeInPlace ? static_cast<Node*>(frame_state) : nullptr;
+ if (frame_state.stack() == from) {
if (!copy) {
copy = graph()->CloneNode(frame_state);
}
- copy->ReplaceInput(kFrameStateStackInput, to);
+ copy->ReplaceInput(FrameState::kFrameStateStackInput, to);
}
- Node* locals = frame_state->InputAt(kFrameStateLocalsInput);
+ Node* locals = frame_state.locals();
Node* new_locals = DuplicateStateValuesAndRename(locals, from, to, mode);
if (new_locals != locals) {
if (!copy) {
copy = graph()->CloneNode(frame_state);
}
- copy->ReplaceInput(kFrameStateLocalsInput, new_locals);
+ copy->ReplaceInput(FrameState::kFrameStateLocalsInput, new_locals);
}
- return copy ? copy : frame_state;
+ return copy != nullptr ? FrameState{copy} : frame_state;
}
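
The rewrite above replaces raw Node* plus magic input indices (kFrameStateStackInput, kFrameStateLocalsInput) with the FrameState wrapper, which names those inputs. A standalone sketch of that typed-wrapper idea (not the actual compiler classes):

#include <array>
#include <cassert>

struct NodeSketch { std::array<NodeSketch*, 3> inputs{}; };

class FrameStateSketch {
 public:
  static constexpr int kStackInput = 1;
  static constexpr int kLocalsInput = 2;

  explicit FrameStateSketch(NodeSketch* node) : node_(node) {
    assert(node_ != nullptr);
  }

  NodeSketch* stack() const { return node_->inputs[kStackInput]; }
  NodeSketch* locals() const { return node_->inputs[kLocalsInput]; }
  NodeSketch* operator->() const { return node_; }  // still usable as a node

 private:
  NodeSketch* node_;
};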
bool JSInliningHeuristic::TryReuseDispatch(Node* node, Node* callee,
@@ -538,14 +537,15 @@ bool JSInliningHeuristic::TryReuseDispatch(Node* node, Node* callee,
Node* checkpoint_state = nullptr;
if (checkpoint) {
checkpoint_state = checkpoint->InputAt(0);
- if (!CollectFrameStateUniqueUses(callee, checkpoint_state, replaceable_uses,
- &replaceable_uses_count, kMaxUses)) {
+ if (!CollectFrameStateUniqueUses(callee, FrameState{checkpoint_state},
+ replaceable_uses, &replaceable_uses_count,
+ kMaxUses)) {
return false;
}
}
// Collect the uses to check case 3.
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ FrameState frame_state{NodeProperties::GetFrameStateInput(node)};
if (!CollectFrameStateUniqueUses(callee, frame_state, replaceable_uses,
&replaceable_uses_count, kMaxUses)) {
return false;
@@ -582,15 +582,15 @@ bool JSInliningHeuristic::TryReuseDispatch(Node* node, Node* callee,
if (checkpoint) {
// Duplicate the checkpoint.
- Node* new_checkpoint_state = DuplicateFrameStateAndRename(
- checkpoint_state, callee, target,
+ FrameState new_checkpoint_state = DuplicateFrameStateAndRename(
+ FrameState{checkpoint_state}, callee, target,
(i == num_calls - 1) ? kChangeInPlace : kCloneState);
effect = graph()->NewNode(checkpoint->op(), new_checkpoint_state, effect,
control);
}
// Duplicate the call.
- Node* new_lazy_frame_state = DuplicateFrameStateAndRename(
+ FrameState new_lazy_frame_state = DuplicateFrameStateAndRename(
frame_state, callee, target,
(i == num_calls - 1) ? kChangeInPlace : kCloneState);
inputs[0] = target;
@@ -670,6 +670,7 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
bool small_function) {
int const num_calls = candidate.num_functions;
Node* const node = candidate.node;
+ DCHECK_NE(node->opcode(), IrOpcode::kJSWasmCall);
if (num_calls == 1) {
Reduction const reduction = inliner_.ReduceJSCall(node);
if (reduction.Changed()) {
@@ -788,9 +789,11 @@ void JSInliningHeuristic::PrintCandidates() {
os << ", bytecode size: " << candidate.bytecode[i]->length();
if (candidate.functions[i].has_value()) {
JSFunctionRef function = candidate.functions[i].value();
- if (function.HasAttachedOptimizedCode()) {
+ unsigned inlined_bytecode_size =
+ function.code().GetInlinedBytecodeSize();
+ if (inlined_bytecode_size > 0) {
os << ", existing opt code's inlined bytecode size: "
- << function.code().inlined_bytecode_size();
+ << inlined_bytecode_size;
}
}
} else {
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index a613dacaaa..02280deaf3 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -13,17 +13,20 @@ namespace compiler {
class JSInliningHeuristic final : public AdvancedReducer {
public:
+ enum Mode { kJSOnly, kWasmOnly };
+
JSInliningHeuristic(Editor* editor, Zone* local_zone,
OptimizedCompilationInfo* info, JSGraph* jsgraph,
JSHeapBroker* broker,
- SourcePositionTable* source_positions)
+ SourcePositionTable* source_positions, Mode mode)
: AdvancedReducer(editor),
inliner_(editor, local_zone, info, jsgraph, broker, source_positions),
candidates_(local_zone),
seen_(local_zone),
source_positions_(source_positions),
jsgraph_(jsgraph),
- broker_(broker) {}
+ broker_(broker),
+ mode_(mode) {}
const char* reducer_name() const override { return "JSInliningHeuristic"; }
@@ -78,8 +81,8 @@ class JSInliningHeuristic final : public AdvancedReducer {
bool TryReuseDispatch(Node* node, Node* callee, Node** if_successes,
Node** calls, Node** inputs, int input_count);
enum StateCloneMode { kCloneState, kChangeInPlace };
- Node* DuplicateFrameStateAndRename(Node* frame_state, Node* from, Node* to,
- StateCloneMode mode);
+ FrameState DuplicateFrameStateAndRename(FrameState frame_state, Node* from,
+ Node* to, StateCloneMode mode);
Node* DuplicateStateValuesAndRename(Node* state_values, Node* from, Node* to,
StateCloneMode mode);
Candidate CollectFunctions(Node* node, int functions_size);
@@ -91,6 +94,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
JSHeapBroker* broker() const { return broker_; }
Isolate* isolate() const { return jsgraph_->isolate(); }
SimplifiedOperatorBuilder* simplified() const;
+ Mode mode() const { return mode_; }
JSInliner inliner_;
Candidates candidates_;
@@ -99,6 +103,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
JSGraph* const jsgraph_;
JSHeapBroker* const broker_;
int total_inlined_bytecode_size_ = 0;
+ const Mode mode_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 050e26799e..5da0c9c181 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -20,6 +20,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/execution/isolate-inl.h"
#include "src/objects/feedback-cell-inl.h"
#include "src/parsing/parse-info.h"
@@ -60,8 +61,8 @@ class JSCallAccessor {
Node* new_target() const { return JSConstructNode{call_}.new_target(); }
- Node* frame_state() const {
- return NodeProperties::GetFrameStateInput(call_);
+ FrameState frame_state() const {
+ return FrameState{NodeProperties::GetFrameStateInput(call_)};
}
int argument_count() const {
@@ -80,11 +81,25 @@ class JSCallAccessor {
Node* call_;
};
+Reduction JSInliner::InlineJSWasmCall(Node* call, Node* new_target,
+ Node* context, Node* frame_state,
+ StartNode start, Node* end,
+ Node* exception_target,
+ const NodeVector& uncaught_subcalls) {
+ JSWasmCallNode n(call);
+ return InlineCall(
+ call, new_target, context, frame_state, start, end, exception_target,
+ uncaught_subcalls,
+ static_cast<int>(n.Parameters().signature()->parameter_count()));
+}
+
Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
- Node* frame_state, Node* start, Node* end,
+ Node* frame_state, StartNode start, Node* end,
Node* exception_target,
- const NodeVector& uncaught_subcalls) {
- JSCallAccessor c(call);
+ const NodeVector& uncaught_subcalls,
+ int argument_count) {
+ DCHECK_IMPLIES(IrOpcode::IsInlineeOpcode(call->opcode()),
+ argument_count == JSCallAccessor(call).argument_count());
// The scheduler is smart enough to place our code; we just ensure {control}
// becomes the control input of the start of the inlinee, and {effect} becomes
@@ -92,16 +107,13 @@ Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
Node* control = NodeProperties::GetControlInput(call);
Node* effect = NodeProperties::GetEffectInput(call);
- int const inlinee_new_target_index =
- static_cast<int>(start->op()->ValueOutputCount()) - 3;
- int const inlinee_arity_index =
- static_cast<int>(start->op()->ValueOutputCount()) - 2;
- int const inlinee_context_index =
- static_cast<int>(start->op()->ValueOutputCount()) - 1;
+ int const inlinee_new_target_index = start.NewTargetOutputIndex();
+ int const inlinee_arity_index = start.ArgCountOutputIndex();
+ int const inlinee_context_index = start.ContextOutputIndex();
// {inliner_inputs} counts the target, receiver/new_target, and arguments; but
// not feedback vector, context, effect or control.
- const int inliner_inputs = c.argument_count() +
+ const int inliner_inputs = argument_count +
JSCallOrConstructNode::kExtraInputCount -
JSCallOrConstructNode::kFeedbackVectorInputCount;
// Iterate over all uses of the start node.
@@ -120,7 +132,7 @@ Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
Replace(use, new_target);
} else if (index == inlinee_arity_index) {
// The projection is requesting the number of arguments.
- Replace(use, jsgraph()->Constant(c.argument_count()));
+ Replace(use, jsgraph()->Constant(argument_count));
} else if (index == inlinee_context_index) {
// The projection is requesting the inlinee function context.
Replace(use, context);
@@ -231,12 +243,10 @@ Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
}
}
-Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
- int parameter_count,
- BailoutId bailout_id,
- FrameStateType frame_state_type,
- SharedFunctionInfoRef shared,
- Node* context) {
+FrameState JSInliner::CreateArtificialFrameState(
+ Node* node, FrameState outer_frame_state, int parameter_count,
+ BytecodeOffset bailout_id, FrameStateType frame_state_type,
+ SharedFunctionInfoRef shared, Node* context) {
const int parameter_count_with_receiver =
parameter_count + JSCallOrConstructNode::kReceiverOrNewTargetInputCount;
const FrameStateFunctionInfo* state_info =
@@ -259,9 +269,9 @@ Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
Node* params_node = graph()->NewNode(
op_param, static_cast<int>(params.size()), &params.front());
if (context == nullptr) context = jsgraph()->UndefinedConstant();
- return graph()->NewNode(op, params_node, node0, node0, context,
- node->InputAt(JSCallOrConstructNode::TargetIndex()),
- outer_frame_state);
+ return FrameState{graph()->NewNode(
+ op, params_node, node0, node0, context,
+ node->InputAt(JSCallOrConstructNode::TargetIndex()), outer_frame_state)};
}
namespace {
@@ -374,8 +384,70 @@ FeedbackCellRef JSInliner::DetermineCallContext(Node* node,
UNREACHABLE();
}
+Reduction JSInliner::ReduceJSWasmCall(Node* node) {
+ // Create the subgraph for the inlinee.
+ Node* start_node;
+ Node* end;
+ {
+ Graph::SubgraphScope scope(graph());
+
+ graph()->SetEnd(nullptr);
+
+ JSWasmCallNode n(node);
+ const JSWasmCallParameters& wasm_call_params = n.Parameters();
+
+ // Create a nested frame state inside the frame state attached to the
+ // call; this will ensure that lazy deoptimizations at this point will
+ // still return the result of the Wasm function call.
+ Node* continuation_frame_state =
+ CreateJSWasmCallBuiltinContinuationFrameState(
+ jsgraph(), n.context(), n.frame_state(),
+ wasm_call_params.signature());
+ JSWasmCallData js_wasm_call_data(wasm_call_params.signature());
+ BuildInlinedJSToWasmWrapper(
+ graph()->zone(), jsgraph(), wasm_call_params.signature(),
+ wasm_call_params.module(), source_positions_,
+ StubCallMode::kCallBuiltinPointer, wasm::WasmFeatures::FromFlags(),
+ &js_wasm_call_data, continuation_frame_state);
+
+ // Extract the inlinee start/end nodes.
+ start_node = graph()->start();
+ end = graph()->end();
+ }
+ StartNode start{start_node};
+
+ Node* exception_target = nullptr;
+ NodeProperties::IsExceptionalCall(node, &exception_target);
+
+ // If we are inlining into a surrounding exception handler, we collect all
+ // potentially throwing nodes within the inlinee that are not handled locally
+ // by the inlinee itself. They are later wired into the surrounding handler.
+ NodeVector uncaught_subcalls(local_zone_);
+ if (exception_target != nullptr) {
+ // Find all uncaught 'calls' in the inlinee.
+ AllNodes inlined_nodes(local_zone_, end, graph());
+ for (Node* subnode : inlined_nodes.reachable) {
+ // Every possibly throwing node should get {IfSuccess} and {IfException}
+ // projections, unless there already is local exception handling.
+ if (subnode->op()->HasProperty(Operator::kNoThrow)) continue;
+ if (!NodeProperties::IsExceptionalCall(subnode)) {
+ DCHECK_EQ(2, subnode->op()->ControlOutputCount());
+ uncaught_subcalls.push_back(subnode);
+ }
+ }
+ }
+
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* new_target = jsgraph()->UndefinedConstant();
+
+ return InlineJSWasmCall(node, new_target, context, frame_state, start, end,
+ exception_target, uncaught_subcalls);
+}
+
Reduction JSInliner::ReduceJSCall(Node* node) {
DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
+ DCHECK_NE(node->opcode(), IrOpcode::kJSWasmCall);
JSCallAccessor call(node);
// Determine the call target.
@@ -405,9 +477,9 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// To ensure inlining always terminates, we have an upper limit on inlining
// the nested calls.
int nesting_level = 0;
- for (Node* frame_state = call.frame_state();
+ for (FrameState frame_state = FrameState{call.frame_state()};
frame_state->opcode() == IrOpcode::kFrameState;
- frame_state = frame_state->InputAt(kFrameStateOuterStateInput)) {
+ frame_state = frame_state.outer_frame_state()) {
nesting_level++;
if (nesting_level > kMaxDepthForInlining) {
TRACE("Not inlining "
@@ -433,15 +505,19 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
shared_info->object());
}
- TRACE("Inlining " << *shared_info << " into " << outer_shared_info
- << ((exception_target != nullptr) ? " (inside try-block)"
- : ""));
// Determine the target's feedback vector and its context.
Node* context;
FeedbackCellRef feedback_cell = DetermineCallContext(node, &context);
- CHECK(broker()->IsSerializedForCompilation(
- *shared_info, feedback_cell.value().AsFeedbackVector()));
+ if (!broker()->IsSerializedForCompilation(*shared_info,
+ *feedback_cell.value())) {
+ TRACE("Not inlining " << *shared_info << " into " << outer_shared_info
+ << " because it wasn't serialized for compilation.");
+ return NoChange();
+ }
+ TRACE("Inlining " << *shared_info << " into " << outer_shared_info
+ << ((exception_target != nullptr) ? " (inside try-block)"
+ : ""));
// ----------------------------------------------------------------
// After this point, we've made a decision to inline this function.
// We shall not bailout from inlining if we got here.
@@ -454,7 +530,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
source_positions_->GetSourcePosition(node));
// Create the subgraph for the inlinee.
- Node* start;
+ Node* start_node;
Node* end;
{
// Run the BytecodeGraphBuilder to create the subgraph.
@@ -470,15 +546,16 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
{
CallFrequency frequency = call.frequency();
BuildGraphFromBytecode(broker(), zone(), *shared_info, feedback_cell,
- BailoutId::None(), jsgraph(), frequency,
+ BytecodeOffset::None(), jsgraph(), frequency,
source_positions_, inlining_id, info_->code_kind(),
flags, &info_->tick_counter());
}
// Extract the inlinee start/end nodes.
- start = graph()->start();
+ start_node = graph()->start();
end = graph()->end();
}
+ StartNode start{start_node};
// If we are inlining into a surrounding exception handler, we collect all
// potentially throwing nodes within the inlinee that are not handled locally
@@ -498,7 +575,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
}
}
- Node* frame_state = call.frame_state();
+ FrameState frame_state = call.frame_state();
Node* new_target = jsgraph()->UndefinedConstant();
// Inline {JSConstruct} requires some additional magic.
@@ -523,7 +600,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
Control control = n.control();
Node* frame_state_inside = CreateArtificialFrameState(
node, frame_state, n.ArgumentCount(),
- BailoutId::ConstructStubCreate(), FrameStateType::kConstructStub,
+ BytecodeOffset::ConstructStubCreate(), FrameStateType::kConstructStub,
*shared_info, context);
Node* create =
graph()->NewNode(javascript()->Create(), call.target(), new_target,
@@ -577,8 +654,9 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// Insert a construct stub frame into the chain of frame states. This will
// reconstruct the proper frame when deoptimizing within the constructor.
frame_state = CreateArtificialFrameState(
- node, frame_state, n.ArgumentCount(), BailoutId::ConstructStubInvoke(),
- FrameStateType::kConstructStub, *shared_info, context);
+ node, frame_state, n.ArgumentCount(),
+ BytecodeOffset::ConstructStubInvoke(), FrameStateType::kConstructStub,
+ *shared_info, context);
}
// Insert a JSConvertReceiver node for sloppy callees. Note that the context
@@ -600,19 +678,18 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
}
// Insert argument adaptor frame if required. The callees formal parameter
- // count (i.e. value outputs of start node minus target, receiver, new target,
- // arguments count and context) have to match the number of arguments passed
+  // count has to match the number of arguments passed
// to the call.
int parameter_count = shared_info->internal_formal_parameter_count();
- DCHECK_EQ(parameter_count, start->op()->ValueOutputCount() - 5);
+ DCHECK_EQ(parameter_count, start.FormalParameterCountWithoutReceiver());
if (call.argument_count() != parameter_count) {
frame_state = CreateArtificialFrameState(
- node, frame_state, call.argument_count(), BailoutId::None(),
+ node, frame_state, call.argument_count(), BytecodeOffset::None(),
FrameStateType::kArgumentsAdaptor, *shared_info);
}
return InlineCall(node, new_target, context, frame_state, start, end,
- exception_target, uncaught_subcalls);
+ exception_target, uncaught_subcalls, call.argument_count());
}
Graph* JSInliner::graph() const { return jsgraph()->graph(); }
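A minimal, self-contained sketch (not V8 API) of the bounded walk over outer frame states that ReduceJSCall performs above to keep inlining depth finite; {FrameStateStub} and {kMaxDepth} are hypothetical stand-ins for compiler::FrameState and kMaxDepthForInlining.

#include <cstdio>

struct FrameStateStub {
  const FrameStateStub* outer;  // nullptr once the chain of frame states ends
};

constexpr int kMaxDepth = 5;

bool WouldExceedInliningDepth(const FrameStateStub* frame_state) {
  int nesting_level = 0;
  // Walk outward through the frame-state chain; each hop corresponds to one
  // level of calls that were already inlined into the current function.
  for (; frame_state != nullptr; frame_state = frame_state->outer) {
    if (++nesting_level > kMaxDepth) return true;  // refuse to inline further
  }
  return false;
}

int main() {
  FrameStateStub outermost{nullptr};
  FrameStateStub inner{&outermost};
  std::printf("exceeds depth: %d\n", WouldExceedInliningDepth(&inner));
  return 0;
}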
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index 0648c86f62..e1e1bdfa0a 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -11,7 +11,7 @@
namespace v8 {
namespace internal {
-class BailoutId;
+class BytecodeOffset;
class OptimizedCompilationInfo;
namespace compiler {
@@ -41,6 +41,8 @@ class JSInliner final : public AdvancedReducer {
// using the above generic reducer interface of the inlining machinery.
Reduction ReduceJSCall(Node* node);
+ Reduction ReduceJSWasmCall(Node* node);
+
private:
Zone* zone() const { return local_zone_; }
CommonOperatorBuilder* common() const;
@@ -61,16 +63,20 @@ class JSInliner final : public AdvancedReducer {
base::Optional<SharedFunctionInfoRef> DetermineCallTarget(Node* node);
FeedbackCellRef DetermineCallContext(Node* node, Node** context_out);
- Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
- int parameter_count, BailoutId bailout_id,
- FrameStateType frame_state_type,
- SharedFunctionInfoRef shared,
- Node* context = nullptr);
+ FrameState CreateArtificialFrameState(
+ Node* node, FrameState outer_frame_state, int parameter_count,
+ BytecodeOffset bailout_id, FrameStateType frame_state_type,
+ SharedFunctionInfoRef shared, Node* context = nullptr);
Reduction InlineCall(Node* call, Node* new_target, Node* context,
- Node* frame_state, Node* start, Node* end,
+ Node* frame_state, StartNode start, Node* end,
Node* exception_target,
- const NodeVector& uncaught_subcalls);
+ const NodeVector& uncaught_subcalls, int argument_count);
+
+ Reduction InlineJSWasmCall(Node* call, Node* new_target, Node* context,
+ Node* frame_state, StartNode start, Node* end,
+ Node* exception_target,
+ const NodeVector& uncaught_subcalls);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 68d9fa05f0..2d105e55a8 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -802,9 +802,9 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
Node* node, Node* lookup_start_object, Node* receiver, Node* value,
NameRef const& name, AccessMode access_mode, Node* key,
PropertyCellRef const& property_cell, Node* effect) {
- Node* control = NodeProperties::GetControlInput(node);
- if (effect == nullptr) {
- effect = NodeProperties::GetEffectInput(node);
+ if (!property_cell.Serialize()) {
+ TRACE_BROKER_MISSING(broker(), "usable data for " << property_cell);
+ return NoChange();
}
ObjectRef property_cell_value = property_cell.value();
@@ -819,6 +819,11 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
PropertyCellType property_cell_type = property_details.cell_type();
DCHECK_EQ(kData, property_details.kind());
+ Node* control = NodeProperties::GetControlInput(node);
+ if (effect == nullptr) {
+ effect = NodeProperties::GetEffectInput(node);
+ }
+
// We have additional constraints for stores.
if (access_mode == AccessMode::kStore) {
DCHECK_EQ(receiver, lookup_start_object);
@@ -923,10 +928,6 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
DCHECK_EQ(receiver, lookup_start_object);
DCHECK(!property_details.IsReadOnly());
switch (property_details.cell_type()) {
- case PropertyCellType::kUndefined: {
- UNREACHABLE();
- break;
- }
case PropertyCellType::kConstant: {
// Record a code dependency on the cell, and just deoptimize if the new
// value doesn't match the previous value stored inside the cell.
@@ -997,6 +998,8 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
jsgraph()->Constant(property_cell), value, effect, control);
break;
}
+ case PropertyCellType::kUndefined:
+ UNREACHABLE();
}
}
@@ -1950,26 +1953,36 @@ Reduction JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant(
NumberMatcher mkey(key);
if (mkey.IsInteger() && mkey.IsInRange(0.0, kMaxUInt32 - 1.0)) {
uint32_t index = static_cast<uint32_t>(mkey.ResolvedValue());
- base::Optional<ObjectRef> element =
- receiver_ref.GetOwnConstantElement(index);
- if (!element.has_value() && receiver_ref.IsJSArray()) {
- // We didn't find a constant element, but if the receiver is a cow-array
- // we can exploit the fact that any future write to the element will
- // replace the whole elements storage.
- element = receiver_ref.AsJSArray().GetOwnCowElement(index);
- if (element.has_value()) {
- Node* elements = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
- receiver, effect, control);
- FixedArrayRef array_elements =
- receiver_ref.AsJSArray().elements().AsFixedArray();
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(), elements,
- jsgraph()->Constant(array_elements));
- effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kCowArrayElementsChanged),
- check, effect, control);
+ base::Optional<ObjectRef> element;
+
+ if (receiver_ref.IsJSObject()) {
+ element = receiver_ref.AsJSObject().GetOwnConstantElement(index);
+ if (!element.has_value() && receiver_ref.IsJSArray()) {
+ // We didn't find a constant element, but if the receiver is a cow-array
+ // we can exploit the fact that any future write to the element will
+ // replace the whole elements storage.
+ JSArrayRef array_ref = receiver_ref.AsJSArray();
+ base::Optional<FixedArrayBaseRef> array_elements = array_ref.elements();
+ if (array_elements.has_value()) {
+ element = array_ref.GetOwnCowElement(*array_elements, index);
+ if (element.has_value()) {
+ Node* elements = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ receiver, effect, control);
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(), elements,
+ jsgraph()->Constant(*array_elements));
+ effect = graph()->NewNode(
+ simplified()->CheckIf(
+ DeoptimizeReason::kCowArrayElementsChanged),
+ check, effect, control);
+ }
+ }
}
+ } else if (receiver_ref.IsString()) {
+ element = receiver_ref.AsString().GetCharAsStringOrUndefined(index);
}
+
if (element.has_value()) {
Node* value = access_mode == AccessMode::kHas
? jsgraph()->TrueConstant()
@@ -2469,43 +2482,40 @@ JSNativeContextSpecialization::BuildPropertyStore(
value = effect =
graph()->NewNode(simplified()->CheckNumber(FeedbackSource()), value,
effect, control);
- if (!field_index.is_inobject() || !FLAG_unbox_double_fields) {
- if (access_info.HasTransitionMap()) {
- // Allocate a HeapNumber for the new property.
- AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(HeapNumber::kSize, AllocationType::kYoung,
- Type::OtherInternal());
- a.Store(AccessBuilder::ForMap(),
- MapRef(broker(), factory()->heap_number_map()));
- FieldAccess value_field_access =
- AccessBuilder::ForHeapNumberValue();
- value_field_access.const_field_info = field_access.const_field_info;
- a.Store(value_field_access, value);
- value = effect = a.Finish();
-
- field_access.type = Type::Any();
- field_access.machine_type = MachineType::TaggedPointer();
- field_access.write_barrier_kind = kPointerWriteBarrier;
- } else {
- // We just store directly to the HeapNumber.
- FieldAccess const storage_access = {
- kTaggedBase,
- field_index.offset(),
- name.object(),
- MaybeHandle<Map>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
- kPointerWriteBarrier,
- LoadSensitivity::kUnsafe,
- access_info.GetConstFieldInfo(),
- access_mode == AccessMode::kStoreInLiteral};
- storage = effect =
- graph()->NewNode(simplified()->LoadField(storage_access),
- storage, effect, control);
- field_access.offset = HeapNumber::kValueOffset;
- field_access.name = MaybeHandle<Name>();
- field_access.machine_type = MachineType::Float64();
- }
+ if (access_info.HasTransitionMap()) {
+ // Allocate a HeapNumber for the new property.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(HeapNumber::kSize, AllocationType::kYoung,
+ Type::OtherInternal());
+ a.Store(AccessBuilder::ForMap(),
+ MapRef(broker(), factory()->heap_number_map()));
+ FieldAccess value_field_access = AccessBuilder::ForHeapNumberValue();
+ value_field_access.const_field_info = field_access.const_field_info;
+ a.Store(value_field_access, value);
+ value = effect = a.Finish();
+
+ field_access.type = Type::Any();
+ field_access.machine_type = MachineType::TaggedPointer();
+ field_access.write_barrier_kind = kPointerWriteBarrier;
+ } else {
+ // We just store directly to the HeapNumber.
+ FieldAccess const storage_access = {
+ kTaggedBase,
+ field_index.offset(),
+ name.object(),
+ MaybeHandle<Map>(),
+ Type::OtherInternal(),
+ MachineType::TaggedPointer(),
+ kPointerWriteBarrier,
+ LoadSensitivity::kUnsafe,
+ access_info.GetConstFieldInfo(),
+ access_mode == AccessMode::kStoreInLiteral};
+ storage = effect =
+ graph()->NewNode(simplified()->LoadField(storage_access), storage,
+ effect, control);
+ field_access.offset = HeapNumber::kValueOffset;
+ field_access.name = MaybeHandle<Name>();
+ field_access.machine_type = MachineType::Float64();
}
if (store_to_existing_constant_field) {
DCHECK(!access_info.HasTransitionMap());
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index da3af62bf2..e565f1dfce 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -676,6 +676,50 @@ ForInParameters const& ForInParametersOf(const Operator* op) {
return OpParameter<ForInParameters>(op);
}
+JSWasmCallParameters const& JSWasmCallParametersOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSWasmCall, op->opcode());
+ return OpParameter<JSWasmCallParameters>(op);
+}
+
+std::ostream& operator<<(std::ostream& os, JSWasmCallParameters const& p) {
+ return os << p.module() << ", " << p.signature() << ", " << p.feedback();
+}
+
+size_t hash_value(JSWasmCallParameters const& p) {
+ return base::hash_combine(p.module(), p.signature(),
+ FeedbackSource::Hash()(p.feedback()));
+}
+
+bool operator==(JSWasmCallParameters const& lhs,
+ JSWasmCallParameters const& rhs) {
+ return lhs.module() == rhs.module() && lhs.signature() == rhs.signature() &&
+ lhs.feedback() == rhs.feedback();
+}
+
+int JSWasmCallParameters::arity_without_implicit_args() const {
+ return static_cast<int>(signature_->parameter_count());
+}
+
+int JSWasmCallParameters::input_count() const {
+ return static_cast<int>(signature_->parameter_count()) +
+ JSWasmCallNode::kExtraInputCount;
+}
+
+// static
+Type JSWasmCallNode::TypeForWasmReturnType(const wasm::ValueType& type) {
+ switch (type.kind()) {
+ case wasm::kI32:
+ return Type::Signed32();
+ case wasm::kI64:
+ return Type::BigInt();
+ case wasm::kF32:
+ case wasm::kF64:
+ return Type::Number();
+ default:
+ UNREACHABLE();
+ }
+}
+
#define CACHED_OP_LIST(V) \
V(ToLength, Operator::kNoProperties, 1, 1) \
V(ToName, Operator::kNoProperties, 1, 1) \
@@ -874,6 +918,17 @@ const Operator* JSOperatorBuilder::CallRuntime(const Runtime::Function* f,
parameters); // parameter
}
+const Operator* JSOperatorBuilder::CallWasm(
+ const wasm::WasmModule* wasm_module,
+ const wasm::FunctionSig* wasm_signature, FeedbackSource const& feedback) {
+ JSWasmCallParameters parameters(wasm_module, wasm_signature, feedback);
+ return zone()->New<Operator1<JSWasmCallParameters>>(
+ IrOpcode::kJSWasmCall, Operator::kNoProperties, // opcode
+ "JSWasmCall", // name
+ parameters.input_count(), 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
const Operator* JSOperatorBuilder::ConstructForwardVarargs(
size_t arity, uint32_t start_index) {
ConstructForwardVarargsParameters parameters(arity, start_index);
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 518eff7fc0..46258f3bb1 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -7,6 +7,7 @@
#include "src/base/compiler-specific.h"
#include "src/codegen/tnode.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/globals.h"
#include "src/compiler/node-properties.h"
@@ -25,6 +26,10 @@ class ArrayBoilerplateDescription;
class FeedbackCell;
class SharedFunctionInfo;
+namespace wasm {
+class ValueType;
+}
+
namespace compiler {
// Forward declarations.
@@ -816,6 +821,35 @@ size_t hash_value(ForInParameters const&);
std::ostream& operator<<(std::ostream&, ForInParameters const&);
const ForInParameters& ForInParametersOf(const Operator* op);
+class JSWasmCallParameters {
+ public:
+ explicit JSWasmCallParameters(const wasm::WasmModule* module,
+ const wasm::FunctionSig* signature,
+ FeedbackSource const& feedback)
+ : module_(module), signature_(signature), feedback_(feedback) {
+ DCHECK_NOT_NULL(module);
+ DCHECK_NOT_NULL(signature);
+ }
+
+ const wasm::WasmModule* module() const { return module_; }
+ const wasm::FunctionSig* signature() const { return signature_; }
+ FeedbackSource const& feedback() const { return feedback_; }
+ int input_count() const;
+ int arity_without_implicit_args() const;
+
+ private:
+ const wasm::WasmModule* const module_;
+ const wasm::FunctionSig* const signature_;
+ const FeedbackSource feedback_;
+};
+
+JSWasmCallParameters const& JSWasmCallParametersOf(const Operator* op)
+ V8_WARN_UNUSED_RESULT;
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
+ JSWasmCallParameters const&);
+size_t hash_value(JSWasmCallParameters const&);
+bool operator==(JSWasmCallParameters const&, JSWasmCallParameters const&);
+
int RegisterCountOf(Operator const* op) V8_WARN_UNUSED_RESULT;
int GeneratorStoreValueCountOf(const Operator* op) V8_WARN_UNUSED_RESULT;
@@ -925,6 +959,10 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
const Operator* CallRuntime(const Runtime::Function* function, size_t arity);
+ const Operator* CallWasm(const wasm::WasmModule* wasm_module,
+ const wasm::FunctionSig* wasm_signature,
+ FeedbackSource const& feedback);
+
const Operator* ConstructForwardVarargs(size_t arity, uint32_t start_index);
const Operator* Construct(uint32_t arity,
CallFrequency const& frequency = CallFrequency(),
@@ -1247,7 +1285,8 @@ class JSCallOrConstructNode : public JSNodeWrapperBase {
node->opcode() == IrOpcode::kJSCallWithSpread ||
node->opcode() == IrOpcode::kJSConstruct ||
node->opcode() == IrOpcode::kJSConstructWithArrayLike ||
- node->opcode() == IrOpcode::kJSConstructWithSpread);
+ node->opcode() == IrOpcode::kJSConstructWithSpread ||
+ node->opcode() == IrOpcode::kJSWasmCall);
}
#define INPUTS(V) \
@@ -1259,8 +1298,8 @@ class JSCallOrConstructNode : public JSNodeWrapperBase {
// Besides actual arguments, JSCall nodes (and variants) also take the
// following. Note that we rely on the fact that all variants (JSCall,
// JSCallWithArrayLike, JSCallWithSpread, JSConstruct,
- // JSConstructWithArrayLike, JSConstructWithSpread) have the same underlying
- // node layout.
+ // JSConstructWithArrayLike, JSConstructWithSpread, JSWasmCall) have the same
+ // underlying node layout.
static constexpr int kTargetInputCount = 1;
static constexpr int kReceiverOrNewTargetInputCount = 1;
static constexpr int kFeedbackVectorInputCount = 1;
@@ -1355,6 +1394,35 @@ using JSCallNode = JSCallNodeBase<IrOpcode::kJSCall>;
using JSCallWithSpreadNode = JSCallNodeBase<IrOpcode::kJSCallWithSpread>;
using JSCallWithArrayLikeNode = JSCallNodeBase<IrOpcode::kJSCallWithArrayLike>;
+class JSWasmCallNode final : public JSCallOrConstructNode {
+ public:
+ explicit constexpr JSWasmCallNode(Node* node) : JSCallOrConstructNode(node) {
+ CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSWasmCall);
+ }
+
+ const JSWasmCallParameters& Parameters() const {
+ return OpParameter<JSWasmCallParameters>(node()->op());
+ }
+
+#define INPUTS(V) \
+ V(Target, target, 0, Object) \
+ V(Receiver, receiver, 1, Object)
+ INPUTS(DEFINE_INPUT_ACCESSORS)
+#undef INPUTS
+
+ static constexpr int kReceiverInputCount = 1;
+ STATIC_ASSERT(kReceiverInputCount ==
+ JSCallOrConstructNode::kReceiverOrNewTargetInputCount);
+
+ int ArgumentCount() const override {
+ // Note: The count reported by this function depends only on the parameter
+ // count, thus adding/removing inputs will not affect it.
+ return Parameters().arity_without_implicit_args();
+ }
+
+ static Type TypeForWasmReturnType(const wasm::ValueType& type);
+};
+
template <int kOpcode>
class JSConstructNodeBase final : public JSCallOrConstructNode {
public:
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index a8dce04d44..008aacdb39 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -594,6 +594,8 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
PropertyCellRef string_length_protector(
broker(), factory()->string_length_protector());
+ string_length_protector.SerializeAsProtector();
+
if (string_length_protector.value().AsSmi() ==
Protectors::kProtectorValid) {
// We can just deoptimize if the {length} is out-of-bounds. Besides
@@ -1569,15 +1571,6 @@ void ReduceBuiltin(JSGraph* jsgraph, Node* node, int builtin_index, int arity,
NodeProperties::ChangeOp(node, jsgraph->common()->Call(call_descriptor));
}
-
-#ifndef V8_NO_ARGUMENTS_ADAPTOR
-bool NeedsArgumentAdaptorFrame(SharedFunctionInfoRef shared, int arity) {
- static const int sentinel = kDontAdaptArgumentsSentinel;
- const int num_decl_parms = shared.internal_formal_parameter_count();
- return (num_decl_parms != arity && num_decl_parms != sentinel);
-}
-#endif
-
} // namespace
Reduction JSTypedLowering::ReduceJSConstructForwardVarargs(Node* node) {
@@ -1722,7 +1715,10 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
shared = SharedFunctionInfoRef(broker(), ccp.shared_info());
} else if (target->opcode() == IrOpcode::kCheckClosure) {
FeedbackCellRef cell(broker(), FeedbackCellOf(target->op()));
- shared = cell.value().AsFeedbackVector().shared_function_info();
+ base::Optional<FeedbackVectorRef> feedback_vector = cell.value();
+ if (feedback_vector.has_value()) {
+ shared = feedback_vector->shared_function_info();
+ }
}
if (shared.has_value()) {
@@ -1762,7 +1758,6 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
Node* new_target = jsgraph()->UndefinedConstant();
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
int formal_count = shared->internal_formal_parameter_count();
if (formal_count != kDontAdaptArgumentsSentinel && formal_count > arity) {
node->RemoveInput(n.FeedbackVectorIndex());
@@ -1781,22 +1776,6 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
common()->Call(Linkage::GetJSCallDescriptor(
graph()->zone(), false, 1 + formal_count,
flags | CallDescriptor::kCanUseRoots)));
-#else
- if (NeedsArgumentAdaptorFrame(*shared, arity)) {
- node->RemoveInput(n.FeedbackVectorIndex());
- // Patch {node} to an indirect call via the ArgumentsAdaptorTrampoline.
- Callable callable = CodeFactory::ArgumentAdaptor(isolate());
- node->InsertInput(graph()->zone(), 0,
- jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 2, new_target);
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
- node->InsertInput(
- graph()->zone(), 4,
- jsgraph()->Constant(shared->internal_formal_parameter_count()));
- NodeProperties::ChangeOp(
- node, common()->Call(Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 1 + arity, flags)));
-#endif
} else if (shared->HasBuiltinId() &&
Builtins::IsCpp(shared->builtin_id())) {
// Patch {node} to a direct CEntry call.
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index ea316513d8..4f1565d0a9 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -77,21 +77,6 @@ MachineSignature* CallDescriptor::GetMachineSignature(Zone* zone) const {
return zone->New<MachineSignature>(return_count, param_count, types);
}
-int CallDescriptor::GetFirstUnusedStackSlot() const {
- int slots_above_sp = 0;
- for (size_t i = 0; i < InputCount(); ++i) {
- LinkageLocation operand = GetInputLocation(i);
- if (!operand.IsRegister()) {
- int new_candidate =
- -operand.GetLocation() + operand.GetSizeInPointers() - 1;
- if (new_candidate > slots_above_sp) {
- slots_above_sp = new_candidate;
- }
- }
- }
- return slots_above_sp;
-}
-
int CallDescriptor::GetStackParameterDelta(
CallDescriptor const* tail_caller) const {
// In the IsTailCallForTierUp case, the callee has
@@ -100,8 +85,8 @@ int CallDescriptor::GetStackParameterDelta(
// inputs to the TailCall node, since they already exist on the stack.
if (IsTailCallForTierUp()) return 0;
- int callee_slots_above_sp = GetFirstUnusedStackSlot();
- int tail_caller_slots_above_sp = tail_caller->GetFirstUnusedStackSlot();
+ int callee_slots_above_sp = GetOffsetToReturns();
+ int tail_caller_slots_above_sp = tail_caller->GetOffsetToReturns();
int stack_param_delta = callee_slots_above_sp - tail_caller_slots_above_sp;
if (ShouldPadArguments(stack_param_delta)) {
if (callee_slots_above_sp % 2 != 0) {
@@ -119,10 +104,43 @@ int CallDescriptor::GetStackParameterDelta(
return stack_param_delta;
}
+int CallDescriptor::GetFirstUnusedStackSlot() const {
+ int start_of_args = 0;
+ for (size_t i = 0; i < InputCount(); ++i) {
+ LinkageLocation operand = GetInputLocation(i);
+ if (!operand.IsRegister()) {
+ // Reverse, since arguments have negative offsets in the frame.
+ int reverse_location =
+ -operand.GetLocation() + operand.GetSizeInPointers() - 1;
+ DCHECK_GE(reverse_location, 0);
+ start_of_args = std::max(start_of_args, reverse_location);
+ }
+ }
+ return start_of_args;
+}
+
int CallDescriptor::GetOffsetToReturns() const {
- int offset = static_cast<int>(StackParameterCount());
- if (ShouldPadArguments(offset)) offset++;
- return offset;
+ // If there are return stack slots, return the first slot of the last one.
+ constexpr int kNoReturnSlot = std::numeric_limits<int>::max();
+ int end_of_returns = kNoReturnSlot;
+ for (size_t i = 0; i < ReturnCount(); ++i) {
+ LinkageLocation operand = GetReturnLocation(i);
+ if (!operand.IsRegister()) {
+ // Reverse, since returns have negative offsets in the frame.
+ int reverse_location = -operand.GetLocation() - 1;
+ DCHECK_GE(reverse_location, 0);
+ end_of_returns = std::min(end_of_returns, reverse_location);
+ }
+ }
+ if (end_of_returns != kNoReturnSlot) return end_of_returns;
+
+ // Otherwise, return the first unused slot before the parameters, with any
+ // additional padding slot if it exists.
+ end_of_returns = GetFirstUnusedStackSlot();
+ if (ShouldPadArguments(end_of_returns)) end_of_returns++;
+
+ DCHECK_EQ(end_of_returns == 0, StackParameterCount() == 0);
+ return end_of_returns;
}
int CallDescriptor::GetTaggedParameterSlots() const {
@@ -138,11 +156,12 @@ int CallDescriptor::GetTaggedParameterSlots() const {
bool CallDescriptor::CanTailCall(const CallDescriptor* callee) const {
if (ReturnCount() != callee->ReturnCount()) return false;
- const int stack_param_delta = callee->GetStackParameterDelta(this);
+ const int stack_returns_delta =
+ GetOffsetToReturns() - callee->GetOffsetToReturns();
for (size_t i = 0; i < ReturnCount(); ++i) {
if (GetReturnLocation(i).IsCallerFrameSlot() &&
callee->GetReturnLocation(i).IsCallerFrameSlot()) {
- if (GetReturnLocation(i).AsCallerFrameSlot() - stack_param_delta !=
+ if (GetReturnLocation(i).AsCallerFrameSlot() + stack_returns_delta !=
callee->GetReturnLocation(i).AsCallerFrameSlot()) {
return false;
}
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 5e3a1163cc..4aecb7c3a8 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -389,12 +389,15 @@ class V8_EXPORT_PRIVATE CallDescriptor final
bool UsesOnlyRegisters() const;
- // Returns the first stack slot that is not used by the stack parameters.
- int GetFirstUnusedStackSlot() const;
-
int GetStackParameterDelta(const CallDescriptor* tail_caller) const;
- // Returns the number of slots to the first return value slot.
+ // Returns the first stack slot that is not used by the stack parameters,
+ // which is the return slot area, or a padding slot for frame alignment.
+ int GetFirstUnusedStackSlot() const;
+
+ // If there are return stack slots, returns the first slot of the last one.
+  // Otherwise, returns the first unused slot before the parameters. This is the
+ // slot where returns would go if there were any.
int GetOffsetToReturns() const;
int GetTaggedParameterSlots() const;
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index 228375349d..2a0189ae12 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -1081,8 +1081,9 @@ Reduction LoadElimination::ReduceLoadElement(Node* node) {
// Make sure we don't resurrect dead {replacement} nodes.
// Skip lowering if the type of the {replacement} node is not a subtype
// of the original {node}'s type.
- // TODO(tebbi): We should insert a {TypeGuard} for the intersection of
- // these two types here once we properly handle {Type::None} everywhere.
+ // TODO(turbofan): We should insert a {TypeGuard} for the intersection
+ // of these two types here once we properly handle {Type::None}
+ // everywhere.
if (!replacement->IsDead() && NodeProperties::GetType(replacement)
.Is(NodeProperties::GetType(node))) {
ReplaceWithValue(node, replacement, effect);
diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc
index 11c610fc88..582eebd8f5 100644
--- a/deps/v8/src/compiler/loop-analysis.cc
+++ b/deps/v8/src/compiler/loop-analysis.cc
@@ -542,7 +542,44 @@ LoopTree* LoopFinder::BuildLoopTree(Graph* graph, TickCounter* tick_counter,
return loop_tree;
}
-Node* LoopTree::HeaderNode(Loop* loop) {
+bool LoopFinder::HasMarkedExits(LoopTree* loop_tree,
+ const LoopTree::Loop* loop) {
+ // Look for returns and if projections that are outside the loop but whose
+ // control input is inside the loop.
+ Node* loop_node = loop_tree->GetLoopControl(loop);
+ for (Node* node : loop_tree->LoopNodes(loop)) {
+ for (Node* use : node->uses()) {
+ if (!loop_tree->Contains(loop, use)) {
+ bool unmarked_exit;
+ switch (node->opcode()) {
+ case IrOpcode::kLoopExit:
+ unmarked_exit = (node->InputAt(1) != loop_node);
+ break;
+ case IrOpcode::kLoopExitValue:
+ case IrOpcode::kLoopExitEffect:
+ unmarked_exit = (node->InputAt(1)->InputAt(1) != loop_node);
+ break;
+ default:
+ unmarked_exit = (use->opcode() != IrOpcode::kTerminate);
+ }
+ if (unmarked_exit) {
+ if (FLAG_trace_turbo_loop) {
+ Node* loop_node = loop_tree->GetLoopControl(loop);
+ PrintF(
+ "Cannot peel loop %i. Loop exit without explicit mark: Node %i "
+ "(%s) is inside loop, but its use %i (%s) is outside.\n",
+ loop_node->id(), node->id(), node->op()->mnemonic(), use->id(),
+ use->op()->mnemonic());
+ }
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+}
+
+Node* LoopTree::HeaderNode(const Loop* loop) {
Node* first = *HeaderNodes(loop).begin();
if (first->opcode() == IrOpcode::kLoop) return first;
DCHECK(IrOpcode::IsPhiOpcode(first->opcode()));
@@ -551,6 +588,54 @@ Node* LoopTree::HeaderNode(Loop* loop) {
return header;
}
+Node* NodeCopier::map(Node* node, uint32_t copy_index) {
+ DCHECK_LT(copy_index, copy_count_);
+ if (node_map_.Get(node) == 0) return node;
+ return copies_->at(node_map_.Get(node) + copy_index);
+}
+
+void NodeCopier::Insert(Node* original, const NodeVector& new_copies) {
+ DCHECK_EQ(new_copies.size(), copy_count_);
+ node_map_.Set(original, copies_->size() + 1);
+ copies_->push_back(original);
+ copies_->insert(copies_->end(), new_copies.begin(), new_copies.end());
+}
+
+void NodeCopier::Insert(Node* original, Node* copy) {
+ DCHECK_EQ(copy_count_, 1);
+ node_map_.Set(original, copies_->size() + 1);
+ copies_->push_back(original);
+ copies_->push_back(copy);
+}
+
+void NodeCopier::CopyNodes(Graph* graph, Zone* tmp_zone_, Node* dead,
+ NodeRange nodes,
+ SourcePositionTable* source_positions,
+ NodeOriginTable* node_origins) {
+ // Copy all the nodes first.
+ for (Node* original : nodes) {
+ SourcePositionTable::Scope position(
+ source_positions, source_positions->GetSourcePosition(original));
+ NodeOriginTable::Scope origin_scope(node_origins, "copy nodes", original);
+ node_map_.Set(original, copies_->size() + 1);
+ copies_->push_back(original);
+ for (uint32_t copy_index = 0; copy_index < copy_count_; copy_index++) {
+ Node* copy = graph->CloneNode(original);
+ copies_->push_back(copy);
+ }
+ }
+
+ // Fix inputs of the copies.
+ for (Node* original : nodes) {
+ for (uint32_t copy_index = 0; copy_index < copy_count_; copy_index++) {
+ Node* copy = map(original, copy_index);
+ for (int i = 0; i < copy->InputCount(); i++) {
+ copy->ReplaceInput(i, map(original->InputAt(i), copy_index));
+ }
+ }
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h
index 043833a54c..3cce611be9 100644
--- a/deps/v8/src/compiler/loop-analysis.h
+++ b/deps/v8/src/compiler/loop-analysis.h
@@ -7,7 +7,11 @@
#include "src/base/iterator.h"
#include "src/common/globals.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph.h"
+#include "src/compiler/node-marker.h"
+#include "src/compiler/node-origin-table.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/zone/zone-containers.h"
@@ -41,11 +45,11 @@ class LoopTree : public ZoneObject {
public:
Loop* parent() const { return parent_; }
const ZoneVector<Loop*>& children() const { return children_; }
- size_t HeaderSize() const { return body_start_ - header_start_; }
- size_t BodySize() const { return exits_start_ - body_start_; }
- size_t ExitsSize() const { return exits_end_ - exits_start_; }
- size_t TotalSize() const { return exits_end_ - header_start_; }
- size_t depth() const { return static_cast<size_t>(depth_); }
+ uint32_t HeaderSize() const { return body_start_ - header_start_; }
+ uint32_t BodySize() const { return exits_start_ - body_start_; }
+ uint32_t ExitsSize() const { return exits_end_ - exits_start_; }
+ uint32_t TotalSize() const { return exits_end_ - header_start_; }
+ uint32_t depth() const { return depth_; }
private:
friend class LoopTree;
@@ -77,7 +81,7 @@ class LoopTree : public ZoneObject {
// Check if the {loop} contains the {node}, either directly or by containing
// a nested loop that contains {node}.
- bool Contains(Loop* loop, Node* node) {
+ bool Contains(const Loop* loop, Node* node) {
for (Loop* c = ContainingLoop(node); c != nullptr; c = c->parent_) {
if (c == loop) return true;
}
@@ -87,40 +91,51 @@ class LoopTree : public ZoneObject {
// Return the list of outer loops.
const ZoneVector<Loop*>& outer_loops() const { return outer_loops_; }
+ // Return a new vector containing the inner loops.
+ ZoneVector<const Loop*> inner_loops() const {
+ ZoneVector<const Loop*> inner_loops(zone_);
+ for (const Loop& loop : all_loops_) {
+ if (loop.children().empty()) {
+ inner_loops.push_back(&loop);
+ }
+ }
+ return inner_loops;
+ }
+
// Return the unique loop number for a given loop. Loop numbers start at {1}.
- int LoopNum(Loop* loop) const {
+ int LoopNum(const Loop* loop) const {
return 1 + static_cast<int>(loop - &all_loops_[0]);
}
// Return a range which can iterate over the header nodes of {loop}.
- NodeRange HeaderNodes(Loop* loop) {
+ NodeRange HeaderNodes(const Loop* loop) {
return NodeRange(&loop_nodes_[0] + loop->header_start_,
&loop_nodes_[0] + loop->body_start_);
}
// Return the header control node for a loop.
- Node* HeaderNode(Loop* loop);
+ Node* HeaderNode(const Loop* loop);
// Return a range which can iterate over the body nodes of {loop}.
- NodeRange BodyNodes(Loop* loop) {
+ NodeRange BodyNodes(const Loop* loop) {
return NodeRange(&loop_nodes_[0] + loop->body_start_,
&loop_nodes_[0] + loop->exits_start_);
}
// Return a range which can iterate over the body nodes of {loop}.
- NodeRange ExitNodes(Loop* loop) {
+ NodeRange ExitNodes(const Loop* loop) {
return NodeRange(&loop_nodes_[0] + loop->exits_start_,
&loop_nodes_[0] + loop->exits_end_);
}
// Return a range which can iterate over the nodes of {loop}.
- NodeRange LoopNodes(Loop* loop) {
+ NodeRange LoopNodes(const Loop* loop) {
return NodeRange(&loop_nodes_[0] + loop->header_start_,
&loop_nodes_[0] + loop->exits_end_);
}
// Return the node that represents the control, i.e. the loop node itself.
- Node* GetLoopControl(Loop* loop) {
+ Node* GetLoopControl(const Loop* loop) {
// TODO(turbofan): make the loop control node always first?
for (Node* node : HeaderNodes(loop)) {
if (node->opcode() == IrOpcode::kLoop) return node;
@@ -161,8 +176,49 @@ class V8_EXPORT_PRIVATE LoopFinder {
// Build a loop tree for the entire graph.
static LoopTree* BuildLoopTree(Graph* graph, TickCounter* tick_counter,
Zone* temp_zone);
+
+ static bool HasMarkedExits(LoopTree* loop_tree_, const LoopTree::Loop* loop);
};
+// Copies a range of nodes any number of times.
+class NodeCopier {
+ public:
+ // {max}: The maximum number of nodes that this copier will track, including
+  // the original nodes and all copies.
+ // {p}: A vector that holds the original nodes and all copies.
+ // {copy_count}: How many times the nodes should be copied.
+ NodeCopier(Graph* graph, uint32_t max, NodeVector* p, uint32_t copy_count)
+ : node_map_(graph, max), copies_(p), copy_count_(copy_count) {
+ DCHECK_GT(copy_count, 0);
+ }
+
+ // Returns the mapping of {node} in the {copy_index}'th copy, or {node} itself
+ // if it is not present in the mapping. The copies are 0-indexed.
+ Node* map(Node* node, uint32_t copy_index);
+
+ // Helper version of {map} for one copy.
+ V8_INLINE Node* map(Node* node) { return map(node, 0); }
+
+ // Insert a new mapping from {original} to {new_copies} into the copier.
+ void Insert(Node* original, const NodeVector& new_copies);
+
+ // Helper version of {Insert} for one copy.
+ void Insert(Node* original, Node* copy);
+
+ void CopyNodes(Graph* graph, Zone* tmp_zone_, Node* dead, NodeRange nodes,
+ SourcePositionTable* source_positions,
+ NodeOriginTable* node_origins);
+
+ bool Marked(Node* node) { return node_map_.Get(node) > 0; }
+
+ private:
+ // Maps a node to its index in the {copies_} vector.
+ NodeMarker<size_t> node_map_;
+ // The vector which contains the mapped nodes.
+ NodeVector* copies_;
+ // How many copies of the nodes should be generated.
+ const uint32_t copy_count_;
+};
} // namespace compiler
} // namespace internal
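A self-contained sketch of the two-pass scheme NodeCopier::CopyNodes uses: clone every node in the region first while recording an original-to-copy mapping, then rewire the clones' inputs through that mapping so edges inside the region point at the copies while edges leaving the region are left alone. {MiniNode} and the id-based graph are hypothetical; the real copier works on Graph nodes with a NodeMarker.

#include <cstdio>
#include <unordered_map>
#include <vector>

struct MiniNode {
  int id;
  std::vector<int> inputs;  // ids of the nodes this node consumes
};

std::vector<MiniNode> CopyRegion(const std::vector<MiniNode>& region,
                                 int first_fresh_id) {
  std::unordered_map<int, int> copy_of;  // original id -> copy id
  std::vector<MiniNode> copies;
  // Pass 1: clone every node and remember the mapping. The clones' inputs
  // still refer to the originals after this pass.
  for (const MiniNode& n : region) {
    copy_of[n.id] = first_fresh_id;
    copies.push_back({first_fresh_id++, n.inputs});
  }
  // Pass 2: rewire inputs. Inputs that were copied now point at their copy;
  // inputs from outside the region stay unchanged, mirroring NodeCopier::map
  // returning the node itself when it is not in the mapping.
  for (MiniNode& copy : copies) {
    for (int& input : copy.inputs) {
      auto it = copy_of.find(input);
      if (it != copy_of.end()) input = it->second;
    }
  }
  return copies;
}

int main() {
  // Node 0 lives outside the copied region; nodes 1..3 are copied.
  std::vector<MiniNode> region = {{1, {}}, {2, {1}}, {3, {2, 0}}};
  for (const MiniNode& copy : CopyRegion(region, 10)) {
    std::printf("copy %d, first input %d\n", copy.id,
                copy.inputs.empty() ? -1 : copy.inputs[0]);
  }
  return 0;
}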
diff --git a/deps/v8/src/compiler/loop-peeling.cc b/deps/v8/src/compiler/loop-peeling.cc
index 3cbf7b583a..e666f8c642 100644
--- a/deps/v8/src/compiler/loop-peeling.cc
+++ b/deps/v8/src/compiler/loop-peeling.cc
@@ -3,9 +3,11 @@
// found in the LICENSE file.
#include "src/compiler/loop-peeling.h"
+
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph.h"
+#include "src/compiler/loop-analysis.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/node-origin-table.h"
#include "src/compiler/node-properties.h"
@@ -103,59 +105,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-struct Peeling {
- // Maps a node to its index in the {pairs} vector.
- NodeMarker<size_t> node_map;
- // The vector which contains the mapped nodes.
- NodeVector* pairs;
-
- Peeling(Graph* graph, size_t max, NodeVector* p)
- : node_map(graph, static_cast<uint32_t>(max)), pairs(p) {}
-
- Node* map(Node* node) {
- if (node_map.Get(node) == 0) return node;
- return pairs->at(node_map.Get(node));
- }
-
- void Insert(Node* original, Node* copy) {
- node_map.Set(original, 1 + pairs->size());
- pairs->push_back(original);
- pairs->push_back(copy);
- }
-
- void CopyNodes(Graph* graph, Zone* tmp_zone_, Node* dead, NodeRange nodes,
- SourcePositionTable* source_positions,
- NodeOriginTable* node_origins) {
- NodeVector inputs(tmp_zone_);
- // Copy all the nodes first.
- for (Node* node : nodes) {
- SourcePositionTable::Scope position(
- source_positions, source_positions->GetSourcePosition(node));
- NodeOriginTable::Scope origin_scope(node_origins, "copy nodes", node);
- inputs.clear();
- for (Node* input : node->inputs()) {
- inputs.push_back(map(input));
- }
- Node* copy = graph->NewNode(node->op(), node->InputCount(), &inputs[0]);
- if (NodeProperties::IsTyped(node)) {
- NodeProperties::SetType(copy, NodeProperties::GetType(node));
- }
- Insert(node, copy);
- }
-
- // Fix remaining inputs of the copies.
- for (Node* original : nodes) {
- Node* copy = pairs->at(node_map.Get(original));
- for (int i = 0; i < copy->InputCount(); i++) {
- copy->ReplaceInput(i, map(original->InputAt(i)));
- }
- }
- }
-
- bool Marked(Node* node) { return node_map.Get(node) > 0; }
-};
-
-
class PeeledIterationImpl : public PeeledIteration {
public:
NodeVector node_pairs_;
@@ -173,43 +122,6 @@ Node* PeeledIteration::map(Node* node) {
return node;
}
-bool LoopPeeler::CanPeel(LoopTree::Loop* loop) {
- // Look for returns and if projections that are outside the loop but whose
- // control input is inside the loop.
- Node* loop_node = loop_tree_->GetLoopControl(loop);
- for (Node* node : loop_tree_->LoopNodes(loop)) {
- for (Node* use : node->uses()) {
- if (!loop_tree_->Contains(loop, use)) {
- bool unmarked_exit;
- switch (node->opcode()) {
- case IrOpcode::kLoopExit:
- unmarked_exit = (node->InputAt(1) != loop_node);
- break;
- case IrOpcode::kLoopExitValue:
- case IrOpcode::kLoopExitEffect:
- unmarked_exit = (node->InputAt(1)->InputAt(1) != loop_node);
- break;
- default:
- unmarked_exit = (use->opcode() != IrOpcode::kTerminate);
- }
- if (unmarked_exit) {
- if (FLAG_trace_turbo_loop) {
- Node* loop_node = loop_tree_->GetLoopControl(loop);
- PrintF(
- "Cannot peel loop %i. Loop exit without explicit mark: Node %i "
- "(%s) is inside "
- "loop, but its use %i (%s) is outside.\n",
- loop_node->id(), node->id(), node->op()->mnemonic(), use->id(),
- use->op()->mnemonic());
- }
- return false;
- }
- }
- }
- }
- return true;
-}
-
PeeledIteration* LoopPeeler::Peel(LoopTree::Loop* loop) {
if (!CanPeel(loop)) return nullptr;
@@ -217,19 +129,19 @@ PeeledIteration* LoopPeeler::Peel(LoopTree::Loop* loop) {
// Construct the peeled iteration.
//============================================================================
PeeledIterationImpl* iter = tmp_zone_->New<PeeledIterationImpl>(tmp_zone_);
- size_t estimated_peeled_size = 5 + (loop->TotalSize()) * 2;
- Peeling peeling(graph_, estimated_peeled_size, &iter->node_pairs_);
+ uint32_t estimated_peeled_size = 5 + loop->TotalSize() * 2;
+ NodeCopier copier(graph_, estimated_peeled_size, &iter->node_pairs_, 1);
Node* dead = graph_->NewNode(common_->Dead());
// Map the loop header nodes to their entry values.
for (Node* node : loop_tree_->HeaderNodes(loop)) {
- peeling.Insert(node, node->InputAt(kAssumedLoopEntryIndex));
+ copier.Insert(node, node->InputAt(kAssumedLoopEntryIndex));
}
// Copy all the nodes of loop body for the peeled iteration.
- peeling.CopyNodes(graph_, tmp_zone_, dead, loop_tree_->BodyNodes(loop),
- source_positions_, node_origins_);
+ copier.CopyNodes(graph_, tmp_zone_, dead, loop_tree_->BodyNodes(loop),
+ source_positions_, node_origins_);
//============================================================================
// Replace the entry to the loop with the output of the peeled iteration.
@@ -242,7 +154,7 @@ PeeledIteration* LoopPeeler::Peel(LoopTree::Loop* loop) {
// from the peeled iteration.
NodeVector inputs(tmp_zone_);
for (int i = 1; i < loop_node->InputCount(); i++) {
- inputs.push_back(peeling.map(loop_node->InputAt(i)));
+ inputs.push_back(copier.map(loop_node->InputAt(i)));
}
Node* merge =
graph_->NewNode(common_->Merge(backedges), backedges, &inputs[0]);
@@ -252,7 +164,7 @@ PeeledIteration* LoopPeeler::Peel(LoopTree::Loop* loop) {
if (node->opcode() == IrOpcode::kLoop) continue; // already done.
inputs.clear();
for (int i = 0; i < backedges; i++) {
- inputs.push_back(peeling.map(node->InputAt(1 + i)));
+ inputs.push_back(copier.map(node->InputAt(1 + i)));
}
for (Node* input : inputs) {
if (input != inputs[0]) { // Non-redundant phi.
@@ -269,9 +181,9 @@ PeeledIteration* LoopPeeler::Peel(LoopTree::Loop* loop) {
// Only one backedge, simply replace the input to loop with output of
// peeling.
for (Node* node : loop_tree_->HeaderNodes(loop)) {
- node->ReplaceInput(0, peeling.map(node->InputAt(1)));
+ node->ReplaceInput(0, copier.map(node->InputAt(1)));
}
- new_entry = peeling.map(loop_node->InputAt(1));
+ new_entry = copier.map(loop_node->InputAt(1));
}
loop_node->ReplaceInput(0, new_entry);
@@ -282,18 +194,18 @@ PeeledIteration* LoopPeeler::Peel(LoopTree::Loop* loop) {
switch (exit->opcode()) {
case IrOpcode::kLoopExit:
// Change the loop exit node to a merge node.
- exit->ReplaceInput(1, peeling.map(exit->InputAt(0)));
+ exit->ReplaceInput(1, copier.map(exit->InputAt(0)));
NodeProperties::ChangeOp(exit, common_->Merge(2));
break;
case IrOpcode::kLoopExitValue:
// Change exit marker to phi.
- exit->InsertInput(graph_->zone(), 1, peeling.map(exit->InputAt(0)));
+ exit->InsertInput(graph_->zone(), 1, copier.map(exit->InputAt(0)));
NodeProperties::ChangeOp(
exit, common_->Phi(LoopExitValueRepresentationOf(exit->op()), 2));
break;
case IrOpcode::kLoopExitEffect:
// Change effect exit marker to effect phi.
- exit->InsertInput(graph_->zone(), 1, peeling.map(exit->InputAt(0)));
+ exit->InsertInput(graph_->zone(), 1, copier.map(exit->InputAt(0)));
NodeProperties::ChangeOp(exit, common_->EffectPhi(2));
break;
default:
diff --git a/deps/v8/src/compiler/loop-peeling.h b/deps/v8/src/compiler/loop-peeling.h
index 730900af54..af7b5f6ce0 100644
--- a/deps/v8/src/compiler/loop-peeling.h
+++ b/deps/v8/src/compiler/loop-peeling.h
@@ -43,7 +43,9 @@ class V8_EXPORT_PRIVATE LoopPeeler {
tmp_zone_(tmp_zone),
source_positions_(source_positions),
node_origins_(node_origins) {}
- bool CanPeel(LoopTree::Loop* loop);
+ bool CanPeel(LoopTree::Loop* loop) {
+ return LoopFinder::HasMarkedExits(loop_tree_, loop);
+ }
PeeledIteration* Peel(LoopTree::Loop* loop);
void PeelInnerLoopsOfTree();
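What peeling does, shown at the source level instead of on the TurboFan graph: one copy of the loop body is placed in front of the loop so the first iteration runs straight-line, and the loop itself handles the remaining iterations. Purely illustrative; no V8 types are involved.

#include <cstdio>

int SumBeforePeeling(const int* xs, int n) {
  int sum = 0;
  for (int i = 0; i < n; i++) sum += xs[i];
  return sum;
}

int SumAfterPeeling(const int* xs, int n) {
  int sum = 0;
  int i = 0;
  if (i < n) {
    sum += xs[i];  // peeled copy of the first iteration
    i++;
    for (; i < n; i++) {
      sum += xs[i];  // remaining iterations stay inside the loop
    }
  }
  return sum;
}

int main() {
  const int xs[] = {1, 2, 3};
  std::printf("%d %d\n", SumBeforePeeling(xs, 3), SumAfterPeeling(xs, 3));
  return 0;
}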
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 5eeb5dc248..16ff3ff936 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -582,7 +582,7 @@ class MachineRepresentationChecker {
case MachineRepresentation::kTaggedSigned:
if (COMPRESS_POINTERS_BOOL &&
node->opcode() == IrOpcode::kStore &&
- CanBeTaggedPointer(
+ IsAnyTagged(
StoreRepresentationOf(node->op()).representation())) {
CheckValueInputIsCompressedOrTagged(node, 2);
} else {
@@ -977,7 +977,7 @@ class MachineRepresentationChecker {
return IsAnyCompressed(actual);
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
- // TODO(tebbi): At the moment, the machine graph doesn't contain
+ // TODO(turbofan): At the moment, the machine graph doesn't contain
// reliable information if a node is kTaggedSigned, kTaggedPointer or
// kTagged, and often this is context-dependent. We should at least
// check for obvious violations: kTaggedSigned where we expect
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 918caaf8fd..2220cdb82f 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -145,6 +145,19 @@ class Word64Adapter {
MachineOperatorReducer* r_;
};
+namespace {
+
+// TODO(jgruber): Consider replacing all uses of this function by
+// std::numeric_limits<T>::quiet_NaN().
+template <class T>
+T SilenceNaN(T x) {
+ DCHECK(std::isnan(x));
+ // Do some calculation to make a signalling NaN quiet.
+ return x - x;
+}
+
+} // namespace
+
MachineOperatorReducer::MachineOperatorReducer(Editor* editor,
MachineGraph* mcgraph,
bool allow_signalling_nan)
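A standalone demonstration of the trick SilenceNaN relies on: under IEEE 754 default exception handling, any arithmetic on a signalling NaN yields a quiet NaN, so {x - x} quiets {x}. Nothing below is V8 API; the bit test assumes the usual binary32 layout (the quiet bit is the mantissa MSB), and the volatile qualifier only guards against compile-time constant folding.

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits>

static std::uint32_t Bits(float f) {
  std::uint32_t u;
  std::memcpy(&u, &f, sizeof u);
  return u;
}

int main() {
  volatile float snan = std::numeric_limits<float>::signaling_NaN();
  float quieted = snan - snan;  // the same calculation SilenceNaN performs
  std::printf("signalling NaN: 0x%08x quiet bit=%u\n",
              static_cast<unsigned>(Bits(snan)), (Bits(snan) >> 22) & 1u);
  std::printf("quieted result: 0x%08x quiet bit=%u\n",
              static_cast<unsigned>(Bits(quieted)), (Bits(quieted) >> 22) & 1u);
  return 0;
}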
@@ -465,14 +478,10 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return Replace(m.left().node()); // x - 0 => x
}
if (m.right().IsNaN()) { // x - NaN => NaN
- // Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat32(m.right().ResolvedValue() -
- m.right().ResolvedValue());
+ return ReplaceFloat32(SilenceNaN(m.right().ResolvedValue()));
}
if (m.left().IsNaN()) { // NaN - x => NaN
- // Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat32(m.left().ResolvedValue() -
- m.left().ResolvedValue());
+ return ReplaceFloat32(SilenceNaN(m.left().ResolvedValue()));
}
if (m.IsFoldable()) { // L - R => (L - R)
return ReplaceFloat32(m.left().ResolvedValue() -
@@ -499,6 +508,12 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat64Add: {
Float64BinopMatcher m(node);
+ if (m.right().IsNaN()) { // x + NaN => NaN
+ return ReplaceFloat64(SilenceNaN(m.right().ResolvedValue()));
+ }
+ if (m.left().IsNaN()) { // NaN + x => NaN
+ return ReplaceFloat64(SilenceNaN(m.left().ResolvedValue()));
+ }
if (m.IsFoldable()) { // K + K => K (K stands for arbitrary constants)
return ReplaceFloat64(m.left().ResolvedValue() +
m.right().ResolvedValue());
@@ -512,14 +527,10 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return Replace(m.left().node()); // x - 0 => x
}
if (m.right().IsNaN()) { // x - NaN => NaN
- // Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat64(m.right().ResolvedValue() -
- m.right().ResolvedValue());
+ return ReplaceFloat64(SilenceNaN(m.right().ResolvedValue()));
}
if (m.left().IsNaN()) { // NaN - x => NaN
- // Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat64(m.left().ResolvedValue() -
- m.left().ResolvedValue());
+ return ReplaceFloat64(SilenceNaN(m.left().ResolvedValue()));
}
if (m.IsFoldable()) { // L - R => (L - R)
return ReplaceFloat64(m.left().ResolvedValue() -
@@ -555,9 +566,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return Changed(node);
}
if (m.right().IsNaN()) { // x * NaN => NaN
- // Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat64(m.right().ResolvedValue() -
- m.right().ResolvedValue());
+ return ReplaceFloat64(SilenceNaN(m.right().ResolvedValue()));
}
if (m.IsFoldable()) { // K * K => K (K stands for arbitrary constants)
return ReplaceFloat64(m.left().ResolvedValue() *
@@ -576,14 +585,10 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return Replace(m.left().node()); // x / 1.0 => x
// TODO(ahaas): We could do x / 1.0 = x if we knew that x is not an sNaN.
if (m.right().IsNaN()) { // x / NaN => NaN
- // Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat64(m.right().ResolvedValue() -
- m.right().ResolvedValue());
+ return ReplaceFloat64(SilenceNaN(m.right().ResolvedValue()));
}
if (m.left().IsNaN()) { // NaN / x => NaN
- // Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat64(m.left().ResolvedValue() -
- m.left().ResolvedValue());
+ return ReplaceFloat64(SilenceNaN(m.left().ResolvedValue()));
}
if (m.IsFoldable()) { // K / K => K (K stands for arbitrary constants)
return ReplaceFloat64(
@@ -610,10 +615,10 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReplaceFloat64(std::numeric_limits<double>::quiet_NaN());
}
if (m.right().IsNaN()) { // x % NaN => NaN
- return Replace(m.right().node());
+ return ReplaceFloat64(SilenceNaN(m.right().ResolvedValue()));
}
if (m.left().IsNaN()) { // NaN % x => NaN
- return Replace(m.left().node());
+ return ReplaceFloat64(SilenceNaN(m.left().ResolvedValue()));
}
if (m.IsFoldable()) { // K % K => K (K stands for arbitrary constants)
return ReplaceFloat64(
@@ -660,10 +665,10 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kFloat64Atan2: {
Float64BinopMatcher m(node);
if (m.right().IsNaN()) {
- return Replace(m.right().node());
+ return ReplaceFloat64(SilenceNaN(m.right().ResolvedValue()));
}
if (m.left().IsNaN()) {
- return Replace(m.left().node());
+ return ReplaceFloat64(SilenceNaN(m.left().ResolvedValue()));
}
if (m.IsFoldable()) {
return ReplaceFloat64(base::ieee754::atan2(m.left().ResolvedValue(),
@@ -732,21 +737,10 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
m.right().ResolvedValue()));
} else if (m.right().Is(0.0)) { // x ** +-0.0 => 1.0
return ReplaceFloat64(1.0);
- } else if (m.right().Is(-2.0)) { // x ** -2.0 => 1 / (x * x)
- node->ReplaceInput(0, Float64Constant(1.0));
- node->ReplaceInput(1, Float64Mul(m.left().node(), m.left().node()));
- NodeProperties::ChangeOp(node, machine()->Float64Div());
- return Changed(node);
} else if (m.right().Is(2.0)) { // x ** 2.0 => x * x
node->ReplaceInput(1, m.left().node());
NodeProperties::ChangeOp(node, machine()->Float64Mul());
return Changed(node);
- } else if (m.right().Is(-0.5)) {
- // x ** 0.5 => 1 / (if x <= -Infinity then Infinity else sqrt(0.0 + x))
- node->ReplaceInput(0, Float64Constant(1.0));
- node->ReplaceInput(1, Float64PowHalf(m.left().node()));
- NodeProperties::ChangeOp(node, machine()->Float64Div());
- return Changed(node);
} else if (m.right().Is(0.5)) {
// x ** 0.5 => if x <= -Infinity then Infinity else sqrt(0.0 + x)
return Replace(Float64PowHalf(m.left().node()));
@@ -781,8 +775,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
Float32Matcher m(node->InputAt(0));
if (m.HasResolvedValue()) {
if (!allow_signalling_nan_ && std::isnan(m.ResolvedValue())) {
- // Do some calculation to make guarantee the value is a quiet NaN.
- return ReplaceFloat64(m.ResolvedValue() + m.ResolvedValue());
+ return ReplaceFloat64(SilenceNaN(m.ResolvedValue()));
}
return ReplaceFloat64(m.ResolvedValue());
}
@@ -856,10 +849,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kTruncateFloat64ToFloat32: {
Float64Matcher m(node->InputAt(0));
if (m.HasResolvedValue()) {
- if (!allow_signalling_nan_ && std::isnan(m.ResolvedValue())) {
- // Do some calculation to make guarantee the value is a quiet NaN.
- return ReplaceFloat32(
- DoubleToFloat32(m.ResolvedValue() + m.ResolvedValue()));
+ if (!allow_signalling_nan_ && m.IsNaN()) {
+ return ReplaceFloat32(DoubleToFloat32(SilenceNaN(m.ResolvedValue())));
}
return ReplaceFloat32(DoubleToFloat32(m.ResolvedValue()));
}
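The branches above previously quieted a signalling NaN by hand ("NaN - NaN"); this change routes every such fold through a single SilenceNaN helper instead. The helper's definition is not part of these hunks, so the following is only a minimal sketch, assuming it relies on the same arithmetic trick the deleted inline code used:

#include <cassert>
#include <cmath>

// Sketch (assumption): any arithmetic on a signalling NaN yields a quiet NaN,
// so x - x keeps the value NaN while guaranteeing it is quiet.
double SilenceNaN(double x) {
  assert(std::isnan(x));
  return x - x;
}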
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 9a90fa6f7c..f90f9345a3 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -3,8 +3,8 @@
// found in the LICENSE file.
#include "src/compiler/machine-operator.h"
-#include <type_traits>
+#include "src/base/lazy-instance.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
@@ -207,6 +207,17 @@ ShiftKind ShiftKindOf(Operator const* op) {
return OpParameter<ShiftKind>(op);
}
+size_t hash_value(TruncateKind kind) { return static_cast<size_t>(kind); }
+
+std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
+ switch (kind) {
+ case TruncateKind::kArchitectureDefault:
+ return os << "kArchitectureDefault";
+ case TruncateKind::kSetOverflowToMin:
+ return os << "kSetOverflowToMin";
+ }
+}
+
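The hash_value and operator<< overloads added here follow the usual pattern for enum parameters carried by Operator1: the parameter presumably has to be hashable (so parameterized operators compare and cache correctly) and printable (for graph and trace output). A purely illustrative usage sketch:

#include <iostream>
#include <sstream>

// Assumes the TruncateKind declarations from machine-operator.h.
void DescribeTruncateKind(TruncateKind kind) {
  std::ostringstream os;
  os << kind;  // uses the operator<< defined above
  std::cout << os.str() << " hashes to " << hash_value(kind) << "\n";
}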
// The format is:
// V(Name, properties, value_input_count, control_input_count, output_count)
#define PURE_BINARY_OP_LIST_32(V) \
@@ -382,6 +393,9 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(F64x2Floor, Operator::kNoProperties, 1, 0, 1) \
V(F64x2Trunc, Operator::kNoProperties, 1, 0, 1) \
V(F64x2NearestInt, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2ConvertLowI32x4S, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2ConvertLowI32x4U, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2PromoteLowF32x4, Operator::kNoProperties, 1, 0, 1) \
V(F32x4Splat, Operator::kNoProperties, 1, 0, 1) \
V(F32x4SConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
V(F32x4UConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
@@ -409,8 +423,10 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(F32x4Floor, Operator::kNoProperties, 1, 0, 1) \
V(F32x4Trunc, Operator::kNoProperties, 1, 0, 1) \
V(F32x4NearestInt, Operator::kNoProperties, 1, 0, 1) \
+ V(F32x4DemoteF64x2Zero, Operator::kNoProperties, 1, 0, 1) \
V(I64x2Splat, Operator::kNoProperties, 1, 0, 1) \
V(I64x2SplatI32Pair, Operator::kNoProperties, 2, 0, 1) \
+ V(I64x2Abs, Operator::kNoProperties, 1, 0, 1) \
V(I64x2Neg, Operator::kNoProperties, 1, 0, 1) \
V(I64x2SConvertI32x4Low, Operator::kNoProperties, 1, 0, 1) \
V(I64x2SConvertI32x4High, Operator::kNoProperties, 1, 0, 1) \
@@ -423,6 +439,9 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(I64x2Sub, Operator::kNoProperties, 2, 0, 1) \
V(I64x2Mul, Operator::kCommutative, 2, 0, 1) \
V(I64x2Eq, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2Ne, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2GtS, Operator::kNoProperties, 2, 0, 1) \
+ V(I64x2GeS, Operator::kNoProperties, 2, 0, 1) \
V(I64x2ShrU, Operator::kNoProperties, 2, 0, 1) \
V(I64x2ExtMulLowI32x4S, Operator::kCommutative, 2, 0, 1) \
V(I64x2ExtMulHighI32x4S, Operator::kCommutative, 2, 0, 1) \
@@ -464,6 +483,8 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(I32x4SignSelect, Operator::kNoProperties, 3, 0, 1) \
V(I32x4ExtAddPairwiseI16x8S, Operator::kNoProperties, 1, 0, 1) \
V(I32x4ExtAddPairwiseI16x8U, Operator::kNoProperties, 1, 0, 1) \
+ V(I32x4TruncSatF64x2SZero, Operator::kNoProperties, 1, 0, 1) \
+ V(I32x4TruncSatF64x2UZero, Operator::kNoProperties, 1, 0, 1) \
V(I16x8Splat, Operator::kNoProperties, 1, 0, 1) \
V(I16x8SConvertI8x16Low, Operator::kNoProperties, 1, 0, 1) \
V(I16x8SConvertI8x16High, Operator::kNoProperties, 1, 0, 1) \
@@ -542,11 +563,10 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(S128Not, Operator::kNoProperties, 1, 0, 1) \
V(S128Select, Operator::kNoProperties, 3, 0, 1) \
V(S128AndNot, Operator::kNoProperties, 2, 0, 1) \
- V(V32x4AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(V128AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(V64x2AllTrue, Operator::kNoProperties, 1, 0, 1) \
V(V32x4AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(V16x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \
V(V16x8AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(V8x16AnyTrue, Operator::kNoProperties, 1, 0, 1) \
V(V8x16AllTrue, Operator::kNoProperties, 1, 0, 1) \
V(I8x16Swizzle, Operator::kNoProperties, 2, 0, 1)
@@ -653,6 +673,14 @@ ShiftKind ShiftKindOf(Operator const* op) {
ATOMIC_REPRESENTATION_LIST(V) \
V(kWord64)
+#define ATOMIC_PAIR_BINOP_LIST(V) \
+ V(Add) \
+ V(Sub) \
+ V(And) \
+ V(Or) \
+ V(Xor) \
+ V(Exchange)
+
#define SIMD_LANE_OP_LIST(V) \
V(F64x2, 2) \
V(F32x4, 4) \
@@ -673,397 +701,551 @@ ShiftKind ShiftKindOf(Operator const* op) {
#define STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(V) \
V(4, 0) V(8, 0) V(16, 0) V(4, 4) V(8, 8) V(16, 16)
-template <IrOpcode::Value op, int value_input_count, int effect_input_count,
- int control_input_count, int value_output_count,
- int effect_output_count, int control_output_count>
-struct CachedOperator : public Operator {
- CachedOperator(Operator::Properties properties, const char* mnemonic)
- : Operator(op, properties, mnemonic, value_input_count,
- effect_input_count, control_input_count, value_output_count,
- effect_output_count, control_output_count) {}
-};
-
-template <IrOpcode::Value op, int value_input_count, int control_input_count,
- int value_output_count>
-struct CachedPureOperator : public Operator {
- CachedPureOperator(Operator::Properties properties, const char* mnemonic)
- : Operator(op, Operator::kPure | properties, mnemonic, value_input_count,
- 0, control_input_count, value_output_count, 0, 0) {}
-};
-
-template <class Op>
-const Operator* GetCachedOperator() {
- STATIC_ASSERT(std::is_trivially_destructible<Op>::value);
- static const Op op;
- return &op;
-}
-
-template <class Op>
-const Operator* GetCachedOperator(Operator::Properties properties,
- const char* mnemonic) {
-#ifdef DEBUG
- static Operator::Properties const initial_properties = properties;
- static const char* const initial_mnemonic = mnemonic;
- DCHECK_EQ(properties, initial_properties);
- DCHECK_EQ(mnemonic, initial_mnemonic);
-#endif
- STATIC_ASSERT(std::is_trivially_destructible<Op>::value);
- static const Op op(properties, mnemonic);
- return &op;
-}
-
struct StackSlotOperator : public Operator1<StackSlotRepresentation> {
explicit StackSlotOperator(int size, int alignment)
- : Operator1(IrOpcode::kStackSlot, Operator::kNoDeopt | Operator::kNoThrow,
- "StackSlot", 0, 0, 0, 1, 0, 0,
- StackSlotRepresentation(size, alignment)) {}
-};
-
-template <int size, int alignment>
-struct CachedStackSlotOperator : StackSlotOperator {
- CachedStackSlotOperator() : StackSlotOperator(size, alignment) {}
+ : Operator1<StackSlotRepresentation>(
+ IrOpcode::kStackSlot, Operator::kNoDeopt | Operator::kNoThrow,
+ "StackSlot", 0, 0, 0, 1, 0, 0,
+ StackSlotRepresentation(size, alignment)) {}
};
+struct MachineOperatorGlobalCache {
#define PURE(Name, properties, value_input_count, control_input_count, \
output_count) \
- const OptionalOperator MachineOperatorBuilder::Name() { \
- return OptionalOperator( \
- flags_ & k##Name, \
- GetCachedOperator< \
- CachedPureOperator<IrOpcode::k##Name, value_input_count, \
- control_input_count, output_count>>(properties, \
- #Name)); \
- }
-PURE_OPTIONAL_OP_LIST(PURE)
+ struct Name##Operator final : public Operator { \
+ Name##Operator() \
+ : Operator(IrOpcode::k##Name, Operator::kPure | properties, #Name, \
+ value_input_count, 0, control_input_count, output_count, 0, \
+ 0) {} \
+ }; \
+ Name##Operator k##Name;
+ MACHINE_PURE_OP_LIST(PURE)
+ struct NormalWord32SarOperator final : public Operator1<ShiftKind> {
+ NormalWord32SarOperator()
+ : Operator1<ShiftKind>(IrOpcode::kWord32Sar, Operator::kPure,
+ "Word32Sar", 2, 0, 0, 1, 0, 0,
+ ShiftKind::kNormal) {}
+ };
+ NormalWord32SarOperator kNormalWord32Sar;
+ struct ShiftOutZerosWord32SarOperator final : public Operator1<ShiftKind> {
+ ShiftOutZerosWord32SarOperator()
+ : Operator1<ShiftKind>(IrOpcode::kWord32Sar, Operator::kPure,
+ "Word32Sar", 2, 0, 0, 1, 0, 0,
+ ShiftKind::kShiftOutZeros) {}
+ };
+ ShiftOutZerosWord32SarOperator kShiftOutZerosWord32Sar;
+ struct NormalWord64SarOperator final : public Operator1<ShiftKind> {
+ NormalWord64SarOperator()
+ : Operator1<ShiftKind>(IrOpcode::kWord64Sar, Operator::kPure,
+ "Word64Sar", 2, 0, 0, 1, 0, 0,
+ ShiftKind::kNormal) {}
+ };
+ NormalWord64SarOperator kNormalWord64Sar;
+ struct ShiftOutZerosWord64SarOperator final : public Operator1<ShiftKind> {
+ ShiftOutZerosWord64SarOperator()
+ : Operator1<ShiftKind>(IrOpcode::kWord64Sar, Operator::kPure,
+ "Word64Sar", 2, 0, 0, 1, 0, 0,
+ ShiftKind::kShiftOutZeros) {}
+ };
+ ShiftOutZerosWord64SarOperator kShiftOutZerosWord64Sar;
+
+ struct ArchitectureDefaultTruncateFloat32ToUint32Operator final
+ : public Operator1<TruncateKind> {
+ ArchitectureDefaultTruncateFloat32ToUint32Operator()
+ : Operator1<TruncateKind>(IrOpcode::kTruncateFloat32ToUint32,
+ Operator::kPure, "TruncateFloat32ToUint32", 1,
+ 0, 0, 1, 0, 0,
+ TruncateKind::kArchitectureDefault) {}
+ };
+ ArchitectureDefaultTruncateFloat32ToUint32Operator
+ kArchitectureDefaultTruncateFloat32ToUint32;
+ struct SetOverflowToMinTruncateFloat32ToUint32Operator final
+ : public Operator1<TruncateKind> {
+ SetOverflowToMinTruncateFloat32ToUint32Operator()
+ : Operator1<TruncateKind>(IrOpcode::kTruncateFloat32ToUint32,
+ Operator::kPure, "TruncateFloat32ToUint32", 1,
+ 0, 0, 1, 0, 0,
+ TruncateKind::kSetOverflowToMin) {}
+ };
+ SetOverflowToMinTruncateFloat32ToUint32Operator
+ kSetOverflowToMinTruncateFloat32ToUint32;
+
+ struct ArchitectureDefaultTruncateFloat32ToInt32Operator final
+ : public Operator1<TruncateKind> {
+ ArchitectureDefaultTruncateFloat32ToInt32Operator()
+ : Operator1<TruncateKind>(IrOpcode::kTruncateFloat32ToInt32,
+ Operator::kPure, "TruncateFloat32ToInt32", 1,
+ 0, 0, 1, 0, 0,
+ TruncateKind::kArchitectureDefault) {}
+ };
+ ArchitectureDefaultTruncateFloat32ToInt32Operator
+ kArchitectureDefaultTruncateFloat32ToInt32;
+ struct SetOverflowToMinTruncateFloat32ToInt32Operator final
+ : public Operator1<TruncateKind> {
+ SetOverflowToMinTruncateFloat32ToInt32Operator()
+ : Operator1<TruncateKind>(IrOpcode::kTruncateFloat32ToInt32,
+ Operator::kPure, "TruncateFloat32ToInt32", 1,
+ 0, 0, 1, 0, 0,
+ TruncateKind::kSetOverflowToMin) {}
+ };
+ SetOverflowToMinTruncateFloat32ToInt32Operator
+ kSetOverflowToMinTruncateFloat32ToInt32;
+
+ struct ArchitectureDefaultTruncateFloat64ToInt64Operator final
+ : public Operator1<TruncateKind> {
+ ArchitectureDefaultTruncateFloat64ToInt64Operator()
+ : Operator1(IrOpcode::kTruncateFloat64ToInt64, Operator::kPure,
+ "TruncateFloat64ToInt64", 1, 0, 0, 1, 0, 0,
+ TruncateKind::kArchitectureDefault) {}
+ };
+ ArchitectureDefaultTruncateFloat64ToInt64Operator
+ kArchitectureDefaultTruncateFloat64ToInt64;
+ struct SetOverflowToMinTruncateFloat64ToInt64Operator final
+ : public Operator1<TruncateKind> {
+ SetOverflowToMinTruncateFloat64ToInt64Operator()
+ : Operator1(IrOpcode::kTruncateFloat64ToInt64, Operator::kPure,
+ "TruncateFloat64ToInt64", 1, 0, 0, 1, 0, 0,
+ TruncateKind::kSetOverflowToMin) {}
+ };
+ SetOverflowToMinTruncateFloat64ToInt64Operator
+ kSetOverflowToMinTruncateFloat64ToInt64;
+ PURE_OPTIONAL_OP_LIST(PURE)
#undef PURE
-#define OVERFLOW_OP(Name, properties) \
- const Operator* MachineOperatorBuilder::Name() { \
- return GetCachedOperator< \
- CachedOperator<IrOpcode::k##Name, 2, 0, 1, 2, 0, 0>>( \
- Operator::kEliminatable | Operator::kNoRead | properties, #Name); \
- }
-OVERFLOW_OP_LIST(OVERFLOW_OP)
+ struct PrefetchTemporalOperator final : public Operator {
+ PrefetchTemporalOperator()
+ : Operator(IrOpcode::kPrefetchTemporal,
+ Operator::kNoDeopt | Operator::kNoThrow, "PrefetchTemporal",
+ 2, 1, 1, 0, 1, 0) {}
+ };
+ PrefetchTemporalOperator kPrefetchTemporal;
+ struct PrefetchNonTemporalOperator final : public Operator {
+ PrefetchNonTemporalOperator()
+ : Operator(IrOpcode::kPrefetchNonTemporal,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "PrefetchNonTemporal", 2, 1, 1, 0, 1, 0) {}
+ };
+ PrefetchNonTemporalOperator kPrefetchNonTemporal;
+
+#define OVERFLOW_OP(Name, properties) \
+ struct Name##Operator final : public Operator { \
+ Name##Operator() \
+ : Operator(IrOpcode::k##Name, \
+ Operator::kEliminatable | Operator::kNoRead | properties, \
+ #Name, 2, 0, 1, 2, 0, 0) {} \
+ }; \
+ Name##Operator k##Name;
+ OVERFLOW_OP_LIST(OVERFLOW_OP)
#undef OVERFLOW_OP
-template <ShiftKind kind>
-struct Word32SarOperator : Operator1<ShiftKind> {
- Word32SarOperator()
- : Operator1(IrOpcode::kWord32Sar, Operator::kPure, "Word32Sar", 2, 0, 0,
- 1, 0, 0, kind) {}
-};
-
-const Operator* MachineOperatorBuilder::Word32Sar(ShiftKind kind) {
- switch (kind) {
- case ShiftKind::kNormal:
- return GetCachedOperator<Word32SarOperator<ShiftKind::kNormal>>();
- case ShiftKind::kShiftOutZeros:
- return GetCachedOperator<Word32SarOperator<ShiftKind::kShiftOutZeros>>();
- }
-}
-
-template <ShiftKind kind>
-struct Word64SarOperator : Operator1<ShiftKind> {
- Word64SarOperator()
- : Operator1(IrOpcode::kWord64Sar, Operator::kPure, "Word64Sar", 2, 0, 0,
- 1, 0, 0, kind) {}
-};
-
-const Operator* MachineOperatorBuilder::Word64Sar(ShiftKind kind) {
- switch (kind) {
- case ShiftKind::kNormal:
- return GetCachedOperator<Word64SarOperator<ShiftKind::kNormal>>();
- case ShiftKind::kShiftOutZeros:
- return GetCachedOperator<Word64SarOperator<ShiftKind::kShiftOutZeros>>();
- }
-}
-
-template <MachineRepresentation rep, MachineSemantic sem>
-struct LoadOperator : public Operator1<LoadRepresentation> {
- LoadOperator()
- : Operator1(IrOpcode::kLoad, Operator::kEliminatable, "Load", 2, 1, 1, 1,
- 1, 0, LoadRepresentation(rep, sem)) {}
-};
-
-template <MachineRepresentation rep, MachineSemantic sem>
-struct PoisonedLoadOperator : public Operator1<LoadRepresentation> {
- PoisonedLoadOperator()
- : Operator1(IrOpcode::kPoisonedLoad, Operator::kEliminatable,
- "PoisonedLoad", 2, 1, 1, 1, 1, 0,
- LoadRepresentation(rep, sem)) {}
-};
-
-template <MachineRepresentation rep, MachineSemantic sem>
-struct UnalignedLoadOperator : public Operator1<LoadRepresentation> {
- UnalignedLoadOperator()
- : Operator1(IrOpcode::kUnalignedLoad, Operator::kEliminatable,
- "UnalignedLoad", 2, 1, 1, 1, 1, 0,
- LoadRepresentation(rep, sem)) {}
-};
-
-template <MachineRepresentation rep, MachineSemantic sem>
-struct ProtectedLoadOperator : public Operator1<LoadRepresentation> {
- ProtectedLoadOperator()
- : Operator1(IrOpcode::kProtectedLoad,
- Operator::kNoDeopt | Operator::kNoThrow, "ProtectedLoad", 2,
- 1, 1, 1, 1, 0, LoadRepresentation(rep, sem)) {}
-};
-
-template <MemoryAccessKind kind, LoadTransformation type>
-struct LoadTransformOperator : public Operator1<LoadTransformParameters> {
- LoadTransformOperator()
- : Operator1(IrOpcode::kLoadTransform,
- kind == MemoryAccessKind::kProtected
- ? Operator::kNoDeopt | Operator::kNoThrow
- : Operator::kEliminatable,
- "LoadTransform", 2, 1, 1, 1, 1, 0,
- LoadTransformParameters{kind, type}) {}
-};
-
-template <MemoryAccessKind kind, MachineRepresentation rep, MachineSemantic sem,
- uint8_t laneidx>
-struct LoadLaneOperator : public Operator1<LoadLaneParameters> {
- LoadLaneOperator()
- : Operator1(
- IrOpcode::kLoadLane,
- kind == MemoryAccessKind::kProtected
- ? Operator::kNoDeopt | Operator::kNoThrow
- : Operator::kEliminatable,
- "LoadLane", 3, 1, 1, 1, 1, 0,
- LoadLaneParameters{kind, LoadRepresentation(rep, sem), laneidx}) {}
-};
-
-template <MachineRepresentation rep, WriteBarrierKind write_barrier_kind>
-struct StoreOperator : public Operator1<StoreRepresentation> {
- StoreOperator()
- : Operator1(IrOpcode::kStore,
- Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
- "Store", 3, 1, 1, 0, 1, 0,
- StoreRepresentation(rep, write_barrier_kind)) {}
-};
-
-template <MachineRepresentation rep>
-struct UnalignedStoreOperator : public Operator1<UnalignedStoreRepresentation> {
- UnalignedStoreOperator()
- : Operator1(IrOpcode::kUnalignedStore,
- Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
- "UnalignedStore", 3, 1, 1, 0, 1, 0, rep) {}
-};
-
-template <MachineRepresentation rep>
-struct ProtectedStoreOperator : public Operator1<StoreRepresentation> {
- ProtectedStoreOperator()
- : Operator1(IrOpcode::kProtectedStore,
- Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
- "Store", 3, 1, 1, 0, 1, 0,
- StoreRepresentation(rep, kNoWriteBarrier)) {}
-};
-
-template <MemoryAccessKind kind, MachineRepresentation rep, uint8_t laneidx>
-struct StoreLaneOperator : public Operator1<StoreLaneParameters> {
- StoreLaneOperator()
- : Operator1(IrOpcode::kStoreLane,
- Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
- "StoreLane", 3, 1, 1, 0, 1, 0,
- StoreLaneParameters{kind, rep, laneidx}) {}
-};
+#define LOAD(Type) \
+ struct Load##Type##Operator final : public Operator1<LoadRepresentation> { \
+ Load##Type##Operator() \
+ : Operator1<LoadRepresentation>(IrOpcode::kLoad, \
+ Operator::kEliminatable, "Load", 2, 1, \
+ 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ struct PoisonedLoad##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ PoisonedLoad##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kPoisonedLoad, Operator::kEliminatable, \
+ "PoisonedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ struct UnalignedLoad##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ UnalignedLoad##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kUnalignedLoad, Operator::kEliminatable, \
+ "UnalignedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ struct ProtectedLoad##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ ProtectedLoad##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kProtectedLoad, \
+ Operator::kNoDeopt | Operator::kNoThrow, "ProtectedLoad", 2, 1, \
+ 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ Load##Type##Operator kLoad##Type; \
+ PoisonedLoad##Type##Operator kPoisonedLoad##Type; \
+ UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
+ ProtectedLoad##Type##Operator kProtectedLoad##Type;
+ MACHINE_TYPE_LIST(LOAD)
+#undef LOAD
-template <MachineRepresentation rep, MachineSemantic sem>
-struct Word32AtomicLoadOperator : public Operator1<LoadRepresentation> {
- Word32AtomicLoadOperator()
- : Operator1(IrOpcode::kWord32AtomicLoad, Operator::kEliminatable,
- "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType(rep, sem)) {
- }
-};
+#define LOAD_TRANSFORM_KIND(TYPE, KIND) \
+ struct KIND##LoadTransform##TYPE##Operator final \
+ : public Operator1<LoadTransformParameters> { \
+ KIND##LoadTransform##TYPE##Operator() \
+ : Operator1<LoadTransformParameters>( \
+ IrOpcode::kLoadTransform, \
+ MemoryAccessKind::k##KIND == MemoryAccessKind::kProtected \
+ ? Operator::kNoDeopt | Operator::kNoThrow \
+ : Operator::kEliminatable, \
+ #KIND "LoadTransform", 2, 1, 1, 1, 1, 0, \
+ LoadTransformParameters{MemoryAccessKind::k##KIND, \
+ LoadTransformation::k##TYPE}) {} \
+ }; \
+ KIND##LoadTransform##TYPE##Operator k##KIND##LoadTransform##TYPE;
-template <MachineRepresentation rep, MachineSemantic sem>
-struct Word64AtomicLoadOperator : public Operator1<LoadRepresentation> {
- Word64AtomicLoadOperator()
- : Operator1(IrOpcode::kWord64AtomicLoad, Operator::kEliminatable,
- "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType(rep, sem)) {
- }
-};
+#define LOAD_TRANSFORM(TYPE) \
+ LOAD_TRANSFORM_KIND(TYPE, Normal) \
+ LOAD_TRANSFORM_KIND(TYPE, Unaligned) \
+ LOAD_TRANSFORM_KIND(TYPE, Protected)
-template <MachineRepresentation rep>
-struct Word32AtomicStoreOperator : public Operator1<MachineRepresentation> {
- Word32AtomicStoreOperator()
- : Operator1(IrOpcode::kWord32AtomicStore,
- Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
- "Word32AtomicStore", 3, 1, 1, 0, 1, 0, rep) {}
-};
+ LOAD_TRANSFORM_LIST(LOAD_TRANSFORM)
+#undef LOAD_TRANSFORM
+#undef LOAD_TRANSFORM_KIND
-template <MachineRepresentation rep>
-struct Word64AtomicStoreOperator : public Operator1<MachineRepresentation> {
- Word64AtomicStoreOperator()
- : Operator1(IrOpcode::kWord64AtomicStore,
- Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
- "Word64AtomicStore", 3, 1, 1, 0, 1, 0, rep) {}
-};
+#define STACKSLOT(Size, Alignment) \
+ struct StackSlotOfSize##Size##OfAlignment##Alignment##Operator final \
+ : public StackSlotOperator { \
+ StackSlotOfSize##Size##OfAlignment##Alignment##Operator() \
+ : StackSlotOperator(Size, Alignment) {} \
+ }; \
+ StackSlotOfSize##Size##OfAlignment##Alignment##Operator \
+ kStackSlotOfSize##Size##OfAlignment##Alignment;
+ STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(STACKSLOT)
+#undef STACKSLOT
+
+#define STORE(Type) \
+ struct Store##Type##Operator : public Operator1<StoreRepresentation> { \
+ explicit Store##Type##Operator(WriteBarrierKind write_barrier_kind) \
+ : Operator1<StoreRepresentation>( \
+ IrOpcode::kStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "Store", 3, 1, 1, 0, 1, 0, \
+ StoreRepresentation(MachineRepresentation::Type, \
+ write_barrier_kind)) {} \
+ }; \
+ struct Store##Type##NoWriteBarrier##Operator final \
+ : public Store##Type##Operator { \
+ Store##Type##NoWriteBarrier##Operator() \
+ : Store##Type##Operator(kNoWriteBarrier) {} \
+ }; \
+ struct Store##Type##AssertNoWriteBarrier##Operator final \
+ : public Store##Type##Operator { \
+ Store##Type##AssertNoWriteBarrier##Operator() \
+ : Store##Type##Operator(kAssertNoWriteBarrier) {} \
+ }; \
+ struct Store##Type##MapWriteBarrier##Operator final \
+ : public Store##Type##Operator { \
+ Store##Type##MapWriteBarrier##Operator() \
+ : Store##Type##Operator(kMapWriteBarrier) {} \
+ }; \
+ struct Store##Type##PointerWriteBarrier##Operator final \
+ : public Store##Type##Operator { \
+ Store##Type##PointerWriteBarrier##Operator() \
+ : Store##Type##Operator(kPointerWriteBarrier) {} \
+ }; \
+ struct Store##Type##EphemeronKeyWriteBarrier##Operator final \
+ : public Store##Type##Operator { \
+ Store##Type##EphemeronKeyWriteBarrier##Operator() \
+ : Store##Type##Operator(kEphemeronKeyWriteBarrier) {} \
+ }; \
+ struct Store##Type##FullWriteBarrier##Operator final \
+ : public Store##Type##Operator { \
+ Store##Type##FullWriteBarrier##Operator() \
+ : Store##Type##Operator(kFullWriteBarrier) {} \
+ }; \
+ struct UnalignedStore##Type##Operator final \
+ : public Operator1<UnalignedStoreRepresentation> { \
+ UnalignedStore##Type##Operator() \
+ : Operator1<UnalignedStoreRepresentation>( \
+ IrOpcode::kUnalignedStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "UnalignedStore", 3, 1, 1, 0, 1, 0, \
+ MachineRepresentation::Type) {} \
+ }; \
+ struct ProtectedStore##Type##Operator \
+ : public Operator1<StoreRepresentation> { \
+ explicit ProtectedStore##Type##Operator() \
+ : Operator1<StoreRepresentation>( \
+ IrOpcode::kProtectedStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "Store", 3, 1, 1, 0, 1, 0, \
+ StoreRepresentation(MachineRepresentation::Type, \
+ kNoWriteBarrier)) {} \
+ }; \
+ Store##Type##NoWriteBarrier##Operator kStore##Type##NoWriteBarrier; \
+ Store##Type##AssertNoWriteBarrier##Operator \
+ kStore##Type##AssertNoWriteBarrier; \
+ Store##Type##MapWriteBarrier##Operator kStore##Type##MapWriteBarrier; \
+ Store##Type##PointerWriteBarrier##Operator \
+ kStore##Type##PointerWriteBarrier; \
+ Store##Type##EphemeronKeyWriteBarrier##Operator \
+ kStore##Type##EphemeronKeyWriteBarrier; \
+ Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier; \
+ UnalignedStore##Type##Operator kUnalignedStore##Type; \
+ ProtectedStore##Type##Operator kProtectedStore##Type;
+ MACHINE_REPRESENTATION_LIST(STORE)
+#undef STORE
-#define ATOMIC_OP(op) \
- template <MachineRepresentation rep, MachineSemantic sem> \
- struct op##Operator : public Operator1<MachineType> { \
- op##Operator() \
- : Operator1(IrOpcode::k##op, Operator::kNoDeopt | Operator::kNoThrow, \
- #op, 3, 1, 1, 1, 1, 0, MachineType(rep, sem)) {} \
- };
-ATOMIC_OP(Word32AtomicAdd)
-ATOMIC_OP(Word32AtomicSub)
-ATOMIC_OP(Word32AtomicAnd)
-ATOMIC_OP(Word32AtomicOr)
-ATOMIC_OP(Word32AtomicXor)
-ATOMIC_OP(Word32AtomicExchange)
-ATOMIC_OP(Word64AtomicAdd)
-ATOMIC_OP(Word64AtomicSub)
-ATOMIC_OP(Word64AtomicAnd)
-ATOMIC_OP(Word64AtomicOr)
-ATOMIC_OP(Word64AtomicXor)
-ATOMIC_OP(Word64AtomicExchange)
+#define ATOMIC_LOAD(Type) \
+ struct Word32AtomicLoad##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ Word32AtomicLoad##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kWord32AtomicLoad, Operator::kEliminatable, \
+ "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ Word32AtomicLoad##Type##Operator kWord32AtomicLoad##Type;
+ ATOMIC_TYPE_LIST(ATOMIC_LOAD)
+#undef ATOMIC_LOAD
+
+#define ATOMIC_LOAD(Type) \
+ struct Word64AtomicLoad##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ Word64AtomicLoad##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kWord64AtomicLoad, Operator::kEliminatable, \
+ "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ Word64AtomicLoad##Type##Operator kWord64AtomicLoad##Type;
+ ATOMIC_U64_TYPE_LIST(ATOMIC_LOAD)
+#undef ATOMIC_LOAD
+
+#define ATOMIC_STORE(Type) \
+ struct Word32AtomicStore##Type##Operator \
+ : public Operator1<MachineRepresentation> { \
+ Word32AtomicStore##Type##Operator() \
+ : Operator1<MachineRepresentation>( \
+ IrOpcode::kWord32AtomicStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "Word32AtomicStore", 3, 1, 1, 0, 1, 0, \
+ MachineRepresentation::Type) {} \
+ }; \
+ Word32AtomicStore##Type##Operator kWord32AtomicStore##Type;
+ ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
+#undef ATOMIC_STORE
+
+#define ATOMIC_STORE(Type) \
+ struct Word64AtomicStore##Type##Operator \
+ : public Operator1<MachineRepresentation> { \
+ Word64AtomicStore##Type##Operator() \
+ : Operator1<MachineRepresentation>( \
+ IrOpcode::kWord64AtomicStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "Word64AtomicStore", 3, 1, 1, 0, 1, 0, \
+ MachineRepresentation::Type) {} \
+ }; \
+ Word64AtomicStore##Type##Operator kWord64AtomicStore##Type;
+ ATOMIC64_REPRESENTATION_LIST(ATOMIC_STORE)
+#undef ATOMIC_STORE
+
+#define ATOMIC_OP(op, type) \
+ struct op##type##Operator : public Operator1<MachineType> { \
+ op##type##Operator() \
+ : Operator1<MachineType>(IrOpcode::k##op, \
+ Operator::kNoDeopt | Operator::kNoThrow, #op, \
+ 3, 1, 1, 1, 1, 0, MachineType::type()) {} \
+ }; \
+ op##type##Operator k##op##type;
+#define ATOMIC_OP_LIST(type) \
+ ATOMIC_OP(Word32AtomicAdd, type) \
+ ATOMIC_OP(Word32AtomicSub, type) \
+ ATOMIC_OP(Word32AtomicAnd, type) \
+ ATOMIC_OP(Word32AtomicOr, type) \
+ ATOMIC_OP(Word32AtomicXor, type) \
+ ATOMIC_OP(Word32AtomicExchange, type)
+ ATOMIC_TYPE_LIST(ATOMIC_OP_LIST)
+#undef ATOMIC_OP_LIST
+#define ATOMIC64_OP_LIST(type) \
+ ATOMIC_OP(Word64AtomicAdd, type) \
+ ATOMIC_OP(Word64AtomicSub, type) \
+ ATOMIC_OP(Word64AtomicAnd, type) \
+ ATOMIC_OP(Word64AtomicOr, type) \
+ ATOMIC_OP(Word64AtomicXor, type) \
+ ATOMIC_OP(Word64AtomicExchange, type)
+ ATOMIC_U64_TYPE_LIST(ATOMIC64_OP_LIST)
+#undef ATOMIC64_OP_LIST
#undef ATOMIC_OP
-template <MachineRepresentation rep, MachineSemantic sem>
-struct Word32AtomicCompareExchangeOperator : public Operator1<MachineType> {
- Word32AtomicCompareExchangeOperator()
- : Operator1(IrOpcode::kWord32AtomicCompareExchange,
- Operator::kNoDeopt | Operator::kNoThrow,
- "Word32AtomicCompareExchange", 4, 1, 1, 1, 1, 0,
- MachineType(rep, sem)) {}
-};
-
-template <MachineRepresentation rep, MachineSemantic sem>
-struct Word64AtomicCompareExchangeOperator : public Operator1<MachineType> {
- Word64AtomicCompareExchangeOperator()
- : Operator1(IrOpcode::kWord64AtomicCompareExchange,
- Operator::kNoDeopt | Operator::kNoThrow,
- "Word64AtomicCompareExchange", 4, 1, 1, 1, 1, 0,
- MachineType(rep, sem)) {}
-};
-
-struct Word32AtomicPairLoadOperator : public Operator {
- Word32AtomicPairLoadOperator()
- : Operator(IrOpcode::kWord32AtomicPairLoad,
- Operator::kNoDeopt | Operator::kNoThrow,
- "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0) {}
-};
+#define ATOMIC_COMPARE_EXCHANGE(Type) \
+ struct Word32AtomicCompareExchange##Type##Operator \
+ : public Operator1<MachineType> { \
+ Word32AtomicCompareExchange##Type##Operator() \
+ : Operator1<MachineType>(IrOpcode::kWord32AtomicCompareExchange, \
+ Operator::kNoDeopt | Operator::kNoThrow, \
+ "Word32AtomicCompareExchange", 4, 1, 1, 1, 1, \
+ 0, MachineType::Type()) {} \
+ }; \
+ Word32AtomicCompareExchange##Type##Operator \
+ kWord32AtomicCompareExchange##Type;
+ ATOMIC_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
+#undef ATOMIC_COMPARE_EXCHANGE
+
+#define ATOMIC_COMPARE_EXCHANGE(Type) \
+ struct Word64AtomicCompareExchange##Type##Operator \
+ : public Operator1<MachineType> { \
+ Word64AtomicCompareExchange##Type##Operator() \
+ : Operator1<MachineType>(IrOpcode::kWord64AtomicCompareExchange, \
+ Operator::kNoDeopt | Operator::kNoThrow, \
+ "Word64AtomicCompareExchange", 4, 1, 1, 1, 1, \
+ 0, MachineType::Type()) {} \
+ }; \
+ Word64AtomicCompareExchange##Type##Operator \
+ kWord64AtomicCompareExchange##Type;
+ ATOMIC_U64_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
+#undef ATOMIC_COMPARE_EXCHANGE
+
+ struct Word32AtomicPairLoadOperator : public Operator {
+ Word32AtomicPairLoadOperator()
+ : Operator(IrOpcode::kWord32AtomicPairLoad,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0) {}
+ };
+ Word32AtomicPairLoadOperator kWord32AtomicPairLoad;
-struct Word32AtomicPairStoreOperator : public Operator {
- Word32AtomicPairStoreOperator()
- : Operator(IrOpcode::kWord32AtomicPairStore,
- Operator::kNoDeopt | Operator::kNoThrow,
- "Word32AtomicPairStore", 4, 1, 1, 0, 1, 0) {}
-};
+ struct Word32AtomicPairStoreOperator : public Operator {
+ Word32AtomicPairStoreOperator()
+ : Operator(IrOpcode::kWord32AtomicPairStore,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairStore", 4, 1, 1, 0, 1, 0) {}
+ };
+ Word32AtomicPairStoreOperator kWord32AtomicPairStore;
#define ATOMIC_PAIR_OP(op) \
struct Word32AtomicPair##op##Operator : public Operator { \
Word32AtomicPair##op##Operator() \
: Operator(IrOpcode::kWord32AtomicPair##op, \
Operator::kNoDeopt | Operator::kNoThrow, \
- "Word32AtomicPair" #op, 4, 1, 1, 2, 1, 0) {} \
- };
-ATOMIC_PAIR_OP(Add)
-ATOMIC_PAIR_OP(Sub)
-ATOMIC_PAIR_OP(And)
-ATOMIC_PAIR_OP(Or)
-ATOMIC_PAIR_OP(Xor)
-ATOMIC_PAIR_OP(Exchange)
+ "Word32AtomicPair##op", 4, 1, 1, 2, 1, 0) {} \
+ }; \
+ Word32AtomicPair##op##Operator kWord32AtomicPair##op;
+ ATOMIC_PAIR_BINOP_LIST(ATOMIC_PAIR_OP)
#undef ATOMIC_PAIR_OP
+#undef ATOMIC_PAIR_BINOP_LIST
-struct Word32AtomicPairCompareExchangeOperator : public Operator {
- Word32AtomicPairCompareExchangeOperator()
- : Operator(IrOpcode::kWord32AtomicPairCompareExchange,
- Operator::kNoDeopt | Operator::kNoThrow,
- "Word32AtomicPairCompareExchange", 6, 1, 1, 2, 1, 0) {}
-};
-
-struct MemoryBarrierOperator : public Operator {
- MemoryBarrierOperator()
- : Operator(IrOpcode::kMemoryBarrier,
- Operator::kNoDeopt | Operator::kNoThrow, "MemoryBarrier", 0, 1,
- 1, 0, 1, 0) {}
-};
+ struct Word32AtomicPairCompareExchangeOperator : public Operator {
+ Word32AtomicPairCompareExchangeOperator()
+ : Operator(IrOpcode::kWord32AtomicPairCompareExchange,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairCompareExchange", 6, 1, 1, 2, 1, 0) {}
+ };
+ Word32AtomicPairCompareExchangeOperator kWord32AtomicPairCompareExchange;
-// The {BitcastWordToTagged} operator must not be marked as pure (especially
-// not idempotent), because otherwise the splitting logic in the Scheduler
-// might decide to split these operators, thus potentially creating live
-// ranges of allocation top across calls or other things that might allocate.
-// See https://bugs.chromium.org/p/v8/issues/detail?id=6059 for more details.
-struct BitcastWordToTaggedOperator : public Operator {
- BitcastWordToTaggedOperator()
- : Operator(IrOpcode::kBitcastWordToTagged,
- Operator::kEliminatable | Operator::kNoWrite,
- "BitcastWordToTagged", 1, 1, 1, 1, 1, 0) {}
-};
+ struct MemoryBarrierOperator : public Operator {
+ MemoryBarrierOperator()
+ : Operator(IrOpcode::kMemoryBarrier,
+ Operator::kNoDeopt | Operator::kNoThrow, "MemoryBarrier", 0,
+ 1, 1, 0, 1, 0) {}
+ };
+ MemoryBarrierOperator kMemoryBarrier;
+
+ // The {BitcastWordToTagged} operator must not be marked as pure (especially
+ // not idempotent), because otherwise the splitting logic in the Scheduler
+ // might decide to split these operators, thus potentially creating live
+ // ranges of allocation top across calls or other things that might allocate.
+ // See https://bugs.chromium.org/p/v8/issues/detail?id=6059 for more details.
+ struct BitcastWordToTaggedOperator : public Operator {
+ BitcastWordToTaggedOperator()
+ : Operator(IrOpcode::kBitcastWordToTagged,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "BitcastWordToTagged", 1, 1, 1, 1, 1, 0) {}
+ };
+ BitcastWordToTaggedOperator kBitcastWordToTagged;
-struct BitcastTaggedToWordOperator : public Operator {
- BitcastTaggedToWordOperator()
- : Operator(IrOpcode::kBitcastTaggedToWord,
- Operator::kEliminatable | Operator::kNoWrite,
- "BitcastTaggedToWord", 1, 1, 1, 1, 1, 0) {}
-};
+ struct BitcastTaggedToWordOperator : public Operator {
+ BitcastTaggedToWordOperator()
+ : Operator(IrOpcode::kBitcastTaggedToWord,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "BitcastTaggedToWord", 1, 1, 1, 1, 1, 0) {}
+ };
+ BitcastTaggedToWordOperator kBitcastTaggedToWord;
-struct BitcastMaybeObjectToWordOperator : public Operator {
- BitcastMaybeObjectToWordOperator()
- : Operator(IrOpcode::kBitcastTaggedToWord,
- Operator::kEliminatable | Operator::kNoWrite,
- "BitcastMaybeObjectToWord", 1, 1, 1, 1, 1, 0) {}
-};
+ struct BitcastMaybeObjectToWordOperator : public Operator {
+ BitcastMaybeObjectToWordOperator()
+ : Operator(IrOpcode::kBitcastTaggedToWord,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "BitcastMaybeObjectToWord", 1, 1, 1, 1, 1, 0) {}
+ };
+ BitcastMaybeObjectToWordOperator kBitcastMaybeObjectToWord;
-struct TaggedPoisonOnSpeculationOperator : public Operator {
- TaggedPoisonOnSpeculationOperator()
- : Operator(IrOpcode::kTaggedPoisonOnSpeculation,
- Operator::kEliminatable | Operator::kNoWrite,
- "TaggedPoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
-};
+ struct TaggedPoisonOnSpeculation : public Operator {
+ TaggedPoisonOnSpeculation()
+ : Operator(IrOpcode::kTaggedPoisonOnSpeculation,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "TaggedPoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
+ };
+ TaggedPoisonOnSpeculation kTaggedPoisonOnSpeculation;
-struct Word32PoisonOnSpeculationOperator : public Operator {
- Word32PoisonOnSpeculationOperator()
- : Operator(IrOpcode::kWord32PoisonOnSpeculation,
- Operator::kEliminatable | Operator::kNoWrite,
- "Word32PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
-};
+ struct Word32PoisonOnSpeculation : public Operator {
+ Word32PoisonOnSpeculation()
+ : Operator(IrOpcode::kWord32PoisonOnSpeculation,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "Word32PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
+ };
+ Word32PoisonOnSpeculation kWord32PoisonOnSpeculation;
-struct Word64PoisonOnSpeculationOperator : public Operator {
- Word64PoisonOnSpeculationOperator()
- : Operator(IrOpcode::kWord64PoisonOnSpeculation,
- Operator::kEliminatable | Operator::kNoWrite,
- "Word64PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
-};
+ struct Word64PoisonOnSpeculation : public Operator {
+ Word64PoisonOnSpeculation()
+ : Operator(IrOpcode::kWord64PoisonOnSpeculation,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "Word64PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
+ };
+ Word64PoisonOnSpeculation kWord64PoisonOnSpeculation;
-struct AbortCSAAssertOperator : public Operator {
- AbortCSAAssertOperator()
- : Operator(IrOpcode::kAbortCSAAssert, Operator::kNoThrow,
- "AbortCSAAssert", 1, 1, 1, 0, 1, 0) {}
-};
+ struct AbortCSAAssertOperator : public Operator {
+ AbortCSAAssertOperator()
+ : Operator(IrOpcode::kAbortCSAAssert, Operator::kNoThrow,
+ "AbortCSAAssert", 1, 1, 1, 0, 1, 0) {}
+ };
+ AbortCSAAssertOperator kAbortCSAAssert;
-struct DebugBreakOperator : public Operator {
- DebugBreakOperator()
- : Operator(IrOpcode::kDebugBreak, Operator::kNoThrow, "DebugBreak", 0, 1,
- 1, 0, 1, 0) {}
-};
+ struct DebugBreakOperator : public Operator {
+ DebugBreakOperator()
+ : Operator(IrOpcode::kDebugBreak, Operator::kNoThrow, "DebugBreak", 0,
+ 1, 1, 0, 1, 0) {}
+ };
+ DebugBreakOperator kDebugBreak;
-struct UnsafePointerAddOperator : public Operator {
- UnsafePointerAddOperator()
- : Operator(IrOpcode::kUnsafePointerAdd, Operator::kKontrol,
- "UnsafePointerAdd", 2, 1, 1, 1, 1, 0) {}
-};
+ struct UnsafePointerAddOperator final : public Operator {
+ UnsafePointerAddOperator()
+ : Operator(IrOpcode::kUnsafePointerAdd, Operator::kKontrol,
+ "UnsafePointerAdd", 2, 1, 1, 1, 1, 0) {}
+ };
+ UnsafePointerAddOperator kUnsafePointerAdd;
-template <StackCheckKind kind>
-struct StackPointerGreaterThanOperator : public Operator1<StackCheckKind> {
- StackPointerGreaterThanOperator()
- : Operator1(IrOpcode::kStackPointerGreaterThan, Operator::kEliminatable,
- "StackPointerGreaterThan", 1, 1, 0, 1, 1, 0, kind) {}
+ struct StackPointerGreaterThanOperator : public Operator1<StackCheckKind> {
+ explicit StackPointerGreaterThanOperator(StackCheckKind kind)
+ : Operator1<StackCheckKind>(
+ IrOpcode::kStackPointerGreaterThan, Operator::kEliminatable,
+ "StackPointerGreaterThan", 1, 1, 0, 1, 1, 0, kind) {}
+ };
+#define STACK_POINTER_GREATER_THAN(Kind) \
+ struct StackPointerGreaterThan##Kind##Operator final \
+ : public StackPointerGreaterThanOperator { \
+ StackPointerGreaterThan##Kind##Operator() \
+ : StackPointerGreaterThanOperator(StackCheckKind::k##Kind) {} \
+ }; \
+ StackPointerGreaterThan##Kind##Operator kStackPointerGreaterThan##Kind;
+
+ STACK_POINTER_GREATER_THAN(JSFunctionEntry)
+ STACK_POINTER_GREATER_THAN(JSIterationBody)
+ STACK_POINTER_GREATER_THAN(CodeStubAssembler)
+ STACK_POINTER_GREATER_THAN(Wasm)
+#undef STACK_POINTER_GREATER_THAN
};
struct CommentOperator : public Operator1<const char*> {
explicit CommentOperator(const char* msg)
- : Operator1(IrOpcode::kComment, Operator::kNoThrow | Operator::kNoWrite,
- "Comment", 0, 1, 1, 0, 1, 0, msg) {}
+ : Operator1<const char*>(IrOpcode::kComment,
+ Operator::kNoThrow | Operator::kNoWrite,
+ "Comment", 0, 1, 1, 0, 1, 0, msg) {}
};
+namespace {
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(MachineOperatorGlobalCache,
+ GetMachineOperatorGlobalCache)
+}
+
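DEFINE_LAZY_LEAKY_OBJECT_GETTER comes from the newly included src/base/lazy-instance.h. A hand-written sketch of what the getter is assumed to amount to: the cache is built on first use and intentionally never destroyed, which is what keeps every &cache_.k... pointer handed out below valid for the life of the process.

// Sketch (assumption): equivalent of the macro-generated getter.
MachineOperatorGlobalCache* GetMachineOperatorGlobalCache() {
  static MachineOperatorGlobalCache* const cache =
      new MachineOperatorGlobalCache();  // deliberately leaked
  return cache;
}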
MachineOperatorBuilder::MachineOperatorBuilder(
Zone* zone, MachineRepresentation word, Flags flags,
AlignmentRequirements alignmentRequirements)
: zone_(zone),
+ cache_(*GetMachineOperatorGlobalCache()),
word_(word),
flags_(flags),
alignment_requirements_(alignmentRequirements) {
@@ -1072,11 +1254,9 @@ MachineOperatorBuilder::MachineOperatorBuilder(
}
const Operator* MachineOperatorBuilder::UnalignedLoad(LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return GetCachedOperator< \
- UnalignedLoadOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kUnalignedLoad##Type; \
}
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
@@ -1088,8 +1268,7 @@ const Operator* MachineOperatorBuilder::UnalignedStore(
switch (rep) {
#define STORE(kRep) \
case MachineRepresentation::kRep: \
- return GetCachedOperator< \
- UnalignedStoreOperator<MachineRepresentation::kRep>>();
+ return &cache_.kUnalignedStore##kRep;
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
case MachineRepresentation::kBit:
@@ -1099,103 +1278,85 @@ const Operator* MachineOperatorBuilder::UnalignedStore(
UNREACHABLE();
}
-template <TruncateKind kind>
-struct TruncateFloat32ToUint32Operator : Operator1<TruncateKind> {
- TruncateFloat32ToUint32Operator()
- : Operator1(IrOpcode::kTruncateFloat32ToUint32, Operator::kPure,
- "TruncateFloat32ToUint32", 1, 0, 0, 1, 0, 0, kind) {}
-};
+#define PURE(Name, properties, value_input_count, control_input_count, \
+ output_count) \
+ const Operator* MachineOperatorBuilder::Name() { return &cache_.k##Name; }
+MACHINE_PURE_OP_LIST(PURE)
+#undef PURE
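With the global cache in place, each pure-op accessor collapses to returning a pointer into it. For one representative entry of MACHINE_PURE_OP_LIST (the concrete opcode name here is only an assumption for illustration), the PURE macro above expands to roughly:

// Sketch: expansion of the PURE accessor macro for a single pure operator.
const Operator* MachineOperatorBuilder::Word32And() {
  return &cache_.kWord32And;
}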
-const Operator* MachineOperatorBuilder::TruncateFloat32ToUint32(
- TruncateKind kind) {
+const Operator* MachineOperatorBuilder::Word32Sar(ShiftKind kind) {
switch (kind) {
- case TruncateKind::kArchitectureDefault:
- return GetCachedOperator<TruncateFloat32ToUint32Operator<
- TruncateKind::kArchitectureDefault>>();
- case TruncateKind::kSetOverflowToMin:
- return GetCachedOperator<
- TruncateFloat32ToUint32Operator<TruncateKind::kSetOverflowToMin>>();
+ case ShiftKind::kNormal:
+ return &cache_.kNormalWord32Sar;
+ case ShiftKind::kShiftOutZeros:
+ return &cache_.kShiftOutZerosWord32Sar;
}
}
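Because both ShiftKind variants now live in the shared cache, repeated requests return pointer-identical operators. A caller-side sketch (builder construction abbreviated; constructor defaults and the zone are assumptions for illustration):

// Sketch: two requests for the same kind yield the same cached object.
MachineOperatorBuilder machine(zone, MachineType::PointerRepresentation());
const Operator* a = machine.Word32Sar(ShiftKind::kShiftOutZeros);
const Operator* b = machine.Word32Sar(ShiftKind::kShiftOutZeros);
DCHECK_EQ(a, b);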
-template <TruncateKind kind>
-struct TruncateFloat32ToInt32Operator : Operator1<TruncateKind> {
- TruncateFloat32ToInt32Operator()
- : Operator1(IrOpcode::kTruncateFloat32ToInt32, Operator::kPure,
- "TruncateFloat32ToInt32", 1, 0, 0, 1, 0, 0, kind) {}
-};
+const Operator* MachineOperatorBuilder::Word64Sar(ShiftKind kind) {
+ switch (kind) {
+ case ShiftKind::kNormal:
+ return &cache_.kNormalWord64Sar;
+ case ShiftKind::kShiftOutZeros:
+ return &cache_.kShiftOutZerosWord64Sar;
+ }
+}
-const Operator* MachineOperatorBuilder::TruncateFloat32ToInt32(
+const Operator* MachineOperatorBuilder::TruncateFloat32ToUint32(
TruncateKind kind) {
switch (kind) {
case TruncateKind::kArchitectureDefault:
- return GetCachedOperator<
- TruncateFloat32ToInt32Operator<TruncateKind::kArchitectureDefault>>();
+ return &cache_.kArchitectureDefaultTruncateFloat32ToUint32;
case TruncateKind::kSetOverflowToMin:
- return GetCachedOperator<
- TruncateFloat32ToInt32Operator<TruncateKind::kSetOverflowToMin>>();
+ return &cache_.kSetOverflowToMinTruncateFloat32ToUint32;
}
}
-template <TruncateKind kind>
-struct TruncateFloat64ToInt64Operator : Operator1<TruncateKind> {
- TruncateFloat64ToInt64Operator()
- : Operator1(IrOpcode::kTruncateFloat64ToInt64, Operator::kPure,
- "TruncateFloat64ToInt64", 1, 0, 0, 1, 0, 0, kind) {}
-};
-
const Operator* MachineOperatorBuilder::TruncateFloat64ToInt64(
TruncateKind kind) {
switch (kind) {
case TruncateKind::kArchitectureDefault:
- return GetCachedOperator<
- TruncateFloat64ToInt64Operator<TruncateKind::kArchitectureDefault>>();
+ return &cache_.kArchitectureDefaultTruncateFloat64ToInt64;
case TruncateKind::kSetOverflowToMin:
- return GetCachedOperator<
- TruncateFloat64ToInt64Operator<TruncateKind::kSetOverflowToMin>>();
+ return &cache_.kSetOverflowToMinTruncateFloat64ToInt64;
}
}
-size_t hash_value(TruncateKind kind) { return static_cast<size_t>(kind); }
-
-std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
+const Operator* MachineOperatorBuilder::TruncateFloat32ToInt32(
+ TruncateKind kind) {
switch (kind) {
case TruncateKind::kArchitectureDefault:
- return os << "kArchitectureDefault";
+ return &cache_.kArchitectureDefaultTruncateFloat32ToInt32;
case TruncateKind::kSetOverflowToMin:
- return os << "kSetOverflowToMin";
+ return &cache_.kSetOverflowToMinTruncateFloat32ToInt32;
}
}
-#define PURE(Name, properties, value_input_count, control_input_count, \
- output_count) \
- const Operator* MachineOperatorBuilder::Name() { \
- return GetCachedOperator< \
- CachedPureOperator<IrOpcode::k##Name, value_input_count, \
- control_input_count, output_count>>(properties, \
- #Name); \
+#define PURE(Name, properties, value_input_count, control_input_count, \
+ output_count) \
+ const OptionalOperator MachineOperatorBuilder::Name() { \
+ return OptionalOperator(flags_ & k##Name, &cache_.k##Name); \
}
-MACHINE_PURE_OP_LIST(PURE)
+PURE_OPTIONAL_OP_LIST(PURE)
#undef PURE
const Operator* MachineOperatorBuilder::PrefetchTemporal() {
- return GetCachedOperator<
- CachedOperator<IrOpcode::kPrefetchTemporal, 2, 1, 1, 0, 1, 0>>(
- Operator::kNoDeopt | Operator::kNoThrow, "PrefetchTemporal");
+ return &cache_.kPrefetchTemporal;
}
const Operator* MachineOperatorBuilder::PrefetchNonTemporal() {
- return GetCachedOperator<
- CachedOperator<IrOpcode::kPrefetchNonTemporal, 2, 1, 1, 0, 1, 0>>(
- Operator::kNoDeopt | Operator::kNoThrow, "PrefetchNonTemporal");
+ return &cache_.kPrefetchNonTemporal;
}
+#define OVERFLOW_OP(Name, properties) \
+ const Operator* MachineOperatorBuilder::Name() { return &cache_.k##Name; }
+OVERFLOW_OP_LIST(OVERFLOW_OP)
+#undef OVERFLOW_OP
+
const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return GetCachedOperator< \
- LoadOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kLoad##Type; \
}
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
@@ -1203,11 +1364,9 @@ const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
}
const Operator* MachineOperatorBuilder::PoisonedLoad(LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return GetCachedOperator< \
- PoisonedLoadOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kPoisonedLoad##Type; \
}
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
@@ -1215,11 +1374,9 @@ const Operator* MachineOperatorBuilder::PoisonedLoad(LoadRepresentation rep) {
}
const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return GetCachedOperator< \
- ProtectedLoadOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kProtectedLoad##Type; \
}
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
@@ -1228,11 +1385,10 @@ const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
const Operator* MachineOperatorBuilder::LoadTransform(
MemoryAccessKind kind, LoadTransformation transform) {
-#define LOAD_TRANSFORM_KIND(TYPE, KIND) \
- if (kind == MemoryAccessKind::k##KIND && \
- transform == LoadTransformation::k##TYPE) { \
- return GetCachedOperator<LoadTransformOperator< \
- MemoryAccessKind::k##KIND, LoadTransformation::k##TYPE>>(); \
+#define LOAD_TRANSFORM_KIND(TYPE, KIND) \
+ if (kind == MemoryAccessKind::k##KIND && \
+ transform == LoadTransformation::k##TYPE) { \
+ return &cache_.k##KIND##LoadTransform##TYPE; \
}
#define LOAD_TRANSFORM(TYPE) \
LOAD_TRANSFORM_KIND(TYPE, Normal) \
@@ -1251,9 +1407,14 @@ const Operator* MachineOperatorBuilder::LoadLane(MemoryAccessKind kind,
#define LOAD_LANE_KIND(TYPE, KIND, LANEIDX) \
if (kind == MemoryAccessKind::k##KIND && rep == MachineType::TYPE() && \
laneidx == LANEIDX) { \
- return GetCachedOperator<LoadLaneOperator< \
- MemoryAccessKind::k##KIND, MachineType::TYPE().representation(), \
- MachineType::TYPE().semantic(), LANEIDX>>(); \
+ return zone_->New<Operator1<LoadLaneParameters>>( \
+ IrOpcode::kLoadLane, \
+ MemoryAccessKind::k##KIND == MemoryAccessKind::kProtected \
+ ? Operator::kNoDeopt | Operator::kNoThrow \
+ : Operator::kEliminatable, \
+ "LoadLane", 3, 1, 1, 1, 1, 0, \
+ LoadLaneParameters{MemoryAccessKind::k##KIND, \
+ LoadRepresentation::TYPE(), LANEIDX}); \
}
#define LOAD_LANE_T(T, LANE) \
@@ -1282,11 +1443,15 @@ const Operator* MachineOperatorBuilder::LoadLane(MemoryAccessKind kind,
const Operator* MachineOperatorBuilder::StoreLane(MemoryAccessKind kind,
MachineRepresentation rep,
uint8_t laneidx) {
-#define STORE_LANE_KIND(REP, KIND, LANEIDX) \
- if (kind == MemoryAccessKind::k##KIND && \
- rep == MachineRepresentation::REP && laneidx == LANEIDX) { \
- return GetCachedOperator<StoreLaneOperator< \
- MemoryAccessKind::k##KIND, MachineRepresentation::REP, LANEIDX>>(); \
+#define STORE_LANE_KIND(REP, KIND, LANEIDX) \
+ if (kind == MemoryAccessKind::k##KIND && \
+ rep == MachineRepresentation::REP && laneidx == LANEIDX) { \
+ return zone_->New<Operator1<StoreLaneParameters>>( \
+ IrOpcode::kStoreLane, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "StoreLane", 3, 1, 1, 0, 1, 0, \
+ StoreLaneParameters{MemoryAccessKind::k##KIND, \
+ MachineRepresentation::REP, LANEIDX}); \
}
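Unlike the operators above, the lane loads and stores are additionally parameterized by a lane index, so they are allocated on demand in the compilation zone rather than pre-built in the global cache. One expansion of STORE_LANE_KIND, with concrete values substituted purely for illustration, reads roughly:

// Sketch: STORE_LANE_KIND(kWord32, Normal, 0) after macro substitution.
if (kind == MemoryAccessKind::kNormal &&
    rep == MachineRepresentation::kWord32 && laneidx == 0) {
  return zone_->New<Operator1<StoreLaneParameters>>(
      IrOpcode::kStoreLane,
      Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
      "StoreLane", 3, 1, 1, 0, 1, 0,
      StoreLaneParameters{MemoryAccessKind::kNormal,
                          MachineRepresentation::kWord32, 0});
}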
#define STORE_LANE_T(T, LANE) \
@@ -1315,9 +1480,9 @@ const Operator* MachineOperatorBuilder::StoreLane(MemoryAccessKind kind,
const Operator* MachineOperatorBuilder::StackSlot(int size, int alignment) {
DCHECK_LE(0, size);
DCHECK(alignment == 0 || alignment == 4 || alignment == 8 || alignment == 16);
-#define CASE_CACHED_SIZE(Size, Alignment) \
- if (size == Size && alignment == Alignment) { \
- return GetCachedOperator<CachedStackSlotOperator<Size, Alignment>>(); \
+#define CASE_CACHED_SIZE(Size, Alignment) \
+ if (size == Size && alignment == Alignment) { \
+ return &cache_.kStackSlotOfSize##Size##OfAlignment##Alignment; \
}
STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(CASE_CACHED_SIZE)
@@ -1333,28 +1498,22 @@ const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep,
const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
switch (store_rep.representation()) {
-#define STORE(kRep) \
- case MachineRepresentation::kRep: \
- switch (store_rep.write_barrier_kind()) { \
- case kNoWriteBarrier: \
- return GetCachedOperator< \
- StoreOperator<MachineRepresentation::kRep, kNoWriteBarrier>>(); \
- case kAssertNoWriteBarrier: \
- return GetCachedOperator<StoreOperator<MachineRepresentation::kRep, \
- kAssertNoWriteBarrier>>(); \
- case kMapWriteBarrier: \
- return GetCachedOperator< \
- StoreOperator<MachineRepresentation::kRep, kMapWriteBarrier>>(); \
- case kPointerWriteBarrier: \
- return GetCachedOperator<StoreOperator<MachineRepresentation::kRep, \
- kPointerWriteBarrier>>(); \
- case kEphemeronKeyWriteBarrier: \
- return GetCachedOperator<StoreOperator<MachineRepresentation::kRep, \
- kEphemeronKeyWriteBarrier>>(); \
- case kFullWriteBarrier: \
- return GetCachedOperator< \
- StoreOperator<MachineRepresentation::kRep, kFullWriteBarrier>>(); \
- } \
+#define STORE(kRep) \
+ case MachineRepresentation::kRep: \
+ switch (store_rep.write_barrier_kind()) { \
+ case kNoWriteBarrier: \
+ return &cache_.k##Store##kRep##NoWriteBarrier; \
+ case kAssertNoWriteBarrier: \
+ return &cache_.k##Store##kRep##AssertNoWriteBarrier; \
+ case kMapWriteBarrier: \
+ return &cache_.k##Store##kRep##MapWriteBarrier; \
+ case kPointerWriteBarrier: \
+ return &cache_.k##Store##kRep##PointerWriteBarrier; \
+ case kEphemeronKeyWriteBarrier: \
+ return &cache_.k##Store##kRep##EphemeronKeyWriteBarrier; \
+ case kFullWriteBarrier: \
+ return &cache_.k##Store##kRep##FullWriteBarrier; \
+ } \
break;
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
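Every (representation, write-barrier kind) pair now has a dedicated cached operator, so Store() is a pure table lookup over the cache. Caller-side sketch (the builder instance and the chosen representation are assumptions for illustration):

// Sketch: fetching a cached store operator for a tagged field write.
StoreRepresentation store_rep(MachineRepresentation::kTagged,
                              kFullWriteBarrier);
const Operator* op = machine.Store(store_rep);
// Resolves to the cache's kStore<Rep>FullWriteBarrier member, shared process-wide.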
@@ -1368,10 +1527,9 @@ const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
const Operator* MachineOperatorBuilder::ProtectedStore(
MachineRepresentation rep) {
switch (rep) {
-#define STORE(kRep) \
- case MachineRepresentation::kRep: \
- return GetCachedOperator< \
- ProtectedStoreOperator<MachineRepresentation::kRep>>(); \
+#define STORE(kRep) \
+ case MachineRepresentation::kRep: \
+ return &cache_.kProtectedStore##kRep; \
break;
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
@@ -1383,46 +1541,42 @@ const Operator* MachineOperatorBuilder::ProtectedStore(
}
const Operator* MachineOperatorBuilder::UnsafePointerAdd() {
- return GetCachedOperator<UnsafePointerAddOperator>();
+ return &cache_.kUnsafePointerAdd;
}
const Operator* MachineOperatorBuilder::StackPointerGreaterThan(
StackCheckKind kind) {
switch (kind) {
case StackCheckKind::kJSFunctionEntry:
- return GetCachedOperator<
- StackPointerGreaterThanOperator<StackCheckKind::kJSFunctionEntry>>();
+ return &cache_.kStackPointerGreaterThanJSFunctionEntry;
case StackCheckKind::kJSIterationBody:
- return GetCachedOperator<
- StackPointerGreaterThanOperator<StackCheckKind::kJSIterationBody>>();
+ return &cache_.kStackPointerGreaterThanJSIterationBody;
case StackCheckKind::kCodeStubAssembler:
- return GetCachedOperator<StackPointerGreaterThanOperator<
- StackCheckKind::kCodeStubAssembler>>();
+ return &cache_.kStackPointerGreaterThanCodeStubAssembler;
case StackCheckKind::kWasm:
- return GetCachedOperator<
- StackPointerGreaterThanOperator<StackCheckKind::kWasm>>();
+ return &cache_.kStackPointerGreaterThanWasm;
}
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::BitcastWordToTagged() {
- return GetCachedOperator<BitcastWordToTaggedOperator>();
+ return &cache_.kBitcastWordToTagged;
}
const Operator* MachineOperatorBuilder::BitcastTaggedToWord() {
- return GetCachedOperator<BitcastTaggedToWordOperator>();
+ return &cache_.kBitcastTaggedToWord;
}
const Operator* MachineOperatorBuilder::BitcastMaybeObjectToWord() {
- return GetCachedOperator<BitcastMaybeObjectToWordOperator>();
+ return &cache_.kBitcastMaybeObjectToWord;
}
const Operator* MachineOperatorBuilder::AbortCSAAssert() {
- return GetCachedOperator<AbortCSAAssertOperator>();
+ return &cache_.kAbortCSAAssert;
}
const Operator* MachineOperatorBuilder::DebugBreak() {
- return GetCachedOperator<DebugBreakOperator>();
+ return &cache_.kDebugBreak;
}
const Operator* MachineOperatorBuilder::Comment(const char* msg) {
@@ -1430,16 +1584,14 @@ const Operator* MachineOperatorBuilder::Comment(const char* msg) {
}
const Operator* MachineOperatorBuilder::MemBarrier() {
- return GetCachedOperator<MemoryBarrierOperator>();
+ return &cache_.kMemoryBarrier;
}
const Operator* MachineOperatorBuilder::Word32AtomicLoad(
LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return GetCachedOperator< \
- Word32AtomicLoadOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kWord32AtomicLoad##Type; \
}
ATOMIC_TYPE_LIST(LOAD)
#undef LOAD
@@ -1448,10 +1600,9 @@ const Operator* MachineOperatorBuilder::Word32AtomicLoad(
const Operator* MachineOperatorBuilder::Word32AtomicStore(
MachineRepresentation rep) {
-#define STORE(kRep) \
- if (rep == MachineRepresentation::kRep) { \
- return GetCachedOperator< \
- Word32AtomicStoreOperator<MachineRepresentation::kRep>>(); \
+#define STORE(kRep) \
+ if (rep == MachineRepresentation::kRep) { \
+ return &cache_.kWord32AtomicStore##kRep; \
}
ATOMIC_REPRESENTATION_LIST(STORE)
#undef STORE
@@ -1459,11 +1610,9 @@ const Operator* MachineOperatorBuilder::Word32AtomicStore(
}
const Operator* MachineOperatorBuilder::Word32AtomicExchange(MachineType type) {
-#define EXCHANGE(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word32AtomicExchangeOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define EXCHANGE(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicExchange##kType; \
}
ATOMIC_TYPE_LIST(EXCHANGE)
#undef EXCHANGE
@@ -1472,11 +1621,9 @@ const Operator* MachineOperatorBuilder::Word32AtomicExchange(MachineType type) {
const Operator* MachineOperatorBuilder::Word32AtomicCompareExchange(
MachineType type) {
-#define COMPARE_EXCHANGE(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator<Word32AtomicCompareExchangeOperator< \
- MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define COMPARE_EXCHANGE(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicCompareExchange##kType; \
}
ATOMIC_TYPE_LIST(COMPARE_EXCHANGE)
#undef COMPARE_EXCHANGE
@@ -1484,11 +1631,9 @@ const Operator* MachineOperatorBuilder::Word32AtomicCompareExchange(
}
const Operator* MachineOperatorBuilder::Word32AtomicAdd(MachineType type) {
-#define ADD(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word32AtomicAddOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define ADD(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicAdd##kType; \
}
ATOMIC_TYPE_LIST(ADD)
#undef ADD
@@ -1496,11 +1641,9 @@ const Operator* MachineOperatorBuilder::Word32AtomicAdd(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word32AtomicSub(MachineType type) {
-#define SUB(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word32AtomicSubOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define SUB(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicSub##kType; \
}
ATOMIC_TYPE_LIST(SUB)
#undef SUB
@@ -1508,11 +1651,9 @@ const Operator* MachineOperatorBuilder::Word32AtomicSub(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word32AtomicAnd(MachineType type) {
-#define AND(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word32AtomicAndOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define AND(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicAnd##kType; \
}
ATOMIC_TYPE_LIST(AND)
#undef AND
@@ -1520,11 +1661,9 @@ const Operator* MachineOperatorBuilder::Word32AtomicAnd(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word32AtomicOr(MachineType type) {
-#define OR(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word32AtomicOrOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define OR(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicOr##kType; \
}
ATOMIC_TYPE_LIST(OR)
#undef OR
@@ -1532,11 +1671,9 @@ const Operator* MachineOperatorBuilder::Word32AtomicOr(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word32AtomicXor(MachineType type) {
-#define XOR(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word32AtomicXorOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define XOR(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicXor##kType; \
}
ATOMIC_TYPE_LIST(XOR)
#undef XOR
@@ -1545,11 +1682,9 @@ const Operator* MachineOperatorBuilder::Word32AtomicXor(MachineType type) {
const Operator* MachineOperatorBuilder::Word64AtomicLoad(
LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return GetCachedOperator< \
- Word64AtomicLoadOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kWord64AtomicLoad##Type; \
}
ATOMIC_U64_TYPE_LIST(LOAD)
#undef LOAD
@@ -1558,10 +1693,9 @@ const Operator* MachineOperatorBuilder::Word64AtomicLoad(
const Operator* MachineOperatorBuilder::Word64AtomicStore(
MachineRepresentation rep) {
-#define STORE(kRep) \
- if (rep == MachineRepresentation::kRep) { \
- return GetCachedOperator< \
- Word64AtomicStoreOperator<MachineRepresentation::kRep>>(); \
+#define STORE(kRep) \
+ if (rep == MachineRepresentation::kRep) { \
+ return &cache_.kWord64AtomicStore##kRep; \
}
ATOMIC64_REPRESENTATION_LIST(STORE)
#undef STORE
@@ -1569,11 +1703,9 @@ const Operator* MachineOperatorBuilder::Word64AtomicStore(
}
const Operator* MachineOperatorBuilder::Word64AtomicAdd(MachineType type) {
-#define ADD(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word64AtomicAddOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define ADD(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicAdd##kType; \
}
ATOMIC_U64_TYPE_LIST(ADD)
#undef ADD
@@ -1581,11 +1713,9 @@ const Operator* MachineOperatorBuilder::Word64AtomicAdd(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word64AtomicSub(MachineType type) {
-#define SUB(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word64AtomicSubOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define SUB(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicSub##kType; \
}
ATOMIC_U64_TYPE_LIST(SUB)
#undef SUB
@@ -1593,11 +1723,9 @@ const Operator* MachineOperatorBuilder::Word64AtomicSub(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word64AtomicAnd(MachineType type) {
-#define AND(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word64AtomicAndOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define AND(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicAnd##kType; \
}
ATOMIC_U64_TYPE_LIST(AND)
#undef AND
@@ -1605,11 +1733,9 @@ const Operator* MachineOperatorBuilder::Word64AtomicAnd(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word64AtomicOr(MachineType type) {
-#define OR(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word64AtomicOrOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define OR(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicOr##kType; \
}
ATOMIC_U64_TYPE_LIST(OR)
#undef OR
@@ -1617,11 +1743,9 @@ const Operator* MachineOperatorBuilder::Word64AtomicOr(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word64AtomicXor(MachineType type) {
-#define XOR(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word64AtomicXorOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define XOR(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicXor##kType; \
}
ATOMIC_U64_TYPE_LIST(XOR)
#undef XOR
@@ -1629,11 +1753,9 @@ const Operator* MachineOperatorBuilder::Word64AtomicXor(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word64AtomicExchange(MachineType type) {
-#define EXCHANGE(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word64AtomicExchangeOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define EXCHANGE(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicExchange##kType; \
}
ATOMIC_U64_TYPE_LIST(EXCHANGE)
#undef EXCHANGE
@@ -1642,11 +1764,9 @@ const Operator* MachineOperatorBuilder::Word64AtomicExchange(MachineType type) {
const Operator* MachineOperatorBuilder::Word64AtomicCompareExchange(
MachineType type) {
-#define COMPARE_EXCHANGE(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator<Word64AtomicCompareExchangeOperator< \
- MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define COMPARE_EXCHANGE(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicCompareExchange##kType; \
}
ATOMIC_U64_TYPE_LIST(COMPARE_EXCHANGE)
#undef COMPARE_EXCHANGE
@@ -1654,51 +1774,51 @@ const Operator* MachineOperatorBuilder::Word64AtomicCompareExchange(
}
const Operator* MachineOperatorBuilder::Word32AtomicPairLoad() {
- return GetCachedOperator<Word32AtomicPairLoadOperator>();
+ return &cache_.kWord32AtomicPairLoad;
}
const Operator* MachineOperatorBuilder::Word32AtomicPairStore() {
- return GetCachedOperator<Word32AtomicPairStoreOperator>();
+ return &cache_.kWord32AtomicPairStore;
}
const Operator* MachineOperatorBuilder::Word32AtomicPairAdd() {
- return GetCachedOperator<Word32AtomicPairAddOperator>();
+ return &cache_.kWord32AtomicPairAdd;
}
const Operator* MachineOperatorBuilder::Word32AtomicPairSub() {
- return GetCachedOperator<Word32AtomicPairSubOperator>();
+ return &cache_.kWord32AtomicPairSub;
}
const Operator* MachineOperatorBuilder::Word32AtomicPairAnd() {
- return GetCachedOperator<Word32AtomicPairAndOperator>();
+ return &cache_.kWord32AtomicPairAnd;
}
const Operator* MachineOperatorBuilder::Word32AtomicPairOr() {
- return GetCachedOperator<Word32AtomicPairOrOperator>();
+ return &cache_.kWord32AtomicPairOr;
}
const Operator* MachineOperatorBuilder::Word32AtomicPairXor() {
- return GetCachedOperator<Word32AtomicPairXorOperator>();
+ return &cache_.kWord32AtomicPairXor;
}
const Operator* MachineOperatorBuilder::Word32AtomicPairExchange() {
- return GetCachedOperator<Word32AtomicPairExchangeOperator>();
+ return &cache_.kWord32AtomicPairExchange;
}
const Operator* MachineOperatorBuilder::Word32AtomicPairCompareExchange() {
- return GetCachedOperator<Word32AtomicPairCompareExchangeOperator>();
+ return &cache_.kWord32AtomicPairCompareExchange;
}
const Operator* MachineOperatorBuilder::TaggedPoisonOnSpeculation() {
- return GetCachedOperator<TaggedPoisonOnSpeculationOperator>();
+ return &cache_.kTaggedPoisonOnSpeculation;
}
const Operator* MachineOperatorBuilder::Word32PoisonOnSpeculation() {
- return GetCachedOperator<Word32PoisonOnSpeculationOperator>();
+ return &cache_.kWord32PoisonOnSpeculation;
}
const Operator* MachineOperatorBuilder::Word64PoisonOnSpeculation() {
- return GetCachedOperator<Word64PoisonOnSpeculationOperator>();
+ return &cache_.kWord64PoisonOnSpeculation;
}
#define EXTRACT_LANE_OP(Type, Sign, lane_count) \
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index eceeb623b0..c798580845 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -18,6 +18,7 @@ namespace internal {
namespace compiler {
// Forward declarations.
+struct MachineOperatorGlobalCache;
class Operator;
@@ -625,6 +626,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F64x2Floor();
const Operator* F64x2Trunc();
const Operator* F64x2NearestInt();
+ const Operator* F64x2ConvertLowI32x4S();
+ const Operator* F64x2ConvertLowI32x4U();
+ const Operator* F64x2PromoteLowF32x4();
const Operator* F32x4Splat();
const Operator* F32x4ExtractLane(int32_t);
@@ -655,12 +659,14 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F32x4Floor();
const Operator* F32x4Trunc();
const Operator* F32x4NearestInt();
+ const Operator* F32x4DemoteF64x2Zero();
const Operator* I64x2Splat();
const Operator* I64x2SplatI32Pair();
const Operator* I64x2ExtractLane(int32_t);
const Operator* I64x2ReplaceLane(int32_t);
const Operator* I64x2ReplaceLaneI32Pair(int32_t);
+ const Operator* I64x2Abs();
const Operator* I64x2Neg();
const Operator* I64x2SConvertI32x4Low();
const Operator* I64x2SConvertI32x4High();
@@ -673,6 +679,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I64x2Sub();
const Operator* I64x2Mul();
const Operator* I64x2Eq();
+ const Operator* I64x2Ne();
+ const Operator* I64x2GtS();
+ const Operator* I64x2GeS();
const Operator* I64x2ShrU();
const Operator* I64x2ExtMulLowI32x4S();
const Operator* I64x2ExtMulHighI32x4S();
@@ -718,6 +727,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I32x4SignSelect();
const Operator* I32x4ExtAddPairwiseI16x8S();
const Operator* I32x4ExtAddPairwiseI16x8U();
+ const Operator* I32x4TruncSatF64x2SZero();
+ const Operator* I32x4TruncSatF64x2UZero();
const Operator* I16x8Splat();
const Operator* I16x8ExtractLaneU(int32_t);
@@ -813,11 +824,10 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I8x16Swizzle();
const Operator* I8x16Shuffle(const uint8_t shuffle[16]);
- const Operator* V32x4AnyTrue();
+ const Operator* V128AnyTrue();
+ const Operator* V64x2AllTrue();
const Operator* V32x4AllTrue();
- const Operator* V16x8AnyTrue();
const Operator* V16x8AllTrue();
- const Operator* V8x16AnyTrue();
const Operator* V8x16AllTrue();
// load [base + index]
@@ -981,6 +991,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
private:
Zone* zone_;
+ MachineOperatorGlobalCache const& cache_;
MachineRepresentation const word_;
Flags const flags_;
AlignmentRequirements const alignment_requirements_;
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 858cec5cb3..0208b3ec5f 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -41,7 +41,7 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kRetain:
case IrOpcode::kStackPointerGreaterThan:
case IrOpcode::kStaticAssert:
- // TODO(tebbi): Store nodes might do a bump-pointer allocation.
+ // TODO(turbofan): Store nodes might do a bump-pointer allocation.
// We should introduce a special bump-pointer store node to
// differentiate that.
case IrOpcode::kStore:
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index a342afc94d..a1e254d333 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -309,7 +309,7 @@ struct BinopMatcher : public NodeMatcher {
protected:
void SwapInputs() {
std::swap(left_, right_);
- // TODO(tebbi): This modification should notify the reducers using
+ // TODO(turbofan): This modification should notify the reducers using
// BinopMatcher. Alternatively, all reducers (especially value numbering)
// could ignore the ordering for commutative binops.
node()->ReplaceInput(0, left().node());
diff --git a/deps/v8/src/compiler/node-observer.cc b/deps/v8/src/compiler/node-observer.cc
new file mode 100644
index 0000000000..52953dbabc
--- /dev/null
+++ b/deps/v8/src/compiler/node-observer.cc
@@ -0,0 +1,61 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node-observer.h"
+
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+ObservableNodeState::ObservableNodeState(const Node* node, Zone* zone)
+ : id_(node->id()),
+ op_(node->op()),
+ type_(NodeProperties::GetTypeOrAny(node)) {}
+
+void ObserveNodeManager::StartObserving(Node* node, NodeObserver* observer) {
+ DCHECK_NOT_NULL(node);
+ DCHECK_NOT_NULL(observer);
+ DCHECK(observations_.find(node->id()) == observations_.end());
+
+ observer->set_has_observed_changes();
+ NodeObserver::Observation observation = observer->OnNodeCreated(node);
+ if (observation == NodeObserver::Observation::kContinue) {
+ observations_[node->id()] =
+ zone_->New<NodeObservation>(observer, node, zone_);
+ } else {
+ DCHECK_EQ(observation, NodeObserver::Observation::kStop);
+ }
+}
+
+void ObserveNodeManager::OnNodeChanged(const char* reducer_name,
+ const Node* old_node,
+ const Node* new_node) {
+ const auto it = observations_.find(old_node->id());
+ if (it == observations_.end()) return;
+
+ ObservableNodeState new_state{new_node, zone_};
+ NodeObservation* observation = it->second;
+ if (observation->state == new_state) return;
+
+ ObservableNodeState old_state = observation->state;
+ observation->state = new_state;
+
+ NodeObserver::Observation result =
+ observation->observer->OnNodeChanged(reducer_name, new_node, old_state);
+ if (result == NodeObserver::Observation::kStop) {
+ observations_.erase(old_node->id());
+ } else {
+ DCHECK_EQ(result, NodeObserver::Observation::kContinue);
+ if (old_node != new_node) {
+ observations_.erase(old_node->id());
+ observations_[new_node->id()] = observation;
+ }
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/node-observer.h b/deps/v8/src/compiler/node-observer.h
new file mode 100644
index 0000000000..8978156464
--- /dev/null
+++ b/deps/v8/src/compiler/node-observer.h
@@ -0,0 +1,130 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file declares the implementation of a new intrinsic %ObserveNode(expr),
+// which has noop semantics but triggers the invocation of callbacks on a
+// NodeObserver object. The NodeObserver is set on the OptimizedCompilationInfo
+// and callbacks are called when the node generated for 'expr' is created or
+// changed in any phase, until EffectControlLinearization.
+//
+// The modifications currently observed are changes to the observed Node's
+// operator and type, and its replacement with another Node.
+//
+// This provides the infrastructure to write unit tests that check for the
+// construction of or the lowering to specific nodes in the TurboFan graphs.
+
+#ifndef V8_COMPILER_NODE_OBSERVER_H_
+#define V8_COMPILER_NODE_OBSERVER_H_
+
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Node;
+class Operator;
+
+class ObservableNodeState {
+ public:
+ ObservableNodeState(const Node* node, Zone* zone);
+
+ uint32_t id() const { return id_; }
+ const Operator* op() const { return op_; }
+ int16_t opcode() const { return op_->opcode(); }
+ Type type() const { return type_; }
+
+ private:
+ uint32_t id_;
+ const Operator* op_;
+ Type type_;
+};
+
+inline bool operator==(const ObservableNodeState& lhs,
+ const ObservableNodeState& rhs) {
+ return lhs.id() == rhs.id() && lhs.op() == rhs.op() &&
+ lhs.type() == rhs.type();
+}
+
+inline bool operator!=(const ObservableNodeState& lhs,
+ const ObservableNodeState& rhs) {
+ return !operator==(lhs, rhs);
+}
+
+class NodeObserver : public ZoneObject {
+ public:
+ enum class Observation {
+ kContinue,
+ kStop,
+ };
+
+ NodeObserver() = default;
+ virtual ~NodeObserver() = 0;
+
+ NodeObserver(const NodeObserver&) = delete;
+ NodeObserver& operator=(const NodeObserver&) = delete;
+
+ virtual Observation OnNodeCreated(const Node* node) {
+ return Observation::kContinue;
+ }
+
+ virtual Observation OnNodeChanged(const char* reducer_name, const Node* node,
+ const ObservableNodeState& old_state) {
+ return Observation::kContinue;
+ }
+
+ void set_has_observed_changes() { has_observed_changes_ = true; }
+ bool has_observed_changes() const { return has_observed_changes_; }
+
+ private:
+ bool has_observed_changes_ = false;
+};
+inline NodeObserver::~NodeObserver() = default;
+
+struct NodeObservation : public ZoneObject {
+ NodeObservation(NodeObserver* node_observer, const Node* node, Zone* zone)
+ : observer(node_observer), state(node, zone) {
+ DCHECK_NOT_NULL(node_observer);
+ }
+
+ NodeObserver* observer;
+ ObservableNodeState state;
+};
+
+class ObserveNodeManager : public ZoneObject {
+ public:
+ explicit ObserveNodeManager(Zone* zone) : zone_(zone), observations_(zone) {}
+
+ void StartObserving(Node* node, NodeObserver* observer);
+ void OnNodeChanged(const char* reducer_name, const Node* old_node,
+ const Node* new_node);
+
+ private:
+ Zone* zone_;
+ ZoneMap<NodeId, NodeObservation*> observations_;
+};
+
+struct ObserveNodeInfo {
+ ObserveNodeInfo() : observe_node_manager(nullptr), node_observer(nullptr) {}
+ ObserveNodeInfo(ObserveNodeManager* manager, NodeObserver* observer)
+ : observe_node_manager(manager), node_observer(observer) {}
+
+ void StartObserving(Node* node) const {
+ if (observe_node_manager) {
+ DCHECK_NOT_NULL(node_observer);
+ observe_node_manager->StartObserving(node, node_observer);
+ }
+ }
+
+ ObserveNodeManager* observe_node_manager;
+ NodeObserver* node_observer;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_NODE_OBSERVER_H_
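As a minimal sketch of how the interface declared above might be exercised: the class name below is hypothetical, and attaching the observer to an OptimizedCompilationInfo (whose node_observer() accessor this patch consults) and triggering it via the new %ObserveNode(expr) intrinsic, presumably under --allow-natives-syntax, are assumed rather than shown.

    // Hypothetical sketch: count how often the observed node is created/changed.
    #include "src/compiler/node-observer.h"

    namespace v8 {
    namespace internal {
    namespace compiler {

    class CountingNodeObserver final : public NodeObserver {
     public:
      Observation OnNodeCreated(const Node* node) override {
        created_++;  // called once, when the node for the observed expr is built
        return Observation::kContinue;
      }
      Observation OnNodeChanged(const char* reducer_name, const Node* node,
                                const ObservableNodeState& old_state) override {
        changed_++;  // called whenever a reducer alters or replaces the node
        return Observation::kContinue;
      }
      int created() const { return created_; }
      int changed() const { return changed_; }

     private:
      int created_ = 0;
      int changed_ = 0;
    };

    }  // namespace compiler
    }  // namespace internal
    }  // namespace v8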
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index bc25b83d92..3219c216c6 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -568,11 +568,10 @@ Node* NodeProperties::GetOuterContext(Node* node, size_t* depth) {
}
// static
-Type NodeProperties::GetTypeOrAny(Node* node) {
+Type NodeProperties::GetTypeOrAny(const Node* node) {
return IsTyped(node) ? node->type() : Type::Any();
}
-
// static
bool NodeProperties::AllValueInputsAreTyped(Node* node) {
int input_count = node->op()->ValueInputCount();
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 059db4f5cb..d08d328bee 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -21,7 +21,7 @@ class Operator;
class CommonOperatorBuilder;
// A facade that simplifies access to the different kinds of inputs to a node.
-class V8_EXPORT_PRIVATE NodeProperties final {
+class V8_EXPORT_PRIVATE NodeProperties {
public:
// ---------------------------------------------------------------------------
// Input layout.
@@ -244,12 +244,12 @@ class V8_EXPORT_PRIVATE NodeProperties final {
// ---------------------------------------------------------------------------
// Type.
- static bool IsTyped(Node* node) { return !node->type().IsInvalid(); }
+ static bool IsTyped(const Node* node) { return !node->type().IsInvalid(); }
static Type GetType(Node* node) {
DCHECK(IsTyped(node));
return node->type();
}
- static Type GetTypeOrAny(Node* node);
+ static Type GetTypeOrAny(const Node* node);
static void SetType(Node* node, Type type) {
DCHECK(!type.IsInvalid());
node->set_type(type);
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index 8525fa0b01..912c0966d1 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -499,3 +499,7 @@ bool Node::Uses::empty() const { return begin() == end(); }
} // namespace compiler
} // namespace internal
} // namespace v8
+
+V8_EXPORT_PRIVATE extern void _v8_internal_Node_Print(void* object) {
+ reinterpret_cast<i::compiler::Node*>(object)->Print();
+}
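The exported _v8_internal_Node_Print helper added above appears to follow the pattern of V8's other _v8_internal_Print_* hooks: it simply forwards to Node::Print() and is meant to be invoked from a debugger rather than from C++ call sites, e.g. (in a debug build, with node holding a compiler::Node*) something along the lines of: call _v8_internal_Node_Print((void*)node) from gdb. The exact invocation is an assumption, not part of the patch.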
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index 823bee4597..117bea7212 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -367,25 +367,6 @@ class Control : public NodeWrapper {
}
};
-class FrameState : public NodeWrapper {
- public:
- explicit constexpr FrameState(Node* node) : NodeWrapper(node) {
- // TODO(jgruber): Disallow kStart (needed for PromiseConstructorBasic unit
- // test, among others).
- SLOW_DCHECK(node->opcode() == IrOpcode::kFrameState ||
- node->opcode() == IrOpcode::kStart);
- }
-
- // Duplicating here from frame-states.h for ease of access and to keep
- // header include-balls small. Equality of the two constants is
- // static-asserted elsewhere.
- static constexpr int kFrameStateOuterStateInput = 5;
-
- FrameState outer_frame_state() const {
- return FrameState{node()->InputAt(kFrameStateOuterStateInput)};
- }
-};
-
// Typedefs to shorten commonly used Node containers.
using NodeDeque = ZoneDeque<Node*>;
using NodeSet = ZoneSet<Node*>;
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index cf65864e8d..bd2011ada2 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -196,7 +196,8 @@
V(JSCall) \
V(JSCallForwardVarargs) \
V(JSCallWithArrayLike) \
- V(JSCallWithSpread)
+ V(JSCallWithSpread) \
+ V(JSWasmCall)
#define JS_CONSTRUCT_OP_LIST(V) \
V(JSConstructForwardVarargs) \
@@ -393,7 +394,6 @@
#define SIMPLIFIED_OTHER_OP_LIST(V) \
V(Allocate) \
V(AllocateRaw) \
- V(ArgumentsFrame) \
V(ArgumentsLength) \
V(AssertType) \
V(BooleanNot) \
@@ -783,6 +783,9 @@
V(F64x2Floor) \
V(F64x2Trunc) \
V(F64x2NearestInt) \
+ V(F64x2ConvertLowI32x4S) \
+ V(F64x2ConvertLowI32x4U) \
+ V(F64x2PromoteLowF32x4) \
V(F32x4Splat) \
V(F32x4ExtractLane) \
V(F32x4ReplaceLane) \
@@ -814,11 +817,13 @@
V(F32x4Floor) \
V(F32x4Trunc) \
V(F32x4NearestInt) \
+ V(F32x4DemoteF64x2Zero) \
V(I64x2Splat) \
V(I64x2SplatI32Pair) \
V(I64x2ExtractLane) \
V(I64x2ReplaceLane) \
V(I64x2ReplaceLaneI32Pair) \
+ V(I64x2Abs) \
V(I64x2Neg) \
V(I64x2SConvertI32x4Low) \
V(I64x2SConvertI32x4High) \
@@ -831,6 +836,9 @@
V(I64x2Sub) \
V(I64x2Mul) \
V(I64x2Eq) \
+ V(I64x2Ne) \
+ V(I64x2GtS) \
+ V(I64x2GeS) \
V(I64x2ShrU) \
V(I64x2ExtMulLowI32x4S) \
V(I64x2ExtMulHighI32x4S) \
@@ -878,6 +886,8 @@
V(I32x4SignSelect) \
V(I32x4ExtAddPairwiseI16x8S) \
V(I32x4ExtAddPairwiseI16x8U) \
+ V(I32x4TruncSatF64x2SZero) \
+ V(I32x4TruncSatF64x2UZero) \
V(I16x8Splat) \
V(I16x8ExtractLaneU) \
V(I16x8ExtractLaneS) \
@@ -973,11 +983,10 @@
V(S128AndNot) \
V(I8x16Swizzle) \
V(I8x16Shuffle) \
- V(V32x4AnyTrue) \
+ V(V128AnyTrue) \
+ V(V64x2AllTrue) \
V(V32x4AllTrue) \
- V(V16x8AnyTrue) \
V(V16x8AllTrue) \
- V(V8x16AnyTrue) \
V(V8x16AllTrue) \
V(LoadTransform) \
V(PrefetchTemporal) \
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index a8e29416b5..8c72ae3c72 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -224,6 +224,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSCall:
case IrOpcode::kJSCallWithArrayLike:
case IrOpcode::kJSCallWithSpread:
+ case IrOpcode::kJSWasmCall:
// Misc operations
case IrOpcode::kJSAsyncFunctionEnter:
diff --git a/deps/v8/src/compiler/operator.h b/deps/v8/src/compiler/operator.h
index 3239eb0269..e47441208f 100644
--- a/deps/v8/src/compiler/operator.h
+++ b/deps/v8/src/compiler/operator.h
@@ -68,6 +68,8 @@ class V8_EXPORT_PRIVATE Operator : public NON_EXPORTED_BASE(ZoneObject) {
Operator(const Operator&) = delete;
Operator& operator=(const Operator&) = delete;
+ virtual ~Operator() = default;
+
// A small integer unique to all instances of a particular kind of operator,
// useful for quick matching for specific kinds of operators. For fast access
// the opcode is stored directly in the operator object.
diff --git a/deps/v8/src/compiler/osr.cc b/deps/v8/src/compiler/osr.cc
index d497fc5669..d8ebe23abd 100644
--- a/deps/v8/src/compiler/osr.cc
+++ b/deps/v8/src/compiler/osr.cc
@@ -16,9 +16,9 @@ namespace compiler {
OsrHelper::OsrHelper(OptimizedCompilationInfo* info)
: parameter_count_(info->bytecode_array()->parameter_count()),
- stack_slot_count_(InterpreterFrameConstants::RegisterStackSlotCount(
+ stack_slot_count_(UnoptimizedFrameConstants::RegisterStackSlotCount(
info->bytecode_array()->register_count()) +
- InterpreterFrameConstants::kExtraSlotCount) {}
+ UnoptimizedFrameConstants::kExtraSlotCount) {}
void OsrHelper::SetupFrame(Frame* frame) {
// The optimized frame will subsume the unoptimized frame. Do so by reserving
diff --git a/deps/v8/src/compiler/persistent-map.h b/deps/v8/src/compiler/persistent-map.h
index 75e4833e1a..84e905b812 100644
--- a/deps/v8/src/compiler/persistent-map.h
+++ b/deps/v8/src/compiler/persistent-map.h
@@ -28,8 +28,8 @@ namespace compiler {
// - iteration: amortized O(1) per step
// - Zip: O(n)
// - equality check: O(n)
-// TODO(tebbi): Cache map transitions to avoid re-allocation of the same map.
-// TODO(tebbi): Implement an O(1) equality check based on hash consing or
+// TODO(turbofan): Cache map transitions to avoid re-allocation of the same map.
+// TODO(turbofan): Implement an O(1) equality check based on hash consing or
// something similar.
template <class Key, class Value, class Hasher = base::hash<Key>>
class PersistentMap {
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 42560b5451..c4e88db841 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -60,6 +60,7 @@
#include "src/compiler/machine-graph-verifier.h"
#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/memory-optimizer.h"
+#include "src/compiler/node-observer.h"
#include "src/compiler/node-origin-table.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline-statistics.h"
@@ -174,6 +175,10 @@ class PipelineData {
javascript_ = graph_zone_->New<JSOperatorBuilder>(graph_zone_);
jsgraph_ = graph_zone_->New<JSGraph>(isolate_, graph_, common_, javascript_,
simplified_, machine_);
+ observe_node_manager_ =
+ info->node_observer()
+ ? graph_zone_->New<ObserveNodeManager>(graph_zone_)
+ : nullptr;
dependencies_ =
info_->zone()->New<CompilationDependencies>(broker_, info_->zone());
}
@@ -346,6 +351,10 @@ class PipelineData {
}
void reset_schedule() { schedule_ = nullptr; }
+ ObserveNodeManager* observe_node_manager() const {
+ return observe_node_manager_;
+ }
+
Zone* instruction_zone() const { return instruction_zone_; }
Zone* codegen_zone() const { return codegen_zone_; }
InstructionSequence* sequence() const { return sequence_; }
@@ -567,6 +576,12 @@ class PipelineData {
runtime_call_stats_ = stats;
}
+ // Used to skip the "wasm-inlining" phase when there are no JS-to-Wasm calls.
+ bool has_js_wasm_calls() const { return has_js_wasm_calls_; }
+ void set_has_js_wasm_calls(bool has_js_wasm_calls) {
+ has_js_wasm_calls_ = has_js_wasm_calls;
+ }
+
private:
Isolate* const isolate_;
wasm::WasmEngine* const wasm_engine_ = nullptr;
@@ -600,6 +615,7 @@ class PipelineData {
JSGraph* jsgraph_ = nullptr;
MachineGraph* mcgraph_ = nullptr;
Schedule* schedule_ = nullptr;
+ ObserveNodeManager* observe_node_manager_ = nullptr;
// All objects in the following group of fields are allocated in
// instruction_zone_. They are all set to nullptr when the instruction_zone_
@@ -639,6 +655,8 @@ class PipelineData {
RuntimeCallStats* runtime_call_stats_ = nullptr;
const ProfileDataFromFile* profile_data_ = nullptr;
+
+ bool has_js_wasm_calls_ = false;
};
class PipelineImpl final {
@@ -693,6 +711,8 @@ class PipelineImpl final {
Isolate* isolate() const;
CodeGenerator* code_generator() const;
+ ObserveNodeManager* observe_node_manager() const;
+
private:
PipelineData* const data_;
};
@@ -712,7 +732,7 @@ class SourcePositionWrapper final : public Reducer {
Reduction Reduce(Node* node) final {
SourcePosition const pos = table_->GetSourcePosition(node);
SourcePositionTable::Scope position(table_, pos);
- return reducer_->Reduce(node);
+ return reducer_->Reduce(node, nullptr);
}
void Finalize() final { reducer_->Finalize(); }
@@ -734,7 +754,7 @@ class NodeOriginsWrapper final : public Reducer {
Reduction Reduce(Node* node) final {
NodeOriginTable::Scope position(table_, reducer_name(), node);
- return reducer_->Reduce(node);
+ return reducer_->Reduce(node, nullptr);
}
void Finalize() final { reducer_->Finalize(); }
@@ -1029,7 +1049,7 @@ class PipelineCompilationJob final : public OptimizedCompilationJob {
public:
PipelineCompilationJob(Isolate* isolate,
Handle<SharedFunctionInfo> shared_info,
- Handle<JSFunction> function, BailoutId osr_offset,
+ Handle<JSFunction> function, BytecodeOffset osr_offset,
JavaScriptFrame* osr_frame, CodeKind code_kind);
~PipelineCompilationJob() final;
PipelineCompilationJob(const PipelineCompilationJob&) = delete;
@@ -1067,7 +1087,7 @@ bool ShouldUseConcurrentInlining(CodeKind code_kind, bool is_osr) {
PipelineCompilationJob::PipelineCompilationJob(
Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
- Handle<JSFunction> function, BailoutId osr_offset,
+ Handle<JSFunction> function, BytecodeOffset osr_offset,
JavaScriptFrame* osr_frame, CodeKind code_kind)
// Note that the OptimizedCompilationInfo is not initialized at the time
// we pass it to the CompilationJob constructor, but it is not
@@ -1183,6 +1203,10 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
}
}
+ if (FLAG_turbo_direct_heap_access) {
+ isolate->heap()->PublishPendingAllocations();
+ }
+
return SUCCEEDED;
}
@@ -1422,7 +1446,9 @@ struct GraphBuilderPhase {
closure.raw_feedback_cell(), data->info()->osr_offset(),
data->jsgraph(), frequency, data->source_positions(),
SourcePosition::kNotInlined, data->info()->code_kind(), flags,
- &data->info()->tick_counter());
+ &data->info()->tick_counter(),
+ ObserveNodeInfo{data->observe_node_manager(),
+ data->info()->node_observer()});
}
};
@@ -1432,7 +1458,8 @@ struct InliningPhase {
void Run(PipelineData* data, Zone* temp_zone) {
OptimizedCompilationInfo* info = data->info();
GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
- data->broker(), data->jsgraph()->Dead());
+ data->broker(), data->jsgraph()->Dead(),
+ data->observe_node_manager());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
CheckpointElimination checkpoint_elimination(&graph_reducer);
@@ -1443,6 +1470,9 @@ struct InliningPhase {
if (data->info()->bailout_on_uninitialized()) {
call_reducer_flags |= JSCallReducer::kBailoutOnUninitialized;
}
+ if (FLAG_turbo_inline_js_wasm_calls && data->info()->inlining()) {
+ call_reducer_flags |= JSCallReducer::kInlineJSToWasmCalls;
+ }
JSCallReducer call_reducer(&graph_reducer, data->jsgraph(), data->broker(),
temp_zone, call_reducer_flags,
data->dependencies());
@@ -1463,9 +1493,9 @@ struct InliningPhase {
JSNativeContextSpecialization native_context_specialization(
&graph_reducer, data->jsgraph(), data->broker(), flags,
data->dependencies(), temp_zone, info->zone());
- JSInliningHeuristic inlining(&graph_reducer,
- temp_zone, data->info(), data->jsgraph(),
- data->broker(), data->source_positions());
+ JSInliningHeuristic inlining(
+ &graph_reducer, temp_zone, data->info(), data->jsgraph(),
+ data->broker(), data->source_positions(), JSInliningHeuristic::kJSOnly);
JSIntrinsicLowering intrinsic_lowering(&graph_reducer, data->jsgraph(),
data->broker());
@@ -1483,9 +1513,37 @@ struct InliningPhase {
}
graph_reducer.ReduceGraph();
info->set_inlined_bytecode_size(inlining.total_inlined_bytecode_size());
+
+  // Skip the "wasm-inlining" phase if there are no Wasm function calls.
+ if (call_reducer.has_wasm_calls()) {
+ data->set_has_js_wasm_calls(true);
+ }
}
};
+struct WasmInliningPhase {
+ DECL_PIPELINE_PHASE_CONSTANTS(WasmInlining)
+ void Run(PipelineData* data, Zone* temp_zone) {
+ DCHECK(data->has_js_wasm_calls());
+
+ OptimizedCompilationInfo* info = data->info();
+ GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
+ data->broker(), data->jsgraph()->Dead());
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common(), temp_zone);
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->broker(), data->common(),
+ data->machine(), temp_zone);
+ JSInliningHeuristic inlining(&graph_reducer, temp_zone, data->info(),
+ data->jsgraph(), data->broker(),
+ data->source_positions(),
+ JSInliningHeuristic::kWasmOnly);
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ AddReducer(data, &graph_reducer, &common_reducer);
+ AddReducer(data, &graph_reducer, &inlining);
+ graph_reducer.ReduceGraph();
+ }
+};
struct TyperPhase {
DECL_PIPELINE_PHASE_CONSTANTS(Typer)
@@ -1530,9 +1588,9 @@ struct UntyperPhase {
NodeProperties::RemoveType(node);
}
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
RemoveTypeReducer remove_type_reducer;
AddReducer(data, &graph_reducer, &remove_type_reducer);
graph_reducer.ReduceGraph();
@@ -1551,9 +1609,9 @@ struct CopyMetadataForConcurrentCompilePhase {
DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(SerializeMetadata)
void Run(PipelineData* data, Zone* temp_zone) {
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
JSHeapCopyReducer heap_copy_reducer(data->broker());
AddReducer(data, &graph_reducer, &heap_copy_reducer);
graph_reducer.ReduceGraph();
@@ -1597,9 +1655,9 @@ struct TypedLoweringPhase {
DECL_PIPELINE_PHASE_CONSTANTS(TypedLowering)
void Run(PipelineData* data, Zone* temp_zone) {
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
JSCreateLowering create_lowering(&graph_reducer, data->dependencies(),
@@ -1648,7 +1706,7 @@ struct EscapeAnalysisPhase {
GraphReducer reducer(temp_zone, data->graph(),
&data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ data->jsgraph()->Dead(), data->observe_node_manager());
EscapeAnalysisReducer escape_reducer(&reducer, data->jsgraph(),
escape_analysis.analysis_result(),
temp_zone);
@@ -1659,7 +1717,8 @@ struct EscapeAnalysisPhase {
UnparkedScopeIfNeeded scope(data->broker());
reducer.ReduceGraph();
- // TODO(tebbi): Turn this into a debug mode check once we have confidence.
+ // TODO(turbofan): Turn this into a debug mode check once we have
+ // confidence.
escape_reducer.VerifyReplacement();
}
};
@@ -1668,9 +1727,9 @@ struct TypeAssertionsPhase {
DECL_PIPELINE_PHASE_CONSTANTS(TypeAssertions)
void Run(PipelineData* data, Zone* temp_zone) {
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
AddTypeAssertionsReducer type_assertions(&graph_reducer, data->jsgraph(),
temp_zone);
AddReducer(data, &graph_reducer, &type_assertions);
@@ -1682,10 +1741,10 @@ struct SimplifiedLoweringPhase {
DECL_PIPELINE_PHASE_CONSTANTS(SimplifiedLowering)
void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
- SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone,
- data->source_positions(), data->node_origins(),
- data->info()->GetPoisoningMitigationLevel(),
- &data->info()->tick_counter(), linkage);
+ SimplifiedLowering lowering(
+ data->jsgraph(), data->broker(), temp_zone, data->source_positions(),
+ data->node_origins(), data->info()->GetPoisoningMitigationLevel(),
+ &data->info()->tick_counter(), linkage, data->observe_node_manager());
// RepresentationChanger accesses the heap.
UnparkedScopeIfNeeded scope(data->broker());
@@ -1701,7 +1760,10 @@ struct LoopPeelingPhase {
GraphTrimmer trimmer(temp_zone, data->graph());
NodeVector roots(temp_zone);
data->jsgraph()->GetCachedNodes(&roots);
- trimmer.TrimGraph(roots.begin(), roots.end());
+ {
+ UnparkedScopeIfNeeded scope(data->broker(), FLAG_trace_turbo_trimming);
+ trimmer.TrimGraph(roots.begin(), roots.end());
+ }
LoopTree* loop_tree = LoopFinder::BuildLoopTree(
data->jsgraph()->graph(), &data->info()->tick_counter(), temp_zone);
@@ -1726,9 +1788,9 @@ struct GenericLoweringPhase {
DECL_PIPELINE_PHASE_CONSTANTS(GenericLowering)
void Run(PipelineData* data, Zone* temp_zone) {
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
JSGenericLowering generic_lowering(data->jsgraph(), &graph_reducer,
data->broker());
AddReducer(data, &graph_reducer, &generic_lowering);
@@ -1744,9 +1806,9 @@ struct EarlyOptimizationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(EarlyOptimization)
void Run(PipelineData* data, Zone* temp_zone) {
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
@@ -1789,7 +1851,10 @@ struct EffectControlLinearizationPhase {
GraphTrimmer trimmer(temp_zone, data->graph());
NodeVector roots(temp_zone);
data->jsgraph()->GetCachedNodes(&roots);
- trimmer.TrimGraph(roots.begin(), roots.end());
+ {
+ UnparkedScopeIfNeeded scope(data->broker(), FLAG_trace_turbo_trimming);
+ trimmer.TrimGraph(roots.begin(), roots.end());
+ }
// Schedule the graph without node splitting so that we can
// fix the effect and control flow for nodes with low-level side
@@ -1824,7 +1889,8 @@ struct EffectControlLinearizationPhase {
// it, to eliminate conditional deopts with a constant condition.
GraphReducer graph_reducer(temp_zone, data->graph(),
&data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ data->jsgraph()->Dead(),
+ data->observe_node_manager());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
@@ -1844,7 +1910,10 @@ struct StoreStoreEliminationPhase {
GraphTrimmer trimmer(temp_zone, data->graph());
NodeVector roots(temp_zone);
data->jsgraph()->GetCachedNodes(&roots);
- trimmer.TrimGraph(roots.begin(), roots.end());
+ {
+ UnparkedScopeIfNeeded scope(data->broker(), FLAG_trace_turbo_trimming);
+ trimmer.TrimGraph(roots.begin(), roots.end());
+ }
StoreStoreElimination::Run(data->jsgraph(), &data->info()->tick_counter(),
temp_zone);
@@ -1855,9 +1924,9 @@ struct LoadEliminationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(LoadElimination)
void Run(PipelineData* data, Zone* temp_zone) {
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone,
BranchElimination::kEARLY);
@@ -1904,7 +1973,10 @@ struct MemoryOptimizationPhase {
GraphTrimmer trimmer(temp_zone, data->graph());
NodeVector roots(temp_zone);
data->jsgraph()->GetCachedNodes(&roots);
- trimmer.TrimGraph(roots.begin(), roots.end());
+ {
+ UnparkedScopeIfNeeded scope(data->broker(), FLAG_trace_turbo_trimming);
+ trimmer.TrimGraph(roots.begin(), roots.end());
+ }
// Optimize allocations and load/store operations.
MemoryOptimizer optimizer(
@@ -1921,9 +1993,9 @@ struct LateOptimizationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(LateOptimization)
void Run(PipelineData* data, Zone* temp_zone) {
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
@@ -1949,9 +2021,9 @@ struct MachineOperatorOptimizationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(MachineOperatorOptimization)
void Run(PipelineData* data, Zone* temp_zone) {
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
@@ -2021,9 +2093,9 @@ struct CsaEarlyOptimizationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(CSAEarlyOptimization)
void Run(PipelineData* data, Zone* temp_zone) {
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
@@ -2049,9 +2121,9 @@ struct CsaOptimizationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(CSAOptimization)
void Run(PipelineData* data, Zone* temp_zone) {
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
@@ -2077,6 +2149,7 @@ struct EarlyGraphTrimmingPhase {
GraphTrimmer trimmer(temp_zone, data->graph());
NodeVector roots(temp_zone);
data->jsgraph()->GetCachedNodes(&roots);
+ UnparkedScopeIfNeeded scope(data->broker(), FLAG_trace_turbo_trimming);
trimmer.TrimGraph(roots.begin(), roots.end());
}
};
@@ -2091,6 +2164,7 @@ struct LateGraphTrimmingPhase {
if (data->jsgraph()) {
data->jsgraph()->GetCachedNodes(&roots);
}
+ UnparkedScopeIfNeeded scope(data->broker(), FLAG_trace_turbo_trimming);
trimmer.TrimGraph(roots.begin(), roots.end());
}
};
@@ -2302,7 +2376,7 @@ struct ResolveControlFlowPhase {
};
struct MidTierRegisterOutputDefinitionPhase {
- DECL_PIPELINE_PHASE_CONSTANTS(MidTierRegisterAllocator)
+ DECL_PIPELINE_PHASE_CONSTANTS(MidTierRegisterOutputDefinition)
void Run(PipelineData* data, Zone* temp_zone) {
DefineOutputs(data->mid_tier_register_allocator_data());
@@ -2584,6 +2658,12 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
Run<SimplifiedLoweringPhase>(linkage);
RunPrintAndVerify(SimplifiedLoweringPhase::phase_name(), true);
+ if (data->has_js_wasm_calls()) {
+ DCHECK(FLAG_turbo_inline_js_wasm_calls);
+ Run<WasmInliningPhase>();
+ RunPrintAndVerify(WasmInliningPhase::phase_name(), true);
+ }
+
// From now on it is invalid to look at types on the nodes, because the types
// on the nodes might not make sense after representation selection due to the
// way we handle truncations; if we'd want to look at types afterwards we'd
@@ -3116,7 +3196,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
// static
std::unique_ptr<OptimizedCompilationJob> Pipeline::NewCompilationJob(
Isolate* isolate, Handle<JSFunction> function, CodeKind code_kind,
- bool has_script, BailoutId osr_offset, JavaScriptFrame* osr_frame) {
+ bool has_script, BytecodeOffset osr_offset, JavaScriptFrame* osr_frame) {
Handle<SharedFunctionInfo> shared =
handle(function->shared(), function->GetIsolate());
return std::make_unique<PipelineCompilationJob>(
@@ -3155,6 +3235,11 @@ void Pipeline::GenerateCodeForWasmFunction(
pipeline.RunPrintAndVerify("V8.WasmMachineCode", true);
+ if (FLAG_wasm_loop_unrolling) {
+ pipeline.Run<LoopExitEliminationPhase>();
+ pipeline.RunPrintAndVerify("V8.LoopExitEliminationPhase", true);
+ }
+
data.BeginPhaseKind("V8.WasmOptimization");
const bool is_asm_js = is_asmjs_module(module);
if (FLAG_turbo_splitting && !is_asm_js) {
@@ -3163,9 +3248,9 @@ void Pipeline::GenerateCodeForWasmFunction(
if (FLAG_wasm_opt || is_asm_js) {
PipelineRunScope scope(&data, "V8.WasmFullOptimization",
RuntimeCallCounterId::kOptimizeWasmFullOptimization);
- GraphReducer graph_reducer(scope.zone(), data.graph(),
- &data.info()->tick_counter(), data.broker(),
- data.mcgraph()->Dead());
+ GraphReducer graph_reducer(
+ scope.zone(), data.graph(), &data.info()->tick_counter(), data.broker(),
+ data.mcgraph()->Dead(), data.observe_node_manager());
DeadCodeElimination dead_code_elimination(&graph_reducer, data.graph(),
data.common(), scope.zone());
ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone());
@@ -3183,9 +3268,9 @@ void Pipeline::GenerateCodeForWasmFunction(
} else {
PipelineRunScope scope(&data, "V8.OptimizeWasmBaseOptimization",
RuntimeCallCounterId::kOptimizeWasmBaseOptimization);
- GraphReducer graph_reducer(scope.zone(), data.graph(),
- &data.info()->tick_counter(), data.broker(),
- data.mcgraph()->Dead());
+ GraphReducer graph_reducer(
+ scope.zone(), data.graph(), &data.info()->tick_counter(), data.broker(),
+ data.mcgraph()->Dead(), data.observe_node_manager());
ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone());
AddReducer(&data, &graph_reducer, &value_numbering);
graph_reducer.ReduceGraph();
@@ -3754,6 +3839,10 @@ CodeGenerator* PipelineImpl::code_generator() const {
return data_->code_generator();
}
+ObserveNodeManager* PipelineImpl::observe_node_manager() const {
+ return data_->observe_node_manager();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 509f5febc6..db3aab4623 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -48,7 +48,7 @@ class Pipeline : public AllStatic {
static V8_EXPORT_PRIVATE std::unique_ptr<OptimizedCompilationJob>
NewCompilationJob(Isolate* isolate, Handle<JSFunction> function,
CodeKind code_kind, bool has_script,
- BailoutId osr_offset = BailoutId::None(),
+ BytecodeOffset osr_offset = BytecodeOffset::None(),
JavaScriptFrame* osr_frame = nullptr);
// Run the pipeline for the WebAssembly compilation info.
diff --git a/deps/v8/src/compiler/processed-feedback.h b/deps/v8/src/compiler/processed-feedback.h
index da3785f35e..4cecd338c5 100644
--- a/deps/v8/src/compiler/processed-feedback.h
+++ b/deps/v8/src/compiler/processed-feedback.h
@@ -246,7 +246,7 @@ class LiteralFeedback
};
class RegExpLiteralFeedback
- : public SingleValueFeedback<JSRegExpRef,
+ : public SingleValueFeedback<RegExpBoilerplateDescriptionRef,
ProcessedFeedback::kRegExpLiteral> {
using SingleValueFeedback::SingleValueFeedback;
};
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index 5214f7ad9b..a7eddbe826 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -199,54 +199,49 @@ Node* PropertyAccessBuilder::BuildLoadDataField(NameRef const& name,
}
if (field_access.machine_type.representation() ==
MachineRepresentation::kFloat64) {
- bool const is_heapnumber = !is_inobject || !FLAG_unbox_double_fields;
- if (is_heapnumber) {
- if (dependencies() == nullptr) {
- FieldAccess const storage_access = {kTaggedBase,
- field_access.offset,
- name.object(),
- MaybeHandle<Map>(),
- Type::Any(),
- MachineType::AnyTagged(),
- kPointerWriteBarrier,
- LoadSensitivity::kCritical,
- field_access.const_field_info};
- storage = *effect =
- graph()->NewNode(simplified()->LoadField(storage_access), storage,
- *effect, *control);
- // We expect the loaded value to be a heap number here. With
- // in-place field representation changes it is possible this is a
- // no longer a heap number without map transitions. If we haven't taken
- // a dependency on field representation, we should verify the loaded
- // value is a heap number.
- storage = *effect = graph()->NewNode(simplified()->CheckHeapObject(),
- storage, *effect, *control);
- Node* map = *effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- storage, *effect, *control);
- Node* is_heap_number =
- graph()->NewNode(simplified()->ReferenceEqual(), map,
- jsgraph()->HeapNumberMapConstant());
- *effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kNotAHeapNumber),
- is_heap_number, *effect, *control);
- } else {
- FieldAccess const storage_access = {kTaggedBase,
- field_access.offset,
- name.object(),
- MaybeHandle<Map>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
- kPointerWriteBarrier,
- LoadSensitivity::kCritical,
- field_access.const_field_info};
- storage = *effect =
- graph()->NewNode(simplified()->LoadField(storage_access), storage,
- *effect, *control);
- }
- field_access.offset = HeapNumber::kValueOffset;
- field_access.name = MaybeHandle<Name>();
+ if (dependencies() == nullptr) {
+ FieldAccess const storage_access = {kTaggedBase,
+ field_access.offset,
+ name.object(),
+ MaybeHandle<Map>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier,
+ LoadSensitivity::kCritical,
+ field_access.const_field_info};
+ storage = *effect = graph()->NewNode(
+ simplified()->LoadField(storage_access), storage, *effect, *control);
+ // We expect the loaded value to be a heap number here. With
+      // in-place field representation changes it is possible this is
+      // no longer a heap number without map transitions. If we haven't taken
+ // a dependency on field representation, we should verify the loaded
+ // value is a heap number.
+ storage = *effect = graph()->NewNode(simplified()->CheckHeapObject(),
+ storage, *effect, *control);
+ Node* map = *effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ storage, *effect, *control);
+ Node* is_heap_number =
+ graph()->NewNode(simplified()->ReferenceEqual(), map,
+ jsgraph()->HeapNumberMapConstant());
+ *effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kNotAHeapNumber),
+ is_heap_number, *effect, *control);
+ } else {
+ FieldAccess const storage_access = {kTaggedBase,
+ field_access.offset,
+ name.object(),
+ MaybeHandle<Map>(),
+ Type::OtherInternal(),
+ MachineType::TaggedPointer(),
+ kPointerWriteBarrier,
+ LoadSensitivity::kCritical,
+ field_access.const_field_info};
+ storage = *effect = graph()->NewNode(
+ simplified()->LoadField(storage_access), storage, *effect, *control);
}
+ field_access.offset = HeapNumber::kValueOffset;
+ field_access.name = MaybeHandle<Name>();
}
Node* value = *effect = graph()->NewNode(
simplified()->LoadField(field_access), storage, *effect, *control);
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 1bff7c82a7..df12030c31 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -384,6 +384,7 @@ Node* RawMachineAssembler::CreateNodeFromPredecessors(
return sidetable[predecessors.front()->id().ToSize()];
}
std::vector<Node*> inputs;
+ inputs.reserve(predecessors.size());
for (BasicBlock* predecessor : predecessors) {
inputs.push_back(sidetable[predecessor->id().ToSize()]);
}
@@ -410,6 +411,7 @@ void RawMachineAssembler::MakePhiBinary(Node* phi, int split_point,
left_input = NodeProperties::GetValueInput(phi, 0);
} else {
std::vector<Node*> inputs;
+ inputs.reserve(left_input_count);
for (int i = 0; i < left_input_count; ++i) {
inputs.push_back(NodeProperties::GetValueInput(phi, i));
}
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index eff7a845c6..907464f57e 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -1053,7 +1053,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
// A post-processing pass to add effect and control edges so that the graph
// can be optimized and re-scheduled.
- // TODO(tebbi): Move this to a separate class.
+ // TODO(turbofan): Move this to a separate class.
void MakeReschedulable();
Node* CreateNodeFromPredecessors(const std::vector<BasicBlock*>& predecessors,
const std::vector<Node*>& sidetable,
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index 2455ea3115..64b274cdcc 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -1105,7 +1105,7 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
// BigInts are only represented as tagged pointer and word64.
if (!CanBeTaggedPointer(output_rep) &&
output_rep != MachineRepresentation::kWord64) {
- DCHECK(!output_type.Is(Type::BigInt()));
+ DCHECK(!output_type.Equals(Type::BigInt()));
Node* unreachable =
InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotABigInt);
return jsgraph()->graph()->NewNode(
diff --git a/deps/v8/src/compiler/scheduled-machine-lowering.cc b/deps/v8/src/compiler/scheduled-machine-lowering.cc
index 903052be1d..fde836e4e8 100644
--- a/deps/v8/src/compiler/scheduled-machine-lowering.cc
+++ b/deps/v8/src/compiler/scheduled-machine-lowering.cc
@@ -38,7 +38,7 @@ void ScheduledMachineLowering::Run() {
Node* node = *instr;
Reduction reduction;
for (auto reducer : reducers_) {
- reduction = reducer->Reduce(node);
+ reduction = reducer->Reduce(node, nullptr);
if (reduction.Changed()) break;
}
if (reduction.Changed()) {
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.cc b/deps/v8/src/compiler/serializer-for-background-compilation.cc
index 2733aaaa7d..5be9a7d705 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.cc
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.cc
@@ -382,7 +382,7 @@ class SerializerForBackgroundCompilation {
SerializerForBackgroundCompilation(
ZoneStats* zone_stats, JSHeapBroker* broker,
CompilationDependencies* dependencies, Handle<JSFunction> closure,
- SerializerForBackgroundCompilationFlags flags, BailoutId osr_offset);
+ SerializerForBackgroundCompilationFlags flags, BytecodeOffset osr_offset);
Hints Run(); // NOTE: Returns empty for an
// already-serialized function.
@@ -405,6 +405,8 @@ class SerializerForBackgroundCompilation {
SUPPORTED_BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
#undef DECLARE_VISIT_BYTECODE
+ void VisitShortStar(interpreter::Register reg);
+
Hints& register_hints(interpreter::Register reg);
// Return a vector containing the hints for the given register range (in
@@ -555,7 +557,7 @@ class SerializerForBackgroundCompilation {
Zone* zone() { return zone_scope_.zone(); }
Environment* environment() const { return environment_; }
SerializerForBackgroundCompilationFlags flags() const { return flags_; }
- BailoutId osr_offset() const { return osr_offset_; }
+ BytecodeOffset osr_offset() const { return osr_offset_; }
const BytecodeAnalysis& bytecode_analysis() { return *bytecode_analysis_; }
JSHeapBroker* const broker_;
@@ -565,7 +567,7 @@ class SerializerForBackgroundCompilation {
// Instead of storing the virtual_closure here, we could extract it from the
// {closure_hints_} but that would be cumbersome.
VirtualClosure const function_;
- BailoutId const osr_offset_;
+ BytecodeOffset const osr_offset_;
base::Optional<BytecodeAnalysis> bytecode_analysis_;
ZoneUnorderedMap<int, Environment*> jump_target_environments_;
Environment* const environment_;
@@ -579,7 +581,7 @@ class SerializerForBackgroundCompilation {
void RunSerializerForBackgroundCompilation(
ZoneStats* zone_stats, JSHeapBroker* broker,
CompilationDependencies* dependencies, Handle<JSFunction> closure,
- SerializerForBackgroundCompilationFlags flags, BailoutId osr_offset) {
+ SerializerForBackgroundCompilationFlags flags, BytecodeOffset osr_offset) {
SerializerForBackgroundCompilation serializer(
zone_stats, broker, dependencies, closure, flags, osr_offset);
serializer.Run();
@@ -1056,7 +1058,7 @@ std::ostream& operator<<(
SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
ZoneStats* zone_stats, JSHeapBroker* broker,
CompilationDependencies* dependencies, Handle<JSFunction> closure,
- SerializerForBackgroundCompilationFlags flags, BailoutId osr_offset)
+ SerializerForBackgroundCompilationFlags flags, BytecodeOffset osr_offset)
: broker_(broker),
dependencies_(dependencies),
zone_scope_(zone_stats, ZONE_NAME),
@@ -1070,6 +1072,7 @@ SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
arguments_(zone()) {
closure_hints_.AddConstant(closure, zone(), broker_);
JSFunctionRef(broker, closure).Serialize();
+ JSFunctionRef(broker, closure).SerializeCodeAndFeedback();
TRACE_BROKER(broker_, "Hints for <closure>: " << closure_hints_);
TRACE_BROKER(broker_, "Initial environment:\n" << *environment_);
@@ -1086,7 +1089,7 @@ SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
zone_scope_(zone_stats, ZONE_NAME),
flags_(flags),
function_(function.virtual_closure()),
- osr_offset_(BailoutId::None()),
+ osr_offset_(BytecodeOffset::None()),
jump_target_environments_(zone()),
environment_(zone()->New<Environment>(zone(), broker_->isolate(),
function, new_target, arguments,
@@ -1097,6 +1100,7 @@ SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
if (function.closure().ToHandle(&closure)) {
closure_hints_.AddConstant(closure, zone(), broker);
JSFunctionRef(broker, closure).Serialize();
+ JSFunctionRef(broker, closure).SerializeCodeAndFeedback();
} else {
closure_hints_.AddVirtualClosure(function.virtual_closure(), zone(),
broker);
@@ -1263,7 +1267,6 @@ Handle<BytecodeArray> SerializerForBackgroundCompilation::bytecode_array()
void SerializerForBackgroundCompilation::TraverseBytecode() {
bytecode_analysis_.emplace(bytecode_array(), zone(), osr_offset(), false);
- BytecodeArrayRef(broker(), bytecode_array()).SerializeForCompilation();
BytecodeArrayIterator iterator(bytecode_array());
HandlerRangeMatcher try_start_matcher(iterator, bytecode_array());
@@ -1309,13 +1312,20 @@ void SerializerForBackgroundCompilation::TraverseBytecode() {
}
}
- switch (iterator.current_bytecode()) {
+ interpreter::Bytecode current_bytecode = iterator.current_bytecode();
+ switch (current_bytecode) {
#define DEFINE_BYTECODE_CASE(name) \
case interpreter::Bytecode::k##name: \
Visit##name(&iterator); \
break;
SUPPORTED_BYTECODE_LIST(DEFINE_BYTECODE_CASE)
#undef DEFINE_BYTECODE_CASE
+
+#define DEFINE_SHORT_STAR_CASE(Name, ...) case interpreter::Bytecode::k##Name:
+ SHORT_STAR_BYTECODE_LIST(DEFINE_SHORT_STAR_CASE)
+#undef DEFINE_SHORT_STAR_CASE
+ VisitShortStar(interpreter::Register::FromShortStar(current_bytecode));
+ break;
}
}
@@ -1521,10 +1531,14 @@ void SerializerForBackgroundCompilation::VisitInvokeIntrinsic(
void SerializerForBackgroundCompilation::VisitLdaConstant(
BytecodeArrayIterator* iterator) {
- ObjectRef object(
- broker(), iterator->GetConstantForIndexOperand(0, broker()->isolate()));
- environment()->accumulator_hints() =
- Hints::SingleConstant(object.object(), zone());
+ Handle<Object> constant =
+ iterator->GetConstantForIndexOperand(0, broker()->isolate());
+ // TODO(v8:7790): FixedArrays still need to be serialized until they are
+ // moved to kNeverSerialized.
+ if (!FLAG_turbo_direct_heap_access || constant->IsFixedArray()) {
+ ObjectRef(broker(), constant);
+ }
+ environment()->accumulator_hints() = Hints::SingleConstant(constant, zone());
}
void SerializerForBackgroundCompilation::VisitPushContext(
@@ -1692,6 +1706,11 @@ void SerializerForBackgroundCompilation::VisitStar(
register_hints(reg).Reset(&environment()->accumulator_hints(), zone());
}
+void SerializerForBackgroundCompilation::VisitShortStar(
+ interpreter::Register reg) {
+ register_hints(reg).Reset(&environment()->accumulator_hints(), zone());
+}
+
void SerializerForBackgroundCompilation::VisitMov(
BytecodeArrayIterator* iterator) {
interpreter::Register src = iterator->GetRegisterOperand(0);
@@ -2136,10 +2155,8 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
callee.AddConstant(target->object(), zone(), broker());
} else {
// Call; target is feedback cell or callee.
- if (target->IsFeedbackCell() &&
- target->AsFeedbackCell().value().IsFeedbackVector()) {
- FeedbackVectorRef vector =
- target->AsFeedbackCell().value().AsFeedbackVector();
+ if (target->IsFeedbackCell() && target->AsFeedbackCell().value()) {
+ FeedbackVectorRef vector = *target->AsFeedbackCell().value();
vector.Serialize();
VirtualClosure virtual_closure(
vector.shared_function_info().object(), vector.object(),
@@ -2255,7 +2272,7 @@ void SerializerForBackgroundCompilation::ProcessApiCall(
FunctionTemplateInfoRef target_template_info(
broker(),
- handle(target->function_data(kAcquireLoad), broker()->isolate()));
+ broker()->CanonicalPersistentHandle(target->function_data(kAcquireLoad)));
if (!target_template_info.has_call_code()) return;
target_template_info.SerializeCallCode();
@@ -2987,7 +3004,9 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
base::Optional<PropertyCellRef> cell = global_object.GetPropertyCell(
name, SerializationPolicy::kSerializeIfNeeded);
if (access_mode == AccessMode::kLoad && cell.has_value()) {
- result_hints->AddConstant(cell->value().object(), zone(), broker());
+ result_hints->AddConstant(
+ handle(cell->object()->value(), broker()->isolate()), zone(),
+ broker());
}
}
@@ -3017,7 +3036,8 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
Handle<SharedFunctionInfo> sfi = function.shared().object();
if (sfi->IsApiFunction()) {
FunctionTemplateInfoRef fti_ref(
- broker(), handle(sfi->get_api_func_data(), broker()->isolate()));
+ broker(),
+ broker()->CanonicalPersistentHandle(sfi->get_api_func_data()));
if (fti_ref.has_call_code()) {
fti_ref.SerializeCallCode();
ProcessReceiverMapForApiCall(fti_ref, receiver_map->object());
@@ -3030,7 +3050,8 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
// For JSCallReducer::ReduceJSCall.
function.Serialize();
} else {
- FunctionTemplateInfoRef fti(broker(), access_info.constant());
+ FunctionTemplateInfoRef fti(broker(), broker()->CanonicalPersistentHandle(
+ access_info.constant()));
if (fti.has_call_code()) fti.SerializeCallCode();
}
} else if (access_info.IsModuleExport()) {
@@ -3301,14 +3322,22 @@ void SerializerForBackgroundCompilation::ProcessElementAccess(
ObjectRef key_ref(broker(), hint);
// TODO(neis): Do this for integer-HeapNumbers too?
if (key_ref.IsSmi() && key_ref.AsSmi() >= 0) {
- base::Optional<ObjectRef> element =
- receiver_ref.GetOwnConstantElement(
- key_ref.AsSmi(), SerializationPolicy::kSerializeIfNeeded);
- if (!element.has_value() && receiver_ref.IsJSArray()) {
- // We didn't find a constant element, but if the receiver is a
- // cow-array we can exploit the fact that any future write to the
- // element will replace the whole elements storage.
- receiver_ref.AsJSArray().GetOwnCowElement(
+ base::Optional<ObjectRef> element;
+ if (receiver_ref.IsJSObject()) {
+ element = receiver_ref.AsJSObject().GetOwnConstantElement(
+ key_ref.AsSmi(), SerializationPolicy::kSerializeIfNeeded);
+ if (!element.has_value() && receiver_ref.IsJSArray()) {
+ // We didn't find a constant element, but if the receiver is a
+ // cow-array we can exploit the fact that any future write to the
+ // element will replace the whole elements storage.
+ JSArrayRef array_ref = receiver_ref.AsJSArray();
+ array_ref.SerializeElements();
+ array_ref.GetOwnCowElement(
+ array_ref.elements().value(), key_ref.AsSmi(),
+ SerializationPolicy::kSerializeIfNeeded);
+ }
+ } else if (receiver_ref.IsString()) {
+ element = receiver_ref.AsString().GetCharAsStringOrUndefined(
key_ref.AsSmi(), SerializationPolicy::kSerializeIfNeeded);
}
}
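The serializer hunks above route every `SHORT_STAR_BYTECODE_LIST` entry through a single `VisitShortStar` handler by expanding an X-macro into fall-through `case` labels. A self-contained sketch of that dispatch shape (the list, enum, and handler names below are invented for illustration):

```cpp
#include <iostream>

// An X-macro listing related opcodes that all share one handler.
#define SHORT_STAR_LIST(V) \
  V(Star0)                 \
  V(Star1)                 \
  V(Star2)

enum class Bytecode {
#define DEFINE_ENUM(Name) k##Name,
  SHORT_STAR_LIST(DEFINE_ENUM)
#undef DEFINE_ENUM
  kOther
};

void VisitShortStar(Bytecode b) {
  // In the real code the target register is derived from the bytecode value;
  // here we just show which opcode reached the shared handler.
  std::cout << "short star handler for bytecode #" << static_cast<int>(b) << "\n";
}

void Dispatch(Bytecode b) {
  switch (b) {
    // Every listed opcode expands to a bare `case` label, so all of them
    // fall through into the shared handler below.
#define SHORT_STAR_CASE(Name) case Bytecode::k##Name:
    SHORT_STAR_LIST(SHORT_STAR_CASE)
#undef SHORT_STAR_CASE
      VisitShortStar(b);
      break;
    default:
      std::cout << "regular bytecode\n";
  }
}

int main() {
  Dispatch(Bytecode::kStar1);
  Dispatch(Bytecode::kOther);
  return 0;
}
```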
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.h b/deps/v8/src/compiler/serializer-for-background-compilation.h
index f2330332d8..f01e73452e 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.h
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.h
@@ -10,7 +10,7 @@
namespace v8 {
namespace internal {
-class BailoutId;
+class BytecodeOffset;
class Zone;
namespace compiler {
@@ -31,7 +31,7 @@ using SerializerForBackgroundCompilationFlags =
void RunSerializerForBackgroundCompilation(
ZoneStats* zone_stats, JSHeapBroker* broker,
CompilationDependencies* dependencies, Handle<JSFunction> closure,
- SerializerForBackgroundCompilationFlags flags, BailoutId osr_offset);
+ SerializerForBackgroundCompilationFlags flags, BytecodeOffset osr_offset);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index f9bf22f8be..445898d882 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -114,6 +114,8 @@ void SimdScalarLowering::LowerGraph() {
V(I64x2Splat) \
V(I64x2ExtractLane) \
V(I64x2ReplaceLane) \
+ V(I64x2Eq) \
+ V(I64x2Ne) \
V(I64x2Neg) \
V(I64x2Shl) \
V(I64x2ShrS) \
@@ -166,11 +168,10 @@ void SimdScalarLowering::LowerGraph() {
V(S128Not) \
V(S128AndNot) \
V(S128Select) \
- V(V32x4AnyTrue) \
+ V(V64x2AllTrue) \
V(V32x4AllTrue) \
- V(V16x8AnyTrue) \
V(V16x8AllTrue) \
- V(V8x16AnyTrue) \
+ V(V128AnyTrue) \
V(V8x16AllTrue) \
V(I32x4BitMask) \
V(I32x4ExtMulLowI16x8S) \
@@ -1188,7 +1189,7 @@ Node* SimdScalarLowering::ConstructPhiForComparison(Diamond d,
int false_value) {
// Close the given Diamond d using a Phi node, taking care of constructing the
// right kind of constants (Int32 or Int64) based on rep_type.
- if (rep_type == SimdType::kFloat64x2) {
+ if (rep_type == SimdType::kFloat64x2 || rep_type == SimdType::kInt64x2) {
MachineRepresentation rep = MachineRepresentation::kWord64;
return d.Phi(rep, mcgraph_->Int64Constant(true_value),
mcgraph_->Int64Constant(false_value));
@@ -1261,15 +1262,33 @@ void SimdScalarLowering::LowerAllTrueOp(Node* node, SimdType rep_type) {
int num_lanes = NumLanes(rep_type);
DCHECK_EQ(1, node->InputCount());
Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
+ Node* zero;
+ Node* tmp_result;
+ MachineRepresentation result_rep = MachineRepresentation::kWord32;
+ const Operator* equals;
+
+ if (SimdType::kInt64x2 == rep_type) {
+ zero = mcgraph_->Int64Constant(0);
+ tmp_result = mcgraph_->Int64Constant(1);
+ result_rep = MachineRepresentation::kWord64;
+ equals = machine()->Word64Equal();
+ } else {
+ zero = mcgraph_->Int32Constant(0);
+ tmp_result = mcgraph_->Int32Constant(1);
+ equals = machine()->Word32Equal();
+ }
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- Node* zero = mcgraph_->Int32Constant(0);
- Node* tmp_result = mcgraph_->Int32Constant(1);
for (int i = 0; i < num_lanes; ++i) {
- Diamond d(graph(), common(),
- graph()->NewNode(machine()->Word32Equal(), rep[i], zero));
- tmp_result = d.Phi(MachineRepresentation::kWord32, zero, tmp_result);
+ Diamond d(graph(), common(), graph()->NewNode(equals, rep[i], zero));
+ tmp_result = d.Phi(result_rep, zero, tmp_result);
+ }
+
+ if (SimdType::kInt64x2 == rep_type) {
+ tmp_result =
+ graph()->NewNode(machine()->TruncateInt64ToInt32(), tmp_result);
}
+
rep_node[0] = tmp_result;
ReplaceNode(node, rep_node, 1);
}
@@ -2102,6 +2121,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
COMPARISON_CASE(Float32x4, kF32x4Le, Float32LessThanOrEqual, false)
COMPARISON_CASE(Float32x4, kF32x4Gt, Float32LessThan, true)
COMPARISON_CASE(Float32x4, kF32x4Ge, Float32LessThanOrEqual, true)
+ COMPARISON_CASE(Int64x2, kI64x2Eq, Word64Equal, false)
COMPARISON_CASE(Int32x4, kI32x4Eq, Word32Equal, false)
COMPARISON_CASE(Int32x4, kI32x4LtS, Int32LessThan, false)
COMPARISON_CASE(Int32x4, kI32x4LeS, Int32LessThanOrEqual, false)
@@ -2138,6 +2158,10 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerNotEqual(node, SimdType::kFloat32x4, machine()->Float32Equal());
break;
}
+ case IrOpcode::kI64x2Ne: {
+ LowerNotEqual(node, SimdType::kInt64x2, machine()->Word64Equal());
+ break;
+ }
case IrOpcode::kI32x4Ne: {
LowerNotEqual(node, SimdType::kInt32x4, machine()->Word32Equal());
break;
@@ -2220,9 +2244,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node, 16);
break;
}
- case IrOpcode::kV32x4AnyTrue:
- case IrOpcode::kV16x8AnyTrue:
- case IrOpcode::kV8x16AnyTrue: {
+ case IrOpcode::kV128AnyTrue: {
DCHECK_EQ(1, node->InputCount());
// AnyTrue always returns a I32x4, and can work with inputs of any shape,
// but we still need GetReplacementsWithType if input is float.
@@ -2242,6 +2264,10 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node, 1);
break;
}
+ case IrOpcode::kV64x2AllTrue: {
+ LowerAllTrueOp(node, SimdType::kInt64x2);
+ break;
+ }
case IrOpcode::kV32x4AllTrue: {
LowerAllTrueOp(node, SimdType::kInt32x4);
break;
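`LowerAllTrueOp` above lowers the 64x2 "all lanes true" SIMD node to per-lane scalar code: start from 1, clear the running result whenever a lane equals zero, and finally truncate the 64-bit accumulator back to the i32 result the wasm semantics require. A plain-C++ sketch of the same reduction, without the TurboFan graph machinery:

```cpp
#include <cstdint>
#include <iostream>

// Scalar equivalent of V64x2AllTrue: the result is 1 only if every lane is
// non-zero. Each iteration mirrors the Diamond/Phi in the lowering: compare
// the lane against zero and either clear or keep the accumulated result.
int32_t AllTrueI64x2(const int64_t lanes[2]) {
  int64_t result = 1;  // tmp_result starts as the constant 1
  for (int i = 0; i < 2; ++i) {
    result = (lanes[i] == 0) ? 0 : result;  // Phi(zero, tmp_result)
  }
  // The wasm result type is i32, so the 64-bit accumulator is truncated,
  // matching the TruncateInt64ToInt32 node inserted at the end of the hunk.
  return static_cast<int32_t>(result);
}

int main() {
  int64_t a[2] = {5, -1};
  int64_t b[2] = {5, 0};
  std::cout << AllTrueI64x2(a) << " " << AllTrueI64x2(b) << "\n";  // 1 0
  return 0;
}
```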
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index a61ff7bf70..49df06a0ec 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -18,8 +18,8 @@
#include "src/compiler/diamond.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-observer.h"
#include "src/compiler/node-origin-table.h"
-#include "src/compiler/node-properties.h"
#include "src/compiler/operation-typer.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/representation-change.h"
@@ -28,6 +28,7 @@
#include "src/numbers/conversions-inl.h"
#include "src/objects/objects.h"
#include "src/utils/address-map.h"
+#include "src/wasm/value-type.h"
namespace v8 {
namespace internal {
@@ -39,6 +40,8 @@ namespace compiler {
if (FLAG_trace_representation) PrintF(__VA_ARGS__); \
} while (false)
+const char* kSimplifiedLoweringReducerName = "SimplifiedLowering";
+
// Representation selection and lowering of {Simplified} operators to machine
 // operators are intertwined. We use a fixpoint calculation to compute both the
// output representation and the best possible lowering for {Simplified} nodes.
@@ -241,6 +244,16 @@ class InputUseInfos {
#endif // DEBUG
class RepresentationSelector {
+ // The purpose of this nested class is to hide method
+ // v8::internal::compiler::NodeProperties::ChangeOp which should not be
+ // directly used by code in RepresentationSelector and SimplifiedLowering.
+ // RepresentationSelector code should call RepresentationSelector::ChangeOp in
+ // place of NodeProperties::ChangeOp, in order to notify the changes to a
+ // registered ObserveNodeManager and support the %ObserveNode intrinsic.
+ class NodeProperties : public compiler::NodeProperties {
+ static void ChangeOp(Node* node, const Operator* new_op) { UNREACHABLE(); }
+ };
+
public:
// Information for each node tracked during the fixpoint.
class NodeInfo final {
@@ -290,7 +303,8 @@ class RepresentationSelector {
RepresentationChanger* changer,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- TickCounter* tick_counter, Linkage* linkage)
+ TickCounter* tick_counter, Linkage* linkage,
+ ObserveNodeManager* observe_node_manager)
: jsgraph_(jsgraph),
zone_(zone),
might_need_revisit_(zone),
@@ -308,7 +322,8 @@ class RepresentationSelector {
type_cache_(TypeCache::Get()),
op_typer_(broker, graph_zone()),
tick_counter_(tick_counter),
- linkage_(linkage) {
+ linkage_(linkage),
+ observe_node_manager_(observe_node_manager) {
}
void ResetNodeInfoState() {
@@ -763,7 +778,7 @@ class RepresentationSelector {
DCHECK(TypeOf(node).IsNone());
// If the node is unreachable, insert an Unreachable node and mark the
// value dead.
- // TODO(jarin,tebbi) Find a way to unify/merge this insertion with
+ // TODO(jarin,turbofan) Find a way to unify/merge this insertion with
// InsertUnreachableIfNecessary.
Node* unreachable = effect =
graph()->NewNode(jsgraph_->common()->Unreachable(), effect, control);
@@ -772,7 +787,7 @@ class RepresentationSelector {
node->ReplaceInput(0, unreachable);
node->TrimInputCount(dead_value->ValueInputCount());
ReplaceEffectControlUses(node, effect, control);
- NodeProperties::ChangeOp(node, dead_value);
+ ChangeOp(node, dead_value);
}
void ChangeToPureOp(Node* node, const Operator* new_op) {
@@ -792,7 +807,7 @@ class RepresentationSelector {
} else {
DCHECK_EQ(0, node->op()->ControlInputCount());
}
- NodeProperties::ChangeOp(node, new_op);
+ ChangeOp(node, new_op);
}
void ChangeUnaryToPureBinaryOp(Node* node, const Operator* new_op,
@@ -816,7 +831,7 @@ class RepresentationSelector {
DCHECK_EQ(0, node->op()->ControlInputCount());
}
node->InsertInput(jsgraph_->zone(), new_input_index, new_input);
- NodeProperties::ChangeOp(node, new_op);
+ ChangeOp(node, new_op);
}
// Converts input {index} of {node} according to given UseInfo {use},
@@ -1038,8 +1053,7 @@ class RepresentationSelector {
// Update the select operator.
SelectParameters p = SelectParametersOf(node->op());
if (output != p.representation()) {
- NodeProperties::ChangeOp(node,
- lowering->common()->Select(output, p.hint()));
+ ChangeOp(node, lowering->common()->Select(output, p.hint()));
}
}
// Convert inputs to the output representation of this phi, pass the
@@ -1063,7 +1077,7 @@ class RepresentationSelector {
if (lower<T>()) {
// Update the phi operator.
if (output != PhiRepresentationOf(node->op())) {
- NodeProperties::ChangeOp(node, lowering->common()->Phi(output, values));
+ ChangeOp(node, lowering->common()->Phi(output, values));
}
}
@@ -1216,41 +1230,47 @@ class RepresentationSelector {
DeoptMachineTypeOf(GetInfo(input)->representation(), TypeOf(input));
}
SparseInputMask mask = SparseInputMaskOf(node->op());
- NodeProperties::ChangeOp(
- node, jsgraph_->common()->TypedStateValues(types, mask));
+ ChangeOp(node, jsgraph_->common()->TypedStateValues(types, mask));
}
SetOutput<T>(node, MachineRepresentation::kTagged);
}
template <Phase T>
- void VisitFrameState(Node* node) {
+ void VisitFrameState(FrameState node) {
DCHECK_EQ(5, node->op()->ValueInputCount());
DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
+ DCHECK_EQ(FrameState::kFrameStateInputCount, node->InputCount());
- ProcessInput<T>(node, 0, UseInfo::AnyTagged()); // Parameters.
- ProcessInput<T>(node, 1, UseInfo::AnyTagged()); // Registers.
+ ProcessInput<T>(node, FrameState::kFrameStateParametersInput,
+ UseInfo::AnyTagged());
+ ProcessInput<T>(node, FrameState::kFrameStateLocalsInput,
+ UseInfo::AnyTagged());
// Accumulator is a special flower - we need to remember its type in
// a singleton typed-state-values node (as if it was a singleton
// state-values node).
- Node* accumulator = node->InputAt(2);
+ Node* accumulator = node.stack();
if (propagate<T>()) {
// TODO(nicohartmann): Remove, once the deoptimizer can rematerialize
// truncated BigInts.
if (TypeOf(accumulator).Is(Type::BigInt())) {
- EnqueueInput<T>(node, 2, UseInfo::AnyTagged());
+ EnqueueInput<T>(node, FrameState::kFrameStateStackInput,
+ UseInfo::AnyTagged());
} else {
- EnqueueInput<T>(node, 2, UseInfo::Any());
+ EnqueueInput<T>(node, FrameState::kFrameStateStackInput,
+ UseInfo::Any());
}
} else if (lower<T>()) {
// TODO(nicohartmann): Remove, once the deoptimizer can rematerialize
// truncated BigInts.
if (TypeOf(accumulator).Is(Type::BigInt())) {
- ConvertInput(node, 2, UseInfo::AnyTagged());
+ ConvertInput(node, FrameState::kFrameStateStackInput,
+ UseInfo::AnyTagged());
}
Zone* zone = jsgraph_->zone();
if (accumulator == jsgraph_->OptimizedOutConstant()) {
- node->ReplaceInput(2, jsgraph_->SingleDeadTypedStateValues());
+ node->ReplaceInput(FrameState::kFrameStateStackInput,
+ jsgraph_->SingleDeadTypedStateValues());
} else {
ZoneVector<MachineType>* types =
zone->New<ZoneVector<MachineType>>(1, zone);
@@ -1258,15 +1278,19 @@ class RepresentationSelector {
TypeOf(accumulator));
node->ReplaceInput(
- 2, jsgraph_->graph()->NewNode(jsgraph_->common()->TypedStateValues(
- types, SparseInputMask::Dense()),
- node->InputAt(2)));
+ FrameState::kFrameStateStackInput,
+ jsgraph_->graph()->NewNode(jsgraph_->common()->TypedStateValues(
+ types, SparseInputMask::Dense()),
+ node.stack()));
}
}
- ProcessInput<T>(node, 3, UseInfo::AnyTagged()); // Context.
- ProcessInput<T>(node, 4, UseInfo::AnyTagged()); // Closure.
- ProcessInput<T>(node, 5, UseInfo::AnyTagged()); // Outer frame state.
+ ProcessInput<T>(node, FrameState::kFrameStateContextInput,
+ UseInfo::AnyTagged());
+ ProcessInput<T>(node, FrameState::kFrameStateFunctionInput,
+ UseInfo::AnyTagged());
+ ProcessInput<T>(node, FrameState::kFrameStateOuterStateInput,
+ UseInfo::AnyTagged());
return SetOutput<T>(node, MachineRepresentation::kTagged);
}
@@ -1296,8 +1320,8 @@ class RepresentationSelector {
ConvertInput(node, i, UseInfo::AnyTagged());
}
}
- NodeProperties::ChangeOp(node, jsgraph_->common()->TypedObjectState(
- ObjectIdOf(node->op()), types));
+ ChangeOp(node, jsgraph_->common()->TypedObjectState(
+ ObjectIdOf(node->op()), types));
}
SetOutput<T>(node, MachineRepresentation::kTagged);
}
@@ -1406,15 +1430,15 @@ class RepresentationSelector {
IsSomePositiveOrderedNumber(input1_type)
? CheckForMinusZeroMode::kDontCheckForMinusZero
: CheckForMinusZeroMode::kCheckForMinusZero;
- NodeProperties::ChangeOp(node, simplified()->CheckedInt32Mul(mz_mode));
+ ChangeOp(node, simplified()->CheckedInt32Mul(mz_mode));
}
void ChangeToInt32OverflowOp(Node* node) {
- NodeProperties::ChangeOp(node, Int32OverflowOp(node));
+ ChangeOp(node, Int32OverflowOp(node));
}
void ChangeToUint32OverflowOp(Node* node) {
- NodeProperties::ChangeOp(node, Uint32OverflowOp(node));
+ ChangeOp(node, Uint32OverflowOp(node));
}
template <Phase T>
@@ -1674,19 +1698,19 @@ class RepresentationSelector {
// TODO(neis): Move this into TypedOptimization?
new_flags |= CheckBoundsFlag::kAbortOnOutOfBounds;
}
- NodeProperties::ChangeOp(
- node, simplified()->CheckedUint32Bounds(feedback, new_flags));
+ ChangeOp(node,
+ simplified()->CheckedUint32Bounds(feedback, new_flags));
}
} else if (p.flags() & CheckBoundsFlag::kConvertStringAndMinusZero) {
VisitBinop<T>(node, UseInfo::CheckedTaggedAsArrayIndex(feedback),
UseInfo::Word(), MachineType::PointerRepresentation());
if (lower<T>()) {
if (jsgraph_->machine()->Is64()) {
- NodeProperties::ChangeOp(
- node, simplified()->CheckedUint64Bounds(feedback, new_flags));
+ ChangeOp(node,
+ simplified()->CheckedUint64Bounds(feedback, new_flags));
} else {
- NodeProperties::ChangeOp(
- node, simplified()->CheckedUint32Bounds(feedback, new_flags));
+ ChangeOp(node,
+ simplified()->CheckedUint32Bounds(feedback, new_flags));
}
}
} else {
@@ -1694,8 +1718,8 @@ class RepresentationSelector {
node, UseInfo::CheckedSigned32AsWord32(kDistinguishZeros, feedback),
UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
if (lower<T>()) {
- NodeProperties::ChangeOp(
- node, simplified()->CheckedUint32Bounds(feedback, new_flags));
+ ChangeOp(node,
+ simplified()->CheckedUint32Bounds(feedback, new_flags));
}
}
} else {
@@ -1708,35 +1732,11 @@ class RepresentationSelector {
UseInfo::CheckedSigned64AsWord64(zero_handling, feedback),
UseInfo::Word64(), MachineRepresentation::kWord64);
if (lower<T>()) {
- NodeProperties::ChangeOp(
- node, simplified()->CheckedUint64Bounds(feedback, new_flags));
+ ChangeOp(node, simplified()->CheckedUint64Bounds(feedback, new_flags));
}
}
}
- static MachineType MachineTypeFor(CTypeInfo::Type type) {
- switch (type) {
- case CTypeInfo::Type::kVoid:
- return MachineType::AnyTagged();
- case CTypeInfo::Type::kBool:
- return MachineType::Bool();
- case CTypeInfo::Type::kInt32:
- return MachineType::Int32();
- case CTypeInfo::Type::kUint32:
- return MachineType::Uint32();
- case CTypeInfo::Type::kInt64:
- return MachineType::Int64();
- case CTypeInfo::Type::kUint64:
- return MachineType::Uint64();
- case CTypeInfo::Type::kFloat32:
- return MachineType::Float32();
- case CTypeInfo::Type::kFloat64:
- return MachineType::Float64();
- case CTypeInfo::Type::kV8Value:
- return MachineType::AnyTagged();
- }
- }
-
UseInfo UseInfoForFastApiCallArgument(CTypeInfo::Type type,
FeedbackSource const& feedback) {
switch (type) {
@@ -1801,10 +1801,106 @@ class RepresentationSelector {
ProcessInput<T>(node, i, UseInfo::AnyTagged());
}
ProcessRemainingInputs<T>(node, value_input_count);
+ SetOutput<T>(node, MachineRepresentation::kTagged);
+ }
+
+ static MachineType MachineTypeForWasmReturnType(wasm::ValueType type) {
+ switch (type.kind()) {
+ case wasm::kI32:
+ return MachineType::Int32();
+ case wasm::kF32:
+ return MachineType::Float32();
+ case wasm::kF64:
+ return MachineType::Float64();
+ case wasm::kI64:
+ // Not used for i64, see VisitJSWasmCall().
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ UseInfo UseInfoForJSWasmCallArgument(Node* input, wasm::ValueType type,
+ FeedbackSource const& feedback) {
+ // If the input type is a Number or Oddball, we can directly convert the
+ // input into the Wasm native type of the argument. If not, we return
+ // UseInfo::AnyTagged to signal that WasmWrapperGraphBuilder will need to
+ // add Nodes to perform the conversion (in WasmWrapperGraphBuilder::FromJS).
+ switch (type.kind()) {
+ case wasm::kI32:
+ return UseInfo::CheckedNumberOrOddballAsWord32(feedback);
+ case wasm::kI64:
+ return UseInfo::AnyTagged();
+ case wasm::kF32:
+ case wasm::kF64:
+ // For Float32, TruncateFloat64ToFloat32 will be inserted later in
+ // WasmWrapperGraphBuilder::BuildJSToWasmWrapper.
+ return UseInfo::CheckedNumberOrOddballAsFloat64(kDistinguishZeros,
+ feedback);
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ template <Phase T>
+ void VisitJSWasmCall(Node* node, SimplifiedLowering* lowering) {
+ DCHECK_EQ(JSWasmCallNode::TargetIndex(), 0);
+ DCHECK_EQ(JSWasmCallNode::ReceiverIndex(), 1);
+ DCHECK_EQ(JSWasmCallNode::FirstArgumentIndex(), 2);
+
+ JSWasmCallNode n(node);
+
+ JSWasmCallParameters const& params = n.Parameters();
+ const wasm::FunctionSig* wasm_signature = params.signature();
+ int wasm_arg_count = static_cast<int>(wasm_signature->parameter_count());
+ DCHECK_EQ(wasm_arg_count, n.ArgumentCount());
+
+ base::SmallVector<UseInfo, kInitialArgumentsCount> arg_use_info(
+ wasm_arg_count);
+
+ // Visit JSFunction and Receiver nodes.
+ ProcessInput<T>(node, JSWasmCallNode::TargetIndex(), UseInfo::Any());
+ ProcessInput<T>(node, JSWasmCallNode::ReceiverIndex(), UseInfo::Any());
+
+ // Propagate representation information from TypeInfo.
+ for (int i = 0; i < wasm_arg_count; i++) {
+ TNode<Object> input = n.Argument(i);
+ DCHECK_NOT_NULL(input);
+ arg_use_info[i] = UseInfoForJSWasmCallArgument(
+ input, wasm_signature->GetParam(i), params.feedback());
+ ProcessInput<T>(node, JSWasmCallNode::ArgumentIndex(i), arg_use_info[i]);
+ }
+
+ // Visit value, context and frame state inputs as tagged.
+ int first_effect_index = NodeProperties::FirstEffectIndex(node);
+ DCHECK(first_effect_index >
+ JSWasmCallNode::FirstArgumentIndex() + wasm_arg_count);
+ for (int i = JSWasmCallNode::FirstArgumentIndex() + wasm_arg_count;
+ i < first_effect_index; i++) {
+ ProcessInput<T>(node, i, UseInfo::AnyTagged());
+ }
+
+ // Effect and Control.
+ ProcessRemainingInputs<T>(node, NodeProperties::FirstEffectIndex(node));
+
+ if (wasm_signature->return_count() == 1) {
+ if (wasm_signature->GetReturn().kind() == wasm::kI64) {
+ // Conversion between negative int64 and BigInt not supported yet.
+ // Do not bypass the type conversion when the result type is i64.
+ SetOutput<T>(node, MachineRepresentation::kTagged);
+ } else {
+ MachineType return_type =
+ MachineTypeForWasmReturnType(wasm_signature->GetReturn());
+ SetOutput<T>(
+ node, return_type.representation(),
+ JSWasmCallNode::TypeForWasmReturnType(wasm_signature->GetReturn()));
+ }
+ } else {
+ DCHECK_EQ(wasm_signature->return_count(), 0);
+ SetOutput<T>(node, MachineRepresentation::kTagged);
+ }
- MachineType return_type =
- MachineTypeFor(c_signature->ReturnInfo().GetType());
- SetOutput<T>(node, return_type.representation());
+ // The actual lowering of JSWasmCall nodes happens later, in the subsequent
+ // "wasm-inlining" phase.
}
// Dispatching routine for visiting the node {node} with the usage {use}.
@@ -1936,11 +2032,11 @@ class RepresentationSelector {
if (input_info->representation() == MachineRepresentation::kBit) {
// BooleanNot(x: kRepBit) => Word32Equal(x, #0)
node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
- NodeProperties::ChangeOp(node, lowering->machine()->Word32Equal());
+ ChangeOp(node, lowering->machine()->Word32Equal());
} else if (CanBeTaggedPointer(input_info->representation())) {
// BooleanNot(x: kRepTagged) => WordEqual(x, #false)
node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
- NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
+ ChangeOp(node, lowering->machine()->WordEqual());
} else {
DCHECK(TypeOf(node->InputAt(0)).IsNone());
DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
@@ -1968,7 +2064,7 @@ class RepresentationSelector {
// => unsigned Int32Cmp
VisitBinop<T>(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kBit);
- if (lower<T>()) NodeProperties::ChangeOp(node, Uint32Op(node));
+ if (lower<T>()) ChangeOp(node, Uint32Op(node));
return;
}
if ((lhs_type.Is(Type::Signed32OrMinusZero()) &&
@@ -1979,13 +2075,13 @@ class RepresentationSelector {
// => signed Int32Cmp
VisitBinop<T>(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kBit);
- if (lower<T>()) NodeProperties::ChangeOp(node, Int32Op(node));
+ if (lower<T>()) ChangeOp(node, Int32Op(node));
return;
}
// => Float64Cmp
VisitBinop<T>(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
MachineRepresentation::kBit);
- if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
+ if (lower<T>()) ChangeOp(node, Float64Op(node));
return;
}
case IrOpcode::kNumberLessThan:
@@ -2000,18 +2096,18 @@ class RepresentationSelector {
// => unsigned Int32Cmp
VisitBinop<T>(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kBit);
- if (lower<T>()) NodeProperties::ChangeOp(node, Uint32Op(node));
+ if (lower<T>()) ChangeOp(node, Uint32Op(node));
} else if (lhs_type.Is(Type::Signed32OrMinusZero()) &&
rhs_type.Is(Type::Signed32OrMinusZero())) {
// => signed Int32Cmp
VisitBinop<T>(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kBit);
- if (lower<T>()) NodeProperties::ChangeOp(node, Int32Op(node));
+ if (lower<T>()) ChangeOp(node, Int32Op(node));
} else {
// => Float64Cmp
VisitBinop<T>(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
MachineRepresentation::kBit);
- if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
+ if (lower<T>()) ChangeOp(node, Float64Op(node));
}
return;
}
@@ -2337,7 +2433,7 @@ class RepresentationSelector {
case IrOpcode::kNumberBitwiseXor:
case IrOpcode::kNumberBitwiseAnd: {
VisitWord32TruncatingBinop<T>(node);
- if (lower<T>()) NodeProperties::ChangeOp(node, Int32Op(node));
+ if (lower<T>()) ChangeOp(node, Int32Op(node));
return;
}
case IrOpcode::kSpeculativeNumberBitwiseOr:
@@ -2439,8 +2535,8 @@ class RepresentationSelector {
MachineRepresentation::kWord32, Type::Unsigned31());
if (lower<T>()) {
node->RemoveInput(1);
- NodeProperties::ChangeOp(
- node, simplified()->CheckedUint32ToInt32(FeedbackSource()));
+ ChangeOp(node,
+ simplified()->CheckedUint32ToInt32(FeedbackSource()));
}
return;
}
@@ -2482,27 +2578,27 @@ class RepresentationSelector {
} else {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
MachineRepresentation::kFloat64);
- if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
+ if (lower<T>()) ChangeOp(node, Float64Op(node));
}
return;
}
case IrOpcode::kNumberClz32: {
VisitUnop<T>(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
- if (lower<T>()) NodeProperties::ChangeOp(node, Uint32Op(node));
+ if (lower<T>()) ChangeOp(node, Uint32Op(node));
return;
}
case IrOpcode::kNumberImul: {
VisitBinop<T>(node, UseInfo::TruncatingWord32(),
UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
- if (lower<T>()) NodeProperties::ChangeOp(node, Uint32Op(node));
+ if (lower<T>()) ChangeOp(node, Uint32Op(node));
return;
}
case IrOpcode::kNumberFround: {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat32);
- if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
+ if (lower<T>()) ChangeOp(node, Float64Op(node));
return;
}
case IrOpcode::kNumberMax: {
@@ -2557,7 +2653,7 @@ class RepresentationSelector {
lowering->DoMax(node, lowering->machine()->Float64LessThan(),
MachineRepresentation::kFloat64);
} else {
- NodeProperties::ChangeOp(node, Float64Op(node));
+ ChangeOp(node, Float64Op(node));
}
}
}
@@ -2616,7 +2712,7 @@ class RepresentationSelector {
lowering->machine()->Float64LessThanOrEqual(),
MachineRepresentation::kFloat64);
} else {
- NodeProperties::ChangeOp(node, Float64Op(node));
+ ChangeOp(node, Float64Op(node));
}
}
}
@@ -2626,7 +2722,7 @@ class RepresentationSelector {
case IrOpcode::kNumberPow: {
VisitBinop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
- if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
+ if (lower<T>()) ChangeOp(node, Float64Op(node));
return;
}
case IrOpcode::kNumberCeil:
@@ -2647,7 +2743,7 @@ class RepresentationSelector {
} else if (node->opcode() == IrOpcode::kNumberRound) {
DeferReplacement(node, lowering->Float64Round(node));
} else {
- NodeProperties::ChangeOp(node, Float64Op(node));
+ ChangeOp(node, Float64Op(node));
}
}
return;
@@ -2687,7 +2783,7 @@ class RepresentationSelector {
case IrOpcode::kNumberTanh: {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
- if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
+ if (lower<T>()) ChangeOp(node, Float64Op(node));
return;
}
case IrOpcode::kNumberSign: {
@@ -2712,14 +2808,14 @@ class RepresentationSelector {
} else {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
- if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
+ if (lower<T>()) ChangeOp(node, Float64Op(node));
}
return;
}
case IrOpcode::kNumberSqrt: {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
- if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
+ if (lower<T>()) ChangeOp(node, Float64Op(node));
return;
}
case IrOpcode::kNumberToBoolean: {
@@ -2792,9 +2888,9 @@ class RepresentationSelector {
VisitBinop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
if (lower<T>()) {
if (COMPRESS_POINTERS_BOOL) {
- NodeProperties::ChangeOp(node, lowering->machine()->Word32Equal());
+ ChangeOp(node, lowering->machine()->Word32Equal());
} else {
- NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
+ ChangeOp(node, lowering->machine()->WordEqual());
}
}
return;
@@ -2810,8 +2906,7 @@ class RepresentationSelector {
VisitBinop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kBit);
if (lower<T>()) {
- NodeProperties::ChangeOp(node,
- lowering->simplified()->NumberSameValue());
+ ChangeOp(node, lowering->simplified()->NumberSameValue());
}
} else {
VisitBinop<T>(node, UseInfo::AnyTagged(),
@@ -2864,7 +2959,7 @@ class RepresentationSelector {
UseInfo::CheckedBigIntAsTaggedPointer(FeedbackSource{}),
MachineRepresentation::kTaggedPointer);
if (lower<T>()) {
- NodeProperties::ChangeOp(node, lowering->simplified()->BigIntAdd());
+ ChangeOp(node, lowering->simplified()->BigIntAdd());
}
}
return;
@@ -2882,8 +2977,7 @@ class RepresentationSelector {
UseInfo::CheckedBigIntAsTaggedPointer(FeedbackSource{}),
MachineRepresentation::kTaggedPointer);
if (lower<T>()) {
- NodeProperties::ChangeOp(node,
- lowering->simplified()->BigIntSubtract());
+ ChangeOp(node, lowering->simplified()->BigIntSubtract());
}
}
return;
@@ -3064,6 +3158,10 @@ class RepresentationSelector {
SetOutput<T>(node, MachineRepresentation::kTaggedPointer);
return;
}
+ case IrOpcode::kLoadFramePointer: {
+ SetOutput<T>(node, MachineType::PointerRepresentation());
+ return;
+ }
case IrOpcode::kLoadMessage: {
if (truncation.IsUnused()) return VisitUnused<T>(node);
VisitUnop<T>(node, UseInfo::Word(), MachineRepresentation::kTagged);
@@ -3114,8 +3212,7 @@ class RepresentationSelector {
if (lower<T>()) {
if (write_barrier_kind < access.write_barrier_kind) {
access.write_barrier_kind = write_barrier_kind;
- NodeProperties::ChangeOp(
- node, jsgraph_->simplified()->StoreField(access));
+ ChangeOp(node, jsgraph_->simplified()->StoreField(access));
}
}
return;
@@ -3157,8 +3254,7 @@ class RepresentationSelector {
if (lower<T>()) {
if (write_barrier_kind < access.write_barrier_kind) {
access.write_barrier_kind = write_barrier_kind;
- NodeProperties::ChangeOp(
- node, jsgraph_->simplified()->StoreElement(access));
+ ChangeOp(node, jsgraph_->simplified()->StoreElement(access));
}
}
return;
@@ -3177,24 +3273,21 @@ class RepresentationSelector {
if (value_type.Is(Type::SignedSmall())) {
ProcessInput<T>(node, 2, UseInfo::TruncatingWord32()); // value
if (lower<T>()) {
- NodeProperties::ChangeOp(node,
- simplified()->StoreSignedSmallElement());
+ ChangeOp(node, simplified()->StoreSignedSmallElement());
}
} else if (value_type.Is(Type::Number())) {
ProcessInput<T>(node, 2, UseInfo::TruncatingFloat64()); // value
if (lower<T>()) {
Handle<Map> double_map = DoubleMapParameterOf(node->op());
- NodeProperties::ChangeOp(
- node,
- simplified()->TransitionAndStoreNumberElement(double_map));
+ ChangeOp(node,
+ simplified()->TransitionAndStoreNumberElement(double_map));
}
} else if (value_type.Is(Type::NonNumber())) {
ProcessInput<T>(node, 2, UseInfo::AnyTagged()); // value
if (lower<T>()) {
Handle<Map> fast_map = FastMapParameterOf(node->op());
- NodeProperties::ChangeOp(
- node, simplified()->TransitionAndStoreNonNumberElement(
- fast_map, value_type));
+ ChangeOp(node, simplified()->TransitionAndStoreNonNumberElement(
+ fast_map, value_type));
}
} else {
ProcessInput<T>(node, 2, UseInfo::AnyTagged()); // value
@@ -3263,9 +3356,8 @@ class RepresentationSelector {
} else if (input_type.Is(Type::NullOrUndefined())) {
DeferReplacement(node, node->InputAt(1));
} else if (!input_type.Maybe(Type::NullOrUndefined())) {
- NodeProperties::ChangeOp(
- node, lowering->simplified()->ConvertReceiver(
- ConvertReceiverMode::kNotNullOrUndefined));
+ ChangeOp(node, lowering->simplified()->ConvertReceiver(
+ ConvertReceiverMode::kNotNullOrUndefined));
}
}
return;
@@ -3278,7 +3370,7 @@ class RepresentationSelector {
VisitUnop<T>(node, UseInfo::AnyTagged(),
MachineRepresentation::kTagged);
if (lower<T>()) {
- NodeProperties::ChangeOp(node, simplified()->StringToNumber());
+ ChangeOp(node, simplified()->StringToNumber());
}
} else if (truncation.IsUsedAsWord32()) {
if (InputIs(node, Type::NumberOrOddball())) {
@@ -3289,8 +3381,7 @@ class RepresentationSelector {
VisitUnop<T>(node, UseInfo::AnyTagged(),
MachineRepresentation::kWord32);
if (lower<T>()) {
- NodeProperties::ChangeOp(node,
- simplified()->PlainPrimitiveToWord32());
+ ChangeOp(node, simplified()->PlainPrimitiveToWord32());
}
}
} else if (truncation.TruncatesOddballAndBigIntToNumber()) {
@@ -3302,8 +3393,7 @@ class RepresentationSelector {
VisitUnop<T>(node, UseInfo::AnyTagged(),
MachineRepresentation::kFloat64);
if (lower<T>()) {
- NodeProperties::ChangeOp(node,
- simplified()->PlainPrimitiveToFloat64());
+ ChangeOp(node, simplified()->PlainPrimitiveToFloat64());
}
}
} else {
@@ -3371,8 +3461,7 @@ class RepresentationSelector {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kBit);
if (lower<T>()) {
- NodeProperties::ChangeOp(node,
- lowering->simplified()->NumberIsFinite());
+ ChangeOp(node, lowering->simplified()->NumberIsFinite());
}
} else {
VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
@@ -3400,8 +3489,7 @@ class RepresentationSelector {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kBit);
if (lower<T>()) {
- NodeProperties::ChangeOp(
- node, lowering->simplified()->NumberIsSafeInteger());
+ ChangeOp(node, lowering->simplified()->NumberIsSafeInteger());
}
} else {
VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
@@ -3427,8 +3515,7 @@ class RepresentationSelector {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kBit);
if (lower<T>()) {
- NodeProperties::ChangeOp(node,
- lowering->simplified()->NumberIsInteger());
+ ChangeOp(node, lowering->simplified()->NumberIsInteger());
}
} else {
VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
@@ -3456,7 +3543,7 @@ class RepresentationSelector {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kBit);
if (lower<T>()) {
- NodeProperties::ChangeOp(node, simplified()->NumberIsMinusZero());
+ ChangeOp(node, simplified()->NumberIsMinusZero());
}
} else {
VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
@@ -3479,7 +3566,7 @@ class RepresentationSelector {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kBit);
if (lower<T>()) {
- NodeProperties::ChangeOp(node, simplified()->NumberIsNaN());
+ ChangeOp(node, simplified()->NumberIsNaN());
}
} else {
VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
@@ -3520,14 +3607,9 @@ class RepresentationSelector {
VisitObjectIs<T>(node, Type::Undetectable(), lowering);
return;
}
- case IrOpcode::kArgumentsFrame: {
- SetOutput<T>(node, MachineType::PointerRepresentation());
- return;
- }
case IrOpcode::kArgumentsLength:
case IrOpcode::kRestLength: {
- VisitUnop<T>(node, UseInfo::Word(),
- MachineRepresentation::kTaggedSigned);
+ SetOutput<T>(node, MachineRepresentation::kTaggedSigned);
return;
}
case IrOpcode::kNewDoubleElements:
@@ -3537,8 +3619,8 @@ class RepresentationSelector {
return;
}
case IrOpcode::kNewArgumentsElements: {
- VisitBinop<T>(node, UseInfo::Word(), UseInfo::TaggedSigned(),
- MachineRepresentation::kTaggedPointer);
+ VisitUnop<T>(node, UseInfo::TaggedSigned(),
+ MachineRepresentation::kTaggedPointer);
return;
}
case IrOpcode::kCheckFloat64Hole: {
@@ -3645,7 +3727,7 @@ class RepresentationSelector {
VisitInputs<T>(node);
return SetOutput<T>(node, MachineRepresentation::kTaggedPointer);
case IrOpcode::kFrameState:
- return VisitFrameState<T>(node);
+ return VisitFrameState<T>(FrameState{node});
case IrOpcode::kStateValues:
return VisitStateValues<T>(node);
case IrOpcode::kObjectState:
@@ -3693,7 +3775,7 @@ class RepresentationSelector {
VisitBinop<T>(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
MachineType::PointerRepresentation());
if (lower<T>()) {
- NodeProperties::ChangeOp(
+ ChangeOp(
node,
lowering->simplified()->FindOrderedHashMapEntryForInt32Key());
}
@@ -3732,7 +3814,8 @@ class RepresentationSelector {
case IrOpcode::kArgumentsLengthState:
case IrOpcode::kUnreachable:
case IrOpcode::kRuntimeAbort:
-// All JavaScript operators except JSToNumber have uniform handling.
+// All JavaScript operators except JSToNumber, JSToNumberConvertBigInt,
+// kJSToNumeric and JSWasmCall have uniform handling.
#define OPCODE_CASE(name, ...) case IrOpcode::k##name:
JS_SIMPLE_BINOP_LIST(OPCODE_CASE)
JS_OBJECT_OP_LIST(OPCODE_CASE)
@@ -3748,6 +3831,9 @@ class RepresentationSelector {
case IrOpcode::kJSToObject:
case IrOpcode::kJSToString:
case IrOpcode::kJSParseInt:
+ if (node->opcode() == IrOpcode::kJSWasmCall) {
+ return VisitJSWasmCall<T>(node, lowering);
+ }
VisitInputs<T>(node);
// Assume the output is tagged.
return SetOutput<T>(node, MachineRepresentation::kTagged);
@@ -3787,6 +3873,8 @@ class RepresentationSelector {
replacements_.push_back(replacement);
node->NullAllInputs(); // Node is now dead.
+
+ NotifyNodeReplaced(node, replacement);
}
void Kill(Node* node) {
@@ -3810,6 +3898,20 @@ class RepresentationSelector {
}
private:
+ void ChangeOp(Node* node, const Operator* new_op) {
+ compiler::NodeProperties::ChangeOp(node, new_op);
+
+ if (V8_UNLIKELY(observe_node_manager_ != nullptr))
+ observe_node_manager_->OnNodeChanged(kSimplifiedLoweringReducerName, node,
+ node);
+ }
+
+ void NotifyNodeReplaced(Node* node, Node* replacement) {
+ if (V8_UNLIKELY(observe_node_manager_ != nullptr))
+ observe_node_manager_->OnNodeChanged(kSimplifiedLoweringReducerName, node,
+ replacement);
+ }
+
JSGraph* jsgraph_;
Zone* zone_; // Temporary zone.
// Map from node to its uses that might need to be revisited.
@@ -3840,6 +3942,7 @@ class RepresentationSelector {
OperationTyper op_typer_; // helper for the feedback typer
TickCounter* const tick_counter_;
Linkage* const linkage_;
+ ObserveNodeManager* const observe_node_manager_;
NodeInfo* GetInfo(Node* node) {
DCHECK(node->id() < count_);
@@ -4006,13 +4109,11 @@ void RepresentationSelector::InsertUnreachableIfNecessary<LOWER>(Node* node) {
}
}
-SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker,
- Zone* zone,
- SourcePositionTable* source_positions,
- NodeOriginTable* node_origins,
- PoisoningMitigationLevel poisoning_level,
- TickCounter* tick_counter,
- Linkage* linkage)
+SimplifiedLowering::SimplifiedLowering(
+ JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone,
+ SourcePositionTable* source_positions, NodeOriginTable* node_origins,
+ PoisoningMitigationLevel poisoning_level, TickCounter* tick_counter,
+ Linkage* linkage, ObserveNodeManager* observe_node_manager)
: jsgraph_(jsgraph),
broker_(broker),
zone_(zone),
@@ -4021,13 +4122,14 @@ SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker,
node_origins_(node_origins),
poisoning_level_(poisoning_level),
tick_counter_(tick_counter),
- linkage_(linkage) {}
+ linkage_(linkage),
+ observe_node_manager_(observe_node_manager) {}
void SimplifiedLowering::LowerAllNodes() {
RepresentationChanger changer(jsgraph(), broker_);
- RepresentationSelector selector(jsgraph(), broker_, zone_, &changer,
- source_positions_, node_origins_,
- tick_counter_, linkage_);
+ RepresentationSelector selector(
+ jsgraph(), broker_, zone_, &changer, source_positions_, node_origins_,
+ tick_counter_, linkage_, observe_node_manager_);
selector.Run(this);
}
@@ -4548,7 +4650,7 @@ void SimplifiedLowering::DoMax(Node* node, Operator const* op,
node->ReplaceInput(0, graph()->NewNode(op, lhs, rhs));
DCHECK_EQ(rhs, node->InputAt(1));
node->AppendInput(graph()->zone(), lhs);
- NodeProperties::ChangeOp(node, common()->Select(rep));
+ ChangeOp(node, common()->Select(rep));
}
void SimplifiedLowering::DoMin(Node* node, Operator const* op,
@@ -4559,7 +4661,7 @@ void SimplifiedLowering::DoMin(Node* node, Operator const* op,
node->InsertInput(graph()->zone(), 0, graph()->NewNode(op, lhs, rhs));
DCHECK_EQ(lhs, node->InputAt(1));
DCHECK_EQ(rhs, node->InputAt(2));
- NodeProperties::ChangeOp(node, common()->Select(rep));
+ ChangeOp(node, common()->Select(rep));
}
void SimplifiedLowering::DoIntegral32ToBit(Node* node) {
@@ -4569,7 +4671,7 @@ void SimplifiedLowering::DoIntegral32ToBit(Node* node) {
node->ReplaceInput(0, graph()->NewNode(op, input, zero));
node->AppendInput(graph()->zone(), zero);
- NodeProperties::ChangeOp(node, op);
+ ChangeOp(node, op);
}
void SimplifiedLowering::DoOrderedNumberToBit(Node* node) {
@@ -4578,7 +4680,7 @@ void SimplifiedLowering::DoOrderedNumberToBit(Node* node) {
node->ReplaceInput(0, graph()->NewNode(machine()->Float64Equal(), input,
jsgraph()->Float64Constant(0.0)));
node->AppendInput(graph()->zone(), jsgraph()->Int32Constant(0));
- NodeProperties::ChangeOp(node, machine()->Word32Equal());
+ ChangeOp(node, machine()->Word32Equal());
}
void SimplifiedLowering::DoNumberToBit(Node* node) {
@@ -4587,7 +4689,7 @@ void SimplifiedLowering::DoNumberToBit(Node* node) {
node->ReplaceInput(0, jsgraph()->Float64Constant(0.0));
node->AppendInput(graph()->zone(),
graph()->NewNode(machine()->Float64Abs(), input));
- NodeProperties::ChangeOp(node, machine()->Float64LessThan());
+ ChangeOp(node, machine()->Float64LessThan());
}
void SimplifiedLowering::DoIntegerToUint8Clamped(Node* node) {
@@ -4604,8 +4706,7 @@ void SimplifiedLowering::DoIntegerToUint8Clamped(Node* node) {
graph()->NewNode(machine()->Float64LessThan(), input, max), input,
max));
node->AppendInput(graph()->zone(), min);
- NodeProperties::ChangeOp(node,
- common()->Select(MachineRepresentation::kFloat64));
+ ChangeOp(node, common()->Select(MachineRepresentation::kFloat64));
}
void SimplifiedLowering::DoNumberToUint8Clamped(Node* node) {
@@ -4622,8 +4723,7 @@ void SimplifiedLowering::DoNumberToUint8Clamped(Node* node) {
graph()->NewNode(machine()->Float64LessThan(), input, max),
input, max),
min));
- NodeProperties::ChangeOp(node,
- machine()->Float64RoundTiesEven().placeholder());
+ ChangeOp(node, machine()->Float64RoundTiesEven().placeholder());
}
void SimplifiedLowering::DoSigned32ToUint8Clamped(Node* node) {
@@ -4639,8 +4739,7 @@ void SimplifiedLowering::DoSigned32ToUint8Clamped(Node* node) {
graph()->NewNode(machine()->Int32LessThan(), input, min),
min, input));
node->AppendInput(graph()->zone(), max);
- NodeProperties::ChangeOp(node,
- common()->Select(MachineRepresentation::kWord32));
+ ChangeOp(node, common()->Select(MachineRepresentation::kWord32));
}
void SimplifiedLowering::DoUnsigned32ToUint8Clamped(Node* node) {
@@ -4651,8 +4750,7 @@ void SimplifiedLowering::DoUnsigned32ToUint8Clamped(Node* node) {
0, graph()->NewNode(machine()->Uint32LessThanOrEqual(), input, max));
node->AppendInput(graph()->zone(), input);
node->AppendInput(graph()->zone(), max);
- NodeProperties::ChangeOp(node,
- common()->Select(MachineRepresentation::kWord32));
+ ChangeOp(node, common()->Select(MachineRepresentation::kWord32));
}
Node* SimplifiedLowering::ToNumberCode() {
@@ -4721,6 +4819,14 @@ Operator const* SimplifiedLowering::ToNumericOperator() {
return to_numeric_operator_.get();
}
+void SimplifiedLowering::ChangeOp(Node* node, const Operator* new_op) {
+ compiler::NodeProperties::ChangeOp(node, new_op);
+
+ if (V8_UNLIKELY(observe_node_manager_ != nullptr))
+ observe_node_manager_->OnNodeChanged(kSimplifiedLoweringReducerName, node,
+ node);
+}
+
#undef TRACE
} // namespace compiler
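Both `RepresentationSelector::ChangeOp` and `SimplifiedLowering::ChangeOp` introduced above wrap the raw graph mutation so an optional `ObserveNodeManager` hears about every operator change, which is what backs the `%ObserveNode` intrinsic. The shape is a thin mutation wrapper with an optional observer; a minimal standalone sketch (the types are illustrative stand-ins, not V8's):

```cpp
#include <iostream>
#include <string>

struct Node {
  std::string op;
};

// Observer that wants to hear about every mutation a phase performs.
class Observer {
 public:
  void OnNodeChanged(const char* phase, const Node& node) {
    std::cout << phase << " changed node to " << node.op << "\n";
  }
};

class LoweringPhase {
 public:
  explicit LoweringPhase(Observer* observer = nullptr) : observer_(observer) {}

  // All operator changes go through this wrapper instead of mutating the
  // node directly, so a registered observer never misses a change.
  void ChangeOp(Node* node, std::string new_op) {
    node->op = std::move(new_op);  // the actual mutation
    if (observer_ != nullptr) {    // the hook is optional
      observer_->OnNodeChanged("SimplifiedLowering", *node);
    }
  }

 private:
  Observer* const observer_;
};

int main() {
  Observer obs;
  LoweringPhase phase(&obs);
  Node n{"NumberAdd"};
  phase.ChangeOp(&n, "Int32Add");
  return 0;
}
```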
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index f38d3df132..54017b34f7 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -7,6 +7,7 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
@@ -19,6 +20,7 @@ namespace compiler {
// Forward declarations.
class NodeOriginTable;
+class ObserveNodeManager;
class RepresentationChanger;
class RepresentationSelector;
class SourcePositionTable;
@@ -30,7 +32,8 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
SourcePositionTable* source_position,
NodeOriginTable* node_origins,
PoisoningMitigationLevel poisoning_level,
- TickCounter* tick_counter, Linkage* linkage);
+ TickCounter* tick_counter, Linkage* linkage,
+ ObserveNodeManager* observe_node_manager = nullptr);
~SimplifiedLowering() = default;
void LowerAllNodes();
@@ -50,6 +53,17 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
void DoUnsigned32ToUint8Clamped(Node* node);
private:
+ // The purpose of this nested class is to hide method
+ // v8::internal::compiler::NodeProperties::ChangeOp which should not be
+ // directly used by code in SimplifiedLowering.
+ // SimplifiedLowering code should call SimplifiedLowering::ChangeOp instead,
+ // in order to notify the changes to ObserveNodeManager and support the
+ // %ObserveNode intrinsic.
+ class NodeProperties : public compiler::NodeProperties {
+ static void ChangeOp(Node* node, const Operator* new_op) { UNREACHABLE(); }
+ };
+ void ChangeOp(Node* node, const Operator* new_op);
+
JSGraph* const jsgraph_;
JSHeapBroker* broker_;
Zone* const zone_;
@@ -74,6 +88,8 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
TickCounter* const tick_counter_;
Linkage* const linkage_;
+ ObserveNodeManager* const observe_node_manager_;
+
Node* Float64Round(Node* const node);
Node* Float64Sign(Node* const node);
Node* Int32Abs(Node* const node);
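The private nested `NodeProperties` class added to the header above is a compile-time guard: it shadows the base class's static `ChangeOp` with a private `UNREACHABLE()` stub, so any code inside `SimplifiedLowering` that writes an unqualified `NodeProperties::ChangeOp(...)` hits the stub's access check and fails to compile instead of silently bypassing the observer hook. A small sketch of the same shadowing trick outside V8 (names are invented; `std::abort` stands in for `UNREACHABLE()`):

```cpp
#include <cassert>
#include <cstdlib>

struct Node { int op = 0; };

struct GraphUtils {
  static void ChangeOp(Node* node, int new_op) { node->op = new_op; }
};

class Phase {
  // Shadows ::GraphUtils for all unqualified uses inside Phase. The stub is
  // private and aborting, so accidental direct calls are rejected.
  class GraphUtils : public ::GraphUtils {
    static void ChangeOp(Node*, int) { std::abort(); }
  };

 public:
  void ChangeOp(Node* node, int new_op) {
    ::GraphUtils::ChangeOp(node, new_op);  // explicitly reach the real one
    ++changes_;                            // ...and record the change
  }
  int changes() const { return changes_; }

 private:
  int changes_ = 0;
};

int main() {
  Phase phase;
  Node n;
  phase.ChangeOp(&n, 7);
  assert(n.op == 7 && phase.changes() == 1);
  // Inside Phase, an unqualified `GraphUtils::ChangeOp(&n, 8);` would now
  // name the private stub and be rejected, which is the point of the trick.
  return 0;
}
```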
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 582a74db69..09e3a80ec4 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -972,13 +972,6 @@ struct SimplifiedOperatorGlobalCache final {
FindOrderedHashMapEntryForInt32KeyOperator
kFindOrderedHashMapEntryForInt32Key;
- struct ArgumentsFrameOperator final : public Operator {
- ArgumentsFrameOperator()
- : Operator(IrOpcode::kArgumentsFrame, Operator::kPure, "ArgumentsFrame",
- 0, 0, 0, 1, 0, 0) {}
- };
- ArgumentsFrameOperator kArgumentsFrame;
-
template <CheckForMinusZeroMode kMode>
struct ChangeFloat64ToTaggedOperator final
: public Operator1<CheckForMinusZeroMode> {
@@ -1225,7 +1218,6 @@ SimplifiedOperatorBuilder::SimplifiedOperatorBuilder(Zone* zone)
PURE_OP_LIST(GET_FROM_CACHE)
EFFECT_DEPENDENT_OP_LIST(GET_FROM_CACHE)
CHECKED_OP_LIST(GET_FROM_CACHE)
-GET_FROM_CACHE(ArgumentsFrame)
GET_FROM_CACHE(FindOrderedHashMapEntry)
GET_FROM_CACHE(FindOrderedHashMapEntryForInt32Key)
GET_FROM_CACHE(LoadFieldByIndex)
@@ -1637,14 +1629,12 @@ const Operator* SimplifiedOperatorBuilder::TransitionElementsKind(
transition); // parameter
}
-const Operator* SimplifiedOperatorBuilder::ArgumentsLength(
- int formal_parameter_count) {
- return zone()->New<Operator1<int>>( // --
- IrOpcode::kArgumentsLength, // opcode
- Operator::kPure, // flags
- "ArgumentsLength", // name
- 1, 0, 0, 1, 0, 0, // counts
- formal_parameter_count); // parameter
+const Operator* SimplifiedOperatorBuilder::ArgumentsLength() {
+ return zone()->New<Operator>( // --
+ IrOpcode::kArgumentsLength, // opcode
+ Operator::kPure, // flags
+ "ArgumentsLength", // name
+ 0, 0, 0, 1, 0, 0); // counts
}
const Operator* SimplifiedOperatorBuilder::RestLength(
@@ -1653,7 +1643,7 @@ const Operator* SimplifiedOperatorBuilder::RestLength(
IrOpcode::kRestLength, // opcode
Operator::kPure, // flags
"RestLength", // name
- 1, 0, 0, 1, 0, 0, // counts
+ 0, 0, 0, 1, 0, 0, // counts
formal_parameter_count); // parameter
}
@@ -1775,7 +1765,7 @@ const Operator* SimplifiedOperatorBuilder::NewArgumentsElements(
IrOpcode::kNewArgumentsElements, // opcode
Operator::kEliminatable, // flags
"NewArgumentsElements", // name
- 2, 1, 0, 1, 1, 0, // counts
+ 1, 1, 0, 1, 1, 0, // counts
NewArgumentsElementsParameters(type,
formal_parameter_count)); // parameter
}
@@ -1950,6 +1940,11 @@ const Operator* SimplifiedOperatorBuilder::FastApiCall(
FastApiCallParameters(signature, feedback, descriptor));
}
+int FastApiCallNode::FastCallExtraInputCount() const {
+ return kFastTargetInputCount + kEffectAndControlInputCount +
+ (Parameters().signature()->HasOptions() ? 1 : 0);
+}
+
int FastApiCallNode::FastCallArgumentCount() const {
FastApiCallParameters p = FastApiCallParametersOf(node()->op());
const CFunctionInfo* signature = p.signature();
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 815243c0ae..cd66b89ea4 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -11,6 +11,7 @@
#include "src/codegen/machine-type.h"
#include "src/codegen/tnode.h"
#include "src/common/globals.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator.h"
@@ -971,14 +972,13 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* NumberIsSafeInteger();
const Operator* ObjectIsInteger();
- const Operator* ArgumentsFrame();
- const Operator* ArgumentsLength(int formal_parameter_count);
+ const Operator* ArgumentsLength();
const Operator* RestLength(int formal_parameter_count);
const Operator* NewDoubleElements(AllocationType);
const Operator* NewSmiOrObjectElements(AllocationType);
- // new-arguments-elements frame, arguments count
+ // new-arguments-elements arguments-length
const Operator* NewArgumentsElements(CreateArgumentsType type,
int formal_parameter_count);
@@ -1133,19 +1133,19 @@ class FastApiCallNode final : public SimplifiedNodeWrapperBase {
static constexpr int kExtraInputCount =
kFastTargetInputCount + kFastReceiverInputCount;
- static constexpr int kHasErrorInputCount = 1;
static constexpr int kArityInputCount = 1;
static constexpr int kNewTargetInputCount = 1;
static constexpr int kHolderInputCount = 1;
static constexpr int kContextAndFrameStateInputCount = 2;
static constexpr int kEffectAndControlInputCount = 2;
- static constexpr int kFastCallExtraInputCount =
- kFastTargetInputCount + kHasErrorInputCount + kEffectAndControlInputCount;
+ int FastCallExtraInputCount() const;
static constexpr int kSlowCallExtraInputCount =
kSlowTargetInputCount + kArityInputCount + kNewTargetInputCount +
kSlowReceiverInputCount + kHolderInputCount +
kContextAndFrameStateInputCount + kEffectAndControlInputCount;
+ static constexpr int kSlowCallDataArgumentIndex = 3;
+
// This is the arity fed into FastApiCallArguments.
static constexpr int ArityForArgc(int c_arg_count, int js_arg_count) {
return c_arg_count + kFastTargetInputCount + js_arg_count +
diff --git a/deps/v8/src/compiler/state-values-utils.h b/deps/v8/src/compiler/state-values-utils.h
index ff66c3df71..78d57a92b9 100644
--- a/deps/v8/src/compiler/state-values-utils.h
+++ b/deps/v8/src/compiler/state-values-utils.h
@@ -126,6 +126,17 @@ class V8_EXPORT_PRIVATE StateValuesAccess {
size_t size() const;
iterator begin() const { return iterator(node_); }
+ iterator begin_without_receiver() const {
+ return ++begin(); // Skip the receiver.
+ }
+ iterator begin_without_receiver_and_skip(int n_skips) {
+ iterator it = begin_without_receiver();
+ while (n_skips > 0 && !it.done()) {
+ ++it;
+ --n_skips;
+ }
+ return it;
+ }
iterator end() const { return iterator(); }
private:
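// Hypothetical usage of the accessors added above (editorial sketch; the
// surrounding variables are assumed, not part of this diff): given a
// StateValuesAccess access over a frame state's parameters, skip the receiver
// plus the first two arguments and walk the remainder:
//
//   for (auto it = access.begin_without_receiver_and_skip(2); !it.done();
//        ++it) {
//     // ... consume the current entry ...
//   }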
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 1e4acdc335..a2103139fa 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -680,10 +680,7 @@ Type Typer::Visitor::TypeIfException(Node* node) { return Type::NonInternal(); }
// Common operators.
Type Typer::Visitor::TypeParameter(Node* node) {
- Node* const start = node->InputAt(0);
- DCHECK_EQ(IrOpcode::kStart, start->opcode());
- int const parameter_count = start->op()->ValueOutputCount() - 4;
- DCHECK_LE(1, parameter_count);
+ StartNode start{node->InputAt(0)};
int const index = ParameterIndexOf(node->op());
if (index == Linkage::kJSCallClosureParamIndex) {
return Type::Function();
@@ -694,15 +691,15 @@ Type Typer::Visitor::TypeParameter(Node* node) {
// Parameter[this] can be the_hole for derived class constructors.
return Type::Union(Type::Hole(), Type::NonInternal(), typer_->zone());
}
- } else if (index == Linkage::GetJSCallNewTargetParamIndex(parameter_count)) {
+ } else if (index == start.NewTargetParameterIndex()) {
if (typer_->flags() & Typer::kNewTargetIsReceiver) {
return Type::Receiver();
} else {
return Type::Union(Type::Receiver(), Type::Undefined(), typer_->zone());
}
- } else if (index == Linkage::GetJSCallArgCountParamIndex(parameter_count)) {
+ } else if (index == start.ArgCountParameterIndex()) {
return Type::Range(0.0, FixedArray::kMaxLength, typer_->zone());
- } else if (index == Linkage::GetJSCallContextParamIndex(parameter_count)) {
+ } else if (index == start.ContextParameterIndex()) {
return Type::OtherInternal();
}
return Type::NonInternal();
@@ -992,6 +989,15 @@ Type Typer::Visitor::TypeCall(Node* node) { return Type::Any(); }
Type Typer::Visitor::TypeFastApiCall(Node* node) { return Type::Any(); }
+Type Typer::Visitor::TypeJSWasmCall(Node* node) {
+ const JSWasmCallParameters& op_params = JSWasmCallParametersOf(node->op());
+ const wasm::FunctionSig* wasm_signature = op_params.signature();
+ if (wasm_signature->return_count() > 0) {
+ return JSWasmCallNode::TypeForWasmReturnType(wasm_signature->GetReturn());
+ }
+ return Type::Any();
+}
+
Type Typer::Visitor::TypeProjection(Node* node) {
Type const type = Operand(node, 0);
if (type.Is(Type::None())) return Type::None();
@@ -2323,10 +2329,6 @@ Type Typer::Visitor::TypeRestLength(Node* node) {
return TypeCache::Get()->kArgumentsLengthType;
}
-Type Typer::Visitor::TypeArgumentsFrame(Node* node) {
- return Type::ExternalPointer();
-}
-
Type Typer::Visitor::TypeNewDoubleElements(Node* node) {
return Type::OtherInternal();
}
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 7912b09ac0..2c6f05b44a 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -153,6 +153,8 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
return kString;
case EXTERNAL_INTERNALIZED_STRING_TYPE:
case EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
+ case UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
case INTERNALIZED_STRING_TYPE:
case ONE_BYTE_INTERNALIZED_STRING_TYPE:
return kInternalizedString;
@@ -261,6 +263,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case WASM_MODULE_OBJECT_TYPE:
case WASM_STRUCT_TYPE:
case WASM_TABLE_OBJECT_TYPE:
+ case WASM_VALUE_OBJECT_TYPE:
case WEAK_CELL_TYPE:
DCHECK(!map.is_callable());
DCHECK(!map.is_undetectable());
@@ -269,6 +272,13 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
DCHECK(!map.is_undetectable());
return kBoundFunction;
case JS_FUNCTION_TYPE:
+ case JS_PROMISE_CONSTRUCTOR_TYPE:
+ case JS_REG_EXP_CONSTRUCTOR_TYPE:
+ case JS_ARRAY_CONSTRUCTOR_TYPE:
+#define TYPED_ARRAY_CONSTRUCTORS_SWITCH(Type, type, TYPE, Ctype) \
+ case TYPE##_TYPED_ARRAY_CONSTRUCTOR_TYPE:
+ TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTORS_SWITCH)
+#undef TYPED_ARRAY_CONSTRUCTORS_SWITCH
DCHECK(!map.is_undetectable());
return kFunction;
case JS_PROXY_TYPE:
@@ -302,6 +312,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case BYTECODE_ARRAY_TYPE:
case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
case ARRAY_BOILERPLATE_DESCRIPTION_TYPE:
+ case REG_EXP_BOILERPLATE_DESCRIPTION_TYPE:
case TRANSITION_ARRAY_TYPE:
case FEEDBACK_CELL_TYPE:
case CLOSURE_FEEDBACK_CELL_ARRAY_TYPE:
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 0960d34917..f3cd4789e7 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -396,11 +396,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CHECK_EQ(1, input_count);
// Parameter has an input that produces enough values.
int const index = ParameterIndexOf(node->op());
- Node* const start = NodeProperties::GetValueInput(node, 0);
- CHECK_EQ(IrOpcode::kStart, start->opcode());
+ StartNode start{NodeProperties::GetValueInput(node, 0)};
// Currently, parameter indices start at -1 instead of 0.
CHECK_LE(-1, index);
- CHECK_LT(index + 1, start->op()->ValueOutputCount());
+ CHECK_LE(index, start.LastParameterIndex_MaybeNonStandardLayout());
CheckTypeIs(node, Type::Any());
break;
}
@@ -536,29 +535,25 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CHECK_EQ(0, control_count);
CHECK_EQ(0, effect_count);
CHECK_EQ(6, input_count);
- // Check that the parameters and registers are kStateValues or
- // kTypedStateValues.
- for (int i = 0; i < 2; ++i) {
- CHECK(NodeProperties::GetValueInput(node, i)->opcode() ==
- IrOpcode::kStateValues ||
- NodeProperties::GetValueInput(node, i)->opcode() ==
- IrOpcode::kTypedStateValues);
- }
+
+ FrameState state{node};
+ CHECK(state.parameters()->opcode() == IrOpcode::kStateValues ||
+ state.parameters()->opcode() == IrOpcode::kTypedStateValues);
+ CHECK(state.locals()->opcode() == IrOpcode::kStateValues ||
+ state.locals()->opcode() == IrOpcode::kTypedStateValues);
// Checks that the state input is empty for all but kInterpretedFunction
// frames, where it should have size one.
{
- const FrameStateInfo& state_info = FrameStateInfoOf(node->op());
- const FrameStateFunctionInfo* func_info = state_info.function_info();
+ const FrameStateFunctionInfo* func_info =
+ state.frame_state_info().function_info();
CHECK_EQ(func_info->parameter_count(),
- StateValuesAccess(node->InputAt(kFrameStateParametersInput))
- .size());
- CHECK_EQ(
- func_info->local_count(),
- StateValuesAccess(node->InputAt(kFrameStateLocalsInput)).size());
-
- Node* accumulator = node->InputAt(kFrameStateStackInput);
- if (func_info->type() == FrameStateType::kInterpretedFunction) {
+ StateValuesAccess(state.parameters()).size());
+ CHECK_EQ(func_info->local_count(),
+ StateValuesAccess(state.locals()).size());
+
+ Node* accumulator = state.stack();
+ if (func_info->type() == FrameStateType::kUnoptimizedFunction) {
// The accumulator (InputAt(2)) cannot be kStateValues.
// It can be kTypedStateValues (to signal the type) and it can have
// other Node types including that of the optimized_out HeapConstant.
@@ -1232,12 +1227,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
break;
case IrOpcode::kArgumentsLength:
case IrOpcode::kRestLength:
- CheckValueInputIs(node, 0, Type::ExternalPointer());
CheckTypeIs(node, TypeCache::Get()->kArgumentsLengthType);
break;
- case IrOpcode::kArgumentsFrame:
- CheckTypeIs(node, Type::ExternalPointer());
- break;
case IrOpcode::kNewDoubleElements:
case IrOpcode::kNewSmiOrObjectElements:
CheckValueInputIs(node, 0,
@@ -1245,8 +1236,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckTypeIs(node, Type::OtherInternal());
break;
case IrOpcode::kNewArgumentsElements:
- CheckValueInputIs(node, 0, Type::ExternalPointer());
- CheckValueInputIs(node, 1,
+ CheckValueInputIs(node, 0,
Type::Range(0.0, FixedArray::kMaxLength, zone));
CheckTypeIs(node, Type::OtherInternal());
break;
@@ -1627,6 +1617,11 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::ExternalPointer()); // callee
CheckValueInputIs(node, 1, Type::Any()); // receiver
break;
+ case IrOpcode::kJSWasmCall:
+ CHECK_GE(value_count, 3);
+ CheckTypeIs(node, Type::Any());
+ CheckValueInputIs(node, 0, Type::Any()); // callee
+ break;
// Machine operators
// -----------------------
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 3b72da176e..f4e99169e4 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -81,17 +81,6 @@ MachineType assert_size(int expected_size, MachineType type) {
#define WASM_INSTANCE_OBJECT_OFFSET(name) \
wasm::ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset)
-// We would like to use gasm_->Call() to implement this macro,
-// but this doesn't work currently when we try to call it from functions
-// which set IfSuccess/IfFailure control paths (e.g. within Throw()).
-// TODO(manoskouk): Maybe clean this up at some point?
-#define CALL_BUILTIN(name, ...) \
- SetEffect(graph()->NewNode( \
- mcgraph()->common()->Call(GetBuiltinCallDescriptor<name##Descriptor>( \
- this, StubCallMode::kCallBuiltinPointer)), \
- GetBuiltinPointerTarget(Builtins::k##name), ##__VA_ARGS__, effect(), \
- control()))
-
#define LOAD_INSTANCE_FIELD(name, type) \
gasm_->Load(assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type), \
instance_node_.get(), WASM_INSTANCE_OBJECT_OFFSET(name))
@@ -119,7 +108,7 @@ MachineType assert_size(int expected_size, MachineType type) {
LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::AnyTagged())
#define STORE_RAW(base, offset, val, rep, barrier) \
- STORE_RAW_NODE_OFFSET(base, gasm_->Int32Constant(offset), val, rep, barrier)
+ STORE_RAW_NODE_OFFSET(base, Int32Constant(offset), val, rep, barrier)
#define STORE_RAW_NODE_OFFSET(base, node_offset, val, rep, barrier) \
gasm_->Store(StoreRepresentation(rep, barrier), base, node_offset, val)
@@ -162,25 +151,107 @@ bool ContainsInt64(const wasm::FunctionSig* sig) {
return false;
}
-template <typename BuiltinDescriptor>
-CallDescriptor* GetBuiltinCallDescriptor(WasmGraphBuilder* builder,
- StubCallMode stub_mode) {
- BuiltinDescriptor interface_descriptor;
+constexpr Builtins::Name WasmRuntimeStubIdToBuiltinName(
+ wasm::WasmCode::RuntimeStubId runtime_stub_id) {
+ switch (runtime_stub_id) {
+#define DEF_CASE(name) \
+ case wasm::WasmCode::k##name: \
+ return Builtins::k##name;
+#define DEF_TRAP_CASE(name) DEF_CASE(ThrowWasm##name)
+ WASM_RUNTIME_STUB_LIST(DEF_CASE, DEF_TRAP_CASE)
+#undef DEF_CASE
+#undef DEF_TRAP_CASE
+ default:
+#if V8_HAS_CXX14_CONSTEXPR
+ UNREACHABLE();
+#else
+ return Builtins::kAbort;
+#endif
+ }
+}
+
+CallDescriptor* GetBuiltinCallDescriptor(Builtins::Name name, Zone* zone,
+ StubCallMode stub_mode,
+ bool needs_frame_state = false) {
+ CallInterfaceDescriptor interface_descriptor =
+ Builtins::CallInterfaceDescriptorFor(name);
return Linkage::GetStubCallDescriptor(
- builder->mcgraph()->zone(), // zone
+ zone, // zone
interface_descriptor, // descriptor
interface_descriptor.GetStackParameterCount(), // stack parameter count
- CallDescriptor::kNoFlags, // flags
- Operator::kNoProperties, // properties
- stub_mode); // stub call mode
+ needs_frame_state ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags, // flags
+ Operator::kNoProperties, // properties
+ stub_mode); // stub call mode
}
+
+Node* GetBuiltinPointerTarget(MachineGraph* mcgraph,
+ Builtins::Name builtin_id) {
+ static_assert(std::is_same<Smi, BuiltinPtr>(), "BuiltinPtr must be Smi");
+ return mcgraph->graph()->NewNode(
+ mcgraph->common()->NumberConstant(builtin_id));
+}
+
} // namespace
+JSWasmCallData::JSWasmCallData(const wasm::FunctionSig* wasm_signature)
+ : result_needs_conversion_(wasm_signature->return_count() == 1 &&
+ wasm_signature->GetReturn().kind() ==
+ wasm::kI64) {
+ arg_needs_conversion_.resize(wasm_signature->parameter_count());
+ for (size_t i = 0; i < wasm_signature->parameter_count(); i++) {
+ wasm::ValueType type = wasm_signature->GetParam(i);
+ arg_needs_conversion_[i] = type.kind() == wasm::kI64;
+ }
+}
+
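+ // Worked example for the constructor above: for a wasm signature
+ // (i32, i64) -> i64, JSWasmCallData records arg_needs_conversion_ =
+ // {false, true} and result_needs_conversion_ = true; only i64 parameters and
+ // results are flagged for conversion at the JS boundary (what the conversion
+ // itself does is outside this hunk).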
class WasmGraphAssembler : public GraphAssembler {
public:
WasmGraphAssembler(MachineGraph* mcgraph, Zone* zone)
: GraphAssembler(mcgraph, zone) {}
+ template <typename... Args>
+ Node* CallRuntimeStub(wasm::WasmCode::RuntimeStubId stub_id, Args*... args) {
+ auto* call_descriptor = GetBuiltinCallDescriptor(
+ WasmRuntimeStubIdToBuiltinName(stub_id), temp_zone(),
+ StubCallMode::kCallWasmRuntimeStub);
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ Node* call_target = mcgraph()->RelocatableIntPtrConstant(
+ stub_id, RelocInfo::WASM_STUB_CALL);
+ return Call(call_descriptor, call_target, args...);
+ }
+
+ template <typename... Args>
+ Node* CallBuiltin(Builtins::Name name, Args*... args) {
+ // We would like to use gasm_->Call() to implement this method,
+ // but this doesn't work currently when we try to call it from functions
+ // which set IfSuccess/IfFailure control paths (e.g. within Throw()).
+ // TODO(manoskouk): Maybe clean this up at some point and unite with
+ // CallRuntimeStub?
+ auto* call_descriptor = GetBuiltinCallDescriptor(
+ name, temp_zone(), StubCallMode::kCallBuiltinPointer);
+ Node* call_target = GetBuiltinPointerTarget(mcgraph(), name);
+ Node* call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
+ call_target, args..., effect(), control());
+ InitializeEffectControl(call, control());
+ return call;
+ }
+
+ Node* Branch(Node* cond, Node** true_node, Node** false_node,
+ BranchHint hint) {
+ DCHECK_NOT_NULL(cond);
+ Node* branch =
+ graph()->NewNode(mcgraph()->common()->Branch(hint), cond, control());
+ *true_node = graph()->NewNode(mcgraph()->common()->IfTrue(), branch);
+ *false_node = graph()->NewNode(mcgraph()->common()->IfFalse(), branch);
+ return branch;
+ }
+
+ Node* NumberConstant(volatile double value) {
+ return graph()->NewNode(mcgraph()->common()->NumberConstant(value));
+ }
+
// Helper functions for dealing with HeapObjects.
// Rule of thumb: if access to a given field in an object is required in
// at least two places, put a helper function here.
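// Note on the two call helpers added above: CallRuntimeStub emits a
// relocatable wasm-stub call whose stub index is patched at relocation time,
// while CallBuiltin targets a builtin pointer and wires effect/control by
// hand because gasm_->Call() cannot yet be used from code that sets
// IfSuccess/IfFailure control paths. Typical use mirrors later hunks, e.g.:
//
//   gasm_->CallRuntimeStub(wasm::WasmCode::kWasmMemoryGrow, input);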
@@ -388,8 +459,7 @@ Node* WasmGraphBuilder::Start(unsigned params) {
}
Node* WasmGraphBuilder::Param(unsigned index) {
- return graph()->NewNode(mcgraph()->common()->Parameter(index),
- graph()->start());
+ return gasm_->Parameter(index);
}
Node* WasmGraphBuilder::Loop(Node* entry) {
@@ -461,6 +531,12 @@ void WasmGraphBuilder::AppendToPhi(Node* phi, Node* from) {
phi, mcgraph()->common()->ResizeMergeOrPhi(phi->op(), new_size));
}
+template <typename... Nodes>
+Node* WasmGraphBuilder::Merge(Node* fst, Nodes*... args) {
+ return graph()->NewNode(this->mcgraph()->common()->Merge(1 + sizeof...(args)),
+ fst, args...);
+}
+
Node* WasmGraphBuilder::Merge(unsigned count, Node** controls) {
return graph()->NewNode(mcgraph()->common()->Merge(count), count, controls);
}
@@ -481,22 +557,25 @@ Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects_and_control) {
}
Node* WasmGraphBuilder::RefNull() {
- return LOAD_FULL_POINTER(
- BuildLoadIsolateRoot(),
- IsolateData::root_slot_offset(RootIndex::kNullValue));
+ // Technically speaking, this does not generate a valid graph since the effect
+ // of the last Load is not consumed.
+ // TODO(manoskouk): Remove this code once we implement Load elimination
+ // optimization for wasm.
+ if (!ref_null_node_.is_set()) {
+ Node* current_effect = effect();
+ Node* current_control = control();
+ SetEffectControl(mcgraph()->graph()->start());
+ ref_null_node_.set(LOAD_FULL_POINTER(
+ BuildLoadIsolateRoot(),
+ IsolateData::root_slot_offset(RootIndex::kNullValue)));
+ SetEffectControl(current_effect, current_control);
+ }
+ return ref_null_node_.get();
}
Node* WasmGraphBuilder::RefFunc(uint32_t function_index) {
- auto call_descriptor = GetBuiltinCallDescriptor<WasmRefFuncDescriptor>(
- this, StubCallMode::kCallWasmRuntimeStub);
- // A direct call to a wasm runtime stub defined in this module.
- // Just encode the stub index. This will be patched at relocation.
- Node* call_target = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmRefFunc, RelocInfo::WASM_STUB_CALL);
-
- return SetEffectControl(graph()->NewNode(
- mcgraph()->common()->Call(call_descriptor), call_target,
- mcgraph()->Uint32Constant(function_index), effect(), control()));
+ return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmRefFunc,
+ gasm_->Uint32Constant(function_index));
}
Node* WasmGraphBuilder::RefAsNonNull(Node* arg,
@@ -534,9 +613,7 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
Node* limit_address =
LOAD_INSTANCE_FIELD(StackLimitAddress, MachineType::Pointer());
- Node* limit = SetEffect(graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::Pointer()), limit_address,
- mcgraph()->IntPtrConstant(0), limit_address, control()));
+ Node* limit = gasm_->Load(MachineType::Pointer(), limit_address, 0);
Node* check = SetEffect(graph()->NewNode(
mcgraph()->machine()->StackPointerGreaterThan(StackCheckKind::kWasm),
@@ -882,8 +959,7 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
MachineOperatorBuilder* m = mcgraph()->machine();
switch (opcode) {
case wasm::kExprI32Eqz:
- op = m->Word32Equal();
- return graph()->NewNode(op, input, mcgraph()->Int32Constant(0));
+ return gasm_->Word32Equal(input, Int32Constant(0));
case wasm::kExprF32Abs:
op = m->Float32Abs();
break;
@@ -1084,8 +1160,7 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
break;
}
case wasm::kExprI64Eqz:
- op = m->Word64Equal();
- return graph()->NewNode(op, input, mcgraph()->Int64Constant(0));
+ return gasm_->Word64Equal(input, Int64Constant(0));
case wasm::kExprF32SConvertI64:
if (m->Is32()) {
return BuildF32SConvertI64(input);
@@ -1137,7 +1212,7 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
? BuildCcallConvertFloat(input, position, opcode)
: BuildIntConvertFloat(input, position, opcode);
case wasm::kExprRefIsNull:
- return graph()->NewNode(m->WordEqual(), input, RefNull());
+ return gasm_->WordEqual(input, RefNull());
case wasm::kExprI32AsmjsLoadMem8S:
return BuildAsmjsLoadMem(MachineType::Int8(), input);
case wasm::kExprI32AsmjsLoadMem8U:
@@ -1171,29 +1246,14 @@ Node* WasmGraphBuilder::Simd128Constant(const uint8_t value[16]) {
return graph()->NewNode(mcgraph()->machine()->S128Const(value));
}
-namespace {
-Node* Branch(MachineGraph* mcgraph, Node* cond, Node** true_node,
- Node** false_node, Node* control, BranchHint hint) {
- DCHECK_NOT_NULL(cond);
- DCHECK_NOT_NULL(control);
- Node* branch =
- mcgraph->graph()->NewNode(mcgraph->common()->Branch(hint), cond, control);
- *true_node = mcgraph->graph()->NewNode(mcgraph->common()->IfTrue(), branch);
- *false_node = mcgraph->graph()->NewNode(mcgraph->common()->IfFalse(), branch);
- return branch;
-}
-} // namespace
-
Node* WasmGraphBuilder::BranchNoHint(Node* cond, Node** true_node,
Node** false_node) {
- return Branch(mcgraph(), cond, true_node, false_node, control(),
- BranchHint::kNone);
+ return gasm_->Branch(cond, true_node, false_node, BranchHint::kNone);
}
Node* WasmGraphBuilder::BranchExpectFalse(Node* cond, Node** true_node,
Node** false_node) {
- return Branch(mcgraph(), cond, true_node, false_node, control(),
- BranchHint::kFalse);
+ return gasm_->Branch(cond, true_node, false_node, BranchHint::kFalse);
}
TrapId WasmGraphBuilder::GetTrapIdForTrap(wasm::TrapReason reason) {
@@ -1247,9 +1307,7 @@ Node* WasmGraphBuilder::TrapIfEq32(wasm::TrapReason reason, Node* node,
if (val == 0) {
return TrapIfFalse(reason, node, position);
} else {
- return TrapIfTrue(reason,
- graph()->NewNode(mcgraph()->machine()->Word32Equal(),
- node, mcgraph()->Int32Constant(val)),
+ return TrapIfTrue(reason, gasm_->Word32Equal(node, Int32Constant(val)),
position);
}
}
@@ -1266,9 +1324,7 @@ Node* WasmGraphBuilder::TrapIfEq64(wasm::TrapReason reason, Node* node,
wasm::WasmCodePosition position) {
Int64Matcher m(node);
if (m.HasResolvedValue() && !m.Is(val)) return graph()->start();
- return TrapIfTrue(reason,
- graph()->NewNode(mcgraph()->machine()->Word64Equal(), node,
- mcgraph()->Int64Constant(val)),
+ return TrapIfTrue(reason, gasm_->Word64Equal(node, Int64Constant(val)),
position);
}
@@ -1300,7 +1356,7 @@ Node* WasmGraphBuilder::Return(Vector<Node*> vals) {
unsigned count = static_cast<unsigned>(vals.size());
base::SmallVector<Node*, 8> buf(count + 3);
- buf[0] = mcgraph()->Int32Constant(0);
+ buf[0] = Int32Constant(0);
if (count > 0) {
base::Memcpy(buf.data() + 1, vals.begin(), sizeof(void*) * count);
}
@@ -1327,11 +1383,9 @@ Node* WasmGraphBuilder::MaskShiftCount32(Node* node) {
Int32Matcher match(node);
if (match.HasResolvedValue()) {
int32_t masked = (match.ResolvedValue() & kMask32);
- if (match.ResolvedValue() != masked)
- node = mcgraph()->Int32Constant(masked);
+ if (match.ResolvedValue() != masked) node = Int32Constant(masked);
} else {
- node = graph()->NewNode(mcgraph()->machine()->Word32And(), node,
- mcgraph()->Int32Constant(kMask32));
+ node = gasm_->Word32And(node, Int32Constant(kMask32));
}
}
return node;
@@ -1344,11 +1398,9 @@ Node* WasmGraphBuilder::MaskShiftCount64(Node* node) {
Int64Matcher match(node);
if (match.HasResolvedValue()) {
int64_t masked = (match.ResolvedValue() & kMask64);
- if (match.ResolvedValue() != masked)
- node = mcgraph()->Int64Constant(masked);
+ if (match.ResolvedValue() != masked) node = Int64Constant(masked);
} else {
- node = graph()->NewNode(mcgraph()->machine()->Word64And(), node,
- mcgraph()->Int64Constant(kMask64));
+ node = gasm_->Word64And(node, Int64Constant(kMask64));
}
}
return node;
@@ -1381,21 +1433,21 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(
bool isFloat = false;
switch (wasmtype.kind()) {
- case wasm::ValueType::kF64:
- value = graph()->NewNode(m->BitcastFloat64ToInt64(), node);
+ case wasm::kF64:
+ value = gasm_->BitcastFloat64ToInt64(node);
isFloat = true;
V8_FALLTHROUGH;
- case wasm::ValueType::kI64:
- result = mcgraph()->Int64Constant(0);
+ case wasm::kI64:
+ result = Int64Constant(0);
break;
- case wasm::ValueType::kF32:
- value = graph()->NewNode(m->BitcastFloat32ToInt32(), node);
+ case wasm::kF32:
+ value = gasm_->BitcastFloat32ToInt32(node);
isFloat = true;
V8_FALLTHROUGH;
- case wasm::ValueType::kI32:
- result = mcgraph()->Int32Constant(0);
+ case wasm::kI32:
+ result = Int32Constant(0);
break;
- case wasm::ValueType::kS128:
+ case wasm::kS128:
DCHECK(ReverseBytesSupported(m, valueSizeInBytes));
break;
default:
@@ -1409,17 +1461,15 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(
if (wasmtype == wasm::kWasmI64 && mem_rep < MachineRepresentation::kWord64) {
// In case we store lower part of WasmI64 expression, we can truncate
// upper 32bits
- value = graph()->NewNode(m->TruncateInt64ToInt32(), value);
+ value = gasm_->TruncateInt64ToInt32(value);
valueSizeInBytes = wasm::kWasmI32.element_size_bytes();
valueSizeInBits = 8 * valueSizeInBytes;
if (mem_rep == MachineRepresentation::kWord16) {
- value =
- graph()->NewNode(m->Word32Shl(), value, mcgraph()->Int32Constant(16));
+ value = gasm_->Word32Shl(value, Int32Constant(16));
}
} else if (wasmtype == wasm::kWasmI32 &&
mem_rep == MachineRepresentation::kWord16) {
- value =
- graph()->NewNode(m->Word32Shl(), value, mcgraph()->Int32Constant(16));
+ value = gasm_->Word32Shl(value, Int32Constant(16));
}
int i;
@@ -1428,10 +1478,10 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(
if (ReverseBytesSupported(m, valueSizeInBytes)) {
switch (valueSizeInBytes) {
case 4:
- result = graph()->NewNode(m->Word32ReverseBytes(), value);
+ result = gasm_->Word32ReverseBytes(value);
break;
case 8:
- result = graph()->NewNode(m->Word64ReverseBytes(), value);
+ result = gasm_->Word64ReverseBytes(value);
break;
case 16:
result = graph()->NewNode(m->Simd128ReverseBytes(), value);
@@ -1452,44 +1502,36 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(
DCHECK_EQ(0, (shiftCount + 8) % 16);
if (valueSizeInBits > 32) {
- shiftLower = graph()->NewNode(m->Word64Shl(), value,
- mcgraph()->Int64Constant(shiftCount));
- shiftHigher = graph()->NewNode(m->Word64Shr(), value,
- mcgraph()->Int64Constant(shiftCount));
- lowerByte = graph()->NewNode(
- m->Word64And(), shiftLower,
- mcgraph()->Int64Constant(static_cast<uint64_t>(0xFF)
- << (valueSizeInBits - 8 - i)));
- higherByte = graph()->NewNode(
- m->Word64And(), shiftHigher,
- mcgraph()->Int64Constant(static_cast<uint64_t>(0xFF) << i));
- result = graph()->NewNode(m->Word64Or(), result, lowerByte);
- result = graph()->NewNode(m->Word64Or(), result, higherByte);
+ shiftLower = gasm_->Word64Shl(value, Int64Constant(shiftCount));
+ shiftHigher = gasm_->Word64Shr(value, Int64Constant(shiftCount));
+ lowerByte = gasm_->Word64And(
+ shiftLower, Int64Constant(static_cast<uint64_t>(0xFF)
+ << (valueSizeInBits - 8 - i)));
+ higherByte = gasm_->Word64And(
+ shiftHigher, Int64Constant(static_cast<uint64_t>(0xFF) << i));
+ result = gasm_->Word64Or(result, lowerByte);
+ result = gasm_->Word64Or(result, higherByte);
} else {
- shiftLower = graph()->NewNode(m->Word32Shl(), value,
- mcgraph()->Int32Constant(shiftCount));
- shiftHigher = graph()->NewNode(m->Word32Shr(), value,
- mcgraph()->Int32Constant(shiftCount));
- lowerByte = graph()->NewNode(
- m->Word32And(), shiftLower,
- mcgraph()->Int32Constant(static_cast<uint32_t>(0xFF)
- << (valueSizeInBits - 8 - i)));
- higherByte = graph()->NewNode(
- m->Word32And(), shiftHigher,
- mcgraph()->Int32Constant(static_cast<uint32_t>(0xFF) << i));
- result = graph()->NewNode(m->Word32Or(), result, lowerByte);
- result = graph()->NewNode(m->Word32Or(), result, higherByte);
+ shiftLower = gasm_->Word32Shl(value, Int32Constant(shiftCount));
+ shiftHigher = gasm_->Word32Shr(value, Int32Constant(shiftCount));
+ lowerByte = gasm_->Word32And(
+ shiftLower, Int32Constant(static_cast<uint32_t>(0xFF)
+ << (valueSizeInBits - 8 - i)));
+ higherByte = gasm_->Word32And(
+ shiftHigher, Int32Constant(static_cast<uint32_t>(0xFF) << i));
+ result = gasm_->Word32Or(result, lowerByte);
+ result = gasm_->Word32Or(result, higherByte);
}
}
}
if (isFloat) {
switch (wasmtype.kind()) {
- case wasm::ValueType::kF64:
- result = graph()->NewNode(m->BitcastInt64ToFloat64(), result);
+ case wasm::kF64:
+ result = gasm_->BitcastInt64ToFloat64(result);
break;
- case wasm::ValueType::kF32:
- result = graph()->NewNode(m->BitcastInt32ToFloat32(), result);
+ case wasm::kF32:
+ result = gasm_->BitcastInt32ToFloat32(result);
break;
default:
UNREACHABLE();
@@ -1512,19 +1554,19 @@ Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node,
switch (memtype.representation()) {
case MachineRepresentation::kFloat64:
- value = graph()->NewNode(m->BitcastFloat64ToInt64(), node);
+ value = gasm_->BitcastFloat64ToInt64(node);
isFloat = true;
V8_FALLTHROUGH;
case MachineRepresentation::kWord64:
- result = mcgraph()->Int64Constant(0);
+ result = Int64Constant(0);
break;
case MachineRepresentation::kFloat32:
- value = graph()->NewNode(m->BitcastFloat32ToInt32(), node);
+ value = gasm_->BitcastFloat32ToInt32(node);
isFloat = true;
V8_FALLTHROUGH;
case MachineRepresentation::kWord32:
case MachineRepresentation::kWord16:
- result = mcgraph()->Int32Constant(0);
+ result = Int32Constant(0);
break;
case MachineRepresentation::kWord8:
// No need to change endianness for byte size, return original node
@@ -1543,16 +1585,14 @@ Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node,
if (ReverseBytesSupported(m, valueSizeInBytes < 4 ? 4 : valueSizeInBytes)) {
switch (valueSizeInBytes) {
case 2:
- result =
- graph()->NewNode(m->Word32ReverseBytes(),
- graph()->NewNode(m->Word32Shl(), value,
- mcgraph()->Int32Constant(16)));
+ result = gasm_->Word32ReverseBytes(
+ gasm_->Word32Shl(value, Int32Constant(16)));
break;
case 4:
- result = graph()->NewNode(m->Word32ReverseBytes(), value);
+ result = gasm_->Word32ReverseBytes(value);
break;
case 8:
- result = graph()->NewNode(m->Word64ReverseBytes(), value);
+ result = gasm_->Word64ReverseBytes(value);
break;
case 16:
result = graph()->NewNode(m->Simd128ReverseBytes(), value);
@@ -1572,33 +1612,25 @@ Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node,
DCHECK_EQ(0, (shiftCount + 8) % 16);
if (valueSizeInBits > 32) {
- shiftLower = graph()->NewNode(m->Word64Shl(), value,
- mcgraph()->Int64Constant(shiftCount));
- shiftHigher = graph()->NewNode(m->Word64Shr(), value,
- mcgraph()->Int64Constant(shiftCount));
- lowerByte = graph()->NewNode(
- m->Word64And(), shiftLower,
- mcgraph()->Int64Constant(static_cast<uint64_t>(0xFF)
- << (valueSizeInBits - 8 - i)));
- higherByte = graph()->NewNode(
- m->Word64And(), shiftHigher,
- mcgraph()->Int64Constant(static_cast<uint64_t>(0xFF) << i));
- result = graph()->NewNode(m->Word64Or(), result, lowerByte);
- result = graph()->NewNode(m->Word64Or(), result, higherByte);
+ shiftLower = gasm_->Word64Shl(value, Int64Constant(shiftCount));
+ shiftHigher = gasm_->Word64Shr(value, Int64Constant(shiftCount));
+ lowerByte = gasm_->Word64And(
+ shiftLower, Int64Constant(static_cast<uint64_t>(0xFF)
+ << (valueSizeInBits - 8 - i)));
+ higherByte = gasm_->Word64And(
+ shiftHigher, Int64Constant(static_cast<uint64_t>(0xFF) << i));
+ result = gasm_->Word64Or(result, lowerByte);
+ result = gasm_->Word64Or(result, higherByte);
} else {
- shiftLower = graph()->NewNode(m->Word32Shl(), value,
- mcgraph()->Int32Constant(shiftCount));
- shiftHigher = graph()->NewNode(m->Word32Shr(), value,
- mcgraph()->Int32Constant(shiftCount));
- lowerByte = graph()->NewNode(
- m->Word32And(), shiftLower,
- mcgraph()->Int32Constant(static_cast<uint32_t>(0xFF)
- << (valueSizeInBits - 8 - i)));
- higherByte = graph()->NewNode(
- m->Word32And(), shiftHigher,
- mcgraph()->Int32Constant(static_cast<uint32_t>(0xFF) << i));
- result = graph()->NewNode(m->Word32Or(), result, lowerByte);
- result = graph()->NewNode(m->Word32Or(), result, higherByte);
+ shiftLower = gasm_->Word32Shl(value, Int32Constant(shiftCount));
+ shiftHigher = gasm_->Word32Shr(value, Int32Constant(shiftCount));
+ lowerByte = gasm_->Word32And(
+ shiftLower, Int32Constant(static_cast<uint32_t>(0xFF)
+ << (valueSizeInBits - 8 - i)));
+ higherByte = gasm_->Word32And(
+ shiftHigher, Int32Constant(static_cast<uint32_t>(0xFF) << i));
+ result = gasm_->Word32Or(result, lowerByte);
+ result = gasm_->Word32Or(result, higherByte);
}
}
}
@@ -1606,10 +1638,10 @@ Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node,
if (isFloat) {
switch (memtype.representation()) {
case MachineRepresentation::kFloat64:
- result = graph()->NewNode(m->BitcastInt64ToFloat64(), result);
+ result = gasm_->BitcastInt64ToFloat64(result);
break;
case MachineRepresentation::kFloat32:
- result = graph()->NewNode(m->BitcastInt32ToFloat32(), result);
+ result = gasm_->BitcastInt32ToFloat32(result);
break;
default:
UNREACHABLE();
@@ -1626,19 +1658,14 @@ Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node,
// result = (x << machine_width - type_width) >> (machine_width -
// type_width)
if (wasmtype == wasm::kWasmI64) {
- shiftBitCount = mcgraph()->Int32Constant(64 - valueSizeInBits);
- result = graph()->NewNode(
- m->Word64Sar(),
- graph()->NewNode(m->Word64Shl(),
- graph()->NewNode(m->ChangeInt32ToInt64(), result),
- shiftBitCount),
+ shiftBitCount = Int32Constant(64 - valueSizeInBits);
+ result = gasm_->Word64Sar(
+ gasm_->Word64Shl(gasm_->ChangeInt32ToInt64(result), shiftBitCount),
shiftBitCount);
} else if (wasmtype == wasm::kWasmI32) {
- shiftBitCount = mcgraph()->Int32Constant(32 - valueSizeInBits);
- result = graph()->NewNode(
- m->Word32Sar(),
- graph()->NewNode(m->Word32Shl(), result, shiftBitCount),
- shiftBitCount);
+ shiftBitCount = Int32Constant(32 - valueSizeInBits);
+ result = gasm_->Word32Sar(gasm_->Word32Shl(result, shiftBitCount),
+ shiftBitCount);
}
}
}
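// Standalone illustration of the shift-based sign extension used above
// (editorial sketch, not part of this commit; assumes two's complement and an
// arithmetic right shift, which the generated Word32Sar/Word64Sar provide):
//
//   int32_t SignExtend16To32(uint32_t x) {
//     const int shift = 32 - 16;  // machine_width - type_width
//     return static_cast<int32_t>(x << shift) >> shift;
//   }
//   // SignExtend16To32(0xFFFF) == -1, SignExtend16To32(0x7FFF) == 32767.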
@@ -1651,20 +1678,20 @@ Node* WasmGraphBuilder::BuildF32CopySign(Node* left, Node* right) {
wasm::kExprF32ReinterpretI32,
Binop(wasm::kExprI32Ior,
Binop(wasm::kExprI32And, Unop(wasm::kExprI32ReinterpretF32, left),
- mcgraph()->Int32Constant(0x7FFFFFFF)),
+ Int32Constant(0x7FFFFFFF)),
Binop(wasm::kExprI32And, Unop(wasm::kExprI32ReinterpretF32, right),
- mcgraph()->Int32Constant(0x80000000))));
+ Int32Constant(0x80000000))));
return result;
}
Node* WasmGraphBuilder::BuildF64CopySign(Node* left, Node* right) {
if (mcgraph()->machine()->Is64()) {
- return gasm_->BitcastInt64ToFloat64(gasm_->Word64Or(
- gasm_->Word64And(gasm_->BitcastFloat64ToInt64(left),
- gasm_->Int64Constant(0x7FFFFFFFFFFFFFFF)),
- gasm_->Word64And(gasm_->BitcastFloat64ToInt64(right),
- gasm_->Int64Constant(0x8000000000000000))));
+ return gasm_->BitcastInt64ToFloat64(
+ gasm_->Word64Or(gasm_->Word64And(gasm_->BitcastFloat64ToInt64(left),
+ Int64Constant(0x7FFFFFFFFFFFFFFF)),
+ gasm_->Word64And(gasm_->BitcastFloat64ToInt64(right),
+ Int64Constant(0x8000000000000000))));
}
DCHECK(mcgraph()->machine()->Is32());
@@ -1673,8 +1700,8 @@ Node* WasmGraphBuilder::BuildF64CopySign(Node* left, Node* right) {
Node* high_word_right = gasm_->Float64ExtractHighWord32(right);
Node* new_high_word = gasm_->Word32Or(
- gasm_->Word32And(high_word_left, gasm_->Int32Constant(0x7FFFFFFF)),
- gasm_->Word32And(high_word_right, gasm_->Int32Constant(0x80000000)));
+ gasm_->Word32And(high_word_left, Int32Constant(0x7FFFFFFF)),
+ gasm_->Word32And(high_word_right, Int32Constant(0x80000000)));
return gasm_->Float64InsertHighWord32(left, new_high_word);
}
@@ -1966,29 +1993,23 @@ Node* WasmGraphBuilder::BuildIntConvertFloat(Node* input,
}
Node* WasmGraphBuilder::BuildI32AsmjsSConvertF32(Node* input) {
- MachineOperatorBuilder* m = mcgraph()->machine();
// asm.js must use the wacky JS semantics.
- input = graph()->NewNode(m->ChangeFloat32ToFloat64(), input);
- return graph()->NewNode(m->TruncateFloat64ToWord32(), input);
+ return gasm_->TruncateFloat64ToWord32(gasm_->ChangeFloat32ToFloat64(input));
}
Node* WasmGraphBuilder::BuildI32AsmjsSConvertF64(Node* input) {
- MachineOperatorBuilder* m = mcgraph()->machine();
// asm.js must use the wacky JS semantics.
- return graph()->NewNode(m->TruncateFloat64ToWord32(), input);
+ return gasm_->TruncateFloat64ToWord32(input);
}
Node* WasmGraphBuilder::BuildI32AsmjsUConvertF32(Node* input) {
- MachineOperatorBuilder* m = mcgraph()->machine();
// asm.js must use the wacky JS semantics.
- input = graph()->NewNode(m->ChangeFloat32ToFloat64(), input);
- return graph()->NewNode(m->TruncateFloat64ToWord32(), input);
+ return gasm_->TruncateFloat64ToWord32(gasm_->ChangeFloat32ToFloat64(input));
}
Node* WasmGraphBuilder::BuildI32AsmjsUConvertF64(Node* input) {
- MachineOperatorBuilder* m = mcgraph()->machine();
// asm.js must use the wacky JS semantics.
- return graph()->NewNode(m->TruncateFloat64ToWord32(), input);
+ return gasm_->TruncateFloat64ToWord32(input);
}
Node* WasmGraphBuilder::BuildBitCountingCall(Node* input, ExternalReference ref,
@@ -1998,7 +2019,7 @@ Node* WasmGraphBuilder::BuildBitCountingCall(Node* input, ExternalReference ref,
MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
MachineSignature sig(1, 1, sig_types);
- Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(ref));
+ Node* function = gasm_->ExternalConstant(ref);
return BuildCCall(&sig, function, stack_slot_param);
}
@@ -2118,12 +2139,10 @@ Node* WasmGraphBuilder::BuildCFuncInstruction(ExternalReference ref,
MachineType sig_types[] = {MachineType::Pointer()};
MachineSignature sig(0, 1, sig_types);
- Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(ref));
+ Node* function = gasm_->ExternalConstant(ref);
BuildCCall(&sig, function, stack_slot);
- return SetEffect(graph()->NewNode(mcgraph()->machine()->Load(type),
- stack_slot, mcgraph()->Int32Constant(0),
- effect(), control()));
+ return gasm_->Load(type, stack_slot, 0);
}
Node* WasmGraphBuilder::BuildF32SConvertI64(Node* input) {
@@ -2158,17 +2177,14 @@ Node* WasmGraphBuilder::BuildIntToFloatConversionInstruction(
ElementSizeInBytes(result_type.representation()));
Node* stack_slot =
graph()->NewNode(mcgraph()->machine()->StackSlot(stack_slot_size));
- const Operator* store_op = mcgraph()->machine()->Store(
- StoreRepresentation(parameter_representation, kNoWriteBarrier));
- SetEffect(graph()->NewNode(store_op, stack_slot, mcgraph()->Int32Constant(0),
- input, effect(), control()));
+ auto store_rep =
+ StoreRepresentation(parameter_representation, kNoWriteBarrier);
+ gasm_->Store(store_rep, stack_slot, 0, input);
MachineType sig_types[] = {MachineType::Pointer()};
MachineSignature sig(0, 1, sig_types);
- Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(ref));
+ Node* function = gasm_->ExternalConstant(ref);
BuildCCall(&sig, function, stack_slot);
- return SetEffect(graph()->NewNode(mcgraph()->machine()->Load(result_type),
- stack_slot, mcgraph()->Int32Constant(0),
- effect(), control()));
+ return gasm_->Load(result_type, stack_slot, 0);
}
namespace {
@@ -2205,20 +2221,16 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
ElementSizeInBytes(float_ty.representation()));
Node* stack_slot =
graph()->NewNode(mcgraph()->machine()->StackSlot(stack_slot_size));
- const Operator* store_op = mcgraph()->machine()->Store(
- StoreRepresentation(float_ty.representation(), kNoWriteBarrier));
- SetEffect(graph()->NewNode(store_op, stack_slot, Int32Constant(0), input,
- effect(), control()));
+ auto store_rep =
+ StoreRepresentation(float_ty.representation(), kNoWriteBarrier);
+ gasm_->Store(store_rep, stack_slot, 0, input);
MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
MachineSignature sig(1, 1, sig_types);
- Node* function =
- graph()->NewNode(mcgraph()->common()->ExternalConstant(call_ref));
+ Node* function = gasm_->ExternalConstant(call_ref);
Node* overflow = BuildCCall(&sig, function, stack_slot);
if (IsTrappingConvertOp(opcode)) {
ZeroCheck32(wasm::kTrapFloatUnrepresentable, overflow, position);
- return SetEffect(graph()->NewNode(mcgraph()->machine()->Load(int_ty),
- stack_slot, Int32Constant(0), effect(),
- control()));
+ return gasm_->Load(int_ty, stack_slot, 0);
}
Node* test = Binop(wasm::kExprI32Eq, overflow, Int32Constant(0), position);
Diamond tl_d(graph(), mcgraph()->common(), test, BranchHint::kFalse);
@@ -2231,9 +2243,7 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
sat_d.Nest(nan_d, false);
Node* sat_val =
sat_d.Phi(int_ty.representation(), Min(this, int_ty), Max(this, int_ty));
- Node* load =
- SetEffect(graph()->NewNode(mcgraph()->machine()->Load(int_ty), stack_slot,
- Int32Constant(0), effect(), control()));
+ Node* load = gasm_->Load(int_ty, stack_slot, 0);
Node* nan_val =
nan_d.Phi(int_ty.representation(), Zero(this, int_ty), sat_val);
return tl_d.Phi(int_ty.representation(), nan_val, load);
@@ -2241,22 +2251,7 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
Node* WasmGraphBuilder::MemoryGrow(Node* input) {
needs_stack_check_ = true;
-
- WasmMemoryGrowDescriptor interface_descriptor;
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- mcgraph()->zone(), // zone
- interface_descriptor, // descriptor
- interface_descriptor.GetStackParameterCount(), // stack parameter count
- CallDescriptor::kNoFlags, // flags
- Operator::kNoProperties, // properties
- StubCallMode::kCallWasmRuntimeStub); // stub call mode
- // A direct call to a wasm runtime stub defined in this module.
- // Just encode the stub index. This will be patched at relocation.
- Node* call_target = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmMemoryGrow, RelocInfo::WASM_STUB_CALL);
- return SetEffectControl(
- graph()->NewNode(mcgraph()->common()->Call(call_descriptor), call_target,
- input, effect(), control()));
+ return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmMemoryGrow, input);
}
Node* WasmGraphBuilder::Throw(uint32_t exception_index,
@@ -2265,44 +2260,36 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
wasm::WasmCodePosition position) {
needs_stack_check_ = true;
uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(exception);
- Node* create_parameters[] = {
- LoadExceptionTagFromTable(exception_index),
- BuildChangeUint31ToSmi(mcgraph()->Uint32Constant(encoded_size))};
- Node* except_obj =
- BuildCallToRuntime(Runtime::kWasmThrowCreate, create_parameters,
- arraysize(create_parameters));
- SetSourcePosition(except_obj, position);
- Node* values_array = CALL_BUILTIN(
- WasmGetOwnProperty, except_obj,
- LOAD_FULL_POINTER(BuildLoadIsolateRoot(),
- IsolateData::root_slot_offset(
- RootIndex::kwasm_exception_values_symbol)),
- LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
+
+ Node* values_array =
+ gasm_->CallRuntimeStub(wasm::WasmCode::kWasmAllocateFixedArray,
+ gasm_->IntPtrConstant(encoded_size));
+ SetSourcePosition(values_array, position);
+
uint32_t index = 0;
const wasm::WasmExceptionSig* sig = exception->sig;
MachineOperatorBuilder* m = mcgraph()->machine();
for (size_t i = 0; i < sig->parameter_count(); ++i) {
Node* value = values[i];
switch (sig->GetParam(i).kind()) {
- case wasm::ValueType::kF32:
- value = graph()->NewNode(m->BitcastFloat32ToInt32(), value);
+ case wasm::kF32:
+ value = gasm_->BitcastFloat32ToInt32(value);
V8_FALLTHROUGH;
- case wasm::ValueType::kI32:
+ case wasm::kI32:
BuildEncodeException32BitValue(values_array, &index, value);
break;
- case wasm::ValueType::kF64:
- value = graph()->NewNode(m->BitcastFloat64ToInt64(), value);
+ case wasm::kF64:
+ value = gasm_->BitcastFloat64ToInt64(value);
V8_FALLTHROUGH;
- case wasm::ValueType::kI64: {
- Node* upper32 = graph()->NewNode(
- m->TruncateInt64ToInt32(),
+ case wasm::kI64: {
+ Node* upper32 = gasm_->TruncateInt64ToInt32(
Binop(wasm::kExprI64ShrU, value, Int64Constant(32)));
BuildEncodeException32BitValue(values_array, &index, upper32);
- Node* lower32 = graph()->NewNode(m->TruncateInt64ToInt32(), value);
+ Node* lower32 = gasm_->TruncateInt64ToInt32(value);
BuildEncodeException32BitValue(values_array, &index, lower32);
break;
}
- case wasm::ValueType::kS128:
+ case wasm::kS128:
BuildEncodeException32BitValue(
values_array, &index,
graph()->NewNode(m->I32x4ExtractLane(0), value));
@@ -2316,59 +2303,53 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
values_array, &index,
graph()->NewNode(m->I32x4ExtractLane(3), value));
break;
- case wasm::ValueType::kRef:
- case wasm::ValueType::kOptRef:
+ case wasm::kRef:
+ case wasm::kOptRef:
+ case wasm::kRtt:
+ case wasm::kRttWithDepth:
STORE_FIXED_ARRAY_SLOT_ANY(values_array, index, value);
++index;
break;
- case wasm::ValueType::kRtt: // TODO(7748): Implement.
- case wasm::ValueType::kI8:
- case wasm::ValueType::kI16:
- case wasm::ValueType::kStmt:
- case wasm::ValueType::kBottom:
+ case wasm::kI8:
+ case wasm::kI16:
+ case wasm::kStmt:
+ case wasm::kBottom:
UNREACHABLE();
}
}
DCHECK_EQ(encoded_size, index);
- WasmThrowDescriptor interface_descriptor;
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- mcgraph()->zone(), interface_descriptor,
- interface_descriptor.GetStackParameterCount(), CallDescriptor::kNoFlags,
- Operator::kNoProperties, StubCallMode::kCallWasmRuntimeStub);
- Node* call_target = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmThrow, RelocInfo::WASM_STUB_CALL);
- Node* call = SetEffectControl(
- graph()->NewNode(mcgraph()->common()->Call(call_descriptor), call_target,
- except_obj, effect(), control()));
- SetSourcePosition(call, position);
- return call;
+
+ Node* exception_tag = LoadExceptionTagFromTable(exception_index);
+
+ Node* throw_call = gasm_->CallRuntimeStub(wasm::WasmCode::kWasmThrow,
+ exception_tag, values_array);
+ SetSourcePosition(throw_call, position);
+ return throw_call;
}
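// Design note on the rewritten Throw() above: instead of creating the
// exception object up front (Runtime::kWasmThrowCreate) and looking up its
// values array with the WasmGetOwnProperty builtin, the builder now allocates
// a plain FixedArray via the kWasmAllocateFixedArray runtime stub, fills it,
// and passes the exception tag plus the array to kWasmThrow; packaging the
// exception object is presumably left to that stub (not shown in this diff).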
void WasmGraphBuilder::BuildEncodeException32BitValue(Node* values_array,
uint32_t* index,
Node* value) {
- MachineOperatorBuilder* machine = mcgraph()->machine();
- Node* upper_halfword_as_smi = BuildChangeUint31ToSmi(
- graph()->NewNode(machine->Word32Shr(), value, Int32Constant(16)));
+ Node* upper_halfword_as_smi =
+ BuildChangeUint31ToSmi(gasm_->Word32Shr(value, Int32Constant(16)));
STORE_FIXED_ARRAY_SLOT_SMI(values_array, *index, upper_halfword_as_smi);
++(*index);
- Node* lower_halfword_as_smi = BuildChangeUint31ToSmi(
- graph()->NewNode(machine->Word32And(), value, Int32Constant(0xFFFFu)));
+ Node* lower_halfword_as_smi =
+ BuildChangeUint31ToSmi(gasm_->Word32And(value, Int32Constant(0xFFFFu)));
STORE_FIXED_ARRAY_SLOT_SMI(values_array, *index, lower_halfword_as_smi);
++(*index);
}
Node* WasmGraphBuilder::BuildDecodeException32BitValue(Node* values_array,
uint32_t* index) {
- MachineOperatorBuilder* machine = mcgraph()->machine();
Node* upper =
BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT_SMI(values_array, *index));
(*index)++;
- upper = graph()->NewNode(machine->Word32Shl(), upper, Int32Constant(16));
+ upper = gasm_->Word32Shl(upper, Int32Constant(16));
Node* lower =
BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT_SMI(values_array, *index));
(*index)++;
- Node* value = graph()->NewNode(machine->Word32Or(), upper, lower);
+ Node* value = gasm_->Word32Or(upper, lower);
return value;
}
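// The two helpers above split a 32-bit value into two 16-bit halves so that
// each stored element fits in a Smi. Standalone sketch of the scheme
// (editorial illustration, plain integers in place of Smi-tagged nodes):
//
//   void Encode32(uint32_t value, uint32_t out[2]) {
//     out[0] = value >> 16;      // upper halfword, stored first
//     out[1] = value & 0xFFFFu;  // lower halfword
//   }
//   uint32_t Decode32(const uint32_t in[2]) {
//     return (in[0] << 16) | in[1];
//   }
//
// Decoding after encoding round-trips any 32-bit value; a 64-bit value is
// split into two such encodings (upper word first), as done in Throw() above.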
@@ -2387,20 +2368,12 @@ Node* WasmGraphBuilder::Rethrow(Node* except_obj) {
// TODO(v8:8091): Currently the message of the original exception is not being
// preserved when rethrown to the console. The pending message will need to be
// saved when caught and restored here while being rethrown.
- WasmThrowDescriptor interface_descriptor;
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- mcgraph()->zone(), interface_descriptor,
- interface_descriptor.GetStackParameterCount(), CallDescriptor::kNoFlags,
- Operator::kNoProperties, StubCallMode::kCallWasmRuntimeStub);
- Node* call_target = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmRethrow, RelocInfo::WASM_STUB_CALL);
- return gasm_->Call(call_descriptor, call_target, except_obj);
+ return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmRethrow, except_obj);
}
Node* WasmGraphBuilder::ExceptionTagEqual(Node* caught_tag,
Node* expected_tag) {
- MachineOperatorBuilder* machine = mcgraph()->machine();
- return graph()->NewNode(machine->WordEqual(), caught_tag, expected_tag);
+ return gasm_->WordEqual(caught_tag, expected_tag);
}
Node* WasmGraphBuilder::LoadExceptionTagFromTable(uint32_t exception_index) {
@@ -2411,8 +2384,8 @@ Node* WasmGraphBuilder::LoadExceptionTagFromTable(uint32_t exception_index) {
}
Node* WasmGraphBuilder::GetExceptionTag(Node* except_obj) {
- return CALL_BUILTIN(
- WasmGetOwnProperty, except_obj,
+ return gasm_->CallBuiltin(
+ Builtins::kWasmGetOwnProperty, except_obj,
LOAD_FULL_POINTER(
BuildLoadIsolateRoot(),
IsolateData::root_slot_offset(RootIndex::kwasm_exception_tag_symbol)),
@@ -2422,8 +2395,8 @@ Node* WasmGraphBuilder::GetExceptionTag(Node* except_obj) {
Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
const wasm::WasmException* exception,
Vector<Node*> values) {
- Node* values_array = CALL_BUILTIN(
- WasmGetOwnProperty, except_obj,
+ Node* values_array = gasm_->CallBuiltin(
+ Builtins::kWasmGetOwnProperty, except_obj,
LOAD_FULL_POINTER(BuildLoadIsolateRoot(),
IsolateData::root_slot_offset(
RootIndex::kwasm_exception_values_symbol)),
@@ -2434,23 +2407,23 @@ Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
for (size_t i = 0; i < sig->parameter_count(); ++i) {
Node* value;
switch (sig->GetParam(i).kind()) {
- case wasm::ValueType::kI32:
+ case wasm::kI32:
value = BuildDecodeException32BitValue(values_array, &index);
break;
- case wasm::ValueType::kI64:
+ case wasm::kI64:
value = BuildDecodeException64BitValue(values_array, &index);
break;
- case wasm::ValueType::kF32: {
+ case wasm::kF32: {
value = Unop(wasm::kExprF32ReinterpretI32,
BuildDecodeException32BitValue(values_array, &index));
break;
}
- case wasm::ValueType::kF64: {
+ case wasm::kF64: {
value = Unop(wasm::kExprF64ReinterpretI64,
BuildDecodeException64BitValue(values_array, &index));
break;
}
- case wasm::ValueType::kS128:
+ case wasm::kS128:
value = graph()->NewNode(
mcgraph()->machine()->I32x4Splat(),
BuildDecodeException32BitValue(values_array, &index));
@@ -2464,16 +2437,17 @@ Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
mcgraph()->machine()->I32x4ReplaceLane(3), value,
BuildDecodeException32BitValue(values_array, &index));
break;
- case wasm::ValueType::kRef:
- case wasm::ValueType::kOptRef:
+ case wasm::kRef:
+ case wasm::kOptRef:
+ case wasm::kRtt:
+ case wasm::kRttWithDepth:
value = LOAD_FIXED_ARRAY_SLOT_ANY(values_array, index);
++index;
break;
- case wasm::ValueType::kRtt: // TODO(7748): Implement.
- case wasm::ValueType::kI8:
- case wasm::ValueType::kI16:
- case wasm::ValueType::kStmt:
- case wasm::ValueType::kBottom:
+ case wasm::kI8:
+ case wasm::kI16:
+ case wasm::kStmt:
+ case wasm::kBottom:
UNREACHABLE();
}
values[i] = value;
@@ -2484,23 +2458,20 @@ Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right,
wasm::WasmCodePosition position) {
- MachineOperatorBuilder* m = mcgraph()->machine();
ZeroCheck32(wasm::kTrapDivByZero, right, position);
Node* before = control();
Node* denom_is_m1;
Node* denom_is_not_m1;
- BranchExpectFalse(
- graph()->NewNode(m->Word32Equal(), right, mcgraph()->Int32Constant(-1)),
- &denom_is_m1, &denom_is_not_m1);
+ BranchExpectFalse(gasm_->Word32Equal(right, Int32Constant(-1)), &denom_is_m1,
+ &denom_is_not_m1);
SetControl(denom_is_m1);
TrapIfEq32(wasm::kTrapDivUnrepresentable, left, kMinInt, position);
if (control() != denom_is_m1) {
- SetControl(graph()->NewNode(mcgraph()->common()->Merge(2), denom_is_not_m1,
- control()));
+ SetControl(Merge(denom_is_not_m1, control()));
} else {
SetControl(before);
}
- return graph()->NewNode(m->Int32Div(), left, right, control());
+ return gasm_->Int32Div(left, right);
}
Node* WasmGraphBuilder::BuildI32RemS(Node* left, Node* right,
@@ -2509,28 +2480,24 @@ Node* WasmGraphBuilder::BuildI32RemS(Node* left, Node* right,
ZeroCheck32(wasm::kTrapRemByZero, right, position);
- Diamond d(
- graph(), mcgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, mcgraph()->Int32Constant(-1)),
- BranchHint::kFalse);
+ Diamond d(graph(), mcgraph()->common(),
+ gasm_->Word32Equal(right, Int32Constant(-1)), BranchHint::kFalse);
d.Chain(control());
- return d.Phi(MachineRepresentation::kWord32, mcgraph()->Int32Constant(0),
+ return d.Phi(MachineRepresentation::kWord32, Int32Constant(0),
graph()->NewNode(m->Int32Mod(), left, right, d.if_false));
}
Node* WasmGraphBuilder::BuildI32DivU(Node* left, Node* right,
wasm::WasmCodePosition position) {
- MachineOperatorBuilder* m = mcgraph()->machine();
- return graph()->NewNode(m->Uint32Div(), left, right,
- ZeroCheck32(wasm::kTrapDivByZero, right, position));
+ ZeroCheck32(wasm::kTrapDivByZero, right, position);
+ return gasm_->Uint32Div(left, right);
}
Node* WasmGraphBuilder::BuildI32RemU(Node* left, Node* right,
wasm::WasmCodePosition position) {
- MachineOperatorBuilder* m = mcgraph()->machine();
- return graph()->NewNode(m->Uint32Mod(), left, right,
- ZeroCheck32(wasm::kTrapRemByZero, right, position));
+ ZeroCheck32(wasm::kTrapRemByZero, right, position);
+ return gasm_->Uint32Mod(left, right);
}
Node* WasmGraphBuilder::BuildI32AsmjsDivS(Node* left, Node* right) {
@@ -2539,54 +2506,49 @@ Node* WasmGraphBuilder::BuildI32AsmjsDivS(Node* left, Node* right) {
Int32Matcher mr(right);
if (mr.HasResolvedValue()) {
if (mr.ResolvedValue() == 0) {
- return mcgraph()->Int32Constant(0);
+ return Int32Constant(0);
} else if (mr.ResolvedValue() == -1) {
// The result is the negation of the left input.
- return graph()->NewNode(m->Int32Sub(), mcgraph()->Int32Constant(0), left);
+ return gasm_->Int32Sub(Int32Constant(0), left);
}
- return graph()->NewNode(m->Int32Div(), left, right, control());
+ return gasm_->Int32Div(left, right);
}
// asm.js semantics return 0 on divide or mod by zero.
if (m->Int32DivIsSafe()) {
// The hardware instruction does the right thing (e.g. arm).
- return graph()->NewNode(m->Int32Div(), left, right, control());
+ return gasm_->Int32Div(left, right);
}
// Check denominator for zero.
- Diamond z(
- graph(), mcgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, mcgraph()->Int32Constant(0)),
- BranchHint::kFalse);
+ Diamond z(graph(), mcgraph()->common(),
+ gasm_->Word32Equal(right, Int32Constant(0)), BranchHint::kFalse);
z.Chain(control());
// Check denominator for -1. (avoid minint / -1 case).
- Diamond n(
- graph(), mcgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, mcgraph()->Int32Constant(-1)),
- BranchHint::kFalse);
+ Diamond n(graph(), mcgraph()->common(),
+ gasm_->Word32Equal(right, Int32Constant(-1)), BranchHint::kFalse);
n.Chain(z.if_false);
Node* div = graph()->NewNode(m->Int32Div(), left, right, n.if_false);
- Node* neg =
- graph()->NewNode(m->Int32Sub(), mcgraph()->Int32Constant(0), left);
+ Node* neg = gasm_->Int32Sub(Int32Constant(0), left);
- return z.Phi(MachineRepresentation::kWord32, mcgraph()->Int32Constant(0),
+ return z.Phi(MachineRepresentation::kWord32, Int32Constant(0),
n.Phi(MachineRepresentation::kWord32, neg, div));
}
Node* WasmGraphBuilder::BuildI32AsmjsRemS(Node* left, Node* right) {
CommonOperatorBuilder* c = mcgraph()->common();
MachineOperatorBuilder* m = mcgraph()->machine();
- Node* const zero = mcgraph()->Int32Constant(0);
+ Node* const zero = Int32Constant(0);
Int32Matcher mr(right);
if (mr.HasResolvedValue()) {
if (mr.ResolvedValue() == 0 || mr.ResolvedValue() == -1) {
return zero;
}
- return graph()->NewNode(m->Int32Mod(), left, right, control());
+ return gasm_->Int32Mod(left, right);
}
// General case for signed integer modulus, with optimization for (unknown)
@@ -2609,12 +2571,12 @@ Node* WasmGraphBuilder::BuildI32AsmjsRemS(Node* left, Node* right) {
//
// Note: We do not use the Diamond helper class here, because it really hurts
// readability with nested diamonds.
- Node* const minus_one = mcgraph()->Int32Constant(-1);
+ Node* const minus_one = Int32Constant(-1);
const Operator* const merge_op = c->Merge(2);
const Operator* const phi_op = c->Phi(MachineRepresentation::kWord32, 2);
- Node* check0 = graph()->NewNode(m->Int32LessThan(), zero, right);
+ Node* check0 = gasm_->Int32LessThan(zero, right);
Node* branch0 =
graph()->NewNode(c->Branch(BranchHint::kTrue), check0, control());
@@ -2679,35 +2641,29 @@ Node* WasmGraphBuilder::BuildI32AsmjsDivU(Node* left, Node* right) {
// asm.js semantics return 0 on divide or mod by zero.
if (m->Uint32DivIsSafe()) {
// The hardware instruction does the right thing (e.g. arm).
- return graph()->NewNode(m->Uint32Div(), left, right, control());
+ return gasm_->Uint32Div(left, right);
}
// Explicit check for x % 0.
- Diamond z(
- graph(), mcgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, mcgraph()->Int32Constant(0)),
- BranchHint::kFalse);
+ Diamond z(graph(), mcgraph()->common(),
+ gasm_->Word32Equal(right, Int32Constant(0)), BranchHint::kFalse);
z.Chain(control());
- return z.Phi(MachineRepresentation::kWord32, mcgraph()->Int32Constant(0),
+ return z.Phi(MachineRepresentation::kWord32, Int32Constant(0),
graph()->NewNode(mcgraph()->machine()->Uint32Div(), left, right,
z.if_false));
}
Node* WasmGraphBuilder::BuildI32AsmjsRemU(Node* left, Node* right) {
- MachineOperatorBuilder* m = mcgraph()->machine();
// asm.js semantics return 0 on divide or mod by zero.
// Explicit check for x % 0.
- Diamond z(
- graph(), mcgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, mcgraph()->Int32Constant(0)),
- BranchHint::kFalse);
+ Diamond z(graph(), mcgraph()->common(),
+ gasm_->Word32Equal(right, Int32Constant(0)), BranchHint::kFalse);
z.Chain(control());
Node* rem = graph()->NewNode(mcgraph()->machine()->Uint32Mod(), left, right,
z.if_false);
- return z.Phi(MachineRepresentation::kWord32, mcgraph()->Int32Constant(0),
- rem);
+ return z.Phi(MachineRepresentation::kWord32, Int32Constant(0), rem);
}
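The unsigned asm.js variants above need only a single diamond each; a short sketch of the semantics they implement (illustrative names), where division or remainder by zero yields 0:

    #include <cstdint>

    uint32_t AsmjsI32DivU(uint32_t left, uint32_t right) {
      return right == 0 ? 0u : left / right;  // z diamond false path does the divide
    }

    uint32_t AsmjsI32RemU(uint32_t left, uint32_t right) {
      return right == 0 ? 0u : left % right;  // z diamond false path does the modulo
    }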
Node* WasmGraphBuilder::BuildI64DivS(Node* left, Node* right,
@@ -2720,20 +2676,17 @@ Node* WasmGraphBuilder::BuildI64DivS(Node* left, Node* right,
Node* before = control();
Node* denom_is_m1;
Node* denom_is_not_m1;
- BranchExpectFalse(graph()->NewNode(mcgraph()->machine()->Word64Equal(), right,
- mcgraph()->Int64Constant(-1)),
- &denom_is_m1, &denom_is_not_m1);
+ BranchExpectFalse(gasm_->Word64Equal(right, Int64Constant(-1)), &denom_is_m1,
+ &denom_is_not_m1);
SetControl(denom_is_m1);
TrapIfEq64(wasm::kTrapDivUnrepresentable, left,
std::numeric_limits<int64_t>::min(), position);
if (control() != denom_is_m1) {
- SetControl(graph()->NewNode(mcgraph()->common()->Merge(2), denom_is_not_m1,
- control()));
+ SetControl(Merge(denom_is_not_m1, control()));
} else {
SetControl(before);
}
- return graph()->NewNode(mcgraph()->machine()->Int64Div(), left, right,
- control());
+ return gasm_->Int64Div(left, right);
}
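BuildI64DivS branches on a -1 divisor so the unrepresentable case can trap separately; a hedged sketch of the checks (assuming the divide-by-zero check happens earlier in the function, outside this hunk):

    #include <cstdint>
    #include <limits>
    #include <stdexcept>

    int64_t WasmI64DivS(int64_t left, int64_t right) {
      if (right == 0) throw std::runtime_error("trap: kTrapDivByZero");
      if (right == -1 && left == std::numeric_limits<int64_t>::min())
        throw std::runtime_error("trap: kTrapDivUnrepresentable");
      return left / right;  // Int64Div node
    }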
Node* WasmGraphBuilder::BuildI64RemS(Node* left, Node* right,
@@ -2744,16 +2697,14 @@ Node* WasmGraphBuilder::BuildI64RemS(Node* left, Node* right,
}
ZeroCheck64(wasm::kTrapRemByZero, right, position);
Diamond d(mcgraph()->graph(), mcgraph()->common(),
- graph()->NewNode(mcgraph()->machine()->Word64Equal(), right,
- mcgraph()->Int64Constant(-1)));
+ gasm_->Word64Equal(right, Int64Constant(-1)));
d.Chain(control());
Node* rem = graph()->NewNode(mcgraph()->machine()->Int64Mod(), left, right,
d.if_false);
- return d.Phi(MachineRepresentation::kWord64, mcgraph()->Int64Constant(0),
- rem);
+ return d.Phi(MachineRepresentation::kWord64, Int64Constant(0), rem);
}
Node* WasmGraphBuilder::BuildI64DivU(Node* left, Node* right,
@@ -2762,8 +2713,8 @@ Node* WasmGraphBuilder::BuildI64DivU(Node* left, Node* right,
return BuildDiv64Call(left, right, ExternalReference::wasm_uint64_div(),
MachineType::Int64(), wasm::kTrapDivByZero, position);
}
- return graph()->NewNode(mcgraph()->machine()->Uint64Div(), left, right,
- ZeroCheck64(wasm::kTrapDivByZero, right, position));
+ ZeroCheck64(wasm::kTrapDivByZero, right, position);
+ return gasm_->Uint64Div(left, right);
}
Node* WasmGraphBuilder::BuildI64RemU(Node* left, Node* right,
wasm::WasmCodePosition position) {
@@ -2771,13 +2722,8 @@ Node* WasmGraphBuilder::BuildI64RemU(Node* left, Node* right,
return BuildDiv64Call(left, right, ExternalReference::wasm_uint64_mod(),
MachineType::Int64(), wasm::kTrapRemByZero, position);
}
- return graph()->NewNode(mcgraph()->machine()->Uint64Mod(), left, right,
- ZeroCheck64(wasm::kTrapRemByZero, right, position));
-}
-
-Node* WasmGraphBuilder::GetBuiltinPointerTarget(int builtin_id) {
- static_assert(std::is_same<Smi, BuiltinPtr>(), "BuiltinPtr must be Smi");
- return graph()->NewNode(mcgraph()->common()->NumberConstant(builtin_id));
+ ZeroCheck64(wasm::kTrapRemByZero, right, position);
+ return gasm_->Uint64Mod(left, right);
}
Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
@@ -2792,14 +2738,12 @@ Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
MachineSignature sig(1, 1, sig_types);
- Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(ref));
+ Node* function = gasm_->ExternalConstant(ref);
Node* call = BuildCCall(&sig, function, stack_slot);
ZeroCheck32(trap_zero, call, position);
TrapIfEq32(wasm::kTrapDivUnrepresentable, call, -1, position);
- return SetEffect(graph()->NewNode(mcgraph()->machine()->Load(result_type),
- stack_slot, mcgraph()->Int32Constant(0),
- effect(), control()));
+ return gasm_->Load(result_type, stack_slot, 0);
}
template <typename... Args>
@@ -2807,27 +2751,28 @@ Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node* function,
Args... args) {
DCHECK_LE(sig->return_count(), 1);
DCHECK_EQ(sizeof...(args), sig->parameter_count());
- Node* const call_args[] = {function, args..., effect(), control()};
+ Node* call_args[] = {function, args..., effect(), control()};
auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(mcgraph()->zone(), sig);
- const Operator* op = mcgraph()->common()->Call(call_descriptor);
- return SetEffect(graph()->NewNode(op, arraysize(call_args), call_args));
+ return gasm_->Call(call_descriptor, arraysize(call_args), call_args);
}
Node* WasmGraphBuilder::BuildCallNode(const wasm::FunctionSig* sig,
Vector<Node*> args,
wasm::WasmCodePosition position,
- Node* instance_node, const Operator* op) {
+ Node* instance_node, const Operator* op,
+ Node* frame_state) {
if (instance_node == nullptr) {
DCHECK_NOT_NULL(instance_node_);
instance_node = instance_node_.get();
}
needs_stack_check_ = true;
const size_t params = sig->parameter_count();
+ const size_t has_frame_state = frame_state != nullptr ? 1 : 0;
const size_t extra = 3; // instance_node, effect, and control.
- const size_t count = 1 + params + extra;
+ const size_t count = 1 + params + extra + has_frame_state;
// Reallocate the buffer to make space for extra inputs.
base::SmallVector<Node*, 16 + extra> inputs(count);
@@ -2839,8 +2784,9 @@ Node* WasmGraphBuilder::BuildCallNode(const wasm::FunctionSig* sig,
if (params > 0) base::Memcpy(&inputs[2], &args[1], params * sizeof(Node*));
// Add effect and control inputs.
- inputs[params + 2] = effect();
- inputs[params + 3] = control();
+ if (has_frame_state != 0) inputs[params + 2] = frame_state;
+ inputs[params + has_frame_state + 2] = effect();
+ inputs[params + has_frame_state + 3] = control();
Node* call = graph()->NewNode(op, static_cast<int>(count), inputs.begin());
// Return calls have no effect output. Other calls are the new effect node.
@@ -2855,11 +2801,15 @@ Node* WasmGraphBuilder::BuildWasmCall(const wasm::FunctionSig* sig,
Vector<Node*> args, Vector<Node*> rets,
wasm::WasmCodePosition position,
Node* instance_node,
- UseRetpoline use_retpoline) {
+ UseRetpoline use_retpoline,
+ Node* frame_state) {
CallDescriptor* call_descriptor =
- GetWasmCallDescriptor(mcgraph()->zone(), sig, use_retpoline);
+ GetWasmCallDescriptor(mcgraph()->zone(), sig, use_retpoline,
+ kWasmFunction, frame_state != nullptr);
const Operator* op = mcgraph()->common()->Call(call_descriptor);
- Node* call = BuildCallNode(sig, args, position, instance_node, op);
+ Node* call =
+ BuildCallNode(sig, args, position, instance_node, op, frame_state);
+ SetControl(call);
size_t ret_count = sig->return_count();
if (ret_count == 0) return call; // No return value.
@@ -2907,10 +2857,8 @@ Node* WasmGraphBuilder::BuildImportCall(const wasm::FunctionSig* sig,
// Load the target from the imported_targets array at a known offset.
Node* imported_targets =
LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::Pointer());
- Node* target_node = SetEffect(graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::Pointer()), imported_targets,
- mcgraph()->Int32Constant(func_index * kSystemPointerSize), effect(),
- control()));
+ Node* target_node = gasm_->Load(MachineType::Pointer(), imported_targets,
+ func_index * kSystemPointerSize);
args[0] = target_node;
const UseRetpoline use_retpoline =
untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
@@ -2943,9 +2891,8 @@ Node* WasmGraphBuilder::BuildImportCall(const wasm::FunctionSig* sig,
func_index_intptr, gasm_->IntPtrConstant(kSystemPointerSize));
Node* imported_targets =
LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::Pointer());
- Node* target_node = SetEffect(graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::Pointer()), imported_targets,
- func_index_times_pointersize, effect(), control()));
+ Node* target_node = gasm_->Load(MachineType::Pointer(), imported_targets,
+ func_index_times_pointersize);
args[0] = target_node;
const UseRetpoline use_retpoline =
untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
@@ -3042,32 +2989,27 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
const wasm::FunctionSig* sig = env_->module->signature(sig_index);
- MachineOperatorBuilder* machine = mcgraph()->machine();
Node* key = args[0];
// Bounds check against the table size.
- Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, ift_size);
+ Node* in_bounds = gasm_->Uint32LessThan(key, ift_size);
TrapIfFalse(wasm::kTrapTableOutOfBounds, in_bounds, position);
// Mask the key to prevent SSCA.
if (untrusted_code_mitigations_) {
// mask = ((key - size) & ~key) >> 31
- Node* neg_key =
- graph()->NewNode(machine->Word32Xor(), key, Int32Constant(-1));
- Node* masked_diff = graph()->NewNode(
- machine->Word32And(),
- graph()->NewNode(machine->Int32Sub(), key, ift_size), neg_key);
- Node* mask =
- graph()->NewNode(machine->Word32Sar(), masked_diff, Int32Constant(31));
- key = graph()->NewNode(machine->Word32And(), key, mask);
- }
-
- Node* int32_scaled_key = Uint32ToUintptr(
- graph()->NewNode(machine->Word32Shl(), key, Int32Constant(2)));
-
- Node* loaded_sig = SetEffect(
- graph()->NewNode(machine->Load(MachineType::Int32()), ift_sig_ids,
- int32_scaled_key, effect(), control()));
+ Node* neg_key = gasm_->Word32Xor(key, Int32Constant(-1));
+ Node* masked_diff =
+ gasm_->Word32And(gasm_->Int32Sub(key, ift_size), neg_key);
+ Node* mask = gasm_->Word32Sar(masked_diff, Int32Constant(31));
+ key = gasm_->Word32And(key, mask);
+ }
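The mask computed above is the usual branchless bounds-mask trick; a sketch in plain C++ (the sign-extending shift mirrors Word32Sar, and the table size is assumed to stay below 2^31):

    #include <cstdint>

    uint32_t MaskTableIndex(uint32_t key, uint32_t size) {
      uint32_t neg_key = key ^ 0xFFFFFFFFu;                         // ~key
      uint32_t diff = (key - size) & neg_key;
      uint32_t mask =
          static_cast<uint32_t>(static_cast<int32_t>(diff) >> 31);  // all-ones iff key < size
      return key & mask;  // out-of-bounds keys collapse to 0 under speculation
    }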
+
+ Node* int32_scaled_key =
+ Uint32ToUintptr(gasm_->Word32Shl(key, Int32Constant(2)));
+
+ Node* loaded_sig =
+ gasm_->Load(MachineType::Int32(), ift_sig_ids, int32_scaled_key);
// Check that the dynamic type of the function is a subtype of its static
// (table) type. Currently, the only subtyping between function types is
// $t <: funcref for all $t: function_type.
@@ -3076,15 +3018,14 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
env_->module->tables[table_index].type == wasm::kWasmFuncRef;
if (needs_typechecking) {
int32_t expected_sig_id = env_->module->canonicalized_type_ids[sig_index];
- Node* sig_match = graph()->NewNode(machine->Word32Equal(), loaded_sig,
- Int32Constant(expected_sig_id));
+ Node* sig_match =
+ gasm_->Word32Equal(loaded_sig, Int32Constant(expected_sig_id));
TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
} else {
// We still have to check that the entry is initialized.
// TODO(9495): Skip this check for non-nullable tables when they are
// allowed.
- Node* function_is_null =
- graph()->NewNode(machine->Word32Equal(), loaded_sig, Int32Constant(-1));
+ Node* function_is_null = gasm_->Word32Equal(loaded_sig, Int32Constant(-1));
TrapIfTrue(wasm::kTrapNullDereference, function_is_null, position);
}
@@ -3096,9 +3037,8 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
Node* intptr_scaled_key =
gasm_->IntMul(key_intptr, gasm_->IntPtrConstant(kSystemPointerSize));
- Node* target = SetEffect(
- graph()->NewNode(machine->Load(MachineType::Pointer()), ift_targets,
- intptr_scaled_key, effect(), control()));
+ Node* target =
+ gasm_->Load(MachineType::Pointer(), ift_targets, intptr_scaled_key);
args[0] = target;
const UseRetpoline use_retpoline =
@@ -3222,9 +3162,8 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args,
wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kCallableOffset));
// TODO(manoskouk): Find an elegant way to avoid allocating this pair for
// every call.
- Node* function_instance_node = CALL_BUILTIN(
- WasmAllocatePair, instance_node_.get(), callable,
- LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
+ Node* function_instance_node = gasm_->CallBuiltin(
+ Builtins::kWasmAllocatePair, instance_node_.get(), callable);
gasm_->Goto(&end_label, call_target, function_instance_node);
}
@@ -3302,10 +3241,10 @@ Node* WasmGraphBuilder::BuildI32Rol(Node* left, Node* right) {
Int32Matcher m(right);
if (m.HasResolvedValue()) {
return Binop(wasm::kExprI32Ror, left,
- mcgraph()->Int32Constant(32 - (m.ResolvedValue() & 0x1F)));
+ Int32Constant(32 - (m.ResolvedValue() & 0x1F)));
} else {
return Binop(wasm::kExprI32Ror, left,
- Binop(wasm::kExprI32Sub, mcgraph()->Int32Constant(32), right));
+ Binop(wasm::kExprI32Sub, Int32Constant(32), right));
}
}
@@ -3313,10 +3252,9 @@ Node* WasmGraphBuilder::BuildI64Rol(Node* left, Node* right) {
// Implement Rol by Ror since TurboFan does not have Rol opcode.
// TODO(weiliang): support Word64Rol opcode in TurboFan.
Int64Matcher m(right);
- Node* inv_right =
- m.HasResolvedValue()
- ? mcgraph()->Int64Constant(64 - (m.ResolvedValue() & 0x3F))
- : Binop(wasm::kExprI64Sub, mcgraph()->Int64Constant(64), right);
+ Node* inv_right = m.HasResolvedValue()
+ ? Int64Constant(64 - (m.ResolvedValue() & 0x3F))
+ : Binop(wasm::kExprI64Sub, Int64Constant(64), right);
return Binop(wasm::kExprI64Ror, left, inv_right);
}
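Both rotate-left helpers rely on the identity rol(x, n) == ror(x, (width - n) mod width); a small sketch for the 64-bit case (names are illustrative):

    #include <cstdint>

    uint64_t Ror64(uint64_t x, unsigned n) {
      n &= 63;
      return n == 0 ? x : (x >> n) | (x << (64 - n));
    }

    uint64_t Rol64(uint64_t x, unsigned n) {
      return Ror64(x, (64u - (n & 63u)) & 63u);  // same trick as BuildI64Rol
    }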
@@ -3334,6 +3272,11 @@ Node* WasmGraphBuilder::BuildChangeInt32ToIntPtr(Node* value) {
: value;
}
+Node* WasmGraphBuilder::BuildChangeIntPtrToInt64(Node* value) {
+ return mcgraph()->machine()->Is32() ? gasm_->ChangeInt32ToInt64(value)
+ : value;
+}
+
Node* WasmGraphBuilder::BuildChangeInt32ToSmi(Node* value) {
// With pointer compression, only the lower 32 bits are used.
return COMPRESS_POINTERS_BOOL
@@ -3345,9 +3288,8 @@ Node* WasmGraphBuilder::BuildChangeInt32ToSmi(Node* value) {
Node* WasmGraphBuilder::BuildChangeUint31ToSmi(Node* value) {
return COMPRESS_POINTERS_BOOL
? gasm_->Word32Shl(value, BuildSmiShiftBitsConstant32())
- : graph()->NewNode(mcgraph()->machine()->WordShl(),
- Uint32ToUintptr(value),
- BuildSmiShiftBitsConstant());
+ : gasm_->WordShl(Uint32ToUintptr(value),
+ BuildSmiShiftBitsConstant());
}
Node* WasmGraphBuilder::BuildSmiShiftBitsConstant() {
@@ -3355,7 +3297,7 @@ Node* WasmGraphBuilder::BuildSmiShiftBitsConstant() {
}
Node* WasmGraphBuilder::BuildSmiShiftBitsConstant32() {
- return gasm_->Int32Constant(kSmiShiftSize + kSmiTagSize);
+ return Int32Constant(kSmiShiftSize + kSmiTagSize);
}
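The Smi helpers around here shift by kSmiShiftSize + kSmiTagSize; a sketch of the tag/untag operations, assuming a 64-bit build without pointer compression (31 + 1 = 32-bit shift):

    #include <cstdint>

    constexpr int kSmiTagSize = 1;
    constexpr int kSmiShiftSize = 31;

    intptr_t TagSmi(int32_t value) {
      return static_cast<intptr_t>(value) << (kSmiShiftSize + kSmiTagSize);
    }

    int32_t UntagSmi(intptr_t smi) {
      return static_cast<int32_t>(smi >> (kSmiShiftSize + kSmiTagSize));  // WordSar equivalent
    }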
Node* WasmGraphBuilder::BuildChangeSmiToInt32(Node* value) {
@@ -3370,18 +3312,16 @@ Node* WasmGraphBuilder::BuildChangeSmiToIntPtr(Node* value) {
value = BuildChangeSmiToInt32(value);
return BuildChangeInt32ToIntPtr(value);
}
- return graph()->NewNode(mcgraph()->machine()->WordSar(), value,
- BuildSmiShiftBitsConstant());
+ return gasm_->WordSar(value, BuildSmiShiftBitsConstant());
}
Node* WasmGraphBuilder::BuildConvertUint32ToSmiWithSaturation(Node* value,
uint32_t maxval) {
DCHECK(Smi::IsValid(maxval));
Node* max = mcgraph()->Uint32Constant(maxval);
- Node* check = graph()->NewNode(mcgraph()->machine()->Uint32LessThanOrEqual(),
- value, max);
+ Node* check = gasm_->Uint32LessThanOrEqual(value, max);
Node* valsmi = BuildChangeUint31ToSmi(value);
- Node* maxsmi = graph()->NewNode(mcgraph()->common()->NumberConstant(maxval));
+ Node* maxsmi = gasm_->NumberConstant(maxval);
Diamond d(graph(), mcgraph()->common(), check, BranchHint::kTrue);
d.Chain(control());
return d.Phi(MachineRepresentation::kTagged, valsmi, maxsmi);
@@ -3529,12 +3469,10 @@ void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
Node** offset_node) {
DCHECK_NOT_NULL(instance_node_);
if (global.mutability && global.imported) {
- *base_node = SetEffect(graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::UintPtr()),
- GetImportedMutableGlobals(),
- mcgraph()->Int32Constant(global.index * sizeof(Address)), effect(),
- control()));
- *offset_node = mcgraph()->Int32Constant(0);
+ *base_node =
+ gasm_->Load(MachineType::UintPtr(), GetImportedMutableGlobals(),
+ Int32Constant(global.index * sizeof(Address)));
+ *offset_node = Int32Constant(0);
} else {
if (globals_start_ == nullptr) {
// Load globals_start from the instance object at runtime.
@@ -3549,17 +3487,16 @@ void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
globals_start_ = graph()->NewNode(
mcgraph()->machine()->Load(MachineType::UintPtr()),
instance_node_.get(),
- mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(GlobalsStart)),
+ Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(GlobalsStart)),
graph()->start(), graph()->start());
}
*base_node = globals_start_.get();
- *offset_node = mcgraph()->Int32Constant(global.offset);
+ *offset_node = Int32Constant(global.offset);
if (mem_type == MachineType::Simd128() && global.offset != 0) {
// TODO(titzer,bbudge): code generation for SIMD memory offsets is broken.
- *base_node = graph()->NewNode(mcgraph()->machine()->IntAdd(), *base_node,
- *offset_node);
- *offset_node = mcgraph()->Int32Constant(0);
+ *base_node = gasm_->IntAdd(*base_node, *offset_node);
+ *offset_node = Int32Constant(0);
}
}
}
@@ -3574,20 +3511,16 @@ void WasmGraphBuilder::GetBaseAndOffsetForImportedMutableExternRefGlobal(
// For the offset we need the index of the global in the buffer, and then
// calculate the actual offset from the index. Load the index from the
// ImportedMutableGlobals array of the instance.
- Node* index = SetEffect(
- graph()->NewNode(mcgraph()->machine()->Load(MachineType::UintPtr()),
- GetImportedMutableGlobals(),
- mcgraph()->Int32Constant(global.index * sizeof(Address)),
- effect(), control()));
+ Node* index = gasm_->Load(MachineType::UintPtr(), GetImportedMutableGlobals(),
+ Int32Constant(global.index * sizeof(Address)));
// From the index, calculate the actual offset in the FixedArray. This
// is kHeaderSize + (index * kTaggedSize). kHeaderSize can be acquired with
// wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0).
Node* index_times_tagged_size =
- graph()->NewNode(mcgraph()->machine()->IntMul(), Uint32ToUintptr(index),
- mcgraph()->Int32Constant(kTaggedSize));
- *offset = graph()->NewNode(
- mcgraph()->machine()->IntAdd(), index_times_tagged_size,
+ gasm_->IntMul(Uint32ToUintptr(index), Int32Constant(kTaggedSize));
+ *offset = gasm_->IntAdd(
+ index_times_tagged_size,
mcgraph()->IntPtrConstant(
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0)));
}
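The offset computed above is header size plus index times tagged size; as a sketch (the header constant below is an illustrative stand-in for ElementOffsetInTaggedFixedArray(0), and a kTaggedSize of 4 assumes pointer compression):

    #include <cstddef>

    constexpr size_t kTaggedSize = 4;            // assumption: compressed pointers
    constexpr size_t kFixedArrayHeaderSize = 8;  // stand-in value

    size_t FixedArrayElementOffset(size_t index) {
      return kFixedArrayHeaderSize + index * kTaggedSize;
    }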
@@ -3607,9 +3540,9 @@ Node* WasmGraphBuilder::CurrentMemoryPages() {
Node* mem_size = instance_cache_->mem_size;
DCHECK_NOT_NULL(mem_size);
Node* result =
- graph()->NewNode(mcgraph()->machine()->WordShr(), mem_size,
- mcgraph()->Int32Constant(wasm::kWasmPageSizeLog2));
- result = BuildTruncateIntPtrToInt32(result);
+ gasm_->WordShr(mem_size, Int32Constant(wasm::kWasmPageSizeLog2));
+ result = env_->module->is_memory64 ? BuildChangeIntPtrToInt64(result)
+ : BuildTruncateIntPtrToInt32(result);
return result;
}
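memory.size is derived by shifting the byte size of linear memory right by the page-size log2 (wasm pages are 64 KiB); a one-liner sketch:

    #include <cstdint>

    constexpr unsigned kWasmPageSizeLog2 = 16;  // 64 KiB pages

    uintptr_t CurrentMemoryPages(uintptr_t mem_size_bytes) {
      return mem_size_bytes >> kWasmPageSizeLog2;
    }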
@@ -3644,15 +3577,12 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
}
inputs[count++] =
mcgraph()->ExternalConstant(ExternalReference::Create(f)); // ref
- inputs[count++] = mcgraph()->Int32Constant(fun->nargs); // arity
+ inputs[count++] = Int32Constant(fun->nargs); // arity
inputs[count++] = js_context; // js_context
inputs[count++] = effect();
inputs[count++] = control();
- Node* call = mcgraph()->graph()->NewNode(
- mcgraph()->common()->Call(call_descriptor), count, inputs);
- SetEffect(call);
- return call;
+ return gasm_->Call(call_descriptor, count, inputs);
}
Node* WasmGraphBuilder::BuildCallToRuntime(Runtime::FunctionId f,
@@ -3683,8 +3613,7 @@ Node* WasmGraphBuilder::GlobalGet(uint32_t index) {
Node* base = nullptr;
Node* offset = nullptr;
GetGlobalBaseAndOffset(mem_type, global, &base, &offset);
- Node* result = SetEffect(graph()->NewNode(
- mcgraph()->machine()->Load(mem_type), base, offset, effect(), control()));
+ Node* result = gasm_->Load(mem_type, base, offset);
#if defined(V8_TARGET_BIG_ENDIAN)
result = BuildChangeEndiannessLoad(result, mem_type, global.type);
#endif
@@ -3714,40 +3643,25 @@ Node* WasmGraphBuilder::GlobalSet(uint32_t index, Node* val) {
Node* base = nullptr;
Node* offset = nullptr;
GetGlobalBaseAndOffset(mem_type, global, &base, &offset);
- const Operator* op = mcgraph()->machine()->Store(
- StoreRepresentation(mem_type.representation(), kNoWriteBarrier));
+ auto store_rep =
+ StoreRepresentation(mem_type.representation(), kNoWriteBarrier);
#if defined(V8_TARGET_BIG_ENDIAN)
val = BuildChangeEndiannessStore(val, mem_type.representation(), global.type);
#endif
- return SetEffect(
- graph()->NewNode(op, base, offset, val, effect(), control()));
+
+ return gasm_->Store(store_rep, base, offset, val);
}
Node* WasmGraphBuilder::TableGet(uint32_t table_index, Node* index,
wasm::WasmCodePosition position) {
- auto call_descriptor = GetBuiltinCallDescriptor<WasmTableGetDescriptor>(
- this, StubCallMode::kCallWasmRuntimeStub);
- // A direct call to a wasm runtime stub defined in this module.
- // Just encode the stub index. This will be patched at relocation.
- Node* call_target = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmTableGet, RelocInfo::WASM_STUB_CALL);
-
- return SetEffectControl(graph()->NewNode(
- mcgraph()->common()->Call(call_descriptor), call_target,
- mcgraph()->IntPtrConstant(table_index), index, effect(), control()));
+ return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmTableGet,
+ gasm_->IntPtrConstant(table_index), index);
}
Node* WasmGraphBuilder::TableSet(uint32_t table_index, Node* index, Node* val,
wasm::WasmCodePosition position) {
- auto call_descriptor = GetBuiltinCallDescriptor<WasmTableSetDescriptor>(
- this, StubCallMode::kCallWasmRuntimeStub);
- // A direct call to a wasm runtime stub defined in this module.
- // Just encode the stub index. This will be patched at relocation.
- Node* call_target = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmTableSet, RelocInfo::WASM_STUB_CALL);
-
- return gasm_->Call(call_descriptor, call_target,
- gasm_->IntPtrConstant(table_index), index, val);
+ return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmTableSet,
+ gasm_->IntPtrConstant(table_index), index, val);
}
Node* WasmGraphBuilder::CheckBoundsAndAlignment(
@@ -3782,7 +3696,7 @@ Node* WasmGraphBuilder::CheckBoundsAndAlignment(
Node* cond =
gasm_->WordAnd(effective_offset, gasm_->IntPtrConstant(align_mask));
TrapIfFalse(wasm::kTrapUnalignedAccess,
- gasm_->Word32Equal(cond, gasm_->Int32Constant(0)), position);
+ gasm_->Word32Equal(cond, Int32Constant(0)), position);
return index;
}
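The alignment trap above fires when the low bits of the effective offset are non-zero; a sketch for a power-of-two access size:

    #include <cstdint>

    bool IsAligned(uintptr_t effective_offset, uint32_t access_size) {
      uintptr_t align_mask = access_size - 1;  // access_size is a power of two
      return (effective_offset & align_mask) == 0;
    }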
@@ -3816,8 +3730,8 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
// In memory64 mode on 32-bit systems, the upper 32 bits need to be zero to
// succeed the bounds check.
if (kSystemPointerSize == kInt32Size && env_->module->is_memory64) {
- Node* high_word = gasm_->TruncateInt64ToInt32(
- gasm_->Word64Shr(index, gasm_->Int32Constant(32)));
+ Node* high_word =
+ gasm_->TruncateInt64ToInt32(gasm_->Word64Shr(index, Int32Constant(32)));
TrapIfTrue(wasm::kTrapMemOutOfBounds, high_word, position);
// Only use the low word for the following bounds check.
index = gasm_->TruncateInt64ToInt32(index);
@@ -3918,7 +3832,7 @@ Node* WasmGraphBuilder::TraceFunctionExit(Vector<Node*> vals,
info = gasm_->StackSlot(size, size);
gasm_->Store(StoreRepresentation(rep, kNoWriteBarrier), info,
- gasm_->Int32Constant(0), vals[0]);
+ Int32Constant(0), vals[0]);
}
Node* call = BuildCallToRuntime(Runtime::kWasmTraceExit, &info, 1);
@@ -3937,17 +3851,15 @@ Node* WasmGraphBuilder::TraceMemoryOperation(bool is_store,
Node* effective_offset = gasm_->IntAdd(gasm_->UintPtrConstant(offset), index);
auto store = [&](int field_offset, MachineRepresentation rep, Node* data) {
gasm_->Store(StoreRepresentation(rep, kNoWriteBarrier), info,
- gasm_->Int32Constant(field_offset), data);
+ Int32Constant(field_offset), data);
};
// Store effective_offset, is_store, and mem_rep.
store(offsetof(wasm::MemoryTracingInfo, offset),
MachineType::PointerRepresentation(), effective_offset);
store(offsetof(wasm::MemoryTracingInfo, is_store),
- MachineRepresentation::kWord8,
- mcgraph()->Int32Constant(is_store ? 1 : 0));
+ MachineRepresentation::kWord8, Int32Constant(is_store ? 1 : 0));
store(offsetof(wasm::MemoryTracingInfo, mem_rep),
- MachineRepresentation::kWord8,
- mcgraph()->Int32Constant(static_cast<int>(rep)));
+ MachineRepresentation::kWord8, Int32Constant(static_cast<int>(rep)));
Node* args[] = {info};
Node* call =
@@ -4391,17 +4303,15 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
// stored value, which is conservative if misaligned. Technically, asm.js
// should never have misaligned accesses.
index = Uint32ToUintptr(index);
- Diamond bounds_check(
- graph(), mcgraph()->common(),
- graph()->NewNode(mcgraph()->machine()->UintLessThan(), index, mem_size),
- BranchHint::kTrue);
+ Diamond bounds_check(graph(), mcgraph()->common(),
+ gasm_->UintLessThan(index, mem_size), BranchHint::kTrue);
bounds_check.Chain(control());
if (untrusted_code_mitigations_) {
// Condition the index with the memory mask.
Node* mem_mask = instance_cache_->mem_mask;
DCHECK_NOT_NULL(mem_mask);
- index = graph()->NewNode(mcgraph()->machine()->WordAnd(), index, mem_mask);
+ index = gasm_->WordAnd(index, mem_mask);
}
Node* load = graph()->NewNode(mcgraph()->machine()->Load(type), mem_start,
@@ -4419,7 +4329,7 @@ Node* WasmGraphBuilder::Uint32ToUintptr(Node* node) {
uintptr_t value = matcher.ResolvedValue();
return mcgraph()->IntPtrConstant(bit_cast<intptr_t>(value));
}
- return graph()->NewNode(mcgraph()->machine()->ChangeUint32ToUint64(), node);
+ return gasm_->ChangeUint32ToUint64(node);
}
Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
@@ -4434,18 +4344,16 @@ Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
// Note that we check against the memory size ignoring the size of the
// stored value, which is conservative if misaligned. Technically, asm.js
// should never have misaligned accesses.
- Diamond bounds_check(
- graph(), mcgraph()->common(),
- graph()->NewNode(mcgraph()->machine()->Uint32LessThan(), index, mem_size),
- BranchHint::kTrue);
+ Diamond bounds_check(graph(), mcgraph()->common(),
+ gasm_->Uint32LessThan(index, mem_size),
+ BranchHint::kTrue);
bounds_check.Chain(control());
if (untrusted_code_mitigations_) {
// Condition the index with the memory mask.
Node* mem_mask = instance_cache_->mem_mask;
DCHECK_NOT_NULL(mem_mask);
- index =
- graph()->NewNode(mcgraph()->machine()->Word32And(), index, mem_mask);
+ index = gasm_->Word32And(index, mem_mask);
}
index = Uint32ToUintptr(index);
@@ -4552,13 +4460,13 @@ CallDescriptor* WasmGraphBuilder::GetI32AtomicWaitCallDescriptor() {
if (i32_atomic_wait_descriptor_) return i32_atomic_wait_descriptor_;
i32_atomic_wait_descriptor_ =
- GetBuiltinCallDescriptor<WasmI32AtomicWait64Descriptor>(
- this, StubCallMode::kCallWasmRuntimeStub);
+ GetBuiltinCallDescriptor(Builtins::kWasmI32AtomicWait64, zone_,
+ StubCallMode::kCallWasmRuntimeStub);
AddInt64LoweringReplacement(
i32_atomic_wait_descriptor_,
- GetBuiltinCallDescriptor<WasmI32AtomicWait32Descriptor>(
- this, StubCallMode::kCallWasmRuntimeStub));
+ GetBuiltinCallDescriptor(Builtins::kWasmI32AtomicWait32, zone_,
+ StubCallMode::kCallWasmRuntimeStub));
return i32_atomic_wait_descriptor_;
}
@@ -4567,13 +4475,13 @@ CallDescriptor* WasmGraphBuilder::GetI64AtomicWaitCallDescriptor() {
if (i64_atomic_wait_descriptor_) return i64_atomic_wait_descriptor_;
i64_atomic_wait_descriptor_ =
- GetBuiltinCallDescriptor<WasmI64AtomicWait64Descriptor>(
- this, StubCallMode::kCallWasmRuntimeStub);
+ GetBuiltinCallDescriptor(Builtins::kWasmI64AtomicWait64, zone_,
+ StubCallMode::kCallWasmRuntimeStub);
AddInt64LoweringReplacement(
i64_atomic_wait_descriptor_,
- GetBuiltinCallDescriptor<WasmI64AtomicWait32Descriptor>(
- this, StubCallMode::kCallWasmRuntimeStub));
+ GetBuiltinCallDescriptor(Builtins::kWasmI64AtomicWait32, zone_,
+ StubCallMode::kCallWasmRuntimeStub));
return i64_atomic_wait_descriptor_;
}
@@ -4690,6 +4598,15 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
return BuildF64x2NearestInt(inputs[0]);
return graph()->NewNode(mcgraph()->machine()->F64x2NearestInt(),
inputs[0]);
+ case wasm::kExprF64x2ConvertLowI32x4S:
+ return graph()->NewNode(mcgraph()->machine()->F64x2ConvertLowI32x4S(),
+ inputs[0]);
+ case wasm::kExprF64x2ConvertLowI32x4U:
+ return graph()->NewNode(mcgraph()->machine()->F64x2ConvertLowI32x4U(),
+ inputs[0]);
+ case wasm::kExprF64x2PromoteLowF32x4:
+ return graph()->NewNode(mcgraph()->machine()->F64x2PromoteLowF32x4(),
+ inputs[0]);
case wasm::kExprF32x4Splat:
return graph()->NewNode(mcgraph()->machine()->F32x4Splat(), inputs[0]);
case wasm::kExprF32x4SConvertI32x4:
@@ -4784,8 +4701,13 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
return BuildF32x4NearestInt(inputs[0]);
return graph()->NewNode(mcgraph()->machine()->F32x4NearestInt(),
inputs[0]);
+ case wasm::kExprF32x4DemoteF64x2Zero:
+ return graph()->NewNode(mcgraph()->machine()->F32x4DemoteF64x2Zero(),
+ inputs[0]);
case wasm::kExprI64x2Splat:
return graph()->NewNode(mcgraph()->machine()->I64x2Splat(), inputs[0]);
+ case wasm::kExprI64x2Abs:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Abs(), inputs[0]);
case wasm::kExprI64x2Neg:
return graph()->NewNode(mcgraph()->machine()->I64x2Neg(), inputs[0]);
case wasm::kExprI64x2SConvertI32x4Low:
@@ -4820,6 +4742,21 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI64x2Eq:
return graph()->NewNode(mcgraph()->machine()->I64x2Eq(), inputs[0],
inputs[1]);
+ case wasm::kExprI64x2Ne:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Ne(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2LtS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GtS(), inputs[1],
+ inputs[0]);
+ case wasm::kExprI64x2LeS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GeS(), inputs[1],
+ inputs[0]);
+ case wasm::kExprI64x2GtS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GtS(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2GeS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GeS(), inputs[0],
+ inputs[1]);
case wasm::kExprI64x2ShrU:
return graph()->NewNode(mcgraph()->machine()->I64x2ShrU(), inputs[0],
inputs[1]);
@@ -4951,6 +4888,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI32x4ExtAddPairwiseI16x8U:
return graph()->NewNode(mcgraph()->machine()->I32x4ExtAddPairwiseI16x8U(),
inputs[0]);
+ case wasm::kExprI32x4TruncSatF64x2SZero:
+ return graph()->NewNode(mcgraph()->machine()->I32x4TruncSatF64x2SZero(),
+ inputs[0]);
+ case wasm::kExprI32x4TruncSatF64x2UZero:
+ return graph()->NewNode(mcgraph()->machine()->I32x4TruncSatF64x2UZero(),
+ inputs[0]);
case wasm::kExprI16x8Splat:
return graph()->NewNode(mcgraph()->machine()->I16x8Splat(), inputs[0]);
case wasm::kExprI16x8SConvertI8x16Low:
@@ -5190,16 +5133,14 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprS128AndNot:
return graph()->NewNode(mcgraph()->machine()->S128AndNot(), inputs[0],
inputs[1]);
- case wasm::kExprV32x4AnyTrue:
- return graph()->NewNode(mcgraph()->machine()->V32x4AnyTrue(), inputs[0]);
+ case wasm::kExprV64x2AllTrue:
+ return graph()->NewNode(mcgraph()->machine()->V64x2AllTrue(), inputs[0]);
case wasm::kExprV32x4AllTrue:
return graph()->NewNode(mcgraph()->machine()->V32x4AllTrue(), inputs[0]);
- case wasm::kExprV16x8AnyTrue:
- return graph()->NewNode(mcgraph()->machine()->V16x8AnyTrue(), inputs[0]);
case wasm::kExprV16x8AllTrue:
return graph()->NewNode(mcgraph()->machine()->V16x8AllTrue(), inputs[0]);
- case wasm::kExprV8x16AnyTrue:
- return graph()->NewNode(mcgraph()->machine()->V8x16AnyTrue(), inputs[0]);
+ case wasm::kExprV128AnyTrue:
+ return graph()->NewNode(mcgraph()->machine()->V128AnyTrue(), inputs[0]);
case wasm::kExprV8x16AllTrue:
return graph()->NewNode(mcgraph()->machine()->V8x16AllTrue(), inputs[0]);
case wasm::kExprI8x16Swizzle:
@@ -5426,15 +5367,9 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
gasm_->IntAdd(gasm_->UintPtrConstant(capped_offset), index);
switch (opcode) {
- case wasm::kExprAtomicNotify: {
- auto* call_descriptor =
- GetBuiltinCallDescriptor<WasmAtomicNotifyDescriptor>(
- this, StubCallMode::kCallWasmRuntimeStub);
- Node* call_target = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmAtomicNotify, RelocInfo::WASM_STUB_CALL);
- return gasm_->Call(call_descriptor, call_target, effective_offset,
- inputs[1]);
- }
+ case wasm::kExprAtomicNotify:
+ return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmAtomicNotify,
+ effective_offset, inputs[1]);
case wasm::kExprI32AtomicWait: {
auto* call_descriptor = GetI32AtomicWaitCallDescriptor();
@@ -5479,8 +5414,8 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
// validation.
DCHECK_LT(data_segment_index, env_->module->num_declared_data_segments);
- Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
- ExternalReference::wasm_memory_init()));
+ Node* function =
+ gasm_->ExternalConstant(ExternalReference::wasm_memory_init());
Node* stack_slot = StoreArgsInStackSlot(
{{MachineType::PointerRepresentation(), instance_node_.get()},
@@ -5503,12 +5438,11 @@ Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index,
Node* seg_size_array =
LOAD_INSTANCE_FIELD(DataSegmentSizes, MachineType::Pointer());
STATIC_ASSERT(wasm::kV8MaxWasmDataSegments <= kMaxUInt32 >> 2);
- const Operator* store_op = mcgraph()->machine()->Store(
- StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier));
- return SetEffect(
- graph()->NewNode(store_op, seg_size_array,
- mcgraph()->IntPtrConstant(data_segment_index << 2),
- mcgraph()->Int32Constant(0), effect(), control()));
+ auto store_rep =
+ StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier);
+ return gasm_->Store(store_rep, seg_size_array,
+ mcgraph()->IntPtrConstant(data_segment_index << 2),
+ Int32Constant(0));
}
Node* WasmGraphBuilder::StoreArgsInStackSlot(
@@ -5526,7 +5460,7 @@ Node* WasmGraphBuilder::StoreArgsInStackSlot(
MachineRepresentation type = arg.first;
Node* value = arg.second;
gasm_->Store(StoreRepresentation(type, kNoWriteBarrier), stack_slot,
- mcgraph()->Int32Constant(offset), value);
+ Int32Constant(offset), value);
offset += ElementSizeInBytes(type);
}
return stack_slot;
@@ -5534,8 +5468,8 @@ Node* WasmGraphBuilder::StoreArgsInStackSlot(
Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
wasm::WasmCodePosition position) {
- Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
- ExternalReference::wasm_memory_copy()));
+ Node* function =
+ gasm_->ExternalConstant(ExternalReference::wasm_memory_copy());
Node* stack_slot = StoreArgsInStackSlot(
{{MachineType::PointerRepresentation(), instance_node_.get()},
@@ -5551,8 +5485,8 @@ Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
Node* WasmGraphBuilder::MemoryFill(Node* dst, Node* value, Node* size,
wasm::WasmCodePosition position) {
- Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
- ExternalReference::wasm_memory_fill()));
+ Node* function =
+ gasm_->ExternalConstant(ExternalReference::wasm_memory_fill());
Node* stack_slot = StoreArgsInStackSlot(
{{MachineType::PointerRepresentation(), instance_node_.get()},
@@ -5570,18 +5504,9 @@ Node* WasmGraphBuilder::TableInit(uint32_t table_index,
uint32_t elem_segment_index, Node* dst,
Node* src, Node* size,
wasm::WasmCodePosition position) {
- auto call_descriptor = GetBuiltinCallDescriptor<WasmTableInitDescriptor>(
- this, StubCallMode::kCallWasmRuntimeStub);
-
- intptr_t target = wasm::WasmCode::kWasmTableInit;
- Node* call_target =
- mcgraph()->RelocatableIntPtrConstant(target, RelocInfo::WASM_STUB_CALL);
-
- return gasm_->Call(
- call_descriptor, call_target, dst, src, size,
- graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)),
- graph()->NewNode(
- mcgraph()->common()->NumberConstant(elem_segment_index)));
+ return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmTableInit, dst, src, size,
+ gasm_->NumberConstant(table_index),
+ gasm_->NumberConstant(elem_segment_index));
}
Node* WasmGraphBuilder::ElemDrop(uint32_t elem_segment_index,
@@ -5592,35 +5517,25 @@ Node* WasmGraphBuilder::ElemDrop(uint32_t elem_segment_index,
Node* dropped_elem_segments =
LOAD_INSTANCE_FIELD(DroppedElemSegments, MachineType::Pointer());
- const Operator* store_op = mcgraph()->machine()->Store(
- StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier));
- return SetEffect(
- graph()->NewNode(store_op, dropped_elem_segments,
- mcgraph()->IntPtrConstant(elem_segment_index),
- mcgraph()->Int32Constant(1), effect(), control()));
+ auto store_rep =
+ StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier);
+ return gasm_->Store(store_rep, dropped_elem_segments, elem_segment_index,
+ Int32Constant(1));
}
Node* WasmGraphBuilder::TableCopy(uint32_t table_dst_index,
uint32_t table_src_index, Node* dst,
Node* src, Node* size,
wasm::WasmCodePosition position) {
- auto call_descriptor = GetBuiltinCallDescriptor<WasmTableCopyDescriptor>(
- this, StubCallMode::kCallWasmRuntimeStub);
-
- intptr_t target = wasm::WasmCode::kWasmTableCopy;
- Node* call_target =
- mcgraph()->RelocatableIntPtrConstant(target, RelocInfo::WASM_STUB_CALL);
-
- return gasm_->Call(
- call_descriptor, call_target, dst, src, size,
- graph()->NewNode(mcgraph()->common()->NumberConstant(table_dst_index)),
- graph()->NewNode(mcgraph()->common()->NumberConstant(table_src_index)));
+ return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmTableCopy, dst, src, size,
+ gasm_->NumberConstant(table_dst_index),
+ gasm_->NumberConstant(table_src_index));
}
Node* WasmGraphBuilder::TableGrow(uint32_t table_index, Node* value,
Node* delta) {
Node* args[] = {
- graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)), value,
+ gasm_->NumberConstant(table_index), value,
BuildConvertUint32ToSmiWithSaturation(delta, FLAG_wasm_max_table_size)};
Node* result =
BuildCallToRuntime(Runtime::kWasmTableGrow, args, arraysize(args));
@@ -5643,7 +5558,7 @@ Node* WasmGraphBuilder::TableSize(uint32_t table_index) {
Node* WasmGraphBuilder::TableFill(uint32_t table_index, Node* start,
Node* value, Node* count) {
Node* args[] = {
- graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)),
+ gasm_->NumberConstant(table_index),
BuildConvertUint32ToSmiWithSaturation(start, FLAG_wasm_max_table_size),
value,
BuildConvertUint32ToSmiWithSaturation(count, FLAG_wasm_max_table_size)};
@@ -5654,7 +5569,7 @@ Node* WasmGraphBuilder::TableFill(uint32_t table_index, Node* start,
Node* WasmGraphBuilder::StructNewWithRtt(uint32_t struct_index,
const wasm::StructType* type,
Node* rtt, Vector<Node*> fields) {
- Node* s = CALL_BUILTIN(WasmAllocateStructWithRtt, rtt);
+ Node* s = gasm_->CallBuiltin(Builtins::kWasmAllocateStructWithRtt, rtt);
for (uint32_t i = 0; i < type->field_count(); i++) {
gasm_->StoreStructField(s, type, i, fields[i]);
}
@@ -5671,14 +5586,14 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
length, gasm_->Uint32Constant(wasm::kV8MaxWasmArrayLength)),
position);
wasm::ValueType element_type = type->element_type();
- Node* a = CALL_BUILTIN(WasmAllocateArrayWithRtt, rtt, length,
- graph()->NewNode(mcgraph()->common()->Int32Constant(
- element_type.element_size_bytes())));
+ Node* a =
+ gasm_->CallBuiltin(Builtins::kWasmAllocateArrayWithRtt, rtt, length,
+ Int32Constant(element_type.element_size_bytes()));
auto loop = gasm_->MakeLoopLabel(MachineRepresentation::kWord32);
auto done = gasm_->MakeLabel();
- Node* start_offset = gasm_->Int32Constant(
- wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize));
- Node* element_size = gasm_->Int32Constant(element_type.element_size_bytes());
+ Node* start_offset =
+ Int32Constant(wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize));
+ Node* element_size = Int32Constant(element_type.element_size_bytes());
Node* end_offset =
gasm_->Int32Add(start_offset, gasm_->Int32Mul(element_size, length));
// Loops need the graph's end to have been set up.
@@ -5698,42 +5613,15 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
return a;
}
-Node* WasmGraphBuilder::RttCanon(wasm::HeapType type) {
- RootIndex index;
- switch (type.representation()) {
- case wasm::HeapType::kEq:
- index = RootIndex::kWasmRttEqrefMap;
- break;
- case wasm::HeapType::kExtern:
- index = RootIndex::kWasmRttExternrefMap;
- break;
- case wasm::HeapType::kFunc:
- index = RootIndex::kWasmRttFuncrefMap;
- break;
- case wasm::HeapType::kI31:
- index = RootIndex::kWasmRttI31refMap;
- break;
- case wasm::HeapType::kAny:
- index = RootIndex::kWasmRttAnyrefMap;
- break;
- case wasm::HeapType::kBottom:
- UNREACHABLE();
- default: {
- // User-defined type.
- Node* maps_list =
- LOAD_INSTANCE_FIELD(ManagedObjectMaps, MachineType::TaggedPointer());
- return LOAD_FIXED_ARRAY_SLOT_PTR(maps_list, type.ref_index());
- }
- }
- return LOAD_FULL_POINTER(BuildLoadIsolateRoot(),
- IsolateData::root_slot_offset(index));
+Node* WasmGraphBuilder::RttCanon(uint32_t type_index) {
+ Node* maps_list =
+ LOAD_INSTANCE_FIELD(ManagedObjectMaps, MachineType::TaggedPointer());
+ return LOAD_FIXED_ARRAY_SLOT_PTR(maps_list, type_index);
}
-Node* WasmGraphBuilder::RttSub(wasm::HeapType type, Node* parent_rtt) {
- return CALL_BUILTIN(WasmAllocateRtt,
- graph()->NewNode(mcgraph()->common()->Int32Constant(
- type.representation())),
- parent_rtt);
+Node* WasmGraphBuilder::RttSub(uint32_t type_index, Node* parent_rtt) {
+ return gasm_->CallBuiltin(Builtins::kWasmAllocateRtt,
+ Int32Constant(type_index), parent_rtt);
}
void AssertFalse(MachineGraph* mcgraph, GraphAssembler* gasm, Node* condition) {
@@ -5748,169 +5636,145 @@ void AssertFalse(MachineGraph* mcgraph, GraphAssembler* gasm, Node* condition) {
#endif
}
-Node* WasmGraphBuilder::RefTest(Node* object, Node* rtt,
- ObjectReferenceKnowledge config) {
- auto done = gasm_->MakeLabel(MachineRepresentation::kWord32);
- if (config.object_can_be_i31) {
- if (config.rtt_is_i31) {
- return gasm_->IsI31(object);
- }
- gasm_->GotoIf(gasm_->IsI31(object), &done, gasm_->Int32Constant(0));
- } else {
- AssertFalse(mcgraph(), gasm_.get(), gasm_->IsI31(object));
- }
+WasmGraphBuilder::Callbacks WasmGraphBuilder::TestCallbacks(
+ GraphAssemblerLabel<1>* label) {
+ return {// succeed_if
+ [=](Node* condition, BranchHint hint) -> void {
+ gasm_->GotoIf(condition, label, hint, Int32Constant(1));
+ },
+ // fail_if
+ [=](Node* condition, BranchHint hint) -> void {
+ gasm_->GotoIf(condition, label, hint, Int32Constant(0));
+ },
+ // fail_if_not
+ [=](Node* condition, BranchHint hint) -> void {
+ gasm_->GotoIfNot(condition, label, hint, Int32Constant(0));
+ }};
+}
+
+WasmGraphBuilder::Callbacks WasmGraphBuilder::CastCallbacks(
+ GraphAssemblerLabel<0>* label, wasm::WasmCodePosition position) {
+ return {// succeed_if
+ [=](Node* condition, BranchHint hint) -> void {
+ gasm_->GotoIf(condition, label, hint);
+ },
+ // fail_if
+ [=](Node* condition, BranchHint hint) -> void {
+ TrapIfTrue(wasm::kTrapIllegalCast, condition, position);
+ },
+ // fail_if_not
+ [=](Node* condition, BranchHint hint) -> void {
+ TrapIfFalse(wasm::kTrapIllegalCast, condition, position);
+ }};
+}
+
+WasmGraphBuilder::Callbacks WasmGraphBuilder::BranchCallbacks(
+ SmallNodeVector& no_match_controls, SmallNodeVector& no_match_effects,
+ SmallNodeVector& match_controls, SmallNodeVector& match_effects) {
+ return {
+ // succeed_if
+ [&](Node* condition, BranchHint hint) -> void {
+ Node* branch = graph()->NewNode(mcgraph()->common()->Branch(hint),
+ condition, control());
+ match_controls.emplace_back(
+ graph()->NewNode(mcgraph()->common()->IfTrue(), branch));
+ match_effects.emplace_back(effect());
+ SetControl(graph()->NewNode(mcgraph()->common()->IfFalse(), branch));
+ },
+ // fail_if
+ [&](Node* condition, BranchHint hint) -> void {
+ Node* branch = graph()->NewNode(mcgraph()->common()->Branch(hint),
+ condition, control());
+ no_match_controls.emplace_back(
+ graph()->NewNode(mcgraph()->common()->IfTrue(), branch));
+ no_match_effects.emplace_back(effect());
+ SetControl(graph()->NewNode(mcgraph()->common()->IfFalse(), branch));
+ },
+ // fail_if_not
+ [&](Node* condition, BranchHint hint) -> void {
+ Node* branch = graph()->NewNode(mcgraph()->common()->Branch(hint),
+ condition, control());
+ no_match_controls.emplace_back(
+ graph()->NewNode(mcgraph()->common()->IfFalse(), branch));
+ no_match_effects.emplace_back(effect());
+ SetControl(graph()->NewNode(mcgraph()->common()->IfTrue(), branch));
+ }};
+}
+
+void WasmGraphBuilder::TypeCheck(
+ Node* object, Node* rtt, WasmGraphBuilder::ObjectReferenceKnowledge config,
+ bool null_succeeds, Callbacks callbacks) {
if (config.object_can_be_null) {
- gasm_->GotoIf(gasm_->WordEqual(object, RefNull()), &done,
- gasm_->Int32Constant(0));
+ (null_succeeds ? callbacks.succeed_if : callbacks.fail_if)(
+ gasm_->WordEqual(object, RefNull()), BranchHint::kFalse);
}
Node* map = gasm_->LoadMap(object);
- gasm_->GotoIf(gasm_->TaggedEqual(map, rtt), &done, gasm_->Int32Constant(1));
- if (!config.object_must_be_data_ref) {
- gasm_->GotoIfNot(gasm_->IsDataRefMap(map), &done, gasm_->Int32Constant(0));
+ if (config.reference_kind == kFunction) {
+ // Currently, the only way for a function to match an rtt is if its map
+ // is equal to that rtt.
+ callbacks.fail_if_not(gasm_->TaggedEqual(map, rtt), BranchHint::kTrue);
+ return;
}
+
+ DCHECK(config.reference_kind == kArrayOrStruct);
+
+ callbacks.succeed_if(gasm_->TaggedEqual(map, rtt), BranchHint::kTrue);
+
Node* type_info = gasm_->LoadWasmTypeInfo(map);
Node* supertypes = gasm_->LoadSupertypes(type_info);
- Node* length =
+ Node* supertypes_length =
BuildChangeSmiToInt32(gasm_->LoadFixedArrayLengthAsSmi(supertypes));
- gasm_->GotoIfNot(
- gasm_->Uint32LessThan(gasm_->Int32Constant(config.rtt_depth), length),
- &done, gasm_->Int32Constant(0));
+ Node* rtt_depth =
+ config.rtt_depth >= 0
+ ? Int32Constant(config.rtt_depth)
+ : BuildChangeSmiToInt32(gasm_->LoadFixedArrayLengthAsSmi(
+ gasm_->LoadSupertypes(gasm_->LoadWasmTypeInfo(rtt))));
+ callbacks.fail_if_not(gasm_->Uint32LessThan(rtt_depth, supertypes_length),
+ BranchHint::kTrue);
Node* maybe_match = gasm_->LoadFixedArrayElement(
- supertypes, config.rtt_depth, MachineType::TaggedPointer());
- gasm_->Goto(&done, gasm_->TaggedEqual(maybe_match, rtt));
- gasm_->Bind(&done);
+ supertypes, rtt_depth, MachineType::TaggedPointer());
- return done.PhiAt(0);
+ callbacks.fail_if_not(gasm_->TaggedEqual(maybe_match, rtt),
+ BranchHint::kTrue);
}
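TypeCheck above boils down to an exact-map fast path plus a lookup in the supertype list at the rtt's depth; a rough sketch with stand-in types (Rtt and TypeInfo are illustrative, not V8 classes):

    #include <cstddef>
    #include <vector>

    struct Rtt { int id; };
    struct TypeInfo { std::vector<const Rtt*> supertypes; };

    bool RttSubtypeCheck(const Rtt* object_map, const TypeInfo& info,
                         const Rtt* rtt, size_t rtt_depth) {
      if (object_map == rtt) return true;                  // exact match fast path
      if (rtt_depth >= info.supertypes.size()) return false;
      return info.supertypes[rtt_depth] == rtt;            // supertype at that depth
    }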
-Node* WasmGraphBuilder::RefCast(Node* object, Node* rtt,
- ObjectReferenceKnowledge config,
- wasm::WasmCodePosition position) {
- if (config.object_can_be_i31) {
- if (config.rtt_is_i31) {
- TrapIfFalse(wasm::kTrapIllegalCast, gasm_->IsI31(object), position);
- return object;
- } else {
- TrapIfTrue(wasm::kTrapIllegalCast, gasm_->IsI31(object), position);
- }
- } else {
- AssertFalse(mcgraph(), gasm_.get(), gasm_->IsI31(object));
- }
- if (config.object_can_be_null) {
- TrapIfTrue(wasm::kTrapIllegalCast, gasm_->WordEqual(object, RefNull()),
- position);
+void WasmGraphBuilder::DataCheck(Node* object, bool object_can_be_null,
+ Callbacks callbacks) {
+ if (object_can_be_null) {
+ callbacks.fail_if(gasm_->WordEqual(object, RefNull()), BranchHint::kFalse);
}
+ callbacks.fail_if(gasm_->IsI31(object), BranchHint::kFalse);
Node* map = gasm_->LoadMap(object);
- auto done = gasm_->MakeLabel();
- gasm_->GotoIf(gasm_->TaggedEqual(map, rtt), &done);
- if (!config.object_must_be_data_ref) {
- TrapIfFalse(wasm::kTrapIllegalCast, gasm_->IsDataRefMap(map), position);
- }
- Node* type_info = gasm_->LoadWasmTypeInfo(map);
- Node* supertypes = gasm_->LoadSupertypes(type_info);
- Node* length =
- BuildChangeSmiToInt32(gasm_->LoadFixedArrayLengthAsSmi(supertypes));
- TrapIfFalse(
- wasm::kTrapIllegalCast,
- gasm_->Uint32LessThan(gasm_->Int32Constant(config.rtt_depth), length),
- position);
- Node* maybe_match = gasm_->LoadFixedArrayElement(
- supertypes, config.rtt_depth, MachineType::TaggedPointer());
- TrapIfFalse(wasm::kTrapIllegalCast, gasm_->TaggedEqual(maybe_match, rtt),
- position);
- gasm_->Goto(&done);
- gasm_->Bind(&done);
- return object;
+ callbacks.fail_if_not(gasm_->IsDataRefMap(map), BranchHint::kTrue);
}
-Node* WasmGraphBuilder::BrOnCast(Node* object, Node* rtt,
- ObjectReferenceKnowledge config,
- Node** match_control, Node** match_effect,
- Node** no_match_control,
- Node** no_match_effect) {
- // We have up to 5 control nodes to merge; the EffectPhi needs an additional
- // input.
- base::SmallVector<Node*, 5> no_match_controls;
- base::SmallVector<Node*, 6> no_match_effects;
- // We always have 2 match_controls; use the same mechanism for uniformity.
- base::SmallVector<Node*, 2> match_controls;
- base::SmallVector<Node*, 3> match_effects;
-
- Node* is_i31 = gasm_->IsI31(object);
- if (config.object_can_be_i31) {
- if (config.rtt_is_i31) {
- BranchExpectFalse(is_i31, match_control, no_match_control);
- return nullptr;
- } else {
- Node* i31_branch = graph()->NewNode(
- mcgraph()->common()->Branch(BranchHint::kFalse), is_i31, control());
- SetControl(graph()->NewNode(mcgraph()->common()->IfFalse(), i31_branch));
- no_match_controls.emplace_back(
- graph()->NewNode(mcgraph()->common()->IfTrue(), i31_branch));
- no_match_effects.emplace_back(effect());
- }
- } else {
- AssertFalse(mcgraph(), gasm_.get(), is_i31);
+void WasmGraphBuilder::FuncCheck(Node* object, bool object_can_be_null,
+ Callbacks callbacks) {
+ if (object_can_be_null) {
+ callbacks.fail_if(gasm_->WordEqual(object, RefNull()), BranchHint::kFalse);
}
+ callbacks.fail_if(gasm_->IsI31(object), BranchHint::kFalse);
+ callbacks.fail_if_not(gasm_->HasInstanceType(object, JS_FUNCTION_TYPE),
+ BranchHint::kTrue);
+}
- if (config.object_can_be_null) {
- Node* null_branch =
- graph()->NewNode(mcgraph()->common()->Branch(BranchHint::kFalse),
- gasm_->WordEqual(object, RefNull()), control());
- SetControl(graph()->NewNode(mcgraph()->common()->IfFalse(), null_branch));
- no_match_controls.emplace_back(
- graph()->NewNode(mcgraph()->common()->IfTrue(), null_branch));
- no_match_effects.emplace_back(effect());
- }
+Node* WasmGraphBuilder::BrOnCastAbs(
+ Node** match_control, Node** match_effect, Node** no_match_control,
+ Node** no_match_effect, std::function<void(Callbacks)> type_checker) {
+ SmallNodeVector no_match_controls, no_match_effects, match_controls,
+ match_effects;
- // At this point, {object} is neither null nor an i31ref/Smi.
- Node* map = gasm_->LoadMap(object);
- Node* exact_match =
- graph()->NewNode(mcgraph()->common()->Branch(BranchHint::kTrue),
- gasm_->TaggedEqual(map, rtt), control());
- match_controls.emplace_back(
- graph()->NewNode(mcgraph()->common()->IfTrue(), exact_match));
- match_effects.emplace_back(effect());
- SetControl(graph()->NewNode(mcgraph()->common()->IfFalse(), exact_match));
- if (!config.object_must_be_data_ref) {
- Node* is_data_ref =
- graph()->NewNode(mcgraph()->common()->Branch(BranchHint::kTrue),
- gasm_->IsDataRefMap(map), control());
- no_match_controls.emplace_back(
- graph()->NewNode(mcgraph()->common()->IfFalse(), is_data_ref));
- no_match_effects.emplace_back(effect());
- SetControl(graph()->NewNode(mcgraph()->common()->IfTrue(), is_data_ref));
- }
- Node* type_info = gasm_->LoadWasmTypeInfo(map);
- Node* supertypes = gasm_->LoadSupertypes(type_info);
- Node* length =
- BuildChangeSmiToInt32(gasm_->LoadFixedArrayLengthAsSmi(supertypes));
- Node* length_sufficient = graph()->NewNode(
- mcgraph()->common()->Branch(BranchHint::kTrue),
- gasm_->Uint32LessThan(gasm_->Int32Constant(config.rtt_depth), length),
- control());
- no_match_controls.emplace_back(
- graph()->NewNode(mcgraph()->common()->IfFalse(), length_sufficient));
- no_match_effects.emplace_back(effect());
- SetControl(
- graph()->NewNode(mcgraph()->common()->IfTrue(), length_sufficient));
- Node* maybe_match = gasm_->LoadFixedArrayElement(
- supertypes, config.rtt_depth, MachineType::TaggedPointer());
- Node* supertype_match =
- graph()->NewNode(mcgraph()->common()->Branch(BranchHint::kTrue),
- gasm_->TaggedEqual(maybe_match, rtt), control());
- match_controls.emplace_back(
- graph()->NewNode(mcgraph()->common()->IfTrue(), supertype_match));
+ type_checker(BranchCallbacks(no_match_controls, no_match_effects,
+ match_controls, match_effects));
+
+ match_controls.emplace_back(control());
match_effects.emplace_back(effect());
- no_match_controls.emplace_back(
- graph()->NewNode(mcgraph()->common()->IfFalse(), supertype_match));
- no_match_effects.emplace_back(effect());
// Wire up the control/effect nodes.
unsigned count = static_cast<unsigned>(match_controls.size());
- DCHECK_EQ(2, count);
+ DCHECK_EQ(match_controls.size(), match_effects.size());
*match_control = Merge(count, match_controls.data());
// EffectPhis need their control dependency as an additional input.
match_effects.emplace_back(*match_control);
@@ -5928,6 +5792,118 @@ Node* WasmGraphBuilder::BrOnCast(Node* object, Node* rtt,
return nullptr;
}
+Node* WasmGraphBuilder::RefTest(Node* object, Node* rtt,
+ ObjectReferenceKnowledge config) {
+ auto done = gasm_->MakeLabel(MachineRepresentation::kWord32);
+ TypeCheck(object, rtt, config, false, TestCallbacks(&done));
+ gasm_->Goto(&done, Int32Constant(1));
+ gasm_->Bind(&done);
+ return done.PhiAt(0);
+}
+
+Node* WasmGraphBuilder::RefCast(Node* object, Node* rtt,
+ ObjectReferenceKnowledge config,
+ wasm::WasmCodePosition position) {
+ auto done = gasm_->MakeLabel();
+ TypeCheck(object, rtt, config, true, CastCallbacks(&done, position));
+ gasm_->Goto(&done);
+ gasm_->Bind(&done);
+ return object;
+}
+
+Node* WasmGraphBuilder::BrOnCast(Node* object, Node* rtt,
+ ObjectReferenceKnowledge config,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control,
+ Node** no_match_effect) {
+ return BrOnCastAbs(match_control, match_effect, no_match_control,
+ no_match_effect, [=](Callbacks callbacks) -> void {
+ return TypeCheck(object, rtt, config, false, callbacks);
+ });
+}
+
+Node* WasmGraphBuilder::RefIsData(Node* object, bool object_can_be_null) {
+ auto done = gasm_->MakeLabel(MachineRepresentation::kWord32);
+ DataCheck(object, object_can_be_null, TestCallbacks(&done));
+ gasm_->Goto(&done, Int32Constant(1));
+ gasm_->Bind(&done);
+ return done.PhiAt(0);
+}
+
+Node* WasmGraphBuilder::RefAsData(Node* object, bool object_can_be_null,
+ wasm::WasmCodePosition position) {
+ auto done = gasm_->MakeLabel();
+ DataCheck(object, object_can_be_null, CastCallbacks(&done, position));
+ gasm_->Goto(&done);
+ gasm_->Bind(&done);
+ return object;
+}
+
+Node* WasmGraphBuilder::BrOnData(Node* object, Node* /*rtt*/,
+ ObjectReferenceKnowledge config,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control,
+ Node** no_match_effect) {
+ return BrOnCastAbs(match_control, match_effect, no_match_control,
+ no_match_effect, [=](Callbacks callbacks) -> void {
+ return DataCheck(object, config.object_can_be_null,
+ callbacks);
+ });
+}
+
+Node* WasmGraphBuilder::RefIsFunc(Node* object, bool object_can_be_null) {
+ auto done = gasm_->MakeLabel(MachineRepresentation::kWord32);
+ FuncCheck(object, object_can_be_null, TestCallbacks(&done));
+ gasm_->Goto(&done, Int32Constant(1));
+ gasm_->Bind(&done);
+ return done.PhiAt(0);
+}
+
+Node* WasmGraphBuilder::RefAsFunc(Node* object, bool object_can_be_null,
+ wasm::WasmCodePosition position) {
+ auto done = gasm_->MakeLabel();
+ FuncCheck(object, object_can_be_null, CastCallbacks(&done, position));
+ gasm_->Goto(&done);
+ gasm_->Bind(&done);
+ return object;
+}
+
+Node* WasmGraphBuilder::BrOnFunc(Node* object, Node* /*rtt*/,
+ ObjectReferenceKnowledge config,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control,
+ Node** no_match_effect) {
+ return BrOnCastAbs(match_control, match_effect, no_match_control,
+ no_match_effect, [=](Callbacks callbacks) -> void {
+ return FuncCheck(object, config.object_can_be_null,
+ callbacks);
+ });
+}
+
+Node* WasmGraphBuilder::RefIsI31(Node* object) { return gasm_->IsI31(object); }
+
+Node* WasmGraphBuilder::RefAsI31(Node* object,
+ wasm::WasmCodePosition position) {
+ TrapIfFalse(wasm::kTrapIllegalCast, gasm_->IsI31(object), position);
+ return object;
+}
+
+Node* WasmGraphBuilder::BrOnI31(Node* object, Node* /* rtt */,
+ ObjectReferenceKnowledge /* config */,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control,
+ Node** no_match_effect) {
+ gasm_->Branch(gasm_->IsI31(object), match_control, no_match_control,
+ BranchHint::kTrue);
+
+ SetControl(*no_match_control);
+ *match_effect = effect();
+ *no_match_effect = effect();
+
+ // Unused return value, needed for typing of BUILD in graph-builder-interface.
+ return nullptr;
+}
+
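
The new entry points above all funnel into one of the shared check routines and differ only in how the result is consumed: RefTest materializes a 0/1 through a word32 label, RefCast traps on failure, and the BrOn* variants hand their edges to BrOnCastAbs. Below is a minimal, self-contained sketch of that callback shape; the names are illustrative stand-ins, not the real TurboFan/V8 classes.

#include <functional>
#include <iostream>

// Stand-ins for succeed_if / fail_if / fail_if_not. In the real builder these
// take graph nodes and branch hints; here they take a plain condition so only
// the control-flow pattern is visible.
struct Callbacks {
  std::function<void(bool)> succeed_if;   // early success (e.g. exact RTT match)
  std::function<void(bool)> fail_if;      // early failure (e.g. null object)
  std::function<void(bool)> fail_if_not;  // failure unless the condition holds
};

// Shape of a check routine such as TypeCheck/DataCheck/FuncCheck: it never
// decides what success or failure mean, it only reports them via callbacks.
void CheckLike(bool is_null, bool exact_match, bool subtype, Callbacks cb) {
  cb.fail_if(is_null);
  cb.succeed_if(exact_match);
  cb.fail_if_not(subtype);
}

// RefTest-style consumer: falling through means success (1), any fail_* hit
// means failure (0).
int RefTestLike(bool is_null, bool exact_match, bool subtype) {
  int result = 1;
  bool done = false;
  Callbacks cb{
      [&](bool c) { if (!done && c) { result = 1; done = true; } },
      [&](bool c) { if (!done && c) { result = 0; done = true; } },
      [&](bool c) { if (!done && !c) { result = 0; done = true; } }};
  CheckLike(is_null, exact_match, subtype, cb);
  return result;
}

int main() {
  std::cout << RefTestLike(false, false, true) << "\n";  // 1: subtype check passes
  std::cout << RefTestLike(true, false, true) << "\n";   // 0: null object fails
}

The point of the split is that the check routines stay oblivious to whether a test, a cast, or a branch is being built; only the callback set changes.
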
Node* WasmGraphBuilder::StructGet(Node* struct_object,
const wasm::StructType* struct_type,
uint32_t field_index, CheckForNull null_check,
@@ -6080,23 +6056,25 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
if (i64_to_bigint_descriptor_) return i64_to_bigint_descriptor_;
i64_to_bigint_descriptor_ =
- GetBuiltinCallDescriptor<I64ToBigIntDescriptor>(this, stub_mode_);
+ GetBuiltinCallDescriptor(Builtins::kI64ToBigInt, zone_, stub_mode_);
AddInt64LoweringReplacement(
i64_to_bigint_descriptor_,
- GetBuiltinCallDescriptor<I32PairToBigIntDescriptor>(this, stub_mode_));
+ GetBuiltinCallDescriptor(Builtins::kI32PairToBigInt, zone_,
+ stub_mode_));
return i64_to_bigint_descriptor_;
}
- CallDescriptor* GetBigIntToI64CallDescriptor() {
+ CallDescriptor* GetBigIntToI64CallDescriptor(bool needs_frame_state) {
if (bigint_to_i64_descriptor_) return bigint_to_i64_descriptor_;
- bigint_to_i64_descriptor_ =
- GetBuiltinCallDescriptor<BigIntToI64Descriptor>(this, stub_mode_);
+ bigint_to_i64_descriptor_ = GetBuiltinCallDescriptor(
+ Builtins::kBigIntToI64, zone_, stub_mode_, needs_frame_state);
AddInt64LoweringReplacement(
bigint_to_i64_descriptor_,
- GetBuiltinCallDescriptor<BigIntToI32PairDescriptor>(this, stub_mode_));
+ GetBuiltinCallDescriptor(Builtins::kBigIntToI32Pair, zone_,
+ stub_mode_));
return bigint_to_i64_descriptor_;
}
@@ -6105,7 +6083,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return (stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
? mcgraph()->RelocatableIntPtrConstant(wasm_stub,
RelocInfo::WASM_STUB_CALL)
- : GetBuiltinPointerTarget(builtin_id);
+ : GetBuiltinPointerTarget(mcgraph(), builtin_id);
}
Node* BuildLoadUndefinedValueFromInstance() {
@@ -6114,7 +6092,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
undefined_value_node_ = gasm_->Load(
MachineType::Pointer(), isolate_root,
- mcgraph()->Int32Constant(
+ Int32Constant(
IsolateData::root_slot_offset(RootIndex::kUndefinedValue)));
}
return undefined_value_node_.get();
@@ -6159,7 +6137,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return done.PhiAt(0);
}
- Node* BuildChangeTaggedToInt32(Node* value, Node* context) {
+ Node* BuildChangeTaggedToInt32(Node* value, Node* context,
+ Node* frame_state) {
// We expect most integers at runtime to be Smis, so it is important for
// wrapper performance that Smi conversion be inlined.
auto builtin = gasm_->MakeDeferredLabel();
@@ -6180,11 +6159,16 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
if (!tagged_non_smi_to_int32_operator_.is_set()) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), WasmTaggedNonSmiToInt32Descriptor(), 0,
- CallDescriptor::kNoFlags, Operator::kNoProperties, stub_mode_);
+ frame_state ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags,
+ Operator::kNoProperties, stub_mode_);
tagged_non_smi_to_int32_operator_.set(common->Call(call_descriptor));
}
- Node* call = gasm_->Call(tagged_non_smi_to_int32_operator_.get(), target,
- value, context);
+ Node* call = frame_state
+ ? gasm_->Call(tagged_non_smi_to_int32_operator_.get(),
+ target, value, context, frame_state)
+ : gasm_->Call(tagged_non_smi_to_int32_operator_.get(),
+ target, value, context);
SetSourcePosition(call, 1);
gasm_->Goto(&done, call);
gasm_->Bind(&done);
@@ -6217,18 +6201,25 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return gasm_->Call(float64_to_number_operator_.get(), target, value);
}
- Node* BuildChangeTaggedToFloat64(Node* value, Node* context) {
+ Node* BuildChangeTaggedToFloat64(Node* value, Node* context,
+ Node* frame_state) {
CommonOperatorBuilder* common = mcgraph()->common();
Node* target = GetTargetForBuiltinCall(wasm::WasmCode::kWasmTaggedToFloat64,
Builtins::kWasmTaggedToFloat64);
+ bool needs_frame_state = frame_state != nullptr;
if (!tagged_to_float64_operator_.is_set()) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), WasmTaggedToFloat64Descriptor(), 0,
- CallDescriptor::kNoFlags, Operator::kNoProperties, stub_mode_);
+ frame_state ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags,
+ Operator::kNoProperties, stub_mode_);
tagged_to_float64_operator_.set(common->Call(call_descriptor));
}
- Node* call =
- gasm_->Call(tagged_to_float64_operator_.get(), target, value, context);
+ Node* call = needs_frame_state
+ ? gasm_->Call(tagged_to_float64_operator_.get(), target,
+ value, context, frame_state)
+ : gasm_->Call(tagged_to_float64_operator_.get(), target,
+ value, context);
SetSourcePosition(call, 1);
return call;
}
@@ -6246,26 +6237,25 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* ToJS(Node* node, wasm::ValueType type) {
switch (type.kind()) {
- case wasm::ValueType::kI32:
+ case wasm::kI32:
return BuildChangeInt32ToNumber(node);
- case wasm::ValueType::kS128:
+ case wasm::kS128:
UNREACHABLE();
- case wasm::ValueType::kI64: {
+ case wasm::kI64: {
return BuildChangeInt64ToBigInt(node);
}
- case wasm::ValueType::kF32:
+ case wasm::kF32:
return BuildChangeFloat32ToNumber(node);
- case wasm::ValueType::kF64:
+ case wasm::kF64:
return BuildChangeFloat64ToNumber(node);
- case wasm::ValueType::kRef:
- case wasm::ValueType::kOptRef: {
+ case wasm::kRef:
+ case wasm::kOptRef: {
uint32_t representation = type.heap_representation();
if (representation == wasm::HeapType::kExtern ||
- representation == wasm::HeapType::kExn ||
representation == wasm::HeapType::kFunc) {
return node;
}
- if (representation == wasm::HeapType::kEq) {
+ if (representation == wasm::HeapType::kData) {
// TODO(7748): Update this when JS interop is settled.
return BuildAllocateObjectWrapper(node);
}
@@ -6288,13 +6278,14 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// TODO(7748): Figure out a JS interop story for arrays and structs.
UNREACHABLE();
}
- case wasm::ValueType::kRtt:
+ case wasm::kRtt:
+ case wasm::kRttWithDepth:
// TODO(7748): Figure out what to do for RTTs.
UNIMPLEMENTED();
- case wasm::ValueType::kI8:
- case wasm::ValueType::kI16:
- case wasm::ValueType::kStmt:
- case wasm::ValueType::kBottom:
+ case wasm::kI8:
+ case wasm::kI16:
+ case wasm::kStmt:
+ case wasm::kBottom:
UNREACHABLE();
}
}
@@ -6303,16 +6294,16 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// through JavaScript, where they show up as opaque boxes. This will disappear
// once we have a proper WasmGC <-> JS interaction story.
Node* BuildAllocateObjectWrapper(Node* input) {
- return CALL_BUILTIN(
- WasmAllocateObjectWrapper, input,
+ return gasm_->CallBuiltin(
+ Builtins::kWasmAllocateObjectWrapper, input,
LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
}
enum UnpackFailureBehavior : bool { kReturnInput, kReturnNull };
Node* BuildUnpackObjectWrapper(Node* input, UnpackFailureBehavior failure) {
- Node* obj = CALL_BUILTIN(
- WasmGetOwnProperty, input,
+ Node* obj = gasm_->CallBuiltin(
+ Builtins::kWasmGetOwnProperty, input,
LOAD_FULL_POINTER(BuildLoadIsolateRoot(),
IsolateData::root_slot_offset(
RootIndex::kwasm_wrapped_object_symbol)),
@@ -6330,9 +6321,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
Node* BuildChangeInt64ToBigInt(Node* input) {
- const Operator* call =
- mcgraph()->common()->Call(GetI64ToBigIntCallDescriptor());
-
Node* target;
if (mcgraph()->machine()->Is64()) {
target = GetTargetForBuiltinCall(wasm::WasmCode::kI64ToBigInt,
@@ -6345,15 +6333,11 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
target = GetTargetForBuiltinCall(wasm::WasmCode::kI32PairToBigInt,
Builtins::kI32PairToBigInt);
}
-
- return SetEffectControl(
- graph()->NewNode(call, target, input, effect(), control()));
+ return gasm_->Call(GetI64ToBigIntCallDescriptor(), target, input);
}
- Node* BuildChangeBigIntToInt64(Node* input, Node* context) {
- const Operator* call =
- mcgraph()->common()->Call(GetBigIntToI64CallDescriptor());
-
+ Node* BuildChangeBigIntToInt64(Node* input, Node* context,
+ Node* frame_state) {
Node* target;
if (mcgraph()->machine()->Is64()) {
target = GetTargetForBuiltinCall(wasm::WasmCode::kBigIntToI64,
@@ -6367,8 +6351,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Builtins::kBigIntToI32Pair);
}
- return SetEffectControl(
- graph()->NewNode(call, target, input, context, effect(), control()));
+ return frame_state ? gasm_->Call(GetBigIntToI64CallDescriptor(true), target,
+ input, context, frame_state)
+ : gasm_->Call(GetBigIntToI64CallDescriptor(false),
+ target, input, context);
}
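
BuildChangeBigIntToInt64 follows the same pattern as the Tagged-to-Int32/Float64 helpers above: when a FrameState is supplied, the descriptor is created with CallDescriptor::kNeedsFrameState and the FrameState node is appended as one extra call input; otherwise the plain descriptor and the shorter argument list are used. A schematic of just that branching, with stand-in types that are not the real TurboFan classes:

#include <cstdio>
#include <vector>

struct Node { const char* label; };  // stand-in, not compiler::Node

enum class Flags { kNoFlags, kNeedsFrameState };

// Stand-in for a descriptor-driven call; it only reports what it was given.
Node* Call(Flags flags, std::vector<Node*> inputs) {
  std::printf("call with %zu inputs, frame state: %s\n", inputs.size(),
              flags == Flags::kNeedsFrameState ? "yes" : "no");
  return inputs.empty() ? nullptr : inputs.front();
}

// Mirrors the shape of BuildChangeBigIntToInt64: the FrameState, when present,
// both selects the kNeedsFrameState descriptor and becomes one more input.
Node* ChangeBigIntToInt64Like(Node* target, Node* input, Node* context,
                              Node* frame_state /* may be nullptr */) {
  return frame_state
             ? Call(Flags::kNeedsFrameState,
                    {target, input, context, frame_state})
             : Call(Flags::kNoFlags, {target, input, context});
}

int main() {
  Node t{"target"}, i{"input"}, c{"context"}, f{"frame_state"};
  ChangeBigIntToInt64Like(&t, &i, &c, nullptr);  // plain stub call
  ChangeBigIntToInt64Like(&t, &i, &c, &f);       // frame-state-carrying call
}

Presumably the FrameState is what allows the inlined JS-to-Wasm call to deoptimize; apart from that flag and the extra input, the two call shapes are identical.
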
void BuildCheckValidRefValue(Node* input, Node* js_context,
@@ -6394,13 +6380,13 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
type_check.merge);
}
- Node* FromJS(Node* input, Node* js_context, wasm::ValueType type) {
+ Node* FromJS(Node* input, Node* js_context, wasm::ValueType type,
+ Node* frame_state = nullptr) {
switch (type.kind()) {
- case wasm::ValueType::kRef:
- case wasm::ValueType::kOptRef: {
+ case wasm::kRef:
+ case wasm::kOptRef: {
switch (type.heap_representation()) {
case wasm::HeapType::kExtern:
- case wasm::HeapType::kExn:
return input;
case wasm::HeapType::kAny:
// If this is a wrapper for arrays/structs, unpack it.
@@ -6409,10 +6395,11 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::HeapType::kFunc:
BuildCheckValidRefValue(input, js_context, type);
return input;
- case wasm::HeapType::kEq:
+ case wasm::HeapType::kData:
// TODO(7748): Update this when JS interop has settled.
BuildCheckValidRefValue(input, js_context, type);
return BuildUnpackObjectWrapper(input, kReturnNull);
+ case wasm::HeapType::kEq:
case wasm::HeapType::kI31:
// If this is reached, then IsJSCompatibleSignature() is too
// permissive.
@@ -6427,40 +6414,38 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
UNREACHABLE();
}
}
- case wasm::ValueType::kF32:
- return graph()->NewNode(
- mcgraph()->machine()->TruncateFloat64ToFloat32(),
- BuildChangeTaggedToFloat64(input, js_context));
+ case wasm::kF32:
+ return gasm_->TruncateFloat64ToFloat32(
+ BuildChangeTaggedToFloat64(input, js_context, frame_state));
- case wasm::ValueType::kF64:
- return BuildChangeTaggedToFloat64(input, js_context);
+ case wasm::kF64:
+ return BuildChangeTaggedToFloat64(input, js_context, frame_state);
- case wasm::ValueType::kI32:
- return BuildChangeTaggedToInt32(input, js_context);
+ case wasm::kI32:
+ return BuildChangeTaggedToInt32(input, js_context, frame_state);
- case wasm::ValueType::kI64:
+ case wasm::kI64:
// i64 values can only come from BigInt.
- return BuildChangeBigIntToInt64(input, js_context);
-
- case wasm::ValueType::kRtt: // TODO(7748): Implement.
- case wasm::ValueType::kS128:
- case wasm::ValueType::kI8:
- case wasm::ValueType::kI16:
- case wasm::ValueType::kBottom:
- case wasm::ValueType::kStmt:
+ return BuildChangeBigIntToInt64(input, js_context, frame_state);
+
+ case wasm::kRtt: // TODO(7748): Implement.
+ case wasm::kRttWithDepth:
+ case wasm::kS128:
+ case wasm::kI8:
+ case wasm::kI16:
+ case wasm::kBottom:
+ case wasm::kStmt:
UNREACHABLE();
break;
}
}
Node* SmiToFloat32(Node* input) {
- return graph()->NewNode(mcgraph()->machine()->RoundInt32ToFloat32(),
- BuildChangeSmiToInt32(input));
+ return gasm_->RoundInt32ToFloat32(BuildChangeSmiToInt32(input));
}
Node* SmiToFloat64(Node* input) {
- return graph()->NewNode(mcgraph()->machine()->ChangeInt32ToFloat64(),
- BuildChangeSmiToInt32(input));
+ return gasm_->ChangeInt32ToFloat64(BuildChangeSmiToInt32(input));
}
Node* HeapNumberToFloat64(Node* input) {
@@ -6470,22 +6455,21 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* FromJSFast(Node* input, wasm::ValueType type) {
switch (type.kind()) {
- case wasm::ValueType::kI32:
+ case wasm::kI32:
return BuildChangeSmiToInt32(input);
- case wasm::ValueType::kF32: {
+ case wasm::kF32: {
auto done = gasm_->MakeLabel(MachineRepresentation::kFloat32);
auto heap_number = gasm_->MakeLabel();
gasm_->GotoIfNot(IsSmi(input), &heap_number);
gasm_->Goto(&done, SmiToFloat32(input));
gasm_->Bind(&heap_number);
Node* value =
- graph()->NewNode(mcgraph()->machine()->TruncateFloat64ToFloat32(),
- HeapNumberToFloat64(input));
+ gasm_->TruncateFloat64ToFloat32(HeapNumberToFloat64(input));
gasm_->Goto(&done, value);
gasm_->Bind(&done);
return done.PhiAt(0);
}
- case wasm::ValueType::kF64: {
+ case wasm::kF64: {
auto done = gasm_->MakeLabel(MachineRepresentation::kFloat64);
auto heap_number = gasm_->MakeLabel();
gasm_->GotoIfNot(IsSmi(input), &heap_number);
@@ -6495,66 +6479,96 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
gasm_->Bind(&done);
return done.PhiAt(0);
}
- case wasm::ValueType::kRef:
- case wasm::ValueType::kOptRef:
- case wasm::ValueType::kI64:
- case wasm::ValueType::kRtt:
- case wasm::ValueType::kS128:
- case wasm::ValueType::kI8:
- case wasm::ValueType::kI16:
- case wasm::ValueType::kBottom:
- case wasm::ValueType::kStmt:
+ case wasm::kRef:
+ case wasm::kOptRef:
+ case wasm::kI64:
+ case wasm::kRtt:
+ case wasm::kRttWithDepth:
+ case wasm::kS128:
+ case wasm::kI8:
+ case wasm::kI16:
+ case wasm::kBottom:
+ case wasm::kStmt:
UNREACHABLE();
break;
}
}
- void BuildModifyThreadInWasmFlag(bool new_value) {
- if (!trap_handler::IsTrapHandlerEnabled()) return;
- Node* isolate_root = BuildLoadIsolateRoot();
-
- Node* thread_in_wasm_flag_address =
- gasm_->Load(MachineType::Pointer(), isolate_root,
- Isolate::thread_in_wasm_flag_address_offset());
-
+ void BuildModifyThreadInWasmFlagHelper(Node* thread_in_wasm_flag_address,
+ bool new_value) {
if (FLAG_debug_code) {
- Node* flag_value = SetEffect(
- graph()->NewNode(mcgraph()->machine()->Load(MachineType::Pointer()),
- thread_in_wasm_flag_address,
- mcgraph()->Int32Constant(0), effect(), control()));
+ Node* flag_value =
+ gasm_->Load(MachineType::Pointer(), thread_in_wasm_flag_address, 0);
Node* check =
- graph()->NewNode(mcgraph()->machine()->Word32Equal(), flag_value,
- mcgraph()->Int32Constant(new_value ? 0 : 1));
+ gasm_->Word32Equal(flag_value, Int32Constant(new_value ? 0 : 1));
Diamond flag_check(graph(), mcgraph()->common(), check,
BranchHint::kTrue);
flag_check.Chain(control());
SetControl(flag_check.if_false);
- Node* message_id = graph()->NewNode(
- mcgraph()->common()->NumberConstant(static_cast<int32_t>(
- new_value ? AbortReason::kUnexpectedThreadInWasmSet
- : AbortReason::kUnexpectedThreadInWasmUnset)));
+ Node* message_id = gasm_->NumberConstant(static_cast<int32_t>(
+ new_value ? AbortReason::kUnexpectedThreadInWasmSet
+ : AbortReason::kUnexpectedThreadInWasmUnset));
Node* old_effect = effect();
- BuildCallToRuntimeWithContext(Runtime::kAbort, NoContextConstant(),
- &message_id, 1);
-
+ Node* call = BuildCallToRuntimeWithContext(
+ Runtime::kAbort, NoContextConstant(), &message_id, 1);
+ flag_check.merge->ReplaceInput(1, call);
SetEffectControl(flag_check.EffectPhi(old_effect, effect()),
flag_check.merge);
}
- SetEffect(graph()->NewNode(
- mcgraph()->machine()->Store(StoreRepresentation(
- MachineRepresentation::kWord32, kNoWriteBarrier)),
- thread_in_wasm_flag_address, mcgraph()->Int32Constant(0),
- mcgraph()->Int32Constant(new_value ? 1 : 0), effect(), control()));
+ gasm_->Store(
+ StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
+ thread_in_wasm_flag_address, 0, Int32Constant(new_value ? 1 : 0));
}
+ void BuildModifyThreadInWasmFlag(bool new_value) {
+ if (!trap_handler::IsTrapHandlerEnabled()) return;
+ Node* isolate_root = BuildLoadIsolateRoot();
+
+ Node* thread_in_wasm_flag_address =
+ gasm_->Load(MachineType::Pointer(), isolate_root,
+ Isolate::thread_in_wasm_flag_address_offset());
+
+ BuildModifyThreadInWasmFlagHelper(thread_in_wasm_flag_address, new_value);
+ }
+
+ class ModifyThreadInWasmFlagScope {
+ public:
+ ModifyThreadInWasmFlagScope(
+ WasmWrapperGraphBuilder* wasm_wrapper_graph_builder,
+ WasmGraphAssembler* gasm)
+ : wasm_wrapper_graph_builder_(wasm_wrapper_graph_builder) {
+ if (!trap_handler::IsTrapHandlerEnabled()) return;
+ Node* isolate_root = wasm_wrapper_graph_builder_->BuildLoadIsolateRoot();
+
+ thread_in_wasm_flag_address_ =
+ gasm->Load(MachineType::Pointer(), isolate_root,
+ Isolate::thread_in_wasm_flag_address_offset());
+
+ wasm_wrapper_graph_builder_->BuildModifyThreadInWasmFlagHelper(
+ thread_in_wasm_flag_address_, true);
+ }
+
+ ~ModifyThreadInWasmFlagScope() {
+ if (!trap_handler::IsTrapHandlerEnabled()) return;
+
+ wasm_wrapper_graph_builder_->BuildModifyThreadInWasmFlagHelper(
+ thread_in_wasm_flag_address_, false);
+ }
+
+ private:
+ WasmWrapperGraphBuilder* wasm_wrapper_graph_builder_;
+ Node* thread_in_wasm_flag_address_;
+ };
+
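
ModifyThreadInWasmFlagScope turns the previously matched BuildModifyThreadInWasmFlag(true)/(false) pair into an RAII object: its constructor emits the store that sets the flag and its destructor emits the store that clears it, so the two emissions bracket whatever is built inside the scope and cannot drift apart. The same idiom in ordinary runtime code, as a hedged, self-contained sketch with illustrative names:

#include <iostream>

bool g_thread_in_wasm = false;  // stand-in for the per-isolate flag

void SetThreadInWasmFlag(bool value) { g_thread_in_wasm = value; }

class ThreadInWasmScope {
 public:
  ThreadInWasmScope() { SetThreadInWasmFlag(true); }    // matches the old "set"
  ~ThreadInWasmScope() { SetThreadInWasmFlag(false); }  // matches the old "clear"
  ThreadInWasmScope(const ThreadInWasmScope&) = delete;
  ThreadInWasmScope& operator=(const ThreadInWasmScope&) = delete;
};

void CallIntoWasm() {
  ThreadInWasmScope scope;                // flag set here
  std::cout << g_thread_in_wasm << "\n";  // prints 1 while "in wasm"
}                                         // flag cleared on scope exit

int main() {
  CallIntoWasm();
  std::cout << g_thread_in_wasm << "\n";  // prints 0 again
}
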
Node* BuildMultiReturnFixedArrayFromIterable(const wasm::FunctionSig* sig,
Node* iterable, Node* context) {
Node* length = BuildChangeUint31ToSmi(
mcgraph()->Uint32Constant(static_cast<uint32_t>(sig->return_count())));
- return CALL_BUILTIN(IterableToFixedArrayForWasm, iterable, length, context);
+ return gasm_->CallBuiltin(Builtins::kIterableToFixedArrayForWasm, iterable,
+ length, context);
}
// Generate a call to the AllocateJSArray builtin.
@@ -6563,43 +6577,47 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// we make sure this is true based on statically known limits.
STATIC_ASSERT(wasm::kV8MaxWasmFunctionMultiReturns <=
JSArray::kInitialMaxFastElementArray);
- return SetControl(CALL_BUILTIN(WasmAllocateJSArray, array_length, context));
+ return SetControl(gasm_->CallBuiltin(Builtins::kWasmAllocateJSArray,
+ array_length, context));
}
Node* BuildCallAndReturn(bool is_import, Node* js_context,
Node* function_data,
- base::SmallVector<Node*, 16> args) {
- // Set the ThreadInWasm flag before we do the actual call.
- BuildModifyThreadInWasmFlag(true);
-
+ base::SmallVector<Node*, 16> args,
+ const JSWasmCallData* js_wasm_call_data,
+ Node* frame_state) {
const int rets_count = static_cast<int>(sig_->return_count());
base::SmallVector<Node*, 1> rets(rets_count);
- if (is_import) {
- // Call to an imported function.
- // Load function index from {WasmExportedFunctionData}.
- Node* function_index = BuildChangeSmiToInt32(
- gasm_->LoadExportedFunctionIndexAsSmi(function_data));
- BuildImportCall(sig_, VectorOf(args), VectorOf(rets),
- wasm::kNoCodePosition, function_index, kCallContinues);
- } else {
- // Call to a wasm function defined in this module.
- // The call target is the jump table slot for that function.
- Node* jump_table_start =
- LOAD_INSTANCE_FIELD(JumpTableStart, MachineType::Pointer());
- Node* jump_table_offset =
- BuildLoadJumpTableOffsetFromExportedFunctionData(function_data);
- Node* jump_table_slot = graph()->NewNode(
- mcgraph()->machine()->IntAdd(), jump_table_start, jump_table_offset);
- args[0] = jump_table_slot;
-
- BuildWasmCall(sig_, VectorOf(args), VectorOf(rets), wasm::kNoCodePosition,
- nullptr, kNoRetpoline);
+ // Set the ThreadInWasm flag before we do the actual call.
+ {
+ ModifyThreadInWasmFlagScope modify_thread_in_wasm_flag_builder(
+ this, gasm_.get());
+
+ if (is_import) {
+ // Call to an imported function.
+ // Load function index from {WasmExportedFunctionData}.
+ Node* function_index = BuildChangeSmiToInt32(
+ gasm_->LoadExportedFunctionIndexAsSmi(function_data));
+ BuildImportCall(sig_, VectorOf(args), VectorOf(rets),
+ wasm::kNoCodePosition, function_index, kCallContinues);
+ } else {
+ // Call to a wasm function defined in this module.
+ // The call target is the jump table slot for that function.
+ Node* jump_table_start =
+ LOAD_INSTANCE_FIELD(JumpTableStart, MachineType::Pointer());
+ Node* jump_table_offset =
+ BuildLoadJumpTableOffsetFromExportedFunctionData(function_data);
+ Node* jump_table_slot =
+ gasm_->IntAdd(jump_table_start, jump_table_offset);
+ args[0] = jump_table_slot;
+
+ BuildWasmCall(sig_, VectorOf(args), VectorOf(rets),
+ wasm::kNoCodePosition, nullptr, kNoRetpoline,
+ frame_state);
+ }
}
- // Clear the ThreadInWasm flag.
- BuildModifyThreadInWasmFlag(false);
-
Node* jsval;
if (sig_->return_count() == 0) {
// We do not use {BuildLoadUndefinedValueFromInstance} here because it
@@ -6608,14 +6626,14 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
jsval = gasm_->Load(
MachineType::Pointer(), isolate_root,
- mcgraph()->Int32Constant(
- IsolateData::root_slot_offset(RootIndex::kUndefinedValue)));
+ IsolateData::root_slot_offset(RootIndex::kUndefinedValue));
} else if (sig_->return_count() == 1) {
- jsval = ToJS(rets[0], sig_->GetReturn());
+ jsval = js_wasm_call_data && !js_wasm_call_data->result_needs_conversion()
+ ? rets[0]
+ : ToJS(rets[0], sig_->GetReturn());
} else {
int32_t return_count = static_cast<int32_t>(sig_->return_count());
- Node* size =
- graph()->NewNode(mcgraph()->common()->NumberConstant(return_count));
+ Node* size = gasm_->NumberConstant(return_count);
jsval = BuildCallAllocateJSArray(size, js_context);
@@ -6634,19 +6652,20 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
for (int i = 0; i < wasm_count; ++i) {
wasm::ValueType type = sig_->GetParam(i);
switch (type.kind()) {
- case wasm::ValueType::kRef:
- case wasm::ValueType::kOptRef:
- case wasm::ValueType::kI64:
- case wasm::ValueType::kRtt:
- case wasm::ValueType::kS128:
- case wasm::ValueType::kI8:
- case wasm::ValueType::kI16:
- case wasm::ValueType::kBottom:
- case wasm::ValueType::kStmt:
+ case wasm::kRef:
+ case wasm::kOptRef:
+ case wasm::kI64:
+ case wasm::kRtt:
+ case wasm::kRttWithDepth:
+ case wasm::kS128:
+ case wasm::kI8:
+ case wasm::kI16:
+ case wasm::kBottom:
+ case wasm::kStmt:
return false;
- case wasm::ValueType::kI32:
- case wasm::ValueType::kF32:
- case wasm::ValueType::kF64:
+ case wasm::kI32:
+ case wasm::kF32:
+ case wasm::kF64:
break;
}
}
@@ -6656,20 +6675,20 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* IsSmi(Node* input) {
return gasm_->Word32Equal(
gasm_->Word32And(BuildTruncateIntPtrToInt32(input),
- gasm_->Int32Constant(kSmiTagMask)),
- gasm_->Int32Constant(kSmiTag));
+ Int32Constant(kSmiTagMask)),
+ Int32Constant(kSmiTag));
}
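
IsSmi compiles down to a single mask-and-compare because of V8's pointer tagging: a Smi keeps the integer in the upper bits of the word with the low tag bit clear, so small integers never live on the heap. A self-contained sketch of the 31-bit layout; the constants below are local stand-ins assuming tag bit 0 and a total shift of 1, not the exact definitions V8 uses on every configuration:

#include <cassert>
#include <cstdint>

constexpr uint32_t kTag = 0;      // assumed Smi tag
constexpr uint32_t kTagMask = 1;  // assumed Smi tag mask
constexpr int kShift = 1;         // assumed total shift for 31-bit Smis

uint32_t TagSmi(int32_t value) {
  return static_cast<uint32_t>(value) << kShift;  // low bit ends up 0
}
bool IsSmiLike(uint32_t tagged) { return (tagged & kTagMask) == kTag; }
int32_t UntagSmi(uint32_t tagged) {
  return static_cast<int32_t>(tagged) >> kShift;  // arithmetic shift back
}

int main() {
  uint32_t t = TagSmi(42);
  assert(IsSmiLike(t));       // the same mask-and-compare IsSmi() emits
  assert(UntagSmi(t) == 42);  // untagging is one shift, hence the fast path
}
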
void CanTransformFast(
Node* input, wasm::ValueType type,
v8::internal::compiler::GraphAssemblerLabel<0>* slow_path) {
switch (type.kind()) {
- case wasm::ValueType::kI32: {
+ case wasm::kI32: {
gasm_->GotoIfNot(IsSmi(input), slow_path);
return;
}
- case wasm::ValueType::kF32:
- case wasm::ValueType::kF64: {
+ case wasm::kF32:
+ case wasm::kF64: {
auto done = gasm_->MakeLabel();
gasm_->GotoIf(IsSmi(input), &done);
Node* map =
@@ -6684,25 +6703,28 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
gasm_->Bind(&done);
return;
}
- case wasm::ValueType::kRef:
- case wasm::ValueType::kOptRef:
- case wasm::ValueType::kI64:
- case wasm::ValueType::kRtt:
- case wasm::ValueType::kS128:
- case wasm::ValueType::kI8:
- case wasm::ValueType::kI16:
- case wasm::ValueType::kBottom:
- case wasm::ValueType::kStmt:
+ case wasm::kRef:
+ case wasm::kOptRef:
+ case wasm::kI64:
+ case wasm::kRtt:
+ case wasm::kRttWithDepth:
+ case wasm::kS128:
+ case wasm::kI8:
+ case wasm::kI16:
+ case wasm::kBottom:
+ case wasm::kStmt:
UNREACHABLE();
break;
}
}
- void BuildJSToWasmWrapper(bool is_import) {
- const int wasm_count = static_cast<int>(sig_->parameter_count());
+ void BuildJSToWasmWrapper(bool is_import,
+ const JSWasmCallData* js_wasm_call_data = nullptr,
+ Node* frame_state = nullptr) {
+ const int wasm_param_count = static_cast<int>(sig_->parameter_count());
// Build the start and the JS parameter nodes.
- SetEffectControl(Start(wasm_count + 5));
+ SetEffectControl(Start(wasm_param_count + 5));
// Create the js_closure and js_context parameters.
Node* js_closure =
@@ -6711,7 +6733,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
graph()->start());
Node* js_context = graph()->NewNode(
mcgraph()->common()->Parameter(
- Linkage::GetJSCallContextParamIndex(wasm_count + 1), "%context"),
+ Linkage::GetJSCallContextParamIndex(wasm_param_count + 1),
+ "%context"),
graph()->start());
  // Create the instance_node to pass as a parameter. It is loaded from
@@ -6731,17 +6754,18 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return;
}
- const int args_count = wasm_count + 1; // +1 for wasm_code.
+ const int args_count = wasm_param_count + 1; // +1 for wasm_code.
// Check whether the signature of the function allows for a fast
// transformation (if any params exist that need transformation).
// Create a fast transformation path, only if it does.
- bool include_fast_path = wasm_count && QualifiesForFastTransform(sig_);
+ bool include_fast_path = !js_wasm_call_data && wasm_param_count > 0 &&
+ QualifiesForFastTransform(sig_);
// Prepare Param() nodes. Param() nodes can only be created once,
// so we need to use the same nodes along all possible transformation paths.
base::SmallVector<Node*, 16> params(args_count);
- for (int i = 0; i < wasm_count; ++i) params[i + 1] = Param(i + 1);
+ for (int i = 0; i < wasm_param_count; ++i) params[i + 1] = Param(i + 1);
auto done = gasm_->MakeLabel(MachineRepresentation::kTagged);
if (include_fast_path) {
@@ -6750,30 +6774,46 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// using the fast transformation. When a param that cannot be transformed
// fast is encountered, skip checking the rest and fall back to the slow
// path.
- for (int i = 0; i < wasm_count; ++i) {
+ for (int i = 0; i < wasm_param_count; ++i) {
CanTransformFast(params[i + 1], sig_->GetParam(i), &slow_path);
}
// Convert JS parameters to wasm numbers using the fast transformation
// and build the call.
base::SmallVector<Node*, 16> args(args_count);
- for (int i = 0; i < wasm_count; ++i) {
+ for (int i = 0; i < wasm_param_count; ++i) {
Node* wasm_param = FromJSFast(params[i + 1], sig_->GetParam(i));
args[i + 1] = wasm_param;
}
- Node* jsval =
- BuildCallAndReturn(is_import, js_context, function_data, args);
+ Node* jsval = BuildCallAndReturn(is_import, js_context, function_data,
+ args, js_wasm_call_data, frame_state);
gasm_->Goto(&done, jsval);
gasm_->Bind(&slow_path);
}
// Convert JS parameters to wasm numbers using the default transformation
// and build the call.
base::SmallVector<Node*, 16> args(args_count);
- for (int i = 0; i < wasm_count; ++i) {
- Node* wasm_param = FromJS(params[i + 1], js_context, sig_->GetParam(i));
- args[i + 1] = wasm_param;
+ for (int i = 0; i < wasm_param_count; ++i) {
+ bool do_conversion =
+ !js_wasm_call_data || js_wasm_call_data->arg_needs_conversion(i);
+ if (do_conversion) {
+ args[i + 1] =
+ FromJS(params[i + 1], js_context, sig_->GetParam(i), frame_state);
+ } else {
+ Node* wasm_param = params[i + 1];
+
+      // We set UseInfo::CheckedNumberOrOddballAsFloat64 for Float32
+      // parameters in simplified-lowering, so we need to add a conversion
+      // from Float64 to Float32 here.
+ if (sig_->GetParam(i).kind() == wasm::kF32) {
+ wasm_param = gasm_->TruncateFloat64ToFloat32(wasm_param);
+ }
+
+ args[i + 1] = wasm_param;
+ }
}
- Node* jsval =
- BuildCallAndReturn(is_import, js_context, function_data, args);
+ Node* jsval = BuildCallAndReturn(is_import, js_context, function_data, args,
+ js_wasm_call_data, frame_state);
  // If both the default and the fast transformation paths are present,
// get the return value based on the path used.
if (include_fast_path) {
@@ -6795,8 +6835,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
wasm::ObjectAccess::FlagsOffsetInSharedFunctionInfo());
Node* strict_check =
Binop(wasm::kExprI32And, flags,
- mcgraph()->Int32Constant(SharedFunctionInfo::IsNativeBit::kMask |
- SharedFunctionInfo::IsStrictBit::kMask));
+ Int32Constant(SharedFunctionInfo::IsNativeBit::kMask |
+ SharedFunctionInfo::IsStrictBit::kMask));
// Load global receiver if sloppy else use undefined.
Diamond strict_d(graph(), mcgraph()->common(), strict_check,
@@ -6864,17 +6904,15 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
pos = AddArgumentNodes(VectorOf(args), pos, wasm_count, sig_);
args[pos++] = undefined_node; // new target
- args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
+ args[pos++] = Int32Constant(wasm_count); // argument count
args[pos++] = function_context;
args[pos++] = effect();
args[pos++] = control();
DCHECK_EQ(pos, args.size());
- call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos,
- args.begin());
+ call = gasm_->Call(call_descriptor, pos, args.begin());
break;
}
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// =======================================================================
// === JS Functions with mismatching arity ===============================
// =======================================================================
@@ -6894,7 +6932,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
args[pos++] = undefined_node;
}
args[pos++] = undefined_node; // new target
- args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
+ args[pos++] = Int32Constant(wasm_count); // argument count
Node* function_context =
gasm_->LoadContextFromJSFunction(callable_node);
@@ -6905,64 +6943,19 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
auto call_descriptor = Linkage::GetJSCallDescriptor(
graph()->zone(), false, pushed_count + 1, CallDescriptor::kNoFlags);
- call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos,
- args.begin());
- break;
- }
-#else
- // =======================================================================
- // === JS Functions with mismatching arity ===============================
- // =======================================================================
- case WasmImportCallKind::kJSFunctionArityMismatch: {
- base::SmallVector<Node*, 16> args(wasm_count + 9);
- int pos = 0;
- Node* function_context =
- gasm_->LoadContextFromJSFunction(callable_node);
- args[pos++] =
- GetBuiltinPointerTarget(Builtins::kArgumentsAdaptorTrampoline);
- args[pos++] = callable_node; // target callable
- args[pos++] = undefined_node; // new target
- args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
-
- // Load shared function info, and then the formal parameter count.
- Node* shared_function_info =
- gasm_->LoadSharedFunctionInfo(callable_node);
- Node* formal_param_count =
- gasm_->Load(MachineType::Uint16(), shared_function_info,
- wasm::ObjectAccess::
- FormalParameterCountOffsetInSharedFunctionInfo());
- args[pos++] = formal_param_count;
-
- // Determine receiver at runtime.
- args[pos++] =
- BuildReceiverNode(callable_node, native_context, undefined_node);
-
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- mcgraph()->zone(), ArgumentsAdaptorDescriptor{}, 1 + wasm_count,
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- StubCallMode::kCallBuiltinPointer);
-
- // Convert wasm numbers to JS values.
- pos = AddArgumentNodes(VectorOf(args), pos, wasm_count, sig_);
- args[pos++] = function_context;
- args[pos++] = effect();
- args[pos++] = control();
-
- DCHECK_EQ(pos, args.size());
- call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos,
- args.begin());
+ call = gasm_->Call(call_descriptor, pos, args.begin());
break;
}
-#endif
// =======================================================================
// === General case of unknown callable ==================================
// =======================================================================
case WasmImportCallKind::kUseCallBuiltin: {
base::SmallVector<Node*, 16> args(wasm_count + 7);
int pos = 0;
- args[pos++] = GetBuiltinPointerTarget(Builtins::kCall_ReceiverIsAny);
+ args[pos++] =
+ GetBuiltinPointerTarget(mcgraph(), Builtins::kCall_ReceiverIsAny);
args[pos++] = callable_node;
- args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
+ args[pos++] = Int32Constant(wasm_count); // argument count
args[pos++] = undefined_node; // receiver
auto call_descriptor = Linkage::GetStubCallDescriptor(
@@ -6983,8 +6976,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
args[pos++] = control();
DCHECK_EQ(pos, args.size());
- call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos,
- args.begin());
+ call = gasm_->Call(call_descriptor, pos, args.begin());
break;
}
default:
@@ -6992,13 +6984,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
DCHECK_NOT_NULL(call);
- SetEffect(call);
SetSourcePosition(call, 0);
// Convert the return value(s) back.
if (sig_->return_count() <= 1) {
Node* val = sig_->return_count() == 0
- ? mcgraph()->Int32Constant(0)
+ ? Int32Constant(0)
: FromJS(call, native_context, sig_->GetReturn());
BuildModifyThreadInWasmFlag(true);
Return(val);
@@ -7064,8 +7055,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// TODO(jkummerow): Load the address from the {host_data}, and cache
// wrappers per signature.
const ExternalReference ref = ExternalReference::Create(address);
- Node* function =
- graph()->NewNode(mcgraph()->common()->ExternalConstant(ref));
+ Node* function = gasm_->ExternalConstant(ref);
// Parameters: Address host_data_foreign, Address arguments.
MachineType host_sig_types[] = {
@@ -7078,24 +7068,22 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* exception_branch = graph()->NewNode(
mcgraph()->common()->Branch(BranchHint::kTrue),
- graph()->NewNode(mcgraph()->machine()->WordEqual(), return_value,
- mcgraph()->IntPtrConstant(0)),
+ gasm_->WordEqual(return_value, mcgraph()->IntPtrConstant(0)),
control());
SetControl(
graph()->NewNode(mcgraph()->common()->IfFalse(), exception_branch));
- WasmThrowDescriptor interface_descriptor;
+ WasmRethrowDescriptor interface_descriptor;
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), interface_descriptor,
interface_descriptor.GetStackParameterCount(), CallDescriptor::kNoFlags,
Operator::kNoProperties, StubCallMode::kCallWasmRuntimeStub);
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmRethrow, RelocInfo::WASM_STUB_CALL);
- Node* throw_effect =
- graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
- call_target, return_value, effect(), control());
- TerminateThrow(throw_effect, control());
+ gasm_->Call(call_descriptor, call_target, return_value);
+ TerminateThrow(effect(), control());
- SetControl(
+ SetEffectControl(
+ return_value,
graph()->NewNode(mcgraph()->common()->IfTrue(), exception_branch));
DCHECK_LT(sig_->return_count(), wasm::kV8MaxWasmFunctionMultiReturns);
size_t return_count = sig_->return_count();
@@ -7151,9 +7139,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Call the underlying closure.
base::SmallVector<Node*, 16> args(wasm_count + 7);
int pos = 0;
- args[pos++] = GetBuiltinPointerTarget(Builtins::kCall_ReceiverIsAny);
+ args[pos++] =
+ GetBuiltinPointerTarget(mcgraph(), Builtins::kCall_ReceiverIsAny);
args[pos++] = callable;
- args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
+ args[pos++] = Int32Constant(wasm_count); // argument count
args[pos++] = BuildLoadUndefinedValueFromInstance(); // receiver
auto call_descriptor = Linkage::GetStubCallDescriptor(
@@ -7173,8 +7162,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
args[pos++] = control();
DCHECK_EQ(pos, args.size());
- Node* call = SetEffect(graph()->NewNode(
- mcgraph()->common()->Call(call_descriptor), pos, args.begin()));
+ Node* call = gasm_->Call(call_descriptor, pos, args.begin());
// Convert return JS values to wasm numbers and back to JS values.
Node* jsval;
@@ -7186,8 +7174,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* fixed_array =
BuildMultiReturnFixedArrayFromIterable(sig_, call, context);
int32_t return_count = static_cast<int32_t>(sig_->return_count());
- Node* size =
- graph()->NewNode(mcgraph()->common()->NumberConstant(return_count));
+ Node* size = gasm_->NumberConstant(return_count);
jsval = BuildCallAllocateJSArray(size, context);
Node* result_fixed_array = gasm_->LoadJSArrayElements(jsval);
for (unsigned i = 0; i < sig_->return_count(); ++i) {
@@ -7237,8 +7224,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
auto call_descriptor = GetWasmCallDescriptor(mcgraph()->zone(), sig_);
DCHECK_EQ(pos, args.size());
- Node* call = SetEffect(graph()->NewNode(
- mcgraph()->common()->Call(call_descriptor), pos, args.begin()));
+ Node* call = gasm_->Call(call_descriptor, pos, args.begin());
Node* if_success = graph()->NewNode(mcgraph()->common()->IfSuccess(), call);
Node* if_exception =
@@ -7300,6 +7286,16 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
} // namespace
+void BuildInlinedJSToWasmWrapper(
+ Zone* zone, MachineGraph* mcgraph, const wasm::FunctionSig* signature,
+ const wasm::WasmModule* module, compiler::SourcePositionTable* spt,
+ StubCallMode stub_mode, wasm::WasmFeatures features,
+ const JSWasmCallData* js_wasm_call_data, Node* frame_state) {
+ WasmWrapperGraphBuilder builder(zone, mcgraph, signature, module, spt,
+ stub_mode, features);
+ builder.BuildJSToWasmWrapper(false, js_wasm_call_data, frame_state);
+}
+
std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
Isolate* isolate, wasm::WasmEngine* wasm_engine,
const wasm::FunctionSig* sig, const wasm::WasmModule* module,
@@ -7447,10 +7443,10 @@ std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
}
// If function isn't compiled, compile it now.
- IsCompiledScope is_compiled_scope(
- shared->is_compiled_scope(callable->GetIsolate()));
+ Isolate* isolate = callable->GetIsolate();
+ IsCompiledScope is_compiled_scope(shared->is_compiled_scope(isolate));
if (!is_compiled_scope.is_compiled()) {
- Compiler::Compile(function, Compiler::CLEAR_EXCEPTION,
+ Compiler::Compile(isolate, function, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope);
}
@@ -8018,7 +8014,8 @@ class LinkageLocationAllocator {
// General code uses the above configuration data.
CallDescriptor* GetWasmCallDescriptor(
Zone* zone, const wasm::FunctionSig* fsig,
- WasmGraphBuilder::UseRetpoline use_retpoline, WasmCallKind call_kind) {
+ WasmGraphBuilder::UseRetpoline use_retpoline, WasmCallKind call_kind,
+ bool need_frame_state) {
  // The extra here is to accommodate the instance object as the first parameter
// and, when specified, the additional callable.
bool extra_callable_param =
@@ -8095,7 +8092,9 @@ CallDescriptor* GetWasmCallDescriptor(
}
CallDescriptor::Flags flags =
- use_retpoline ? CallDescriptor::kRetpoline : CallDescriptor::kNoFlags;
+ use_retpoline ? CallDescriptor::kRetpoline
+ : need_frame_state ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags;
return zone->New<CallDescriptor>( // --
descriptor_kind, // kind
target_type, // target MachineType
@@ -8223,7 +8222,6 @@ AssemblerOptions WasmStubAssemblerOptions() {
}
#undef FATAL_UNSUPPORTED_OPCODE
-#undef CALL_BUILTIN
#undef WASM_INSTANCE_OBJECT_SIZE
#undef WASM_INSTANCE_OBJECT_OFFSET
#undef LOAD_INSTANCE_FIELD
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 00ec7a9f8b..e6614f1c67 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -10,6 +10,7 @@
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
+#include "src/base/small-vector.h"
#include "src/runtime/runtime.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
@@ -36,6 +37,9 @@ class WasmDecorator;
class WasmGraphAssembler;
enum class TrapId : uint32_t;
struct Int64LoweringSpecialCase;
+template <size_t VarCount>
+class GraphAssemblerLabel;
+enum class BranchHint : uint8_t;
} // namespace compiler
namespace wasm {
@@ -148,6 +152,21 @@ enum CWasmEntryParameters {
V8_EXPORT_PRIVATE Handle<Code> CompileCWasmEntry(
Isolate*, const wasm::FunctionSig*, const wasm::WasmModule* module);
+class JSWasmCallData {
+ public:
+ explicit JSWasmCallData(const wasm::FunctionSig* wasm_signature);
+
+ bool arg_needs_conversion(size_t index) const {
+ DCHECK_LT(index, arg_needs_conversion_.size());
+ return arg_needs_conversion_[index];
+ }
+ bool result_needs_conversion() const { return result_needs_conversion_; }
+
+ private:
+ bool result_needs_conversion_;
+ std::vector<bool> arg_needs_conversion_;
+};
+
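
The class above records which JS-to-Wasm argument and result conversions an inlined call can skip because TurboFan has already established the value's type. A sketch of the query side only, using a stand-in class with the same two accessors; the construction values are invented:

#include <cstdio>
#include <utility>
#include <vector>

class JSWasmCallDataLike {
 public:
  JSWasmCallDataLike(std::vector<bool> args, bool result)
      : result_needs_conversion_(result),
        arg_needs_conversion_(std::move(args)) {}
  bool arg_needs_conversion(size_t i) const { return arg_needs_conversion_[i]; }
  bool result_needs_conversion() const { return result_needs_conversion_; }

 private:
  bool result_needs_conversion_;
  std::vector<bool> arg_needs_conversion_;
};

int main() {
  // Invented example: argument 0 still needs a FromJS-style conversion,
  // argument 1 and the result can be passed through untouched.
  JSWasmCallDataLike call_data({true, false}, /*result=*/false);
  for (size_t i = 0; i < 2; ++i) {
    std::printf("arg %zu: %s\n", i,
                call_data.arg_needs_conversion(i) ? "convert via FromJS"
                                                  : "pass through");
  }
  std::printf("result: %s\n", call_data.result_needs_conversion()
                                  ? "convert via ToJS"
                                  : "pass through");
}
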
// Values from the instance object are cached between Wasm-level function calls.
// This struct allows the SSA environment handling this cache to be defined
// and manipulated in wasm-compiler.{h,cc} instead of inside the Wasm decoder.
@@ -162,12 +181,14 @@ struct WasmInstanceCacheNodes {
// the wasm decoder from the internal details of TurboFan.
class WasmGraphBuilder {
public:
+ enum ReferenceKind : bool { // --
+ kArrayOrStruct = true,
+ kFunction = false
+ };
struct ObjectReferenceKnowledge {
bool object_can_be_null;
- bool object_must_be_data_ref;
- bool object_can_be_i31;
- bool rtt_is_i31;
- uint8_t rtt_depth;
+ ReferenceKind reference_kind;
+ int8_t rtt_depth;
};
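
ObjectReferenceKnowledge now boils down to three facts about the object/RTT pair being checked. A self-contained illustration of a filled-in instance; the values are invented and the types below are local stand-ins mirroring the declaration above:

#include <cstdint>
#include <cstdio>

enum ReferenceKind : bool { kArrayOrStruct = true, kFunction = false };

struct ObjectReferenceKnowledge {
  bool object_can_be_null;
  ReferenceKind reference_kind;
  int8_t rtt_depth;
};

int main() {
  // Invented values: a nullable array-or-struct reference whose RTT sits at
  // subtyping depth 2; in V8 the decoder derives these from the type section.
  ObjectReferenceKnowledge config{true, kArrayOrStruct, 2};
  std::printf("nullable=%d kind=%d depth=%d\n", config.object_can_be_null,
              config.reference_kind, config.rtt_depth);
}
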
enum EnforceBoundsCheck : bool { // --
kNeedsBoundsCheck = true,
@@ -201,6 +222,8 @@ class WasmGraphBuilder {
Node* LoopExitValue(Node* value, MachineRepresentation representation);
Node* TerminateThrow(Node* effect, Node* control);
Node* Merge(unsigned count, Node** controls);
+ template <typename... Nodes>
+ Node* Merge(Node* fst, Nodes*... args);
Node* Phi(wasm::ValueType type, unsigned count, Node** vals_and_control);
Node* CreateOrMergeIntoPhi(MachineRepresentation rep, Node* merge,
Node* tnode, Node* fnode);
@@ -434,14 +457,32 @@ class WasmGraphBuilder {
Node* I31New(Node* input);
Node* I31GetS(Node* input);
Node* I31GetU(Node* input);
- Node* RttCanon(wasm::HeapType type);
- Node* RttSub(wasm::HeapType type, Node* parent_rtt);
+ Node* RttCanon(uint32_t type_index);
+ Node* RttSub(uint32_t type_index, Node* parent_rtt);
+
Node* RefTest(Node* object, Node* rtt, ObjectReferenceKnowledge config);
Node* RefCast(Node* object, Node* rtt, ObjectReferenceKnowledge config,
wasm::WasmCodePosition position);
Node* BrOnCast(Node* object, Node* rtt, ObjectReferenceKnowledge config,
Node** match_control, Node** match_effect,
Node** no_match_control, Node** no_match_effect);
+ Node* RefIsData(Node* object, bool object_can_be_null);
+ Node* RefAsData(Node* object, bool object_can_be_null,
+ wasm::WasmCodePosition position);
+ Node* BrOnData(Node* object, Node* rtt, ObjectReferenceKnowledge config,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control, Node** no_match_effect);
+ Node* RefIsFunc(Node* object, bool object_can_be_null);
+ Node* RefAsFunc(Node* object, bool object_can_be_null,
+ wasm::WasmCodePosition position);
+ Node* BrOnFunc(Node* object, Node* rtt, ObjectReferenceKnowledge config,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control, Node** no_match_effect);
+ Node* RefIsI31(Node* object);
+ Node* RefAsI31(Node* object, wasm::WasmCodePosition position);
+ Node* BrOnI31(Node* object, Node* rtt, ObjectReferenceKnowledge config,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control, Node** no_match_effect);
bool has_simd() const { return has_simd_; }
@@ -490,7 +531,7 @@ class WasmGraphBuilder {
Node* BuildCCall(MachineSignature* sig, Node* function, Args... args);
Node* BuildCallNode(const wasm::FunctionSig* sig, Vector<Node*> args,
wasm::WasmCodePosition position, Node* instance_node,
- const Operator* op);
+ const Operator* op, Node* frame_state = nullptr);
// Helper function for {BuildIndirectCall}.
void LoadIndirectFunctionTable(uint32_t table_index, Node** ift_size,
Node** ift_sig_ids, Node** ift_targets,
@@ -501,7 +542,8 @@ class WasmGraphBuilder {
IsReturnCall continuation);
Node* BuildWasmCall(const wasm::FunctionSig* sig, Vector<Node*> args,
Vector<Node*> rets, wasm::WasmCodePosition position,
- Node* instance_node, UseRetpoline use_retpoline);
+ Node* instance_node, UseRetpoline use_retpoline,
+ Node* frame_state = nullptr);
Node* BuildWasmReturnCall(const wasm::FunctionSig* sig, Vector<Node*> args,
wasm::WasmCodePosition position,
Node* instance_node, UseRetpoline use_retpoline);
@@ -514,7 +556,6 @@ class WasmGraphBuilder {
Node* BuildCallRef(uint32_t sig_index, Vector<Node*> args, Vector<Node*> rets,
CheckForNull null_check, IsReturnCall continuation,
wasm::WasmCodePosition position);
- Node* GetBuiltinPointerTarget(int builtin_id);
Node* BuildF32CopySign(Node* left, Node* right);
Node* BuildF64CopySign(Node* left, Node* right);
@@ -573,6 +614,7 @@ class WasmGraphBuilder {
Node* BuildTruncateIntPtrToInt32(Node* value);
Node* BuildChangeInt32ToIntPtr(Node* value);
+ Node* BuildChangeIntPtrToInt64(Node* value);
Node* BuildChangeInt32ToSmi(Node* value);
Node* BuildChangeUint31ToSmi(Node* value);
Node* BuildSmiShiftBitsConstant();
@@ -582,6 +624,36 @@ class WasmGraphBuilder {
// generates {index > max ? Smi(max) : Smi(index)}
Node* BuildConvertUint32ToSmiWithSaturation(Node* index, uint32_t maxval);
+ using BranchBuilder = std::function<void(Node*, BranchHint)>;
+ struct Callbacks {
+ BranchBuilder succeed_if;
+ BranchBuilder fail_if;
+ BranchBuilder fail_if_not;
+ };
+
+ // This type is used to collect control/effect nodes we need to merge at the
+ // end of BrOn* functions. Nodes are collected in {TypeCheck} etc. by calling
+ // the passed callbacks succeed_if, fail_if and fail_if_not. We have up to 5
+ // control nodes to merge; the EffectPhi needs an additional input.
+ using SmallNodeVector = base::SmallVector<Node*, 6>;
+
+ Callbacks TestCallbacks(GraphAssemblerLabel<1>* label);
+ Callbacks CastCallbacks(GraphAssemblerLabel<0>* label,
+ wasm::WasmCodePosition position);
+ Callbacks BranchCallbacks(SmallNodeVector& no_match_controls,
+ SmallNodeVector& no_match_effects,
+ SmallNodeVector& match_controls,
+ SmallNodeVector& match_effects);
+
+ void TypeCheck(Node* object, Node* rtt, ObjectReferenceKnowledge config,
+ bool null_succeeds, Callbacks callbacks);
+ void DataCheck(Node* object, bool object_can_be_null, Callbacks callbacks);
+ void FuncCheck(Node* object, bool object_can_be_null, Callbacks callbacks);
+
+ Node* BrOnCastAbs(Node** match_control, Node** match_effect,
+ Node** no_match_control, Node** no_match_effect,
+ std::function<void(Callbacks)> type_checker);
+
// Asm.js specific functionality.
Node* BuildI32AsmjsSConvertF32(Node* input);
Node* BuildI32AsmjsSConvertF64(Node* input);
@@ -643,6 +715,7 @@ class WasmGraphBuilder {
WasmInstanceCacheNodes* instance_cache_ = nullptr;
SetOncePointer<Node> instance_node_;
+ SetOncePointer<Node> ref_null_node_;
SetOncePointer<Node> globals_start_;
SetOncePointer<Node> imported_mutable_globals_;
SetOncePointer<Node> stack_check_code_node_;
@@ -666,11 +739,17 @@ class WasmGraphBuilder {
enum WasmCallKind { kWasmFunction, kWasmImportWrapper, kWasmCapiFunction };
+V8_EXPORT_PRIVATE void BuildInlinedJSToWasmWrapper(
+ Zone* zone, MachineGraph* mcgraph, const wasm::FunctionSig* signature,
+ const wasm::WasmModule* module, compiler::SourcePositionTable* spt,
+ StubCallMode stub_mode, wasm::WasmFeatures features,
+ const JSWasmCallData* js_wasm_call_data, Node* frame_state);
+
V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(
Zone* zone, const wasm::FunctionSig* signature,
WasmGraphBuilder::UseRetpoline use_retpoline =
WasmGraphBuilder::kNoRetpoline,
- WasmCallKind kind = kWasmFunction);
+ WasmCallKind kind = kWasmFunction, bool need_frame_state = false);
V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptor(
Zone* zone, const CallDescriptor* call_descriptor);
diff --git a/deps/v8/src/d8/d8-console.cc b/deps/v8/src/d8/d8-console.cc
index 5ecdf6767b..7f0904e343 100644
--- a/deps/v8/src/d8/d8-console.cc
+++ b/deps/v8/src/d8/d8-console.cc
@@ -112,6 +112,7 @@ void D8Console::TimeEnd(const debug::ConsoleCallArguments& args,
auto find = timers_.find(string);
if (find != timers_.end()) {
delta = now - find->second;
+ timers_.erase(find);
}
printf("console.timeEnd: %s, %f\n", *utf8, delta.InMillisecondsF());
}
diff --git a/deps/v8/src/d8/d8.cc b/deps/v8/src/d8/d8.cc
index 578d887b2c..999e8c2b96 100644
--- a/deps/v8/src/d8/d8.cc
+++ b/deps/v8/src/d8/d8.cc
@@ -334,8 +334,8 @@ v8::Platform* g_default_platform;
std::unique_ptr<v8::Platform> g_platform;
static Local<Value> Throw(Isolate* isolate, const char* message) {
- return isolate->ThrowException(
- String::NewFromUtf8(isolate, message).ToLocalChecked());
+ return isolate->ThrowException(v8::Exception::Error(
+ String::NewFromUtf8(isolate, message).ToLocalChecked()));
}
static MaybeLocal<Value> TryGetValue(v8::Isolate* isolate,
@@ -694,7 +694,7 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
Context::Scope context_scope(realm);
MaybeLocal<Script> maybe_script;
Local<Context> context(isolate->GetCurrentContext());
- ScriptOrigin origin(name);
+ ScriptOrigin origin(isolate, name);
Local<Script> script;
if (!CompileString<Script>(isolate, context, source, origin)
@@ -839,13 +839,46 @@ class ModuleEmbedderData {
public:
explicit ModuleEmbedderData(Isolate* isolate)
- : module_to_specifier_map(10, ModuleGlobalHash(isolate)) {}
+ : module_to_specifier_map(10, ModuleGlobalHash(isolate)),
+ json_module_to_parsed_json_map(10, ModuleGlobalHash(isolate)) {}
+
+ static ModuleType ModuleTypeFromImportAssertions(
+ Local<Context> context, Local<FixedArray> import_assertions,
+ bool hasPositions) {
+ Isolate* isolate = context->GetIsolate();
+ const int kV8AssertionEntrySize = hasPositions ? 3 : 2;
+ for (int i = 0; i < import_assertions->Length();
+ i += kV8AssertionEntrySize) {
+ Local<String> v8_assertion_key =
+ import_assertions->Get(context, i).As<v8::String>();
+ std::string assertion_key = ToSTLString(isolate, v8_assertion_key);
+
+ if (assertion_key == "type") {
+ Local<String> v8_assertion_value =
+ import_assertions->Get(context, i + 1).As<String>();
+ std::string assertion_value = ToSTLString(isolate, v8_assertion_value);
+ if (assertion_value == "json") {
+ return ModuleType::kJSON;
+ } else {
+ // JSON is currently the only supported non-JS type
+ return ModuleType::kInvalid;
+ }
+ }
+ }
- // Map from normalized module specifier to Module.
- std::unordered_map<std::string, Global<Module>> specifier_to_module_map;
+ // If no type is asserted, default to JS.
+ return ModuleType::kJavaScript;
+ }
+
+ // Map from (normalized module specifier, module type) pair to Module.
+ std::map<std::pair<std::string, ModuleType>, Global<Module>> module_map;
// Map from Module to its URL as defined in the ScriptOrigin
std::unordered_map<Global<Module>, std::string, ModuleGlobalHash>
module_to_specifier_map;
+  // Map from a JSON module to its parsed content, for use in
+  // Shell::JSONModuleEvaluationSteps.
+ std::unordered_map<Global<Module>, Global<Value>, ModuleGlobalHash>
+ json_module_to_parsed_json_map;
};
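
Because the same specifier can now be imported both as JavaScript and, via an import assertion, as JSON, the module cache is keyed by the (absolute path, ModuleType) pair instead of the path alone. A standalone sketch of that keying; the paths are invented and the types are stand-ins for d8's ModuleType and Global<Module>:

#include <iostream>
#include <map>
#include <string>
#include <utility>

enum class ModuleType { kJavaScript, kJSON, kInvalid };
using FakeModule = std::string;  // stand-in for Global<Module>

int main() {
  std::map<std::pair<std::string, ModuleType>, FakeModule> module_map;

  // The same absolute path is cached once per asserted type.
  module_map[{"/app/data.json", ModuleType::kJavaScript}] = "plain JS module";
  module_map[{"/app/data.json", ModuleType::kJSON}] = "synthetic JSON module";

  // Lookups mirror ResolveModuleCallback: path plus the type derived from the
  // request's import assertions.
  auto it = module_map.find({"/app/data.json", ModuleType::kJSON});
  std::cout << (it != module_map.end() ? it->second : "cache miss") << "\n";
}
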
enum { kModuleEmbedderDataIndex, kInspectorClientIndex };
@@ -869,7 +902,6 @@ MaybeLocal<Module> ResolveModuleCallback(Local<Context> context,
Local<String> specifier,
Local<FixedArray> import_assertions,
Local<Module> referrer) {
- // TODO(v8:11189) Consider JSON modules support in d8.
Isolate* isolate = context->GetIsolate();
ModuleEmbedderData* d = GetModuleDataFromContext(context);
auto specifier_it =
@@ -877,8 +909,11 @@ MaybeLocal<Module> ResolveModuleCallback(Local<Context> context,
CHECK(specifier_it != d->module_to_specifier_map.end());
std::string absolute_path = NormalizePath(ToSTLString(isolate, specifier),
DirName(specifier_it->second));
- auto module_it = d->specifier_to_module_map.find(absolute_path);
- CHECK(module_it != d->specifier_to_module_map.end());
+ ModuleType module_type = ModuleEmbedderData::ModuleTypeFromImportAssertions(
+ context, import_assertions, true);
+ auto module_it =
+ d->module_map.find(std::make_pair(absolute_path, module_type));
+ CHECK(module_it != d->module_map.end());
return module_it->second.Get(isolate);
}
@@ -886,7 +921,8 @@ MaybeLocal<Module> ResolveModuleCallback(Local<Context> context,
MaybeLocal<Module> Shell::FetchModuleTree(Local<Module> referrer,
Local<Context> context,
- const std::string& file_name) {
+ const std::string& file_name,
+ ModuleType module_type) {
DCHECK(IsAbsolutePath(file_name));
Isolate* isolate = context->GetIsolate();
Local<String> source_text = ReadFile(isolate, file_name.c_str());
@@ -912,18 +948,41 @@ MaybeLocal<Module> Shell::FetchModuleTree(Local<Module> referrer,
return MaybeLocal<Module>();
}
ScriptOrigin origin(
- String::NewFromUtf8(isolate, file_name.c_str()).ToLocalChecked(), 0, 0,
- false, -1, Local<Value>(), false, false, true);
- ScriptCompiler::Source source(source_text, origin);
+ isolate, String::NewFromUtf8(isolate, file_name.c_str()).ToLocalChecked(),
+ 0, 0, false, -1, Local<Value>(), false, false, true);
Local<Module> module;
- if (!CompileString<Module>(isolate, context, source_text, origin)
- .ToLocal(&module)) {
- return MaybeLocal<Module>();
+ if (module_type == ModuleType::kJavaScript) {
+ ScriptCompiler::Source source(source_text, origin);
+ if (!CompileString<Module>(isolate, context, source_text, origin)
+ .ToLocal(&module)) {
+ return MaybeLocal<Module>();
+ }
+ } else if (module_type == ModuleType::kJSON) {
+ Local<Value> parsed_json;
+ if (!v8::JSON::Parse(context, source_text).ToLocal(&parsed_json)) {
+ return MaybeLocal<Module>();
+ }
+
+ std::vector<Local<String>> export_names{
+ String::NewFromUtf8(isolate, "default").ToLocalChecked()};
+
+ module = v8::Module::CreateSyntheticModule(
+ isolate,
+ String::NewFromUtf8(isolate, file_name.c_str()).ToLocalChecked(),
+ export_names, Shell::JSONModuleEvaluationSteps);
+
+ CHECK(d->json_module_to_parsed_json_map
+ .insert(std::make_pair(Global<Module>(isolate, module),
+ Global<Value>(isolate, parsed_json)))
+ .second);
+ } else {
+ UNREACHABLE();
}
- CHECK(d->specifier_to_module_map
- .insert(std::make_pair(file_name, Global<Module>(isolate, module)))
+ CHECK(d->module_map
+ .insert(std::make_pair(std::make_pair(file_name, module_type),
+ Global<Module>(isolate, module)))
.second);
CHECK(d->module_to_specifier_map
.insert(std::make_pair(Global<Module>(isolate, module), file_name))
@@ -938,8 +997,23 @@ MaybeLocal<Module> Shell::FetchModuleTree(Local<Module> referrer,
Local<String> name = module_request->GetSpecifier();
std::string absolute_path =
NormalizePath(ToSTLString(isolate, name), dir_name);
- if (d->specifier_to_module_map.count(absolute_path)) continue;
- if (FetchModuleTree(module, context, absolute_path).IsEmpty()) {
+ Local<FixedArray> import_assertions = module_request->GetImportAssertions();
+ ModuleType request_module_type =
+ ModuleEmbedderData::ModuleTypeFromImportAssertions(
+ context, import_assertions, true);
+
+ if (request_module_type == ModuleType::kInvalid) {
+ Throw(isolate, "Invalid module type was asserted");
+ return MaybeLocal<Module>();
+ }
+
+ if (d->module_map.count(
+ std::make_pair(absolute_path, request_module_type))) {
+ continue;
+ }
+
+ if (FetchModuleTree(module, context, absolute_path, request_module_type)
+ .IsEmpty()) {
return MaybeLocal<Module>();
}
}
@@ -947,19 +1021,53 @@ MaybeLocal<Module> Shell::FetchModuleTree(Local<Module> referrer,
return module;
}
+MaybeLocal<Value> Shell::JSONModuleEvaluationSteps(Local<Context> context,
+ Local<Module> module) {
+ Isolate* isolate = context->GetIsolate();
+
+ ModuleEmbedderData* d = GetModuleDataFromContext(context);
+ auto json_value_it =
+ d->json_module_to_parsed_json_map.find(Global<Module>(isolate, module));
+ CHECK(json_value_it != d->json_module_to_parsed_json_map.end());
+ Local<Value> json_value = json_value_it->second.Get(isolate);
+
+ TryCatch try_catch(isolate);
+ Maybe<bool> result = module->SetSyntheticModuleExport(
+ isolate,
+ String::NewFromUtf8Literal(isolate, "default",
+ NewStringType::kInternalized),
+ json_value);
+
+ // Setting the default export should never fail.
+ CHECK(!try_catch.HasCaught());
+ CHECK(!result.IsNothing() && result.FromJust());
+
+ if (i::FLAG_harmony_top_level_await) {
+ Local<Promise::Resolver> resolver =
+ Promise::Resolver::New(context).ToLocalChecked();
+ resolver->Resolve(context, Undefined(isolate)).ToChecked();
+ return resolver->GetPromise();
+ }
+
+ return Undefined(isolate);
+}
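
Editor's sketch (not part of the patch): the JSON-module path above parses the file with v8::JSON::Parse, wraps the result in a synthetic module whose only export is "default", and fills that export in JSONModuleEvaluationSteps. The following minimal embedder sketch restates that pattern; MakeJsonModule, JsonEvaluationSteps, and g_json_exports are hypothetical names invented here (d8 keys its map on Global<Module> inside ModuleEmbedderData), and the top-level-await resolver branch is omitted.

#include <map>
#include <vector>
#include "include/v8.h"

namespace {
// Hypothetical bookkeeping for this sketch only; keyed by identity hash
// instead of Global<Module>.
std::map<int, v8::Global<v8::Value>> g_json_exports;

v8::MaybeLocal<v8::Value> JsonEvaluationSteps(v8::Local<v8::Context> context,
                                              v8::Local<v8::Module> module) {
  v8::Isolate* isolate = context->GetIsolate();
  auto it = g_json_exports.find(module->GetIdentityHash());
  if (it == g_json_exports.end()) return v8::MaybeLocal<v8::Value>();
  // Copy the parsed JSON value into the module's single "default" export.
  module
      ->SetSyntheticModuleExport(
          isolate, v8::String::NewFromUtf8Literal(isolate, "default"),
          it->second.Get(isolate))
      .Check();
  return v8::Undefined(isolate);
}
}  // namespace

v8::MaybeLocal<v8::Module> MakeJsonModule(v8::Local<v8::Context> context,
                                          v8::Local<v8::String> name,
                                          v8::Local<v8::String> json_text) {
  v8::Isolate* isolate = context->GetIsolate();
  v8::Local<v8::Value> parsed;
  if (!v8::JSON::Parse(context, json_text).ToLocal(&parsed)) return {};
  std::vector<v8::Local<v8::String>> export_names{
      v8::String::NewFromUtf8Literal(isolate, "default")};
  v8::Local<v8::Module> module = v8::Module::CreateSyntheticModule(
      isolate, name, export_names, JsonEvaluationSteps);
  g_json_exports[module->GetIdentityHash()].Reset(isolate, parsed);
  return module;
}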
+
struct DynamicImportData {
DynamicImportData(Isolate* isolate_, Local<String> referrer_,
Local<String> specifier_,
+ Local<FixedArray> import_assertions_,
Local<Promise::Resolver> resolver_)
: isolate(isolate_) {
referrer.Reset(isolate, referrer_);
specifier.Reset(isolate, specifier_);
+ import_assertions.Reset(isolate, import_assertions_);
resolver.Reset(isolate, resolver_);
}
Isolate* isolate;
Global<String> referrer;
Global<String> specifier;
+ Global<FixedArray> import_assertions;
Global<Promise::Resolver> resolver;
};
@@ -1020,15 +1128,16 @@ void Shell::ModuleResolutionFailureCallback(
MaybeLocal<Promise> Shell::HostImportModuleDynamically(
Local<Context> context, Local<ScriptOrModule> referrer,
- Local<String> specifier) {
+ Local<String> specifier, Local<FixedArray> import_assertions) {
Isolate* isolate = context->GetIsolate();
MaybeLocal<Promise::Resolver> maybe_resolver =
Promise::Resolver::New(context);
Local<Promise::Resolver> resolver;
if (maybe_resolver.ToLocal(&resolver)) {
- DynamicImportData* data = new DynamicImportData(
- isolate, referrer->GetResourceName().As<String>(), specifier, resolver);
+ DynamicImportData* data =
+ new DynamicImportData(isolate, referrer->GetResourceName().As<String>(),
+ specifier, import_assertions, resolver);
PerIsolateData::Get(isolate)->AddDynamicImportData(data);
isolate->EnqueueMicrotask(Shell::DoHostImportModuleDynamically, data);
return resolver->GetPromise();
@@ -1064,6 +1173,8 @@ void Shell::DoHostImportModuleDynamically(void* import_data) {
Local<String> referrer(import_data_->referrer.Get(isolate));
Local<String> specifier(import_data_->specifier.Get(isolate));
+ Local<FixedArray> import_assertions(
+ import_data_->import_assertions.Get(isolate));
Local<Promise::Resolver> resolver(import_data_->resolver.Get(isolate));
PerIsolateData* data = PerIsolateData::Get(isolate);
@@ -1072,21 +1183,33 @@ void Shell::DoHostImportModuleDynamically(void* import_data) {
Local<Context> realm = data->realms_[data->realm_current_].Get(isolate);
Context::Scope context_scope(realm);
+ ModuleType module_type = ModuleEmbedderData::ModuleTypeFromImportAssertions(
+ realm, import_assertions, false);
+
+ TryCatch try_catch(isolate);
+ try_catch.SetVerbose(true);
+
+ if (module_type == ModuleType::kInvalid) {
+ Throw(isolate, "Invalid module type was asserted");
+ CHECK(try_catch.HasCaught());
+ resolver->Reject(realm, try_catch.Exception()).ToChecked();
+ return;
+ }
+
std::string source_url = ToSTLString(isolate, referrer);
std::string dir_name =
DirName(NormalizePath(source_url, GetWorkingDirectory()));
std::string file_name = ToSTLString(isolate, specifier);
std::string absolute_path = NormalizePath(file_name, dir_name);
- TryCatch try_catch(isolate);
- try_catch.SetVerbose(true);
-
ModuleEmbedderData* d = GetModuleDataFromContext(realm);
Local<Module> root_module;
- auto module_it = d->specifier_to_module_map.find(absolute_path);
- if (module_it != d->specifier_to_module_map.end()) {
+ auto module_it =
+ d->module_map.find(std::make_pair(absolute_path, module_type));
+ if (module_it != d->module_map.end()) {
root_module = module_it->second.Get(isolate);
- } else if (!FetchModuleTree(Local<Module>(), realm, absolute_path)
+ } else if (!FetchModuleTree(Local<Module>(), realm, absolute_path,
+ module_type)
.ToLocal(&root_module)) {
CHECK(try_catch.HasCaught());
resolver->Reject(realm, try_catch.Exception()).ToChecked();
@@ -1154,7 +1277,8 @@ bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
Local<Module> root_module;
- if (!FetchModuleTree(Local<Module>(), realm, absolute_path)
+ if (!FetchModuleTree(Local<Module>(), realm, absolute_path,
+ ModuleType::kJavaScript)
.ToLocal(&root_module)) {
CHECK(try_catch.HasCaught());
ReportException(isolate, &try_catch);
@@ -1403,7 +1527,12 @@ void Shell::RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Handle<i::JSGlobalProxy>::cast(i_object)->IsDetached()) {
return;
}
- int index = data->RealmFind(object->CreationContext());
+ Local<Context> creation_context;
+ if (!object->GetCreationContext().ToLocal(&creation_context)) {
+ Throw(args.GetIsolate(), "object doesn't have creation context");
+ return;
+ }
+ int index = data->RealmFind(creation_context);
if (index == -1) return;
args.GetReturnValue().Set(index);
}
@@ -1555,7 +1684,8 @@ void Shell::RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args) {
Throw(args.GetIsolate(), "Invalid argument");
return;
}
- ScriptOrigin origin(String::NewFromUtf8Literal(isolate, "(d8)",
+ ScriptOrigin origin(isolate,
+ String::NewFromUtf8Literal(isolate, "(d8)",
NewStringType::kInternalized));
ScriptCompiler::Source script_source(
args[1]->ToString(isolate->GetCurrentContext()).ToLocalChecked(), origin);
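
Editor's sketch (not part of the patch): this hunk, like the Stringify change further below, switches to the ScriptOrigin constructor that takes the isolate as its first argument. A minimal compile-and-run sketch using that constructor, assuming an entered context; the resource name "snippet.js" is arbitrary.

#include "include/v8.h"

v8::MaybeLocal<v8::Value> RunSnippet(v8::Local<v8::Context> context,
                                     v8::Local<v8::String> source_text) {
  v8::Isolate* isolate = context->GetIsolate();
  // The isolate is now the first constructor argument; the remaining
  // parameters keep their defaults.
  v8::ScriptOrigin origin(
      isolate, v8::String::NewFromUtf8Literal(isolate, "snippet.js"));
  v8::ScriptCompiler::Source source(source_text, origin);
  v8::Local<v8::Script> script;
  if (!v8::ScriptCompiler::Compile(context, &source).ToLocal(&script)) {
    return v8::MaybeLocal<v8::Value>();
  }
  return script->Run(context);
}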
@@ -1803,51 +1933,146 @@ void Shell::SetTimeout(const v8::FunctionCallbackInfo<v8::Value>& args) {
PerIsolateData::Get(isolate)->SetTimeout(callback, context);
}
-void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
- Isolate* isolate = args.GetIsolate();
- HandleScope handle_scope(isolate);
- if (args.Length() < 1 || !args[0]->IsString()) {
- Throw(args.GetIsolate(), "1st argument must be string");
- return;
- }
+enum WorkerType { kClassic, kString, kFunction, kInvalid, kNone };
- // d8 honors `options={type: string}`, which means the first argument is
- // not a filename but string of script to be run.
- bool load_from_file = true;
+void ReadWorkerTypeAndArguments(const v8::FunctionCallbackInfo<v8::Value>& args,
+ WorkerType* worker_type,
+ Local<Value>* arguments = nullptr) {
+ Isolate* isolate = args.GetIsolate();
if (args.Length() > 1 && args[1]->IsObject()) {
Local<Object> object = args[1].As<Object>();
Local<Context> context = isolate->GetCurrentContext();
Local<Value> value;
- if (TryGetValue(args.GetIsolate(), context, object, "type")
- .ToLocal(&value) &&
- value->IsString()) {
- Local<String> worker_type = value->ToString(context).ToLocalChecked();
- String::Utf8Value str(isolate, worker_type);
- if (strcmp("string", *str) == 0) {
- load_from_file = false;
- } else if (strcmp("classic", *str) == 0) {
- load_from_file = true;
- } else {
- Throw(args.GetIsolate(), "Unsupported worker type");
- return;
+ if (!TryGetValue(isolate, context, object, "type").ToLocal(&value)) {
+ *worker_type = WorkerType::kNone;
+ return;
+ }
+ if (!value->IsString()) {
+ *worker_type = WorkerType::kInvalid;
+ return;
+ }
+ Local<String> worker_type_string =
+ value->ToString(context).ToLocalChecked();
+ String::Utf8Value str(isolate, worker_type_string);
+ if (strcmp("string", *str) == 0) {
+ *worker_type = WorkerType::kString;
+ } else if (strcmp("classic", *str) == 0) {
+ *worker_type = WorkerType::kClassic;
+ } else if (strcmp("function", *str) == 0) {
+ *worker_type = WorkerType::kFunction;
+ } else {
+ *worker_type = WorkerType::kInvalid;
+ }
+ if (arguments != nullptr) {
+ bool got_arguments =
+ TryGetValue(isolate, context, object, "arguments").ToLocal(arguments);
+ USE(got_arguments);
+ }
+ } else {
+ *worker_type = WorkerType::kNone;
+ }
+}
+
+bool FunctionAndArgumentsToString(Local<Function> function,
+ Local<Value> arguments, Local<String>* source,
+ Isolate* isolate) {
+ Local<Context> context = isolate->GetCurrentContext();
+ MaybeLocal<String> maybe_function_string =
+ function->FunctionProtoToString(context);
+ Local<String> function_string;
+ if (!maybe_function_string.ToLocal(&function_string)) {
+ Throw(isolate, "Failed to convert function to string");
+ return false;
+ }
+ *source = String::NewFromUtf8Literal(isolate, "(");
+ *source = String::Concat(isolate, *source, function_string);
+ Local<String> middle = String::NewFromUtf8Literal(isolate, ")(");
+ *source = String::Concat(isolate, *source, middle);
+ if (!arguments.IsEmpty() && !arguments->IsUndefined()) {
+ if (!arguments->IsArray()) {
+ Throw(isolate, "'arguments' must be an array");
+ return false;
+ }
+ Local<String> comma = String::NewFromUtf8Literal(isolate, ",");
+ Local<Array> array = arguments.As<Array>();
+ for (uint32_t i = 0; i < array->Length(); ++i) {
+ if (i > 0) {
+ *source = String::Concat(isolate, *source, comma);
+ }
+ MaybeLocal<Value> maybe_argument = array->Get(context, i);
+ Local<Value> argument;
+ if (!maybe_argument.ToLocal(&argument)) {
+ Throw(isolate, "Failed to get argument");
+ return false;
}
+ Local<String> argument_string;
+ if (!JSON::Stringify(context, argument).ToLocal(&argument_string)) {
+ Throw(isolate, "Failed to convert argument to string");
+ return false;
+ }
+ *source = String::Concat(isolate, *source, argument_string);
}
}
+ Local<String> suffix = String::NewFromUtf8Literal(isolate, ")");
+ *source = String::Concat(isolate, *source, suffix);
+ return true;
+}
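
Editor's sketch (not part of the patch): FunctionAndArgumentsToString builds a classic-worker source of the shape "( <function source> )( <JSON-stringified arguments, comma-joined> )". The plain std::string mock below, with the hypothetical helper BuildWorkerSource, only makes that resulting shape concrete; it deliberately stays outside the V8 API.

#include <string>
#include <vector>

// Mirrors the shape produced above: "(" + function source + ")(" + args + ")".
std::string BuildWorkerSource(const std::string& function_source,
                              const std::vector<std::string>& json_args) {
  std::string source = "(" + function_source + ")(";
  for (size_t i = 0; i < json_args.size(); ++i) {
    if (i > 0) source += ",";
    source += json_args[i];
  }
  source += ")";
  return source;
}

// BuildWorkerSource("function (a, b) { postMessage(a + b); }", {"1", "2"})
// yields "(function (a, b) { postMessage(a + b); })(1,2)".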
- Local<Value> source;
- if (load_from_file) {
- String::Utf8Value filename(args.GetIsolate(), args[0]);
- source = ReadFile(args.GetIsolate(), *filename);
- if (source.IsEmpty()) {
- Throw(args.GetIsolate(), "Error loading worker script");
+void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ HandleScope handle_scope(isolate);
+ if (args.Length() < 1 || (!args[0]->IsString() && !args[0]->IsFunction())) {
+ Throw(isolate, "1st argument must be a string or a function");
+ return;
+ }
+
+ Local<String> source;
+ if (args[0]->IsFunction()) {
+ // d8 supports `options={type: 'function', arguments:[...]}`, which means
+    // the first argument is a function with the code to be run. Restrictions

+ // apply; in particular the function will be converted to a string and the
+ // Worker constructed based on it.
+ WorkerType worker_type;
+ Local<Value> arguments;
+ ReadWorkerTypeAndArguments(args, &worker_type, &arguments);
+ if (worker_type != WorkerType::kFunction) {
+ Throw(isolate, "Invalid or missing worker type");
+ return;
+ }
+
+ // Source: ( function_to_string )( params )
+ if (!FunctionAndArgumentsToString(args[0].As<Function>(), arguments,
+ &source, isolate)) {
return;
}
} else {
- source = args[0];
+ // d8 honors `options={type: 'string'}`, which means the first argument is
+    // not a filename but a string of script to be run.
+ bool load_from_file = true;
+ WorkerType worker_type;
+ ReadWorkerTypeAndArguments(args, &worker_type);
+ if (worker_type == WorkerType::kString) {
+ load_from_file = false;
+ } else if (worker_type != WorkerType::kNone &&
+ worker_type != WorkerType::kClassic) {
+ Throw(isolate, "Invalid worker type");
+ return;
+ }
+
+ if (load_from_file) {
+ String::Utf8Value filename(isolate, args[0]);
+ source = ReadFile(isolate, *filename);
+ if (source.IsEmpty()) {
+ Throw(args.GetIsolate(), "Error loading worker script");
+ return;
+ }
+ } else {
+ source = args[0].As<String>();
+ }
}
if (!args.IsConstructCall()) {
- Throw(args.GetIsolate(), "Worker must be constructed with new");
+ Throw(isolate, "Worker must be constructed with new");
return;
}
@@ -1862,9 +2087,9 @@ void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
base::MutexGuard lock_guard(workers_mutex_.Pointer());
if (!allow_new_workers_) return;
- String::Utf8Value script(args.GetIsolate(), source);
+ String::Utf8Value script(isolate, source);
if (!*script) {
- Throw(args.GetIsolate(), "Can't get worker script");
+ Throw(isolate, "Can't get worker script");
return;
}
@@ -1878,7 +2103,7 @@ void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
i_isolate, kWorkerSizeEstimate, worker);
args.Holder()->SetInternalField(0, Utils::ToLocal(managed));
if (!Worker::StartWorkerThread(std::move(worker))) {
- Throw(args.GetIsolate(), "Can't start thread");
+ Throw(isolate, "Can't start thread");
return;
}
}
@@ -2184,7 +2409,7 @@ Local<String> Shell::Stringify(Isolate* isolate, Local<Value> value) {
Local<String> source =
String::NewFromUtf8(isolate, stringify_source_).ToLocalChecked();
Local<String> name = String::NewFromUtf8Literal(isolate, "d8-stringify");
- ScriptOrigin origin(name);
+ ScriptOrigin origin(isolate, name);
Local<Script> script =
Script::Compile(context, source, &origin).ToLocalChecked();
stringify_function_.Reset(
@@ -3332,12 +3557,11 @@ void Worker::ProcessMessage(std::unique_ptr<SerializationData> data) {
Local<Object> global = context->Global();
// Get the message handler.
- Local<Value> onmessage = global
- ->Get(context, String::NewFromUtf8Literal(
- isolate_, "onmessage",
- NewStringType::kInternalized))
- .ToLocalChecked();
- if (!onmessage->IsFunction()) {
+ MaybeLocal<Value> maybe_onmessage = global->Get(
+ context, String::NewFromUtf8Literal(isolate_, "onmessage",
+ NewStringType::kInternalized));
+ Local<Value> onmessage;
+ if (!maybe_onmessage.ToLocal(&onmessage) || !onmessage->IsFunction()) {
return;
}
Local<Function> onmessage_fun = onmessage.As<Function>();
@@ -3416,13 +3640,12 @@ void Worker::ExecuteInThread() {
isolate_, source, file_name, Shell::kNoPrintResult,
Shell::kReportExceptions, Shell::kProcessMessageQueue)) {
// Check that there's a message handler
- Local<Value> onmessage =
- global
- ->Get(context, String::NewFromUtf8Literal(
- isolate_, "onmessage",
- NewStringType::kInternalized))
- .ToLocalChecked();
- if (onmessage->IsFunction()) {
+ MaybeLocal<Value> maybe_onmessage = global->Get(
+ context,
+ String::NewFromUtf8Literal(isolate_, "onmessage",
+ NewStringType::kInternalized));
+ Local<Value> onmessage;
+ if (maybe_onmessage.ToLocal(&onmessage) && onmessage->IsFunction()) {
// Now wait for messages
ProcessMessages();
}
diff --git a/deps/v8/src/d8/d8.h b/deps/v8/src/d8/d8.h
index a6a1037cff..a9f6f3bc8b 100644
--- a/deps/v8/src/d8/d8.h
+++ b/deps/v8/src/d8/d8.h
@@ -26,6 +26,8 @@ namespace v8 {
class D8Console;
+enum class ModuleType { kJavaScript, kJSON, kInvalid };
+
namespace internal {
class CancelableTaskManager;
} // namespace internal
@@ -534,7 +536,7 @@ class Shell : public i::AllStatic {
static void RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
static MaybeLocal<Promise> HostImportModuleDynamically(
Local<Context> context, Local<ScriptOrModule> referrer,
- Local<String> specifier);
+ Local<String> specifier, Local<FixedArray> import_assertions);
static void ModuleResolutionSuccessCallback(
const v8::FunctionCallbackInfo<v8::Value>& info);
static void ModuleResolutionFailureCallback(
@@ -630,7 +632,11 @@ class Shell : public i::AllStatic {
int index);
static MaybeLocal<Module> FetchModuleTree(v8::Local<v8::Module> origin_module,
v8::Local<v8::Context> context,
- const std::string& file_name);
+ const std::string& file_name,
+ ModuleType module_type);
+
+ static MaybeLocal<Value> JSONModuleEvaluationSteps(Local<Context> context,
+ Local<Module> module);
template <class T>
static MaybeLocal<T> CompileString(Isolate* isolate, Local<Context> context,
diff --git a/deps/v8/src/debug/arm/debug-arm.cc b/deps/v8/src/debug/arm/debug-arm.cc
index 23460d6767..238bc5b85d 100644
--- a/deps/v8/src/debug/arm/debug-arm.cc
+++ b/deps/v8/src/debug/arm/debug-arm.cc
@@ -37,13 +37,12 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
// - Restart the frame by calling the function.
__ mov(fp, r1);
__ ldr(r1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ ldr(r0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ LeaveFrame(StackFrame::INTERNAL);
- __ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldrh(r0,
- FieldMemOperand(r0, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(r2, r0);
-
+  // The arguments are already on the stack (including any necessary padding),
+  // so we should not try to massage the arguments again.
+ __ mov(r2, Operand(kDontAdaptArgumentsSentinel));
__ InvokeFunction(r1, r2, r0, JUMP_FUNCTION);
}
diff --git a/deps/v8/src/debug/arm64/debug-arm64.cc b/deps/v8/src/debug/arm64/debug-arm64.cc
index 251856e284..b12d235983 100644
--- a/deps/v8/src/debug/arm64/debug-arm64.cc
+++ b/deps/v8/src/debug/arm64/debug-arm64.cc
@@ -36,16 +36,14 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
// - Restart the frame by calling the function.
__ Mov(fp, x1);
__ Ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ ldr(x0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ Mov(sp, fp);
__ Pop<TurboAssembler::kAuthLR>(fp, lr);
- __ LoadTaggedPointerField(
- x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrh(x0,
- FieldMemOperand(x0, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(x3, x0);
-
+  // The arguments are already on the stack (including any necessary padding),
+  // so we should not try to massage the arguments again.
+ __ Mov(x3, kDontAdaptArgumentsSentinel);
__ InvokeFunctionWithNewTarget(x1, x3, x0, JUMP_FUNCTION);
}
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index e352acc846..56933602a6 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -77,9 +77,9 @@ std::vector<CoverageBlock> GetSortedBlockData(SharedFunctionInfo shared) {
if (coverage_info.slot_count() == 0) return result;
for (int i = 0; i < coverage_info.slot_count(); i++) {
- const int start_pos = coverage_info.StartSourcePosition(i);
- const int until_pos = coverage_info.EndSourcePosition(i);
- const int count = coverage_info.BlockCount(i);
+ const int start_pos = coverage_info.slots_start_source_position(i);
+ const int until_pos = coverage_info.slots_end_source_position(i);
+ const int count = coverage_info.slots_block_count(i);
DCHECK_NE(kNoSourcePosition, start_pos);
result.emplace_back(start_pos, until_pos, count);
@@ -747,6 +747,10 @@ void Coverage::SelectMode(Isolate* isolate, debug::CoverageMode mode) {
// generated for a function, which can interfere with lazy source positions,
// so just force source position collection whenever there's such a change.
isolate->CollectSourcePositionsForAllBytecodeArrays();
+ // Changing the coverage mode changes the generated bytecode and hence it is
+ // not safe to flush bytecode. Set a flag here, so we can disable bytecode
+ // flushing.
+ isolate->set_disable_bytecode_flushing(true);
}
switch (mode) {
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 77683fde1e..ab5df9b3c9 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -10,7 +10,7 @@
#include "src/common/globals.h"
#include "src/debug/debug-frames.h"
#include "src/debug/debug-scopes.h"
-#include "src/debug/debug-wasm-support.h"
+#include "src/debug/debug-wasm-objects.h"
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
@@ -308,8 +308,6 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ThrowReferenceError) \
V(ThrowSymbolIteratorInvalid) \
/* Strings */ \
- V(StringIncludes) \
- V(StringIndexOf) \
V(StringReplaceOneCharWithString) \
V(StringSubstring) \
V(StringToNumber) \
@@ -351,7 +349,6 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(StringAdd) \
V(StringCharCodeAt) \
V(StringEqual) \
- V(StringIndexOfUnchecked) \
V(StringParseFloat) \
V(StringParseInt) \
V(SymbolDescriptiveString) \
@@ -837,12 +834,30 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
case Builtins::kMapPrototypeClear:
case Builtins::kMapPrototypeDelete:
case Builtins::kMapPrototypeSet:
+ // Date builtins.
+ case Builtins::kDatePrototypeSetDate:
+ case Builtins::kDatePrototypeSetFullYear:
+ case Builtins::kDatePrototypeSetHours:
+ case Builtins::kDatePrototypeSetMilliseconds:
+ case Builtins::kDatePrototypeSetMinutes:
+ case Builtins::kDatePrototypeSetMonth:
+ case Builtins::kDatePrototypeSetSeconds:
+ case Builtins::kDatePrototypeSetTime:
+ case Builtins::kDatePrototypeSetUTCDate:
+ case Builtins::kDatePrototypeSetUTCFullYear:
+ case Builtins::kDatePrototypeSetUTCHours:
+ case Builtins::kDatePrototypeSetUTCMilliseconds:
+ case Builtins::kDatePrototypeSetUTCMinutes:
+ case Builtins::kDatePrototypeSetUTCMonth:
+ case Builtins::kDatePrototypeSetUTCSeconds:
+ case Builtins::kDatePrototypeSetYear:
// RegExp builtins.
case Builtins::kRegExpPrototypeTest:
case Builtins::kRegExpPrototypeExec:
case Builtins::kRegExpPrototypeSplit:
case Builtins::kRegExpPrototypeFlagsGetter:
case Builtins::kRegExpPrototypeGlobalGetter:
+ case Builtins::kRegExpPrototypeHasIndicesGetter:
case Builtins::kRegExpPrototypeIgnoreCaseGetter:
case Builtins::kRegExpPrototypeMatchAll:
case Builtins::kRegExpPrototypeMultilineGetter:
@@ -1088,7 +1103,7 @@ void DebugEvaluate::VerifyTransitiveBuiltins(Isolate* isolate) {
}
CHECK(!failed);
#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) || \
- defined(V8_TARGET_ARCH_MIPS64)
+ defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_RISCV64)
// Isolate-independent builtin calls and jumps do not emit reloc infos
  // on PPC. We try to avoid using PC relative code due to performance
  // issues, especially on older hardware.
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index cd3c7624fe..8a3e4acb88 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -33,9 +33,7 @@ FrameInspector::FrameInspector(CommonFrame* frame, int inlined_frame_index,
JavaScriptFrame* js_frame =
frame->is_java_script() ? javascript_frame() : nullptr;
DCHECK(js_frame || frame->is_wasm());
- has_adapted_arguments_ = js_frame && js_frame->has_adapted_arguments();
is_optimized_ = frame_->is_optimized();
- is_interpreted_ = frame_->is_interpreted();
// Calculate the deoptimized frame.
if (is_optimized_) {
@@ -50,8 +48,7 @@ FrameInspector::FrameInspector(CommonFrame* frame, int inlined_frame_index,
FrameInspector::~FrameInspector() = default;
JavaScriptFrame* FrameInspector::javascript_frame() {
- return frame_->is_arguments_adaptor() ? ArgumentsAdaptorFrame::cast(frame_)
- : JavaScriptFrame::cast(frame_);
+ return JavaScriptFrame::cast(frame_);
}
Handle<Object> FrameInspector::GetParameter(int index) {
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
index 25e050b52f..5d21d0a22d 100644
--- a/deps/v8/src/debug/debug-frames.h
+++ b/deps/v8/src/debug/debug-frames.h
@@ -7,7 +7,7 @@
#include <memory>
-#include "src/deoptimizer/deoptimizer.h"
+#include "src/deoptimizer/deoptimized-frame-info.h"
#include "src/execution/isolate.h"
#include "src/execution/v8threads.h"
#include "src/objects/objects.h"
@@ -59,8 +59,6 @@ class FrameInspector {
Handle<String> function_name_;
int source_position_ = -1;
bool is_optimized_ = false;
- bool is_interpreted_ = false;
- bool has_adapted_arguments_ = false;
bool is_constructor_ = false;
};
diff --git a/deps/v8/src/debug/debug-interface.cc b/deps/v8/src/debug/debug-interface.cc
new file mode 100644
index 0000000000..a53f912625
--- /dev/null
+++ b/deps/v8/src/debug/debug-interface.cc
@@ -0,0 +1,1176 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/debug/debug-interface.h"
+
+#include "src/api/api-inl.h"
+#include "src/debug/debug-coverage.h"
+#include "src/debug/debug-evaluate.h"
+#include "src/debug/debug-property-iterator.h"
+#include "src/debug/debug-type-profile.h"
+#include "src/debug/debug-wasm-objects-inl.h"
+#include "src/debug/debug.h"
+#include "src/execution/vm-state-inl.h"
+#include "src/objects/js-generator-inl.h"
+#include "src/regexp/regexp-stack.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/api/api-macros.h"
+
+namespace v8 {
+namespace debug {
+
+void SetContextId(Local<Context> context, int id) {
+ Utils::OpenHandle(*context)->set_debug_context_id(i::Smi::FromInt(id));
+}
+
+int GetContextId(Local<Context> context) {
+ i::Object value = Utils::OpenHandle(*context)->debug_context_id();
+ return (value.IsSmi()) ? i::Smi::ToInt(value) : 0;
+}
+
+void SetInspector(Isolate* isolate, v8_inspector::V8Inspector* inspector) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i_isolate->set_inspector(inspector);
+}
+
+v8_inspector::V8Inspector* GetInspector(Isolate* isolate) {
+ return reinterpret_cast<i::Isolate*>(isolate)->inspector();
+}
+
+void SetBreakOnNextFunctionCall(Isolate* isolate) {
+ reinterpret_cast<i::Isolate*>(isolate)->debug()->SetBreakOnNextFunctionCall();
+}
+
+void ClearBreakOnNextFunctionCall(Isolate* isolate) {
+ reinterpret_cast<i::Isolate*>(isolate)
+ ->debug()
+ ->ClearBreakOnNextFunctionCall();
+}
+
+MaybeLocal<Array> GetInternalProperties(Isolate* v8_isolate,
+ Local<Value> value) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ i::Handle<i::Object> val = Utils::OpenHandle(*value);
+ i::Handle<i::JSArray> result;
+ if (!i::Runtime::GetInternalProperties(isolate, val).ToHandle(&result))
+ return MaybeLocal<Array>();
+ return Utils::ToLocal(result);
+}
+
+namespace {
+
+void CollectPrivateMethodsAndAccessorsFromContext(
+ i::Isolate* isolate, i::Handle<i::Context> context,
+ i::IsStaticFlag is_static_flag, std::vector<Local<Value>>* names_out,
+ std::vector<Local<Value>>* values_out) {
+ i::Handle<i::ScopeInfo> scope_info(context->scope_info(), isolate);
+ int local_count = scope_info->ContextLocalCount();
+ for (int j = 0; j < local_count; ++j) {
+ i::VariableMode mode = scope_info->ContextLocalMode(j);
+ i::IsStaticFlag flag = scope_info->ContextLocalIsStaticFlag(j);
+ if (!i::IsPrivateMethodOrAccessorVariableMode(mode) ||
+ flag != is_static_flag) {
+ continue;
+ }
+
+ i::Handle<i::String> name(scope_info->ContextLocalName(j), isolate);
+ int context_index = scope_info->ContextHeaderLength() + j;
+ i::Handle<i::Object> slot_value(context->get(context_index), isolate);
+ DCHECK_IMPLIES(mode == i::VariableMode::kPrivateMethod,
+ slot_value->IsJSFunction());
+ DCHECK_IMPLIES(mode != i::VariableMode::kPrivateMethod,
+ slot_value->IsAccessorPair());
+ names_out->push_back(Utils::ToLocal(name));
+ values_out->push_back(Utils::ToLocal(slot_value));
+ }
+}
+
+} // namespace
+
+bool GetPrivateMembers(Local<Context> context, Local<Object> value,
+ std::vector<Local<Value>>* names_out,
+ std::vector<Local<Value>>* values_out) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ LOG_API(isolate, debug, GetPrivateMembers);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ i::Handle<i::JSReceiver> receiver = Utils::OpenHandle(*value);
+ i::Handle<i::JSArray> names;
+ i::Handle<i::FixedArray> values;
+
+ i::PropertyFilter key_filter =
+ static_cast<i::PropertyFilter>(i::PropertyFilter::PRIVATE_NAMES_ONLY);
+ i::Handle<i::FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, keys,
+ i::KeyAccumulator::GetKeys(receiver, i::KeyCollectionMode::kOwnOnly,
+ key_filter,
+ i::GetKeysConversion::kConvertToString),
+ false);
+
+ // Estimate number of private fields and private instance methods/accessors.
+ int private_entries_count = 0;
+ for (int i = 0; i < keys->length(); ++i) {
+ // Exclude the private brand symbols.
+ i::Handle<i::Symbol> key(i::Symbol::cast(keys->get(i)), isolate);
+ if (key->is_private_brand()) {
+ i::Handle<i::Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value, i::Object::GetProperty(isolate, receiver, key),
+ false);
+
+ i::Handle<i::Context> context(i::Context::cast(*value), isolate);
+ i::Handle<i::ScopeInfo> scope_info(context->scope_info(), isolate);
+ // At least one slot contains the brand symbol so it does not count.
+ private_entries_count += (scope_info->ContextLocalCount() - 1);
+ } else {
+ private_entries_count++;
+ }
+ }
+
+ // Estimate number of static private methods/accessors for classes.
+ bool has_static_private_methods_or_accessors = false;
+ if (receiver->IsJSFunction()) {
+ i::Handle<i::JSFunction> func(i::JSFunction::cast(*receiver), isolate);
+ i::Handle<i::SharedFunctionInfo> shared(func->shared(), isolate);
+ if (shared->is_class_constructor() &&
+ shared->has_static_private_methods_or_accessors()) {
+ has_static_private_methods_or_accessors = true;
+ i::Handle<i::Context> context(func->context(), isolate);
+ i::Handle<i::ScopeInfo> scope_info(context->scope_info(), isolate);
+ int local_count = scope_info->ContextLocalCount();
+ for (int j = 0; j < local_count; ++j) {
+ i::VariableMode mode = scope_info->ContextLocalMode(j);
+ i::IsStaticFlag is_static_flag =
+ scope_info->ContextLocalIsStaticFlag(j);
+ if (i::IsPrivateMethodOrAccessorVariableMode(mode) &&
+ is_static_flag == i::IsStaticFlag::kStatic) {
+ private_entries_count += local_count;
+ break;
+ }
+ }
+ }
+ }
+
+ DCHECK(names_out->empty());
+ names_out->reserve(private_entries_count);
+ DCHECK(values_out->empty());
+ values_out->reserve(private_entries_count);
+
+ if (has_static_private_methods_or_accessors) {
+ i::Handle<i::Context> context(i::JSFunction::cast(*receiver).context(),
+ isolate);
+ CollectPrivateMethodsAndAccessorsFromContext(
+ isolate, context, i::IsStaticFlag::kStatic, names_out, values_out);
+ }
+
+ for (int i = 0; i < keys->length(); ++i) {
+ i::Handle<i::Object> obj_key(keys->get(i), isolate);
+ i::Handle<i::Symbol> key(i::Symbol::cast(*obj_key), isolate);
+ CHECK(key->is_private_name());
+ i::Handle<i::Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value, i::Object::GetProperty(isolate, receiver, key), false);
+
+ if (key->is_private_brand()) {
+ DCHECK(value->IsContext());
+ i::Handle<i::Context> context(i::Context::cast(*value), isolate);
+ CollectPrivateMethodsAndAccessorsFromContext(
+ isolate, context, i::IsStaticFlag::kNotStatic, names_out, values_out);
+ } else { // Private fields
+ i::Handle<i::String> name(
+ i::String::cast(i::Symbol::cast(*key).description()), isolate);
+ names_out->push_back(Utils::ToLocal(name));
+ values_out->push_back(Utils::ToLocal(value));
+ }
+ }
+
+ DCHECK_EQ(names_out->size(), values_out->size());
+ DCHECK_LE(names_out->size(), private_entries_count);
+ return true;
+}
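
Editor's sketch (not part of the patch): GetPrivateMembers fills two parallel vectors — member names and the corresponding values (a JSFunction for a private method, an AccessorPair for private accessors, the stored value for a private field). A small consumer, assuming src/debug/debug-interface.h is available as it is to the inspector; DumpPrivateMembers is a name invented for this illustration.

#include <cstdio>
#include <vector>
#include "src/debug/debug-interface.h"

void DumpPrivateMembers(v8::Local<v8::Context> context,
                        v8::Local<v8::Object> object) {
  v8::Isolate* isolate = context->GetIsolate();
  std::vector<v8::Local<v8::Value>> names;
  std::vector<v8::Local<v8::Value>> values;
  if (!v8::debug::GetPrivateMembers(context, object, &names, &values)) return;
  for (size_t i = 0; i < names.size(); ++i) {
    // names[i] is a string; values[i] is a function, an accessor pair, or a
    // field value, matching the entry at the same index.
    v8::String::Utf8Value name(isolate, names[i]);
    std::printf("%s\n", *name ? *name : "<unprintable>");
  }
}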
+
+MaybeLocal<Context> GetCreationContext(Local<Object> value) {
+ i::Handle<i::Object> val = Utils::OpenHandle(*value);
+ if (val->IsJSGlobalProxy()) {
+ return MaybeLocal<Context>();
+ }
+ return value->GetCreationContext();
+}
+
+void ChangeBreakOnException(Isolate* isolate, ExceptionBreakState type) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ internal_isolate->debug()->ChangeBreakOnException(
+ i::BreakException, type == BreakOnAnyException);
+ internal_isolate->debug()->ChangeBreakOnException(i::BreakUncaughtException,
+ type != NoBreakOnException);
+}
+
+void SetBreakPointsActive(Isolate* v8_isolate, bool is_active) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ isolate->debug()->set_break_points_active(is_active);
+}
+
+void PrepareStep(Isolate* v8_isolate, StepAction action) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8_DO_NOT_USE(isolate);
+ CHECK(isolate->debug()->CheckExecutionState());
+ // Clear all current stepping setup.
+ isolate->debug()->ClearStepping();
+ // Prepare step.
+ isolate->debug()->PrepareStep(static_cast<i::StepAction>(action));
+}
+
+void ClearStepping(Isolate* v8_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ // Clear all current stepping setup.
+ isolate->debug()->ClearStepping();
+}
+
+void BreakRightNow(Isolate* v8_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8_DO_NOT_USE(isolate);
+ isolate->debug()->HandleDebugBreak(i::kIgnoreIfAllFramesBlackboxed);
+}
+
+void SetTerminateOnResume(Isolate* v8_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ isolate->debug()->SetTerminateOnResume();
+}
+
+bool CanBreakProgram(Isolate* v8_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8_DO_NOT_USE(isolate);
+ // We cannot break a program if we are currently running a regexp.
+ // TODO(yangguo): fix this exception.
+ return !isolate->regexp_stack()->is_in_use() &&
+ isolate->debug()->AllFramesOnStackAreBlackboxed();
+}
+
+Isolate* Script::GetIsolate() const {
+ return reinterpret_cast<Isolate*>(Utils::OpenHandle(this)->GetIsolate());
+}
+
+ScriptOriginOptions Script::OriginOptions() const {
+ return Utils::OpenHandle(this)->origin_options();
+}
+
+bool Script::WasCompiled() const {
+ return Utils::OpenHandle(this)->compilation_state() ==
+ i::Script::COMPILATION_STATE_COMPILED;
+}
+
+bool Script::IsEmbedded() const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ return script->context_data() ==
+ script->GetReadOnlyRoots().uninitialized_symbol();
+}
+
+int Script::Id() const { return Utils::OpenHandle(this)->id(); }
+
+int Script::LineOffset() const {
+ return Utils::OpenHandle(this)->line_offset();
+}
+
+int Script::ColumnOffset() const {
+ return Utils::OpenHandle(this)->column_offset();
+}
+
+std::vector<int> Script::LineEnds() const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ if (script->type() == i::Script::TYPE_WASM) return std::vector<int>();
+
+ i::Isolate* isolate = script->GetIsolate();
+ i::HandleScope scope(isolate);
+ i::Script::InitLineEnds(isolate, script);
+ CHECK(script->line_ends().IsFixedArray());
+ i::Handle<i::FixedArray> line_ends(i::FixedArray::cast(script->line_ends()),
+ isolate);
+ std::vector<int> result(line_ends->length());
+ for (int i = 0; i < line_ends->length(); ++i) {
+ i::Smi line_end = i::Smi::cast(line_ends->get(i));
+ result[i] = line_end.value();
+ }
+ return result;
+}
+
+MaybeLocal<String> Script::Name() const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Isolate* isolate = script->GetIsolate();
+ i::HandleScope handle_scope(isolate);
+ i::Handle<i::Object> value(script->name(), isolate);
+ if (!value->IsString()) return MaybeLocal<String>();
+ return Utils::ToLocal(
+ handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
+}
+
+MaybeLocal<String> Script::SourceURL() const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Isolate* isolate = script->GetIsolate();
+ i::HandleScope handle_scope(isolate);
+ i::Handle<i::Object> value(script->source_url(), isolate);
+ if (!value->IsString()) return MaybeLocal<String>();
+ return Utils::ToLocal(
+ handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
+}
+
+MaybeLocal<String> Script::SourceMappingURL() const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Isolate* isolate = script->GetIsolate();
+ i::HandleScope handle_scope(isolate);
+ i::Handle<i::Object> value(script->source_mapping_url(), isolate);
+ if (!value->IsString()) return MaybeLocal<String>();
+ return Utils::ToLocal(
+ handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
+}
+
+Maybe<int> Script::ContextId() const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Isolate* isolate = script->GetIsolate();
+ i::HandleScope handle_scope(isolate);
+ i::Object value = script->context_data();
+ if (value.IsSmi()) return Just(i::Smi::ToInt(value));
+ return Nothing<int>();
+}
+
+MaybeLocal<String> Script::Source() const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Isolate* isolate = script->GetIsolate();
+ i::HandleScope handle_scope(isolate);
+ i::Handle<i::Object> value(script->source(), isolate);
+ if (!value->IsString()) return MaybeLocal<String>();
+ return Utils::ToLocal(
+ handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
+}
+
+bool Script::IsWasm() const {
+ return Utils::OpenHandle(this)->type() == i::Script::TYPE_WASM;
+}
+
+bool Script::IsModule() const {
+ return Utils::OpenHandle(this)->origin_options().IsModule();
+}
+
+namespace {
+
+int GetSmiValue(i::Handle<i::FixedArray> array, int index) {
+ return i::Smi::ToInt(array->get(index));
+}
+
+bool CompareBreakLocation(const i::BreakLocation& loc1,
+ const i::BreakLocation& loc2) {
+ return loc1.position() < loc2.position();
+}
+
+} // namespace
+
+bool Script::GetPossibleBreakpoints(
+ const Location& start, const Location& end, bool restrict_to_function,
+ std::vector<BreakLocation>* locations) const {
+ CHECK(!start.IsEmpty());
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ if (script->type() == i::Script::TYPE_WASM) {
+ i::wasm::NativeModule* native_module = script->wasm_native_module();
+ return i::WasmScript::GetPossibleBreakpoints(native_module, start, end,
+ locations);
+ }
+
+ i::Isolate* isolate = script->GetIsolate();
+ i::Script::InitLineEnds(isolate, script);
+ CHECK(script->line_ends().IsFixedArray());
+ i::Handle<i::FixedArray> line_ends =
+ i::Handle<i::FixedArray>::cast(i::handle(script->line_ends(), isolate));
+ CHECK(line_ends->length());
+
+ int start_offset = GetSourceOffset(start);
+ int end_offset = end.IsEmpty()
+ ? GetSmiValue(line_ends, line_ends->length() - 1) + 1
+ : GetSourceOffset(end);
+ if (start_offset >= end_offset) return true;
+
+ std::vector<i::BreakLocation> v8_locations;
+ if (!isolate->debug()->GetPossibleBreakpoints(
+ script, start_offset, end_offset, restrict_to_function,
+ &v8_locations)) {
+ return false;
+ }
+
+ std::sort(v8_locations.begin(), v8_locations.end(), CompareBreakLocation);
+ int current_line_end_index = 0;
+ for (const auto& v8_location : v8_locations) {
+ int offset = v8_location.position();
+ while (offset > GetSmiValue(line_ends, current_line_end_index)) {
+ ++current_line_end_index;
+ CHECK(current_line_end_index < line_ends->length());
+ }
+ int line_offset = 0;
+
+ if (current_line_end_index > 0) {
+ line_offset = GetSmiValue(line_ends, current_line_end_index - 1) + 1;
+ }
+ locations->emplace_back(
+ current_line_end_index + script->line_offset(),
+ offset - line_offset +
+ (current_line_end_index == 0 ? script->column_offset() : 0),
+ v8_location.type());
+ }
+ return true;
+}
+
+int Script::GetSourceOffset(const Location& location) const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ if (script->type() == i::Script::TYPE_WASM) {
+ DCHECK_EQ(0, location.GetLineNumber());
+ return location.GetColumnNumber();
+ }
+
+ int line = std::max(location.GetLineNumber() - script->line_offset(), 0);
+ int column = location.GetColumnNumber();
+ if (line == 0) {
+ column = std::max(0, column - script->column_offset());
+ }
+
+ i::Script::InitLineEnds(script->GetIsolate(), script);
+ CHECK(script->line_ends().IsFixedArray());
+ i::Handle<i::FixedArray> line_ends = i::Handle<i::FixedArray>::cast(
+ i::handle(script->line_ends(), script->GetIsolate()));
+ CHECK(line_ends->length());
+ if (line >= line_ends->length())
+ return GetSmiValue(line_ends, line_ends->length() - 1);
+ int line_offset = GetSmiValue(line_ends, line);
+ if (line == 0) return std::min(column, line_offset);
+ int prev_line_offset = GetSmiValue(line_ends, line - 1);
+ return std::min(prev_line_offset + column + 1, line_offset);
+}
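
Editor's sketch (not part of the patch): GetSourceOffset maps a (line, column) Location back to a character offset via the script's line-end table — line 0 columns are clamped to the first line end, and line N starts one character past the end of line N-1. The plain-C++ restatement below covers only that arithmetic, assuming a non-empty std::vector<int> of line-end offsets instead of the FixedArray used above.

#include <algorithm>
#include <vector>

int SourceOffsetFor(const std::vector<int>& line_ends, int line, int column) {
  // Past the last line: clamp to the end of the script.
  if (line >= static_cast<int>(line_ends.size())) return line_ends.back();
  // Line 0 starts at offset 0, so the column is the offset (clamped).
  if (line == 0) return std::min(column, line_ends[0]);
  // Line N starts one character after the end offset of line N-1.
  return std::min(line_ends[line - 1] + column + 1, line_ends[line]);
}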
+
+Location Script::GetSourceLocation(int offset) const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Script::PositionInfo info;
+ i::Script::GetPositionInfo(script, offset, &info, i::Script::WITH_OFFSET);
+ return Location(info.line, info.column);
+}
+
+bool Script::SetScriptSource(Local<String> newSource, bool preview,
+ LiveEditResult* result) const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Isolate* isolate = script->GetIsolate();
+ return isolate->debug()->SetScriptSource(
+ script, Utils::OpenHandle(*newSource), preview, result);
+}
+
+bool Script::SetBreakpoint(Local<String> condition, Location* location,
+ BreakpointId* id) const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Isolate* isolate = script->GetIsolate();
+ int offset = GetSourceOffset(*location);
+ if (!isolate->debug()->SetBreakPointForScript(
+ script, Utils::OpenHandle(*condition), &offset, id)) {
+ return false;
+ }
+ *location = GetSourceLocation(offset);
+ return true;
+}
+
+bool Script::SetBreakpointOnScriptEntry(BreakpointId* id) const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Isolate* isolate = script->GetIsolate();
+ if (script->type() == i::Script::TYPE_WASM) {
+ int position = i::WasmScript::kOnEntryBreakpointPosition;
+ return isolate->debug()->SetBreakPointForScript(
+ script, isolate->factory()->empty_string(), &position, id);
+ }
+ i::SharedFunctionInfo::ScriptIterator it(isolate, *script);
+ for (i::SharedFunctionInfo sfi = it.Next(); !sfi.is_null(); sfi = it.Next()) {
+ if (sfi.is_toplevel()) {
+ return isolate->debug()->SetBreakpointForFunction(
+ handle(sfi, isolate), isolate->factory()->empty_string(), id);
+ }
+ }
+ return false;
+}
+
+void Script::RemoveWasmBreakpoint(BreakpointId id) {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Isolate* isolate = script->GetIsolate();
+ isolate->debug()->RemoveBreakpointForWasmScript(script, id);
+}
+
+void RemoveBreakpoint(Isolate* v8_isolate, BreakpointId id) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ i::HandleScope handle_scope(isolate);
+ isolate->debug()->RemoveBreakpoint(id);
+}
+
+Platform* GetCurrentPlatform() { return i::V8::GetCurrentPlatform(); }
+
+void ForceGarbageCollection(
+ Isolate* isolate,
+ EmbedderHeapTracer::EmbedderStackState embedder_stack_state) {
+ i::Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
+ heap->SetEmbedderStackStateForNextFinalization(embedder_stack_state);
+ isolate->LowMemoryNotification();
+}
+
+WasmScript* WasmScript::Cast(Script* script) {
+ CHECK(script->IsWasm());
+ return static_cast<WasmScript*>(script);
+}
+
+WasmScript::DebugSymbolsType WasmScript::GetDebugSymbolType() const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ DCHECK_EQ(i::Script::TYPE_WASM, script->type());
+ switch (script->wasm_native_module()->module()->debug_symbols.type) {
+ case i::wasm::WasmDebugSymbols::Type::None:
+ return WasmScript::DebugSymbolsType::None;
+ case i::wasm::WasmDebugSymbols::Type::EmbeddedDWARF:
+ return WasmScript::DebugSymbolsType::EmbeddedDWARF;
+ case i::wasm::WasmDebugSymbols::Type::ExternalDWARF:
+ return WasmScript::DebugSymbolsType::ExternalDWARF;
+ case i::wasm::WasmDebugSymbols::Type::SourceMap:
+ return WasmScript::DebugSymbolsType::SourceMap;
+ }
+}
+
+MemorySpan<const char> WasmScript::ExternalSymbolsURL() const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ DCHECK_EQ(i::Script::TYPE_WASM, script->type());
+
+ const i::wasm::WasmDebugSymbols& symbols =
+ script->wasm_native_module()->module()->debug_symbols;
+ if (symbols.external_url.is_empty()) return {};
+
+ internal::wasm::ModuleWireBytes wire_bytes(
+ script->wasm_native_module()->wire_bytes());
+ i::wasm::WasmName external_url =
+ wire_bytes.GetNameOrNull(symbols.external_url);
+ return {external_url.data(), external_url.size()};
+}
+
+int WasmScript::NumFunctions() const {
+ i::DisallowGarbageCollection no_gc;
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ DCHECK_EQ(i::Script::TYPE_WASM, script->type());
+ i::wasm::NativeModule* native_module = script->wasm_native_module();
+ const i::wasm::WasmModule* module = native_module->module();
+ DCHECK_GE(i::kMaxInt, module->functions.size());
+ return static_cast<int>(module->functions.size());
+}
+
+int WasmScript::NumImportedFunctions() const {
+ i::DisallowGarbageCollection no_gc;
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ DCHECK_EQ(i::Script::TYPE_WASM, script->type());
+ i::wasm::NativeModule* native_module = script->wasm_native_module();
+ const i::wasm::WasmModule* module = native_module->module();
+ DCHECK_GE(i::kMaxInt, module->num_imported_functions);
+ return static_cast<int>(module->num_imported_functions);
+}
+
+MemorySpan<const uint8_t> WasmScript::Bytecode() const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Vector<const uint8_t> wire_bytes =
+ script->wasm_native_module()->wire_bytes();
+ return {wire_bytes.begin(), wire_bytes.size()};
+}
+
+std::pair<int, int> WasmScript::GetFunctionRange(int function_index) const {
+ i::DisallowGarbageCollection no_gc;
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ DCHECK_EQ(i::Script::TYPE_WASM, script->type());
+ i::wasm::NativeModule* native_module = script->wasm_native_module();
+ const i::wasm::WasmModule* module = native_module->module();
+ DCHECK_LE(0, function_index);
+ DCHECK_GT(module->functions.size(), function_index);
+ const i::wasm::WasmFunction& func = module->functions[function_index];
+ DCHECK_GE(i::kMaxInt, func.code.offset());
+ DCHECK_GE(i::kMaxInt, func.code.end_offset());
+ return std::make_pair(static_cast<int>(func.code.offset()),
+ static_cast<int>(func.code.end_offset()));
+}
+
+int WasmScript::GetContainingFunction(int byte_offset) const {
+ i::DisallowGarbageCollection no_gc;
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ DCHECK_EQ(i::Script::TYPE_WASM, script->type());
+ i::wasm::NativeModule* native_module = script->wasm_native_module();
+ const i::wasm::WasmModule* module = native_module->module();
+ DCHECK_LE(0, byte_offset);
+
+ return i::wasm::GetContainingWasmFunction(module, byte_offset);
+}
+
+uint32_t WasmScript::GetFunctionHash(int function_index) {
+ i::DisallowGarbageCollection no_gc;
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ DCHECK_EQ(i::Script::TYPE_WASM, script->type());
+ i::wasm::NativeModule* native_module = script->wasm_native_module();
+ const i::wasm::WasmModule* module = native_module->module();
+ DCHECK_LE(0, function_index);
+ DCHECK_GT(module->functions.size(), function_index);
+ const i::wasm::WasmFunction& func = module->functions[function_index];
+ i::wasm::ModuleWireBytes wire_bytes(native_module->wire_bytes());
+ i::Vector<const i::byte> function_bytes = wire_bytes.GetFunctionBytes(&func);
+ // TODO(herhut): Maybe also take module, name and signature into account.
+ return i::StringHasher::HashSequentialString(function_bytes.begin(),
+ function_bytes.length(), 0);
+}
+
+int WasmScript::CodeOffset() const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ DCHECK_EQ(i::Script::TYPE_WASM, script->type());
+ i::wasm::NativeModule* native_module = script->wasm_native_module();
+ const i::wasm::WasmModule* module = native_module->module();
+
+ // If the module contains at least one function, the code offset must have
+ // been initialized, and it cannot be zero.
+ DCHECK_IMPLIES(module->num_declared_functions > 0,
+ module->code.offset() != 0);
+ return module->code.offset();
+}
+
+Location::Location(int line_number, int column_number)
+ : line_number_(line_number),
+ column_number_(column_number),
+ is_empty_(false) {}
+
+Location::Location()
+ : line_number_(Function::kLineOffsetNotFound),
+ column_number_(Function::kLineOffsetNotFound),
+ is_empty_(true) {}
+
+int Location::GetLineNumber() const {
+ DCHECK(!IsEmpty());
+ return line_number_;
+}
+
+int Location::GetColumnNumber() const {
+ DCHECK(!IsEmpty());
+ return column_number_;
+}
+
+bool Location::IsEmpty() const { return is_empty_; }
+
+void GetLoadedScripts(Isolate* v8_isolate,
+ PersistentValueVector<Script>& scripts) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ {
+ i::DisallowGarbageCollection no_gc;
+ i::Script::Iterator iterator(isolate);
+ for (i::Script script = iterator.Next(); !script.is_null();
+ script = iterator.Next()) {
+ if (script.type() == i::Script::TYPE_NORMAL ||
+ script.type() == i::Script::TYPE_WASM) {
+ if (script.HasValidSource()) {
+ i::HandleScope handle_scope(isolate);
+ i::Handle<i::Script> script_handle(script, isolate);
+ scripts.Append(ToApiHandle<Script>(script_handle));
+ }
+ }
+ }
+ }
+}
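
Editor's sketch (not part of the patch): GetLoadedScripts appends every normal or Wasm script with valid source into a caller-supplied PersistentValueVector. A minimal caller; ListScriptIds and its printf body are illustrative only.

#include <cstdio>
#include "include/v8-util.h"
#include "src/debug/debug-interface.h"

void ListScriptIds(v8::Isolate* isolate) {
  v8::HandleScope handle_scope(isolate);
  v8::PersistentValueVector<v8::debug::Script> scripts(isolate);
  v8::debug::GetLoadedScripts(isolate, scripts);
  for (size_t i = 0; i < scripts.Size(); ++i) {
    // Each entry is a v8::debug::Script; Id() identifies it for its lifetime.
    std::printf("script id: %d\n", scripts.Get(i)->Id());
  }
}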
+
+MaybeLocal<UnboundScript> CompileInspectorScript(Isolate* v8_isolate,
+ Local<String> source) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(isolate, UnboundScript);
+ i::Handle<i::String> str = Utils::OpenHandle(*source);
+ i::Handle<i::SharedFunctionInfo> result;
+ {
+ ScriptOriginOptions origin_options;
+ i::ScriptData* script_data = nullptr;
+ i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
+ i::Compiler::GetSharedFunctionInfoForScript(
+ isolate, str, i::Compiler::ScriptDetails(), origin_options, nullptr,
+ script_data, ScriptCompiler::kNoCompileOptions,
+ ScriptCompiler::kNoCacheBecauseInspector,
+ i::FLAG_expose_inspector_scripts ? i::NOT_NATIVES_CODE
+ : i::INSPECTOR_CODE);
+ has_pending_exception = !maybe_function_info.ToHandle(&result);
+ RETURN_ON_FAILED_EXECUTION(UnboundScript);
+ }
+ RETURN_ESCAPED(ToApiHandle<UnboundScript>(result));
+}
+
+void TierDownAllModulesPerIsolate(Isolate* v8_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ isolate->wasm_engine()->TierDownAllModulesPerIsolate(isolate);
+}
+
+void TierUpAllModulesPerIsolate(Isolate* v8_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ isolate->wasm_engine()->TierUpAllModulesPerIsolate(isolate);
+}
+
+void SetDebugDelegate(Isolate* v8_isolate, DebugDelegate* delegate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ isolate->debug()->SetDebugDelegate(delegate);
+}
+
+void SetAsyncEventDelegate(Isolate* v8_isolate, AsyncEventDelegate* delegate) {
+ reinterpret_cast<i::Isolate*>(v8_isolate)->set_async_event_delegate(delegate);
+}
+
+void ResetBlackboxedStateCache(Isolate* v8_isolate, Local<Script> script) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ i::DisallowGarbageCollection no_gc;
+ i::SharedFunctionInfo::ScriptIterator iter(isolate,
+ *Utils::OpenHandle(*script));
+ for (i::SharedFunctionInfo info = iter.Next(); !info.is_null();
+ info = iter.Next()) {
+ if (info.HasDebugInfo()) {
+ info.GetDebugInfo().set_computed_debug_is_blackboxed(false);
+ }
+ }
+}
+
+int EstimatedValueSize(Isolate* v8_isolate, Local<Value> value) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ i::Handle<i::Object> object = Utils::OpenHandle(*value);
+ if (object->IsSmi()) return i::kTaggedSize;
+ CHECK(object->IsHeapObject());
+ return i::Handle<i::HeapObject>::cast(object)->Size();
+}
+
+void AccessorPair::CheckCast(Value* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ Utils::ApiCheck(obj->IsAccessorPair(), "v8::debug::AccessorPair::Cast",
+ "Value is not a v8::debug::AccessorPair");
+}
+
+void WasmValueObject::CheckCast(Value* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ Utils::ApiCheck(obj->IsWasmValueObject(), "v8::debug::WasmValueObject::Cast",
+ "Value is not a v8::debug::WasmValueObject");
+}
+
+Local<Function> GetBuiltin(Isolate* v8_isolate, Builtin builtin) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ i::HandleScope handle_scope(isolate);
+
+ CHECK_EQ(builtin, kStringToLowerCase);
+ i::Builtins::Name builtin_id = i::Builtins::kStringPrototypeToLocaleLowerCase;
+
+ i::Factory* factory = isolate->factory();
+ i::Handle<i::String> name = isolate->factory()->empty_string();
+ i::Handle<i::NativeContext> context(isolate->native_context());
+ i::Handle<i::SharedFunctionInfo> info =
+ factory->NewSharedFunctionInfoForBuiltin(name, builtin_id);
+ info->set_language_mode(i::LanguageMode::kStrict);
+ i::Handle<i::JSFunction> fun =
+ i::Factory::JSFunctionBuilder{isolate, info, context}
+ .set_map(isolate->strict_function_without_prototype_map())
+ .Build();
+
+ fun->shared().set_internal_formal_parameter_count(0);
+ fun->shared().set_length(0);
+ return Utils::ToLocal(handle_scope.CloseAndEscape(fun));
+}
+
+void SetConsoleDelegate(Isolate* v8_isolate, ConsoleDelegate* delegate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ isolate->set_console_delegate(delegate);
+}
+
+ConsoleCallArguments::ConsoleCallArguments(
+ const v8::FunctionCallbackInfo<v8::Value>& info)
+ : v8::FunctionCallbackInfo<v8::Value>(nullptr, info.values_, info.length_) {
+}
+
+ConsoleCallArguments::ConsoleCallArguments(
+ const internal::BuiltinArguments& args)
+ : v8::FunctionCallbackInfo<v8::Value>(
+ nullptr,
+ // Drop the first argument (receiver, i.e. the "console" object).
+ args.length() > 1 ? args.address_of_first_argument() : nullptr,
+ args.length() - 1) {}
+
+// Marked V8_DEPRECATED.
+int GetStackFrameId(v8::Local<v8::StackFrame> frame) { return 0; }
+
+v8::Local<v8::StackTrace> GetDetailedStackTrace(
+ Isolate* v8_isolate, v8::Local<v8::Object> v8_error) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ i::Handle<i::JSReceiver> error = Utils::OpenHandle(*v8_error);
+ if (!error->IsJSObject()) {
+ return v8::Local<v8::StackTrace>();
+ }
+ i::Handle<i::FixedArray> stack_trace =
+ isolate->GetDetailedStackTrace(i::Handle<i::JSObject>::cast(error));
+ return Utils::StackTraceToLocal(stack_trace);
+}
+
+MaybeLocal<Script> GeneratorObject::Script() {
+ i::Handle<i::JSGeneratorObject> obj = Utils::OpenHandle(this);
+ i::Object maybe_script = obj->function().shared().script();
+ if (!maybe_script.IsScript()) return {};
+ i::Handle<i::Script> script(i::Script::cast(maybe_script), obj->GetIsolate());
+ return ToApiHandle<v8::debug::Script>(script);
+}
+
+Local<Function> GeneratorObject::Function() {
+ i::Handle<i::JSGeneratorObject> obj = Utils::OpenHandle(this);
+ return Utils::ToLocal(handle(obj->function(), obj->GetIsolate()));
+}
+
+Location GeneratorObject::SuspendedLocation() {
+ i::Handle<i::JSGeneratorObject> obj = Utils::OpenHandle(this);
+ CHECK(obj->is_suspended());
+ i::Object maybe_script = obj->function().shared().script();
+ if (!maybe_script.IsScript()) return Location();
+ i::Isolate* isolate = obj->GetIsolate();
+ i::Handle<i::Script> script(i::Script::cast(maybe_script), isolate);
+ i::Script::PositionInfo info;
+ i::SharedFunctionInfo::EnsureSourcePositionsAvailable(
+ isolate, i::handle(obj->function().shared(), isolate));
+ i::Script::GetPositionInfo(script, obj->source_position(), &info,
+ i::Script::WITH_OFFSET);
+ return Location(info.line, info.column);
+}
+
+bool GeneratorObject::IsSuspended() {
+ return Utils::OpenHandle(this)->is_suspended();
+}
+
+v8::Local<GeneratorObject> GeneratorObject::Cast(v8::Local<v8::Value> value) {
+ CHECK(value->IsGeneratorObject());
+ return ToApiHandle<GeneratorObject>(Utils::OpenHandle(*value));
+}
+
+MaybeLocal<v8::Value> EvaluateGlobal(v8::Isolate* isolate,
+ v8::Local<v8::String> source,
+ EvaluateGlobalMode mode, bool repl) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(internal_isolate, Value);
+ i::REPLMode repl_mode = repl ? i::REPLMode::kYes : i::REPLMode::kNo;
+ Local<Value> result;
+ has_pending_exception = !ToLocal<Value>(
+ i::DebugEvaluate::Global(internal_isolate, Utils::OpenHandle(*source),
+ mode, repl_mode),
+ &result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(result);
+}
+
+void QueryObjects(v8::Local<v8::Context> v8_context,
+ QueryObjectPredicate* predicate,
+ PersistentValueVector<v8::Object>* objects) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_context->GetIsolate());
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ isolate->heap_profiler()->QueryObjects(Utils::OpenHandle(*v8_context),
+ predicate, objects);
+}
+
+void GlobalLexicalScopeNames(v8::Local<v8::Context> v8_context,
+ v8::PersistentValueVector<v8::String>* names) {
+ i::Handle<i::Context> context = Utils::OpenHandle(*v8_context);
+ i::Isolate* isolate = context->GetIsolate();
+ i::Handle<i::ScriptContextTable> table(
+ context->global_object().native_context().script_context_table(),
+ isolate);
+ for (int i = 0; i < table->synchronized_used(); i++) {
+ i::Handle<i::Context> context =
+ i::ScriptContextTable::GetContext(isolate, table, i);
+ DCHECK(context->IsScriptContext());
+ i::Handle<i::ScopeInfo> scope_info(context->scope_info(), isolate);
+ int local_count = scope_info->ContextLocalCount();
+ for (int j = 0; j < local_count; ++j) {
+ i::String name = scope_info->ContextLocalName(j);
+ if (i::ScopeInfo::VariableIsSynthetic(name)) continue;
+ names->Append(Utils::ToLocal(handle(name, isolate)));
+ }
+ }
+}
+
+void SetReturnValue(v8::Isolate* v8_isolate, v8::Local<v8::Value> value) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ isolate->debug()->set_return_value(*Utils::OpenHandle(*value));
+}
+
+int64_t GetNextRandomInt64(v8::Isolate* v8_isolate) {
+ return reinterpret_cast<i::Isolate*>(v8_isolate)
+ ->random_number_generator()
+ ->NextInt64();
+}
+
+void EnumerateRuntimeCallCounters(v8::Isolate* v8_isolate,
+ RuntimeCallCounterCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ if (isolate->counters()) {
+ isolate->counters()->runtime_call_stats()->EnumerateCounters(callback);
+ }
+}
+
+int GetDebuggingId(v8::Local<v8::Function> function) {
+ i::Handle<i::JSReceiver> callable = v8::Utils::OpenHandle(*function);
+ if (!callable->IsJSFunction()) return i::DebugInfo::kNoDebuggingId;
+ i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(callable);
+ int id = func->GetIsolate()->debug()->GetFunctionDebuggingId(func);
+ DCHECK_NE(i::DebugInfo::kNoDebuggingId, id);
+ return id;
+}
+
+bool SetFunctionBreakpoint(v8::Local<v8::Function> function,
+ v8::Local<v8::String> condition, BreakpointId* id) {
+ i::Handle<i::JSReceiver> callable = Utils::OpenHandle(*function);
+ if (!callable->IsJSFunction()) return false;
+ i::Handle<i::JSFunction> jsfunction =
+ i::Handle<i::JSFunction>::cast(callable);
+ i::Isolate* isolate = jsfunction->GetIsolate();
+ i::Handle<i::String> condition_string =
+ condition.IsEmpty() ? isolate->factory()->empty_string()
+ : Utils::OpenHandle(*condition);
+ return isolate->debug()->SetBreakpointForFunction(
+ handle(jsfunction->shared(), isolate), condition_string, id);
+}
+
+PostponeInterruptsScope::PostponeInterruptsScope(v8::Isolate* isolate)
+ : scope_(
+ new i::PostponeInterruptsScope(reinterpret_cast<i::Isolate*>(isolate),
+ i::StackGuard::API_INTERRUPT)) {}
+
+PostponeInterruptsScope::~PostponeInterruptsScope() = default;
+
+DisableBreakScope::DisableBreakScope(v8::Isolate* isolate)
+ : scope_(std::make_unique<i::DisableBreak>(
+ reinterpret_cast<i::Isolate*>(isolate)->debug())) {}
+
+DisableBreakScope::~DisableBreakScope() = default;
+
+int Coverage::BlockData::StartOffset() const { return block_->start; }
+
+int Coverage::BlockData::EndOffset() const { return block_->end; }
+
+uint32_t Coverage::BlockData::Count() const { return block_->count; }
+
+int Coverage::FunctionData::StartOffset() const { return function_->start; }
+
+int Coverage::FunctionData::EndOffset() const { return function_->end; }
+
+uint32_t Coverage::FunctionData::Count() const { return function_->count; }
+
+MaybeLocal<String> Coverage::FunctionData::Name() const {
+ return ToApiHandle<String>(function_->name);
+}
+
+size_t Coverage::FunctionData::BlockCount() const {
+ return function_->blocks.size();
+}
+
+bool Coverage::FunctionData::HasBlockCoverage() const {
+ return function_->has_block_coverage;
+}
+
+Coverage::BlockData Coverage::FunctionData::GetBlockData(size_t i) const {
+ return BlockData(&function_->blocks.at(i), coverage_);
+}
+
+Local<Script> Coverage::ScriptData::GetScript() const {
+ return ToApiHandle<Script>(script_->script);
+}
+
+size_t Coverage::ScriptData::FunctionCount() const {
+ return script_->functions.size();
+}
+
+Coverage::FunctionData Coverage::ScriptData::GetFunctionData(size_t i) const {
+ return FunctionData(&script_->functions.at(i), coverage_);
+}
+
+Coverage::ScriptData::ScriptData(size_t index,
+ std::shared_ptr<i::Coverage> coverage)
+ : script_(&coverage->at(index)), coverage_(std::move(coverage)) {}
+
+size_t Coverage::ScriptCount() const { return coverage_->size(); }
+
+Coverage::ScriptData Coverage::GetScriptData(size_t i) const {
+ return ScriptData(i, coverage_);
+}
+
+Coverage Coverage::CollectPrecise(Isolate* isolate) {
+ return Coverage(
+ i::Coverage::CollectPrecise(reinterpret_cast<i::Isolate*>(isolate)));
+}
+
+Coverage Coverage::CollectBestEffort(Isolate* isolate) {
+ return Coverage(
+ i::Coverage::CollectBestEffort(reinterpret_cast<i::Isolate*>(isolate)));
+}
+
+void Coverage::SelectMode(Isolate* isolate, CoverageMode mode) {
+ i::Coverage::SelectMode(reinterpret_cast<i::Isolate*>(isolate), mode);
+}
+
+int TypeProfile::Entry::SourcePosition() const { return entry_->position; }
+
+std::vector<MaybeLocal<String>> TypeProfile::Entry::Types() const {
+ std::vector<MaybeLocal<String>> result;
+ for (const internal::Handle<internal::String>& type : entry_->types) {
+ result.emplace_back(ToApiHandle<String>(type));
+ }
+ return result;
+}
+
+TypeProfile::ScriptData::ScriptData(
+ size_t index, std::shared_ptr<i::TypeProfile> type_profile)
+ : script_(&type_profile->at(index)),
+ type_profile_(std::move(type_profile)) {}
+
+Local<Script> TypeProfile::ScriptData::GetScript() const {
+ return ToApiHandle<Script>(script_->script);
+}
+
+std::vector<TypeProfile::Entry> TypeProfile::ScriptData::Entries() const {
+ std::vector<TypeProfile::Entry> result;
+ for (const internal::TypeProfileEntry& entry : script_->entries) {
+ result.push_back(TypeProfile::Entry(&entry, type_profile_));
+ }
+ return result;
+}
+
+TypeProfile TypeProfile::Collect(Isolate* isolate) {
+ return TypeProfile(
+ i::TypeProfile::Collect(reinterpret_cast<i::Isolate*>(isolate)));
+}
+
+void TypeProfile::SelectMode(Isolate* isolate, TypeProfileMode mode) {
+ i::TypeProfile::SelectMode(reinterpret_cast<i::Isolate*>(isolate), mode);
+}
+
+size_t TypeProfile::ScriptCount() const { return type_profile_->size(); }
+
+TypeProfile::ScriptData TypeProfile::GetScriptData(size_t i) const {
+ return ScriptData(i, type_profile_);
+}
+
+v8::MaybeLocal<v8::Value> WeakMap::Get(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> key) {
+ PREPARE_FOR_EXECUTION(context, WeakMap, Get, Value);
+ auto self = Utils::OpenHandle(this);
+ Local<Value> result;
+ i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
+ has_pending_exception =
+ !ToLocal<Value>(i::Execution::CallBuiltin(isolate, isolate->weakmap_get(),
+ self, arraysize(argv), argv),
+ &result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(result);
+}
+
+v8::MaybeLocal<WeakMap> WeakMap::Set(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> key,
+ v8::Local<v8::Value> value) {
+ PREPARE_FOR_EXECUTION(context, WeakMap, Set, WeakMap);
+ auto self = Utils::OpenHandle(this);
+ i::Handle<i::Object> result;
+ i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key),
+ Utils::OpenHandle(*value)};
+ has_pending_exception =
+ !i::Execution::CallBuiltin(isolate, isolate->weakmap_set(), self,
+ arraysize(argv), argv)
+ .ToHandle(&result);
+ RETURN_ON_FAILED_EXECUTION(WeakMap);
+ RETURN_ESCAPED(Local<WeakMap>::Cast(Utils::ToLocal(result)));
+}
+
+Local<WeakMap> WeakMap::New(v8::Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ LOG_API(i_isolate, WeakMap, New);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ i::Handle<i::JSWeakMap> obj = i_isolate->factory()->NewJSWeakMap();
+ return ToApiHandle<WeakMap>(obj);
+}
+
+WeakMap* WeakMap::Cast(v8::Value* value) {
+ return static_cast<WeakMap*>(value);
+}
+
+Local<Value> AccessorPair::getter() {
+ i::Handle<i::AccessorPair> accessors = Utils::OpenHandle(this);
+ i::Isolate* isolate = accessors->GetIsolate();
+ i::Handle<i::Object> getter(accessors->getter(), isolate);
+ return Utils::ToLocal(getter);
+}
+
+Local<Value> AccessorPair::setter() {
+ i::Handle<i::AccessorPair> accessors = Utils::OpenHandle(this);
+ i::Isolate* isolate = accessors->GetIsolate();
+ i::Handle<i::Object> setter(accessors->setter(), isolate);
+ return Utils::ToLocal(setter);
+}
+
+bool AccessorPair::IsAccessorPair(Local<Value> that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(*that);
+ return obj->IsAccessorPair();
+}
+
+bool WasmValueObject::IsWasmValueObject(Local<Value> that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(*that);
+ return obj->IsWasmValueObject();
+}
+
+MaybeLocal<Message> GetMessageFromPromise(Local<Promise> p) {
+ i::Handle<i::JSPromise> promise = Utils::OpenHandle(*p);
+ i::Isolate* isolate = promise->GetIsolate();
+
+ i::Handle<i::Symbol> key = isolate->factory()->promise_debug_message_symbol();
+ i::Handle<i::Object> maybeMessage =
+ i::JSReceiver::GetDataProperty(promise, key);
+
+ if (!maybeMessage->IsJSMessageObject(isolate)) return MaybeLocal<Message>();
+ return ToApiHandle<Message>(
+ i::Handle<i::JSMessageObject>::cast(maybeMessage));
+}
+
+std::unique_ptr<PropertyIterator> PropertyIterator::Create(
+ Local<Context> context, Local<Object> object) {
+ internal::Isolate* isolate =
+ reinterpret_cast<i::Isolate*>(object->GetIsolate());
+ if (IsExecutionTerminatingCheck(isolate)) {
+ return nullptr;
+ }
+ CallDepthScope<false> call_depth_scope(isolate, context);
+
+ auto result =
+ i::DebugPropertyIterator::Create(isolate, Utils::OpenHandle(*object));
+ if (!result) {
+ DCHECK(isolate->has_pending_exception());
+ call_depth_scope.Escape();
+ }
+ return result;
+}
+
+} // namespace debug
+
+namespace internal {
+
+Maybe<bool> DebugPropertyIterator::Advance() {
+ if (IsExecutionTerminatingCheck(isolate_)) {
+ return Nothing<bool>();
+ }
+ Local<v8::Context> context =
+ Utils::ToLocal(handle(isolate_->context(), isolate_));
+ CallDepthScope<false> call_depth_scope(isolate_, context);
+
+ if (!AdvanceInternal()) {
+ DCHECK(isolate_->has_pending_exception());
+ call_depth_scope.Escape();
+ return Nothing<bool>();
+ }
+ return Just(true);
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/api/api-macros-undef.h"
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 3e41e2affd..f04a91be32 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -73,7 +73,7 @@ V8_EXPORT_PRIVATE bool GetPrivateMembers(Local<Context> context,
* Forwards to v8::Object::CreationContext, but with special handling for
* JSGlobalProxy objects.
*/
-Local<Context> GetCreationContext(Local<Object> value);
+MaybeLocal<Context> GetCreationContext(Local<Object> value);
enum ExceptionBreakState {
NoBreakOnException = 0,
@@ -592,12 +592,17 @@ struct PropertyDescriptor {
class PropertyIterator {
public:
- static std::unique_ptr<PropertyIterator> Create(v8::Local<v8::Object> object);
+ // Creating a PropertyIterator can potentially throw an exception.
+ // The returned std::unique_ptr is empty iff that happens.
+ V8_WARN_UNUSED_RESULT static std::unique_ptr<PropertyIterator> Create(
+ v8::Local<v8::Context> context, v8::Local<v8::Object> object);
virtual ~PropertyIterator() = default;
virtual bool Done() const = 0;
- virtual void Advance() = 0;
+ // Returns |Nothing| should |Advance| throw an exception,
+ // |true| otherwise.
+ V8_WARN_UNUSED_RESULT virtual Maybe<bool> Advance() = 0;
virtual v8::Local<v8::Name> name() const = 0;
@@ -611,6 +616,16 @@ class PropertyIterator {
virtual bool is_array_index() = 0;
};
+class V8_EXPORT_PRIVATE WasmValueObject : public v8::Object {
+ public:
+ WasmValueObject() = delete;
+ static bool IsWasmValueObject(v8::Local<v8::Value> obj);
+ V8_INLINE static WasmValueObject* Cast(v8::Value* obj);
+
+ private:
+ static void CheckCast(v8::Value* obj);
+};
+
AccessorPair* AccessorPair::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
@@ -618,6 +633,13 @@ AccessorPair* AccessorPair::Cast(v8::Value* value) {
return static_cast<AccessorPair*>(value);
}
+WasmValueObject* WasmValueObject::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<WasmValueObject*>(value);
+}
+
MaybeLocal<Message> GetMessageFromPromise(Local<Promise> promise);
} // namespace debug
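Since both creating a PropertyIterator and calling Advance() can now fail, a caller
has to check both results. A hedged sketch of the intended call pattern (illustrative
only, built from the declarations above, assuming existing context and object locals):

    std::unique_ptr<v8::debug::PropertyIterator> it =
        v8::debug::PropertyIterator::Create(context, object);
    if (!it) return;  // Creation threw; the returned unique_ptr is empty.
    while (!it->Done()) {
      v8::Local<v8::Name> name = it->name();
      // ... inspect |name| and the other accessors here ...
      if (it->Advance().IsNothing()) return;  // Advance() threw an exception.
    }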
diff --git a/deps/v8/src/debug/debug-property-iterator.cc b/deps/v8/src/debug/debug-property-iterator.cc
index a3605df55c..ea49316d70 100644
--- a/deps/v8/src/debug/debug-property-iterator.cc
+++ b/deps/v8/src/debug/debug-property-iterator.cc
@@ -12,37 +12,39 @@
#include "src/objects/property-details.h"
namespace v8 {
+namespace internal {
-std::unique_ptr<debug::PropertyIterator> debug::PropertyIterator::Create(
- v8::Local<v8::Object> v8_object) {
- internal::Isolate* isolate =
- reinterpret_cast<internal::Isolate*>(v8_object->GetIsolate());
- return std::unique_ptr<debug::PropertyIterator>(
- new internal::DebugPropertyIterator(isolate,
- Utils::OpenHandle(*v8_object)));
-}
+std::unique_ptr<DebugPropertyIterator> DebugPropertyIterator::Create(
+ Isolate* isolate, Handle<JSReceiver> receiver) {
+ // Can't use std::make_unique as Ctor is private.
+ auto iterator = std::unique_ptr<DebugPropertyIterator>(
+ new DebugPropertyIterator(isolate, receiver));
-namespace internal {
+ if (receiver->IsJSProxy()) {
+ iterator->is_own_ = false;
+ iterator->prototype_iterator_.AdvanceIgnoringProxies();
+ }
+ if (iterator->prototype_iterator_.IsAtEnd()) return iterator;
+
+ if (!iterator->FillKeysForCurrentPrototypeAndStage()) return nullptr;
+ if (iterator->should_move_to_next_stage() && !iterator->AdvanceInternal()) {
+ return nullptr;
+ }
+
+ return iterator;
+}
DebugPropertyIterator::DebugPropertyIterator(Isolate* isolate,
Handle<JSReceiver> receiver)
: isolate_(isolate),
prototype_iterator_(isolate, receiver, kStartAtReceiver,
- PrototypeIterator::END_AT_NULL) {
- if (receiver->IsJSProxy()) {
- is_own_ = false;
- prototype_iterator_.AdvanceIgnoringProxies();
- }
- if (prototype_iterator_.IsAtEnd()) return;
- FillKeysForCurrentPrototypeAndStage();
- if (should_move_to_next_stage()) Advance();
-}
+ PrototypeIterator::END_AT_NULL) {}
bool DebugPropertyIterator::Done() const {
return prototype_iterator_.IsAtEnd();
}
-void DebugPropertyIterator::Advance() {
+bool DebugPropertyIterator::AdvanceInternal() {
++current_key_index_;
calculated_native_accessor_flags_ = false;
while (should_move_to_next_stage()) {
@@ -59,8 +61,9 @@ void DebugPropertyIterator::Advance() {
prototype_iterator_.AdvanceIgnoringProxies();
break;
}
- FillKeysForCurrentPrototypeAndStage();
+ if (!FillKeysForCurrentPrototypeAndStage()) return false;
}
+ return true;
}
bool DebugPropertyIterator::is_native_accessor() {
@@ -138,19 +141,19 @@ bool DebugPropertyIterator::is_array_index() {
return raw_name()->AsArrayIndex(&index);
}
-void DebugPropertyIterator::FillKeysForCurrentPrototypeAndStage() {
+bool DebugPropertyIterator::FillKeysForCurrentPrototypeAndStage() {
current_key_index_ = 0;
exotic_length_ = 0;
keys_ = Handle<FixedArray>::null();
- if (prototype_iterator_.IsAtEnd()) return;
+ if (prototype_iterator_.IsAtEnd()) return true;
Handle<JSReceiver> receiver =
PrototypeIterator::GetCurrent<JSReceiver>(prototype_iterator_);
bool has_exotic_indices = receiver->IsJSTypedArray();
if (stage_ == kExoticIndices) {
- if (!has_exotic_indices) return;
+ if (!has_exotic_indices) return true;
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(receiver);
exotic_length_ = typed_array->WasDetached() ? 0 : typed_array->length();
- return;
+ return true;
}
bool skip_indices = has_exotic_indices;
PropertyFilter filter =
@@ -160,7 +163,9 @@ void DebugPropertyIterator::FillKeysForCurrentPrototypeAndStage() {
skip_indices)
.ToHandle(&keys_)) {
keys_ = Handle<FixedArray>::null();
+ return false;
}
+ return true;
}
bool DebugPropertyIterator::should_move_to_next_stage() const {
diff --git a/deps/v8/src/debug/debug-property-iterator.h b/deps/v8/src/debug/debug-property-iterator.h
index 393af4c0ee..0c2a9afd97 100644
--- a/deps/v8/src/debug/debug-property-iterator.h
+++ b/deps/v8/src/debug/debug-property-iterator.h
@@ -19,13 +19,14 @@ class JSReceiver;
class DebugPropertyIterator final : public debug::PropertyIterator {
public:
- DebugPropertyIterator(Isolate* isolate, Handle<JSReceiver> receiver);
+ V8_WARN_UNUSED_RESULT static std::unique_ptr<DebugPropertyIterator> Create(
+ Isolate* isolate, Handle<JSReceiver> receiver);
~DebugPropertyIterator() override = default;
DebugPropertyIterator(const DebugPropertyIterator&) = delete;
DebugPropertyIterator& operator=(const DebugPropertyIterator&) = delete;
bool Done() const override;
- void Advance() override;
+ V8_WARN_UNUSED_RESULT Maybe<bool> Advance() override;
v8::Local<v8::Name> name() const override;
bool is_native_accessor() override;
@@ -38,10 +39,13 @@ class DebugPropertyIterator final : public debug::PropertyIterator {
bool is_array_index() override;
private:
- void FillKeysForCurrentPrototypeAndStage();
+ DebugPropertyIterator(Isolate* isolate, Handle<JSReceiver> receiver);
+
+ V8_WARN_UNUSED_RESULT bool FillKeysForCurrentPrototypeAndStage();
bool should_move_to_next_stage() const;
void CalculateNativeAccessorFlags();
Handle<Name> raw_name() const;
+ V8_WARN_UNUSED_RESULT bool AdvanceInternal();
Isolate* isolate_;
PrototypeIterator prototype_iterator_;
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.cc b/deps/v8/src/debug/debug-stack-trace-iterator.cc
index 782b0c7dda..1d7e37029d 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.cc
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.cc
@@ -7,7 +7,7 @@
#include "src/api/api-inl.h"
#include "src/debug/debug-evaluate.h"
#include "src/debug/debug-scope-iterator.h"
-#include "src/debug/debug-wasm-support.h"
+#include "src/debug/debug-wasm-objects.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
#include "src/execution/frames-inl.h"
diff --git a/deps/v8/src/debug/debug-wasm-objects-inl.h b/deps/v8/src/debug/debug-wasm-objects-inl.h
new file mode 100644
index 0000000000..77ca8c9a0b
--- /dev/null
+++ b/deps/v8/src/debug/debug-wasm-objects-inl.h
@@ -0,0 +1,30 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEBUG_DEBUG_WASM_OBJECTS_INL_H_
+#define V8_DEBUG_DEBUG_WASM_OBJECTS_INL_H_
+
+#include "src/debug/debug-wasm-objects.h"
+#include "src/objects/js-objects-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/debug/debug-wasm-objects-tq-inl.inc"
+
+OBJECT_CONSTRUCTORS_IMPL(WasmValueObject, JSObject)
+
+CAST_ACCESSOR(WasmValueObject)
+
+ACCESSORS(WasmValueObject, value, Object, kValueOffset)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_DEBUG_DEBUG_WASM_OBJECTS_INL_H_
diff --git a/deps/v8/src/debug/debug-wasm-support.cc b/deps/v8/src/debug/debug-wasm-objects.cc
index 59b8ad8c35..c3560a4d15 100644
--- a/deps/v8/src/debug/debug-wasm-support.cc
+++ b/deps/v8/src/debug/debug-wasm-objects.cc
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/debug/debug-wasm-support.h"
+#include "src/debug/debug-wasm-objects.h"
#include "src/api/api-inl.h"
#include "src/api/api-natives.h"
+#include "src/debug/debug-wasm-objects-inl.h"
#include "src/execution/frames-inl.h"
#include "src/objects/property-descriptor.h"
#include "src/wasm/wasm-debug.h"
@@ -16,49 +17,6 @@ namespace v8 {
namespace internal {
namespace {
-// Convert a WasmValue to an appropriate JS representation.
-Handle<Object> WasmValueToObject(Isolate* isolate, wasm::WasmValue value) {
- auto* factory = isolate->factory();
- switch (value.type().kind()) {
- case wasm::ValueType::kI32:
- return factory->NewNumberFromInt(value.to_i32());
- case wasm::ValueType::kI64:
- return BigInt::FromInt64(isolate, value.to_i64());
- case wasm::ValueType::kF32:
- return factory->NewNumber(value.to_f32());
- case wasm::ValueType::kF64:
- return factory->NewNumber(value.to_f64());
- case wasm::ValueType::kS128: {
- wasm::Simd128 s128 = value.to_s128();
- Handle<JSArrayBuffer> buffer;
- if (!factory
- ->NewJSArrayBufferAndBackingStore(
- kSimd128Size, InitializedFlag::kUninitialized)
- .ToHandle(&buffer)) {
- isolate->FatalProcessOutOfHeapMemory(
- "failed to allocate backing store");
- }
-
- base::Memcpy(buffer->allocation_base(), s128.bytes(),
- buffer->byte_length());
- auto array = factory->NewJSTypedArray(kExternalUint8Array, buffer, 0,
- kSimd128Size);
- JSObject::SetPrototype(array, factory->null_value(), false, kDontThrow)
- .Check();
- return array;
- }
- case wasm::ValueType::kRef:
- return value.to_externref();
- default:
- break;
- }
- return factory->undefined_value();
-}
-
-} // namespace
-
-namespace {
-
// Helper for unpacking a maybe name that makes a default with an index if
// the name is empty. If the name is not empty, it's prefixed with a $.
Handle<String> GetNameOrDefault(Isolate* isolate,
@@ -78,6 +36,36 @@ Handle<String> GetNameOrDefault(Isolate* isolate,
return isolate->factory()->InternalizeString(value.SubVector(0, len));
}
+MaybeHandle<String> GetNameFromImportsAndExportsOrNull(
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ wasm::ImportExportKindCode kind, uint32_t index) {
+ auto debug_info = instance->module_object().native_module()->GetDebugInfo();
+ wasm::ModuleWireBytes wire_bytes(
+ instance->module_object().native_module()->wire_bytes());
+
+ auto import_name_ref = debug_info->GetImportName(kind, index);
+ if (!import_name_ref.first.is_empty()) {
+ ScopedVector<char> name(import_name_ref.first.length() + 1 +
+ import_name_ref.second.length());
+ auto name_begin = &name.first(), name_end = name_begin;
+ auto module_name = wire_bytes.GetNameOrNull(import_name_ref.first);
+ name_end = std::copy(module_name.begin(), module_name.end(), name_end);
+ *name_end++ = '.';
+ auto field_name = wire_bytes.GetNameOrNull(import_name_ref.second);
+ name_end = std::copy(field_name.begin(), field_name.end(), name_end);
+ return isolate->factory()->NewStringFromUtf8(
+ VectorOf(name_begin, name_end - name_begin));
+ }
+
+ auto export_name_ref = debug_info->GetExportName(kind, index);
+ if (!export_name_ref.is_empty()) {
+ auto name = wire_bytes.GetNameOrNull(export_name_ref);
+ return isolate->factory()->NewStringFromUtf8(name);
+ }
+
+ return {};
+}
+
enum DebugProxyId {
kFunctionsProxy,
kGlobalsProxy,
@@ -94,24 +82,18 @@ enum DebugProxyId {
kNumInstanceProxies = kLastInstanceProxyId + 1
};
-// Creates a FixedArray with the given |length| as cache on-demand on
-// the |object|, stored under the |wasm_debug_proxy_cache_symbol|.
-// This is currently used to cache the debug proxy object maps on the
-// JSGlobalObject (per native context), and various debug proxy objects
-// (functions, globals, tables, and memories) on the WasmInstanceObject.
-Handle<FixedArray> GetOrCreateDebugProxyCache(Isolate* isolate,
- Handle<Object> object,
- int length) {
- Handle<Object> cache;
- Handle<Symbol> symbol = isolate->factory()->wasm_debug_proxy_cache_symbol();
- if (!Object::GetProperty(isolate, object, symbol).ToHandle(&cache) ||
- cache->IsUndefined(isolate)) {
- cache = isolate->factory()->NewFixedArrayWithHoles(length);
- Object::SetProperty(isolate, object, symbol, cache).Check();
- } else {
- DCHECK_EQ(length, Handle<FixedArray>::cast(cache)->length());
+constexpr int kFirstWasmValueMapIndex = kNumProxies;
+constexpr int kLastWasmValueMapIndex =
+ kFirstWasmValueMapIndex + WasmValueObject::kNumTypes - 1;
+constexpr int kNumDebugMaps = kLastWasmValueMapIndex + 1;
+
+Handle<FixedArray> GetOrCreateDebugMaps(Isolate* isolate) {
+ Handle<FixedArray> maps = isolate->wasm_debug_maps();
+ if (maps->length() == 0) {
+ maps = isolate->factory()->NewFixedArrayWithHoles(kNumDebugMaps);
+ isolate->native_context()->set_wasm_debug_maps(*maps);
}
- return Handle<FixedArray>::cast(cache);
+ return maps;
}
// Creates a Map for the given debug proxy |id| using the |create_template_fn|
@@ -121,8 +103,8 @@ Handle<FixedArray> GetOrCreateDebugProxyCache(Isolate* isolate,
Handle<Map> GetOrCreateDebugProxyMap(
Isolate* isolate, DebugProxyId id,
v8::Local<v8::FunctionTemplate> (*create_template_fn)(v8::Isolate*)) {
- Handle<FixedArray> maps = GetOrCreateDebugProxyCache(
- isolate, isolate->global_object(), kNumProxies);
+ auto maps = GetOrCreateDebugMaps(isolate);
+ CHECK_LE(kNumProxies, maps->length());
if (!maps->is_the_hole(isolate, id)) {
return handle(Map::cast(maps->get(id)), isolate);
}
@@ -241,12 +223,6 @@ struct IndexedDebugProxy {
// of functions in them.
template <typename T, DebugProxyId id, typename Provider = WasmInstanceObject>
struct NamedDebugProxy : IndexedDebugProxy<T, id, Provider> {
- enum {
- kProviderField,
- kNameTableField,
- kFieldCount,
- };
-
static v8::Local<v8::FunctionTemplate> CreateTemplate(v8::Isolate* isolate) {
auto templ = IndexedDebugProxy<T, id, Provider>::CreateTemplate(isolate);
templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
@@ -261,8 +237,9 @@ struct NamedDebugProxy : IndexedDebugProxy<T, id, Provider> {
static Handle<NameDictionary> GetNameTable(Handle<JSObject> holder,
Isolate* isolate) {
- Handle<Object> table_or_undefined(holder->GetEmbedderField(kNameTableField),
- isolate);
+ Handle<Symbol> symbol = isolate->factory()->wasm_debug_proxy_names_symbol();
+ Handle<Object> table_or_undefined =
+ JSObject::GetProperty(isolate, holder, symbol).ToHandleChecked();
if (!table_or_undefined->IsUndefined(isolate)) {
return Handle<NameDictionary>::cast(table_or_undefined);
}
@@ -277,7 +254,7 @@ struct NamedDebugProxy : IndexedDebugProxy<T, id, Provider> {
table = NameDictionary::Add(isolate, table, key, value,
PropertyDetails::Empty());
}
- holder->SetEmbedderField(kNameTableField, *table);
+ Object::SetProperty(isolate, holder, symbol, table).Check();
return table;
}
@@ -340,18 +317,15 @@ struct FunctionsProxy : NamedDebugProxy<FunctionsProxy, kFunctionsProxy> {
static Handle<String> GetName(Isolate* isolate,
Handle<WasmInstanceObject> instance,
uint32_t index) {
- wasm::ModuleWireBytes wire_bytes(
- instance->module_object().native_module()->wire_bytes());
- auto* module = instance->module();
- wasm::WireBytesRef name_ref =
- module->lazily_generated_names.LookupFunctionName(
- wire_bytes, index, VectorOf(module->export_table));
- Vector<const char> name_vec = wire_bytes.GetNameOrNull(name_ref);
- return GetNameOrDefault(
- isolate,
- name_vec.empty() ? MaybeHandle<String>()
- : isolate->factory()->NewStringFromUtf8(name_vec),
- "$func", index);
+ Handle<WasmModuleObject> module_object(instance->module_object(), isolate);
+ MaybeHandle<String> name =
+ WasmModuleObject::GetFunctionNameOrNull(isolate, module_object, index);
+ if (name.is_null()) {
+ name = GetNameFromImportsAndExportsOrNull(
+ isolate, instance, wasm::ImportExportKindCode::kExternalFunction,
+ index);
+ }
+ return GetNameOrDefault(isolate, name, "$func", index);
}
};
@@ -366,9 +340,9 @@ struct GlobalsProxy : NamedDebugProxy<GlobalsProxy, kGlobalsProxy> {
static Handle<Object> Get(Isolate* isolate,
Handle<WasmInstanceObject> instance,
uint32_t index) {
- return WasmValueToObject(isolate,
- WasmInstanceObject::GetGlobalValue(
- instance, instance->module()->globals[index]));
+ return WasmValueObject::New(
+ isolate, WasmInstanceObject::GetGlobalValue(
+ instance, instance->module()->globals[index]));
}
static Handle<String> GetName(Isolate* isolate,
@@ -376,7 +350,9 @@ struct GlobalsProxy : NamedDebugProxy<GlobalsProxy, kGlobalsProxy> {
uint32_t index) {
return GetNameOrDefault(
isolate,
- WasmInstanceObject::GetGlobalNameOrNull(isolate, instance, index),
+ GetNameFromImportsAndExportsOrNull(
+ isolate, instance, wasm::ImportExportKindCode::kExternalGlobal,
+ index),
"$global", index);
}
};
@@ -400,7 +376,9 @@ struct MemoriesProxy : NamedDebugProxy<MemoriesProxy, kMemoriesProxy> {
uint32_t index) {
return GetNameOrDefault(
isolate,
- WasmInstanceObject::GetMemoryNameOrNull(isolate, instance, index),
+ GetNameFromImportsAndExportsOrNull(
+ isolate, instance, wasm::ImportExportKindCode::kExternalMemory,
+ index),
"$memory", index);
}
};
@@ -424,7 +402,9 @@ struct TablesProxy : NamedDebugProxy<TablesProxy, kTablesProxy> {
uint32_t index) {
return GetNameOrDefault(
isolate,
- WasmInstanceObject::GetTableNameOrNull(isolate, instance, index),
+ GetNameFromImportsAndExportsOrNull(
+ isolate, instance, wasm::ImportExportKindCode::kExternalTable,
+ index),
"$table", index);
}
};
@@ -441,7 +421,7 @@ struct LocalsProxy : NamedDebugProxy<LocalsProxy, kLocalsProxy, FixedArray> {
auto function = debug_info->GetFunctionAtAddress(frame->pc());
auto values = isolate->factory()->NewFixedArray(count + 2);
for (int i = 0; i < count; ++i) {
- auto value = WasmValueToObject(
+ auto value = WasmValueObject::New(
isolate, debug_info->GetLocalValue(i, frame->pc(), frame->fp(),
frame->callee_fp()));
values->set(i, *value);
@@ -488,7 +468,7 @@ struct StackProxy : IndexedDebugProxy<StackProxy, kStackProxy, FixedArray> {
int count = debug_info->GetStackDepth(frame->pc());
auto values = isolate->factory()->NewFixedArray(count);
for (int i = 0; i < count; ++i) {
- auto value = WasmValueToObject(
+ auto value = WasmValueObject::New(
isolate, debug_info->GetStackValue(i, frame->pc(), frame->fp(),
frame->callee_fp()));
values->set(i, *value);
@@ -506,14 +486,29 @@ struct StackProxy : IndexedDebugProxy<StackProxy, kStackProxy, FixedArray> {
}
};
+// Creates FixedArray with size |kNumInstanceProxies| as cache on-demand
+// on the |instance|, stored under the |wasm_debug_proxy_cache_symbol|.
+// This is used to cache the various instance debug proxies (functions,
+// globals, tables, and memories) on the WasmInstanceObject.
+Handle<FixedArray> GetOrCreateInstanceProxyCache(
+ Isolate* isolate, Handle<WasmInstanceObject> instance) {
+ Handle<Object> cache;
+ Handle<Symbol> symbol = isolate->factory()->wasm_debug_proxy_cache_symbol();
+ if (!Object::GetProperty(isolate, instance, symbol).ToHandle(&cache) ||
+ cache->IsUndefined(isolate)) {
+ cache = isolate->factory()->NewFixedArrayWithHoles(kNumInstanceProxies);
+ Object::SetProperty(isolate, instance, symbol, cache).Check();
+ }
+ return Handle<FixedArray>::cast(cache);
+}
+
// Creates an instance of the |Proxy| on-demand and caches that on the
// |instance|.
template <typename Proxy>
Handle<JSObject> GetOrCreateInstanceProxy(Isolate* isolate,
Handle<WasmInstanceObject> instance) {
STATIC_ASSERT(Proxy::kId < kNumInstanceProxies);
- Handle<FixedArray> proxies =
- GetOrCreateDebugProxyCache(isolate, instance, kNumInstanceProxies);
+ Handle<FixedArray> proxies = GetOrCreateInstanceProxyCache(isolate, instance);
if (!proxies->is_the_hole(isolate, Proxy::kId)) {
return handle(JSObject::cast(proxies->get(Proxy::kId)), isolate);
}
@@ -528,8 +523,10 @@ Handle<JSObject> GetOrCreateInstanceProxy(Isolate* isolate,
// and extensions to inspect the WebAssembly engine state from JavaScript.
// The proxy provides the following interface:
//
-// type WasmSimdValue = Uint8Array;
-// type WasmValue = number | bigint | object | WasmSimdValue;
+// type WasmValue = {
+// type: string;
+// value: number | bigint | object | string;
+// };
// type WasmFunction = (... args : WasmValue[]) => WasmValue;
// interface WasmInterface {
// $globalX: WasmValue;
@@ -561,33 +558,17 @@ Handle<JSObject> GetOrCreateInstanceProxy(Isolate* isolate,
//
// See http://doc/1VZOJrU2VsqOZe3IUzbwQWQQSZwgGySsm5119Ust1gUA and
// http://bit.ly/devtools-wasm-entities for more details.
-class ContextProxy {
+class ContextProxyPrototype {
public:
- static Handle<JSObject> Create(WasmFrame* frame) {
- Isolate* isolate = frame->isolate();
+ static Handle<JSObject> Create(Isolate* isolate) {
auto object_map =
GetOrCreateDebugProxyMap(isolate, kContextProxy, &CreateTemplate);
- auto object = isolate->factory()->NewJSObjectFromMap(object_map);
- Handle<WasmInstanceObject> instance(frame->wasm_instance(), isolate);
- object->SetEmbedderField(kInstanceField, *instance);
- Handle<JSObject> locals = LocalsProxy::Create(frame);
- object->SetEmbedderField(kLocalsField, *locals);
- Handle<JSObject> stack = StackProxy::Create(frame);
- object->SetEmbedderField(kStackField, *stack);
- return object;
+ return isolate->factory()->NewJSObjectFromMap(object_map);
}
private:
- enum {
- kInstanceField,
- kLocalsField,
- kStackField,
- kFieldCount,
- };
-
static v8::Local<v8::FunctionTemplate> CreateTemplate(v8::Isolate* isolate) {
Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
- templ->InstanceTemplate()->SetInternalFieldCount(kFieldCount);
templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
&NamedGetter, {}, {}, {}, {}, {}, {}, {},
static_cast<v8::PropertyHandlerFlags>(
@@ -599,44 +580,16 @@ class ContextProxy {
}
static MaybeHandle<Object> GetNamedProperty(Isolate* isolate,
- Handle<JSObject> holder,
+ Handle<JSObject> receiver,
Handle<String> name) {
- if (name->length() == 0) return {};
- Handle<WasmInstanceObject> instance(
- WasmInstanceObject::cast(holder->GetEmbedderField(kInstanceField)),
- isolate);
- if (name->IsOneByteEqualTo(StaticCharVector("instance"))) {
- return instance;
- }
- if (name->IsOneByteEqualTo(StaticCharVector("module"))) {
- return handle(instance->module_object(), isolate);
- }
- if (name->IsOneByteEqualTo(StaticCharVector("locals"))) {
- return handle(holder->GetEmbedderField(kLocalsField), isolate);
- }
- if (name->IsOneByteEqualTo(StaticCharVector("stack"))) {
- return handle(holder->GetEmbedderField(kStackField), isolate);
- }
- if (name->IsOneByteEqualTo(StaticCharVector("memories"))) {
- return GetOrCreateInstanceProxy<MemoriesProxy>(isolate, instance);
- }
- if (name->IsOneByteEqualTo(StaticCharVector("tables"))) {
- return GetOrCreateInstanceProxy<TablesProxy>(isolate, instance);
- }
- if (name->IsOneByteEqualTo(StaticCharVector("globals"))) {
- return GetOrCreateInstanceProxy<GlobalsProxy>(isolate, instance);
- }
- if (name->IsOneByteEqualTo(StaticCharVector("functions"))) {
- return GetOrCreateInstanceProxy<FunctionsProxy>(isolate, instance);
- }
- if (name->Get(0) == '$') {
+ if (name->length() != 0 && name->Get(0) == '$') {
const char* kDelegateNames[] = {"memories", "locals", "tables",
"functions", "globals"};
for (auto delegate_name : kDelegateNames) {
Handle<Object> delegate;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, delegate,
- JSObject::GetProperty(isolate, holder, delegate_name), Object);
+ JSObject::GetProperty(isolate, receiver, delegate_name), Object);
if (!delegate->IsUndefined(isolate)) {
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -653,49 +606,41 @@ class ContextProxy {
const PropertyCallbackInfo<v8::Value>& info) {
auto name_string = Handle<String>::cast(Utils::OpenHandle(*name));
auto isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
- auto holder = Handle<JSObject>::cast(Utils::OpenHandle(*info.Holder()));
+ auto receiver = Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
Handle<Object> value;
- if (GetNamedProperty(isolate, holder, name_string).ToHandle(&value)) {
+ if (GetNamedProperty(isolate, receiver, name_string).ToHandle(&value)) {
info.GetReturnValue().Set(Utils::ToLocal(value));
}
}
};
-Handle<JSObject> GetModuleScopeObject(Handle<WasmInstanceObject> instance) {
- Isolate* isolate = instance->GetIsolate();
- Handle<JSObject> module_scope_object =
- isolate->factory()->NewJSObjectWithNullProto();
-
- Handle<String> instance_name =
- isolate->factory()->InternalizeString(StaticCharVector("instance"));
- JSObject::AddProperty(isolate, module_scope_object, instance_name, instance,
- NONE);
-
- Handle<WasmModuleObject> module_object(instance->module_object(), isolate);
- Handle<String> module_name =
- isolate->factory()->InternalizeString(StaticCharVector("module"));
- JSObject::AddProperty(isolate, module_scope_object, module_name,
- module_object, NONE);
-
- uint32_t memory_count = MemoriesProxy::Count(isolate, instance);
- for (uint32_t memory_index = 0; memory_index < memory_count; ++memory_index) {
- auto memory_name = MemoriesProxy::GetName(isolate, instance, memory_index);
- auto memory_value = MemoriesProxy::Get(isolate, instance, memory_index);
- JSObject::AddProperty(isolate, module_scope_object, memory_name,
- memory_value, NONE);
- }
-
- if (GlobalsProxy::Count(isolate, instance) != 0) {
- Handle<JSObject> globals_obj =
- GetOrCreateInstanceProxy<GlobalsProxy>(isolate, instance);
- Handle<String> globals_name =
- isolate->factory()->InternalizeString(StaticCharVector("globals"));
- JSObject::AddProperty(isolate, module_scope_object, globals_name,
- globals_obj, NONE);
+class ContextProxy {
+ public:
+ static Handle<JSObject> Create(WasmFrame* frame) {
+ Isolate* isolate = frame->isolate();
+ auto object = isolate->factory()->NewJSObjectWithNullProto();
+ Handle<WasmInstanceObject> instance(frame->wasm_instance(), isolate);
+ JSObject::AddProperty(isolate, object, "instance", instance, FROZEN);
+ Handle<WasmModuleObject> module_object(instance->module_object(), isolate);
+ JSObject::AddProperty(isolate, object, "module", module_object, FROZEN);
+ auto locals = LocalsProxy::Create(frame);
+ JSObject::AddProperty(isolate, object, "locals", locals, FROZEN);
+ auto stack = StackProxy::Create(frame);
+ JSObject::AddProperty(isolate, object, "stack", stack, FROZEN);
+ auto memories = GetOrCreateInstanceProxy<MemoriesProxy>(isolate, instance);
+ JSObject::AddProperty(isolate, object, "memories", memories, FROZEN);
+ auto tables = GetOrCreateInstanceProxy<TablesProxy>(isolate, instance);
+ JSObject::AddProperty(isolate, object, "tables", tables, FROZEN);
+ auto globals = GetOrCreateInstanceProxy<GlobalsProxy>(isolate, instance);
+ JSObject::AddProperty(isolate, object, "globals", globals, FROZEN);
+ auto functions =
+ GetOrCreateInstanceProxy<FunctionsProxy>(isolate, instance);
+ JSObject::AddProperty(isolate, object, "functions", functions, FROZEN);
+ Handle<JSObject> prototype = ContextProxyPrototype::Create(isolate);
+ JSObject::SetPrototype(object, prototype, false, kDontThrow).Check();
+ return object;
}
-
- return module_scope_object;
-}
+};
class DebugWasmScopeIterator final : public debug::ScopeIterator {
public:
@@ -732,23 +677,48 @@ class DebugWasmScopeIterator final : public debug::ScopeIterator {
ScopeType GetType() override { return type_; }
v8::Local<v8::Object> GetObject() override {
- DCHECK(!Done());
+ Isolate* isolate = frame_->isolate();
switch (type_) {
case debug::ScopeIterator::ScopeTypeModule: {
- Handle<WasmInstanceObject> instance =
- FrameSummary::GetTop(frame_).AsWasm().wasm_instance();
- return Utils::ToLocal(GetModuleScopeObject(instance));
+ Handle<WasmInstanceObject> instance(frame_->wasm_instance(), isolate);
+ Handle<JSObject> object =
+ isolate->factory()->NewJSObjectWithNullProto();
+ JSObject::AddProperty(isolate, object, "instance", instance, FROZEN);
+ Handle<JSObject> module_object(instance->module_object(), isolate);
+ JSObject::AddProperty(isolate, object, "module", module_object, FROZEN);
+ if (FunctionsProxy::Count(isolate, instance) != 0) {
+ JSObject::AddProperty(
+ isolate, object, "functions",
+ GetOrCreateInstanceProxy<FunctionsProxy>(isolate, instance),
+ FROZEN);
+ }
+ if (GlobalsProxy::Count(isolate, instance) != 0) {
+ JSObject::AddProperty(
+ isolate, object, "globals",
+ GetOrCreateInstanceProxy<GlobalsProxy>(isolate, instance),
+ FROZEN);
+ }
+ if (MemoriesProxy::Count(isolate, instance) != 0) {
+ JSObject::AddProperty(
+ isolate, object, "memories",
+ GetOrCreateInstanceProxy<MemoriesProxy>(isolate, instance),
+ FROZEN);
+ }
+ if (TablesProxy::Count(isolate, instance) != 0) {
+ JSObject::AddProperty(
+ isolate, object, "tables",
+ GetOrCreateInstanceProxy<TablesProxy>(isolate, instance), FROZEN);
+ }
+ return Utils::ToLocal(object);
}
case debug::ScopeIterator::ScopeTypeLocal: {
- DCHECK(frame_->is_inspectable());
return Utils::ToLocal(LocalsProxy::Create(frame_));
}
case debug::ScopeIterator::ScopeTypeWasmExpressionStack: {
- DCHECK(frame_->is_inspectable());
return Utils::ToLocal(StackProxy::Create(frame_));
}
default:
- return {};
+ UNREACHABLE();
}
}
v8::Local<v8::Value> GetFunctionDebugName() override {
@@ -773,8 +743,104 @@ class DebugWasmScopeIterator final : public debug::ScopeIterator {
ScopeType type_;
};
+Handle<String> WasmSimd128ToString(Isolate* isolate, wasm::Simd128 s128) {
+ // We use the canonical format as described in:
+ // https://github.com/WebAssembly/simd/blob/master/proposals/simd/TextSIMD.md
+ EmbeddedVector<char, 50> buffer;
+ auto i32x4 = s128.to_i32x4();
+ SNPrintF(buffer, "i32x4 0x%08X 0x%08X 0x%08X 0x%08X", i32x4.val[0],
+ i32x4.val[1], i32x4.val[2], i32x4.val[3]);
+ return isolate->factory()->NewStringFromAsciiChecked(buffer.data());
+}
+
+Handle<String> Type2String(Isolate* isolate, WasmValueObject::Type type) {
+ switch (type) {
+ case WasmValueObject::kExternRef:
+ return isolate->factory()->InternalizeString(
+ StaticCharVector("externref"));
+ case WasmValueObject::kF32:
+ return isolate->factory()->InternalizeString(StaticCharVector("f32"));
+ case WasmValueObject::kF64:
+ return isolate->factory()->InternalizeString(StaticCharVector("f64"));
+ case WasmValueObject::kI32:
+ return isolate->factory()->InternalizeString(StaticCharVector("i32"));
+ case WasmValueObject::kI64:
+ return isolate->factory()->InternalizeString(StaticCharVector("i64"));
+ case WasmValueObject::kV128:
+ return isolate->factory()->InternalizeString(StaticCharVector("v128"));
+ case WasmValueObject::kNumTypes:
+ break;
+ }
+ UNREACHABLE();
+}
+
} // namespace
+// static
+Handle<WasmValueObject> WasmValueObject::New(Isolate* isolate, Type type,
+ Handle<Object> value) {
+ int map_index = kFirstWasmValueMapIndex + type;
+ DCHECK_LE(kFirstWasmValueMapIndex, map_index);
+ DCHECK_LE(map_index, kLastWasmValueMapIndex);
+ auto maps = GetOrCreateDebugMaps(isolate);
+ if (maps->is_the_hole(isolate, map_index)) {
+ auto type_name = Type2String(isolate, type);
+ auto shared = isolate->factory()->NewSharedFunctionInfoForBuiltin(
+ type_name, Builtins::kIllegal);
+ shared->set_language_mode(LanguageMode::kStrict);
+ auto constructor =
+ Factory::JSFunctionBuilder{isolate, shared, isolate->native_context()}
+ .set_map(isolate->strict_function_map())
+ .Build();
+ Handle<Map> map = isolate->factory()->NewMap(
+ WASM_VALUE_OBJECT_TYPE, WasmValueObject::kSize,
+ TERMINAL_FAST_ELEMENTS_KIND, 1);
+ Map::EnsureDescriptorSlack(isolate, map, 2);
+ { // type
+ Descriptor d = Descriptor::DataConstant(
+ isolate->factory()->InternalizeString(StaticCharVector("type")),
+ type_name, FROZEN);
+ map->AppendDescriptor(isolate, &d);
+ }
+ { // value
+ Descriptor d = Descriptor::DataField(
+ isolate,
+ isolate->factory()->InternalizeString(StaticCharVector("value")),
+ WasmValueObject::kValueIndex, FROZEN, Representation::Tagged());
+ map->AppendDescriptor(isolate, &d);
+ }
+ map->set_constructor_or_back_pointer(*constructor);
+ map->set_is_extensible(false);
+ maps->set(map_index, *map);
+ }
+ Handle<Map> value_map(Map::cast(maps->get(map_index)), isolate);
+ Handle<WasmValueObject> object = Handle<WasmValueObject>::cast(
+ isolate->factory()->NewJSObjectFromMap(value_map));
+ object->set_value(*value);
+ return object;
+}
+
+// static
+Handle<WasmValueObject> WasmValueObject::New(Isolate* isolate,
+ const wasm::WasmValue& value) {
+ switch (value.type().kind()) {
+ case wasm::kF32:
+ return New(isolate, kF32, isolate->factory()->NewNumber(value.to_f32()));
+ case wasm::kF64:
+ return New(isolate, kF64, isolate->factory()->NewNumber(value.to_f64()));
+ case wasm::kI32:
+ return New(isolate, kI32, isolate->factory()->NewNumber(value.to_i32()));
+ case wasm::kI64:
+ return New(isolate, kI64, BigInt::FromInt64(isolate, value.to_i64()));
+ case wasm::kRef:
+ return New(isolate, kExternRef, value.to_externref());
+ case wasm::kS128:
+ return New(isolate, kV128, WasmSimd128ToString(isolate, value.to_s128()));
+ default:
+ UNREACHABLE();
+ }
+}
+
Handle<JSObject> GetWasmDebugProxy(WasmFrame* frame) {
return ContextProxy::Create(frame);
}
@@ -783,5 +849,79 @@ std::unique_ptr<debug::ScopeIterator> GetWasmScopeIterator(WasmFrame* frame) {
return std::make_unique<DebugWasmScopeIterator>(frame);
}
+Handle<JSArray> GetWasmInstanceObjectInternalProperties(
+ Handle<WasmInstanceObject> instance) {
+ Isolate* isolate = instance->GetIsolate();
+ Handle<FixedArray> result = isolate->factory()->NewFixedArray(2 * 5);
+ int length = 0;
+
+ Handle<String> module_str =
+ isolate->factory()->NewStringFromAsciiChecked("[[Module]]");
+ Handle<Object> module_obj = handle(instance->module_object(), isolate);
+ result->set(length++, *module_str);
+ result->set(length++, *module_obj);
+
+ if (FunctionsProxy::Count(isolate, instance) != 0) {
+ Handle<String> functions_str =
+ isolate->factory()->NewStringFromAsciiChecked("[[Functions]]");
+ Handle<Object> functions_obj =
+ GetOrCreateInstanceProxy<FunctionsProxy>(isolate, instance);
+ result->set(length++, *functions_str);
+ result->set(length++, *functions_obj);
+ }
+
+ if (GlobalsProxy::Count(isolate, instance) != 0) {
+ Handle<String> globals_str =
+ isolate->factory()->NewStringFromAsciiChecked("[[Globals]]");
+ Handle<Object> globals_obj =
+ GetOrCreateInstanceProxy<GlobalsProxy>(isolate, instance);
+ result->set(length++, *globals_str);
+ result->set(length++, *globals_obj);
+ }
+
+ if (MemoriesProxy::Count(isolate, instance) != 0) {
+ Handle<String> memories_str =
+ isolate->factory()->NewStringFromAsciiChecked("[[Memories]]");
+ Handle<Object> memories_obj =
+ GetOrCreateInstanceProxy<MemoriesProxy>(isolate, instance);
+ result->set(length++, *memories_str);
+ result->set(length++, *memories_obj);
+ }
+
+ if (TablesProxy::Count(isolate, instance) != 0) {
+ Handle<String> tables_str =
+ isolate->factory()->NewStringFromAsciiChecked("[[Tables]]");
+ Handle<Object> tables_obj =
+ GetOrCreateInstanceProxy<TablesProxy>(isolate, instance);
+ result->set(length++, *tables_str);
+ result->set(length++, *tables_obj);
+ }
+
+ return isolate->factory()->NewJSArrayWithElements(result, PACKED_ELEMENTS,
+ length);
+}
+
+Handle<JSArray> GetWasmModuleObjectInternalProperties(
+ Handle<WasmModuleObject> module_object) {
+ Isolate* isolate = module_object->GetIsolate();
+ Handle<FixedArray> result = isolate->factory()->NewFixedArray(2 * 2);
+ int length = 0;
+
+ Handle<String> exports_str =
+ isolate->factory()->NewStringFromStaticChars("[[Exports]]");
+ Handle<JSArray> exports_obj = wasm::GetExports(isolate, module_object);
+ result->set(length++, *exports_str);
+ result->set(length++, *exports_obj);
+
+ Handle<String> imports_str =
+ isolate->factory()->NewStringFromStaticChars("[[Imports]]");
+ Handle<JSArray> imports_obj = wasm::GetImports(isolate, module_object);
+ result->set(length++, *imports_str);
+ result->set(length++, *imports_obj);
+
+ return isolate->factory()->NewJSArrayWithElements(result, PACKED_ELEMENTS,
+ length);
+}
+
} // namespace internal
} // namespace v8
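To make the new {type, value} debug representation concrete, here is a small,
hypothetical sketch (not part of this patch) that only uses the WasmValueObject::New
overloads defined above; it assumes code running inside v8::internal with a live
Isolate* named isolate:

    // Wrap a wasm i32 for the inspector: the resulting object carries a constant
    // "type" property ("i32") and a "value" property holding the number 42.
    Handle<WasmValueObject> i32_value = WasmValueObject::New(
        isolate, WasmValueObject::kI32, isolate->factory()->NewNumberFromInt(42));
    // SIMD (v128) values are rendered via WasmSimd128ToString into the canonical
    // text format, e.g. "i32x4 0x00000001 0x00000002 0x00000003 0x00000004".
    USE(i32_value);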
diff --git a/deps/v8/src/debug/debug-wasm-objects.h b/deps/v8/src/debug/debug-wasm-objects.h
new file mode 100644
index 0000000000..98c9e6db16
--- /dev/null
+++ b/deps/v8/src/debug/debug-wasm-objects.h
@@ -0,0 +1,75 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEBUG_DEBUG_WASM_OBJECTS_H_
+#define V8_DEBUG_DEBUG_WASM_OBJECTS_H_
+
+#include <memory>
+
+#include "src/objects/js-objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace debug {
+class ScopeIterator;
+} // namespace debug
+
+namespace internal {
+namespace wasm {
+class WasmValue;
+} // namespace wasm
+
+#include "torque-generated/src/debug/debug-wasm-objects-tq.inc"
+
+class WasmFrame;
+class WasmInstanceObject;
+class WasmModuleObject;
+
+class WasmValueObject : public JSObject {
+ public:
+ DECL_CAST(WasmValueObject)
+
+ DECL_ACCESSORS(value, Object)
+
+ // Dispatched behavior.
+ DECL_PRINTER(WasmValueObject)
+ DECL_VERIFIER(WasmValueObject)
+
+// Layout description.
+#define WASM_VALUE_FIELDS(V) \
+ V(kValueOffset, kTaggedSize) \
+ V(kSize, 0)
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, WASM_VALUE_FIELDS)
+#undef WASM_VALUE_FIELDS
+
+ // Indices of in-object properties.
+ static constexpr int kValueIndex = 0;
+
+ enum Type { kExternRef, kF32, kF64, kI32, kI64, kV128, kNumTypes };
+
+ static Handle<WasmValueObject> New(Isolate* isolate, Type type,
+ Handle<Object> value);
+ static Handle<WasmValueObject> New(Isolate* isolate,
+ const wasm::WasmValue& value);
+
+ OBJECT_CONSTRUCTORS(WasmValueObject, JSObject);
+};
+
+Handle<JSObject> GetWasmDebugProxy(WasmFrame* frame);
+
+std::unique_ptr<debug::ScopeIterator> GetWasmScopeIterator(WasmFrame* frame);
+
+Handle<JSArray> GetWasmInstanceObjectInternalProperties(
+ Handle<WasmInstanceObject> instance);
+Handle<JSArray> GetWasmModuleObjectInternalProperties(
+ Handle<WasmModuleObject> module_object);
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_DEBUG_DEBUG_WASM_OBJECTS_H_
diff --git a/deps/v8/src/debug/debug-wasm-objects.tq b/deps/v8/src/debug/debug-wasm-objects.tq
new file mode 100644
index 0000000000..0f7b860d94
--- /dev/null
+++ b/deps/v8/src/debug/debug-wasm-objects.tq
@@ -0,0 +1,7 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/debug/debug-wasm-objects.h'
+
+extern class WasmValueObject extends JSObject;
diff --git a/deps/v8/src/debug/debug-wasm-support.h b/deps/v8/src/debug/debug-wasm-support.h
deleted file mode 100644
index fba5759a77..0000000000
--- a/deps/v8/src/debug/debug-wasm-support.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_DEBUG_DEBUG_WASM_SUPPORT_H_
-#define V8_DEBUG_DEBUG_WASM_SUPPORT_H_
-
-#include <memory>
-
-namespace v8 {
-namespace debug {
-class ScopeIterator;
-} // namespace debug
-
-namespace internal {
-
-template <typename T>
-class Handle;
-class JSObject;
-class WasmFrame;
-
-Handle<JSObject> GetWasmDebugProxy(WasmFrame* frame);
-
-std::unique_ptr<debug::ScopeIterator> GetWasmScopeIterator(WasmFrame* frame);
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_DEBUG_DEBUG_WASM_SUPPORT_H_
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 8dce71960f..c29fb9b785 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -145,7 +145,7 @@ JSGeneratorObject BreakLocation::GetGeneratorObjectForSuspendedFrame(
DCHECK(IsSuspend());
DCHECK_GE(generator_obj_reg_index_, 0);
- Object generator_obj = InterpretedFrame::cast(frame)->ReadInterpreterRegister(
+ Object generator_obj = UnoptimizedFrame::cast(frame)->ReadInterpreterRegister(
generator_obj_reg_index_);
return JSGeneratorObject::cast(generator_obj);
@@ -217,20 +217,41 @@ BreakIterator::BreakIterator(Handle<DebugInfo> debug_info)
}
int BreakIterator::BreakIndexFromPosition(int source_position) {
- int distance = kMaxInt;
- int closest_break = break_index();
+ // TODO(crbug.com/901819): When there's no exact match, we
+ // should always pick the first match (in execution order)
+ // to ensure that when setting a breakpoint on a line, we
+ // really break as early as possible in that line. With
+ // generators that's currently broken because of the way
+  // the implicit yield is handled; this will be fixed in
+  // a follow-up CL.
+ if (IsGeneratorFunction(debug_info_->shared().kind()) ||
+ IsModule(debug_info_->shared().kind())) {
+ int distance = kMaxInt;
+ int closest_break = break_index();
+ while (!Done()) {
+ int next_position = position();
+ if (source_position <= next_position &&
+ next_position - source_position < distance) {
+ closest_break = break_index();
+ distance = next_position - source_position;
+ if (distance == 0) break;
+ }
+ Next();
+ }
+ return closest_break;
+ }
+ int first_break = break_index();
+ bool first = true;
while (!Done()) {
int next_position = position();
- if (source_position <= next_position &&
- next_position - source_position < distance) {
- closest_break = break_index();
- distance = next_position - source_position;
- // Check whether we can't get any closer.
- if (distance == 0) break;
+ if (source_position == next_position) return break_index();
+ if (source_position <= next_position && first) {
+ first_break = break_index();
+ first = false;
}
Next();
}
- return closest_break;
+ return first_break;
}
void BreakIterator::Next() {
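A short worked illustration of the BreakIndexFromPosition() change above (the
positions are made up, not taken from the patch): if a function's break locations
occur, in iteration order, at source positions 30, 25 and 40 and a breakpoint is
requested at position 20, the old code returned the numerically closest location at
or after 20 (i.e. 25), whereas the new code returns the first location at or after
20 in iteration order (i.e. 30); generator functions and modules keep the old
closest-match behavior for now, as the TODO explains.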
@@ -339,7 +360,6 @@ void DebugFeatureTracker::Track(DebugFeatureTracker::Feature feature) {
bitfield_ |= mask;
}
-
// Threading support.
void Debug::ThreadInit() {
thread_local_.break_frame_id_ = StackFrameId::NO_ID;
@@ -359,7 +379,6 @@ void Debug::ThreadInit() {
UpdateHookOnFunctionCall();
}
-
char* Debug::ArchiveDebug(char* storage) {
MemCopy(storage, reinterpret_cast<char*>(&thread_local_),
ArchiveSpacePerThread());
@@ -545,7 +564,6 @@ void Debug::Break(JavaScriptFrame* frame, Handle<JSFunction> break_target) {
}
}
-
// Find break point objects for this location, if any, and evaluate them.
// Return an array of break point objects that evaluated true, or an empty
// handle if none evaluated true.
@@ -560,7 +578,6 @@ MaybeHandle<FixedArray> Debug::CheckBreakPoints(Handle<DebugInfo> debug_info,
return Debug::GetHitBreakPoints(debug_info, location->position());
}
-
bool Debug::IsMutedAtCurrentLocation(JavaScriptFrame* frame) {
HandleScope scope(isolate_);
// A break location is considered muted if break locations on the current
@@ -663,35 +680,9 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
FindSharedFunctionInfoInScript(script, *source_position);
if (result->IsUndefined(isolate_)) return false;
- // Make sure the function has set up the debug info.
- Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>::cast(result);
- if (!EnsureBreakInfo(shared)) return false;
- PrepareFunctionForDebugExecution(shared);
-
- // Find position within function. The script position might be before the
- // source position of the first function.
- if (shared->StartPosition() > *source_position) {
- *source_position = shared->StartPosition();
- }
-
- Handle<DebugInfo> debug_info(shared->GetDebugInfo(), isolate_);
-
- // Find breakable position returns first breakable position after
- // *source_position, it can return 0 if no break location is found after
- // *source_position.
- int breakable_position = FindBreakablePosition(debug_info, *source_position);
- if (breakable_position < *source_position) return false;
- *source_position = breakable_position;
-
- DebugInfo::SetBreakPoint(isolate_, debug_info, *source_position, break_point);
- // At least one active break point now.
- DCHECK_LT(0, debug_info->GetBreakPointCount(isolate_));
-
- ClearBreakPoints(debug_info);
- ApplyBreakPoints(debug_info);
-
- feature_tracker()->Track(DebugFeatureTracker::kBreakPoint);
- return true;
+ // Set the breakpoint in the function.
+ auto shared = Handle<SharedFunctionInfo>::cast(result);
+ return SetBreakpoint(shared, break_point, source_position);
}
int Debug::FindBreakablePosition(Handle<DebugInfo> debug_info,
@@ -884,7 +875,6 @@ void Debug::ChangeBreakOnException(ExceptionBreakType type, bool enable) {
}
}
-
bool Debug::IsBreakOnException(ExceptionBreakType type) {
if (type == BreakUncaughtException) {
return break_on_uncaught_exception_;
@@ -1073,8 +1063,10 @@ void Debug::PrepareStep(StepAction step_action) {
if (!EnsureBreakInfo(shared)) return;
PrepareFunctionForDebugExecution(shared);
- Handle<DebugInfo> debug_info(shared->GetDebugInfo(), isolate_);
+ // PrepareFunctionForDebugExecution can invalidate Baseline frames
+ js_frame = JavaScriptFrame::cast(frames_it.Reframe());
+ Handle<DebugInfo> debug_info(shared->GetDebugInfo(), isolate_);
location = BreakLocation::FromFrame(debug_info, js_frame);
// Any step at a return is a step-out, and a step-out at a suspend behaves
@@ -1223,7 +1215,6 @@ void Debug::ClearStepping() {
UpdateHookOnFunctionCall();
}
-
// Clears all the one-shot break points that are currently set. Normally this
// function is called each time a break point is hit as one shot break points
// are used to support stepping.
@@ -1239,11 +1230,83 @@ void Debug::ClearOneShot() {
}
}
+namespace {
+class DiscardBaselineCodeVisitor : public ThreadVisitor {
+ public:
+ explicit DiscardBaselineCodeVisitor(SharedFunctionInfo shared)
+ : shared_(shared) {}
+ DiscardBaselineCodeVisitor() : shared_(SharedFunctionInfo()) {}
+
+ void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
+ bool deopt_all = shared_ == SharedFunctionInfo();
+ for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+ if (it.frame()->type() == StackFrame::BASELINE) {
+ BaselineFrame* frame = BaselineFrame::cast(it.frame());
+ if (!deopt_all && frame->function().shared() != shared_) continue;
+ int bytecode_offset = frame->GetBytecodeOffset();
+ Address* pc_addr = frame->pc_address();
+ Address advance = BUILTIN_CODE(isolate, InterpreterEnterBytecodeAdvance)
+ ->InstructionStart();
+ PointerAuthentication::ReplacePC(pc_addr, advance, kSystemPointerSize);
+ InterpretedFrame::cast(it.Reframe())
+ ->PatchBytecodeOffset(bytecode_offset);
+ }
+ }
+ }
+
+ private:
+ SharedFunctionInfo shared_;
+ DISALLOW_GARBAGE_COLLECTION(no_gc_)
+};
+} // namespace
+
+void Debug::DiscardBaselineCode(SharedFunctionInfo shared) {
+ DCHECK(shared.HasBaselineData());
+ Isolate* isolate = shared.GetIsolate();
+ DiscardBaselineCodeVisitor visitor(shared);
+ visitor.VisitThread(isolate, isolate->thread_local_top());
+ isolate->thread_manager()->IterateArchivedThreads(&visitor);
+ // TODO(v8:11429): Avoid this heap walk somehow.
+ HeapObjectIterator iterator(isolate->heap());
+ auto trampoline = BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
+ shared.flush_baseline_data();
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
+ if (obj.IsJSFunction()) {
+ JSFunction fun = JSFunction::cast(obj);
+ if (fun.shared() == shared && fun.ActiveTierIsBaseline()) {
+ fun.set_code(*trampoline);
+ }
+ }
+ }
+}
+
+void Debug::DiscardAllBaselineCode() {
+ DiscardBaselineCodeVisitor visitor;
+ visitor.VisitThread(isolate_, isolate_->thread_local_top());
+ HeapObjectIterator iterator(isolate_->heap());
+ auto trampoline = BUILTIN_CODE(isolate_, InterpreterEntryTrampoline);
+ isolate_->thread_manager()->IterateArchivedThreads(&visitor);
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
+ if (obj.IsJSFunction()) {
+ JSFunction fun = JSFunction::cast(obj);
+ if (fun.shared().HasBaselineData()) {
+ fun.set_code(*trampoline);
+ }
+ }
+ }
+}
+
void Debug::DeoptimizeFunction(Handle<SharedFunctionInfo> shared) {
// Deoptimize all code compiled from this shared function info including
// inlining.
isolate_->AbortConcurrentOptimization(BlockingBehavior::kBlock);
+ if (shared->HasBaselineData()) {
+ DiscardBaselineCode(*shared);
+ }
+
bool found_something = false;
Code::OptimizedCodeIterator iterator(isolate_);
do {
@@ -1278,6 +1341,7 @@ void Debug::PrepareFunctionForDebugExecution(
if (debug_info->CanBreakAtEntry()) {
// Deopt everything in case the function is inlined anywhere.
Deoptimizer::DeoptimizeAll(isolate_);
+ DiscardAllBaselineCode();
InstallDebugBreakTrampoline();
} else {
DeoptimizeFunction(shared);
@@ -1354,8 +1418,9 @@ void Debug::InstallDebugBreakTrampoline() {
}
if (recorded.find(accessor_pair) != recorded.end()) continue;
- needs_instantiate.emplace_back(handle(accessor_pair, isolate_),
- object.GetCreationContext());
+ needs_instantiate.emplace_back(
+ handle(accessor_pair, isolate_),
+ object.GetCreationContext().ToHandleChecked());
recorded.insert(accessor_pair);
}
}
@@ -1392,7 +1457,8 @@ void Debug::InstallDebugBreakTrampoline() {
// to shared code, we bypass CompileLazy. Perform CompileLazy here instead.
for (Handle<JSFunction> fun : needs_compile) {
IsCompiledScope is_compiled_scope;
- Compiler::Compile(fun, Compiler::CLEAR_EXCEPTION, &is_compiled_scope);
+ Compiler::Compile(isolate_, fun, Compiler::CLEAR_EXCEPTION,
+ &is_compiled_scope);
DCHECK(is_compiled_scope.is_compiled());
fun->set_code(*trampoline);
}
@@ -1460,7 +1526,7 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
if (!is_compiled_scope.is_compiled()) {
      // Code that cannot be compiled lazily is internal and not debuggable.
DCHECK(candidate->allows_lazy_compilation());
- if (!Compiler::Compile(candidate, Compiler::CLEAR_EXCEPTION,
+ if (!Compiler::Compile(isolate_, candidate, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope)) {
return false;
} else {
@@ -1534,6 +1600,19 @@ class SharedFunctionInfoFinder {
DISALLOW_GARBAGE_COLLECTION(no_gc_)
};
+namespace {
+SharedFunctionInfo FindSharedFunctionInfoCandidate(int position,
+ Handle<Script> script,
+ Isolate* isolate) {
+ SharedFunctionInfoFinder finder(position);
+ SharedFunctionInfo::ScriptIterator iterator(isolate, *script);
+ for (SharedFunctionInfo info = iterator.Next(); !info.is_null();
+ info = iterator.Next()) {
+ finder.NewCandidate(info);
+ }
+ return finder.Result();
+}
+} // namespace
// We need to find a SFI for a literal that may not have been compiled yet,
// and there may not be a JSFunction referencing it. Find the SFI closest to
@@ -1552,14 +1631,20 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
SharedFunctionInfo shared;
IsCompiledScope is_compiled_scope;
{
- SharedFunctionInfoFinder finder(position);
- SharedFunctionInfo::ScriptIterator iterator(isolate_, *script);
- for (SharedFunctionInfo info = iterator.Next(); !info.is_null();
- info = iterator.Next()) {
- finder.NewCandidate(info);
+ shared = FindSharedFunctionInfoCandidate(position, script, isolate_);
+ if (shared.is_null()) {
+      // The shared function info might not be available because the
+      // top-level functions were removed by the GC. Try to recompile
+      // the top-level functions.
+ UnoptimizedCompileState compile_state(isolate_);
+ UnoptimizedCompileFlags flags =
+ UnoptimizedCompileFlags::ForScriptCompile(isolate_, *script);
+ ParseInfo parse_info(isolate_, flags, &compile_state);
+ IsCompiledScope is_compiled_scope;
+ Compiler::CompileToplevel(&parse_info, script, isolate_,
+ &is_compiled_scope);
+ continue;
}
- shared = finder.Result();
- if (shared.is_null()) break;
// We found it if it's already compiled.
is_compiled_scope = shared.is_compiled_scope(isolate_);
if (is_compiled_scope.is_compiled()) {
@@ -1579,15 +1664,14 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
HandleScope scope(isolate_);
    // Code that cannot be compiled lazily is internal and not debuggable.
DCHECK(shared.allows_lazy_compilation());
- if (!Compiler::Compile(handle(shared, isolate_), Compiler::CLEAR_EXCEPTION,
- &is_compiled_scope)) {
+ if (!Compiler::Compile(isolate_, handle(shared, isolate_),
+ Compiler::CLEAR_EXCEPTION, &is_compiled_scope)) {
break;
}
}
return isolate_->factory()->undefined_value();
}
-
// Ensures the debug information is present for shared.
bool Debug::EnsureBreakInfo(Handle<SharedFunctionInfo> shared) {
// Return if we already have the break info for shared.
@@ -1597,7 +1681,7 @@ bool Debug::EnsureBreakInfo(Handle<SharedFunctionInfo> shared) {
}
IsCompiledScope is_compiled_scope = shared->is_compiled_scope(isolate_);
if (!is_compiled_scope.is_compiled() &&
- !Compiler::Compile(shared, Compiler::CLEAR_EXCEPTION,
+ !Compiler::Compile(isolate_, shared, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope)) {
return false;
}
@@ -2115,7 +2199,8 @@ void Debug::HandleDebugBreak(IgnoreBreakMode ignore_break_mode) {
StackLimitCheck check(isolate_);
if (check.HasOverflowed()) return;
- { JavaScriptFrameIterator it(isolate_);
+ {
+ JavaScriptFrameIterator it(isolate_);
DCHECK(!it.done());
Object fun = it.frame()->function();
if (fun.IsJSFunction()) {
@@ -2335,7 +2420,7 @@ bool Debug::PerformSideEffectCheck(Handle<JSFunction> function,
IsCompiledScope is_compiled_scope(
function->shared().is_compiled_scope(isolate_));
if (!function->is_compiled() &&
- !Compiler::Compile(function, Compiler::KEEP_EXCEPTION,
+ !Compiler::Compile(isolate_, function, Compiler::KEEP_EXCEPTION,
&is_compiled_scope)) {
return false;
}
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index ef5402c143..9462f70125 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -265,6 +265,9 @@ class V8_EXPORT_PRIVATE Debug {
void SetBreakOnNextFunctionCall();
void ClearBreakOnNextFunctionCall();
+ void DiscardBaselineCode(SharedFunctionInfo shared);
+ void DiscardAllBaselineCode();
+
void DeoptimizeFunction(Handle<SharedFunctionInfo> shared);
void PrepareFunctionForDebugExecution(Handle<SharedFunctionInfo> shared);
void InstallDebugBreakTrampoline();
diff --git a/deps/v8/src/debug/ia32/debug-ia32.cc b/deps/v8/src/debug/ia32/debug-ia32.cc
index 309614c575..72d4ac37df 100644
--- a/deps/v8/src/debug/ia32/debug-ia32.cc
+++ b/deps/v8/src/debug/ia32/debug-ia32.cc
@@ -34,16 +34,14 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
// - Restart the frame by calling the function.
__ mov(ebp, eax);
__ mov(edi, Operand(ebp, StandardFrameConstants::kFunctionOffset));
+ __ mov(eax, Operand(ebp, StandardFrameConstants::kArgCOffset));
__ leave();
- __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ movzx_w(
- eax, FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
-
- // The expected and actual argument counts don't matter as long as they match
- // and we don't enter the ArgumentsAdaptorTrampoline.
+  // The arguments are already on the stack (including any necessary padding),
+  // so we should not try to massage the arguments again.
+ __ mov(ecx, Immediate(kDontAdaptArgumentsSentinel));
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- __ InvokeFunctionCode(edi, no_reg, eax, eax, JUMP_FUNCTION);
+ __ InvokeFunctionCode(edi, no_reg, ecx, eax, JUMP_FUNCTION);
}
const bool LiveEdit::kFrameDropperSupported = true;
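
The trampoline above now reads the actual argument count from the frame and passes kDontAdaptArgumentsSentinel as the expected count, so InvokeFunctionCode reuses the arguments already on the stack instead of adapting them. Below is a rough standalone model of that short-circuit; the types, the sentinel value, and the helper name are illustrative only, not V8 internals.

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Stand-in for the real sentinel constant; the value here is arbitrary
    // and chosen only for this sketch.
    constexpr size_t kDontAdaptArgumentsSentinel = static_cast<size_t>(-1);

    // Toy model of argument adaptation on invocation: normally missing
    // arguments are padded (0 stands in for undefined); the sentinel means
    // the arguments already on the stack are used exactly as they are.
    std::vector<int> PrepareArguments(std::vector<int> actual, size_t expected) {
      if (expected == kDontAdaptArgumentsSentinel) return actual;  // no adaptation
      if (actual.size() < expected) actual.resize(expected, 0);    // pad missing
      return actual;
    }

    int main() {
      std::vector<int> on_stack = {1, 2, 3};
      // Frame-dropper case: the restarted frame's arguments are reused as-is.
      std::cout << PrepareArguments(on_stack, kDontAdaptArgumentsSentinel).size()
                << '\n';  // 3
      // Ordinary call into a function declared with 5 parameters.
      std::cout << PrepareArguments(on_stack, 5).size() << '\n';  // 5
      return 0;
    }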
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index 7d740336e8..17d9cb240f 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -1191,7 +1191,7 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
isolate->compilation_cache()->Remove(sfi);
for (auto& js_function : data->js_functions) {
js_function->set_shared(*new_sfi);
- js_function->set_code(js_function->shared().GetCode());
+ js_function->set_code(js_function->shared().GetCode(), kReleaseStore);
js_function->set_raw_feedback_cell(
*isolate->factory()->many_closures_cell());
diff --git a/deps/v8/src/debug/mips/debug-mips.cc b/deps/v8/src/debug/mips/debug-mips.cc
index 30bf2159bc..d1ab6ec545 100644
--- a/deps/v8/src/debug/mips/debug-mips.cc
+++ b/deps/v8/src/debug/mips/debug-mips.cc
@@ -34,14 +34,12 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
// - Restart the frame by calling the function.
__ mov(fp, a1);
__ lw(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ lw(a0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
// Pop return address and frame.
__ LeaveFrame(StackFrame::INTERNAL);
- __ lw(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lhu(a0,
- FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(a2, a0);
+ __ li(a2, Operand(kDontAdaptArgumentsSentinel));
__ InvokeFunction(a1, a2, a0, JUMP_FUNCTION);
}
diff --git a/deps/v8/src/debug/mips64/debug-mips64.cc b/deps/v8/src/debug/mips64/debug-mips64.cc
index f677a38ee2..7b8e9e9744 100644
--- a/deps/v8/src/debug/mips64/debug-mips64.cc
+++ b/deps/v8/src/debug/mips64/debug-mips64.cc
@@ -34,14 +34,12 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
// - Restart the frame by calling the function.
__ mov(fp, a1);
__ Ld(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ Ld(a0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
// Pop return address and frame.
__ LeaveFrame(StackFrame::INTERNAL);
- __ Ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Lhu(a0,
- FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(a2, a0);
+ __ li(a2, Operand(kDontAdaptArgumentsSentinel));
__ InvokeFunction(a1, a2, a0, JUMP_FUNCTION);
}
diff --git a/deps/v8/src/debug/ppc/debug-ppc.cc b/deps/v8/src/debug/ppc/debug-ppc.cc
index eeed2d8e3e..c083708d3a 100644
--- a/deps/v8/src/debug/ppc/debug-ppc.cc
+++ b/deps/v8/src/debug/ppc/debug-ppc.cc
@@ -35,13 +35,12 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
__ mr(fp, r4);
__ LoadP(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ LeaveFrame(StackFrame::INTERNAL);
- __ LoadTaggedPointerField(
- r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ lhz(r3,
- FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mr(r5, r3);
+  // The arguments are already on the stack (including any necessary padding),
+  // so we should not try to massage the arguments again.
+ __ mov(r5, Operand(kDontAdaptArgumentsSentinel));
__ InvokeFunction(r4, r5, r3, JUMP_FUNCTION);
}
diff --git a/deps/v8/src/debug/riscv64/debug-riscv64.cc b/deps/v8/src/debug/riscv64/debug-riscv64.cc
new file mode 100644
index 0000000000..b292300150
--- /dev/null
+++ b/deps/v8/src/debug/riscv64/debug-riscv64.cc
@@ -0,0 +1,55 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_RISCV64
+
+#include "src/codegen/macro-assembler.h"
+#include "src/debug/debug.h"
+#include "src/debug/liveedit.h"
+#include "src/execution/frames-inl.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
+ }
+ __ MaybeDropFrames();
+
+ // Return to caller.
+ __ Ret();
+}
+
+void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
+ // Frame is being dropped:
+ // - Drop to the target frame specified by a1.
+ // - Look up current function on the frame.
+ // - Leave the frame.
+ // - Restart the frame by calling the function.
+ __ mv(fp, a1);
+ __ Ld(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+
+ // Pop return address and frame.
+ __ LeaveFrame(StackFrame::INTERNAL);
+
+ __ Ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Lhu(a0,
+ FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mv(a2, a0);
+
+ __ InvokeFunction(a1, a2, a0, JUMP_FUNCTION);
+}
+
+const bool LiveEdit::kFrameDropperSupported = true;
+
+#undef __
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/debug/s390/debug-s390.cc b/deps/v8/src/debug/s390/debug-s390.cc
index c618491366..b58e70b851 100644
--- a/deps/v8/src/debug/s390/debug-s390.cc
+++ b/deps/v8/src/debug/s390/debug-s390.cc
@@ -37,13 +37,12 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
__ mov(fp, r3);
__ LoadU64(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ LoadU64(r2, MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ LeaveFrame(StackFrame::INTERNAL);
- __ LoadTaggedPointerField(
- r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadU16(
- r2, FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(r4, r2);
+  // The arguments are already on the stack (including any necessary padding),
+  // so we should not try to massage the arguments again.
+ __ mov(r4, Operand(kDontAdaptArgumentsSentinel));
__ InvokeFunction(r3, r4, r2, JUMP_FUNCTION);
}
diff --git a/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc b/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
index 2f307dd5c0..2c08595f84 100644
--- a/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
+++ b/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
@@ -96,6 +96,7 @@ std::vector<wasm_addr_t> WasmModuleDebug::GetCallStack(
case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH:
case StackFrame::OPTIMIZED:
case StackFrame::INTERPRETED:
+ case StackFrame::BASELINE:
case StackFrame::BUILTIN:
case StackFrame::WASM: {
// A standard frame may include many summarized frames, due to inlining.
@@ -154,6 +155,7 @@ std::vector<FrameSummary> WasmModuleDebug::FindWasmFrame(
case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH:
case StackFrame::OPTIMIZED:
case StackFrame::INTERPRETED:
+ case StackFrame::BASELINE:
case StackFrame::BUILTIN:
case StackFrame::WASM: {
// A standard frame may include many summarized frames, due to inlining.
diff --git a/deps/v8/src/debug/x64/debug-x64.cc b/deps/v8/src/debug/x64/debug-x64.cc
index 3d25c5f19f..2209213831 100644
--- a/deps/v8/src/debug/x64/debug-x64.cc
+++ b/deps/v8/src/debug/x64/debug-x64.cc
@@ -34,17 +34,15 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
// - Look up current function on the frame.
// - Leave the frame.
// - Restart the frame by calling the function.
-
__ movq(rbp, rbx);
__ movq(rdi, Operand(rbp, StandardFrameConstants::kFunctionOffset));
+ __ movq(rax, Operand(rbp, StandardFrameConstants::kArgCOffset));
__ leave();
- __ LoadTaggedPointerField(
- rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movzxwq(
- rbx, FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));
-
- __ InvokeFunction(rdi, no_reg, rbx, rbx, JUMP_FUNCTION);
+  // The arguments are already on the stack (including any necessary padding),
+  // so we should not try to massage the arguments again.
+ __ movq(rbx, Immediate(kDontAdaptArgumentsSentinel));
+ __ InvokeFunction(rdi, no_reg, rbx, rax, JUMP_FUNCTION);
}
const bool LiveEdit::kFrameDropperSupported = true;
diff --git a/deps/v8/src/deoptimizer/DEPS b/deps/v8/src/deoptimizer/DEPS
new file mode 100644
index 0000000000..4842a26562
--- /dev/null
+++ b/deps/v8/src/deoptimizer/DEPS
@@ -0,0 +1,5 @@
+specific_include_rules = {
+ "translation-array.cc": [
+ "+third_party/zlib",
+ ],
+}
diff --git a/deps/v8/src/deoptimizer/OWNERS b/deps/v8/src/deoptimizer/OWNERS
index eae6bba0ae..137347321a 100644
--- a/deps/v8/src/deoptimizer/OWNERS
+++ b/deps/v8/src/deoptimizer/OWNERS
@@ -2,4 +2,3 @@ jgruber@chromium.org
neis@chromium.org
nicohartmann@chromium.org
sigurds@chromium.org
-tebbi@chromium.org
diff --git a/deps/v8/src/deoptimizer/deoptimized-frame-info.cc b/deps/v8/src/deoptimizer/deoptimized-frame-info.cc
new file mode 100644
index 0000000000..a424a73ea1
--- /dev/null
+++ b/deps/v8/src/deoptimizer/deoptimized-frame-info.cc
@@ -0,0 +1,74 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/deoptimizer/deoptimized-frame-info.h"
+
+#include "src/execution/isolate.h"
+#include "src/objects/js-function-inl.h"
+#include "src/objects/oddball.h"
+
+namespace v8 {
+namespace internal {
+namespace {
+
+Handle<Object> GetValueForDebugger(TranslatedFrame::iterator it,
+ Isolate* isolate) {
+ if (it->GetRawValue() == ReadOnlyRoots(isolate).arguments_marker() &&
+ !it->IsMaterializableByDebugger()) {
+ return isolate->factory()->optimized_out();
+ }
+ return it->GetValue();
+}
+
+} // namespace
+
+DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
+ TranslatedState::iterator frame_it,
+ Isolate* isolate) {
+ int parameter_count =
+ frame_it->shared_info()->internal_formal_parameter_count();
+ TranslatedFrame::iterator stack_it = frame_it->begin();
+
+ // Get the function. Note that this might materialize the function.
+ // In case the debugger mutates this value, we should deoptimize
+ // the function and remember the value in the materialized value store.
+ DCHECK_EQ(parameter_count, Handle<JSFunction>::cast(stack_it->GetValue())
+ ->shared()
+ .internal_formal_parameter_count());
+
+ stack_it++; // Skip the function.
+ stack_it++; // Skip the receiver.
+
+ DCHECK_EQ(TranslatedFrame::kUnoptimizedFunction, frame_it->kind());
+
+ parameters_.resize(static_cast<size_t>(parameter_count));
+ for (int i = 0; i < parameter_count; i++) {
+ Handle<Object> parameter = GetValueForDebugger(stack_it, isolate);
+ SetParameter(i, parameter);
+ stack_it++;
+ }
+
+ // Get the context.
+ context_ = GetValueForDebugger(stack_it, isolate);
+ stack_it++;
+
+ // Get the expression stack.
+ DCHECK_EQ(TranslatedFrame::kUnoptimizedFunction, frame_it->kind());
+ const int stack_height = frame_it->height(); // Accumulator *not* included.
+
+ expression_stack_.resize(static_cast<size_t>(stack_height));
+ for (int i = 0; i < stack_height; i++) {
+ Handle<Object> expression = GetValueForDebugger(stack_it, isolate);
+ SetExpression(i, expression);
+ stack_it++;
+ }
+
+ DCHECK_EQ(TranslatedFrame::kUnoptimizedFunction, frame_it->kind());
+ stack_it++; // Skip the accumulator.
+
+ CHECK(stack_it == frame_it->end());
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/deoptimizer/deoptimized-frame-info.h b/deps/v8/src/deoptimizer/deoptimized-frame-info.h
new file mode 100644
index 0000000000..cb0c48d110
--- /dev/null
+++ b/deps/v8/src/deoptimizer/deoptimized-frame-info.h
@@ -0,0 +1,70 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEOPTIMIZER_DEOPTIMIZED_FRAME_INFO_H_
+#define V8_DEOPTIMIZER_DEOPTIMIZED_FRAME_INFO_H_
+
+#include <vector>
+
+#include "src/deoptimizer/translated-state.h"
+
+namespace v8 {
+namespace internal {
+
+// Class used to represent an unoptimized frame when the debugger
+// needs to inspect a frame that is part of an optimized frame. The
+// internally used FrameDescription objects are not GC safe, so for use
+// by the debugger the frame information is copied to an object of this type.
+// Parameters are represented in unadapted form, so their number might not
+// match the formal parameter count.
+class DeoptimizedFrameInfo : public Malloced {
+ public:
+ DeoptimizedFrameInfo(TranslatedState* state,
+ TranslatedState::iterator frame_it, Isolate* isolate);
+
+ // Get the frame context.
+ Handle<Object> GetContext() { return context_; }
+
+ // Get an incoming argument.
+ Handle<Object> GetParameter(int index) {
+ DCHECK(0 <= index && index < parameters_count());
+ return parameters_[index];
+ }
+
+ // Get an expression from the expression stack.
+ Handle<Object> GetExpression(int index) {
+ DCHECK(0 <= index && index < expression_count());
+ return expression_stack_[index];
+ }
+
+ private:
+ // Return the number of incoming arguments.
+ int parameters_count() { return static_cast<int>(parameters_.size()); }
+
+ // Return the height of the expression stack.
+ int expression_count() { return static_cast<int>(expression_stack_.size()); }
+
+ // Set an incoming argument.
+ void SetParameter(int index, Handle<Object> obj) {
+ DCHECK(0 <= index && index < parameters_count());
+ parameters_[index] = obj;
+ }
+
+ // Set an expression on the expression stack.
+ void SetExpression(int index, Handle<Object> obj) {
+ DCHECK(0 <= index && index < expression_count());
+ expression_stack_[index] = obj;
+ }
+
+ Handle<Object> context_;
+ std::vector<Handle<Object>> parameters_;
+ std::vector<Handle<Object>> expression_stack_;
+
+ friend class Deoptimizer;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DEOPTIMIZER_DEOPTIMIZED_FRAME_INFO_H_
diff --git a/deps/v8/src/deoptimizer/deoptimizer-cfi-builtins.cc b/deps/v8/src/deoptimizer/deoptimizer-cfi-builtins.cc
index ae00947a10..e1c08e4a99 100644
--- a/deps/v8/src/deoptimizer/deoptimizer-cfi-builtins.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer-cfi-builtins.cc
@@ -12,7 +12,6 @@ void Builtins_ContinueToCodeStubBuiltinWithResult();
void Builtins_ContinueToCodeStubBuiltin();
void Builtins_ContinueToJavaScriptBuiltinWithResult();
void Builtins_ContinueToJavaScriptBuiltin();
-void arguments_adaptor_deopt_addr();
void construct_stub_create_deopt_addr();
void construct_stub_invoke_deopt_addr();
typedef void (*function_ptr)();
@@ -31,7 +30,6 @@ constexpr function_ptr builtins[] = {
&Builtins_ContinueToJavaScriptBuiltin,
&construct_stub_create_deopt_addr,
&construct_stub_invoke_deopt_addr,
- &arguments_adaptor_deopt_addr,
};
bool Deoptimizer::IsValidReturnAddress(Address address) {
diff --git a/deps/v8/src/deoptimizer/deoptimizer.cc b/deps/v8/src/deoptimizer/deoptimizer.cc
index b1ad9c19b9..340dede229 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer.cc
@@ -4,41 +4,36 @@
#include "src/deoptimizer/deoptimizer.h"
-#include <memory>
-
-#include "src/ast/prettyprinter.h"
-#include "src/builtins/accessors.h"
-#include "src/codegen/assembler-inl.h"
-#include "src/codegen/callable.h"
-#include "src/codegen/macro-assembler.h"
+#include "src/base/memory.h"
+#include "src/codegen/interface-descriptors.h"
#include "src/codegen/register-configuration.h"
-#include "src/common/assert-scope.h"
-#include "src/diagnostics/disasm.h"
+#include "src/codegen/reloc-info.h"
+#include "src/deoptimizer/deoptimized-frame-info.h"
+#include "src/deoptimizer/materialized-object-store.h"
#include "src/execution/frames-inl.h"
+#include "src/execution/isolate.h"
#include "src/execution/pointer-authentication.h"
#include "src/execution/v8threads.h"
-#include "src/handles/global-handles.h"
-#include "src/heap/heap-inl.h"
-#include "src/init/v8.h"
-#include "src/interpreter/interpreter.h"
+#include "src/handles/handles-inl.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
-#include "src/objects/arguments.h"
-#include "src/objects/debug-objects-inl.h"
-#include "src/objects/heap-number-inl.h"
-#include "src/objects/smi.h"
+#include "src/objects/js-function-inl.h"
+#include "src/objects/oddball.h"
#include "src/snapshot/embedded/embedded-data.h"
-#include "src/tracing/trace-event.h"
-
-// Has to be the last include (doesn't have include guards)
-#include "src/objects/object-macros.h"
+#include "src/wasm/wasm-linkage.h"
namespace v8 {
+
+using base::Memory;
+
namespace internal {
// {FrameWriter} offers a stack writer abstraction for writing
// FrameDescriptions. The main service the class provides is managing
// {top_offset_}, i.e. the offset of the next slot to write to.
+//
+// Note: Not in an anonymous namespace due to the friend class declaration
+// in Deoptimizer.
class FrameWriter {
public:
static const int NO_INPUT_INDEX = -1;
@@ -193,11 +188,11 @@ Code Deoptimizer::FindDeoptimizingCode(Address addr) {
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
Deoptimizer* Deoptimizer::New(Address raw_function, DeoptimizeKind kind,
- unsigned bailout_id, Address from,
+ unsigned deopt_exit_index, Address from,
int fp_to_sp_delta, Isolate* isolate) {
JSFunction function = JSFunction::cast(Object(raw_function));
- Deoptimizer* deoptimizer = new Deoptimizer(isolate, function, kind,
- bailout_id, from, fp_to_sp_delta);
+ Deoptimizer* deoptimizer = new Deoptimizer(
+ isolate, function, kind, deopt_exit_index, from, fp_to_sp_delta);
isolate->set_current_deoptimizer(deoptimizer);
return deoptimizer;
}
@@ -219,7 +214,7 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
int counter = jsframe_index;
for (auto it = translated_values.begin(); it != translated_values.end();
it++) {
- if (it->kind() == TranslatedFrame::kInterpretedFunction ||
+ if (it->kind() == TranslatedFrame::kUnoptimizedFunction ||
it->kind() == TranslatedFrame::kJavaScriptBuiltinContinuation ||
it->kind() ==
TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch) {
@@ -233,7 +228,7 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
CHECK(frame_it != translated_values.end());
// We only include kJavaScriptBuiltinContinuation frames above to get the
// counting right.
- CHECK_EQ(frame_it->kind(), TranslatedFrame::kInterpretedFunction);
+ CHECK_EQ(frame_it->kind(), TranslatedFrame::kUnoptimizedFunction);
DeoptimizedFrameInfo* info =
new DeoptimizedFrameInfo(&translated_values, frame_it, isolate);
@@ -442,7 +437,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
// The code in the function's optimized code feedback vector slot might
// be different from the code on the function - evict it if necessary.
function.feedback_vector().EvictOptimizedCodeMarkedForDeoptimization(
- function.shared(), "unlinking code marked for deopt");
+ function.raw_feedback_cell(), function.shared(),
+ "unlinking code marked for deopt");
if (!code.deopt_already_counted()) {
code.set_deopt_already_counted(true);
}
@@ -487,11 +483,11 @@ uint16_t InternalFormalParameterCountWithReceiver(SharedFunctionInfo sfi) {
} // namespace
Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
- DeoptimizeKind kind, unsigned bailout_id, Address from,
- int fp_to_sp_delta)
+ DeoptimizeKind kind, unsigned deopt_exit_index,
+ Address from, int fp_to_sp_delta)
: isolate_(isolate),
function_(function),
- bailout_id_(bailout_id),
+ deopt_exit_index_(deopt_exit_index),
deopt_kind_(kind),
from_(from),
fp_to_sp_delta_(fp_to_sp_delta),
@@ -500,16 +496,14 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
catch_handler_pc_offset_(-1),
input_(nullptr),
output_count_(0),
- jsframe_count_(0),
output_(nullptr),
caller_frame_top_(0),
caller_fp_(0),
caller_pc_(0),
caller_constant_pool_(0),
- input_frame_context_(0),
actual_argument_count_(0),
stack_fp_(0),
- trace_scope_(FLAG_trace_deopt
+ trace_scope_(FLAG_trace_deopt || FLAG_log_deopt
? new CodeTracer::Scope(isolate->GetCodeTracer())
: nullptr) {
if (isolate->deoptimizer_lazy_throw()) {
@@ -517,8 +511,8 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
deoptimizing_throw_ = true;
}
- DCHECK(bailout_id_ == kFixedExitSizeMarker ||
- bailout_id_ < kMaxNumberOfEntries);
+ DCHECK(deopt_exit_index_ == kFixedExitSizeMarker ||
+ deopt_exit_index_ < kMaxNumberOfEntries);
DCHECK_NE(from, kNullAddress);
compiled_code_ = FindOptimizedCode();
@@ -547,8 +541,8 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
input_ = new (size) FrameDescription(size, parameter_count);
if (kSupportsFixedDeoptExitSizes) {
- DCHECK_EQ(bailout_id_, kFixedExitSizeMarker);
- // Calculate bailout id from return address.
+ DCHECK_EQ(deopt_exit_index_, kFixedExitSizeMarker);
+ // Calculate the deopt exit index from return address.
DCHECK_GT(kNonLazyDeoptExitSize, 0);
DCHECK_GT(kLazyDeoptExitSize, 0);
DeoptimizationData deopt_data =
@@ -578,19 +572,20 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
int offset =
static_cast<int>(from_ - kNonLazyDeoptExitSize - deopt_start);
DCHECK_EQ(0, offset % kNonLazyDeoptExitSize);
- bailout_id_ = offset / kNonLazyDeoptExitSize;
+ deopt_exit_index_ = offset / kNonLazyDeoptExitSize;
} else if (from_ <= eager_with_resume_deopt_start) {
int offset =
static_cast<int>(from_ - kLazyDeoptExitSize - lazy_deopt_start);
DCHECK_EQ(0, offset % kLazyDeoptExitSize);
- bailout_id_ =
+ deopt_exit_index_ =
eager_soft_and_bailout_deopt_count + (offset / kLazyDeoptExitSize);
} else {
int offset = static_cast<int>(from_ - kNonLazyDeoptExitSize -
eager_with_resume_deopt_start);
DCHECK_EQ(0, offset % kEagerWithResumeDeoptExitSize);
- bailout_id_ = eager_soft_and_bailout_deopt_count + lazy_deopt_count +
- (offset / kEagerWithResumeDeoptExitSize);
+ deopt_exit_index_ = eager_soft_and_bailout_deopt_count +
+ lazy_deopt_count +
+ (offset / kEagerWithResumeDeoptExitSize);
}
}
}
@@ -713,8 +708,8 @@ namespace {
int LookupCatchHandler(Isolate* isolate, TranslatedFrame* translated_frame,
int* data_out) {
switch (translated_frame->kind()) {
- case TranslatedFrame::kInterpretedFunction: {
- int bytecode_offset = translated_frame->node_id().ToInt();
+ case TranslatedFrame::kUnoptimizedFunction: {
+ int bytecode_offset = translated_frame->bytecode_offset().ToInt();
HandlerTable table(
translated_frame->raw_shared_info().GetBytecodeArray(isolate));
return table.LookupRange(bytecode_offset, data_out, nullptr);
@@ -730,7 +725,8 @@ int LookupCatchHandler(Isolate* isolate, TranslatedFrame* translated_frame,
} // namespace
-void Deoptimizer::TraceDeoptBegin(int optimization_id, int node_id) {
+void Deoptimizer::TraceDeoptBegin(int optimization_id,
+ BytecodeOffset bytecode_offset) {
DCHECK(tracing_enabled());
FILE* file = trace_scope()->file();
Deoptimizer::DeoptInfo info =
@@ -744,10 +740,11 @@ void Deoptimizer::TraceDeoptBegin(int optimization_id, int node_id) {
PrintF(file, "%s", CodeKindToString(compiled_code_.kind()));
}
PrintF(file,
- ", opt id %d, node id %d, bailout id %d, FP to SP delta %d, "
+ ", opt id %d, bytecode offset %d, deopt exit %d, FP to SP delta %d, "
"caller SP " V8PRIxPTR_FMT ", pc " V8PRIxPTR_FMT "]\n",
- optimization_id, node_id, bailout_id_, fp_to_sp_delta_,
- caller_frame_top_, PointerAuthentication::StripPAC(from_));
+ optimization_id, bytecode_offset.ToInt(), deopt_exit_index_,
+ fp_to_sp_delta_, caller_frame_top_,
+ PointerAuthentication::StripPAC(from_));
if (verbose_tracing_enabled() && deopt_kind_ != DeoptimizeKind::kLazy) {
PrintF(file, " ;;; deoptimize at ");
OFStream outstr(file);
@@ -764,7 +761,7 @@ void Deoptimizer::TraceDeoptEnd(double deopt_duration) {
// static
void Deoptimizer::TraceMarkForDeoptimization(Code code, const char* reason) {
- if (!FLAG_trace_deopt) return;
+ if (!FLAG_trace_deopt && !FLAG_log_deopt) return;
DisallowGarbageCollection no_gc;
Isolate* isolate = code.GetIsolate();
@@ -773,12 +770,14 @@ void Deoptimizer::TraceMarkForDeoptimization(Code code, const char* reason) {
DeoptimizationData deopt_data = DeoptimizationData::cast(maybe_data);
CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintF(scope.file(), "[marking dependent code " V8PRIxPTR_FMT " (",
- code.ptr());
- deopt_data.SharedFunctionInfo().ShortPrint(scope.file());
- PrintF(") (opt id %d) for deoptimization, reason: %s]\n",
- deopt_data.OptimizationId().value(), reason);
-
+ if (FLAG_trace_deopt) {
+ PrintF(scope.file(), "[marking dependent code " V8PRIxPTR_FMT " (",
+ code.ptr());
+ deopt_data.SharedFunctionInfo().ShortPrint(scope.file());
+ PrintF(") (opt id %d) for deoptimization, reason: %s]\n",
+ deopt_data.OptimizationId().value(), reason);
+ }
+ if (!FLAG_log_deopt) return;
no_gc.Release();
{
HandleScope scope(isolate);
@@ -860,8 +859,6 @@ void Deoptimizer::DoComputeOutputFrames() {
caller_fp_ = Memory<intptr_t>(fp_address);
caller_pc_ =
Memory<intptr_t>(fp_address + CommonFrameConstants::kCallerPCOffset);
- input_frame_context_ = Memory<intptr_t>(
- fp_address + CommonFrameConstants::kContextOrFrameTypeOffset);
actual_argument_count_ = static_cast<int>(
Memory<intptr_t>(fp_address + StandardFrameConstants::kArgCOffset));
@@ -875,18 +872,20 @@ void Deoptimizer::DoComputeOutputFrames() {
CHECK_GT(static_cast<uintptr_t>(caller_frame_top_),
stack_guard->real_jslimit());
- BailoutId node_id = input_data.BytecodeOffset(bailout_id_);
+ BytecodeOffset bytecode_offset =
+ input_data.GetBytecodeOffset(deopt_exit_index_);
ByteArray translations = input_data.TranslationByteArray();
- unsigned translation_index = input_data.TranslationIndex(bailout_id_).value();
+ unsigned translation_index =
+ input_data.TranslationIndex(deopt_exit_index_).value();
if (tracing_enabled()) {
timer.Start();
- TraceDeoptBegin(input_data.OptimizationId().value(), node_id.ToInt());
+ TraceDeoptBegin(input_data.OptimizationId().value(), bytecode_offset);
}
FILE* trace_file =
verbose_tracing_enabled() ? trace_scope()->file() : nullptr;
- TranslationIterator state_iterator(translations, translation_index);
+ TranslationArrayIterator state_iterator(translations, translation_index);
translated_state_.Init(
isolate_, input_->GetFramePointerAddress(), stack_fp_, &state_iterator,
input_data.LiteralArray(), input_->GetRegisterValues(), trace_file,
@@ -921,17 +920,15 @@ void Deoptimizer::DoComputeOutputFrames() {
output_count_ = static_cast<int>(count);
// Translate each output frame.
- int frame_index = 0; // output_frame_index
+ int frame_index = 0;
size_t total_output_frame_size = 0;
for (size_t i = 0; i < count; ++i, ++frame_index) {
- // Read the ast node id, function, and frame height for this output frame.
TranslatedFrame* translated_frame = &(translated_state_.frames()[i]);
- bool handle_exception = deoptimizing_throw_ && i == count - 1;
+ const bool handle_exception = deoptimizing_throw_ && i == count - 1;
switch (translated_frame->kind()) {
- case TranslatedFrame::kInterpretedFunction:
- DoComputeInterpretedFrame(translated_frame, frame_index,
+ case TranslatedFrame::kUnoptimizedFunction:
+ DoComputeUnoptimizedFrame(translated_frame, frame_index,
handle_exception);
- jsframe_count_++;
break;
case TranslatedFrame::kArgumentsAdaptor:
DoComputeArgumentsAdaptorFrame(translated_frame, frame_index);
@@ -940,6 +937,7 @@ void Deoptimizer::DoComputeOutputFrames() {
DoComputeConstructStubFrame(translated_frame, frame_index);
break;
case TranslatedFrame::kBuiltinContinuation:
+ case TranslatedFrame::kJSToWasmBuiltinContinuation:
DoComputeBuiltinContinuation(translated_frame, frame_index,
BuiltinContinuationMode::STUB);
break;
@@ -983,7 +981,7 @@ void Deoptimizer::DoComputeOutputFrames() {
stack_guard->real_jslimit() - kStackLimitSlackForDeoptimizationInBytes);
}
-void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
+void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
int frame_index,
bool goto_catch_handler) {
SharedFunctionInfo shared = translated_frame->raw_shared_info();
@@ -992,13 +990,12 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
const bool is_bottommost = (0 == frame_index);
const bool is_topmost = (output_count_ - 1 == frame_index);
- const int real_bytecode_offset = translated_frame->node_id().ToInt();
+ const int real_bytecode_offset = translated_frame->bytecode_offset().ToInt();
const int bytecode_offset =
goto_catch_handler ? catch_handler_pc_offset_ : real_bytecode_offset;
const int parameters_count = InternalFormalParameterCountWithReceiver(shared);
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// If this is the bottom most frame or the previous frame was the arguments
// adaptor fake frame, then we already have extra arguments in the stack
// (including any extra padding). Therefore we should not try to add any
@@ -1006,18 +1003,15 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
bool should_pad_arguments =
!is_bottommost && (translated_state_.frames()[frame_index - 1]).kind() !=
TranslatedFrame::kArgumentsAdaptor;
-#else
- bool should_pad_arguments = true;
-#endif
const int locals_count = translated_frame->height();
- InterpretedFrameInfo frame_info = InterpretedFrameInfo::Precise(
+ UnoptimizedFrameInfo frame_info = UnoptimizedFrameInfo::Precise(
parameters_count, locals_count, is_topmost, should_pad_arguments);
const uint32_t output_frame_size = frame_info.frame_size_in_bytes();
TranslatedFrame::iterator function_iterator = value_iterator++;
if (verbose_tracing_enabled()) {
- PrintF(trace_scope()->file(), " translating interpreted frame ");
+ PrintF(trace_scope()->file(), " translating unoptimized frame ");
std::unique_ptr<char[]> name = shared.DebugNameCStr();
PrintF(trace_scope()->file(), "%s", name.get());
PrintF(trace_scope()->file(),
@@ -1069,9 +1063,9 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
// explicitly.
//
// The caller's pc for the bottommost output frame is the same as in the
- // input frame. For all subsequent output frames, it can be read from the
- // previous one. This frame's pc can be computed from the non-optimized
- // function code and AST id of the bailout.
+ // input frame. For all subsequent output frames, it can be read from the
+ // previous one. This frame's pc can be computed from the non-optimized
+ // function code and bytecode offset of the bailout.
if (is_bottommost) {
frame_writer.PushBottommostCallerPc(caller_pc_);
} else {
@@ -1089,7 +1083,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
const intptr_t fp_value = top_address + frame_writer.top_offset();
output_frame->SetFp(fp_value);
if (is_topmost) {
- Register fp_reg = InterpretedFrame::fp_register();
+ Register fp_reg = UnoptimizedFrame::fp_register();
output_frame->SetRegister(fp_reg.code(), fp_value);
}
@@ -1260,7 +1254,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
output_frame->SetConstantPool(constant_pool_value);
if (is_topmost) {
Register constant_pool_reg =
- InterpretedFrame::constant_pool_pointer_register();
+ UnoptimizedFrame::constant_pool_pointer_register();
output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
}
}
@@ -1286,12 +1280,12 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
CHECK_GT(frame_index, 0);
CHECK_NULL(output_[frame_index]);
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// During execution, V8 does not understand arguments adaptor frames anymore,
// so during deoptimization we only push the extra arguments (arguments with
// index greater than the formal parameter count). Therefore we call this
- // TranslatedFrame the fake adaptor frame. For more info, see the design
- // document shorturl.at/fKT49.
+ // TranslatedFrame the fake adaptor frame.
+ // For more info, see the design document:
+ // https://docs.google.com/document/d/150wGaUREaZI6YWqOQFD5l2mWQXaPbbZjcAIJLOFrzMs
TranslatedFrame::iterator value_iterator = translated_frame->begin();
const int argument_count_without_receiver = translated_frame->height() - 1;
@@ -1342,104 +1336,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
for (int i = 0; i < formal_parameter_count; i++) value_iterator++;
frame_writer.PushStackJSArguments(value_iterator, extra_argument_count);
}
-#else
- TranslatedFrame::iterator value_iterator = translated_frame->begin();
- const bool is_bottommost = (0 == frame_index);
-
- const int parameters_count = translated_frame->height();
- ArgumentsAdaptorFrameInfo frame_info =
- ArgumentsAdaptorFrameInfo::Precise(parameters_count);
- const uint32_t output_frame_size = frame_info.frame_size_in_bytes();
-
- TranslatedFrame::iterator function_iterator = value_iterator++;
- if (verbose_tracing_enabled()) {
- PrintF(trace_scope()->file(),
- " translating arguments adaptor => variable_frame_size=%d, "
- "frame_size=%d\n",
- frame_info.frame_size_in_bytes_without_fixed(), output_frame_size);
- }
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame = new (output_frame_size)
- FrameDescription(output_frame_size, parameters_count);
- FrameWriter frame_writer(this, output_frame, verbose_trace_scope());
-
- // Arguments adaptor can not be topmost.
- CHECK(frame_index < output_count_ - 1);
- CHECK_NULL(output_[frame_index]);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous frame's top and
- // this frame's size.
- const intptr_t top_address =
- is_bottommost ? caller_frame_top_ - output_frame_size
- : output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- ReadOnlyRoots roots(isolate());
- if (ShouldPadArguments(parameters_count)) {
- frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
- }
-
- // Compute the incoming parameter translation.
- frame_writer.PushStackJSArguments(value_iterator, parameters_count);
-
- DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(),
- frame_writer.top_offset());
-
- // Read caller's PC from the previous frame.
- if (is_bottommost) {
- frame_writer.PushBottommostCallerPc(caller_pc_);
- } else {
- frame_writer.PushApprovedCallerPc(output_[frame_index - 1]->GetPc());
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- const intptr_t caller_fp =
- is_bottommost ? caller_fp_ : output_[frame_index - 1]->GetFp();
- frame_writer.PushCallerFp(caller_fp);
-
- intptr_t fp_value = top_address + frame_writer.top_offset();
- output_frame->SetFp(fp_value);
-
- if (FLAG_enable_embedded_constant_pool) {
- // Read the caller's constant pool from the previous frame.
- const intptr_t caller_cp =
- is_bottommost ? caller_constant_pool_
- : output_[frame_index - 1]->GetConstantPool();
- frame_writer.PushCallerConstantPool(caller_cp);
- }
-
- // A marker value is used in place of the context.
- intptr_t marker = StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR);
- frame_writer.PushRawValue(marker, "context (adaptor sentinel)\n");
-
- // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
- frame_writer.PushTranslatedValue(function_iterator, "function\n");
-
- // Number of incoming arguments.
- const uint32_t parameters_count_without_receiver = parameters_count - 1;
- frame_writer.PushRawObject(Smi::FromInt(parameters_count_without_receiver),
- "argc\n");
-
- frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
-
- CHECK_EQ(translated_frame->end(), value_iterator);
- DCHECK_EQ(0, frame_writer.top_offset());
-
- Builtins* builtins = isolate_->builtins();
- Code adaptor_trampoline =
- builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
- intptr_t pc_value = static_cast<intptr_t>(
- adaptor_trampoline.InstructionStart() +
- isolate_->heap()->arguments_adaptor_deopt_pc_offset().value());
- output_frame->SetPc(pc_value);
- if (FLAG_enable_embedded_constant_pool) {
- intptr_t constant_pool_value =
- static_cast<intptr_t>(adaptor_trampoline.constant_pool());
- output_frame->SetConstantPool(constant_pool_value);
- }
-#endif
}
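
As the comments above describe, the fake adaptor frame only materializes the "extra" arguments, those with an index greater than the callee's formal parameter count, since the callee's own unoptimized frame already provides the formal parameters. A small standalone sketch of that selection follows; it uses simplified types and is not the real frame-writing code.

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Toy model: given all actual arguments (receiver excluded) and the
    // callee's formal parameter count, return only the arguments that the
    // fake adaptor frame would need to push.
    std::vector<int> ExtraArguments(const std::vector<int>& args, size_t formals) {
      if (args.size() <= formals) return {};  // nothing beyond the formals
      return std::vector<int>(args.begin() + formals, args.end());
    }

    int main() {
      const std::vector<int> args = {10, 20, 30, 40};
      for (int v : ExtraArguments(args, 2)) std::cout << v << ' ';  // prints 30 40
      std::cout << '\n';
      return 0;
    }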
void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
@@ -1453,7 +1349,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
Builtins* builtins = isolate_->builtins();
Code construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
- BailoutId bailout_id = translated_frame->node_id();
+ BytecodeOffset bytecode_offset = translated_frame->bytecode_offset();
const int parameters_count = translated_frame->height();
ConstructStubFrameInfo frame_info =
@@ -1463,10 +1359,11 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
TranslatedFrame::iterator function_iterator = value_iterator++;
if (verbose_tracing_enabled()) {
PrintF(trace_scope()->file(),
- " translating construct stub => bailout_id=%d (%s), "
+ " translating construct stub => bytecode_offset=%d (%s), "
"variable_frame_size=%d, frame_size=%d\n",
- bailout_id.ToInt(),
- bailout_id == BailoutId::ConstructStubCreate() ? "create" : "invoke",
+ bytecode_offset.ToInt(),
+ bytecode_offset == BytecodeOffset::ConstructStubCreate() ? "create"
+ : "invoke",
frame_info.frame_size_in_bytes_without_fixed(), output_frame_size);
}
@@ -1544,11 +1441,12 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
- CHECK(bailout_id == BailoutId::ConstructStubCreate() ||
- bailout_id == BailoutId::ConstructStubInvoke());
- const char* debug_hint = bailout_id == BailoutId::ConstructStubCreate()
- ? "new target\n"
- : "allocated receiver\n";
+ CHECK(bytecode_offset == BytecodeOffset::ConstructStubCreate() ||
+ bytecode_offset == BytecodeOffset::ConstructStubInvoke());
+ const char* debug_hint =
+ bytecode_offset == BytecodeOffset::ConstructStubCreate()
+ ? "new target\n"
+ : "allocated receiver\n";
frame_writer.PushTranslatedValue(receiver_iterator, debug_hint);
if (is_topmost) {
@@ -1565,10 +1463,10 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
CHECK_EQ(0u, frame_writer.top_offset());
// Compute this frame's PC.
- DCHECK(bailout_id.IsValidForConstructStub());
+ DCHECK(bytecode_offset.IsValidForConstructStub());
Address start = construct_stub.InstructionStart();
const int pc_offset =
- bailout_id == BailoutId::ConstructStubCreate()
+ bytecode_offset == BytecodeOffset::ConstructStubCreate()
? isolate_->heap()->construct_stub_create_deopt_pc_offset().value()
: isolate_->heap()->construct_stub_invoke_deopt_pc_offset().value();
intptr_t pc_value = static_cast<intptr_t>(start + pc_offset);
@@ -1659,6 +1557,36 @@ Builtins::Name Deoptimizer::TrampolineForBuiltinContinuation(
UNREACHABLE();
}
+TranslatedValue Deoptimizer::TranslatedValueForWasmReturnType(
+ base::Optional<wasm::ValueKind> wasm_call_return_type) {
+ if (wasm_call_return_type) {
+ switch (wasm_call_return_type.value()) {
+ case wasm::kI32:
+ return TranslatedValue::NewInt32(
+ &translated_state_,
+ (int32_t)input_->GetRegister(kReturnRegister0.code()));
+ case wasm::kI64:
+ return TranslatedValue::NewInt64ToBigInt(
+ &translated_state_,
+ (int64_t)input_->GetRegister(kReturnRegister0.code()));
+ case wasm::kF32:
+ return TranslatedValue::NewFloat(
+ &translated_state_,
+ Float32(*reinterpret_cast<float*>(
+ input_->GetDoubleRegister(wasm::kFpReturnRegisters[0].code())
+ .get_bits_address())));
+ case wasm::kF64:
+ return TranslatedValue::NewDouble(
+ &translated_state_,
+ input_->GetDoubleRegister(wasm::kFpReturnRegisters[0].code()));
+ default:
+ UNREACHABLE();
+ }
+ }
+ return TranslatedValue::NewTagged(&translated_state_,
+ ReadOnlyRoots(isolate()).undefined_value());
+}
+
// BuiltinContinuationFrames capture the machine state that is expected as input
// to a builtin, including both input register values and stack parameters. When
// the frame is reactivated (i.e. the frame below it returns), a
@@ -1720,10 +1648,26 @@ Builtins::Name Deoptimizer::TrampolineForBuiltinContinuation(
void Deoptimizer::DoComputeBuiltinContinuation(
TranslatedFrame* translated_frame, int frame_index,
BuiltinContinuationMode mode) {
+ TranslatedFrame::iterator result_iterator = translated_frame->end();
+
+ bool is_js_to_wasm_builtin_continuation =
+ translated_frame->kind() == TranslatedFrame::kJSToWasmBuiltinContinuation;
+ if (is_js_to_wasm_builtin_continuation) {
+ // For JSToWasmBuiltinContinuations, add a TranslatedValue with the result
+ // of the Wasm call, extracted from the input FrameDescription.
+ // This TranslatedValue will be written in the output frame in place of the
+ // hole and we'll use ContinueToCodeStubBuiltin in place of
+ // ContinueToCodeStubBuiltinWithResult.
+ TranslatedValue result = TranslatedValueForWasmReturnType(
+ translated_frame->wasm_call_return_type());
+ translated_frame->Add(result);
+ }
+
TranslatedFrame::iterator value_iterator = translated_frame->begin();
- const BailoutId bailout_id = translated_frame->node_id();
- Builtins::Name builtin_name = Builtins::GetBuiltinFromBailoutId(bailout_id);
+ const BytecodeOffset bytecode_offset = translated_frame->bytecode_offset();
+ Builtins::Name builtin_name =
+ Builtins::GetBuiltinFromBytecodeOffset(bytecode_offset);
CallInterfaceDescriptor continuation_descriptor =
Builtins::CallInterfaceDescriptorFor(builtin_name);
@@ -1803,9 +1747,15 @@ void Deoptimizer::DoComputeBuiltinContinuation(
frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
}
if (frame_info.frame_has_result_stack_slot()) {
- frame_writer.PushRawObject(
- roots.the_hole_value(),
- "placeholder for return result on lazy deopt\n");
+ if (is_js_to_wasm_builtin_continuation) {
+ frame_writer.PushTranslatedValue(result_iterator,
+ "return result on lazy deopt\n");
+ } else {
+ DCHECK_EQ(result_iterator, translated_frame->end());
+ frame_writer.PushRawObject(
+ roots.the_hole_value(),
+ "placeholder for return result on lazy deopt\n");
+ }
}
} else {
// JavaScript builtin.
@@ -1900,7 +1850,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
frame_writer.PushRawObject(Smi::FromInt(output_frame_size_above_fp),
"frame height at deoptimization\n");
- // The context even if this is a stub contininuation frame. We can't use the
+ // The context even if this is a stub continuation frame. We can't use the
// usual context slot, because we must store the frame marker there.
frame_writer.PushTranslatedValue(context_register_value,
"builtin JavaScript context\n");
@@ -1953,7 +1903,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
}
}
- CHECK_EQ(translated_frame->end(), value_iterator);
+ CHECK_EQ(result_iterator, value_iterator);
CHECK_EQ(0u, frame_writer.top_offset());
// Clear the context register. The context might be a de-materialized object
@@ -1969,10 +1919,13 @@ void Deoptimizer::DoComputeBuiltinContinuation(
// will build its own frame once we continue to it.
Register fp_reg = JavaScriptFrame::fp_register();
output_frame->SetRegister(fp_reg.code(), fp_value);
-
+ // For JSToWasmBuiltinContinuations use ContinueToCodeStubBuiltin, and not
+ // ContinueToCodeStubBuiltinWithResult because we don't want to overwrite the
+ // return value that we have already set.
Code continue_to_builtin =
isolate()->builtins()->builtin(TrampolineForBuiltinContinuation(
- mode, frame_info.frame_has_result_stack_slot()));
+ mode, frame_info.frame_has_result_stack_slot() &&
+ !is_js_to_wasm_builtin_continuation));
if (is_topmost) {
// Only the pc of the topmost frame needs to be signed since it is
// authenticated at the end of the DeoptimizationEntry builtin.
@@ -2058,7 +2011,6 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
DCHECK(CodeKindCanDeoptimize(compiled_code_.kind()));
unsigned stack_slots = compiled_code_.stack_slots();
unsigned outgoing_size = 0;
- // ComputeOutgoingArgumentSize(compiled_code_, bailout_id_);
CHECK_EQ(fixed_size_above_fp + (stack_slots * kSystemPointerSize) -
CommonFrameConstants::kFixedFrameSizeAboveFp + outgoing_size,
result);
@@ -2068,434 +2020,9 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
// static
unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo shared) {
int parameter_slots = InternalFormalParameterCountWithReceiver(shared);
-#ifndef V8_NO_ARGUMENTS_ADAPTOR
- if (ShouldPadArguments(parameter_slots)) parameter_slots++;
-#endif
return parameter_slots * kSystemPointerSize;
}
-FrameDescription::FrameDescription(uint32_t frame_size, int parameter_count)
- : frame_size_(frame_size),
- parameter_count_(parameter_count),
- top_(kZapUint32),
- pc_(kZapUint32),
- fp_(kZapUint32),
- context_(kZapUint32),
- constant_pool_(kZapUint32) {
- // Zap all the registers.
- for (int r = 0; r < Register::kNumRegisters; r++) {
- // TODO(jbramley): It isn't safe to use kZapUint32 here. If the register
- // isn't used before the next safepoint, the GC will try to scan it as a
- // tagged value. kZapUint32 looks like a valid tagged pointer, but it isn't.
-#if defined(V8_OS_WIN) && defined(V8_TARGET_ARCH_ARM64)
- // x18 is reserved as platform register on Windows arm64 platform
- const int kPlatformRegister = 18;
- if (r != kPlatformRegister) {
- SetRegister(r, kZapUint32);
- }
-#else
- SetRegister(r, kZapUint32);
-#endif
- }
-
- // Zap all the slots.
- for (unsigned o = 0; o < frame_size; o += kSystemPointerSize) {
- SetFrameSlot(o, kZapUint32);
- }
-}
-
-void TranslationBuffer::Add(int32_t value) {
- // This wouldn't handle kMinInt correctly if it ever encountered it.
- DCHECK_NE(value, kMinInt);
- // Encode the sign bit in the least significant bit.
- bool is_negative = (value < 0);
- uint32_t bits = (static_cast<uint32_t>(is_negative ? -value : value) << 1) |
- static_cast<uint32_t>(is_negative);
- // Encode the individual bytes using the least significant bit of
- // each byte to indicate whether or not more bytes follow.
- do {
- uint32_t next = bits >> 7;
- contents_.push_back(((bits << 1) & 0xFF) | (next != 0));
- bits = next;
- } while (bits != 0);
-}
-
-TranslationIterator::TranslationIterator(ByteArray buffer, int index)
- : buffer_(buffer), index_(index) {
- DCHECK(index >= 0 && index < buffer.length());
-}
-
-int32_t TranslationIterator::Next() {
- // Run through the bytes until we reach one with a least significant
- // bit of zero (marks the end).
- uint32_t bits = 0;
- for (int i = 0; true; i += 7) {
- DCHECK(HasNext());
- uint8_t next = buffer_.get(index_++);
- bits |= (next >> 1) << i;
- if ((next & 1) == 0) break;
- }
- // The bits encode the sign in the least significant bit.
- bool is_negative = (bits & 1) == 1;
- int32_t result = bits >> 1;
- return is_negative ? -result : result;
-}
-
-bool TranslationIterator::HasNext() const { return index_ < buffer_.length(); }
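
The TranslationBuffer/TranslationIterator pair removed here encodes each signed 32-bit operand as a little-endian base-128 varint: the sign is stored in the least significant bit of the value before encoding, each byte carries seven payload bits, and the least significant bit of each byte flags whether more bytes follow. A minimal standalone sketch of that scheme, with illustrative names rather than V8 API, is shown below.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Encode: sign goes in bit 0 of the value, magnitude in the higher bits;
    // each output byte stores 7 payload bits in bits 1-7, with bit 0 set
    // when more bytes follow.
    void EncodeSigned(std::vector<uint8_t>* out, int32_t value) {
      assert(value != INT32_MIN);  // The original scheme cannot represent kMinInt.
      bool is_negative = value < 0;
      uint32_t bits = (static_cast<uint32_t>(is_negative ? -value : value) << 1) |
                      static_cast<uint32_t>(is_negative);
      do {
        uint32_t next = bits >> 7;
        out->push_back(static_cast<uint8_t>(((bits << 1) & 0xFF) | (next != 0)));
        bits = next;
      } while (bits != 0);
    }

    // Decode one value starting at *index, advancing *index past it.
    int32_t DecodeSigned(const std::vector<uint8_t>& buf, size_t* index) {
      uint32_t bits = 0;
      for (int shift = 0; true; shift += 7) {
        uint8_t byte = buf[(*index)++];
        bits |= static_cast<uint32_t>(byte >> 1) << shift;
        if ((byte & 1) == 0) break;  // Continuation bit clear: last byte.
      }
      bool is_negative = (bits & 1) != 0;
      int32_t magnitude = static_cast<int32_t>(bits >> 1);
      return is_negative ? -magnitude : magnitude;
    }

    int main() {
      std::vector<uint8_t> buf;
      size_t i = 0;
      EncodeSigned(&buf, -1234);
      EncodeSigned(&buf, 567890);
      assert(DecodeSigned(buf, &i) == -1234);
      assert(DecodeSigned(buf, &i) == 567890);
      return 0;
    }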
-
-Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) {
- Handle<ByteArray> result =
- factory->NewByteArray(CurrentIndex(), AllocationType::kOld);
- contents_.CopyTo(result->GetDataStartAddress());
- return result;
-}
-
-void Translation::BeginBuiltinContinuationFrame(BailoutId bailout_id,
- int literal_id,
- unsigned height) {
- buffer_->Add(BUILTIN_CONTINUATION_FRAME);
- buffer_->Add(bailout_id.ToInt());
- buffer_->Add(literal_id);
- buffer_->Add(height);
-}
-
-void Translation::BeginJavaScriptBuiltinContinuationFrame(BailoutId bailout_id,
- int literal_id,
- unsigned height) {
- buffer_->Add(JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME);
- buffer_->Add(bailout_id.ToInt());
- buffer_->Add(literal_id);
- buffer_->Add(height);
-}
-
-void Translation::BeginJavaScriptBuiltinContinuationWithCatchFrame(
- BailoutId bailout_id, int literal_id, unsigned height) {
- buffer_->Add(JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME);
- buffer_->Add(bailout_id.ToInt());
- buffer_->Add(literal_id);
- buffer_->Add(height);
-}
-
-void Translation::BeginConstructStubFrame(BailoutId bailout_id, int literal_id,
- unsigned height) {
- buffer_->Add(CONSTRUCT_STUB_FRAME);
- buffer_->Add(bailout_id.ToInt());
- buffer_->Add(literal_id);
- buffer_->Add(height);
-}
-
-void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
- buffer_->Add(ARGUMENTS_ADAPTOR_FRAME);
- buffer_->Add(literal_id);
- buffer_->Add(height);
-}
-
-void Translation::BeginInterpretedFrame(BailoutId bytecode_offset,
- int literal_id, unsigned height,
- int return_value_offset,
- int return_value_count) {
- buffer_->Add(INTERPRETED_FRAME);
- buffer_->Add(bytecode_offset.ToInt());
- buffer_->Add(literal_id);
- buffer_->Add(height);
- buffer_->Add(return_value_offset);
- buffer_->Add(return_value_count);
-}
-
-void Translation::ArgumentsElements(CreateArgumentsType type) {
- buffer_->Add(ARGUMENTS_ELEMENTS);
- buffer_->Add(static_cast<uint8_t>(type));
-}
-
-void Translation::ArgumentsLength() { buffer_->Add(ARGUMENTS_LENGTH); }
-
-void Translation::BeginCapturedObject(int length) {
- buffer_->Add(CAPTURED_OBJECT);
- buffer_->Add(length);
-}
-
-void Translation::DuplicateObject(int object_index) {
- buffer_->Add(DUPLICATED_OBJECT);
- buffer_->Add(object_index);
-}
-
-void Translation::StoreRegister(Register reg) {
- buffer_->Add(REGISTER);
- buffer_->Add(reg.code());
-}
-
-void Translation::StoreInt32Register(Register reg) {
- buffer_->Add(INT32_REGISTER);
- buffer_->Add(reg.code());
-}
-
-void Translation::StoreInt64Register(Register reg) {
- buffer_->Add(INT64_REGISTER);
- buffer_->Add(reg.code());
-}
-
-void Translation::StoreUint32Register(Register reg) {
- buffer_->Add(UINT32_REGISTER);
- buffer_->Add(reg.code());
-}
-
-void Translation::StoreBoolRegister(Register reg) {
- buffer_->Add(BOOL_REGISTER);
- buffer_->Add(reg.code());
-}
-
-void Translation::StoreFloatRegister(FloatRegister reg) {
- buffer_->Add(FLOAT_REGISTER);
- buffer_->Add(reg.code());
-}
-
-void Translation::StoreDoubleRegister(DoubleRegister reg) {
- buffer_->Add(DOUBLE_REGISTER);
- buffer_->Add(reg.code());
-}
-
-void Translation::StoreStackSlot(int index) {
- buffer_->Add(STACK_SLOT);
- buffer_->Add(index);
-}
-
-void Translation::StoreInt32StackSlot(int index) {
- buffer_->Add(INT32_STACK_SLOT);
- buffer_->Add(index);
-}
-
-void Translation::StoreInt64StackSlot(int index) {
- buffer_->Add(INT64_STACK_SLOT);
- buffer_->Add(index);
-}
-
-void Translation::StoreUint32StackSlot(int index) {
- buffer_->Add(UINT32_STACK_SLOT);
- buffer_->Add(index);
-}
-
-void Translation::StoreBoolStackSlot(int index) {
- buffer_->Add(BOOL_STACK_SLOT);
- buffer_->Add(index);
-}
-
-void Translation::StoreFloatStackSlot(int index) {
- buffer_->Add(FLOAT_STACK_SLOT);
- buffer_->Add(index);
-}
-
-void Translation::StoreDoubleStackSlot(int index) {
- buffer_->Add(DOUBLE_STACK_SLOT);
- buffer_->Add(index);
-}
-
-void Translation::StoreLiteral(int literal_id) {
- buffer_->Add(LITERAL);
- buffer_->Add(literal_id);
-}
-
-void Translation::AddUpdateFeedback(int vector_literal, int slot) {
- buffer_->Add(UPDATE_FEEDBACK);
- buffer_->Add(vector_literal);
- buffer_->Add(slot);
-}
-
-void Translation::StoreJSFrameFunction() {
- StoreStackSlot((StandardFrameConstants::kCallerPCOffset -
- StandardFrameConstants::kFunctionOffset) /
- kSystemPointerSize);
-}
-
-int Translation::NumberOfOperandsFor(Opcode opcode) {
- switch (opcode) {
- case ARGUMENTS_LENGTH:
- return 0;
- case DUPLICATED_OBJECT:
- case ARGUMENTS_ELEMENTS:
- case CAPTURED_OBJECT:
- case REGISTER:
- case INT32_REGISTER:
- case INT64_REGISTER:
- case UINT32_REGISTER:
- case BOOL_REGISTER:
- case FLOAT_REGISTER:
- case DOUBLE_REGISTER:
- case STACK_SLOT:
- case INT32_STACK_SLOT:
- case INT64_STACK_SLOT:
- case UINT32_STACK_SLOT:
- case BOOL_STACK_SLOT:
- case FLOAT_STACK_SLOT:
- case DOUBLE_STACK_SLOT:
- case LITERAL:
- return 1;
- case ARGUMENTS_ADAPTOR_FRAME:
- case UPDATE_FEEDBACK:
- return 2;
- case BEGIN:
- case CONSTRUCT_STUB_FRAME:
- case BUILTIN_CONTINUATION_FRAME:
- case JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME:
- case JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME:
- return 3;
- case INTERPRETED_FRAME:
- return 5;
- }
- FATAL("Unexpected translation type");
- return -1;
-}
-
-#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
-
-const char* Translation::StringFor(Opcode opcode) {
-#define TRANSLATION_OPCODE_CASE(item) \
- case item: \
- return #item;
- switch (opcode) { TRANSLATION_OPCODE_LIST(TRANSLATION_OPCODE_CASE) }
-#undef TRANSLATION_OPCODE_CASE
- UNREACHABLE();
-}
-
-#endif
-
-Handle<FixedArray> MaterializedObjectStore::Get(Address fp) {
- int index = StackIdToIndex(fp);
- if (index == -1) {
- return Handle<FixedArray>::null();
- }
- Handle<FixedArray> array = GetStackEntries();
- CHECK_GT(array->length(), index);
- return Handle<FixedArray>::cast(Handle<Object>(array->get(index), isolate()));
-}
-
-void MaterializedObjectStore::Set(Address fp,
- Handle<FixedArray> materialized_objects) {
- int index = StackIdToIndex(fp);
- if (index == -1) {
- index = static_cast<int>(frame_fps_.size());
- frame_fps_.push_back(fp);
- }
-
- Handle<FixedArray> array = EnsureStackEntries(index + 1);
- array->set(index, *materialized_objects);
-}
-
-bool MaterializedObjectStore::Remove(Address fp) {
- auto it = std::find(frame_fps_.begin(), frame_fps_.end(), fp);
- if (it == frame_fps_.end()) return false;
- int index = static_cast<int>(std::distance(frame_fps_.begin(), it));
-
- frame_fps_.erase(it);
- FixedArray array = isolate()->heap()->materialized_objects();
-
- CHECK_LT(index, array.length());
- int fps_size = static_cast<int>(frame_fps_.size());
- for (int i = index; i < fps_size; i++) {
- array.set(i, array.get(i + 1));
- }
- array.set(fps_size, ReadOnlyRoots(isolate()).undefined_value());
- return true;
-}
-
-int MaterializedObjectStore::StackIdToIndex(Address fp) {
- auto it = std::find(frame_fps_.begin(), frame_fps_.end(), fp);
- return it == frame_fps_.end()
- ? -1
- : static_cast<int>(std::distance(frame_fps_.begin(), it));
-}
-
-Handle<FixedArray> MaterializedObjectStore::GetStackEntries() {
- return Handle<FixedArray>(isolate()->heap()->materialized_objects(),
- isolate());
-}
-
-Handle<FixedArray> MaterializedObjectStore::EnsureStackEntries(int length) {
- Handle<FixedArray> array = GetStackEntries();
- if (array->length() >= length) {
- return array;
- }
-
- int new_length = length > 10 ? length : 10;
- if (new_length < 2 * array->length()) {
- new_length = 2 * array->length();
- }
-
- Handle<FixedArray> new_array =
- isolate()->factory()->NewFixedArray(new_length, AllocationType::kOld);
- for (int i = 0; i < array->length(); i++) {
- new_array->set(i, array->get(i));
- }
- HeapObject undefined_value = ReadOnlyRoots(isolate()).undefined_value();
- for (int i = array->length(); i < length; i++) {
- new_array->set(i, undefined_value);
- }
- isolate()->heap()->SetRootMaterializedObjects(*new_array);
- return new_array;
-}
-
-namespace {
-
-Handle<Object> GetValueForDebugger(TranslatedFrame::iterator it,
- Isolate* isolate) {
- if (it->GetRawValue() == ReadOnlyRoots(isolate).arguments_marker()) {
- if (!it->IsMaterializableByDebugger()) {
- return isolate->factory()->optimized_out();
- }
- }
- return it->GetValue();
-}
-
-} // namespace
-
-DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
- TranslatedState::iterator frame_it,
- Isolate* isolate) {
- int parameter_count =
- frame_it->shared_info()->internal_formal_parameter_count();
- TranslatedFrame::iterator stack_it = frame_it->begin();
-
- // Get the function. Note that this might materialize the function.
- // In case the debugger mutates this value, we should deoptimize
- // the function and remember the value in the materialized value store.
- function_ = Handle<JSFunction>::cast(stack_it->GetValue());
- stack_it++; // Skip the function.
- stack_it++; // Skip the receiver.
-
- DCHECK_EQ(TranslatedFrame::kInterpretedFunction, frame_it->kind());
- source_position_ = Deoptimizer::ComputeSourcePositionFromBytecodeArray(
- isolate, *frame_it->shared_info(), frame_it->node_id());
-
- DCHECK_EQ(parameter_count,
- function_->shared().internal_formal_parameter_count());
-
- parameters_.resize(static_cast<size_t>(parameter_count));
- for (int i = 0; i < parameter_count; i++) {
- Handle<Object> parameter = GetValueForDebugger(stack_it, isolate);
- SetParameter(i, parameter);
- stack_it++;
- }
-
- // Get the context.
- context_ = GetValueForDebugger(stack_it, isolate);
- stack_it++;
-
- // Get the expression stack.
- DCHECK_EQ(TranslatedFrame::kInterpretedFunction, frame_it->kind());
- const int stack_height = frame_it->height(); // Accumulator *not* included.
-
- expression_stack_.resize(static_cast<size_t>(stack_height));
- for (int i = 0; i < stack_height; i++) {
- Handle<Object> expression = GetValueForDebugger(stack_it, isolate);
- SetExpression(i, expression);
- stack_it++;
- }
-
- DCHECK_EQ(TranslatedFrame::kInterpretedFunction, frame_it->kind());
- stack_it++; // Skip the accumulator.
-
- CHECK(stack_it == frame_it->end());
-}
-
Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code code, Address pc) {
CHECK(code.InstructionStart() <= pc && pc <= code.InstructionEnd());
SourcePosition last_position = SourcePosition::Unknown();
@@ -2525,1817 +2052,12 @@ Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code code, Address pc) {
// static
int Deoptimizer::ComputeSourcePositionFromBytecodeArray(
- Isolate* isolate, SharedFunctionInfo shared, BailoutId node_id) {
+ Isolate* isolate, SharedFunctionInfo shared,
+ BytecodeOffset bytecode_offset) {
DCHECK(shared.HasBytecodeArray());
return AbstractCode::cast(shared.GetBytecodeArray(isolate))
- .SourcePosition(node_id.ToInt());
-}
-
-// static
-TranslatedValue TranslatedValue::NewDeferredObject(TranslatedState* container,
- int length,
- int object_index) {
- TranslatedValue slot(container, kCapturedObject);
- slot.materialization_info_ = {object_index, length};
- return slot;
-}
-
-// static
-TranslatedValue TranslatedValue::NewDuplicateObject(TranslatedState* container,
- int id) {
- TranslatedValue slot(container, kDuplicatedObject);
- slot.materialization_info_ = {id, -1};
- return slot;
-}
-
-// static
-TranslatedValue TranslatedValue::NewFloat(TranslatedState* container,
- Float32 value) {
- TranslatedValue slot(container, kFloat);
- slot.float_value_ = value;
- return slot;
-}
-
-// static
-TranslatedValue TranslatedValue::NewDouble(TranslatedState* container,
- Float64 value) {
- TranslatedValue slot(container, kDouble);
- slot.double_value_ = value;
- return slot;
-}
-
-// static
-TranslatedValue TranslatedValue::NewInt32(TranslatedState* container,
- int32_t value) {
- TranslatedValue slot(container, kInt32);
- slot.int32_value_ = value;
- return slot;
-}
-
-// static
-TranslatedValue TranslatedValue::NewInt64(TranslatedState* container,
- int64_t value) {
- TranslatedValue slot(container, kInt64);
- slot.int64_value_ = value;
- return slot;
-}
-
-// static
-TranslatedValue TranslatedValue::NewUInt32(TranslatedState* container,
- uint32_t value) {
- TranslatedValue slot(container, kUInt32);
- slot.uint32_value_ = value;
- return slot;
-}
-
-// static
-TranslatedValue TranslatedValue::NewBool(TranslatedState* container,
- uint32_t value) {
- TranslatedValue slot(container, kBoolBit);
- slot.uint32_value_ = value;
- return slot;
-}
-
-// static
-TranslatedValue TranslatedValue::NewTagged(TranslatedState* container,
- Object literal) {
- TranslatedValue slot(container, kTagged);
- slot.raw_literal_ = literal;
- return slot;
-}
-
-// static
-TranslatedValue TranslatedValue::NewInvalid(TranslatedState* container) {
- return TranslatedValue(container, kInvalid);
-}
-
-Isolate* TranslatedValue::isolate() const { return container_->isolate(); }
-
-Object TranslatedValue::raw_literal() const {
- DCHECK_EQ(kTagged, kind());
- return raw_literal_;
-}
-
-int32_t TranslatedValue::int32_value() const {
- DCHECK_EQ(kInt32, kind());
- return int32_value_;
-}
-
-int64_t TranslatedValue::int64_value() const {
- DCHECK_EQ(kInt64, kind());
- return int64_value_;
-}
-
-uint32_t TranslatedValue::uint32_value() const {
- DCHECK(kind() == kUInt32 || kind() == kBoolBit);
- return uint32_value_;
-}
-
-Float32 TranslatedValue::float_value() const {
- DCHECK_EQ(kFloat, kind());
- return float_value_;
-}
-
-Float64 TranslatedValue::double_value() const {
- DCHECK_EQ(kDouble, kind());
- return double_value_;
-}
-
-int TranslatedValue::object_length() const {
- DCHECK_EQ(kind(), kCapturedObject);
- return materialization_info_.length_;
-}
-
-int TranslatedValue::object_index() const {
- DCHECK(kind() == kCapturedObject || kind() == kDuplicatedObject);
- return materialization_info_.id_;
-}
-
-Object TranslatedValue::GetRawValue() const {
- // If we have a value, return it.
- if (materialization_state() == kFinished) {
- int smi;
- if (storage_->IsHeapNumber() &&
- DoubleToSmiInteger(storage_->Number(), &smi)) {
- return Smi::FromInt(smi);
- }
- return *storage_;
- }
-
- // Otherwise, do a best effort to get the value without allocation.
- switch (kind()) {
- case kTagged:
- return raw_literal();
-
- case kInt32: {
- bool is_smi = Smi::IsValid(int32_value());
- if (is_smi) {
- return Smi::FromInt(int32_value());
- }
- break;
- }
-
- case kInt64: {
- bool is_smi = (int64_value() >= static_cast<int64_t>(Smi::kMinValue) &&
- int64_value() <= static_cast<int64_t>(Smi::kMaxValue));
- if (is_smi) {
- return Smi::FromIntptr(static_cast<intptr_t>(int64_value()));
- }
- break;
- }
-
- case kUInt32: {
- bool is_smi = (uint32_value() <= static_cast<uintptr_t>(Smi::kMaxValue));
- if (is_smi) {
- return Smi::FromInt(static_cast<int32_t>(uint32_value()));
- }
- break;
- }
-
- case kBoolBit: {
- if (uint32_value() == 0) {
- return ReadOnlyRoots(isolate()).false_value();
- } else {
- CHECK_EQ(1U, uint32_value());
- return ReadOnlyRoots(isolate()).true_value();
- }
- }
-
- case kFloat: {
- int smi;
- if (DoubleToSmiInteger(float_value().get_scalar(), &smi)) {
- return Smi::FromInt(smi);
- }
- break;
- }
-
- case kDouble: {
- int smi;
- if (DoubleToSmiInteger(double_value().get_scalar(), &smi)) {
- return Smi::FromInt(smi);
- }
- break;
- }
-
- default:
- break;
- }
-
- // If we could not get the value without allocation, return the arguments
- // marker.
- return ReadOnlyRoots(isolate()).arguments_marker();
-}
-
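GetRawValue above only hands back a Smi when the numeric payload is representable as one; anything else falls through to the arguments marker so that GetValue can box it in a HeapNumber. A hedged stand-in for that representability test is sketched below; DoubleToSmi is a hypothetical helper, and the Smi bounds are passed in rather than taken from V8's build-dependent constants.

#include <cmath>
#include <cstdint>
#include <optional>

// A double can live in a Smi only if it is an integral value inside the Smi
// range and is not -0. The real Smi range depends on the build configuration.
std::optional<int32_t> DoubleToSmi(double value, int32_t smi_min, int32_t smi_max) {
  if (!(value >= smi_min && value <= smi_max)) return std::nullopt;  // also rejects NaN
  int32_t truncated = static_cast<int32_t>(value);
  if (static_cast<double>(truncated) != value) return std::nullopt;  // not integral
  if (value == 0.0 && std::signbit(value)) return std::nullopt;      // -0 needs a HeapNumber
  return truncated;
}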
-void TranslatedValue::set_initialized_storage(Handle<HeapObject> storage) {
- DCHECK_EQ(kUninitialized, materialization_state());
- storage_ = storage;
- materialization_state_ = kFinished;
-}
-
-Handle<Object> TranslatedValue::GetValue() {
- Handle<Object> value(GetRawValue(), isolate());
- if (materialization_state() == kFinished) return value;
-
- if (value->IsSmi()) {
- // Even though stored as a Smi, this number might instead be needed as a
- // HeapNumber when materializing a JSObject with a field of HeapObject
- // representation. Since we don't have this information available here, we
- // just always allocate a HeapNumber and later extract the Smi again if we
- // don't need a HeapObject.
- set_initialized_storage(
- isolate()->factory()->NewHeapNumber(value->Number()));
- return value;
- }
-
- if (*value != ReadOnlyRoots(isolate()).arguments_marker()) {
- set_initialized_storage(Handle<HeapObject>::cast(value));
- return storage_;
- }
-
- // Otherwise we have to materialize.
-
- if (kind() == TranslatedValue::kCapturedObject ||
- kind() == TranslatedValue::kDuplicatedObject) {
- // We need to materialize the object (or possibly even object graphs).
- // To make the object verifier happy, we materialize in two steps.
-
- // 1. Allocate storage for reachable objects. This makes sure that for
- // each object we have allocated space on heap. The space will be
- // a byte array that will be later initialized, or a fully
- // initialized object if it is safe to allocate one that will
- // pass the verifier.
- container_->EnsureObjectAllocatedAt(this);
-
- // 2. Initialize the objects. If we have allocated only byte arrays
- // for some objects, we now overwrite the byte arrays with the
- // correct object fields. Note that this phase does not allocate
- // any new objects, so it does not trigger the object verifier.
- return container_->InitializeObjectAt(this);
- }
-
- double number;
- switch (kind()) {
- case TranslatedValue::kInt32:
- number = int32_value();
- break;
- case TranslatedValue::kInt64:
- number = int64_value();
- break;
- case TranslatedValue::kUInt32:
- number = uint32_value();
- break;
- case TranslatedValue::kFloat:
- number = float_value().get_scalar();
- break;
- case TranslatedValue::kDouble:
- number = double_value().get_scalar();
- break;
- default:
- UNREACHABLE();
- }
- DCHECK(!IsSmiDouble(number));
- set_initialized_storage(isolate()->factory()->NewHeapNumber(number));
- return storage_;
-}
-
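The comment in GetValue describes a two-phase materialization: first allocate storage for every object reachable from the slot, then initialize the fields without any further allocation so the heap verifier never observes a half-built graph. The toy sketch below shows just that control flow on a plain index-based graph; it is illustrative only and says nothing about V8's actual object layout.

#include <stack>
#include <vector>

// Phase 1 only allocates, phase 2 only initializes, so no allocation (and
// therefore no GC or verification) can happen while the graph is half built.
struct Node {
  std::vector<int> children;  // indices into the graph
  bool allocated = false;
  bool initialized = false;
};

void MaterializeGraph(std::vector<Node>& graph, int root) {
  std::stack<int> worklist;

  // Phase 1: allocate storage for every reachable node.
  worklist.push(root);
  graph[root].allocated = true;
  while (!worklist.empty()) {
    int index = worklist.top();
    worklist.pop();
    for (int child : graph[index].children) {
      if (!graph[child].allocated) {
        graph[child].allocated = true;
        worklist.push(child);
      }
    }
  }

  // Phase 2: initialize fields; nothing is allocated here.
  worklist.push(root);
  graph[root].initialized = true;
  while (!worklist.empty()) {
    int index = worklist.top();
    worklist.pop();
    for (int child : graph[index].children) {
      if (!graph[child].initialized) {
        graph[child].initialized = true;
        worklist.push(child);
      }
    }
  }
}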
-bool TranslatedValue::IsMaterializedObject() const {
- switch (kind()) {
- case kCapturedObject:
- case kDuplicatedObject:
- return true;
- default:
- return false;
- }
-}
-
-bool TranslatedValue::IsMaterializableByDebugger() const {
- // At the moment, we only allow materialization of doubles.
- return (kind() == kDouble);
-}
-
-int TranslatedValue::GetChildrenCount() const {
- if (kind() == kCapturedObject) {
- return object_length();
- } else {
- return 0;
- }
-}
-
-uint64_t TranslatedState::GetUInt64Slot(Address fp, int slot_offset) {
-#if V8_TARGET_ARCH_32_BIT
- return ReadUnalignedValue<uint64_t>(fp + slot_offset);
-#else
- return Memory<uint64_t>(fp + slot_offset);
-#endif
-}
-
-uint32_t TranslatedState::GetUInt32Slot(Address fp, int slot_offset) {
- Address address = fp + slot_offset;
-#if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT
- return Memory<uint32_t>(address + kIntSize);
-#else
- return Memory<uint32_t>(address);
-#endif
-}
-
-Float32 TranslatedState::GetFloatSlot(Address fp, int slot_offset) {
-#if !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
- return Float32::FromBits(GetUInt32Slot(fp, slot_offset));
-#else
- return Float32::FromBits(Memory<uint32_t>(fp + slot_offset));
-#endif
-}
-
-Float64 TranslatedState::GetDoubleSlot(Address fp, int slot_offset) {
- return Float64::FromBits(GetUInt64Slot(fp, slot_offset));
-}
-
-void TranslatedValue::Handlify() {
- if (kind() == kTagged && raw_literal().IsHeapObject()) {
- set_initialized_storage(
- Handle<HeapObject>(HeapObject::cast(raw_literal()), isolate()));
- raw_literal_ = Object();
- }
-}
-
-TranslatedFrame TranslatedFrame::InterpretedFrame(
- BailoutId bytecode_offset, SharedFunctionInfo shared_info, int height,
- int return_value_offset, int return_value_count) {
- TranslatedFrame frame(kInterpretedFunction, shared_info, height,
- return_value_offset, return_value_count);
- frame.node_id_ = bytecode_offset;
- return frame;
-}
-
-TranslatedFrame TranslatedFrame::ArgumentsAdaptorFrame(
- SharedFunctionInfo shared_info, int height) {
- return TranslatedFrame(kArgumentsAdaptor, shared_info, height);
-}
-
-TranslatedFrame TranslatedFrame::ConstructStubFrame(
- BailoutId bailout_id, SharedFunctionInfo shared_info, int height) {
- TranslatedFrame frame(kConstructStub, shared_info, height);
- frame.node_id_ = bailout_id;
- return frame;
-}
-
-TranslatedFrame TranslatedFrame::BuiltinContinuationFrame(
- BailoutId bailout_id, SharedFunctionInfo shared_info, int height) {
- TranslatedFrame frame(kBuiltinContinuation, shared_info, height);
- frame.node_id_ = bailout_id;
- return frame;
-}
-
-TranslatedFrame TranslatedFrame::JavaScriptBuiltinContinuationFrame(
- BailoutId bailout_id, SharedFunctionInfo shared_info, int height) {
- TranslatedFrame frame(kJavaScriptBuiltinContinuation, shared_info, height);
- frame.node_id_ = bailout_id;
- return frame;
-}
-
-TranslatedFrame TranslatedFrame::JavaScriptBuiltinContinuationWithCatchFrame(
- BailoutId bailout_id, SharedFunctionInfo shared_info, int height) {
- TranslatedFrame frame(kJavaScriptBuiltinContinuationWithCatch, shared_info,
- height);
- frame.node_id_ = bailout_id;
- return frame;
-}
-
-int TranslatedFrame::GetValueCount() {
- // The function is added to all frame state descriptors in
- // InstructionSelector::AddInputsToFrameStateDescriptor.
- static constexpr int kTheFunction = 1;
-
- switch (kind()) {
- case kInterpretedFunction: {
- int parameter_count =
- InternalFormalParameterCountWithReceiver(raw_shared_info_);
- static constexpr int kTheContext = 1;
- static constexpr int kTheAccumulator = 1;
- return height() + parameter_count + kTheContext + kTheFunction +
- kTheAccumulator;
- }
-
- case kArgumentsAdaptor:
- return height() + kTheFunction;
-
- case kConstructStub:
- case kBuiltinContinuation:
- case kJavaScriptBuiltinContinuation:
- case kJavaScriptBuiltinContinuationWithCatch: {
- static constexpr int kTheContext = 1;
- return height() + kTheContext + kTheFunction;
- }
-
- case kInvalid:
- UNREACHABLE();
- }
- UNREACHABLE();
-}
-
-void TranslatedFrame::Handlify() {
- if (!raw_shared_info_.is_null()) {
- shared_info_ = Handle<SharedFunctionInfo>(raw_shared_info_,
- raw_shared_info_.GetIsolate());
- raw_shared_info_ = SharedFunctionInfo();
- }
- for (auto& value : values_) {
- value.Handlify();
- }
-}
-
-TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
- TranslationIterator* iterator, FixedArray literal_array, Address fp,
- FILE* trace_file) {
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- switch (opcode) {
- case Translation::INTERPRETED_FRAME: {
- BailoutId bytecode_offset = BailoutId(iterator->Next());
- SharedFunctionInfo shared_info =
- SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
- int height = iterator->Next();
- int return_value_offset = iterator->Next();
- int return_value_count = iterator->Next();
- if (trace_file != nullptr) {
- std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
- PrintF(trace_file, " reading input frame %s", name.get());
- int arg_count = InternalFormalParameterCountWithReceiver(shared_info);
- PrintF(trace_file,
- " => bytecode_offset=%d, args=%d, height=%d, retval=%i(#%i); "
- "inputs:\n",
- bytecode_offset.ToInt(), arg_count, height, return_value_offset,
- return_value_count);
- }
- return TranslatedFrame::InterpretedFrame(bytecode_offset, shared_info,
- height, return_value_offset,
- return_value_count);
- }
-
- case Translation::ARGUMENTS_ADAPTOR_FRAME: {
- SharedFunctionInfo shared_info =
- SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
- int height = iterator->Next();
- if (trace_file != nullptr) {
- std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
- PrintF(trace_file, " reading arguments adaptor frame %s", name.get());
- PrintF(trace_file, " => height=%d; inputs:\n", height);
- }
- return TranslatedFrame::ArgumentsAdaptorFrame(shared_info, height);
- }
-
- case Translation::CONSTRUCT_STUB_FRAME: {
- BailoutId bailout_id = BailoutId(iterator->Next());
- SharedFunctionInfo shared_info =
- SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
- int height = iterator->Next();
- if (trace_file != nullptr) {
- std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
- PrintF(trace_file, " reading construct stub frame %s", name.get());
- PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n",
- bailout_id.ToInt(), height);
- }
- return TranslatedFrame::ConstructStubFrame(bailout_id, shared_info,
- height);
- }
-
- case Translation::BUILTIN_CONTINUATION_FRAME: {
- BailoutId bailout_id = BailoutId(iterator->Next());
- SharedFunctionInfo shared_info =
- SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
- int height = iterator->Next();
- if (trace_file != nullptr) {
- std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
- PrintF(trace_file, " reading builtin continuation frame %s",
- name.get());
- PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n",
- bailout_id.ToInt(), height);
- }
- return TranslatedFrame::BuiltinContinuationFrame(bailout_id, shared_info,
- height);
- }
-
- case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME: {
- BailoutId bailout_id = BailoutId(iterator->Next());
- SharedFunctionInfo shared_info =
- SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
- int height = iterator->Next();
- if (trace_file != nullptr) {
- std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
- PrintF(trace_file, " reading JavaScript builtin continuation frame %s",
- name.get());
- PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n",
- bailout_id.ToInt(), height);
- }
- return TranslatedFrame::JavaScriptBuiltinContinuationFrame(
- bailout_id, shared_info, height);
- }
- case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: {
- BailoutId bailout_id = BailoutId(iterator->Next());
- SharedFunctionInfo shared_info =
- SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
- int height = iterator->Next();
- if (trace_file != nullptr) {
- std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
- PrintF(trace_file,
- " reading JavaScript builtin continuation frame with catch %s",
- name.get());
- PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n",
- bailout_id.ToInt(), height);
- }
- return TranslatedFrame::JavaScriptBuiltinContinuationWithCatchFrame(
- bailout_id, shared_info, height);
- }
- case Translation::UPDATE_FEEDBACK:
- case Translation::BEGIN:
- case Translation::DUPLICATED_OBJECT:
- case Translation::ARGUMENTS_ELEMENTS:
- case Translation::ARGUMENTS_LENGTH:
- case Translation::CAPTURED_OBJECT:
- case Translation::REGISTER:
- case Translation::INT32_REGISTER:
- case Translation::INT64_REGISTER:
- case Translation::UINT32_REGISTER:
- case Translation::BOOL_REGISTER:
- case Translation::FLOAT_REGISTER:
- case Translation::DOUBLE_REGISTER:
- case Translation::STACK_SLOT:
- case Translation::INT32_STACK_SLOT:
- case Translation::INT64_STACK_SLOT:
- case Translation::UINT32_STACK_SLOT:
- case Translation::BOOL_STACK_SLOT:
- case Translation::FLOAT_STACK_SLOT:
- case Translation::DOUBLE_STACK_SLOT:
- case Translation::LITERAL:
- break;
- }
- FATAL("We should never get here - unexpected deopt info.");
- return TranslatedFrame::InvalidFrame();
-}
-
-// static
-void TranslatedFrame::AdvanceIterator(
- std::deque<TranslatedValue>::iterator* iter) {
- int values_to_skip = 1;
- while (values_to_skip > 0) {
- // Consume the current element.
- values_to_skip--;
- // Add all the children.
- values_to_skip += (*iter)->GetChildrenCount();
-
- (*iter)++;
- }
-}
-
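AdvanceIterator above skips one logical value together with all of its transitive children in the pre-order flattened stream. The same bookkeeping, written as a standalone function over a vector of per-entry child counts (assumed well-formed), looks like this:

#include <cstddef>
#include <vector>

// children_counts[i] is the direct child count of entry i in a pre-order
// flattened stream; skipping one value also skips its transitive children.
size_t SkipOneValue(const std::vector<int>& children_counts, size_t index) {
  int values_to_skip = 1;
  while (values_to_skip > 0) {
    values_to_skip--;                          // consume the current entry
    values_to_skip += children_counts[index];  // queue its direct children
    index++;
  }
  return index;  // first entry after the skipped value
}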
-Address TranslatedState::ComputeArgumentsPosition(Address input_frame_pointer,
- int* length) {
- Address parent_frame_pointer = *reinterpret_cast<Address*>(
- input_frame_pointer + StandardFrameConstants::kCallerFPOffset);
- intptr_t parent_frame_type = Memory<intptr_t>(
- parent_frame_pointer + CommonFrameConstants::kContextOrFrameTypeOffset);
-
- Address arguments_frame;
- if (parent_frame_type ==
- StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)) {
- if (length)
- *length = Smi::cast(*FullObjectSlot(
- parent_frame_pointer +
- ArgumentsAdaptorFrameConstants::kLengthOffset))
- .value();
- arguments_frame = parent_frame_pointer;
- } else {
- if (length) *length = formal_parameter_count_;
- arguments_frame = input_frame_pointer;
- }
-
- return arguments_frame;
-}
-
-// Creates translated values for an arguments backing store, or the backing
-// store for rest parameters depending on the given {type}. The TranslatedValue
-// objects for the fields are not read from the TranslationIterator, but instead
-// created on-the-fly based on dynamic information in the optimized frame.
-void TranslatedState::CreateArgumentsElementsTranslatedValues(
- int frame_index, Address input_frame_pointer, CreateArgumentsType type,
- FILE* trace_file) {
- TranslatedFrame& frame = frames_[frame_index];
-
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
- int arguments_length = actual_argument_count_;
-#else
- int arguments_length;
- Address arguments_frame =
- ComputeArgumentsPosition(input_frame_pointer, &arguments_length);
-#endif
-
- int length = type == CreateArgumentsType::kRestParameter
- ? std::max(0, arguments_length - formal_parameter_count_)
- : arguments_length;
-
- int object_index = static_cast<int>(object_positions_.size());
- int value_index = static_cast<int>(frame.values_.size());
- if (trace_file != nullptr) {
- PrintF(trace_file, "arguments elements object #%d (type = %d, length = %d)",
- object_index, static_cast<uint8_t>(type), length);
- }
-
- object_positions_.push_back({frame_index, value_index});
- frame.Add(TranslatedValue::NewDeferredObject(
- this, length + FixedArray::kHeaderSize / kTaggedSize, object_index));
-
- ReadOnlyRoots roots(isolate_);
- frame.Add(TranslatedValue::NewTagged(this, roots.fixed_array_map()));
- frame.Add(TranslatedValue::NewInt32(this, length));
-
- int number_of_holes = 0;
- if (type == CreateArgumentsType::kMappedArguments) {
- // If the actual number of arguments is less than the number of formal
- // parameters, we have fewer holes to fill to not overshoot the length.
- number_of_holes = std::min(formal_parameter_count_, length);
- }
- for (int i = 0; i < number_of_holes; ++i) {
- frame.Add(TranslatedValue::NewTagged(this, roots.the_hole_value()));
- }
- int argc = length - number_of_holes;
- int start_index = number_of_holes;
- if (type == CreateArgumentsType::kRestParameter) {
- start_index = std::max(0, formal_parameter_count_);
- }
- for (int i = 0; i < argc; i++) {
- // Skip the receiver.
- int offset = i + start_index + 1;
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
- Address arguments_frame = offset > formal_parameter_count_
- ? stack_frame_pointer_
- : input_frame_pointer;
-#endif
- Address argument_slot = arguments_frame +
- CommonFrameConstants::kFixedFrameSizeAboveFp +
- offset * kSystemPointerSize;
-
- frame.Add(TranslatedValue::NewTagged(this, *FullObjectSlot(argument_slot)));
- }
-}
-
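The length bookkeeping in CreateArgumentsElementsTranslatedValues boils down to two small formulas: a rest array only holds the arguments past the formal parameters, and mapped arguments begin with one hole per formal parameter that is actually supplied. As a hedged illustration (the helper names are not V8's):

#include <algorithm>

// Rest arrays only hold the arguments past the formal parameters.
int RestParameterLength(int arguments_length, int formal_parameter_count) {
  return std::max(0, arguments_length - formal_parameter_count);
}

// Mapped arguments start with one hole per formal parameter that was
// actually supplied, so the holes never overshoot the overall length.
int MappedArgumentsHoleCount(int arguments_length, int formal_parameter_count) {
  return std::min(formal_parameter_count, arguments_length);
}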
-// We can't intermix stack decoding and allocations because the deoptimization
-// infrastructure is not GC safe.
-// Thus we build a temporary structure in malloced space.
-// The TranslatedValue objects created correspond to the static translation
-// instructions from the TranslationIterator, except for
-// Translation::ARGUMENTS_ELEMENTS, where the number and values of the
-// FixedArray elements depend on dynamic information from the optimized frame.
-// Returns the number of expected nested translations from the
-// TranslationIterator.
-int TranslatedState::CreateNextTranslatedValue(
- int frame_index, TranslationIterator* iterator, FixedArray literal_array,
- Address fp, RegisterValues* registers, FILE* trace_file) {
- disasm::NameConverter converter;
-
- TranslatedFrame& frame = frames_[frame_index];
- int value_index = static_cast<int>(frame.values_.size());
-
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- switch (opcode) {
- case Translation::BEGIN:
- case Translation::INTERPRETED_FRAME:
- case Translation::ARGUMENTS_ADAPTOR_FRAME:
- case Translation::CONSTRUCT_STUB_FRAME:
- case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME:
- case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME:
- case Translation::BUILTIN_CONTINUATION_FRAME:
- case Translation::UPDATE_FEEDBACK:
- // Peeled off before getting here.
- break;
-
- case Translation::DUPLICATED_OBJECT: {
- int object_id = iterator->Next();
- if (trace_file != nullptr) {
- PrintF(trace_file, "duplicated object #%d", object_id);
- }
- object_positions_.push_back(object_positions_[object_id]);
- TranslatedValue translated_value =
- TranslatedValue::NewDuplicateObject(this, object_id);
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
-
- case Translation::ARGUMENTS_ELEMENTS: {
- CreateArgumentsType arguments_type =
- static_cast<CreateArgumentsType>(iterator->Next());
- CreateArgumentsElementsTranslatedValues(frame_index, fp, arguments_type,
- trace_file);
- return 0;
- }
-
- case Translation::ARGUMENTS_LENGTH: {
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
- int arguments_length = actual_argument_count_;
-#else
- int arguments_length;
- ComputeArgumentsPosition(fp, &arguments_length);
-#endif
- if (trace_file != nullptr) {
- PrintF(trace_file, "arguments length field (length = %d)",
- arguments_length);
- }
- frame.Add(TranslatedValue::NewInt32(this, arguments_length));
- return 0;
- }
-
- case Translation::CAPTURED_OBJECT: {
- int field_count = iterator->Next();
- int object_index = static_cast<int>(object_positions_.size());
- if (trace_file != nullptr) {
- PrintF(trace_file, "captured object #%d (length = %d)", object_index,
- field_count);
- }
- object_positions_.push_back({frame_index, value_index});
- TranslatedValue translated_value =
- TranslatedValue::NewDeferredObject(this, field_count, object_index);
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
-
- case Translation::REGISTER: {
- int input_reg = iterator->Next();
- if (registers == nullptr) {
- TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
- intptr_t value = registers->GetRegister(input_reg);
- Address uncompressed_value = DecompressIfNeeded(value);
- if (trace_file != nullptr) {
- PrintF(trace_file, V8PRIxPTR_FMT " ; %s ", uncompressed_value,
- converter.NameOfCPURegister(input_reg));
- Object(uncompressed_value).ShortPrint(trace_file);
- }
- TranslatedValue translated_value =
- TranslatedValue::NewTagged(this, Object(uncompressed_value));
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
-
- case Translation::INT32_REGISTER: {
- int input_reg = iterator->Next();
- if (registers == nullptr) {
- TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
- intptr_t value = registers->GetRegister(input_reg);
- if (trace_file != nullptr) {
- PrintF(trace_file, "%" V8PRIdPTR " ; %s (int32)", value,
- converter.NameOfCPURegister(input_reg));
- }
- TranslatedValue translated_value =
- TranslatedValue::NewInt32(this, static_cast<int32_t>(value));
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
-
- case Translation::INT64_REGISTER: {
- int input_reg = iterator->Next();
- if (registers == nullptr) {
- TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
- intptr_t value = registers->GetRegister(input_reg);
- if (trace_file != nullptr) {
- PrintF(trace_file, "%" V8PRIdPTR " ; %s (int64)", value,
- converter.NameOfCPURegister(input_reg));
- }
- TranslatedValue translated_value =
- TranslatedValue::NewInt64(this, static_cast<int64_t>(value));
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
-
- case Translation::UINT32_REGISTER: {
- int input_reg = iterator->Next();
- if (registers == nullptr) {
- TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
- intptr_t value = registers->GetRegister(input_reg);
- if (trace_file != nullptr) {
- PrintF(trace_file, "%" V8PRIuPTR " ; %s (uint32)", value,
- converter.NameOfCPURegister(input_reg));
- }
- TranslatedValue translated_value =
- TranslatedValue::NewUInt32(this, static_cast<uint32_t>(value));
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
-
- case Translation::BOOL_REGISTER: {
- int input_reg = iterator->Next();
- if (registers == nullptr) {
- TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
- intptr_t value = registers->GetRegister(input_reg);
- if (trace_file != nullptr) {
- PrintF(trace_file, "%" V8PRIdPTR " ; %s (bool)", value,
- converter.NameOfCPURegister(input_reg));
- }
- TranslatedValue translated_value =
- TranslatedValue::NewBool(this, static_cast<uint32_t>(value));
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
-
- case Translation::FLOAT_REGISTER: {
- int input_reg = iterator->Next();
- if (registers == nullptr) {
- TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
- Float32 value = registers->GetFloatRegister(input_reg);
- if (trace_file != nullptr) {
- PrintF(trace_file, "%e ; %s (float)", value.get_scalar(),
- RegisterName(FloatRegister::from_code(input_reg)));
- }
- TranslatedValue translated_value = TranslatedValue::NewFloat(this, value);
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
-
- case Translation::DOUBLE_REGISTER: {
- int input_reg = iterator->Next();
- if (registers == nullptr) {
- TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
- Float64 value = registers->GetDoubleRegister(input_reg);
- if (trace_file != nullptr) {
- PrintF(trace_file, "%e ; %s (double)", value.get_scalar(),
- RegisterName(DoubleRegister::from_code(input_reg)));
- }
- TranslatedValue translated_value =
- TranslatedValue::NewDouble(this, value);
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
-
- case Translation::STACK_SLOT: {
- int slot_offset =
- OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
- intptr_t value = *(reinterpret_cast<intptr_t*>(fp + slot_offset));
- Address uncompressed_value = DecompressIfNeeded(value);
- if (trace_file != nullptr) {
- PrintF(trace_file, V8PRIxPTR_FMT " ; [fp %c %3d] ",
- uncompressed_value, slot_offset < 0 ? '-' : '+',
- std::abs(slot_offset));
- Object(uncompressed_value).ShortPrint(trace_file);
- }
- TranslatedValue translated_value =
- TranslatedValue::NewTagged(this, Object(uncompressed_value));
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
-
- case Translation::INT32_STACK_SLOT: {
- int slot_offset =
- OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
- uint32_t value = GetUInt32Slot(fp, slot_offset);
- if (trace_file != nullptr) {
- PrintF(trace_file, "%d ; (int32) [fp %c %3d] ",
- static_cast<int32_t>(value), slot_offset < 0 ? '-' : '+',
- std::abs(slot_offset));
- }
- TranslatedValue translated_value = TranslatedValue::NewInt32(this, value);
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
-
- case Translation::INT64_STACK_SLOT: {
- int slot_offset =
- OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
- uint64_t value = GetUInt64Slot(fp, slot_offset);
- if (trace_file != nullptr) {
- PrintF(trace_file, "%" V8PRIdPTR " ; (int64) [fp %c %3d] ",
- static_cast<intptr_t>(value), slot_offset < 0 ? '-' : '+',
- std::abs(slot_offset));
- }
- TranslatedValue translated_value = TranslatedValue::NewInt64(this, value);
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
-
- case Translation::UINT32_STACK_SLOT: {
- int slot_offset =
- OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
- uint32_t value = GetUInt32Slot(fp, slot_offset);
- if (trace_file != nullptr) {
- PrintF(trace_file, "%u ; (uint32) [fp %c %3d] ", value,
- slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
- }
- TranslatedValue translated_value =
- TranslatedValue::NewUInt32(this, value);
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
-
- case Translation::BOOL_STACK_SLOT: {
- int slot_offset =
- OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
- uint32_t value = GetUInt32Slot(fp, slot_offset);
- if (trace_file != nullptr) {
- PrintF(trace_file, "%u ; (bool) [fp %c %3d] ", value,
- slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
- }
- TranslatedValue translated_value = TranslatedValue::NewBool(this, value);
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
-
- case Translation::FLOAT_STACK_SLOT: {
- int slot_offset =
- OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
- Float32 value = GetFloatSlot(fp, slot_offset);
- if (trace_file != nullptr) {
- PrintF(trace_file, "%e ; (float) [fp %c %3d] ", value.get_scalar(),
- slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
- }
- TranslatedValue translated_value = TranslatedValue::NewFloat(this, value);
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
-
- case Translation::DOUBLE_STACK_SLOT: {
- int slot_offset =
- OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
- Float64 value = GetDoubleSlot(fp, slot_offset);
- if (trace_file != nullptr) {
- PrintF(trace_file, "%e ; (double) [fp %c %d] ", value.get_scalar(),
- slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
- }
- TranslatedValue translated_value =
- TranslatedValue::NewDouble(this, value);
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
-
- case Translation::LITERAL: {
- int literal_index = iterator->Next();
- Object value = literal_array.get(literal_index);
- if (trace_file != nullptr) {
- PrintF(trace_file, V8PRIxPTR_FMT " ; (literal %2d) ", value.ptr(),
- literal_index);
- value.ShortPrint(trace_file);
- }
-
- TranslatedValue translated_value =
- TranslatedValue::NewTagged(this, value);
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
- }
-
- FATAL("We should never get here - unexpected deopt info.");
-}
-
-Address TranslatedState::DecompressIfNeeded(intptr_t value) {
- if (COMPRESS_POINTERS_BOOL) {
- return DecompressTaggedAny(isolate()->isolate_root(),
- static_cast<uint32_t>(value));
- } else {
- return value;
- }
-}
-
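DecompressIfNeeded only matters on pointer-compressed builds: the stack slot or register holds just the low 32 bits of a tagged value, and the full address is rebuilt against the isolate's cage base. A minimal sketch of that reconstruction, with illustrative names:

#include <cstdint>

// Rebuild a full tagged address from its compressed low 32 bits.
uintptr_t DecompressTagged(uintptr_t cage_base, intptr_t raw_value) {
  return cage_base + static_cast<uint32_t>(raw_value);
}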
-TranslatedState::TranslatedState(const JavaScriptFrame* frame) {
- int deopt_index = Safepoint::kNoDeoptimizationIndex;
- DeoptimizationData data =
- static_cast<const OptimizedFrame*>(frame)->GetDeoptimizationData(
- &deopt_index);
- DCHECK(!data.is_null() && deopt_index != Safepoint::kNoDeoptimizationIndex);
- TranslationIterator it(data.TranslationByteArray(),
- data.TranslationIndex(deopt_index).value());
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
- int actual_argc = frame->GetActualArgumentCount();
-#else
- int actual_argc = 0;
-#endif
- Init(frame->isolate(), frame->fp(), frame->fp(), &it, data.LiteralArray(),
- nullptr /* registers */, nullptr /* trace file */,
- frame->function().shared().internal_formal_parameter_count(),
- actual_argc);
-}
-
-void TranslatedState::Init(Isolate* isolate, Address input_frame_pointer,
- Address stack_frame_pointer,
- TranslationIterator* iterator,
- FixedArray literal_array, RegisterValues* registers,
- FILE* trace_file, int formal_parameter_count,
- int actual_argument_count) {
- DCHECK(frames_.empty());
-
- stack_frame_pointer_ = stack_frame_pointer;
- formal_parameter_count_ = formal_parameter_count;
- actual_argument_count_ = actual_argument_count;
- isolate_ = isolate;
-
- // Read out the 'header' translation.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- CHECK(opcode == Translation::BEGIN);
-
- int count = iterator->Next();
- frames_.reserve(count);
- iterator->Next(); // Drop JS frames count.
- int update_feedback_count = iterator->Next();
- CHECK_GE(update_feedback_count, 0);
- CHECK_LE(update_feedback_count, 1);
-
- if (update_feedback_count == 1) {
- ReadUpdateFeedback(iterator, literal_array, trace_file);
- }
-
- std::stack<int> nested_counts;
-
- // Read the frames.
- for (int frame_index = 0; frame_index < count; frame_index++) {
- // Read the frame descriptor.
- frames_.push_back(CreateNextTranslatedFrame(
- iterator, literal_array, input_frame_pointer, trace_file));
- TranslatedFrame& frame = frames_.back();
-
- // Read the values.
- int values_to_process = frame.GetValueCount();
- while (values_to_process > 0 || !nested_counts.empty()) {
- if (trace_file != nullptr) {
- if (nested_counts.empty()) {
- // For top level values, print the value number.
- PrintF(trace_file,
- " %3i: ", frame.GetValueCount() - values_to_process);
- } else {
- // Take care of indenting for nested values.
- PrintF(trace_file, " ");
- for (size_t j = 0; j < nested_counts.size(); j++) {
- PrintF(trace_file, " ");
- }
- }
- }
-
- int nested_count =
- CreateNextTranslatedValue(frame_index, iterator, literal_array,
- input_frame_pointer, registers, trace_file);
-
- if (trace_file != nullptr) {
- PrintF(trace_file, "\n");
- }
-
- // Update the value count and resolve the nesting.
- values_to_process--;
- if (nested_count > 0) {
- nested_counts.push(values_to_process);
- values_to_process = nested_count;
- } else {
- while (values_to_process == 0 && !nested_counts.empty()) {
- values_to_process = nested_counts.top();
- nested_counts.pop();
- }
- }
- }
- }
-
- CHECK(!iterator->HasNext() || static_cast<Translation::Opcode>(
- iterator->Next()) == Translation::BEGIN);
-}
-
-void TranslatedState::Prepare(Address stack_frame_pointer) {
- for (auto& frame : frames_) frame.Handlify();
-
- if (!feedback_vector_.is_null()) {
- feedback_vector_handle_ =
- Handle<FeedbackVector>(feedback_vector_, isolate());
- feedback_vector_ = FeedbackVector();
- }
- stack_frame_pointer_ = stack_frame_pointer;
-
- UpdateFromPreviouslyMaterializedObjects();
-}
-
-TranslatedValue* TranslatedState::GetValueByObjectIndex(int object_index) {
- CHECK_LT(static_cast<size_t>(object_index), object_positions_.size());
- TranslatedState::ObjectPosition pos = object_positions_[object_index];
- return &(frames_[pos.frame_index_].values_[pos.value_index_]);
-}
-
-Handle<HeapObject> TranslatedState::InitializeObjectAt(TranslatedValue* slot) {
- slot = ResolveCapturedObject(slot);
-
- DisallowGarbageCollection no_gc;
- if (slot->materialization_state() != TranslatedValue::kFinished) {
- std::stack<int> worklist;
- worklist.push(slot->object_index());
- slot->mark_finished();
-
- while (!worklist.empty()) {
- int index = worklist.top();
- worklist.pop();
- InitializeCapturedObjectAt(index, &worklist, no_gc);
- }
- }
- return slot->storage();
-}
-
-void TranslatedState::InitializeCapturedObjectAt(
- int object_index, std::stack<int>* worklist,
- const DisallowGarbageCollection& no_gc) {
- CHECK_LT(static_cast<size_t>(object_index), object_positions_.size());
- TranslatedState::ObjectPosition pos = object_positions_[object_index];
- int value_index = pos.value_index_;
-
- TranslatedFrame* frame = &(frames_[pos.frame_index_]);
- TranslatedValue* slot = &(frame->values_[value_index]);
- value_index++;
-
- CHECK_EQ(TranslatedValue::kFinished, slot->materialization_state());
- CHECK_EQ(TranslatedValue::kCapturedObject, slot->kind());
-
- // Ensure all fields are initialized.
- int children_init_index = value_index;
- for (int i = 0; i < slot->GetChildrenCount(); i++) {
- // If the field is an object that has not been initialized yet, queue it
- // for initialization (and mark it as such).
- TranslatedValue* child_slot = frame->ValueAt(children_init_index);
- if (child_slot->kind() == TranslatedValue::kCapturedObject ||
- child_slot->kind() == TranslatedValue::kDuplicatedObject) {
- child_slot = ResolveCapturedObject(child_slot);
- if (child_slot->materialization_state() != TranslatedValue::kFinished) {
- DCHECK_EQ(TranslatedValue::kAllocated,
- child_slot->materialization_state());
- worklist->push(child_slot->object_index());
- child_slot->mark_finished();
- }
- }
- SkipSlots(1, frame, &children_init_index);
- }
-
- // Read the map.
- // The map should never be materialized, so check that we already have an
- // existing object here.
- CHECK_EQ(frame->values_[value_index].kind(), TranslatedValue::kTagged);
- Handle<Map> map = Handle<Map>::cast(frame->values_[value_index].GetValue());
- CHECK(map->IsMap());
- value_index++;
-
- // Handle the special cases.
- switch (map->instance_type()) {
- case HEAP_NUMBER_TYPE:
- case FIXED_DOUBLE_ARRAY_TYPE:
- return;
-
- case FIXED_ARRAY_TYPE:
- case AWAIT_CONTEXT_TYPE:
- case BLOCK_CONTEXT_TYPE:
- case CATCH_CONTEXT_TYPE:
- case DEBUG_EVALUATE_CONTEXT_TYPE:
- case EVAL_CONTEXT_TYPE:
- case FUNCTION_CONTEXT_TYPE:
- case MODULE_CONTEXT_TYPE:
- case NATIVE_CONTEXT_TYPE:
- case SCRIPT_CONTEXT_TYPE:
- case WITH_CONTEXT_TYPE:
- case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
- case HASH_TABLE_TYPE:
- case ORDERED_HASH_MAP_TYPE:
- case ORDERED_HASH_SET_TYPE:
- case NAME_DICTIONARY_TYPE:
- case GLOBAL_DICTIONARY_TYPE:
- case NUMBER_DICTIONARY_TYPE:
- case SIMPLE_NUMBER_DICTIONARY_TYPE:
- case PROPERTY_ARRAY_TYPE:
- case SCRIPT_CONTEXT_TABLE_TYPE:
- case SLOPPY_ARGUMENTS_ELEMENTS_TYPE:
- InitializeObjectWithTaggedFieldsAt(frame, &value_index, slot, map, no_gc);
- break;
-
- default:
- CHECK(map->IsJSObjectMap());
- InitializeJSObjectAt(frame, &value_index, slot, map, no_gc);
- break;
- }
- CHECK_EQ(value_index, children_init_index);
-}
-
-void TranslatedState::EnsureObjectAllocatedAt(TranslatedValue* slot) {
- slot = ResolveCapturedObject(slot);
-
- if (slot->materialization_state() == TranslatedValue::kUninitialized) {
- std::stack<int> worklist;
- worklist.push(slot->object_index());
- slot->mark_allocated();
-
- while (!worklist.empty()) {
- int index = worklist.top();
- worklist.pop();
- EnsureCapturedObjectAllocatedAt(index, &worklist);
- }
- }
-}
-
-int TranslatedValue::GetSmiValue() const {
- Object value = GetRawValue();
- CHECK(value.IsSmi());
- return Smi::cast(value).value();
-}
-
-void TranslatedState::MaterializeFixedDoubleArray(TranslatedFrame* frame,
- int* value_index,
- TranslatedValue* slot,
- Handle<Map> map) {
- int length = frame->values_[*value_index].GetSmiValue();
- (*value_index)++;
- Handle<FixedDoubleArray> array = Handle<FixedDoubleArray>::cast(
- isolate()->factory()->NewFixedDoubleArray(length));
- CHECK_GT(length, 0);
- for (int i = 0; i < length; i++) {
- CHECK_NE(TranslatedValue::kCapturedObject,
- frame->values_[*value_index].kind());
- Handle<Object> value = frame->values_[*value_index].GetValue();
- if (value->IsNumber()) {
- array->set(i, value->Number());
- } else {
- CHECK(value.is_identical_to(isolate()->factory()->the_hole_value()));
- array->set_the_hole(isolate(), i);
- }
- (*value_index)++;
- }
- slot->set_storage(array);
-}
-
-void TranslatedState::MaterializeHeapNumber(TranslatedFrame* frame,
- int* value_index,
- TranslatedValue* slot) {
- CHECK_NE(TranslatedValue::kCapturedObject,
- frame->values_[*value_index].kind());
- Handle<Object> value = frame->values_[*value_index].GetValue();
- CHECK(value->IsNumber());
- Handle<HeapNumber> box = isolate()->factory()->NewHeapNumber(value->Number());
- (*value_index)++;
- slot->set_storage(box);
-}
-
-namespace {
-
-enum StorageKind : uint8_t {
- kStoreTagged,
- kStoreUnboxedDouble,
- kStoreHeapObject
-};
-
-} // namespace
-
-void TranslatedState::SkipSlots(int slots_to_skip, TranslatedFrame* frame,
- int* value_index) {
- while (slots_to_skip > 0) {
- TranslatedValue* slot = &(frame->values_[*value_index]);
- (*value_index)++;
- slots_to_skip--;
-
- if (slot->kind() == TranslatedValue::kCapturedObject) {
- slots_to_skip += slot->GetChildrenCount();
- }
- }
-}
-
-void TranslatedState::EnsureCapturedObjectAllocatedAt(
- int object_index, std::stack<int>* worklist) {
- CHECK_LT(static_cast<size_t>(object_index), object_positions_.size());
- TranslatedState::ObjectPosition pos = object_positions_[object_index];
- int value_index = pos.value_index_;
-
- TranslatedFrame* frame = &(frames_[pos.frame_index_]);
- TranslatedValue* slot = &(frame->values_[value_index]);
- value_index++;
-
- CHECK_EQ(TranslatedValue::kAllocated, slot->materialization_state());
- CHECK_EQ(TranslatedValue::kCapturedObject, slot->kind());
-
- // Read the map.
- // The map should never be materialized, so check that we already have an
- // existing object here.
- CHECK_EQ(frame->values_[value_index].kind(), TranslatedValue::kTagged);
- Handle<Map> map = Handle<Map>::cast(frame->values_[value_index].GetValue());
- CHECK(map->IsMap());
- value_index++;
-
- // Handle the special cases.
- switch (map->instance_type()) {
- case FIXED_DOUBLE_ARRAY_TYPE:
- // Materialize (i.e. allocate&initialize) the array and return since
- // there is no need to process the children.
- return MaterializeFixedDoubleArray(frame, &value_index, slot, map);
-
- case HEAP_NUMBER_TYPE:
- // Materialize (i.e. allocate&initialize) the heap number and return.
- // There is no need to process the children.
- return MaterializeHeapNumber(frame, &value_index, slot);
-
- case FIXED_ARRAY_TYPE:
- case SCRIPT_CONTEXT_TABLE_TYPE:
- case AWAIT_CONTEXT_TYPE:
- case BLOCK_CONTEXT_TYPE:
- case CATCH_CONTEXT_TYPE:
- case DEBUG_EVALUATE_CONTEXT_TYPE:
- case EVAL_CONTEXT_TYPE:
- case FUNCTION_CONTEXT_TYPE:
- case MODULE_CONTEXT_TYPE:
- case NATIVE_CONTEXT_TYPE:
- case SCRIPT_CONTEXT_TYPE:
- case WITH_CONTEXT_TYPE:
- case HASH_TABLE_TYPE:
- case ORDERED_HASH_MAP_TYPE:
- case ORDERED_HASH_SET_TYPE:
- case NAME_DICTIONARY_TYPE:
- case GLOBAL_DICTIONARY_TYPE:
- case NUMBER_DICTIONARY_TYPE:
- case SIMPLE_NUMBER_DICTIONARY_TYPE: {
- // Check we have the right size.
- int array_length = frame->values_[value_index].GetSmiValue();
- int instance_size = FixedArray::SizeFor(array_length);
- CHECK_EQ(instance_size, slot->GetChildrenCount() * kTaggedSize);
-
- // Canonicalize empty fixed array.
- if (*map == ReadOnlyRoots(isolate()).empty_fixed_array().map() &&
- array_length == 0) {
- slot->set_storage(isolate()->factory()->empty_fixed_array());
- } else {
- slot->set_storage(AllocateStorageFor(slot));
- }
-
- // Make sure all the remaining children (after the map) are allocated.
- return EnsureChildrenAllocated(slot->GetChildrenCount() - 1, frame,
- &value_index, worklist);
- }
-
- case SLOPPY_ARGUMENTS_ELEMENTS_TYPE: {
- // Verify that the arguments size is correct.
- int args_length = frame->values_[value_index].GetSmiValue();
- int args_size = SloppyArgumentsElements::SizeFor(args_length);
- CHECK_EQ(args_size, slot->GetChildrenCount() * kTaggedSize);
-
- slot->set_storage(AllocateStorageFor(slot));
-
- // Make sure all the remaining children (after the map) are allocated.
- return EnsureChildrenAllocated(slot->GetChildrenCount() - 1, frame,
- &value_index, worklist);
- }
-
- case PROPERTY_ARRAY_TYPE: {
- // Check we have the right size.
- int length_or_hash = frame->values_[value_index].GetSmiValue();
- int array_length = PropertyArray::LengthField::decode(length_or_hash);
- int instance_size = PropertyArray::SizeFor(array_length);
- CHECK_EQ(instance_size, slot->GetChildrenCount() * kTaggedSize);
-
- slot->set_storage(AllocateStorageFor(slot));
-
- // Make sure all the remaining children (after the map) are allocated.
- return EnsureChildrenAllocated(slot->GetChildrenCount() - 1, frame,
- &value_index, worklist);
- }
-
- default:
- CHECK(map->IsJSObjectMap());
- EnsureJSObjectAllocated(slot, map);
- TranslatedValue* properties_slot = &(frame->values_[value_index]);
- value_index++;
- if (properties_slot->kind() == TranslatedValue::kCapturedObject) {
- // If we are materializing the property array, make sure we put
- // the mutable heap numbers at the right places.
- EnsurePropertiesAllocatedAndMarked(properties_slot, map);
- EnsureChildrenAllocated(properties_slot->GetChildrenCount(), frame,
- &value_index, worklist);
- }
- // Make sure all the remaining children (after the map and properties) are
- // allocated.
- return EnsureChildrenAllocated(slot->GetChildrenCount() - 2, frame,
- &value_index, worklist);
- }
- UNREACHABLE();
-}
-
-void TranslatedState::EnsureChildrenAllocated(int count, TranslatedFrame* frame,
- int* value_index,
- std::stack<int>* worklist) {
- // Ensure all children are allocated.
- for (int i = 0; i < count; i++) {
- // If the field is an object that has not been allocated yet, queue it
- // for initialization (and mark it as such).
- TranslatedValue* child_slot = frame->ValueAt(*value_index);
- if (child_slot->kind() == TranslatedValue::kCapturedObject ||
- child_slot->kind() == TranslatedValue::kDuplicatedObject) {
- child_slot = ResolveCapturedObject(child_slot);
- if (child_slot->materialization_state() ==
- TranslatedValue::kUninitialized) {
- worklist->push(child_slot->object_index());
- child_slot->mark_allocated();
- }
- } else {
- // Make sure the simple values (heap numbers, etc.) are properly
- // initialized.
- child_slot->GetValue();
- }
- SkipSlots(1, frame, value_index);
- }
-}
-
-void TranslatedState::EnsurePropertiesAllocatedAndMarked(
- TranslatedValue* properties_slot, Handle<Map> map) {
- CHECK_EQ(TranslatedValue::kUninitialized,
- properties_slot->materialization_state());
-
- Handle<ByteArray> object_storage = AllocateStorageFor(properties_slot);
- properties_slot->mark_allocated();
- properties_slot->set_storage(object_storage);
-
- // Set markers for out-of-object properties.
- Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
- isolate());
- for (InternalIndex i : map->IterateOwnDescriptors()) {
- FieldIndex index = FieldIndex::ForDescriptor(*map, i);
- Representation representation = descriptors->GetDetails(i).representation();
- if (!index.is_inobject() &&
- (representation.IsDouble() || representation.IsHeapObject())) {
- CHECK(!map->IsUnboxedDoubleField(index));
- int outobject_index = index.outobject_array_index();
- int array_index = outobject_index * kTaggedSize;
- object_storage->set(array_index, kStoreHeapObject);
- }
- }
-}
-
-Handle<ByteArray> TranslatedState::AllocateStorageFor(TranslatedValue* slot) {
- int allocate_size =
- ByteArray::LengthFor(slot->GetChildrenCount() * kTaggedSize);
- // It is important to allocate all the objects tenured so that the marker
- // does not visit them.
- Handle<ByteArray> object_storage =
- isolate()->factory()->NewByteArray(allocate_size, AllocationType::kOld);
- for (int i = 0; i < object_storage->length(); i++) {
- object_storage->set(i, kStoreTagged);
- }
- return object_storage;
-}
-
-void TranslatedState::EnsureJSObjectAllocated(TranslatedValue* slot,
- Handle<Map> map) {
- CHECK_EQ(map->instance_size(), slot->GetChildrenCount() * kTaggedSize);
-
- Handle<ByteArray> object_storage = AllocateStorageFor(slot);
- // Now we handle the interesting (JSObject) case.
- Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
- isolate());
-
- // Set markers for in-object properties.
- for (InternalIndex i : map->IterateOwnDescriptors()) {
- FieldIndex index = FieldIndex::ForDescriptor(*map, i);
- Representation representation = descriptors->GetDetails(i).representation();
- if (index.is_inobject() &&
- (representation.IsDouble() || representation.IsHeapObject())) {
- CHECK_GE(index.index(), FixedArray::kHeaderSize / kTaggedSize);
- int array_index = index.index() * kTaggedSize - FixedArray::kHeaderSize;
- uint8_t marker = map->IsUnboxedDoubleField(index) ? kStoreUnboxedDouble
- : kStoreHeapObject;
- object_storage->set(array_index, marker);
- }
- }
- slot->set_storage(object_storage);
-}
-
-TranslatedValue* TranslatedState::GetResolvedSlot(TranslatedFrame* frame,
- int value_index) {
- TranslatedValue* slot = frame->ValueAt(value_index);
- if (slot->kind() == TranslatedValue::kDuplicatedObject) {
- slot = ResolveCapturedObject(slot);
- }
- CHECK_NE(slot->materialization_state(), TranslatedValue::kUninitialized);
- return slot;
-}
-
-TranslatedValue* TranslatedState::GetResolvedSlotAndAdvance(
- TranslatedFrame* frame, int* value_index) {
- TranslatedValue* slot = GetResolvedSlot(frame, *value_index);
- SkipSlots(1, frame, value_index);
- return slot;
-}
-
-Handle<Object> TranslatedState::GetValueAndAdvance(TranslatedFrame* frame,
- int* value_index) {
- TranslatedValue* slot = GetResolvedSlot(frame, *value_index);
- SkipSlots(1, frame, value_index);
- return slot->GetValue();
-}
-
-void TranslatedState::InitializeJSObjectAt(
- TranslatedFrame* frame, int* value_index, TranslatedValue* slot,
- Handle<Map> map, const DisallowGarbageCollection& no_gc) {
- Handle<HeapObject> object_storage = Handle<HeapObject>::cast(slot->storage_);
- DCHECK_EQ(TranslatedValue::kCapturedObject, slot->kind());
-
- // The object should have at least a map and some payload.
- CHECK_GE(slot->GetChildrenCount(), 2);
-
- // Notify the concurrent marker about the layout change.
- isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_gc);
-
- // Fill the property array field.
- {
- Handle<Object> properties = GetValueAndAdvance(frame, value_index);
- WRITE_FIELD(*object_storage, JSObject::kPropertiesOrHashOffset,
- *properties);
- WRITE_BARRIER(*object_storage, JSObject::kPropertiesOrHashOffset,
- *properties);
- }
-
- // For all the other fields we first look at the fixed array and check the
- // marker to see if we store an unboxed double.
- DCHECK_EQ(kTaggedSize, JSObject::kPropertiesOrHashOffset);
- for (int i = 2; i < slot->GetChildrenCount(); i++) {
- TranslatedValue* slot = GetResolvedSlotAndAdvance(frame, value_index);
- // Read out the marker and ensure the field is consistent with
- // what the markers in the storage say (note that all heap numbers
- // should be fully initialized by now).
- int offset = i * kTaggedSize;
- uint8_t marker = object_storage->ReadField<uint8_t>(offset);
- if (marker == kStoreUnboxedDouble) {
- Handle<HeapObject> field_value = slot->storage();
- CHECK(field_value->IsHeapNumber());
- object_storage->WriteField<double>(offset, field_value->Number());
- } else if (marker == kStoreHeapObject) {
- Handle<HeapObject> field_value = slot->storage();
- WRITE_FIELD(*object_storage, offset, *field_value);
- WRITE_BARRIER(*object_storage, offset, *field_value);
- } else {
- CHECK_EQ(kStoreTagged, marker);
- Handle<Object> field_value = slot->GetValue();
- DCHECK_IMPLIES(field_value->IsHeapNumber(),
- !IsSmiDouble(field_value->Number()));
- WRITE_FIELD(*object_storage, offset, *field_value);
- WRITE_BARRIER(*object_storage, offset, *field_value);
- }
- }
- object_storage->synchronized_set_map(*map);
-}
-
-void TranslatedState::InitializeObjectWithTaggedFieldsAt(
- TranslatedFrame* frame, int* value_index, TranslatedValue* slot,
- Handle<Map> map, const DisallowGarbageCollection& no_gc) {
- Handle<HeapObject> object_storage = Handle<HeapObject>::cast(slot->storage_);
-
- // Skip the writes if we already have the canonical empty fixed array.
- if (*object_storage == ReadOnlyRoots(isolate()).empty_fixed_array()) {
- CHECK_EQ(2, slot->GetChildrenCount());
- Handle<Object> length_value = GetValueAndAdvance(frame, value_index);
- CHECK_EQ(*length_value, Smi::FromInt(0));
- return;
- }
-
- // Notify the concurrent marker about the layout change.
- isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_gc);
-
- // Write the fields to the object.
- for (int i = 1; i < slot->GetChildrenCount(); i++) {
- TranslatedValue* slot = GetResolvedSlotAndAdvance(frame, value_index);
- int offset = i * kTaggedSize;
- uint8_t marker = object_storage->ReadField<uint8_t>(offset);
- Handle<Object> field_value;
- if (i > 1 && marker == kStoreHeapObject) {
- field_value = slot->storage();
- } else {
- CHECK(marker == kStoreTagged || i == 1);
- field_value = slot->GetValue();
- DCHECK_IMPLIES(field_value->IsHeapNumber(),
- !IsSmiDouble(field_value->Number()));
- }
- WRITE_FIELD(*object_storage, offset, *field_value);
- WRITE_BARRIER(*object_storage, offset, *field_value);
- }
-
- object_storage->synchronized_set_map(*map);
-}
-
-TranslatedValue* TranslatedState::ResolveCapturedObject(TranslatedValue* slot) {
- while (slot->kind() == TranslatedValue::kDuplicatedObject) {
- slot = GetValueByObjectIndex(slot->object_index());
- }
- CHECK_EQ(TranslatedValue::kCapturedObject, slot->kind());
- return slot;
-}
-
-TranslatedFrame* TranslatedState::GetFrameFromJSFrameIndex(int jsframe_index) {
- for (size_t i = 0; i < frames_.size(); i++) {
- if (frames_[i].kind() == TranslatedFrame::kInterpretedFunction ||
- frames_[i].kind() == TranslatedFrame::kJavaScriptBuiltinContinuation ||
- frames_[i].kind() ==
- TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch) {
- if (jsframe_index > 0) {
- jsframe_index--;
- } else {
- return &(frames_[i]);
- }
- }
- }
- return nullptr;
-}
-
-TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
- int jsframe_index, int* args_count) {
- for (size_t i = 0; i < frames_.size(); i++) {
- if (frames_[i].kind() == TranslatedFrame::kInterpretedFunction ||
- frames_[i].kind() == TranslatedFrame::kJavaScriptBuiltinContinuation ||
- frames_[i].kind() ==
- TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch) {
- if (jsframe_index > 0) {
- jsframe_index--;
- } else {
-        // We have the JS function frame, now check if it has an arguments
-        // adaptor.
- if (i > 0 &&
- frames_[i - 1].kind() == TranslatedFrame::kArgumentsAdaptor) {
- *args_count = frames_[i - 1].height();
- return &(frames_[i - 1]);
- }
-
-          // JavaScriptBuiltinContinuation frames that are not preceded by
-          // an arguments adaptor frame are currently only used by C++ API calls
- // from TurboFan. Calls to C++ API functions from TurboFan need
- // a special marker frame state, otherwise the API call wouldn't
- // be shown in a stack trace.
- if (frames_[i].kind() ==
- TranslatedFrame::kJavaScriptBuiltinContinuation &&
- frames_[i].shared_info()->internal_formal_parameter_count() ==
- kDontAdaptArgumentsSentinel) {
- DCHECK(frames_[i].shared_info()->IsApiFunction());
-
- // The argument count for this special case is always the second
- // to last value in the TranslatedFrame. It should also always be
- // {1}, as the GenericLazyDeoptContinuation builtin only has one
- // argument (the receiver).
- static constexpr int kTheContext = 1;
- const int height = frames_[i].height() + kTheContext;
- *args_count = frames_[i].ValueAt(height - 1)->GetSmiValue();
- DCHECK_EQ(*args_count, 1);
- } else {
- *args_count = InternalFormalParameterCountWithReceiver(
- *frames_[i].shared_info());
- }
- return &(frames_[i]);
- }
- }
- }
- return nullptr;
-}
-
-void TranslatedState::StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame) {
- MaterializedObjectStore* materialized_store =
- isolate_->materialized_object_store();
- Handle<FixedArray> previously_materialized_objects =
- materialized_store->Get(stack_frame_pointer_);
-
- Handle<Object> marker = isolate_->factory()->arguments_marker();
-
- int length = static_cast<int>(object_positions_.size());
- bool new_store = false;
- if (previously_materialized_objects.is_null()) {
- previously_materialized_objects =
- isolate_->factory()->NewFixedArray(length, AllocationType::kOld);
- for (int i = 0; i < length; i++) {
- previously_materialized_objects->set(i, *marker);
- }
- new_store = true;
- }
-
- CHECK_EQ(length, previously_materialized_objects->length());
-
- bool value_changed = false;
- for (int i = 0; i < length; i++) {
- TranslatedState::ObjectPosition pos = object_positions_[i];
- TranslatedValue* value_info =
- &(frames_[pos.frame_index_].values_[pos.value_index_]);
-
- CHECK(value_info->IsMaterializedObject());
-
- // Skip duplicate objects (i.e., those that point to some other object id).
- if (value_info->object_index() != i) continue;
-
- Handle<Object> previous_value(previously_materialized_objects->get(i),
- isolate_);
- Handle<Object> value(value_info->GetRawValue(), isolate_);
-
- if (value.is_identical_to(marker)) {
- DCHECK_EQ(*previous_value, *marker);
- } else {
- if (*previous_value == *marker) {
- if (value->IsSmi()) {
- value = isolate()->factory()->NewHeapNumber(value->Number());
- }
- previously_materialized_objects->set(i, *value);
- value_changed = true;
- } else {
- CHECK(*previous_value == *value ||
- (previous_value->IsHeapNumber() && value->IsSmi() &&
- previous_value->Number() == value->Number()));
- }
- }
- }
-
- if (new_store && value_changed) {
- materialized_store->Set(stack_frame_pointer_,
- previously_materialized_objects);
- CHECK_EQ(frames_[0].kind(), TranslatedFrame::kInterpretedFunction);
- CHECK_EQ(frame->function(), frames_[0].front().GetRawValue());
- Deoptimizer::DeoptimizeFunction(frame->function(), frame->LookupCode());
- }
-}
-
-void TranslatedState::UpdateFromPreviouslyMaterializedObjects() {
- MaterializedObjectStore* materialized_store =
- isolate_->materialized_object_store();
- Handle<FixedArray> previously_materialized_objects =
- materialized_store->Get(stack_frame_pointer_);
-
- // If we have no previously materialized objects, there is nothing to do.
- if (previously_materialized_objects.is_null()) return;
-
- Handle<Object> marker = isolate_->factory()->arguments_marker();
-
- int length = static_cast<int>(object_positions_.size());
- CHECK_EQ(length, previously_materialized_objects->length());
-
- for (int i = 0; i < length; i++) {
-    // For previously materialized objects, inject their values into the
-    // translated values.
- if (previously_materialized_objects->get(i) != *marker) {
- TranslatedState::ObjectPosition pos = object_positions_[i];
- TranslatedValue* value_info =
- &(frames_[pos.frame_index_].values_[pos.value_index_]);
- CHECK(value_info->IsMaterializedObject());
-
- if (value_info->kind() == TranslatedValue::kCapturedObject) {
- Handle<Object> object(previously_materialized_objects->get(i),
- isolate_);
- CHECK(object->IsHeapObject());
- value_info->set_initialized_storage(Handle<HeapObject>::cast(object));
- }
- }
- }
-}
-
-void TranslatedState::VerifyMaterializedObjects() {
-#if VERIFY_HEAP
- int length = static_cast<int>(object_positions_.size());
- for (int i = 0; i < length; i++) {
- TranslatedValue* slot = GetValueByObjectIndex(i);
- if (slot->kind() == TranslatedValue::kCapturedObject) {
- CHECK_EQ(slot, GetValueByObjectIndex(slot->object_index()));
- if (slot->materialization_state() == TranslatedValue::kFinished) {
- slot->storage()->ObjectVerify(isolate());
- } else {
- CHECK_EQ(slot->materialization_state(),
- TranslatedValue::kUninitialized);
- }
- }
- }
-#endif
-}
-
-bool TranslatedState::DoUpdateFeedback() {
- if (!feedback_vector_handle_.is_null()) {
- CHECK(!feedback_slot_.IsInvalid());
- isolate()->CountUsage(v8::Isolate::kDeoptimizerDisableSpeculation);
- FeedbackNexus nexus(feedback_vector_handle_, feedback_slot_);
- nexus.SetSpeculationMode(SpeculationMode::kDisallowSpeculation);
- return true;
- }
- return false;
-}
-
-void TranslatedState::ReadUpdateFeedback(TranslationIterator* iterator,
- FixedArray literal_array,
- FILE* trace_file) {
- CHECK_EQ(Translation::UPDATE_FEEDBACK, iterator->Next());
- feedback_vector_ = FeedbackVector::cast(literal_array.get(iterator->Next()));
- feedback_slot_ = FeedbackSlot(iterator->Next());
- if (trace_file != nullptr) {
- PrintF(trace_file, " reading FeedbackVector (slot %d)\n",
- feedback_slot_.ToInt());
- }
+ .SourcePosition(bytecode_offset.ToInt());
}
} // namespace internal
} // namespace v8
-
-// Undefine the heap manipulation macros.
-#include "src/objects/object-macros-undef.h"
diff --git a/deps/v8/src/deoptimizer/deoptimizer.h b/deps/v8/src/deoptimizer/deoptimizer.h
index efc37b5950..ced3eeab44 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.h
+++ b/deps/v8/src/deoptimizer/deoptimizer.h
@@ -5,431 +5,23 @@
#ifndef V8_DEOPTIMIZER_DEOPTIMIZER_H_
#define V8_DEOPTIMIZER_DEOPTIMIZER_H_
-#include <stack>
#include <vector>
-#include "src/base/macros.h"
-#include "src/base/platform/wrappers.h"
-#include "src/codegen/label.h"
-#include "src/codegen/register-arch.h"
+#include "src/builtins/builtins.h"
#include "src/codegen/source-position.h"
-#include "src/common/assert-scope.h"
-#include "src/common/globals.h"
#include "src/deoptimizer/deoptimize-reason.h"
+#include "src/deoptimizer/frame-description.h"
+#include "src/deoptimizer/translated-state.h"
#include "src/diagnostics/code-tracer.h"
-#include "src/execution/frame-constants.h"
-#include "src/execution/isolate.h"
-#include "src/heap/heap.h"
-#include "src/objects/feedback-vector.h"
#include "src/objects/js-function.h"
-#include "src/objects/shared-function-info.h"
-#include "src/utils/allocation.h"
-#include "src/utils/boxed-float.h"
-#include "src/zone/zone-chunk-list.h"
namespace v8 {
namespace internal {
-class FrameDescription;
-class JavaScriptFrame;
-class TranslationIterator;
-class DeoptimizedFrameInfo;
-class TranslatedFrame;
-class TranslatedState;
-class RegisterValues;
-class MacroAssembler;
-class StrongRootsEntry;
-
enum class BuiltinContinuationMode;
-class TranslatedValue {
- public:
- // Allocation-free getter of the value.
- // Returns ReadOnlyRoots::arguments_marker() if allocation would be necessary
- // to get the value. In the case of numbers, returns a Smi if possible.
- Object GetRawValue() const;
-
- // Convenience wrapper around GetRawValue (checked).
- int GetSmiValue() const;
-
- // Returns the value, possibly materializing it first (and the whole subgraph
- // reachable from this value). In the case of numbers, returns a Smi if
- // possible.
- Handle<Object> GetValue();
-
- bool IsMaterializedObject() const;
- bool IsMaterializableByDebugger() const;
-
- private:
- friend class TranslatedState;
- friend class TranslatedFrame;
-
- enum Kind : uint8_t {
- kInvalid,
- kTagged,
- kInt32,
- kInt64,
- kUInt32,
- kBoolBit,
- kFloat,
- kDouble,
- kCapturedObject, // Object captured by the escape analysis.
- // The number of nested objects can be obtained
- // with the DeferredObjectLength() method
- // (the values of the nested objects follow
- // this value in the depth-first order.)
- kDuplicatedObject // Duplicated object of a deferred object.
- };
-
- enum MaterializationState : uint8_t {
- kUninitialized,
- kAllocated, // Storage for the object has been allocated (or
- // enqueued for allocation).
- kFinished, // The object has been initialized (or enqueued for
- // initialization).
- };
-
- TranslatedValue(TranslatedState* container, Kind kind)
- : kind_(kind), container_(container) {}
- Kind kind() const { return kind_; }
- MaterializationState materialization_state() const {
- return materialization_state_;
- }
- void Handlify();
- int GetChildrenCount() const;
-
- static TranslatedValue NewDeferredObject(TranslatedState* container,
- int length, int object_index);
- static TranslatedValue NewDuplicateObject(TranslatedState* container, int id);
- static TranslatedValue NewFloat(TranslatedState* container, Float32 value);
- static TranslatedValue NewDouble(TranslatedState* container, Float64 value);
- static TranslatedValue NewInt32(TranslatedState* container, int32_t value);
- static TranslatedValue NewInt64(TranslatedState* container, int64_t value);
- static TranslatedValue NewUInt32(TranslatedState* container, uint32_t value);
- static TranslatedValue NewBool(TranslatedState* container, uint32_t value);
- static TranslatedValue NewTagged(TranslatedState* container, Object literal);
- static TranslatedValue NewInvalid(TranslatedState* container);
-
- Isolate* isolate() const;
-
- void set_storage(Handle<HeapObject> storage) { storage_ = storage; }
- void set_initialized_storage(Handle<HeapObject> storage);
- void mark_finished() { materialization_state_ = kFinished; }
- void mark_allocated() { materialization_state_ = kAllocated; }
-
- Handle<HeapObject> storage() {
- DCHECK_NE(materialization_state(), kUninitialized);
- return storage_;
- }
-
- Kind kind_;
- MaterializationState materialization_state_ = kUninitialized;
- TranslatedState* container_; // This is only needed for materialization of
- // objects and constructing handles (to get
- // to the isolate).
-
- Handle<HeapObject> storage_; // Contains the materialized value or the
- // byte-array that will be later morphed into
- // the materialized object.
-
- struct MaterializedObjectInfo {
- int id_;
- int length_; // Applies only to kCapturedObject kinds.
- };
-
- union {
- // kind kTagged. After handlification it is always nullptr.
- Object raw_literal_;
- // kind is kUInt32 or kBoolBit.
- uint32_t uint32_value_;
- // kind is kInt32.
- int32_t int32_value_;
- // kind is kInt64.
- int64_t int64_value_;
- // kind is kFloat
- Float32 float_value_;
- // kind is kDouble
- Float64 double_value_;
- // kind is kDuplicatedObject or kCapturedObject.
- MaterializedObjectInfo materialization_info_;
- };
-
- // Checked accessors for the union members.
- Object raw_literal() const;
- int32_t int32_value() const;
- int64_t int64_value() const;
- uint32_t uint32_value() const;
- Float32 float_value() const;
- Float64 double_value() const;
- int object_length() const;
- int object_index() const;
-};
-
-class TranslatedFrame {
- public:
- enum Kind {
- kInterpretedFunction,
- kArgumentsAdaptor,
- kConstructStub,
- kBuiltinContinuation,
- kJavaScriptBuiltinContinuation,
- kJavaScriptBuiltinContinuationWithCatch,
- kInvalid
- };
-
- int GetValueCount();
-
- Kind kind() const { return kind_; }
- BailoutId node_id() const { return node_id_; }
- Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
-
- // TODO(jgruber): Simplify/clarify the semantics of this field. The name
- // `height` is slightly misleading. Yes, this value is related to stack frame
- // height, but must undergo additional mutations to arrive at the real stack
- // frame height (e.g.: addition/subtraction of context, accumulator, fixed
- // frame sizes, padding).
- int height() const { return height_; }
-
- int return_value_offset() const { return return_value_offset_; }
- int return_value_count() const { return return_value_count_; }
-
- SharedFunctionInfo raw_shared_info() const {
- CHECK(!raw_shared_info_.is_null());
- return raw_shared_info_;
- }
-
- class iterator {
- public:
- iterator& operator++() {
- ++input_index_;
- AdvanceIterator(&position_);
- return *this;
- }
-
- iterator operator++(int) {
- iterator original(position_, input_index_);
- ++input_index_;
- AdvanceIterator(&position_);
- return original;
- }
-
- bool operator==(const iterator& other) const {
- // Ignore {input_index_} for equality.
- return position_ == other.position_;
- }
- bool operator!=(const iterator& other) const { return !(*this == other); }
-
- TranslatedValue& operator*() { return (*position_); }
- TranslatedValue* operator->() { return &(*position_); }
- const TranslatedValue& operator*() const { return (*position_); }
- const TranslatedValue* operator->() const { return &(*position_); }
-
- int input_index() const { return input_index_; }
-
- private:
- friend TranslatedFrame;
-
- explicit iterator(std::deque<TranslatedValue>::iterator position,
- int input_index = 0)
- : position_(position), input_index_(input_index) {}
-
- std::deque<TranslatedValue>::iterator position_;
- int input_index_;
- };
-
- using reference = TranslatedValue&;
- using const_reference = TranslatedValue const&;
-
- iterator begin() { return iterator(values_.begin()); }
- iterator end() { return iterator(values_.end()); }
-
- reference front() { return values_.front(); }
- const_reference front() const { return values_.front(); }
-
- private:
- friend class TranslatedState;
-
- // Constructor static methods.
- static TranslatedFrame InterpretedFrame(BailoutId bytecode_offset,
- SharedFunctionInfo shared_info,
- int height, int return_value_offset,
- int return_value_count);
- static TranslatedFrame AccessorFrame(Kind kind,
- SharedFunctionInfo shared_info);
- static TranslatedFrame ArgumentsAdaptorFrame(SharedFunctionInfo shared_info,
- int height);
- static TranslatedFrame ConstructStubFrame(BailoutId bailout_id,
- SharedFunctionInfo shared_info,
- int height);
- static TranslatedFrame BuiltinContinuationFrame(
- BailoutId bailout_id, SharedFunctionInfo shared_info, int height);
- static TranslatedFrame JavaScriptBuiltinContinuationFrame(
- BailoutId bailout_id, SharedFunctionInfo shared_info, int height);
- static TranslatedFrame JavaScriptBuiltinContinuationWithCatchFrame(
- BailoutId bailout_id, SharedFunctionInfo shared_info, int height);
- static TranslatedFrame InvalidFrame() {
- return TranslatedFrame(kInvalid, SharedFunctionInfo());
- }
-
- static void AdvanceIterator(std::deque<TranslatedValue>::iterator* iter);
-
- TranslatedFrame(Kind kind,
- SharedFunctionInfo shared_info = SharedFunctionInfo(),
- int height = 0, int return_value_offset = 0,
- int return_value_count = 0)
- : kind_(kind),
- node_id_(BailoutId::None()),
- raw_shared_info_(shared_info),
- height_(height),
- return_value_offset_(return_value_offset),
- return_value_count_(return_value_count) {}
-
- void Add(const TranslatedValue& value) { values_.push_back(value); }
- TranslatedValue* ValueAt(int index) { return &(values_[index]); }
- void Handlify();
-
- Kind kind_;
- BailoutId node_id_;
- SharedFunctionInfo raw_shared_info_;
- Handle<SharedFunctionInfo> shared_info_;
- int height_;
- int return_value_offset_;
- int return_value_count_;
-
- using ValuesContainer = std::deque<TranslatedValue>;
-
- ValuesContainer values_;
-};
-
-// Auxiliary class for translating deoptimization values.
-// Typical usage sequence:
-//
-// 1. Construct the instance. This will involve reading out the translations
-// and resolving them to values using the supplied frame pointer and
-// machine state (registers). This phase is guaranteed not to allocate
-// and not to use any HandleScope. Any object pointers will be stored raw.
-//
-// 2. Handlify pointers. This will convert all the raw pointers to handles.
-//
-// 3. Read out the frame values.
-//
-// Note: After the instance is constructed, it is possible to iterate over
-// the values eagerly.
-
-class TranslatedState {
- public:
- TranslatedState() = default;
- explicit TranslatedState(const JavaScriptFrame* frame);
-
- void Prepare(Address stack_frame_pointer);
-
- // Store newly materialized values into the isolate.
- void StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame);
-
- using iterator = std::vector<TranslatedFrame>::iterator;
- iterator begin() { return frames_.begin(); }
- iterator end() { return frames_.end(); }
-
- using const_iterator = std::vector<TranslatedFrame>::const_iterator;
- const_iterator begin() const { return frames_.begin(); }
- const_iterator end() const { return frames_.end(); }
-
- std::vector<TranslatedFrame>& frames() { return frames_; }
-
- TranslatedFrame* GetFrameFromJSFrameIndex(int jsframe_index);
- TranslatedFrame* GetArgumentsInfoFromJSFrameIndex(int jsframe_index,
- int* arguments_count);
-
- Isolate* isolate() { return isolate_; }
-
- void Init(Isolate* isolate, Address input_frame_pointer,
- Address stack_frame_pointer, TranslationIterator* iterator,
- FixedArray literal_array, RegisterValues* registers,
- FILE* trace_file, int parameter_count, int actual_argument_count);
-
- void VerifyMaterializedObjects();
- bool DoUpdateFeedback();
-
- private:
- friend TranslatedValue;
-
- TranslatedFrame CreateNextTranslatedFrame(TranslationIterator* iterator,
- FixedArray literal_array,
- Address fp, FILE* trace_file);
- int CreateNextTranslatedValue(int frame_index, TranslationIterator* iterator,
- FixedArray literal_array, Address fp,
- RegisterValues* registers, FILE* trace_file);
- Address DecompressIfNeeded(intptr_t value);
- Address ComputeArgumentsPosition(Address input_frame_pointer, int* length);
- void CreateArgumentsElementsTranslatedValues(int frame_index,
- Address input_frame_pointer,
- CreateArgumentsType type,
- FILE* trace_file);
-
- void UpdateFromPreviouslyMaterializedObjects();
- void MaterializeFixedDoubleArray(TranslatedFrame* frame, int* value_index,
- TranslatedValue* slot, Handle<Map> map);
- void MaterializeHeapNumber(TranslatedFrame* frame, int* value_index,
- TranslatedValue* slot);
-
- void EnsureObjectAllocatedAt(TranslatedValue* slot);
-
- void SkipSlots(int slots_to_skip, TranslatedFrame* frame, int* value_index);
-
- Handle<ByteArray> AllocateStorageFor(TranslatedValue* slot);
- void EnsureJSObjectAllocated(TranslatedValue* slot, Handle<Map> map);
- void EnsurePropertiesAllocatedAndMarked(TranslatedValue* properties_slot,
- Handle<Map> map);
- void EnsureChildrenAllocated(int count, TranslatedFrame* frame,
- int* value_index, std::stack<int>* worklist);
- void EnsureCapturedObjectAllocatedAt(int object_index,
- std::stack<int>* worklist);
- Handle<HeapObject> InitializeObjectAt(TranslatedValue* slot);
- void InitializeCapturedObjectAt(int object_index, std::stack<int>* worklist,
- const DisallowGarbageCollection& no_gc);
- void InitializeJSObjectAt(TranslatedFrame* frame, int* value_index,
- TranslatedValue* slot, Handle<Map> map,
- const DisallowGarbageCollection& no_gc);
- void InitializeObjectWithTaggedFieldsAt(
- TranslatedFrame* frame, int* value_index, TranslatedValue* slot,
- Handle<Map> map, const DisallowGarbageCollection& no_gc);
-
- void ReadUpdateFeedback(TranslationIterator* iterator,
- FixedArray literal_array, FILE* trace_file);
-
- TranslatedValue* ResolveCapturedObject(TranslatedValue* slot);
- TranslatedValue* GetValueByObjectIndex(int object_index);
- Handle<Object> GetValueAndAdvance(TranslatedFrame* frame, int* value_index);
- TranslatedValue* GetResolvedSlot(TranslatedFrame* frame, int value_index);
- TranslatedValue* GetResolvedSlotAndAdvance(TranslatedFrame* frame,
- int* value_index);
-
- static uint32_t GetUInt32Slot(Address fp, int slot_index);
- static uint64_t GetUInt64Slot(Address fp, int slot_index);
- static Float32 GetFloatSlot(Address fp, int slot_index);
- static Float64 GetDoubleSlot(Address fp, int slot_index);
-
- std::vector<TranslatedFrame> frames_;
- Isolate* isolate_ = nullptr;
- Address stack_frame_pointer_ = kNullAddress;
- int formal_parameter_count_;
- int actual_argument_count_;
-
- struct ObjectPosition {
- int frame_index_;
- int value_index_;
- };
- std::deque<ObjectPosition> object_positions_;
- Handle<FeedbackVector> feedback_vector_handle_;
- FeedbackVector feedback_vector_;
- FeedbackSlot feedback_slot_;
-};
-
-class OptimizedFunctionVisitor {
- public:
- virtual ~OptimizedFunctionVisitor() = default;
- virtual void VisitFunction(JSFunction function) = 0;
-};
+class DeoptimizedFrameInfo;
+class Isolate;
class Deoptimizer : public Malloced {
public:
@@ -438,35 +30,28 @@ class Deoptimizer : public Malloced {
int deopt_id)
: position(position), deopt_reason(deopt_reason), deopt_id(deopt_id) {}
- SourcePosition position;
- DeoptimizeReason deopt_reason;
- int deopt_id;
-
- static const int kNoDeoptId = -1;
+ const SourcePosition position;
+ const DeoptimizeReason deopt_reason;
+ const int deopt_id;
};
static DeoptInfo GetDeoptInfo(Code code, Address from);
- static int ComputeSourcePositionFromBytecodeArray(Isolate* isolate,
- SharedFunctionInfo shared,
- BailoutId node_id);
+ static int ComputeSourcePositionFromBytecodeArray(
+ Isolate* isolate, SharedFunctionInfo shared,
+ BytecodeOffset bytecode_offset);
static const char* MessageFor(DeoptimizeKind kind, bool reuse_code);
- int output_count() const { return output_count_; }
-
Handle<JSFunction> function() const;
Handle<Code> compiled_code() const;
DeoptimizeKind deopt_kind() const { return deopt_kind_; }
- // Number of created JS frames. Not all created frames are necessarily JS.
- int jsframe_count() const { return jsframe_count_; }
-
bool should_reuse_code() const;
static Deoptimizer* New(Address raw_function, DeoptimizeKind kind,
- unsigned bailout_id, Address from, int fp_to_sp_delta,
- Isolate* isolate);
+ unsigned deopt_exit_index, Address from,
+ int fp_to_sp_delta, Isolate* isolate);
static Deoptimizer* Grab(Isolate* isolate);
// The returned object with information on the optimized frame needs to be
@@ -532,9 +117,10 @@ class Deoptimizer : public Malloced {
static constexpr int kMaxNumberOfEntries = 16384;
- // This marker is passed to Deoptimizer::New as {bailout_id} on platforms
- // that have fixed deopt sizes (see also kSupportsFixedDeoptExitSizes). The
- // actual deoptimization id is then calculated from the return address.
+ // This marker is passed to Deoptimizer::New as {deopt_exit_index} on
+ // platforms that have fixed deopt sizes (see also
+ // kSupportsFixedDeoptExitSizes). The actual deoptimization id is then
+ // calculated from the return address.
static constexpr unsigned kFixedExitSizeMarker = kMaxUInt32;
// Set to true when the architecture supports deoptimization exit sequences
@@ -559,17 +145,16 @@ class Deoptimizer : public Malloced {
const char* reason);
private:
- friend class FrameWriter;
void QueueValueForMaterialization(Address output_address, Object obj,
const TranslatedFrame::iterator& iterator);
Deoptimizer(Isolate* isolate, JSFunction function, DeoptimizeKind kind,
- unsigned bailout_id, Address from, int fp_to_sp_delta);
+ unsigned deopt_exit_index, Address from, int fp_to_sp_delta);
Code FindOptimizedCode();
void DeleteFrameDescriptions();
void DoComputeOutputFrames();
- void DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
+ void DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
int frame_index, bool goto_catch_handler);
void DoComputeArgumentsAdaptorFrame(TranslatedFrame* translated_frame,
int frame_index);
@@ -579,6 +164,9 @@ class Deoptimizer : public Malloced {
static Builtins::Name TrampolineForBuiltinContinuation(
BuiltinContinuationMode mode, bool must_handle_result);
+ TranslatedValue TranslatedValueForWasmReturnType(
+ base::Optional<wasm::ValueKind> wasm_call_return_type);
+
void DoComputeBuiltinContinuation(TranslatedFrame* translated_frame,
int frame_index,
BuiltinContinuationMode mode);
@@ -587,7 +175,6 @@ class Deoptimizer : public Malloced {
unsigned ComputeInputFrameSize() const;
static unsigned ComputeIncomingArgumentSize(SharedFunctionInfo shared);
- static unsigned ComputeOutgoingArgumentSize(Code code, unsigned bailout_id);
static void MarkAllCodeForContext(NativeContext native_context);
static void DeoptimizeMarkedCodeForContext(NativeContext native_context);
@@ -605,7 +192,7 @@ class Deoptimizer : public Malloced {
CodeTracer::Scope* verbose_trace_scope() const {
return FLAG_trace_deopt_verbose ? trace_scope() : nullptr;
}
- void TraceDeoptBegin(int optimization_id, int node_id);
+ void TraceDeoptBegin(int optimization_id, BytecodeOffset bytecode_offset);
void TraceDeoptEnd(double deopt_duration);
#ifdef DEBUG
static void TraceFoundActivation(Isolate* isolate, JSFunction function);
@@ -616,7 +203,7 @@ class Deoptimizer : public Malloced {
Isolate* isolate_;
JSFunction function_;
Code compiled_code_;
- unsigned bailout_id_;
+ unsigned deopt_exit_index_;
DeoptimizeKind deopt_kind_;
Address from_;
int fp_to_sp_delta_;
@@ -628,8 +215,6 @@ class Deoptimizer : public Malloced {
FrameDescription* input_;
// Number of output frames.
int output_count_;
- // Number of output js frames.
- int jsframe_count_;
// Array of output frame descriptions.
FrameDescription** output_;
@@ -638,12 +223,11 @@ class Deoptimizer : public Malloced {
intptr_t caller_fp_;
intptr_t caller_pc_;
intptr_t caller_constant_pool_;
- intptr_t input_frame_context_;
// The argument count of the bottom most frame.
int actual_argument_count_;
- // Key for lookup of previously materialized objects
+ // Key for lookup of previously materialized objects.
intptr_t stack_fp_;
TranslatedState translated_state_;
@@ -659,394 +243,9 @@ class Deoptimizer : public Malloced {
std::unique_ptr<CodeTracer::Scope> trace_scope_;
- static const int table_entry_size_;
-
- friend class FrameDescription;
friend class DeoptimizedFrameInfo;
-};
-
-class RegisterValues {
- public:
- intptr_t GetRegister(unsigned n) const {
-#if DEBUG
- // This convoluted DCHECK is needed to work around a gcc problem that
- // improperly detects an array bounds overflow in optimized debug builds
- // when using a plain DCHECK.
- if (n >= arraysize(registers_)) {
- DCHECK(false);
- return 0;
- }
-#endif
- return registers_[n];
- }
-
- Float32 GetFloatRegister(unsigned n) const;
-
- Float64 GetDoubleRegister(unsigned n) const {
- DCHECK(n < arraysize(double_registers_));
- return double_registers_[n];
- }
-
- void SetRegister(unsigned n, intptr_t value) {
- DCHECK(n < arraysize(registers_));
- registers_[n] = value;
- }
-
- intptr_t registers_[Register::kNumRegisters];
-  // Generated code writes directly into the following array; make sure the
-  // element size matches what the machine instructions expect.
- static_assert(sizeof(Float64) == kDoubleSize, "size mismatch");
- Float64 double_registers_[DoubleRegister::kNumRegisters];
-};
-
-class FrameDescription {
- public:
- explicit FrameDescription(uint32_t frame_size, int parameter_count = 0);
-
- void* operator new(size_t size, uint32_t frame_size) {
- // Subtracts kSystemPointerSize, as the member frame_content_ already
- // supplies the first element of the area to store the frame.
- return base::Malloc(size + frame_size - kSystemPointerSize);
- }
-
- void operator delete(void* pointer, uint32_t frame_size) {
- base::Free(pointer);
- }
-
- void operator delete(void* description) { base::Free(description); }
-
- uint32_t GetFrameSize() const {
- USE(frame_content_);
- DCHECK(static_cast<uint32_t>(frame_size_) == frame_size_);
- return static_cast<uint32_t>(frame_size_);
- }
-
- intptr_t GetFrameSlot(unsigned offset) {
- return *GetFrameSlotPointer(offset);
- }
-
- unsigned GetLastArgumentSlotOffset(bool pad_arguments = true) {
- int parameter_slots = parameter_count();
- if (pad_arguments && ShouldPadArguments(parameter_slots)) parameter_slots++;
- return GetFrameSize() - parameter_slots * kSystemPointerSize;
- }
-
- Address GetFramePointerAddress() {
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
-    // We should not pad arguments in the bottom frame, since it already
-    // contains padding if necessary and might contain extra arguments
-    // (actual argument count > parameter count).
- const bool pad_arguments_bottom_frame = false;
-#else
- const bool pad_arguments_bottom_frame = true;
-#endif
- int fp_offset = GetLastArgumentSlotOffset(pad_arguments_bottom_frame) -
- StandardFrameConstants::kCallerSPOffset;
- return reinterpret_cast<Address>(GetFrameSlotPointer(fp_offset));
- }
-
- RegisterValues* GetRegisterValues() { return &register_values_; }
-
- void SetFrameSlot(unsigned offset, intptr_t value) {
- *GetFrameSlotPointer(offset) = value;
- }
-
- void SetCallerPc(unsigned offset, intptr_t value);
-
- void SetCallerFp(unsigned offset, intptr_t value);
-
- void SetCallerConstantPool(unsigned offset, intptr_t value);
-
- intptr_t GetRegister(unsigned n) const {
- return register_values_.GetRegister(n);
- }
-
- Float64 GetDoubleRegister(unsigned n) const {
- return register_values_.GetDoubleRegister(n);
- }
-
- void SetRegister(unsigned n, intptr_t value) {
- register_values_.SetRegister(n, value);
- }
-
- intptr_t GetTop() const { return top_; }
- void SetTop(intptr_t top) { top_ = top; }
-
- intptr_t GetPc() const { return pc_; }
- void SetPc(intptr_t pc);
-
- intptr_t GetFp() const { return fp_; }
- void SetFp(intptr_t fp) { fp_ = fp; }
-
- intptr_t GetContext() const { return context_; }
- void SetContext(intptr_t context) { context_ = context; }
-
- intptr_t GetConstantPool() const { return constant_pool_; }
- void SetConstantPool(intptr_t constant_pool) {
- constant_pool_ = constant_pool;
- }
-
- void SetContinuation(intptr_t pc) { continuation_ = pc; }
-
- // Argument count, including receiver.
- int parameter_count() { return parameter_count_; }
-
- static int registers_offset() {
- return offsetof(FrameDescription, register_values_.registers_);
- }
-
- static constexpr int double_registers_offset() {
- return offsetof(FrameDescription, register_values_.double_registers_);
- }
-
- static int frame_size_offset() {
- return offsetof(FrameDescription, frame_size_);
- }
-
- static int pc_offset() { return offsetof(FrameDescription, pc_); }
-
- static int continuation_offset() {
- return offsetof(FrameDescription, continuation_);
- }
-
- static int frame_content_offset() {
- return offsetof(FrameDescription, frame_content_);
- }
-
- private:
- static const uint32_t kZapUint32 = 0xbeeddead;
-
-  // frame_size_ must hold a uint32_t value. It is only a uintptr_t to
- // keep the variable-size array frame_content_ of type intptr_t at
- // the end of the structure aligned.
- uintptr_t frame_size_; // Number of bytes.
- int parameter_count_;
- RegisterValues register_values_;
- intptr_t top_;
- intptr_t pc_;
- intptr_t fp_;
- intptr_t context_;
- intptr_t constant_pool_;
-
- // Continuation is the PC where the execution continues after
- // deoptimizing.
- intptr_t continuation_;
-
- // This must be at the end of the object as the object is allocated larger
- // than its definition indicates to extend this array.
- intptr_t frame_content_[1];
-
- intptr_t* GetFrameSlotPointer(unsigned offset) {
- DCHECK(offset < frame_size_);
- return reinterpret_cast<intptr_t*>(reinterpret_cast<Address>(this) +
- frame_content_offset() + offset);
- }
-};
-
-class TranslationBuffer {
- public:
- explicit TranslationBuffer(Zone* zone) : contents_(zone) {}
-
- int CurrentIndex() const { return static_cast<int>(contents_.size()); }
- void Add(int32_t value);
-
- Handle<ByteArray> CreateByteArray(Factory* factory);
-
- private:
- ZoneChunkList<uint8_t> contents_;
-};
-
-class TranslationIterator {
- public:
- TranslationIterator(ByteArray buffer, int index);
-
- int32_t Next();
-
- bool HasNext() const;
-
- void Skip(int n) {
- for (int i = 0; i < n; i++) Next();
- }
-
- private:
- ByteArray buffer_;
- int index_;
-};
-
-#define TRANSLATION_OPCODE_LIST(V) \
- V(BEGIN) \
- V(INTERPRETED_FRAME) \
- V(BUILTIN_CONTINUATION_FRAME) \
- V(JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME) \
- V(JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME) \
- V(CONSTRUCT_STUB_FRAME) \
- V(ARGUMENTS_ADAPTOR_FRAME) \
- V(DUPLICATED_OBJECT) \
- V(ARGUMENTS_ELEMENTS) \
- V(ARGUMENTS_LENGTH) \
- V(CAPTURED_OBJECT) \
- V(REGISTER) \
- V(INT32_REGISTER) \
- V(INT64_REGISTER) \
- V(UINT32_REGISTER) \
- V(BOOL_REGISTER) \
- V(FLOAT_REGISTER) \
- V(DOUBLE_REGISTER) \
- V(STACK_SLOT) \
- V(INT32_STACK_SLOT) \
- V(INT64_STACK_SLOT) \
- V(UINT32_STACK_SLOT) \
- V(BOOL_STACK_SLOT) \
- V(FLOAT_STACK_SLOT) \
- V(DOUBLE_STACK_SLOT) \
- V(LITERAL) \
- V(UPDATE_FEEDBACK)
-
-class Translation {
- public:
-#define DECLARE_TRANSLATION_OPCODE_ENUM(item) item,
- enum Opcode {
- TRANSLATION_OPCODE_LIST(DECLARE_TRANSLATION_OPCODE_ENUM) LAST = LITERAL
- };
-#undef DECLARE_TRANSLATION_OPCODE_ENUM
-
- Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count,
- int update_feedback_count, Zone* zone)
- : buffer_(buffer), index_(buffer->CurrentIndex()), zone_(zone) {
- buffer_->Add(BEGIN);
- buffer_->Add(frame_count);
- buffer_->Add(jsframe_count);
- buffer_->Add(update_feedback_count);
- }
-
- int index() const { return index_; }
-
- // Commands.
- void BeginInterpretedFrame(BailoutId bytecode_offset, int literal_id,
- unsigned height, int return_value_offset,
- int return_value_count);
- void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
- void BeginConstructStubFrame(BailoutId bailout_id, int literal_id,
- unsigned height);
- void BeginBuiltinContinuationFrame(BailoutId bailout_id, int literal_id,
- unsigned height);
- void BeginJavaScriptBuiltinContinuationFrame(BailoutId bailout_id,
- int literal_id, unsigned height);
- void BeginJavaScriptBuiltinContinuationWithCatchFrame(BailoutId bailout_id,
- int literal_id,
- unsigned height);
- void ArgumentsElements(CreateArgumentsType type);
- void ArgumentsLength();
- void BeginCapturedObject(int length);
- void AddUpdateFeedback(int vector_literal, int slot);
- void DuplicateObject(int object_index);
- void StoreRegister(Register reg);
- void StoreInt32Register(Register reg);
- void StoreInt64Register(Register reg);
- void StoreUint32Register(Register reg);
- void StoreBoolRegister(Register reg);
- void StoreFloatRegister(FloatRegister reg);
- void StoreDoubleRegister(DoubleRegister reg);
- void StoreStackSlot(int index);
- void StoreInt32StackSlot(int index);
- void StoreInt64StackSlot(int index);
- void StoreUint32StackSlot(int index);
- void StoreBoolStackSlot(int index);
- void StoreFloatStackSlot(int index);
- void StoreDoubleStackSlot(int index);
- void StoreLiteral(int literal_id);
- void StoreJSFrameFunction();
-
- Zone* zone() const { return zone_; }
-
- static int NumberOfOperandsFor(Opcode opcode);
-
-#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
- static const char* StringFor(Opcode opcode);
-#endif
-
- private:
- TranslationBuffer* buffer_;
- int index_;
- Zone* zone_;
-};
-
-class MaterializedObjectStore {
- public:
- explicit MaterializedObjectStore(Isolate* isolate) : isolate_(isolate) {}
-
- Handle<FixedArray> Get(Address fp);
- void Set(Address fp, Handle<FixedArray> materialized_objects);
- bool Remove(Address fp);
-
- private:
- Isolate* isolate() const { return isolate_; }
- Handle<FixedArray> GetStackEntries();
- Handle<FixedArray> EnsureStackEntries(int size);
-
- int StackIdToIndex(Address fp);
-
- Isolate* isolate_;
- std::vector<Address> frame_fps_;
-};
-
-// Class used to represent an unoptimized frame when the debugger
-// needs to inspect a frame that is part of an optimized frame. The
-// internally used FrameDescription objects are not GC safe so for use
-// by the debugger frame information is copied to an object of this type.
-// Represents parameters in unadapted form, so their number might not match
-// the formal parameter count.
-class DeoptimizedFrameInfo : public Malloced {
- public:
- DeoptimizedFrameInfo(TranslatedState* state,
- TranslatedState::iterator frame_it, Isolate* isolate);
-
- // Return the number of incoming arguments.
- int parameters_count() { return static_cast<int>(parameters_.size()); }
-
- // Return the height of the expression stack.
- int expression_count() { return static_cast<int>(expression_stack_.size()); }
-
- // Get the frame function.
- Handle<JSFunction> GetFunction() { return function_; }
-
- // Get the frame context.
- Handle<Object> GetContext() { return context_; }
-
- // Get an incoming argument.
- Handle<Object> GetParameter(int index) {
- DCHECK(0 <= index && index < parameters_count());
- return parameters_[index];
- }
-
- // Get an expression from the expression stack.
- Handle<Object> GetExpression(int index) {
- DCHECK(0 <= index && index < expression_count());
- return expression_stack_[index];
- }
-
- int GetSourcePosition() { return source_position_; }
-
- private:
- // Set an incoming argument.
- void SetParameter(int index, Handle<Object> obj) {
- DCHECK(0 <= index && index < parameters_count());
- parameters_[index] = obj;
- }
-
- // Set an expression on the expression stack.
- void SetExpression(int index, Handle<Object> obj) {
- DCHECK(0 <= index && index < expression_count());
- expression_stack_[index] = obj;
- }
-
- Handle<JSFunction> function_;
- Handle<Object> context_;
- std::vector<Handle<Object> > parameters_;
- std::vector<Handle<Object> > expression_stack_;
- int source_position_;
-
- friend class Deoptimizer;
+ friend class FrameDescription;
+ friend class FrameWriter;
};
} // namespace internal
diff --git a/deps/v8/src/deoptimizer/frame-description.h b/deps/v8/src/deoptimizer/frame-description.h
new file mode 100644
index 0000000000..bc5c6219ef
--- /dev/null
+++ b/deps/v8/src/deoptimizer/frame-description.h
@@ -0,0 +1,230 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEOPTIMIZER_FRAME_DESCRIPTION_H_
+#define V8_DEOPTIMIZER_FRAME_DESCRIPTION_H_
+
+#include "src/codegen/register-arch.h"
+#include "src/execution/frame-constants.h"
+#include "src/utils/boxed-float.h"
+
+namespace v8 {
+namespace internal {
+
+// Classes in this file describe the physical stack frame state.
+//
+// RegisterValues: stores gp and fp register values. Can be filled in either
+// by the DeoptimizationEntry builtin (which fills in the input state of the
+// optimized frame) or by the FrameWriter (which fills in the output state of
+// the interpreted frame).
+//
+// FrameDescription: contains RegisterValues plus the rest of the frame state.
+
+class RegisterValues {
+ public:
+ intptr_t GetRegister(unsigned n) const {
+#if DEBUG
+ // This convoluted DCHECK is needed to work around a gcc problem that
+ // improperly detects an array bounds overflow in optimized debug builds
+ // when using a plain DCHECK.
+ if (n >= arraysize(registers_)) {
+ DCHECK(false);
+ return 0;
+ }
+#endif
+ return registers_[n];
+ }
+
+ Float32 GetFloatRegister(unsigned n) const;
+
+ Float64 GetDoubleRegister(unsigned n) const {
+ DCHECK(n < arraysize(double_registers_));
+ return double_registers_[n];
+ }
+
+ void SetRegister(unsigned n, intptr_t value) {
+ DCHECK(n < arraysize(registers_));
+ registers_[n] = value;
+ }
+
+ intptr_t registers_[Register::kNumRegisters];
+  // Generated code writes directly into the following array; make sure the
+  // element size matches what the machine instructions expect.
+ static_assert(sizeof(Float64) == kDoubleSize, "size mismatch");
+ Float64 double_registers_[DoubleRegister::kNumRegisters];
+};
+
+class FrameDescription {
+ public:
+ FrameDescription(uint32_t frame_size, int parameter_count)
+ : frame_size_(frame_size),
+ parameter_count_(parameter_count),
+ top_(kZapUint32),
+ pc_(kZapUint32),
+ fp_(kZapUint32),
+ context_(kZapUint32),
+ constant_pool_(kZapUint32) {
+ // Zap all the registers.
+ for (int r = 0; r < Register::kNumRegisters; r++) {
+ // TODO(jbramley): It isn't safe to use kZapUint32 here. If the register
+ // isn't used before the next safepoint, the GC will try to scan it as a
+ // tagged value. kZapUint32 looks like a valid tagged pointer, but it
+ // isn't.
+#if defined(V8_OS_WIN) && defined(V8_TARGET_ARCH_ARM64)
+ // x18 is reserved as platform register on Windows arm64 platform
+ const int kPlatformRegister = 18;
+ if (r != kPlatformRegister) {
+ SetRegister(r, kZapUint32);
+ }
+#else
+ SetRegister(r, kZapUint32);
+#endif
+ }
+
+ // Zap all the slots.
+ for (unsigned o = 0; o < frame_size; o += kSystemPointerSize) {
+ SetFrameSlot(o, kZapUint32);
+ }
+ }
+
+ void* operator new(size_t size, uint32_t frame_size) {
+ // Subtracts kSystemPointerSize, as the member frame_content_ already
+ // supplies the first element of the area to store the frame.
+ return base::Malloc(size + frame_size - kSystemPointerSize);
+ }
+
+ void operator delete(void* pointer, uint32_t frame_size) {
+ base::Free(pointer);
+ }
+
+ void operator delete(void* description) { base::Free(description); }
+
+ uint32_t GetFrameSize() const {
+ USE(frame_content_);
+ DCHECK(static_cast<uint32_t>(frame_size_) == frame_size_);
+ return static_cast<uint32_t>(frame_size_);
+ }
+
+ intptr_t GetFrameSlot(unsigned offset) {
+ return *GetFrameSlotPointer(offset);
+ }
+
+ unsigned GetLastArgumentSlotOffset(bool pad_arguments = true) {
+ int parameter_slots = parameter_count();
+ if (pad_arguments && ShouldPadArguments(parameter_slots)) parameter_slots++;
+ return GetFrameSize() - parameter_slots * kSystemPointerSize;
+ }
+
+ Address GetFramePointerAddress() {
+    // We should not pad arguments in the bottom frame, since it already
+    // contains padding if necessary and might contain extra arguments
+    // (actual argument count > parameter count).
+ const bool pad_arguments_bottom_frame = false;
+ int fp_offset = GetLastArgumentSlotOffset(pad_arguments_bottom_frame) -
+ StandardFrameConstants::kCallerSPOffset;
+ return reinterpret_cast<Address>(GetFrameSlotPointer(fp_offset));
+ }
+
+ RegisterValues* GetRegisterValues() { return &register_values_; }
+
+ void SetFrameSlot(unsigned offset, intptr_t value) {
+ *GetFrameSlotPointer(offset) = value;
+ }
+
+ void SetCallerPc(unsigned offset, intptr_t value);
+
+ void SetCallerFp(unsigned offset, intptr_t value);
+
+ void SetCallerConstantPool(unsigned offset, intptr_t value);
+
+ intptr_t GetRegister(unsigned n) const {
+ return register_values_.GetRegister(n);
+ }
+
+ Float64 GetDoubleRegister(unsigned n) const {
+ return register_values_.GetDoubleRegister(n);
+ }
+
+ void SetRegister(unsigned n, intptr_t value) {
+ register_values_.SetRegister(n, value);
+ }
+
+ intptr_t GetTop() const { return top_; }
+ void SetTop(intptr_t top) { top_ = top; }
+
+ intptr_t GetPc() const { return pc_; }
+ void SetPc(intptr_t pc);
+
+ intptr_t GetFp() const { return fp_; }
+ void SetFp(intptr_t fp) { fp_ = fp; }
+
+ intptr_t GetContext() const { return context_; }
+ void SetContext(intptr_t context) { context_ = context; }
+
+ intptr_t GetConstantPool() const { return constant_pool_; }
+ void SetConstantPool(intptr_t constant_pool) {
+ constant_pool_ = constant_pool;
+ }
+
+ void SetContinuation(intptr_t pc) { continuation_ = pc; }
+
+ // Argument count, including receiver.
+ int parameter_count() { return parameter_count_; }
+
+ static int registers_offset() {
+ return offsetof(FrameDescription, register_values_.registers_);
+ }
+
+ static constexpr int double_registers_offset() {
+ return offsetof(FrameDescription, register_values_.double_registers_);
+ }
+
+ static int frame_size_offset() {
+ return offsetof(FrameDescription, frame_size_);
+ }
+
+ static int pc_offset() { return offsetof(FrameDescription, pc_); }
+
+ static int continuation_offset() {
+ return offsetof(FrameDescription, continuation_);
+ }
+
+ static int frame_content_offset() {
+ return offsetof(FrameDescription, frame_content_);
+ }
+
+ private:
+ static const uint32_t kZapUint32 = 0xbeeddead;
+
+  // frame_size_ must hold a uint32_t value. It is only a uintptr_t to
+ // keep the variable-size array frame_content_ of type intptr_t at
+ // the end of the structure aligned.
+ uintptr_t frame_size_; // Number of bytes.
+ int parameter_count_;
+ RegisterValues register_values_;
+ intptr_t top_;
+ intptr_t pc_;
+ intptr_t fp_;
+ intptr_t context_;
+ intptr_t constant_pool_;
+
+ // Continuation is the PC where the execution continues after
+ // deoptimizing.
+ intptr_t continuation_;
+
+ // This must be at the end of the object as the object is allocated larger
+ // than its definition indicates to extend this array.
+ intptr_t frame_content_[1];
+
+ intptr_t* GetFrameSlotPointer(unsigned offset) {
+ DCHECK(offset < frame_size_);
+ return reinterpret_cast<intptr_t*>(reinterpret_cast<Address>(this) +
+ frame_content_offset() + offset);
+ }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DEOPTIMIZER_FRAME_DESCRIPTION_H_
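The new FrameDescription keeps its variable-sized frame contents in a trailing one-element array (frame_content_) and over-allocates through a placement-argument operator new, so header and payload share one malloc'd block. Below is a self-contained sketch of that pattern, assuming a plain struct named Block as a stand-in for FrameDescription; it is not V8 code, and the apparently out-of-bounds write is intentional because the storage was over-allocated, mirroring how frame_content_ is used.

  #include <cstdint>
  #include <cstdlib>
  #include <cstring>

  struct Block {
    explicit Block(unsigned payload_size) : payload_size_(payload_size) {}

    // Over-allocate: the declared payload_[1] already contributes one
    // element, so only the remainder of the payload is added.
    static void* operator new(std::size_t object_size, unsigned payload_size) {
      return std::malloc(object_size + payload_size - sizeof(intptr_t));
    }
    static void operator delete(void* p) { std::free(p); }
    static void operator delete(void* p, unsigned) { std::free(p); }

    unsigned payload_size_;  // payload size in bytes
    intptr_t payload_[1];    // must stay last; extends into the extra space
  };

  int main() {
    const unsigned payload_bytes = 8 * sizeof(intptr_t);
    Block* b = new (payload_bytes) Block(payload_bytes);
    std::memset(b->payload_, 0, payload_bytes);  // fits: block was over-allocated
    delete b;
    return 0;
  }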
diff --git a/deps/v8/src/deoptimizer/materialized-object-store.cc b/deps/v8/src/deoptimizer/materialized-object-store.cc
new file mode 100644
index 0000000000..aedc5a3a70
--- /dev/null
+++ b/deps/v8/src/deoptimizer/materialized-object-store.cc
@@ -0,0 +1,90 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/deoptimizer/materialized-object-store.h"
+
+#include "src/execution/isolate.h"
+#include "src/objects/fixed-array-inl.h"
+#include "src/objects/oddball.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<FixedArray> MaterializedObjectStore::Get(Address fp) {
+ int index = StackIdToIndex(fp);
+ if (index == -1) {
+ return Handle<FixedArray>::null();
+ }
+ Handle<FixedArray> array = GetStackEntries();
+ CHECK_GT(array->length(), index);
+ return Handle<FixedArray>::cast(Handle<Object>(array->get(index), isolate()));
+}
+
+void MaterializedObjectStore::Set(Address fp,
+ Handle<FixedArray> materialized_objects) {
+ int index = StackIdToIndex(fp);
+ if (index == -1) {
+ index = static_cast<int>(frame_fps_.size());
+ frame_fps_.push_back(fp);
+ }
+
+ Handle<FixedArray> array = EnsureStackEntries(index + 1);
+ array->set(index, *materialized_objects);
+}
+
+bool MaterializedObjectStore::Remove(Address fp) {
+ auto it = std::find(frame_fps_.begin(), frame_fps_.end(), fp);
+ if (it == frame_fps_.end()) return false;
+ int index = static_cast<int>(std::distance(frame_fps_.begin(), it));
+
+ frame_fps_.erase(it);
+ FixedArray array = isolate()->heap()->materialized_objects();
+
+ CHECK_LT(index, array.length());
+ int fps_size = static_cast<int>(frame_fps_.size());
+ for (int i = index; i < fps_size; i++) {
+ array.set(i, array.get(i + 1));
+ }
+ array.set(fps_size, ReadOnlyRoots(isolate()).undefined_value());
+ return true;
+}
+
+int MaterializedObjectStore::StackIdToIndex(Address fp) {
+ auto it = std::find(frame_fps_.begin(), frame_fps_.end(), fp);
+ return it == frame_fps_.end()
+ ? -1
+ : static_cast<int>(std::distance(frame_fps_.begin(), it));
+}
+
+Handle<FixedArray> MaterializedObjectStore::GetStackEntries() {
+ return Handle<FixedArray>(isolate()->heap()->materialized_objects(),
+ isolate());
+}
+
+Handle<FixedArray> MaterializedObjectStore::EnsureStackEntries(int length) {
+ Handle<FixedArray> array = GetStackEntries();
+ if (array->length() >= length) {
+ return array;
+ }
+
+ int new_length = length > 10 ? length : 10;
+ if (new_length < 2 * array->length()) {
+ new_length = 2 * array->length();
+ }
+
+ Handle<FixedArray> new_array =
+ isolate()->factory()->NewFixedArray(new_length, AllocationType::kOld);
+ for (int i = 0; i < array->length(); i++) {
+ new_array->set(i, array->get(i));
+ }
+ HeapObject undefined_value = ReadOnlyRoots(isolate()).undefined_value();
+ for (int i = array->length(); i < length; i++) {
+ new_array->set(i, undefined_value);
+ }
+ isolate()->heap()->SetRootMaterializedObjects(*new_array);
+ return new_array;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/deoptimizer/materialized-object-store.h b/deps/v8/src/deoptimizer/materialized-object-store.h
new file mode 100644
index 0000000000..27537b852d
--- /dev/null
+++ b/deps/v8/src/deoptimizer/materialized-object-store.h
@@ -0,0 +1,40 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEOPTIMIZER_MATERIALIZED_OBJECT_STORE_H_
+#define V8_DEOPTIMIZER_MATERIALIZED_OBJECT_STORE_H_
+
+#include <vector>
+
+#include "src/handles/handles.h"
+
+namespace v8 {
+namespace internal {
+
+class FixedArray;
+class Isolate;
+
+class MaterializedObjectStore {
+ public:
+ explicit MaterializedObjectStore(Isolate* isolate) : isolate_(isolate) {}
+
+ Handle<FixedArray> Get(Address fp);
+ void Set(Address fp, Handle<FixedArray> materialized_objects);
+ bool Remove(Address fp);
+
+ private:
+ Isolate* isolate() const { return isolate_; }
+ Handle<FixedArray> GetStackEntries();
+ Handle<FixedArray> EnsureStackEntries(int size);
+
+ int StackIdToIndex(Address fp);
+
+ Isolate* isolate_;
+ std::vector<Address> frame_fps_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DEOPTIMIZER_MATERIALIZED_OBJECT_STORE_H_
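MaterializedObjectStore keys a growable array of materialized objects by frame pointer: StackIdToIndex maps an fp to its insertion-order index, and Get/Set/Remove keep the fp list and the array consistent. A rough standalone analogue, using std::vector and std::string instead of V8 heap objects (SimpleStore and its members are made-up names for illustration only), could look like this:

  #include <algorithm>
  #include <cstdint>
  #include <string>
  #include <vector>

  class SimpleStore {
   public:
    // Returns the stored value for fp, or nullptr if fp was never registered.
    std::string* Get(uintptr_t fp) {
      int index = StackIdToIndex(fp);
      return index == -1 ? nullptr : &values_[index];
    }

    void Set(uintptr_t fp, std::string value) {
      int index = StackIdToIndex(fp);
      if (index == -1) {
        index = static_cast<int>(fps_.size());
        fps_.push_back(fp);
      }
      if (values_.size() <= static_cast<size_t>(index)) values_.resize(index + 1);
      values_[index] = std::move(value);
    }

    bool Remove(uintptr_t fp) {
      auto it = std::find(fps_.begin(), fps_.end(), fp);
      if (it == fps_.end()) return false;
      int index = static_cast<int>(it - fps_.begin());
      fps_.erase(it);
      values_.erase(values_.begin() + index);  // keep both arrays in sync
      return true;
    }

   private:
    int StackIdToIndex(uintptr_t fp) {
      auto it = std::find(fps_.begin(), fps_.end(), fp);
      return it == fps_.end() ? -1 : static_cast<int>(it - fps_.begin());
    }

    std::vector<uintptr_t> fps_;      // frame pointers, in insertion order
    std::vector<std::string> values_; // values, parallel to fps_
  };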
diff --git a/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc b/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
index 3f38ba7108..f917c59f16 100644
--- a/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
@@ -17,13 +17,6 @@ const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = 2 * kInstrSize;
const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset =
2 * kInstrSize + kSystemPointerSize;
-// Maximum size of a table entry generated below.
-#ifdef _MIPS_ARCH_MIPS32R6
-const int Deoptimizer::table_entry_size_ = 2 * kInstrSize;
-#else
-const int Deoptimizer::table_entry_size_ = 3 * kInstrSize;
-#endif
-
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
static_cast<uint32_t>(double_registers_[n].get_bits()));
diff --git a/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc b/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
index 5bea7ccefb..f917c59f16 100644
--- a/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
@@ -17,13 +17,6 @@ const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = 2 * kInstrSize;
const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset =
2 * kInstrSize + kSystemPointerSize;
-// Maximum size of a table entry generated below.
-#ifdef _MIPS_ARCH_MIPS64R6
-const int Deoptimizer::table_entry_size_ = 2 * kInstrSize;
-#else
-const int Deoptimizer::table_entry_size_ = 3 * kInstrSize;
-#endif
-
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
static_cast<uint32_t>(double_registers_[n].get_bits()));
diff --git a/deps/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc b/deps/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc
new file mode 100644
index 0000000000..1cbe85ba5d
--- /dev/null
+++ b/deps/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc
@@ -0,0 +1,41 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/deoptimizer/deoptimizer.h"
+
+namespace v8 {
+namespace internal {
+
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
+const int Deoptimizer::kNonLazyDeoptExitSize = 5 * kInstrSize;
+const int Deoptimizer::kLazyDeoptExitSize = 5 * kInstrSize;
+const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 6 * kInstrSize;
+const int Deoptimizer::kEagerWithResumeDeoptExitSize =
+ kEagerWithResumeBeforeArgsSize + 4 * kInstrSize;
+const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = kInstrSize;
+const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset =
+ kInstrSize + kSystemPointerSize;
+
+Float32 RegisterValues::GetFloatRegister(unsigned n) const {
+ return Float32::FromBits(
+ static_cast<uint32_t>(double_registers_[n].get_bits()));
+}
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No embedded constant pool support.
+ UNREACHABLE();
+}
+
+void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/deoptimizer/translated-state.cc b/deps/v8/src/deoptimizer/translated-state.cc
new file mode 100644
index 0000000000..daaf449acd
--- /dev/null
+++ b/deps/v8/src/deoptimizer/translated-state.cc
@@ -0,0 +1,2100 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/deoptimizer/translated-state.h"
+
+#include <iomanip>
+
+#include "src/base/memory.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/deoptimizer/materialized-object-store.h"
+#include "src/deoptimizer/translation-opcode.h"
+#include "src/diagnostics/disasm.h"
+#include "src/execution/frames.h"
+#include "src/execution/isolate.h"
+#include "src/numbers/conversions.h"
+#include "src/objects/arguments.h"
+#include "src/objects/heap-number-inl.h"
+#include "src/objects/oddball.h"
+
+// Has to be the last include (doesn't have include guards)
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+
+using base::Memory;
+using base::ReadUnalignedValue;
+
+namespace internal {
+
+void TranslationArrayPrintSingleFrame(std::ostream& os,
+ TranslationArray translation_array,
+ int translation_index,
+ FixedArray literal_array) {
+ DisallowGarbageCollection gc_oh_noes;
+ TranslationArrayIterator iterator(translation_array, translation_index);
+ disasm::NameConverter converter;
+
+ TranslationOpcode opcode = TranslationOpcodeFromInt(iterator.Next());
+ DCHECK(TranslationOpcode::BEGIN == opcode);
+ int frame_count = iterator.Next();
+ int jsframe_count = iterator.Next();
+ int update_feedback_count = iterator.Next();
+ os << " " << TranslationOpcodeToString(opcode)
+ << " {frame count=" << frame_count << ", js frame count=" << jsframe_count
+ << ", update_feedback_count=" << update_feedback_count << "}\n";
+
+ while (iterator.HasNext()) {
+ opcode = TranslationOpcodeFromInt(iterator.Next());
+ if (opcode == TranslationOpcode::BEGIN) break;
+
+ os << std::setw(31) << " " << TranslationOpcodeToString(opcode) << " ";
+
+ switch (opcode) {
+ case TranslationOpcode::BEGIN:
+ UNREACHABLE();
+ break;
+
+ case TranslationOpcode::INTERPRETED_FRAME: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 5);
+ int bytecode_offset = iterator.Next();
+ int shared_info_id = iterator.Next();
+ unsigned height = iterator.Next();
+ int return_value_offset = iterator.Next();
+ int return_value_count = iterator.Next();
+ Object shared_info = literal_array.get(shared_info_id);
+ os << "{bytecode_offset=" << bytecode_offset << ", function="
+ << SharedFunctionInfo::cast(shared_info).DebugNameCStr().get()
+ << ", height=" << height << ", retval=@" << return_value_offset
+ << "(#" << return_value_count << ")}";
+ break;
+ }
+
+ case TranslationOpcode::CONSTRUCT_STUB_FRAME: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 3);
+ int bailout_id = iterator.Next();
+ int shared_info_id = iterator.Next();
+ Object shared_info = literal_array.get(shared_info_id);
+ unsigned height = iterator.Next();
+ os << "{bailout_id=" << bailout_id << ", function="
+ << SharedFunctionInfo::cast(shared_info).DebugNameCStr().get()
+ << ", height=" << height << "}";
+ break;
+ }
+
+ case TranslationOpcode::BUILTIN_CONTINUATION_FRAME:
+ case TranslationOpcode::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME:
+ case TranslationOpcode::
+ JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 3);
+ int bailout_id = iterator.Next();
+ int shared_info_id = iterator.Next();
+ Object shared_info = literal_array.get(shared_info_id);
+ unsigned height = iterator.Next();
+ os << "{bailout_id=" << bailout_id << ", function="
+ << SharedFunctionInfo::cast(shared_info).DebugNameCStr().get()
+ << ", height=" << height << "}";
+ break;
+ }
+
+ case TranslationOpcode::JS_TO_WASM_BUILTIN_CONTINUATION_FRAME: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 4);
+ int bailout_id = iterator.Next();
+ int shared_info_id = iterator.Next();
+ Object shared_info = literal_array.get(shared_info_id);
+ unsigned height = iterator.Next();
+ int wasm_return_type = iterator.Next();
+ os << "{bailout_id=" << bailout_id << ", function="
+ << SharedFunctionInfo::cast(shared_info).DebugNameCStr().get()
+ << ", height=" << height << ", wasm_return_type=" << wasm_return_type
+ << "}";
+ break;
+ }
+
+ case TranslationOpcode::ARGUMENTS_ADAPTOR_FRAME: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 2);
+ int shared_info_id = iterator.Next();
+ Object shared_info = literal_array.get(shared_info_id);
+ unsigned height = iterator.Next();
+ os << "{function="
+ << SharedFunctionInfo::cast(shared_info).DebugNameCStr().get()
+ << ", height=" << height << "}";
+ break;
+ }
+
+ case TranslationOpcode::REGISTER: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ int reg_code = iterator.Next();
+ os << "{input=" << converter.NameOfCPURegister(reg_code) << "}";
+ break;
+ }
+
+ case TranslationOpcode::INT32_REGISTER: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ int reg_code = iterator.Next();
+ os << "{input=" << converter.NameOfCPURegister(reg_code) << " (int32)}";
+ break;
+ }
+
+ case TranslationOpcode::INT64_REGISTER: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ int reg_code = iterator.Next();
+ os << "{input=" << converter.NameOfCPURegister(reg_code) << " (int64)}";
+ break;
+ }
+
+ case TranslationOpcode::UINT32_REGISTER: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ int reg_code = iterator.Next();
+ os << "{input=" << converter.NameOfCPURegister(reg_code)
+ << " (uint32)}";
+ break;
+ }
+
+ case TranslationOpcode::BOOL_REGISTER: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ int reg_code = iterator.Next();
+ os << "{input=" << converter.NameOfCPURegister(reg_code) << " (bool)}";
+ break;
+ }
+
+ case TranslationOpcode::FLOAT_REGISTER: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ int reg_code = iterator.Next();
+ os << "{input=" << FloatRegister::from_code(reg_code) << "}";
+ break;
+ }
+
+ case TranslationOpcode::DOUBLE_REGISTER: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ int reg_code = iterator.Next();
+ os << "{input=" << DoubleRegister::from_code(reg_code) << "}";
+ break;
+ }
+
+ case TranslationOpcode::STACK_SLOT: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ int input_slot_index = iterator.Next();
+ os << "{input=" << input_slot_index << "}";
+ break;
+ }
+
+ case TranslationOpcode::INT32_STACK_SLOT: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ int input_slot_index = iterator.Next();
+ os << "{input=" << input_slot_index << " (int32)}";
+ break;
+ }
+
+ case TranslationOpcode::INT64_STACK_SLOT: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ int input_slot_index = iterator.Next();
+ os << "{input=" << input_slot_index << " (int64)}";
+ break;
+ }
+
+ case TranslationOpcode::UINT32_STACK_SLOT: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ int input_slot_index = iterator.Next();
+ os << "{input=" << input_slot_index << " (uint32)}";
+ break;
+ }
+
+ case TranslationOpcode::BOOL_STACK_SLOT: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ int input_slot_index = iterator.Next();
+ os << "{input=" << input_slot_index << " (bool)}";
+ break;
+ }
+
+ case TranslationOpcode::FLOAT_STACK_SLOT:
+ case TranslationOpcode::DOUBLE_STACK_SLOT: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ int input_slot_index = iterator.Next();
+ os << "{input=" << input_slot_index << "}";
+ break;
+ }
+
+ case TranslationOpcode::LITERAL: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ int literal_index = iterator.Next();
+ Object literal_value = literal_array.get(literal_index);
+ os << "{literal_id=" << literal_index << " (" << Brief(literal_value)
+ << ")}";
+ break;
+ }
+
+ case TranslationOpcode::DUPLICATED_OBJECT: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ int object_index = iterator.Next();
+ os << "{object_index=" << object_index << "}";
+ break;
+ }
+
+ case TranslationOpcode::ARGUMENTS_ELEMENTS: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ CreateArgumentsType arguments_type =
+ static_cast<CreateArgumentsType>(iterator.Next());
+ os << "{arguments_type=" << arguments_type << "}";
+ break;
+ }
+ case TranslationOpcode::ARGUMENTS_LENGTH: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 0);
+ os << "{arguments_length}";
+ break;
+ }
+
+ case TranslationOpcode::CAPTURED_OBJECT: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+ int args_length = iterator.Next();
+ os << "{length=" << args_length << "}";
+ break;
+ }
+
+ case TranslationOpcode::UPDATE_FEEDBACK: {
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 2);
+ int literal_index = iterator.Next();
+ FeedbackSlot slot(iterator.Next());
+ os << "{feedback={vector_index=" << literal_index << ", slot=" << slot
+ << "}}";
+ break;
+ }
+ }
+ os << "\n";
+ }
+}
+
+namespace {
+
+// Decodes the return type of a Wasm function from its encoding as the
+// integer value of wasm::ValueKind; returns an empty optional if the
+// function returns void (encoded as kNoWasmReturnType).
+base::Optional<wasm::ValueKind> DecodeWasmReturnType(int code) {
+ if (code != kNoWasmReturnType) {
+ return {static_cast<wasm::ValueKind>(code)};
+ }
+ return {};
+}
+
+} // namespace
+
+// static
+TranslatedValue TranslatedValue::NewDeferredObject(TranslatedState* container,
+ int length,
+ int object_index) {
+ TranslatedValue slot(container, kCapturedObject);
+ slot.materialization_info_ = {object_index, length};
+ return slot;
+}
+
+// static
+TranslatedValue TranslatedValue::NewDuplicateObject(TranslatedState* container,
+ int id) {
+ TranslatedValue slot(container, kDuplicatedObject);
+ slot.materialization_info_ = {id, -1};
+ return slot;
+}
+
+// static
+TranslatedValue TranslatedValue::NewFloat(TranslatedState* container,
+ Float32 value) {
+ TranslatedValue slot(container, kFloat);
+ slot.float_value_ = value;
+ return slot;
+}
+
+// static
+TranslatedValue TranslatedValue::NewDouble(TranslatedState* container,
+ Float64 value) {
+ TranslatedValue slot(container, kDouble);
+ slot.double_value_ = value;
+ return slot;
+}
+
+// static
+TranslatedValue TranslatedValue::NewInt32(TranslatedState* container,
+ int32_t value) {
+ TranslatedValue slot(container, kInt32);
+ slot.int32_value_ = value;
+ return slot;
+}
+
+// static
+TranslatedValue TranslatedValue::NewInt64(TranslatedState* container,
+ int64_t value) {
+ TranslatedValue slot(container, kInt64);
+ slot.int64_value_ = value;
+ return slot;
+}
+
+// static
+TranslatedValue TranslatedValue::NewInt64ToBigInt(TranslatedState* container,
+ int64_t value) {
+ TranslatedValue slot(container, kInt64ToBigInt);
+ slot.int64_value_ = value;
+ return slot;
+}
+
+// static
+TranslatedValue TranslatedValue::NewUInt32(TranslatedState* container,
+ uint32_t value) {
+ TranslatedValue slot(container, kUInt32);
+ slot.uint32_value_ = value;
+ return slot;
+}
+
+// static
+TranslatedValue TranslatedValue::NewBool(TranslatedState* container,
+ uint32_t value) {
+ TranslatedValue slot(container, kBoolBit);
+ slot.uint32_value_ = value;
+ return slot;
+}
+
+// static
+TranslatedValue TranslatedValue::NewTagged(TranslatedState* container,
+ Object literal) {
+ TranslatedValue slot(container, kTagged);
+ slot.raw_literal_ = literal;
+ return slot;
+}
+
+// static
+TranslatedValue TranslatedValue::NewInvalid(TranslatedState* container) {
+ return TranslatedValue(container, kInvalid);
+}
+
+Isolate* TranslatedValue::isolate() const { return container_->isolate(); }
+
+Object TranslatedValue::raw_literal() const {
+ DCHECK_EQ(kTagged, kind());
+ return raw_literal_;
+}
+
+int32_t TranslatedValue::int32_value() const {
+ DCHECK_EQ(kInt32, kind());
+ return int32_value_;
+}
+
+int64_t TranslatedValue::int64_value() const {
+ DCHECK(kInt64 == kind() || kInt64ToBigInt == kind());
+ return int64_value_;
+}
+
+uint32_t TranslatedValue::uint32_value() const {
+ DCHECK(kind() == kUInt32 || kind() == kBoolBit);
+ return uint32_value_;
+}
+
+Float32 TranslatedValue::float_value() const {
+ DCHECK_EQ(kFloat, kind());
+ return float_value_;
+}
+
+Float64 TranslatedValue::double_value() const {
+ DCHECK_EQ(kDouble, kind());
+ return double_value_;
+}
+
+int TranslatedValue::object_length() const {
+ DCHECK_EQ(kind(), kCapturedObject);
+ return materialization_info_.length_;
+}
+
+int TranslatedValue::object_index() const {
+ DCHECK(kind() == kCapturedObject || kind() == kDuplicatedObject);
+ return materialization_info_.id_;
+}
+
+Object TranslatedValue::GetRawValue() const {
+ // If we have a value, return it.
+ if (materialization_state() == kFinished) {
+ int smi;
+ if (storage_->IsHeapNumber() &&
+ DoubleToSmiInteger(storage_->Number(), &smi)) {
+ return Smi::FromInt(smi);
+ }
+ return *storage_;
+ }
+
+ // Otherwise, do a best effort to get the value without allocation.
+ switch (kind()) {
+ case kTagged:
+ return raw_literal();
+
+ case kInt32: {
+ bool is_smi = Smi::IsValid(int32_value());
+ if (is_smi) {
+ return Smi::FromInt(int32_value());
+ }
+ break;
+ }
+
+ case kInt64: {
+ bool is_smi = (int64_value() >= static_cast<int64_t>(Smi::kMinValue) &&
+ int64_value() <= static_cast<int64_t>(Smi::kMaxValue));
+ if (is_smi) {
+ return Smi::FromIntptr(static_cast<intptr_t>(int64_value()));
+ }
+ break;
+ }
+
+ case kInt64ToBigInt:
+ // Return the arguments marker.
+ break;
+
+ case kUInt32: {
+ bool is_smi = (uint32_value() <= static_cast<uintptr_t>(Smi::kMaxValue));
+ if (is_smi) {
+ return Smi::FromInt(static_cast<int32_t>(uint32_value()));
+ }
+ break;
+ }
+
+ case kBoolBit: {
+ if (uint32_value() == 0) {
+ return ReadOnlyRoots(isolate()).false_value();
+ } else {
+ CHECK_EQ(1U, uint32_value());
+ return ReadOnlyRoots(isolate()).true_value();
+ }
+ }
+
+ case kFloat: {
+ int smi;
+ if (DoubleToSmiInteger(float_value().get_scalar(), &smi)) {
+ return Smi::FromInt(smi);
+ }
+ break;
+ }
+
+ case kDouble: {
+ int smi;
+ if (DoubleToSmiInteger(double_value().get_scalar(), &smi)) {
+ return Smi::FromInt(smi);
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ // If we could not get the value without allocation, return the arguments
+ // marker.
+ return ReadOnlyRoots(isolate()).arguments_marker();
+}
+
+void TranslatedValue::set_initialized_storage(Handle<HeapObject> storage) {
+ DCHECK_EQ(kUninitialized, materialization_state());
+ storage_ = storage;
+ materialization_state_ = kFinished;
+}
+
+Handle<Object> TranslatedValue::GetValue() {
+ Handle<Object> value(GetRawValue(), isolate());
+ if (materialization_state() == kFinished) return value;
+
+ if (value->IsSmi()) {
+ // Even though stored as a Smi, this number might instead be needed as a
+ // HeapNumber when materializing a JSObject with a field of HeapObject
+ // representation. Since we don't have this information available here, we
+ // just always allocate a HeapNumber and later extract the Smi again if we
+ // don't need a HeapObject.
+ set_initialized_storage(
+ isolate()->factory()->NewHeapNumber(value->Number()));
+ return value;
+ }
+
+ if (*value != ReadOnlyRoots(isolate()).arguments_marker()) {
+ set_initialized_storage(Handle<HeapObject>::cast(value));
+ return storage_;
+ }
+
+ // Otherwise we have to materialize.
+
+ if (kind() == TranslatedValue::kCapturedObject ||
+ kind() == TranslatedValue::kDuplicatedObject) {
+ // We need to materialize the object (or possibly even object graphs).
+ // To make the object verifier happy, we materialize in two steps.
+
+    // 1. Allocate storage for reachable objects. This makes sure that for
+    //    each object we have allocated space on the heap. The space will be
+    //    a byte array that is later initialized, or a fully initialized
+    //    object if it is safe to allocate one that will pass the verifier.
+ container_->EnsureObjectAllocatedAt(this);
+
+ // 2. Initialize the objects. If we have allocated only byte arrays
+ // for some objects, we now overwrite the byte arrays with the
+ // correct object fields. Note that this phase does not allocate
+ // any new objects, so it does not trigger the object verifier.
+ return container_->InitializeObjectAt(this);
+ }
+
+ double number = 0;
+ Handle<HeapObject> heap_object;
+ switch (kind()) {
+ case TranslatedValue::kInt32:
+ number = int32_value();
+ heap_object = isolate()->factory()->NewHeapNumber(number);
+ break;
+ case TranslatedValue::kInt64:
+ number = int64_value();
+ heap_object = isolate()->factory()->NewHeapNumber(number);
+ break;
+ case TranslatedValue::kInt64ToBigInt:
+ heap_object = BigInt::FromInt64(isolate(), int64_value());
+ break;
+ case TranslatedValue::kUInt32:
+ number = uint32_value();
+ heap_object = isolate()->factory()->NewHeapNumber(number);
+ break;
+ case TranslatedValue::kFloat:
+ number = float_value().get_scalar();
+ heap_object = isolate()->factory()->NewHeapNumber(number);
+ break;
+ case TranslatedValue::kDouble:
+ number = double_value().get_scalar();
+ heap_object = isolate()->factory()->NewHeapNumber(number);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ DCHECK(!IsSmiDouble(number) || kind() == TranslatedValue::kInt64ToBigInt);
+ set_initialized_storage(heap_object);
+ return storage_;
+}
+
+bool TranslatedValue::IsMaterializedObject() const {
+ switch (kind()) {
+ case kCapturedObject:
+ case kDuplicatedObject:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool TranslatedValue::IsMaterializableByDebugger() const {
+ // At the moment, we only allow materialization of doubles.
+ return (kind() == kDouble);
+}
+
+int TranslatedValue::GetChildrenCount() const {
+ if (kind() == kCapturedObject) {
+ return object_length();
+ } else {
+ return 0;
+ }
+}
+
+uint64_t TranslatedState::GetUInt64Slot(Address fp, int slot_offset) {
+#if V8_TARGET_ARCH_32_BIT
+ return ReadUnalignedValue<uint64_t>(fp + slot_offset);
+#else
+ return Memory<uint64_t>(fp + slot_offset);
+#endif
+}
+
+uint32_t TranslatedState::GetUInt32Slot(Address fp, int slot_offset) {
+ Address address = fp + slot_offset;
+#if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT
+ return Memory<uint32_t>(address + kIntSize);
+#else
+ return Memory<uint32_t>(address);
+#endif
+}
+
+Float32 TranslatedState::GetFloatSlot(Address fp, int slot_offset) {
+#if !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
+ return Float32::FromBits(GetUInt32Slot(fp, slot_offset));
+#else
+ return Float32::FromBits(Memory<uint32_t>(fp + slot_offset));
+#endif
+}
+
+Float64 TranslatedState::GetDoubleSlot(Address fp, int slot_offset) {
+ return Float64::FromBits(GetUInt64Slot(fp, slot_offset));
+}
+
+void TranslatedValue::Handlify() {
+ if (kind() == kTagged && raw_literal().IsHeapObject()) {
+ set_initialized_storage(
+ Handle<HeapObject>(HeapObject::cast(raw_literal()), isolate()));
+ raw_literal_ = Object();
+ }
+}
+
+TranslatedFrame TranslatedFrame::UnoptimizedFrame(
+ BytecodeOffset bytecode_offset, SharedFunctionInfo shared_info, int height,
+ int return_value_offset, int return_value_count) {
+ TranslatedFrame frame(kUnoptimizedFunction, shared_info, height,
+ return_value_offset, return_value_count);
+ frame.bytecode_offset_ = bytecode_offset;
+ return frame;
+}
+
+TranslatedFrame TranslatedFrame::ArgumentsAdaptorFrame(
+ SharedFunctionInfo shared_info, int height) {
+ return TranslatedFrame(kArgumentsAdaptor, shared_info, height);
+}
+
+TranslatedFrame TranslatedFrame::ConstructStubFrame(
+ BytecodeOffset bytecode_offset, SharedFunctionInfo shared_info,
+ int height) {
+ TranslatedFrame frame(kConstructStub, shared_info, height);
+ frame.bytecode_offset_ = bytecode_offset;
+ return frame;
+}
+
+TranslatedFrame TranslatedFrame::BuiltinContinuationFrame(
+ BytecodeOffset bytecode_offset, SharedFunctionInfo shared_info,
+ int height) {
+ TranslatedFrame frame(kBuiltinContinuation, shared_info, height);
+ frame.bytecode_offset_ = bytecode_offset;
+ return frame;
+}
+
+TranslatedFrame TranslatedFrame::JSToWasmBuiltinContinuationFrame(
+ BytecodeOffset bytecode_offset, SharedFunctionInfo shared_info, int height,
+ base::Optional<wasm::ValueKind> return_type) {
+ TranslatedFrame frame(kJSToWasmBuiltinContinuation, shared_info, height);
+ frame.bytecode_offset_ = bytecode_offset;
+ frame.return_type_ = return_type;
+ return frame;
+}
+
+TranslatedFrame TranslatedFrame::JavaScriptBuiltinContinuationFrame(
+ BytecodeOffset bytecode_offset, SharedFunctionInfo shared_info,
+ int height) {
+ TranslatedFrame frame(kJavaScriptBuiltinContinuation, shared_info, height);
+ frame.bytecode_offset_ = bytecode_offset;
+ return frame;
+}
+
+TranslatedFrame TranslatedFrame::JavaScriptBuiltinContinuationWithCatchFrame(
+ BytecodeOffset bytecode_offset, SharedFunctionInfo shared_info,
+ int height) {
+ TranslatedFrame frame(kJavaScriptBuiltinContinuationWithCatch, shared_info,
+ height);
+ frame.bytecode_offset_ = bytecode_offset;
+ return frame;
+}
+
+namespace {
+
+uint16_t InternalFormalParameterCountWithReceiver(SharedFunctionInfo sfi) {
+ static constexpr int kTheReceiver = 1;
+ return sfi.internal_formal_parameter_count() + kTheReceiver;
+}
+
+} // namespace
+
+int TranslatedFrame::GetValueCount() {
+ // The function is added to all frame state descriptors in
+ // InstructionSelector::AddInputsToFrameStateDescriptor.
+ static constexpr int kTheFunction = 1;
+
+ switch (kind()) {
+ case kUnoptimizedFunction: {
+ int parameter_count =
+ InternalFormalParameterCountWithReceiver(raw_shared_info_);
+ static constexpr int kTheContext = 1;
+ static constexpr int kTheAccumulator = 1;
+ return height() + parameter_count + kTheContext + kTheFunction +
+ kTheAccumulator;
+ }
+
+ case kArgumentsAdaptor:
+ return height() + kTheFunction;
+
+ case kConstructStub:
+ case kBuiltinContinuation:
+ case kJSToWasmBuiltinContinuation:
+ case kJavaScriptBuiltinContinuation:
+ case kJavaScriptBuiltinContinuationWithCatch: {
+ static constexpr int kTheContext = 1;
+ return height() + kTheContext + kTheFunction;
+ }
+
+ case kInvalid:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+}
+
+void TranslatedFrame::Handlify() {
+ if (!raw_shared_info_.is_null()) {
+ shared_info_ = Handle<SharedFunctionInfo>(raw_shared_info_,
+ raw_shared_info_.GetIsolate());
+ raw_shared_info_ = SharedFunctionInfo();
+ }
+ for (auto& value : values_) {
+ value.Handlify();
+ }
+}
+
+TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
+ TranslationArrayIterator* iterator, FixedArray literal_array, Address fp,
+ FILE* trace_file) {
+ TranslationOpcode opcode = TranslationOpcodeFromInt(iterator->Next());
+ switch (opcode) {
+ case TranslationOpcode::INTERPRETED_FRAME: {
+ BytecodeOffset bytecode_offset = BytecodeOffset(iterator->Next());
+ SharedFunctionInfo shared_info =
+ SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
+ int height = iterator->Next();
+ int return_value_offset = iterator->Next();
+ int return_value_count = iterator->Next();
+ if (trace_file != nullptr) {
+ std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
+ PrintF(trace_file, " reading input frame %s", name.get());
+ int arg_count = InternalFormalParameterCountWithReceiver(shared_info);
+ PrintF(trace_file,
+ " => bytecode_offset=%d, args=%d, height=%d, retval=%i(#%i); "
+ "inputs:\n",
+ bytecode_offset.ToInt(), arg_count, height, return_value_offset,
+ return_value_count);
+ }
+ return TranslatedFrame::UnoptimizedFrame(bytecode_offset, shared_info,
+ height, return_value_offset,
+ return_value_count);
+ }
+
+ case TranslationOpcode::ARGUMENTS_ADAPTOR_FRAME: {
+ SharedFunctionInfo shared_info =
+ SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
+ int height = iterator->Next();
+ if (trace_file != nullptr) {
+ std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
+ PrintF(trace_file, " reading arguments adaptor frame %s", name.get());
+ PrintF(trace_file, " => height=%d; inputs:\n", height);
+ }
+ return TranslatedFrame::ArgumentsAdaptorFrame(shared_info, height);
+ }
+
+ case TranslationOpcode::CONSTRUCT_STUB_FRAME: {
+ BytecodeOffset bytecode_offset = BytecodeOffset(iterator->Next());
+ SharedFunctionInfo shared_info =
+ SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
+ int height = iterator->Next();
+ if (trace_file != nullptr) {
+ std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
+ PrintF(trace_file, " reading construct stub frame %s", name.get());
+ PrintF(trace_file, " => bytecode_offset=%d, height=%d; inputs:\n",
+ bytecode_offset.ToInt(), height);
+ }
+ return TranslatedFrame::ConstructStubFrame(bytecode_offset, shared_info,
+ height);
+ }
+
+ case TranslationOpcode::BUILTIN_CONTINUATION_FRAME: {
+ BytecodeOffset bytecode_offset = BytecodeOffset(iterator->Next());
+ SharedFunctionInfo shared_info =
+ SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
+ int height = iterator->Next();
+ if (trace_file != nullptr) {
+ std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
+ PrintF(trace_file, " reading builtin continuation frame %s",
+ name.get());
+ PrintF(trace_file, " => bytecode_offset=%d, height=%d; inputs:\n",
+ bytecode_offset.ToInt(), height);
+ }
+ return TranslatedFrame::BuiltinContinuationFrame(bytecode_offset,
+ shared_info, height);
+ }
+
+ case TranslationOpcode::JS_TO_WASM_BUILTIN_CONTINUATION_FRAME: {
+ BytecodeOffset bailout_id = BytecodeOffset(iterator->Next());
+ SharedFunctionInfo shared_info =
+ SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
+ int height = iterator->Next();
+ base::Optional<wasm::ValueKind> return_type =
+ DecodeWasmReturnType(iterator->Next());
+ if (trace_file != nullptr) {
+ std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
+ PrintF(trace_file, " reading JS to Wasm builtin continuation frame %s",
+ name.get());
+ PrintF(trace_file,
+ " => bailout_id=%d, height=%d return_type=%d; inputs:\n",
+ bailout_id.ToInt(), height,
+ return_type.has_value() ? return_type.value() : -1);
+ }
+ return TranslatedFrame::JSToWasmBuiltinContinuationFrame(
+ bailout_id, shared_info, height, return_type);
+ }
+
+ case TranslationOpcode::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME: {
+ BytecodeOffset bytecode_offset = BytecodeOffset(iterator->Next());
+ SharedFunctionInfo shared_info =
+ SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
+ int height = iterator->Next();
+ if (trace_file != nullptr) {
+ std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
+ PrintF(trace_file, " reading JavaScript builtin continuation frame %s",
+ name.get());
+ PrintF(trace_file, " => bytecode_offset=%d, height=%d; inputs:\n",
+ bytecode_offset.ToInt(), height);
+ }
+ return TranslatedFrame::JavaScriptBuiltinContinuationFrame(
+ bytecode_offset, shared_info, height);
+ }
+
+ case TranslationOpcode::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: {
+ BytecodeOffset bytecode_offset = BytecodeOffset(iterator->Next());
+ SharedFunctionInfo shared_info =
+ SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
+ int height = iterator->Next();
+ if (trace_file != nullptr) {
+ std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
+ PrintF(trace_file,
+ " reading JavaScript builtin continuation frame with catch %s",
+ name.get());
+ PrintF(trace_file, " => bytecode_offset=%d, height=%d; inputs:\n",
+ bytecode_offset.ToInt(), height);
+ }
+ return TranslatedFrame::JavaScriptBuiltinContinuationWithCatchFrame(
+ bytecode_offset, shared_info, height);
+ }
+ case TranslationOpcode::UPDATE_FEEDBACK:
+ case TranslationOpcode::BEGIN:
+ case TranslationOpcode::DUPLICATED_OBJECT:
+ case TranslationOpcode::ARGUMENTS_ELEMENTS:
+ case TranslationOpcode::ARGUMENTS_LENGTH:
+ case TranslationOpcode::CAPTURED_OBJECT:
+ case TranslationOpcode::REGISTER:
+ case TranslationOpcode::INT32_REGISTER:
+ case TranslationOpcode::INT64_REGISTER:
+ case TranslationOpcode::UINT32_REGISTER:
+ case TranslationOpcode::BOOL_REGISTER:
+ case TranslationOpcode::FLOAT_REGISTER:
+ case TranslationOpcode::DOUBLE_REGISTER:
+ case TranslationOpcode::STACK_SLOT:
+ case TranslationOpcode::INT32_STACK_SLOT:
+ case TranslationOpcode::INT64_STACK_SLOT:
+ case TranslationOpcode::UINT32_STACK_SLOT:
+ case TranslationOpcode::BOOL_STACK_SLOT:
+ case TranslationOpcode::FLOAT_STACK_SLOT:
+ case TranslationOpcode::DOUBLE_STACK_SLOT:
+ case TranslationOpcode::LITERAL:
+ break;
+ }
+ UNREACHABLE();
+}
+
+// static
+void TranslatedFrame::AdvanceIterator(
+ std::deque<TranslatedValue>::iterator* iter) {
+ int values_to_skip = 1;
+ while (values_to_skip > 0) {
+ // Consume the current element.
+ values_to_skip--;
+ // Add all the children.
+ values_to_skip += (*iter)->GetChildrenCount();
+
+ (*iter)++;
+ }
+}
+
+// Creates translated values for an arguments backing store, or the backing
+// store for rest parameters depending on the given {type}. The TranslatedValue
+// objects for the fields are not read from the TranslationArrayIterator, but
+// instead created on-the-fly based on dynamic information in the optimized
+// frame.
+void TranslatedState::CreateArgumentsElementsTranslatedValues(
+ int frame_index, Address input_frame_pointer, CreateArgumentsType type,
+ FILE* trace_file) {
+ TranslatedFrame& frame = frames_[frame_index];
+ int length =
+ type == CreateArgumentsType::kRestParameter
+ ? std::max(0, actual_argument_count_ - formal_parameter_count_)
+ : actual_argument_count_;
+ int object_index = static_cast<int>(object_positions_.size());
+ int value_index = static_cast<int>(frame.values_.size());
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "arguments elements object #%d (type = %d, length = %d)",
+ object_index, static_cast<uint8_t>(type), length);
+ }
+
+ object_positions_.push_back({frame_index, value_index});
+ frame.Add(TranslatedValue::NewDeferredObject(
+ this, length + FixedArray::kHeaderSize / kTaggedSize, object_index));
+
+ ReadOnlyRoots roots(isolate_);
+ frame.Add(TranslatedValue::NewTagged(this, roots.fixed_array_map()));
+ frame.Add(TranslatedValue::NewInt32(this, length));
+
+ int number_of_holes = 0;
+ if (type == CreateArgumentsType::kMappedArguments) {
+ // If the actual number of arguments is less than the number of formal
+ // parameters, we have fewer holes to fill to not overshoot the length.
+ number_of_holes = std::min(formal_parameter_count_, length);
+ }
+ for (int i = 0; i < number_of_holes; ++i) {
+ frame.Add(TranslatedValue::NewTagged(this, roots.the_hole_value()));
+ }
+ int argc = length - number_of_holes;
+ int start_index = number_of_holes;
+ if (type == CreateArgumentsType::kRestParameter) {
+ start_index = std::max(0, formal_parameter_count_);
+ }
+ for (int i = 0; i < argc; i++) {
+ // Skip the receiver.
+ int offset = i + start_index + 1;
+ Address arguments_frame = offset > formal_parameter_count_
+ ? stack_frame_pointer_
+ : input_frame_pointer;
+ Address argument_slot = arguments_frame +
+ CommonFrameConstants::kFixedFrameSizeAboveFp +
+ offset * kSystemPointerSize;
+
+ frame.Add(TranslatedValue::NewTagged(this, *FullObjectSlot(argument_slot)));
+ }
+}
+
+// We can't intermix stack decoding and allocations because the deoptimization
+// infrastructure is not GC safe.
+// Thus we build a temporary structure in malloced space.
+// The TranslatedValue objects created correspond to the static translation
+// instructions from the TranslationArrayIterator, except for
+// TranslationOpcode::ARGUMENTS_ELEMENTS, where the number and values of the
+// FixedArray elements depend on dynamic information from the optimized frame.
+// Returns the number of expected nested translations from the
+// TranslationArrayIterator.
+int TranslatedState::CreateNextTranslatedValue(
+ int frame_index, TranslationArrayIterator* iterator,
+ FixedArray literal_array, Address fp, RegisterValues* registers,
+ FILE* trace_file) {
+ disasm::NameConverter converter;
+
+ TranslatedFrame& frame = frames_[frame_index];
+ int value_index = static_cast<int>(frame.values_.size());
+
+ TranslationOpcode opcode = TranslationOpcodeFromInt(iterator->Next());
+ switch (opcode) {
+ case TranslationOpcode::BEGIN:
+ case TranslationOpcode::INTERPRETED_FRAME:
+ case TranslationOpcode::ARGUMENTS_ADAPTOR_FRAME:
+ case TranslationOpcode::CONSTRUCT_STUB_FRAME:
+ case TranslationOpcode::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME:
+ case TranslationOpcode::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME:
+ case TranslationOpcode::BUILTIN_CONTINUATION_FRAME:
+ case TranslationOpcode::JS_TO_WASM_BUILTIN_CONTINUATION_FRAME:
+ case TranslationOpcode::UPDATE_FEEDBACK:
+ // Peeled off before getting here.
+ break;
+
+ case TranslationOpcode::DUPLICATED_OBJECT: {
+ int object_id = iterator->Next();
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "duplicated object #%d", object_id);
+ }
+ object_positions_.push_back(object_positions_[object_id]);
+ TranslatedValue translated_value =
+ TranslatedValue::NewDuplicateObject(this, object_id);
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+
+ case TranslationOpcode::ARGUMENTS_ELEMENTS: {
+ CreateArgumentsType arguments_type =
+ static_cast<CreateArgumentsType>(iterator->Next());
+ CreateArgumentsElementsTranslatedValues(frame_index, fp, arguments_type,
+ trace_file);
+ return 0;
+ }
+
+ case TranslationOpcode::ARGUMENTS_LENGTH: {
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "arguments length field (length = %d)",
+ actual_argument_count_);
+ }
+ frame.Add(TranslatedValue::NewInt32(this, actual_argument_count_));
+ return 0;
+ }
+
+ case TranslationOpcode::CAPTURED_OBJECT: {
+ int field_count = iterator->Next();
+ int object_index = static_cast<int>(object_positions_.size());
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "captured object #%d (length = %d)", object_index,
+ field_count);
+ }
+ object_positions_.push_back({frame_index, value_index});
+ TranslatedValue translated_value =
+ TranslatedValue::NewDeferredObject(this, field_count, object_index);
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+
+ case TranslationOpcode::REGISTER: {
+ int input_reg = iterator->Next();
+ if (registers == nullptr) {
+ TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+ intptr_t value = registers->GetRegister(input_reg);
+ Address uncompressed_value = DecompressIfNeeded(value);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, V8PRIxPTR_FMT " ; %s ", uncompressed_value,
+ converter.NameOfCPURegister(input_reg));
+ Object(uncompressed_value).ShortPrint(trace_file);
+ }
+ TranslatedValue translated_value =
+ TranslatedValue::NewTagged(this, Object(uncompressed_value));
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+
+ case TranslationOpcode::INT32_REGISTER: {
+ int input_reg = iterator->Next();
+ if (registers == nullptr) {
+ TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+ intptr_t value = registers->GetRegister(input_reg);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%" V8PRIdPTR " ; %s (int32)", value,
+ converter.NameOfCPURegister(input_reg));
+ }
+ TranslatedValue translated_value =
+ TranslatedValue::NewInt32(this, static_cast<int32_t>(value));
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+
+ case TranslationOpcode::INT64_REGISTER: {
+ int input_reg = iterator->Next();
+ if (registers == nullptr) {
+ TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+ intptr_t value = registers->GetRegister(input_reg);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%" V8PRIdPTR " ; %s (int64)", value,
+ converter.NameOfCPURegister(input_reg));
+ }
+ TranslatedValue translated_value =
+ TranslatedValue::NewInt64(this, static_cast<int64_t>(value));
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+
+ case TranslationOpcode::UINT32_REGISTER: {
+ int input_reg = iterator->Next();
+ if (registers == nullptr) {
+ TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+ intptr_t value = registers->GetRegister(input_reg);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%" V8PRIuPTR " ; %s (uint32)", value,
+ converter.NameOfCPURegister(input_reg));
+ }
+ TranslatedValue translated_value =
+ TranslatedValue::NewUInt32(this, static_cast<uint32_t>(value));
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+
+ case TranslationOpcode::BOOL_REGISTER: {
+ int input_reg = iterator->Next();
+ if (registers == nullptr) {
+ TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+ intptr_t value = registers->GetRegister(input_reg);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%" V8PRIdPTR " ; %s (bool)", value,
+ converter.NameOfCPURegister(input_reg));
+ }
+ TranslatedValue translated_value =
+ TranslatedValue::NewBool(this, static_cast<uint32_t>(value));
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+
+ case TranslationOpcode::FLOAT_REGISTER: {
+ int input_reg = iterator->Next();
+ if (registers == nullptr) {
+ TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+ Float32 value = registers->GetFloatRegister(input_reg);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%e ; %s (float)", value.get_scalar(),
+ RegisterName(FloatRegister::from_code(input_reg)));
+ }
+ TranslatedValue translated_value = TranslatedValue::NewFloat(this, value);
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+
+ case TranslationOpcode::DOUBLE_REGISTER: {
+ int input_reg = iterator->Next();
+ if (registers == nullptr) {
+ TranslatedValue translated_value = TranslatedValue::NewInvalid(this);
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+ Float64 value = registers->GetDoubleRegister(input_reg);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%e ; %s (double)", value.get_scalar(),
+ RegisterName(DoubleRegister::from_code(input_reg)));
+ }
+ TranslatedValue translated_value =
+ TranslatedValue::NewDouble(this, value);
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+
+ case TranslationOpcode::STACK_SLOT: {
+ int slot_offset =
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
+ intptr_t value = *(reinterpret_cast<intptr_t*>(fp + slot_offset));
+ Address uncompressed_value = DecompressIfNeeded(value);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, V8PRIxPTR_FMT " ; [fp %c %3d] ",
+ uncompressed_value, slot_offset < 0 ? '-' : '+',
+ std::abs(slot_offset));
+ Object(uncompressed_value).ShortPrint(trace_file);
+ }
+ TranslatedValue translated_value =
+ TranslatedValue::NewTagged(this, Object(uncompressed_value));
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+
+ case TranslationOpcode::INT32_STACK_SLOT: {
+ int slot_offset =
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
+ uint32_t value = GetUInt32Slot(fp, slot_offset);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%d ; (int32) [fp %c %3d] ",
+ static_cast<int32_t>(value), slot_offset < 0 ? '-' : '+',
+ std::abs(slot_offset));
+ }
+ TranslatedValue translated_value = TranslatedValue::NewInt32(this, value);
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+
+ case TranslationOpcode::INT64_STACK_SLOT: {
+ int slot_offset =
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
+ uint64_t value = GetUInt64Slot(fp, slot_offset);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%" V8PRIdPTR " ; (int64) [fp %c %3d] ",
+ static_cast<intptr_t>(value), slot_offset < 0 ? '-' : '+',
+ std::abs(slot_offset));
+ }
+ TranslatedValue translated_value = TranslatedValue::NewInt64(this, value);
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+
+ case TranslationOpcode::UINT32_STACK_SLOT: {
+ int slot_offset =
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
+ uint32_t value = GetUInt32Slot(fp, slot_offset);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%u ; (uint32) [fp %c %3d] ", value,
+ slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
+ }
+ TranslatedValue translated_value =
+ TranslatedValue::NewUInt32(this, value);
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+
+ case TranslationOpcode::BOOL_STACK_SLOT: {
+ int slot_offset =
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
+ uint32_t value = GetUInt32Slot(fp, slot_offset);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%u ; (bool) [fp %c %3d] ", value,
+ slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
+ }
+ TranslatedValue translated_value = TranslatedValue::NewBool(this, value);
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+
+ case TranslationOpcode::FLOAT_STACK_SLOT: {
+ int slot_offset =
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
+ Float32 value = GetFloatSlot(fp, slot_offset);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%e ; (float) [fp %c %3d] ", value.get_scalar(),
+ slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
+ }
+ TranslatedValue translated_value = TranslatedValue::NewFloat(this, value);
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+
+ case TranslationOpcode::DOUBLE_STACK_SLOT: {
+ int slot_offset =
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
+ Float64 value = GetDoubleSlot(fp, slot_offset);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%e ; (double) [fp %c %d] ", value.get_scalar(),
+ slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
+ }
+ TranslatedValue translated_value =
+ TranslatedValue::NewDouble(this, value);
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+
+ case TranslationOpcode::LITERAL: {
+ int literal_index = iterator->Next();
+ Object value = literal_array.get(literal_index);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, V8PRIxPTR_FMT " ; (literal %2d) ", value.ptr(),
+ literal_index);
+ value.ShortPrint(trace_file);
+ }
+
+ TranslatedValue translated_value =
+ TranslatedValue::NewTagged(this, value);
+ frame.Add(translated_value);
+ return translated_value.GetChildrenCount();
+ }
+ }
+
+ FATAL("We should never get here - unexpected deopt info.");
+}
+
+Address TranslatedState::DecompressIfNeeded(intptr_t value) {
+ if (COMPRESS_POINTERS_BOOL) {
+ return DecompressTaggedAny(isolate()->isolate_root(),
+ static_cast<uint32_t>(value));
+ } else {
+ return value;
+ }
+}
+
+TranslatedState::TranslatedState(const JavaScriptFrame* frame) {
+ int deopt_index = Safepoint::kNoDeoptimizationIndex;
+ DeoptimizationData data =
+ static_cast<const OptimizedFrame*>(frame)->GetDeoptimizationData(
+ &deopt_index);
+ DCHECK(!data.is_null() && deopt_index != Safepoint::kNoDeoptimizationIndex);
+ TranslationArrayIterator it(data.TranslationByteArray(),
+ data.TranslationIndex(deopt_index).value());
+ int actual_argc = frame->GetActualArgumentCount();
+ Init(frame->isolate(), frame->fp(), frame->fp(), &it, data.LiteralArray(),
+ nullptr /* registers */, nullptr /* trace file */,
+ frame->function().shared().internal_formal_parameter_count(),
+ actual_argc);
+}
+
+void TranslatedState::Init(Isolate* isolate, Address input_frame_pointer,
+ Address stack_frame_pointer,
+ TranslationArrayIterator* iterator,
+ FixedArray literal_array, RegisterValues* registers,
+ FILE* trace_file, int formal_parameter_count,
+ int actual_argument_count) {
+ DCHECK(frames_.empty());
+
+ stack_frame_pointer_ = stack_frame_pointer;
+ formal_parameter_count_ = formal_parameter_count;
+ actual_argument_count_ = actual_argument_count;
+ isolate_ = isolate;
+
+ // Read out the 'header' translation.
+ TranslationOpcode opcode = TranslationOpcodeFromInt(iterator->Next());
+ CHECK(opcode == TranslationOpcode::BEGIN);
+
+ int count = iterator->Next();
+ frames_.reserve(count);
+ iterator->Next(); // Drop JS frames count.
+ int update_feedback_count = iterator->Next();
+ CHECK_GE(update_feedback_count, 0);
+ CHECK_LE(update_feedback_count, 1);
+
+ if (update_feedback_count == 1) {
+ ReadUpdateFeedback(iterator, literal_array, trace_file);
+ }
+
+ std::stack<int> nested_counts;
+
+  // Read the frames.
+ for (int frame_index = 0; frame_index < count; frame_index++) {
+ // Read the frame descriptor.
+ frames_.push_back(CreateNextTranslatedFrame(
+ iterator, literal_array, input_frame_pointer, trace_file));
+ TranslatedFrame& frame = frames_.back();
+
+ // Read the values.
+ int values_to_process = frame.GetValueCount();
+ while (values_to_process > 0 || !nested_counts.empty()) {
+ if (trace_file != nullptr) {
+ if (nested_counts.empty()) {
+ // For top level values, print the value number.
+ PrintF(trace_file,
+ " %3i: ", frame.GetValueCount() - values_to_process);
+ } else {
+ // Take care of indenting for nested values.
+ PrintF(trace_file, " ");
+ for (size_t j = 0; j < nested_counts.size(); j++) {
+ PrintF(trace_file, " ");
+ }
+ }
+ }
+
+ int nested_count =
+ CreateNextTranslatedValue(frame_index, iterator, literal_array,
+ input_frame_pointer, registers, trace_file);
+
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "\n");
+ }
+
+ // Update the value count and resolve the nesting.
+ values_to_process--;
+ if (nested_count > 0) {
+ nested_counts.push(values_to_process);
+ values_to_process = nested_count;
+ } else {
+ while (values_to_process == 0 && !nested_counts.empty()) {
+ values_to_process = nested_counts.top();
+ nested_counts.pop();
+ }
+ }
+ }
+ }
+
+ CHECK(!iterator->HasNext() ||
+ TranslationOpcodeFromInt(iterator->Next()) == TranslationOpcode::BEGIN);
+}
+
+void TranslatedState::Prepare(Address stack_frame_pointer) {
+ for (auto& frame : frames_) frame.Handlify();
+
+ if (!feedback_vector_.is_null()) {
+ feedback_vector_handle_ =
+ Handle<FeedbackVector>(feedback_vector_, isolate());
+ feedback_vector_ = FeedbackVector();
+ }
+ stack_frame_pointer_ = stack_frame_pointer;
+
+ UpdateFromPreviouslyMaterializedObjects();
+}
+
+TranslatedValue* TranslatedState::GetValueByObjectIndex(int object_index) {
+ CHECK_LT(static_cast<size_t>(object_index), object_positions_.size());
+ TranslatedState::ObjectPosition pos = object_positions_[object_index];
+ return &(frames_[pos.frame_index_].values_[pos.value_index_]);
+}
+
+Handle<HeapObject> TranslatedState::InitializeObjectAt(TranslatedValue* slot) {
+ slot = ResolveCapturedObject(slot);
+
+ DisallowGarbageCollection no_gc;
+ if (slot->materialization_state() != TranslatedValue::kFinished) {
+ std::stack<int> worklist;
+ worklist.push(slot->object_index());
+ slot->mark_finished();
+
+ while (!worklist.empty()) {
+ int index = worklist.top();
+ worklist.pop();
+ InitializeCapturedObjectAt(index, &worklist, no_gc);
+ }
+ }
+ return slot->storage();
+}
+
+void TranslatedState::InitializeCapturedObjectAt(
+ int object_index, std::stack<int>* worklist,
+ const DisallowGarbageCollection& no_gc) {
+ CHECK_LT(static_cast<size_t>(object_index), object_positions_.size());
+ TranslatedState::ObjectPosition pos = object_positions_[object_index];
+ int value_index = pos.value_index_;
+
+ TranslatedFrame* frame = &(frames_[pos.frame_index_]);
+ TranslatedValue* slot = &(frame->values_[value_index]);
+ value_index++;
+
+ CHECK_EQ(TranslatedValue::kFinished, slot->materialization_state());
+ CHECK_EQ(TranslatedValue::kCapturedObject, slot->kind());
+
+ // Ensure all fields are initialized.
+ int children_init_index = value_index;
+ for (int i = 0; i < slot->GetChildrenCount(); i++) {
+ // If the field is an object that has not been initialized yet, queue it
+ // for initialization (and mark it as such).
+ TranslatedValue* child_slot = frame->ValueAt(children_init_index);
+ if (child_slot->kind() == TranslatedValue::kCapturedObject ||
+ child_slot->kind() == TranslatedValue::kDuplicatedObject) {
+ child_slot = ResolveCapturedObject(child_slot);
+ if (child_slot->materialization_state() != TranslatedValue::kFinished) {
+ DCHECK_EQ(TranslatedValue::kAllocated,
+ child_slot->materialization_state());
+ worklist->push(child_slot->object_index());
+ child_slot->mark_finished();
+ }
+ }
+ SkipSlots(1, frame, &children_init_index);
+ }
+
+ // Read the map.
+ // The map should never be materialized, so let us check we already have
+ // an existing object here.
+ CHECK_EQ(frame->values_[value_index].kind(), TranslatedValue::kTagged);
+ Handle<Map> map = Handle<Map>::cast(frame->values_[value_index].GetValue());
+ CHECK(map->IsMap());
+ value_index++;
+
+ // Handle the special cases.
+ switch (map->instance_type()) {
+ case HEAP_NUMBER_TYPE:
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ return;
+
+ case FIXED_ARRAY_TYPE:
+ case AWAIT_CONTEXT_TYPE:
+ case BLOCK_CONTEXT_TYPE:
+ case CATCH_CONTEXT_TYPE:
+ case DEBUG_EVALUATE_CONTEXT_TYPE:
+ case EVAL_CONTEXT_TYPE:
+ case FUNCTION_CONTEXT_TYPE:
+ case MODULE_CONTEXT_TYPE:
+ case NATIVE_CONTEXT_TYPE:
+ case SCRIPT_CONTEXT_TYPE:
+ case WITH_CONTEXT_TYPE:
+ case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
+ case HASH_TABLE_TYPE:
+ case ORDERED_HASH_MAP_TYPE:
+ case ORDERED_HASH_SET_TYPE:
+ case NAME_DICTIONARY_TYPE:
+ case GLOBAL_DICTIONARY_TYPE:
+ case NUMBER_DICTIONARY_TYPE:
+ case SIMPLE_NUMBER_DICTIONARY_TYPE:
+ case PROPERTY_ARRAY_TYPE:
+ case SCRIPT_CONTEXT_TABLE_TYPE:
+ case SLOPPY_ARGUMENTS_ELEMENTS_TYPE:
+ InitializeObjectWithTaggedFieldsAt(frame, &value_index, slot, map, no_gc);
+ break;
+
+ default:
+ CHECK(map->IsJSObjectMap());
+ InitializeJSObjectAt(frame, &value_index, slot, map, no_gc);
+ break;
+ }
+ CHECK_EQ(value_index, children_init_index);
+}
+
+void TranslatedState::EnsureObjectAllocatedAt(TranslatedValue* slot) {
+ slot = ResolveCapturedObject(slot);
+
+ if (slot->materialization_state() == TranslatedValue::kUninitialized) {
+ std::stack<int> worklist;
+ worklist.push(slot->object_index());
+ slot->mark_allocated();
+
+ while (!worklist.empty()) {
+ int index = worklist.top();
+ worklist.pop();
+ EnsureCapturedObjectAllocatedAt(index, &worklist);
+ }
+ }
+}
+
+int TranslatedValue::GetSmiValue() const {
+ Object value = GetRawValue();
+ CHECK(value.IsSmi());
+ return Smi::cast(value).value();
+}
+
+void TranslatedState::MaterializeFixedDoubleArray(TranslatedFrame* frame,
+ int* value_index,
+ TranslatedValue* slot,
+ Handle<Map> map) {
+ int length = frame->values_[*value_index].GetSmiValue();
+ (*value_index)++;
+ Handle<FixedDoubleArray> array = Handle<FixedDoubleArray>::cast(
+ isolate()->factory()->NewFixedDoubleArray(length));
+ CHECK_GT(length, 0);
+ for (int i = 0; i < length; i++) {
+ CHECK_NE(TranslatedValue::kCapturedObject,
+ frame->values_[*value_index].kind());
+ Handle<Object> value = frame->values_[*value_index].GetValue();
+ if (value->IsNumber()) {
+ array->set(i, value->Number());
+ } else {
+ CHECK(value.is_identical_to(isolate()->factory()->the_hole_value()));
+ array->set_the_hole(isolate(), i);
+ }
+ (*value_index)++;
+ }
+ slot->set_storage(array);
+}
+
+void TranslatedState::MaterializeHeapNumber(TranslatedFrame* frame,
+ int* value_index,
+ TranslatedValue* slot) {
+ CHECK_NE(TranslatedValue::kCapturedObject,
+ frame->values_[*value_index].kind());
+ Handle<Object> value = frame->values_[*value_index].GetValue();
+ CHECK(value->IsNumber());
+ Handle<HeapNumber> box = isolate()->factory()->NewHeapNumber(value->Number());
+ (*value_index)++;
+ slot->set_storage(box);
+}
+
+namespace {
+
+enum StorageKind : uint8_t {
+ kStoreTagged,
+ kStoreHeapObject
+};
+
+} // namespace
+
+void TranslatedState::SkipSlots(int slots_to_skip, TranslatedFrame* frame,
+ int* value_index) {
+ while (slots_to_skip > 0) {
+ TranslatedValue* slot = &(frame->values_[*value_index]);
+ (*value_index)++;
+ slots_to_skip--;
+
+ if (slot->kind() == TranslatedValue::kCapturedObject) {
+ slots_to_skip += slot->GetChildrenCount();
+ }
+ }
+}
+
+void TranslatedState::EnsureCapturedObjectAllocatedAt(
+ int object_index, std::stack<int>* worklist) {
+ CHECK_LT(static_cast<size_t>(object_index), object_positions_.size());
+ TranslatedState::ObjectPosition pos = object_positions_[object_index];
+ int value_index = pos.value_index_;
+
+ TranslatedFrame* frame = &(frames_[pos.frame_index_]);
+ TranslatedValue* slot = &(frame->values_[value_index]);
+ value_index++;
+
+ CHECK_EQ(TranslatedValue::kAllocated, slot->materialization_state());
+ CHECK_EQ(TranslatedValue::kCapturedObject, slot->kind());
+
+ // Read the map.
+ // The map should never be materialized, so let us check we already have
+ // an existing object here.
+ CHECK_EQ(frame->values_[value_index].kind(), TranslatedValue::kTagged);
+ Handle<Map> map = Handle<Map>::cast(frame->values_[value_index].GetValue());
+ CHECK(map->IsMap());
+ value_index++;
+
+ // Handle the special cases.
+ switch (map->instance_type()) {
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ // Materialize (i.e. allocate&initialize) the array and return since
+ // there is no need to process the children.
+ return MaterializeFixedDoubleArray(frame, &value_index, slot, map);
+
+ case HEAP_NUMBER_TYPE:
+ // Materialize (i.e. allocate&initialize) the heap number and return.
+ // There is no need to process the children.
+ return MaterializeHeapNumber(frame, &value_index, slot);
+
+ case FIXED_ARRAY_TYPE:
+ case SCRIPT_CONTEXT_TABLE_TYPE:
+ case AWAIT_CONTEXT_TYPE:
+ case BLOCK_CONTEXT_TYPE:
+ case CATCH_CONTEXT_TYPE:
+ case DEBUG_EVALUATE_CONTEXT_TYPE:
+ case EVAL_CONTEXT_TYPE:
+ case FUNCTION_CONTEXT_TYPE:
+ case MODULE_CONTEXT_TYPE:
+ case NATIVE_CONTEXT_TYPE:
+ case SCRIPT_CONTEXT_TYPE:
+ case WITH_CONTEXT_TYPE:
+ case HASH_TABLE_TYPE:
+ case ORDERED_HASH_MAP_TYPE:
+ case ORDERED_HASH_SET_TYPE:
+ case NAME_DICTIONARY_TYPE:
+ case GLOBAL_DICTIONARY_TYPE:
+ case NUMBER_DICTIONARY_TYPE:
+ case SIMPLE_NUMBER_DICTIONARY_TYPE: {
+ // Check we have the right size.
+ int array_length = frame->values_[value_index].GetSmiValue();
+ int instance_size = FixedArray::SizeFor(array_length);
+ CHECK_EQ(instance_size, slot->GetChildrenCount() * kTaggedSize);
+
+ // Canonicalize empty fixed array.
+ if (*map == ReadOnlyRoots(isolate()).empty_fixed_array().map() &&
+ array_length == 0) {
+ slot->set_storage(isolate()->factory()->empty_fixed_array());
+ } else {
+ slot->set_storage(AllocateStorageFor(slot));
+ }
+
+ // Make sure all the remaining children (after the map) are allocated.
+ return EnsureChildrenAllocated(slot->GetChildrenCount() - 1, frame,
+ &value_index, worklist);
+ }
+
+ case SLOPPY_ARGUMENTS_ELEMENTS_TYPE: {
+ // Verify that the arguments size is correct.
+ int args_length = frame->values_[value_index].GetSmiValue();
+ int args_size = SloppyArgumentsElements::SizeFor(args_length);
+ CHECK_EQ(args_size, slot->GetChildrenCount() * kTaggedSize);
+
+ slot->set_storage(AllocateStorageFor(slot));
+
+ // Make sure all the remaining children (after the map) are allocated.
+ return EnsureChildrenAllocated(slot->GetChildrenCount() - 1, frame,
+ &value_index, worklist);
+ }
+
+ case PROPERTY_ARRAY_TYPE: {
+ // Check we have the right size.
+ int length_or_hash = frame->values_[value_index].GetSmiValue();
+ int array_length = PropertyArray::LengthField::decode(length_or_hash);
+ int instance_size = PropertyArray::SizeFor(array_length);
+ CHECK_EQ(instance_size, slot->GetChildrenCount() * kTaggedSize);
+
+ slot->set_storage(AllocateStorageFor(slot));
+
+ // Make sure all the remaining children (after the map) are allocated.
+ return EnsureChildrenAllocated(slot->GetChildrenCount() - 1, frame,
+ &value_index, worklist);
+ }
+
+ default:
+ CHECK(map->IsJSObjectMap());
+ EnsureJSObjectAllocated(slot, map);
+ TranslatedValue* properties_slot = &(frame->values_[value_index]);
+ value_index++;
+ if (properties_slot->kind() == TranslatedValue::kCapturedObject) {
+ // If we are materializing the property array, make sure we put
+ // the mutable heap numbers at the right places.
+ EnsurePropertiesAllocatedAndMarked(properties_slot, map);
+ EnsureChildrenAllocated(properties_slot->GetChildrenCount(), frame,
+ &value_index, worklist);
+ }
+ // Make sure all the remaining children (after the map and properties) are
+ // allocated.
+ return EnsureChildrenAllocated(slot->GetChildrenCount() - 2, frame,
+ &value_index, worklist);
+ }
+ UNREACHABLE();
+}
+
+void TranslatedState::EnsureChildrenAllocated(int count, TranslatedFrame* frame,
+ int* value_index,
+ std::stack<int>* worklist) {
+ // Ensure all children are allocated.
+ for (int i = 0; i < count; i++) {
+ // If the field is an object that has not been allocated yet, queue it
+ // for initialization (and mark it as such).
+ TranslatedValue* child_slot = frame->ValueAt(*value_index);
+ if (child_slot->kind() == TranslatedValue::kCapturedObject ||
+ child_slot->kind() == TranslatedValue::kDuplicatedObject) {
+ child_slot = ResolveCapturedObject(child_slot);
+ if (child_slot->materialization_state() ==
+ TranslatedValue::kUninitialized) {
+ worklist->push(child_slot->object_index());
+ child_slot->mark_allocated();
+ }
+ } else {
+ // Make sure the simple values (heap numbers, etc.) are properly
+ // initialized.
+ child_slot->GetValue();
+ }
+ SkipSlots(1, frame, value_index);
+ }
+}
+
+void TranslatedState::EnsurePropertiesAllocatedAndMarked(
+ TranslatedValue* properties_slot, Handle<Map> map) {
+ CHECK_EQ(TranslatedValue::kUninitialized,
+ properties_slot->materialization_state());
+
+ Handle<ByteArray> object_storage = AllocateStorageFor(properties_slot);
+ properties_slot->mark_allocated();
+ properties_slot->set_storage(object_storage);
+
+ // Set markers for out-of-object properties.
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate());
+ for (InternalIndex i : map->IterateOwnDescriptors()) {
+ FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+ Representation representation = descriptors->GetDetails(i).representation();
+ if (!index.is_inobject() &&
+ (representation.IsDouble() || representation.IsHeapObject())) {
+ int outobject_index = index.outobject_array_index();
+ int array_index = outobject_index * kTaggedSize;
+ object_storage->set(array_index, kStoreHeapObject);
+ }
+ }
+}
+
+Handle<ByteArray> TranslatedState::AllocateStorageFor(TranslatedValue* slot) {
+ int allocate_size =
+ ByteArray::LengthFor(slot->GetChildrenCount() * kTaggedSize);
+ // It is important to allocate all the objects tenured so that the marker
+ // does not visit them.
+ Handle<ByteArray> object_storage =
+ isolate()->factory()->NewByteArray(allocate_size, AllocationType::kOld);
+ for (int i = 0; i < object_storage->length(); i++) {
+ object_storage->set(i, kStoreTagged);
+ }
+ return object_storage;
+}
+
+void TranslatedState::EnsureJSObjectAllocated(TranslatedValue* slot,
+ Handle<Map> map) {
+ CHECK_EQ(map->instance_size(), slot->GetChildrenCount() * kTaggedSize);
+
+ Handle<ByteArray> object_storage = AllocateStorageFor(slot);
+ // Now we handle the interesting (JSObject) case.
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate());
+
+ // Set markers for in-object properties.
+ for (InternalIndex i : map->IterateOwnDescriptors()) {
+ FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+ Representation representation = descriptors->GetDetails(i).representation();
+ if (index.is_inobject() &&
+ (representation.IsDouble() || representation.IsHeapObject())) {
+ CHECK_GE(index.index(), FixedArray::kHeaderSize / kTaggedSize);
+ int array_index = index.index() * kTaggedSize - FixedArray::kHeaderSize;
+ object_storage->set(array_index, kStoreHeapObject);
+ }
+ }
+ slot->set_storage(object_storage);
+}
+
+TranslatedValue* TranslatedState::GetResolvedSlot(TranslatedFrame* frame,
+ int value_index) {
+ TranslatedValue* slot = frame->ValueAt(value_index);
+ if (slot->kind() == TranslatedValue::kDuplicatedObject) {
+ slot = ResolveCapturedObject(slot);
+ }
+ CHECK_NE(slot->materialization_state(), TranslatedValue::kUninitialized);
+ return slot;
+}
+
+TranslatedValue* TranslatedState::GetResolvedSlotAndAdvance(
+ TranslatedFrame* frame, int* value_index) {
+ TranslatedValue* slot = GetResolvedSlot(frame, *value_index);
+ SkipSlots(1, frame, value_index);
+ return slot;
+}
+
+Handle<Object> TranslatedState::GetValueAndAdvance(TranslatedFrame* frame,
+ int* value_index) {
+ TranslatedValue* slot = GetResolvedSlot(frame, *value_index);
+ SkipSlots(1, frame, value_index);
+ return slot->GetValue();
+}
+
+void TranslatedState::InitializeJSObjectAt(
+ TranslatedFrame* frame, int* value_index, TranslatedValue* slot,
+ Handle<Map> map, const DisallowGarbageCollection& no_gc) {
+ Handle<HeapObject> object_storage = Handle<HeapObject>::cast(slot->storage_);
+ DCHECK_EQ(TranslatedValue::kCapturedObject, slot->kind());
+
+ // The object should have at least a map and some payload.
+ CHECK_GE(slot->GetChildrenCount(), 2);
+
+ // Notify the concurrent marker about the layout change.
+ isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_gc);
+
+ // Fill the property array field.
+ {
+ Handle<Object> properties = GetValueAndAdvance(frame, value_index);
+ WRITE_FIELD(*object_storage, JSObject::kPropertiesOrHashOffset,
+ *properties);
+ WRITE_BARRIER(*object_storage, JSObject::kPropertiesOrHashOffset,
+ *properties);
+ }
+
+ // For all the other fields we first look at the fixed array and check the
+ // marker to see if we store an unboxed double.
+ DCHECK_EQ(kTaggedSize, JSObject::kPropertiesOrHashOffset);
+ for (int i = 2; i < slot->GetChildrenCount(); i++) {
+ TranslatedValue* slot = GetResolvedSlotAndAdvance(frame, value_index);
+ // Read out the marker and ensure the field is consistent with
+ // what the markers in the storage say (note that all heap numbers
+ // should be fully initialized by now).
+ int offset = i * kTaggedSize;
+ uint8_t marker = object_storage->ReadField<uint8_t>(offset);
+ if (marker == kStoreHeapObject) {
+ Handle<HeapObject> field_value = slot->storage();
+ WRITE_FIELD(*object_storage, offset, *field_value);
+ WRITE_BARRIER(*object_storage, offset, *field_value);
+ } else {
+ CHECK_EQ(kStoreTagged, marker);
+ Handle<Object> field_value = slot->GetValue();
+ DCHECK_IMPLIES(field_value->IsHeapNumber(),
+ !IsSmiDouble(field_value->Number()));
+ WRITE_FIELD(*object_storage, offset, *field_value);
+ WRITE_BARRIER(*object_storage, offset, *field_value);
+ }
+ }
+ object_storage->synchronized_set_map(*map);
+}
+
+void TranslatedState::InitializeObjectWithTaggedFieldsAt(
+ TranslatedFrame* frame, int* value_index, TranslatedValue* slot,
+ Handle<Map> map, const DisallowGarbageCollection& no_gc) {
+ Handle<HeapObject> object_storage = Handle<HeapObject>::cast(slot->storage_);
+
+ // Skip the writes if we already have the canonical empty fixed array.
+ if (*object_storage == ReadOnlyRoots(isolate()).empty_fixed_array()) {
+ CHECK_EQ(2, slot->GetChildrenCount());
+ Handle<Object> length_value = GetValueAndAdvance(frame, value_index);
+ CHECK_EQ(*length_value, Smi::FromInt(0));
+ return;
+ }
+
+ // Notify the concurrent marker about the layout change.
+ isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_gc);
+
+ // Write the fields to the object.
+ for (int i = 1; i < slot->GetChildrenCount(); i++) {
+ TranslatedValue* slot = GetResolvedSlotAndAdvance(frame, value_index);
+ int offset = i * kTaggedSize;
+ uint8_t marker = object_storage->ReadField<uint8_t>(offset);
+ Handle<Object> field_value;
+ if (i > 1 && marker == kStoreHeapObject) {
+ field_value = slot->storage();
+ } else {
+ CHECK(marker == kStoreTagged || i == 1);
+ field_value = slot->GetValue();
+ DCHECK_IMPLIES(field_value->IsHeapNumber(),
+ !IsSmiDouble(field_value->Number()));
+ }
+ WRITE_FIELD(*object_storage, offset, *field_value);
+ WRITE_BARRIER(*object_storage, offset, *field_value);
+ }
+
+ object_storage->synchronized_set_map(*map);
+}
+
+TranslatedValue* TranslatedState::ResolveCapturedObject(TranslatedValue* slot) {
+ while (slot->kind() == TranslatedValue::kDuplicatedObject) {
+ slot = GetValueByObjectIndex(slot->object_index());
+ }
+ CHECK_EQ(TranslatedValue::kCapturedObject, slot->kind());
+ return slot;
+}
+
+TranslatedFrame* TranslatedState::GetFrameFromJSFrameIndex(int jsframe_index) {
+ for (size_t i = 0; i < frames_.size(); i++) {
+ if (frames_[i].kind() == TranslatedFrame::kUnoptimizedFunction ||
+ frames_[i].kind() == TranslatedFrame::kJavaScriptBuiltinContinuation ||
+ frames_[i].kind() ==
+ TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch) {
+ if (jsframe_index > 0) {
+ jsframe_index--;
+ } else {
+ return &(frames_[i]);
+ }
+ }
+ }
+ return nullptr;
+}
+
+TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
+ int jsframe_index, int* args_count) {
+ for (size_t i = 0; i < frames_.size(); i++) {
+ if (frames_[i].kind() == TranslatedFrame::kUnoptimizedFunction ||
+ frames_[i].kind() == TranslatedFrame::kJavaScriptBuiltinContinuation ||
+ frames_[i].kind() ==
+ TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch) {
+ if (jsframe_index > 0) {
+ jsframe_index--;
+ } else {
+        // We have the JS function frame, now check if it has an arguments
+        // adaptor.
+ if (i > 0 &&
+ frames_[i - 1].kind() == TranslatedFrame::kArgumentsAdaptor) {
+ *args_count = frames_[i - 1].height();
+ return &(frames_[i - 1]);
+ }
+
+        // JavaScriptBuiltinContinuation frames that are not preceded by
+        // an arguments adaptor frame are currently only used by C++ API calls
+ // from TurboFan. Calls to C++ API functions from TurboFan need
+ // a special marker frame state, otherwise the API call wouldn't
+ // be shown in a stack trace.
+ if (frames_[i].kind() ==
+ TranslatedFrame::kJavaScriptBuiltinContinuation &&
+ frames_[i].shared_info()->internal_formal_parameter_count() ==
+ kDontAdaptArgumentsSentinel) {
+ DCHECK(frames_[i].shared_info()->IsApiFunction());
+
+ // The argument count for this special case is always the second
+ // to last value in the TranslatedFrame. It should also always be
+ // {1}, as the GenericLazyDeoptContinuation builtin only has one
+ // argument (the receiver).
+ static constexpr int kTheContext = 1;
+ const int height = frames_[i].height() + kTheContext;
+ *args_count = frames_[i].ValueAt(height - 1)->GetSmiValue();
+ DCHECK_EQ(*args_count, 1);
+ } else {
+ *args_count = InternalFormalParameterCountWithReceiver(
+ *frames_[i].shared_info());
+ }
+ return &(frames_[i]);
+ }
+ }
+ }
+ return nullptr;
+}
+
+void TranslatedState::StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame) {
+ MaterializedObjectStore* materialized_store =
+ isolate_->materialized_object_store();
+ Handle<FixedArray> previously_materialized_objects =
+ materialized_store->Get(stack_frame_pointer_);
+
+ Handle<Object> marker = isolate_->factory()->arguments_marker();
+
+ int length = static_cast<int>(object_positions_.size());
+ bool new_store = false;
+ if (previously_materialized_objects.is_null()) {
+ previously_materialized_objects =
+ isolate_->factory()->NewFixedArray(length, AllocationType::kOld);
+ for (int i = 0; i < length; i++) {
+ previously_materialized_objects->set(i, *marker);
+ }
+ new_store = true;
+ }
+
+ CHECK_EQ(length, previously_materialized_objects->length());
+
+ bool value_changed = false;
+ for (int i = 0; i < length; i++) {
+ TranslatedState::ObjectPosition pos = object_positions_[i];
+ TranslatedValue* value_info =
+ &(frames_[pos.frame_index_].values_[pos.value_index_]);
+
+ CHECK(value_info->IsMaterializedObject());
+
+ // Skip duplicate objects (i.e., those that point to some other object id).
+ if (value_info->object_index() != i) continue;
+
+ Handle<Object> previous_value(previously_materialized_objects->get(i),
+ isolate_);
+ Handle<Object> value(value_info->GetRawValue(), isolate_);
+
+ if (value.is_identical_to(marker)) {
+ DCHECK_EQ(*previous_value, *marker);
+ } else {
+ if (*previous_value == *marker) {
+ if (value->IsSmi()) {
+ value = isolate()->factory()->NewHeapNumber(value->Number());
+ }
+ previously_materialized_objects->set(i, *value);
+ value_changed = true;
+ } else {
+ CHECK(*previous_value == *value ||
+ (previous_value->IsHeapNumber() && value->IsSmi() &&
+ previous_value->Number() == value->Number()));
+ }
+ }
+ }
+
+ if (new_store && value_changed) {
+ materialized_store->Set(stack_frame_pointer_,
+ previously_materialized_objects);
+ CHECK_EQ(frames_[0].kind(), TranslatedFrame::kUnoptimizedFunction);
+ CHECK_EQ(frame->function(), frames_[0].front().GetRawValue());
+ Deoptimizer::DeoptimizeFunction(frame->function(), frame->LookupCode());
+ }
+}
+
+void TranslatedState::UpdateFromPreviouslyMaterializedObjects() {
+ MaterializedObjectStore* materialized_store =
+ isolate_->materialized_object_store();
+ Handle<FixedArray> previously_materialized_objects =
+ materialized_store->Get(stack_frame_pointer_);
+
+ // If we have no previously materialized objects, there is nothing to do.
+ if (previously_materialized_objects.is_null()) return;
+
+ Handle<Object> marker = isolate_->factory()->arguments_marker();
+
+ int length = static_cast<int>(object_positions_.size());
+ CHECK_EQ(length, previously_materialized_objects->length());
+
+ for (int i = 0; i < length; i++) {
+    // For previously materialized objects, inject their values into the
+    // translated values.
+ if (previously_materialized_objects->get(i) != *marker) {
+ TranslatedState::ObjectPosition pos = object_positions_[i];
+ TranslatedValue* value_info =
+ &(frames_[pos.frame_index_].values_[pos.value_index_]);
+ CHECK(value_info->IsMaterializedObject());
+
+ if (value_info->kind() == TranslatedValue::kCapturedObject) {
+ Handle<Object> object(previously_materialized_objects->get(i),
+ isolate_);
+ CHECK(object->IsHeapObject());
+ value_info->set_initialized_storage(Handle<HeapObject>::cast(object));
+ }
+ }
+ }
+}
+
+void TranslatedState::VerifyMaterializedObjects() {
+#if VERIFY_HEAP
+ int length = static_cast<int>(object_positions_.size());
+ for (int i = 0; i < length; i++) {
+ TranslatedValue* slot = GetValueByObjectIndex(i);
+ if (slot->kind() == TranslatedValue::kCapturedObject) {
+ CHECK_EQ(slot, GetValueByObjectIndex(slot->object_index()));
+ if (slot->materialization_state() == TranslatedValue::kFinished) {
+ slot->storage()->ObjectVerify(isolate());
+ } else {
+ CHECK_EQ(slot->materialization_state(),
+ TranslatedValue::kUninitialized);
+ }
+ }
+ }
+#endif
+}
+
+bool TranslatedState::DoUpdateFeedback() {
+ if (!feedback_vector_handle_.is_null()) {
+ CHECK(!feedback_slot_.IsInvalid());
+ isolate()->CountUsage(v8::Isolate::kDeoptimizerDisableSpeculation);
+ FeedbackNexus nexus(feedback_vector_handle_, feedback_slot_);
+ nexus.SetSpeculationMode(SpeculationMode::kDisallowSpeculation);
+ return true;
+ }
+ return false;
+}
+
+void TranslatedState::ReadUpdateFeedback(TranslationArrayIterator* iterator,
+ FixedArray literal_array,
+ FILE* trace_file) {
+ CHECK_EQ(TranslationOpcode::UPDATE_FEEDBACK,
+ TranslationOpcodeFromInt(iterator->Next()));
+ feedback_vector_ = FeedbackVector::cast(literal_array.get(iterator->Next()));
+ feedback_slot_ = FeedbackSlot(iterator->Next());
+ if (trace_file != nullptr) {
+ PrintF(trace_file, " reading FeedbackVector (slot %d)\n",
+ feedback_slot_.ToInt());
+ }
+}
+
+} // namespace internal
+} // namespace v8
+
+// Undefine the heap manipulation macros.
+#include "src/objects/object-macros-undef.h"
diff --git a/deps/v8/src/deoptimizer/translated-state.h b/deps/v8/src/deoptimizer/translated-state.h
new file mode 100644
index 0000000000..eb8188a0ed
--- /dev/null
+++ b/deps/v8/src/deoptimizer/translated-state.h
@@ -0,0 +1,451 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEOPTIMIZER_TRANSLATED_STATE_H_
+#define V8_DEOPTIMIZER_TRANSLATED_STATE_H_
+
+#include <stack>
+#include <vector>
+
+#include "src/deoptimizer/translation-array.h"
+#include "src/objects/feedback-vector.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/shared-function-info.h"
+#include "src/utils/boxed-float.h"
+#include "src/wasm/value-type.h"
+
+namespace v8 {
+namespace internal {
+
+class RegisterValues;
+class TranslatedState;
+
+// TODO(jgruber): This duplicates decoding logic already present in
+// TranslatedState/TranslatedFrame. Deduplicate into one class, e.g. by basing
+// printing off TranslatedFrame.
+void TranslationArrayPrintSingleFrame(std::ostream& os,
+ TranslationArray translation_array,
+ int translation_index,
+ FixedArray literal_array);
+
+// The Translated{Value,Frame,State} class hierarchy is a set of utility
+// classes to work with the combination of translations (built from a
+// TranslationArray) and the actual current CPU state (represented by
+// RegisterValues).
+//
+// TranslatedState: describes the entire stack state of the current optimized
+// frame, contains:
+//
+// TranslatedFrame: describes a single unoptimized frame, contains:
+//
+// TranslatedValue: the actual value of some location.
+
+class TranslatedValue {
+ public:
+ // Allocation-free getter of the value.
+ // Returns ReadOnlyRoots::arguments_marker() if allocation would be necessary
+ // to get the value. In the case of numbers, returns a Smi if possible.
+ Object GetRawValue() const;
+
+ // Convenience wrapper around GetRawValue (checked).
+ int GetSmiValue() const;
+
+ // Returns the value, possibly materializing it first (and the whole subgraph
+ // reachable from this value). In the case of numbers, returns a Smi if
+ // possible.
+ Handle<Object> GetValue();
+
+ bool IsMaterializedObject() const;
+ bool IsMaterializableByDebugger() const;
+
+ private:
+ friend class TranslatedState;
+ friend class TranslatedFrame;
+ friend class Deoptimizer;
+
+ enum Kind : uint8_t {
+ kInvalid,
+ kTagged,
+ kInt32,
+ kInt64,
+ kInt64ToBigInt,
+ kUInt32,
+ kBoolBit,
+ kFloat,
+ kDouble,
+ kCapturedObject, // Object captured by the escape analysis.
+ // The number of nested objects can be obtained
+ // with the DeferredObjectLength() method
+ // (the values of the nested objects follow
+ // this value in the depth-first order.)
+ kDuplicatedObject // Duplicated object of a deferred object.
+ };
+
+ enum MaterializationState : uint8_t {
+ kUninitialized,
+ kAllocated, // Storage for the object has been allocated (or
+ // enqueued for allocation).
+ kFinished, // The object has been initialized (or enqueued for
+ // initialization).
+ };
+
+ TranslatedValue(TranslatedState* container, Kind kind)
+ : kind_(kind), container_(container) {}
+ Kind kind() const { return kind_; }
+ MaterializationState materialization_state() const {
+ return materialization_state_;
+ }
+ void Handlify();
+ int GetChildrenCount() const;
+
+ static TranslatedValue NewDeferredObject(TranslatedState* container,
+ int length, int object_index);
+ static TranslatedValue NewDuplicateObject(TranslatedState* container, int id);
+ static TranslatedValue NewFloat(TranslatedState* container, Float32 value);
+ static TranslatedValue NewDouble(TranslatedState* container, Float64 value);
+ static TranslatedValue NewInt32(TranslatedState* container, int32_t value);
+ static TranslatedValue NewInt64(TranslatedState* container, int64_t value);
+ static TranslatedValue NewInt64ToBigInt(TranslatedState* container,
+ int64_t value);
+ static TranslatedValue NewUInt32(TranslatedState* container, uint32_t value);
+ static TranslatedValue NewBool(TranslatedState* container, uint32_t value);
+ static TranslatedValue NewTagged(TranslatedState* container, Object literal);
+ static TranslatedValue NewInvalid(TranslatedState* container);
+
+ Isolate* isolate() const;
+
+ void set_storage(Handle<HeapObject> storage) { storage_ = storage; }
+ void set_initialized_storage(Handle<HeapObject> storage);
+ void mark_finished() { materialization_state_ = kFinished; }
+ void mark_allocated() { materialization_state_ = kAllocated; }
+
+ Handle<HeapObject> storage() {
+ DCHECK_NE(materialization_state(), kUninitialized);
+ return storage_;
+ }
+
+ Kind kind_;
+ MaterializationState materialization_state_ = kUninitialized;
+ TranslatedState* container_; // This is only needed for materialization of
+ // objects and constructing handles (to get
+ // to the isolate).
+
+ Handle<HeapObject> storage_; // Contains the materialized value or the
+ // byte-array that will be later morphed into
+ // the materialized object.
+
+ struct MaterializedObjectInfo {
+ int id_;
+ int length_; // Applies only to kCapturedObject kinds.
+ };
+
+ union {
+    // kind is kTagged. After handlification it is always nullptr.
+ Object raw_literal_;
+ // kind is kUInt32 or kBoolBit.
+ uint32_t uint32_value_;
+ // kind is kInt32.
+ int32_t int32_value_;
+ // kind is kInt64.
+ int64_t int64_value_;
+ // kind is kFloat
+ Float32 float_value_;
+ // kind is kDouble
+ Float64 double_value_;
+ // kind is kDuplicatedObject or kCapturedObject.
+ MaterializedObjectInfo materialization_info_;
+ };
+
+ // Checked accessors for the union members.
+ Object raw_literal() const;
+ int32_t int32_value() const;
+ int64_t int64_value() const;
+ uint32_t uint32_value() const;
+ Float32 float_value() const;
+ Float64 double_value() const;
+ int object_length() const;
+ int object_index() const;
+};
+
+class TranslatedFrame {
+ public:
+ enum Kind {
+ kUnoptimizedFunction,
+ kArgumentsAdaptor,
+ kConstructStub,
+ kBuiltinContinuation,
+ kJSToWasmBuiltinContinuation,
+ kJavaScriptBuiltinContinuation,
+ kJavaScriptBuiltinContinuationWithCatch,
+ kInvalid
+ };
+
+ int GetValueCount();
+
+ Kind kind() const { return kind_; }
+ BytecodeOffset bytecode_offset() const { return bytecode_offset_; }
+ Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
+
+ // TODO(jgruber): Simplify/clarify the semantics of this field. The name
+ // `height` is slightly misleading. Yes, this value is related to stack frame
+ // height, but must undergo additional mutations to arrive at the real stack
+ // frame height (e.g.: addition/subtraction of context, accumulator, fixed
+ // frame sizes, padding).
+ int height() const { return height_; }
+
+ int return_value_offset() const { return return_value_offset_; }
+ int return_value_count() const { return return_value_count_; }
+
+ SharedFunctionInfo raw_shared_info() const {
+ CHECK(!raw_shared_info_.is_null());
+ return raw_shared_info_;
+ }
+
+ class iterator {
+ public:
+ iterator& operator++() {
+ ++input_index_;
+ AdvanceIterator(&position_);
+ return *this;
+ }
+
+ iterator operator++(int) {
+ iterator original(position_, input_index_);
+ ++input_index_;
+ AdvanceIterator(&position_);
+ return original;
+ }
+
+ bool operator==(const iterator& other) const {
+ // Ignore {input_index_} for equality.
+ return position_ == other.position_;
+ }
+ bool operator!=(const iterator& other) const { return !(*this == other); }
+
+ TranslatedValue& operator*() { return (*position_); }
+ TranslatedValue* operator->() { return &(*position_); }
+ const TranslatedValue& operator*() const { return (*position_); }
+ const TranslatedValue* operator->() const { return &(*position_); }
+
+ int input_index() const { return input_index_; }
+
+ private:
+ friend TranslatedFrame;
+
+ explicit iterator(std::deque<TranslatedValue>::iterator position,
+ int input_index = 0)
+ : position_(position), input_index_(input_index) {}
+
+ std::deque<TranslatedValue>::iterator position_;
+ int input_index_;
+ };
+
+ using reference = TranslatedValue&;
+ using const_reference = TranslatedValue const&;
+
+ iterator begin() { return iterator(values_.begin()); }
+ iterator end() { return iterator(values_.end()); }
+
+ reference front() { return values_.front(); }
+ const_reference front() const { return values_.front(); }
+
+ // Only for Kind == kJSToWasmBuiltinContinuation
+ base::Optional<wasm::ValueKind> wasm_call_return_type() const {
+ DCHECK_EQ(kind(), kJSToWasmBuiltinContinuation);
+ return return_type_;
+ }
+
+ private:
+ friend class TranslatedState;
+ friend class Deoptimizer;
+
+ // Constructor static methods.
+ static TranslatedFrame UnoptimizedFrame(BytecodeOffset bytecode_offset,
+ SharedFunctionInfo shared_info,
+ int height, int return_value_offset,
+ int return_value_count);
+ static TranslatedFrame AccessorFrame(Kind kind,
+ SharedFunctionInfo shared_info);
+ static TranslatedFrame ArgumentsAdaptorFrame(SharedFunctionInfo shared_info,
+ int height);
+ static TranslatedFrame ConstructStubFrame(BytecodeOffset bailout_id,
+ SharedFunctionInfo shared_info,
+ int height);
+ static TranslatedFrame BuiltinContinuationFrame(
+ BytecodeOffset bailout_id, SharedFunctionInfo shared_info, int height);
+ static TranslatedFrame JSToWasmBuiltinContinuationFrame(
+ BytecodeOffset bailout_id, SharedFunctionInfo shared_info, int height,
+ base::Optional<wasm::ValueKind> return_type);
+ static TranslatedFrame JavaScriptBuiltinContinuationFrame(
+ BytecodeOffset bailout_id, SharedFunctionInfo shared_info, int height);
+ static TranslatedFrame JavaScriptBuiltinContinuationWithCatchFrame(
+ BytecodeOffset bailout_id, SharedFunctionInfo shared_info, int height);
+ static TranslatedFrame InvalidFrame() {
+ return TranslatedFrame(kInvalid, SharedFunctionInfo());
+ }
+
+ static void AdvanceIterator(std::deque<TranslatedValue>::iterator* iter);
+
+ TranslatedFrame(Kind kind,
+ SharedFunctionInfo shared_info = SharedFunctionInfo(),
+ int height = 0, int return_value_offset = 0,
+ int return_value_count = 0)
+ : kind_(kind),
+ bytecode_offset_(BytecodeOffset::None()),
+ raw_shared_info_(shared_info),
+ height_(height),
+ return_value_offset_(return_value_offset),
+ return_value_count_(return_value_count) {}
+
+ void Add(const TranslatedValue& value) { values_.push_back(value); }
+ TranslatedValue* ValueAt(int index) { return &(values_[index]); }
+ void Handlify();
+
+ Kind kind_;
+ BytecodeOffset bytecode_offset_;
+ SharedFunctionInfo raw_shared_info_;
+ Handle<SharedFunctionInfo> shared_info_;
+ int height_;
+ int return_value_offset_;
+ int return_value_count_;
+
+ using ValuesContainer = std::deque<TranslatedValue>;
+
+ ValuesContainer values_;
+
+ // Only for Kind == kJSToWasmBuiltinContinuation
+ base::Optional<wasm::ValueKind> return_type_;
+};
+
+// Auxiliary class for translating deoptimization values.
+// Typical usage sequence:
+//
+// 1. Construct the instance. This will involve reading out the translations
+// and resolving them to values using the supplied frame pointer and
+// machine state (registers). This phase is guaranteed not to allocate
+// and not to use any HandleScope. Any object pointers will be stored raw.
+//
+// 2. Handlify pointers. This will convert all the raw pointers to handles.
+//
+// 3. Reading out the frame values.
+//
+// Note: After the instance is constructed, it is possible to iterate over
+// the values eagerly.
+
+class TranslatedState {
+ public:
+ TranslatedState() = default;
+ explicit TranslatedState(const JavaScriptFrame* frame);
+
+ void Prepare(Address stack_frame_pointer);
+
+ // Store newly materialized values into the isolate.
+ void StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame);
+
+ using iterator = std::vector<TranslatedFrame>::iterator;
+ iterator begin() { return frames_.begin(); }
+ iterator end() { return frames_.end(); }
+
+ using const_iterator = std::vector<TranslatedFrame>::const_iterator;
+ const_iterator begin() const { return frames_.begin(); }
+ const_iterator end() const { return frames_.end(); }
+
+ std::vector<TranslatedFrame>& frames() { return frames_; }
+
+ TranslatedFrame* GetFrameFromJSFrameIndex(int jsframe_index);
+ TranslatedFrame* GetArgumentsInfoFromJSFrameIndex(int jsframe_index,
+ int* arguments_count);
+
+ Isolate* isolate() { return isolate_; }
+
+ void Init(Isolate* isolate, Address input_frame_pointer,
+ Address stack_frame_pointer, TranslationArrayIterator* iterator,
+ FixedArray literal_array, RegisterValues* registers,
+ FILE* trace_file, int parameter_count, int actual_argument_count);
+
+ void VerifyMaterializedObjects();
+ bool DoUpdateFeedback();
+
+ private:
+ friend TranslatedValue;
+
+ TranslatedFrame CreateNextTranslatedFrame(TranslationArrayIterator* iterator,
+ FixedArray literal_array,
+ Address fp, FILE* trace_file);
+ int CreateNextTranslatedValue(int frame_index,
+ TranslationArrayIterator* iterator,
+ FixedArray literal_array, Address fp,
+ RegisterValues* registers, FILE* trace_file);
+ Address DecompressIfNeeded(intptr_t value);
+ void CreateArgumentsElementsTranslatedValues(int frame_index,
+ Address input_frame_pointer,
+ CreateArgumentsType type,
+ FILE* trace_file);
+
+ void UpdateFromPreviouslyMaterializedObjects();
+ void MaterializeFixedDoubleArray(TranslatedFrame* frame, int* value_index,
+ TranslatedValue* slot, Handle<Map> map);
+ void MaterializeHeapNumber(TranslatedFrame* frame, int* value_index,
+ TranslatedValue* slot);
+
+ void EnsureObjectAllocatedAt(TranslatedValue* slot);
+
+ void SkipSlots(int slots_to_skip, TranslatedFrame* frame, int* value_index);
+
+ Handle<ByteArray> AllocateStorageFor(TranslatedValue* slot);
+ void EnsureJSObjectAllocated(TranslatedValue* slot, Handle<Map> map);
+ void EnsurePropertiesAllocatedAndMarked(TranslatedValue* properties_slot,
+ Handle<Map> map);
+ void EnsureChildrenAllocated(int count, TranslatedFrame* frame,
+ int* value_index, std::stack<int>* worklist);
+ void EnsureCapturedObjectAllocatedAt(int object_index,
+ std::stack<int>* worklist);
+ Handle<HeapObject> InitializeObjectAt(TranslatedValue* slot);
+ void InitializeCapturedObjectAt(int object_index, std::stack<int>* worklist,
+ const DisallowGarbageCollection& no_gc);
+ void InitializeJSObjectAt(TranslatedFrame* frame, int* value_index,
+ TranslatedValue* slot, Handle<Map> map,
+ const DisallowGarbageCollection& no_gc);
+ void InitializeObjectWithTaggedFieldsAt(
+ TranslatedFrame* frame, int* value_index, TranslatedValue* slot,
+ Handle<Map> map, const DisallowGarbageCollection& no_gc);
+
+ void ReadUpdateFeedback(TranslationArrayIterator* iterator,
+ FixedArray literal_array, FILE* trace_file);
+
+ TranslatedValue* ResolveCapturedObject(TranslatedValue* slot);
+ TranslatedValue* GetValueByObjectIndex(int object_index);
+ Handle<Object> GetValueAndAdvance(TranslatedFrame* frame, int* value_index);
+ TranslatedValue* GetResolvedSlot(TranslatedFrame* frame, int value_index);
+ TranslatedValue* GetResolvedSlotAndAdvance(TranslatedFrame* frame,
+ int* value_index);
+
+ static uint32_t GetUInt32Slot(Address fp, int slot_index);
+ static uint64_t GetUInt64Slot(Address fp, int slot_index);
+ static Float32 GetFloatSlot(Address fp, int slot_index);
+ static Float64 GetDoubleSlot(Address fp, int slot_index);
+
+ std::vector<TranslatedFrame> frames_;
+ Isolate* isolate_ = nullptr;
+ Address stack_frame_pointer_ = kNullAddress;
+ int formal_parameter_count_;
+ int actual_argument_count_;
+
+ struct ObjectPosition {
+ int frame_index_;
+ int value_index_;
+ };
+ std::deque<ObjectPosition> object_positions_;
+ Handle<FeedbackVector> feedback_vector_handle_;
+ FeedbackVector feedback_vector_;
+ FeedbackSlot feedback_slot_;
+};
+
+// Return type encoding for a Wasm function returning void.
+const int kNoWasmReturnType = -1;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DEOPTIMIZER_TRANSLATED_STATE_H_
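[Editorial note] The class comment above describes a three-phase protocol (construct, handlify, read out). The rough sketch below shows what that sequence might look like for a caller inside V8; it is illustrative only, the use of js_frame->fp() as the stack frame pointer passed to Prepare() is an assumption, and error handling is omitted.

// Phase 1: construct. Reads the translations and the current machine state
// without allocating; object pointers are kept raw at this point.
void DumpTranslatedValues(JavaScriptFrame* js_frame) {
  TranslatedState translated(js_frame);

  // Phase 2: handlify. Prepare() converts raw pointers to handles
  // (passing js_frame->fp() here is an assumption of this sketch).
  translated.Prepare(js_frame->fp());

  // Phase 3: read out the frame values, materializing where necessary.
  for (TranslatedFrame& frame : translated) {
    for (TranslatedValue& value : frame) {
      Handle<Object> obj = value.GetValue();
      obj->ShortPrint();
      PrintF("\n");
    }
  }
}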
diff --git a/deps/v8/src/deoptimizer/translation-array.cc b/deps/v8/src/deoptimizer/translation-array.cc
new file mode 100644
index 0000000000..0e1ee34b40
--- /dev/null
+++ b/deps/v8/src/deoptimizer/translation-array.cc
@@ -0,0 +1,356 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/deoptimizer/translation-array.h"
+
+#include "src/deoptimizer/translated-state.h"
+#include "src/objects/fixed-array-inl.h"
+#include "third_party/zlib/google/compression_utils_portable.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+// Constants describing compressed TranslationArray layout. Only relevant if
+// --turbo-compress-translation-arrays is enabled.
+constexpr int kUncompressedSizeOffset = 0;
+constexpr int kUncompressedSizeSize = kInt32Size;
+constexpr int kCompressedDataOffset =
+ kUncompressedSizeOffset + kUncompressedSizeSize;
+constexpr int kTranslationArrayElementSize = kInt32Size;
+
+// Encodes the return type of a Wasm function as the integer value of
+// wasm::ValueKind, or kNoWasmReturnType if the function returns void.
+int EncodeWasmReturnType(base::Optional<wasm::ValueKind> return_type) {
+ return return_type ? static_cast<int>(return_type.value())
+ : kNoWasmReturnType;
+}
+
+} // namespace
+
+TranslationArrayIterator::TranslationArrayIterator(TranslationArray buffer,
+ int index)
+ : buffer_(buffer), index_(index) {
+ if (V8_UNLIKELY(FLAG_turbo_compress_translation_arrays)) {
+ const int size = buffer_.get_int(kUncompressedSizeOffset);
+ uncompressed_contents_.insert(uncompressed_contents_.begin(), size, 0);
+
+ uLongf uncompressed_size = size * kTranslationArrayElementSize;
+
+ CHECK_EQ(
+ zlib_internal::UncompressHelper(
+ zlib_internal::ZRAW,
+ bit_cast<Bytef*>(uncompressed_contents_.data()), &uncompressed_size,
+ buffer_.GetDataStartAddress() + kCompressedDataOffset,
+ buffer_.DataSize()),
+ Z_OK);
+ DCHECK(index >= 0 && index < size);
+ } else {
+ DCHECK(index >= 0 && index < buffer.length());
+ }
+}
+
+int32_t TranslationArrayIterator::Next() {
+ if (V8_UNLIKELY(FLAG_turbo_compress_translation_arrays)) {
+ return uncompressed_contents_[index_++];
+ } else {
+ // Run through the bytes until we reach one with a least significant
+ // bit of zero (marks the end).
+ uint32_t bits = 0;
+ for (int i = 0; true; i += 7) {
+ DCHECK(HasNext());
+ uint8_t next = buffer_.get(index_++);
+ bits |= (next >> 1) << i;
+ if ((next & 1) == 0) break;
+ }
+ // The bits encode the sign in the least significant bit.
+ bool is_negative = (bits & 1) == 1;
+ int32_t result = bits >> 1;
+ return is_negative ? -result : result;
+ }
+}
+
+bool TranslationArrayIterator::HasNext() const {
+ if (V8_UNLIKELY(FLAG_turbo_compress_translation_arrays)) {
+ return index_ < static_cast<int>(uncompressed_contents_.size());
+ } else {
+ return index_ < buffer_.length();
+ }
+}
+
+void TranslationArrayBuilder::Add(int32_t value) {
+ if (V8_UNLIKELY(FLAG_turbo_compress_translation_arrays)) {
+ contents_for_compression_.push_back(value);
+ } else {
+ // This wouldn't handle kMinInt correctly if it ever encountered it.
+ DCHECK_NE(value, kMinInt);
+ // Encode the sign bit in the least significant bit.
+ bool is_negative = (value < 0);
+ uint32_t bits = (static_cast<uint32_t>(is_negative ? -value : value) << 1) |
+ static_cast<uint32_t>(is_negative);
+ // Encode the individual bytes using the least significant bit of
+ // each byte to indicate whether or not more bytes follow.
+ do {
+ uint32_t next = bits >> 7;
+ contents_.push_back(((bits << 1) & 0xFF) | (next != 0));
+ bits = next;
+ } while (bits != 0);
+ }
+}
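[Editorial note] Below is a minimal standalone sketch (not part of the patch; EncodeSigned/DecodeSigned are illustrative names) of the variable-length integer encoding that TranslationArrayBuilder::Add emits and TranslationArrayIterator::Next consumes in the uncompressed path above: the sign lives in the least significant payload bit, and each emitted byte carries 7 payload bits plus a continuation flag in bit 0.

#include <cassert>
#include <cstdint>
#include <vector>

// Encode one value (kMinInt is excluded, as in the builder above).
void EncodeSigned(int32_t value, std::vector<uint8_t>* out) {
  bool is_negative = value < 0;
  uint32_t bits = (static_cast<uint32_t>(is_negative ? -value : value) << 1) |
                  static_cast<uint32_t>(is_negative);
  do {
    uint32_t next = bits >> 7;
    // Bit 0 of each byte: 1 if more bytes follow, 0 on the last byte.
    out->push_back(static_cast<uint8_t>(((bits << 1) & 0xFF) | (next != 0)));
    bits = next;
  } while (bits != 0);
}

// Decode one value starting at *pos, advancing *pos past it.
int32_t DecodeSigned(const std::vector<uint8_t>& in, size_t* pos) {
  uint32_t bits = 0;
  for (int shift = 0; true; shift += 7) {
    uint8_t byte = in[(*pos)++];
    bits |= static_cast<uint32_t>(byte >> 1) << shift;
    if ((byte & 1) == 0) break;  // Continuation bit clear: end of value.
  }
  bool is_negative = (bits & 1) == 1;
  int32_t magnitude = static_cast<int32_t>(bits >> 1);
  return is_negative ? -magnitude : magnitude;
}

int main() {
  std::vector<uint8_t> buf;
  for (int32_t v : {0, 1, -1, 63, 64, -200, 123456789}) EncodeSigned(v, &buf);
  size_t pos = 0;
  for (int32_t v : {0, 1, -1, 63, 64, -200, 123456789}) {
    assert(DecodeSigned(buf, &pos) == v);
  }
  return 0;
}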
+
+Handle<TranslationArray> TranslationArrayBuilder::ToTranslationArray(
+ Factory* factory) {
+ if (V8_UNLIKELY(FLAG_turbo_compress_translation_arrays)) {
+ const int input_size = SizeInBytes();
+ uLongf compressed_data_size = compressBound(input_size);
+
+ ZoneVector<byte> compressed_data(compressed_data_size, zone());
+
+ CHECK_EQ(
+ zlib_internal::CompressHelper(
+ zlib_internal::ZRAW, compressed_data.data(), &compressed_data_size,
+ bit_cast<const Bytef*>(contents_for_compression_.data()),
+ input_size, Z_DEFAULT_COMPRESSION, nullptr, nullptr),
+ Z_OK);
+
+ const int translation_array_size =
+ static_cast<int>(compressed_data_size) + kUncompressedSizeSize;
+ Handle<TranslationArray> result =
+ factory->NewByteArray(translation_array_size, AllocationType::kOld);
+
+ result->set_int(kUncompressedSizeOffset, Size());
+ std::memcpy(result->GetDataStartAddress() + kCompressedDataOffset,
+ compressed_data.data(), compressed_data_size);
+
+ return result;
+ } else {
+ Handle<TranslationArray> result =
+ factory->NewByteArray(SizeInBytes(), AllocationType::kOld);
+ memcpy(result->GetDataStartAddress(), contents_.data(),
+ contents_.size() * sizeof(uint8_t));
+ return result;
+ }
+}
+
+void TranslationArrayBuilder::BeginBuiltinContinuationFrame(
+ BytecodeOffset bytecode_offset, int literal_id, unsigned height) {
+ auto opcode = TranslationOpcode::BUILTIN_CONTINUATION_FRAME;
+ Add(opcode);
+ Add(bytecode_offset.ToInt());
+ Add(literal_id);
+ Add(height);
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 3);
+}
+
+void TranslationArrayBuilder::BeginJSToWasmBuiltinContinuationFrame(
+ BytecodeOffset bytecode_offset, int literal_id, unsigned height,
+ base::Optional<wasm::ValueKind> return_type) {
+ auto opcode = TranslationOpcode::JS_TO_WASM_BUILTIN_CONTINUATION_FRAME;
+ Add(opcode);
+ Add(bytecode_offset.ToInt());
+ Add(literal_id);
+ Add(height);
+ Add(EncodeWasmReturnType(return_type));
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 4);
+}
+
+void TranslationArrayBuilder::BeginJavaScriptBuiltinContinuationFrame(
+ BytecodeOffset bytecode_offset, int literal_id, unsigned height) {
+ auto opcode = TranslationOpcode::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME;
+ Add(opcode);
+ Add(bytecode_offset.ToInt());
+ Add(literal_id);
+ Add(height);
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 3);
+}
+
+void TranslationArrayBuilder::BeginJavaScriptBuiltinContinuationWithCatchFrame(
+ BytecodeOffset bytecode_offset, int literal_id, unsigned height) {
+ auto opcode =
+ TranslationOpcode::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME;
+ Add(opcode);
+ Add(bytecode_offset.ToInt());
+ Add(literal_id);
+ Add(height);
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 3);
+}
+
+void TranslationArrayBuilder::BeginConstructStubFrame(
+ BytecodeOffset bytecode_offset, int literal_id, unsigned height) {
+ auto opcode = TranslationOpcode::CONSTRUCT_STUB_FRAME;
+ Add(opcode);
+ Add(bytecode_offset.ToInt());
+ Add(literal_id);
+ Add(height);
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 3);
+}
+
+void TranslationArrayBuilder::BeginArgumentsAdaptorFrame(int literal_id,
+ unsigned height) {
+ auto opcode = TranslationOpcode::ARGUMENTS_ADAPTOR_FRAME;
+ Add(opcode);
+ Add(literal_id);
+ Add(height);
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 2);
+}
+
+void TranslationArrayBuilder::BeginInterpretedFrame(
+ BytecodeOffset bytecode_offset, int literal_id, unsigned height,
+ int return_value_offset, int return_value_count) {
+ auto opcode = TranslationOpcode::INTERPRETED_FRAME;
+ Add(opcode);
+ Add(bytecode_offset.ToInt());
+ Add(literal_id);
+ Add(height);
+ Add(return_value_offset);
+ Add(return_value_count);
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 5);
+}
+
+void TranslationArrayBuilder::ArgumentsElements(CreateArgumentsType type) {
+ auto opcode = TranslationOpcode::ARGUMENTS_ELEMENTS;
+ Add(opcode);
+ Add(static_cast<uint8_t>(type));
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+}
+
+void TranslationArrayBuilder::ArgumentsLength() {
+ auto opcode = TranslationOpcode::ARGUMENTS_LENGTH;
+ Add(opcode);
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 0);
+}
+
+void TranslationArrayBuilder::BeginCapturedObject(int length) {
+ auto opcode = TranslationOpcode::CAPTURED_OBJECT;
+ Add(opcode);
+ Add(length);
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+}
+
+void TranslationArrayBuilder::DuplicateObject(int object_index) {
+ auto opcode = TranslationOpcode::DUPLICATED_OBJECT;
+ Add(opcode);
+ Add(object_index);
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+}
+
+void TranslationArrayBuilder::StoreRegister(Register reg) {
+ auto opcode = TranslationOpcode::REGISTER;
+ Add(opcode);
+ Add(reg.code());
+}
+
+void TranslationArrayBuilder::StoreInt32Register(Register reg) {
+ auto opcode = TranslationOpcode::INT32_REGISTER;
+ Add(opcode);
+ Add(reg.code());
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+}
+
+void TranslationArrayBuilder::StoreInt64Register(Register reg) {
+ auto opcode = TranslationOpcode::INT64_REGISTER;
+ Add(opcode);
+ Add(reg.code());
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+}
+
+void TranslationArrayBuilder::StoreUint32Register(Register reg) {
+ auto opcode = TranslationOpcode::UINT32_REGISTER;
+ Add(opcode);
+ Add(reg.code());
+}
+
+void TranslationArrayBuilder::StoreBoolRegister(Register reg) {
+ auto opcode = TranslationOpcode::BOOL_REGISTER;
+ Add(opcode);
+ Add(reg.code());
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+}
+
+void TranslationArrayBuilder::StoreFloatRegister(FloatRegister reg) {
+ auto opcode = TranslationOpcode::FLOAT_REGISTER;
+ Add(opcode);
+ Add(reg.code());
+}
+
+void TranslationArrayBuilder::StoreDoubleRegister(DoubleRegister reg) {
+ auto opcode = TranslationOpcode::DOUBLE_REGISTER;
+ Add(opcode);
+ Add(reg.code());
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+}
+
+void TranslationArrayBuilder::StoreStackSlot(int index) {
+ auto opcode = TranslationOpcode::STACK_SLOT;
+ Add(opcode);
+ Add(index);
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+}
+
+void TranslationArrayBuilder::StoreInt32StackSlot(int index) {
+ auto opcode = TranslationOpcode::INT32_STACK_SLOT;
+ Add(opcode);
+ Add(index);
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+}
+
+void TranslationArrayBuilder::StoreInt64StackSlot(int index) {
+ auto opcode = TranslationOpcode::INT64_STACK_SLOT;
+ Add(opcode);
+ Add(index);
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+}
+
+void TranslationArrayBuilder::StoreUint32StackSlot(int index) {
+ auto opcode = TranslationOpcode::UINT32_STACK_SLOT;
+ Add(opcode);
+ Add(index);
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+}
+
+void TranslationArrayBuilder::StoreBoolStackSlot(int index) {
+ auto opcode = TranslationOpcode::BOOL_STACK_SLOT;
+ Add(opcode);
+ Add(index);
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+}
+
+void TranslationArrayBuilder::StoreFloatStackSlot(int index) {
+ auto opcode = TranslationOpcode::FLOAT_STACK_SLOT;
+ Add(opcode);
+ Add(index);
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+}
+
+void TranslationArrayBuilder::StoreDoubleStackSlot(int index) {
+ auto opcode = TranslationOpcode::DOUBLE_STACK_SLOT;
+ Add(opcode);
+ Add(index);
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+}
+
+void TranslationArrayBuilder::StoreLiteral(int literal_id) {
+ auto opcode = TranslationOpcode::LITERAL;
+ Add(opcode);
+ Add(literal_id);
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 1);
+}
+
+void TranslationArrayBuilder::AddUpdateFeedback(int vector_literal, int slot) {
+ auto opcode = TranslationOpcode::UPDATE_FEEDBACK;
+ Add(opcode);
+ Add(vector_literal);
+ Add(slot);
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 2);
+}
+
+void TranslationArrayBuilder::StoreJSFrameFunction() {
+ StoreStackSlot((StandardFrameConstants::kCallerPCOffset -
+ StandardFrameConstants::kFunctionOffset) /
+ kSystemPointerSize);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/deoptimizer/translation-array.h b/deps/v8/src/deoptimizer/translation-array.h
new file mode 100644
index 0000000000..db6be0f87b
--- /dev/null
+++ b/deps/v8/src/deoptimizer/translation-array.h
@@ -0,0 +1,125 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEOPTIMIZER_TRANSLATION_ARRAY_H_
+#define V8_DEOPTIMIZER_TRANSLATION_ARRAY_H_
+
+#include "src/codegen/register-arch.h"
+#include "src/deoptimizer/translation-opcode.h"
+#include "src/objects/fixed-array.h"
+#include "src/wasm/value-type.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class Factory;
+
+// The TranslationArray is the on-heap representation of translations created
+// during code generation in a (zone-allocated) TranslationArrayBuilder. The
+// translation array specifies how to transform an optimized frame back into
+// one or more unoptimized frames.
+// TODO(jgruber): Consider a real type instead of this type alias.
+using TranslationArray = ByteArray;
+
+class TranslationArrayIterator {
+ public:
+ TranslationArrayIterator(TranslationArray buffer, int index);
+
+ int32_t Next();
+
+ bool HasNext() const;
+
+ void Skip(int n) {
+ for (int i = 0; i < n; i++) Next();
+ }
+
+ private:
+ std::vector<int32_t> uncompressed_contents_;
+ TranslationArray buffer_;
+ int index_;
+};
+
+class TranslationArrayBuilder {
+ public:
+ explicit TranslationArrayBuilder(Zone* zone)
+ : contents_(zone), contents_for_compression_(zone), zone_(zone) {}
+
+ Handle<TranslationArray> ToTranslationArray(Factory* factory);
+
+ int BeginTranslation(int frame_count, int jsframe_count,
+ int update_feedback_count) {
+ int start_index = Size();
+ auto opcode = TranslationOpcode::BEGIN;
+ Add(opcode);
+ Add(frame_count);
+ Add(jsframe_count);
+ Add(update_feedback_count);
+ DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 3);
+ return start_index;
+ }
+
+ void BeginInterpretedFrame(BytecodeOffset bytecode_offset, int literal_id,
+ unsigned height, int return_value_offset,
+ int return_value_count);
+ void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
+ void BeginConstructStubFrame(BytecodeOffset bailout_id, int literal_id,
+ unsigned height);
+ void BeginBuiltinContinuationFrame(BytecodeOffset bailout_id, int literal_id,
+ unsigned height);
+ void BeginJSToWasmBuiltinContinuationFrame(
+ BytecodeOffset bailout_id, int literal_id, unsigned height,
+ base::Optional<wasm::ValueKind> return_type);
+ void BeginJavaScriptBuiltinContinuationFrame(BytecodeOffset bailout_id,
+ int literal_id, unsigned height);
+ void BeginJavaScriptBuiltinContinuationWithCatchFrame(
+ BytecodeOffset bailout_id, int literal_id, unsigned height);
+ void ArgumentsElements(CreateArgumentsType type);
+ void ArgumentsLength();
+ void BeginCapturedObject(int length);
+ void AddUpdateFeedback(int vector_literal, int slot);
+ void DuplicateObject(int object_index);
+ void StoreRegister(Register reg);
+ void StoreInt32Register(Register reg);
+ void StoreInt64Register(Register reg);
+ void StoreUint32Register(Register reg);
+ void StoreBoolRegister(Register reg);
+ void StoreFloatRegister(FloatRegister reg);
+ void StoreDoubleRegister(DoubleRegister reg);
+ void StoreStackSlot(int index);
+ void StoreInt32StackSlot(int index);
+ void StoreInt64StackSlot(int index);
+ void StoreUint32StackSlot(int index);
+ void StoreBoolStackSlot(int index);
+ void StoreFloatStackSlot(int index);
+ void StoreDoubleStackSlot(int index);
+ void StoreLiteral(int literal_id);
+ void StoreJSFrameFunction();
+
+ private:
+ void Add(int32_t value);
+ void Add(TranslationOpcode opcode) { Add(static_cast<int32_t>(opcode)); }
+
+ int Size() const {
+ return V8_UNLIKELY(FLAG_turbo_compress_translation_arrays)
+ ? static_cast<int>(contents_for_compression_.size())
+ : static_cast<int>(contents_.size());
+ }
+ int SizeInBytes() const {
+ return V8_UNLIKELY(FLAG_turbo_compress_translation_arrays)
+ ? Size() * kInt32Size
+ : Size();
+ }
+
+ Zone* zone() const { return zone_; }
+
+ ZoneVector<uint8_t> contents_;
+ ZoneVector<int32_t> contents_for_compression_;
+ Zone* const zone_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DEOPTIMIZER_TRANSLATION_ARRAY_H_
diff --git a/deps/v8/src/deoptimizer/translation-opcode.h b/deps/v8/src/deoptimizer/translation-opcode.h
new file mode 100644
index 0000000000..a91a948d8e
--- /dev/null
+++ b/deps/v8/src/deoptimizer/translation-opcode.h
@@ -0,0 +1,71 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEOPTIMIZER_TRANSLATION_OPCODE_H_
+#define V8_DEOPTIMIZER_TRANSLATION_OPCODE_H_
+
+namespace v8 {
+namespace internal {
+
+// V(name, operand_count)
+#define TRANSLATION_OPCODE_LIST(V) \
+ V(ARGUMENTS_ADAPTOR_FRAME, 2) \
+ V(ARGUMENTS_ELEMENTS, 1) \
+ V(ARGUMENTS_LENGTH, 0) \
+ V(BEGIN, 3) \
+ V(BOOL_REGISTER, 1) \
+ V(BOOL_STACK_SLOT, 1) \
+ V(BUILTIN_CONTINUATION_FRAME, 3) \
+ V(CAPTURED_OBJECT, 1) \
+ V(CONSTRUCT_STUB_FRAME, 3) \
+ V(DOUBLE_REGISTER, 1) \
+ V(DOUBLE_STACK_SLOT, 1) \
+ V(DUPLICATED_OBJECT, 1) \
+ V(FLOAT_REGISTER, 1) \
+ V(FLOAT_STACK_SLOT, 1) \
+ V(INT32_REGISTER, 1) \
+ V(INT32_STACK_SLOT, 1) \
+ V(INT64_REGISTER, 1) \
+ V(INT64_STACK_SLOT, 1) \
+ V(INTERPRETED_FRAME, 5) \
+ V(JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME, 3) \
+ V(JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME, 3) \
+ V(JS_TO_WASM_BUILTIN_CONTINUATION_FRAME, 4) \
+ V(LITERAL, 1) \
+ V(REGISTER, 1) \
+ V(STACK_SLOT, 1) \
+ V(UINT32_REGISTER, 1) \
+ V(UINT32_STACK_SLOT, 1) \
+ V(UPDATE_FEEDBACK, 2)
+
+enum class TranslationOpcode {
+#define CASE(name, ...) name,
+ TRANSLATION_OPCODE_LIST(CASE)
+#undef CASE
+};
+
+constexpr TranslationOpcode TranslationOpcodeFromInt(int i) {
+ return static_cast<TranslationOpcode>(i);
+}
+
+inline int TranslationOpcodeOperandCount(TranslationOpcode o) {
+#define CASE(name, operand_count) operand_count,
+ static const int counts[] = {TRANSLATION_OPCODE_LIST(CASE)};
+#undef CASE
+ return counts[static_cast<int>(o)];
+}
+
+inline const char* TranslationOpcodeToString(TranslationOpcode o) {
+#define CASE(name, ...) #name,
+ static const char* const names[] = {TRANSLATION_OPCODE_LIST(CASE)};
+#undef CASE
+ return names[static_cast<int>(o)];
+}
+
+#undef TRANSLATION_OPCODE_LIST
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DEOPTIMIZER_TRANSLATION_OPCODE_H_
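[Editorial note] As a hedged illustration of how the operand-count table can drive a generic walk over a TranslationArray (together with TranslationArrayIterator from translation-array.h), the sketch below reads each opcode and skips exactly the operands that TRANSLATION_OPCODE_LIST declares for it. It assumes a non-empty array and is not the decoding loop TranslatedState itself uses.

// Prints every opcode in the array, skipping operands by table lookup.
void PrintTranslationOpcodes(TranslationArray array) {
  TranslationArrayIterator it(array, 0);
  while (it.HasNext()) {
    TranslationOpcode opcode = TranslationOpcodeFromInt(it.Next());
    PrintF("%s\n", TranslationOpcodeToString(opcode));
    it.Skip(TranslationOpcodeOperandCount(opcode));
  }
}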
diff --git a/deps/v8/src/diagnostics/arm/disasm-arm.cc b/deps/v8/src/diagnostics/arm/disasm-arm.cc
index 1e909ff564..1be66425db 100644
--- a/deps/v8/src/diagnostics/arm/disasm-arm.cc
+++ b/deps/v8/src/diagnostics/arm/disasm-arm.cc
@@ -2275,6 +2275,10 @@ void Decoder::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
Format(instr, q ? "vcnt.8 'Qd, 'Qm" : "vcnt.8 'Dd, 'Dm");
} else if (opc1 == 0 && opc2 == 0b1011) {
Format(instr, "vmvn 'Qd, 'Qm");
+ } else if (opc1 == 0b01 && opc2 == 0b0010) {
+ DCHECK_NE(0b11, size);
+ Format(instr,
+ q ? "vceq.s'size2 'Qd, 'Qm, #0" : "vceq.s.'size2 'Dd, 'Dm, #0");
} else if (opc1 == 0b01 && opc2 == 0b0100) {
DCHECK_NE(0b11, size);
Format(instr,
diff --git a/deps/v8/src/diagnostics/arm/unwinder-arm.cc b/deps/v8/src/diagnostics/arm/unwinder-arm.cc
index 171a258a0c..846bbfe6bc 100644
--- a/deps/v8/src/diagnostics/arm/unwinder-arm.cc
+++ b/deps/v8/src/diagnostics/arm/unwinder-arm.cc
@@ -12,7 +12,7 @@ void GetCalleeSavedRegistersFromEntryFrame(void* fp,
RegisterState* register_state) {
const i::Address base_addr =
reinterpret_cast<i::Address>(fp) +
- i::EntryFrameConstants::kDirectCallerRRegistersOffset;
+ i::EntryFrameConstants::kDirectCallerGeneralRegistersOffset;
if (!register_state->callee_saved) {
register_state->callee_saved = std::make_unique<CalleeSavedRegisters>();
diff --git a/deps/v8/src/diagnostics/basic-block-profiler.cc b/deps/v8/src/diagnostics/basic-block-profiler.cc
index eff80e6fca..20b6e567ea 100644
--- a/deps/v8/src/diagnostics/basic-block-profiler.cc
+++ b/deps/v8/src/diagnostics/basic-block-profiler.cc
@@ -60,7 +60,7 @@ Handle<String> CopyStringToJSHeap(const std::string& source, Isolate* isolate) {
}
constexpr int kBlockIdSlotSize = kInt32Size;
-constexpr int kBlockCountSlotSize = kDoubleSize;
+constexpr int kBlockCountSlotSize = kInt32Size;
} // namespace
BasicBlockProfilerData::BasicBlockProfilerData(
@@ -81,8 +81,7 @@ void BasicBlockProfilerData::CopyFromJSHeap(
code_ = js_heap_data.code().ToCString().get();
ByteArray counts(js_heap_data.counts());
for (int i = 0; i < counts.length() / kBlockCountSlotSize; ++i) {
- counts_.push_back(
- reinterpret_cast<double*>(counts.GetDataStartAddress())[i]);
+ counts_.push_back(counts.get_uint32(i));
}
ByteArray block_ids(js_heap_data.block_ids());
for (int i = 0; i < block_ids.length() / kBlockIdSlotSize; ++i) {
@@ -112,7 +111,7 @@ Handle<OnHeapBasicBlockProfilerData> BasicBlockProfilerData::CopyToJSHeap(
Handle<ByteArray> counts = isolate->factory()->NewByteArray(
counts_array_size_in_bytes, AllocationType::kOld);
for (int i = 0; i < static_cast<int>(n_blocks()); ++i) {
- reinterpret_cast<double*>(counts->GetDataStartAddress())[i] = counts_[i];
+ counts->set_uint32(i, counts_[i]);
}
Handle<String> name = CopyStringToJSHeap(function_name_, isolate);
Handle<String> schedule = CopyStringToJSHeap(schedule_, isolate);
@@ -133,7 +132,7 @@ void BasicBlockProfiler::ResetCounts(Isolate* isolate) {
Handle<ByteArray> counts(
OnHeapBasicBlockProfilerData::cast(list->Get(i)).counts(), isolate);
for (int j = 0; j < counts->length() / kBlockCountSlotSize; ++j) {
- reinterpret_cast<double*>(counts->GetDataStartAddress())[j] = 0;
+ counts->set_uint32(j, 0);
}
}
}
@@ -197,9 +196,11 @@ void BasicBlockProfilerData::Log(Isolate* isolate) {
}
std::ostream& operator<<(std::ostream& os, const BasicBlockProfilerData& d) {
- double block_count_sum =
- std::accumulate(d.counts_.begin(), d.counts_.end(), 0);
- if (block_count_sum == 0) return os;
+ if (std::all_of(d.counts_.cbegin(), d.counts_.cend(),
+ [](uint32_t count) { return count == 0; })) {
+ // No data was collected for this function.
+ return os;
+ }
const char* name = "unknown function";
if (!d.function_name_.empty()) {
name = d.function_name_.c_str();
@@ -210,14 +211,14 @@ std::ostream& operator<<(std::ostream& os, const BasicBlockProfilerData& d) {
os << d.schedule_.c_str() << std::endl;
}
os << "block counts for " << name << ":" << std::endl;
- std::vector<std::pair<size_t, double>> pairs;
+ std::vector<std::pair<size_t, uint32_t>> pairs;
pairs.reserve(d.n_blocks());
for (size_t i = 0; i < d.n_blocks(); ++i) {
pairs.push_back(std::make_pair(i, d.counts_[i]));
}
std::sort(
pairs.begin(), pairs.end(),
- [=](std::pair<size_t, double> left, std::pair<size_t, double> right) {
+ [=](std::pair<size_t, uint32_t> left, std::pair<size_t, uint32_t> right) {
if (right.second == left.second) return left.first < right.first;
return right.second < left.second;
});
diff --git a/deps/v8/src/diagnostics/basic-block-profiler.h b/deps/v8/src/diagnostics/basic-block-profiler.h
index 9753dcc3a1..edf6df0983 100644
--- a/deps/v8/src/diagnostics/basic-block-profiler.h
+++ b/deps/v8/src/diagnostics/basic-block-profiler.h
@@ -36,7 +36,7 @@ class BasicBlockProfilerData {
DCHECK_EQ(block_ids_.size(), counts_.size());
return block_ids_.size();
}
- const double* counts() const { return &counts_[0]; }
+ const uint32_t* counts() const { return &counts_[0]; }
void SetCode(const std::ostringstream& os);
void SetFunctionName(std::unique_ptr<char[]> name);
@@ -62,7 +62,7 @@ class BasicBlockProfilerData {
// These vectors are indexed by reverse post-order block number.
std::vector<int32_t> block_ids_;
- std::vector<double> counts_;
+ std::vector<uint32_t> counts_;
std::string function_name_;
std::string schedule_;
std::string code_;
diff --git a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
index 659e1d8aca..ae4cc02459 100644
--- a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
+++ b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
@@ -696,6 +696,11 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer("vbroadcastss %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
+ case 0x37:
+ AppendToBuffer("vpcmpgtq %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x99:
AppendToBuffer("vfmadd132s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
@@ -1016,6 +1021,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
break;
+ case 0xE6:
+ AppendToBuffer("vcvtdq2pd %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
default:
UnimplementedInstruction();
}
@@ -1140,6 +1149,11 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
break;
+ case 0x14:
+ AppendToBuffer("vunpcklps %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x16:
AppendToBuffer("vmovhps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
@@ -1200,6 +1214,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x5A:
+ AppendToBuffer("vcvtps2pd %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x5B:
AppendToBuffer("vcvtdq2ps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
@@ -1291,6 +1309,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x5A:
+ AppendToBuffer("vcvtpd2ps %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x5C:
AppendToBuffer("vsubpd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -1315,6 +1337,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer("vmovd %s,", NameOfXMMRegister(regop));
current += PrintRightOperand(current);
break;
+ case 0x6f:
+ AppendToBuffer("vmovdqa %s,", NameOfXMMRegister(regop));
+ current += PrintRightOperand(current);
+ break;
case 0x70:
AppendToBuffer("vpshufd %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
@@ -1372,6 +1398,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(rm));
current++;
break;
+ case 0xE6:
+ AppendToBuffer("vcvttpd2dq %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
#define DECLARE_SSE_AVX_DIS_CASE(instruction, notUsed1, notUsed2, opcode) \
case 0x##opcode: { \
AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop), \
@@ -1884,6 +1914,10 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer("movlps ");
data += PrintRightXMMOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (f0byte == 0x14) {
+ data += 2;
+ AppendToBuffer("unpcklps %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
} else if (f0byte == 0x16) {
data += 2;
AppendToBuffer("movhps %s,", NameOfXMMRegister(regop));
@@ -2236,6 +2270,10 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += PrintRightXMMOperand(data);
AppendToBuffer(",xmm0");
break;
+ case 0x37:
+ AppendToBuffer("pcmpgtq %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ break;
default:
UnimplementedInstruction();
}
@@ -2352,10 +2390,10 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer("movmskpd %s,%s", NameOfCPURegister(regop),
NameOfXMMRegister(rm));
data++;
- } else if (*data >= 0x54 && *data <= 0x59) {
- const char* const pseudo_op[] = {
- "andpd", "andnpd", "orpd", "xorpd", "addpd", "mulpd",
- };
+ } else if (*data >= 0x54 && *data <= 0x5A) {
+ const char* const pseudo_op[] = {"andpd", "andnpd", "orpd",
+ "xorpd", "addpd", "mulpd",
+ "cvtpd2ps"};
byte op = *data;
data++;
int mod, regop, rm;
@@ -2468,6 +2506,12 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += PrintRightXMMOperand(data);
AppendToBuffer(",%d", Imm8(data));
data++;
+ } else if (*data == 0xE6) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("cvttpd2dq %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
} else if (*data == 0xE7) {
data++;
int mod, regop, rm;
@@ -2732,6 +2776,12 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("lzcnt %s,", NameOfCPURegister(regop));
data += PrintRightOperand(data);
+ } else if (b2 == 0xE6) {
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("cvtdq2pd %s", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
} else {
const char* mnem = "?";
switch (b2) {
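The disasm-ia32.cc hunks above only add more opcode cases to the existing decode switches (vpcmpgtq, vunpcklps, vcvtps2pd, vcvtpd2ps, vcvtdq2pd, vcvttpd2dq, vmovdqa, plus the non-AVX unpcklps, pcmpgtq, cvtpd2ps and cvttpd2dq forms). As a rough sketch of the pattern, here is a standalone opcode-to-mnemonic switch; the prefix handling that selects between the 0F, 66 0F and 66 0F 38 tables in the real disassembler is collapsed into one switch for brevity:

#include <cstdio>

// Toy lookup covering only the opcodes added in the hunks above.
const char* Mnemonic(int opcode) {
  switch (opcode) {
    case 0x14: return "unpcklps";
    case 0x37: return "pcmpgtq";
    case 0x5A: return "cvtpd2ps";
    case 0xE6: return "cvttpd2dq";
    default:   return "unknown";
  }
}

int main() {
  const int opcodes[] = {0x14, 0x37, 0x5A, 0xE6, 0x99};
  for (int op : opcodes) {
    std::printf("0x%02X -> %s\n", op, Mnemonic(op));
  }
  return 0;
}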
diff --git a/deps/v8/src/diagnostics/objects-debug.cc b/deps/v8/src/diagnostics/objects-debug.cc
index 23ad5ebd27..203548eb44 100644
--- a/deps/v8/src/diagnostics/objects-debug.cc
+++ b/deps/v8/src/diagnostics/objects-debug.cc
@@ -5,6 +5,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/common/globals.h"
#include "src/date/date.h"
+#include "src/debug/debug-wasm-objects-inl.h"
#include "src/diagnostics/disasm.h"
#include "src/diagnostics/disassembler.h"
#include "src/heap/combined-heap.h"
@@ -30,7 +31,6 @@
#include "src/objects/hash-table-inl.h"
#include "src/objects/instance-type.h"
#include "src/objects/js-array-inl.h"
-#include "src/objects/layout-descriptor.h"
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
#include "src/roots/roots.h"
@@ -68,6 +68,7 @@
#include "src/objects/property-descriptor-object-inl.h"
#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/struct-inl.h"
+#include "src/objects/swiss-name-dictionary-inl.h"
#include "src/objects/synthetic-module-inl.h"
#include "src/objects/template-objects-inl.h"
#include "src/objects/torque-defined-classes-inl.h"
@@ -182,8 +183,6 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
case NUMBER_DICTIONARY_TYPE:
case SIMPLE_NUMBER_DICTIONARY_TYPE:
case EPHEMERON_HASH_TABLE_TYPE:
- case FIXED_ARRAY_TYPE:
- case SCOPE_INFO_TYPE:
case SCRIPT_CONTEXT_TABLE_TYPE:
FixedArray::cast(*this).FixedArrayVerify(isolate);
break;
@@ -201,9 +200,6 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
case NATIVE_CONTEXT_TYPE:
NativeContext::cast(*this).NativeContextVerify(isolate);
break;
- case WEAK_FIXED_ARRAY_TYPE:
- WeakFixedArray::cast(*this).WeakFixedArrayVerify(isolate);
- break;
case FEEDBACK_METADATA_TYPE:
FeedbackMetadata::cast(*this).FeedbackMetadataVerify(isolate);
break;
@@ -221,7 +217,6 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
case JS_ITERATOR_PROTOTYPE_TYPE:
case JS_MAP_ITERATOR_PROTOTYPE_TYPE:
case JS_OBJECT_PROTOTYPE_TYPE:
- case JS_OBJECT_TYPE:
case JS_PROMISE_PROTOTYPE_TYPE:
case JS_REG_EXP_PROTOTYPE_TYPE:
case JS_SET_ITERATOR_PROTOTYPE_TYPE:
@@ -234,8 +229,8 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
case WASM_INSTANCE_OBJECT_TYPE:
WasmInstanceObject::cast(*this).WasmInstanceObjectVerify(isolate);
break;
- case JS_GENERATOR_OBJECT_TYPE:
- JSGeneratorObject::cast(*this).JSGeneratorObjectVerify(isolate);
+ case WASM_VALUE_OBJECT_TYPE:
+ WasmValueObject::cast(*this).WasmValueObjectVerify(isolate);
break;
case JS_SET_KEY_VALUE_ITERATOR_TYPE:
case JS_SET_VALUE_ITERATOR_TYPE:
@@ -262,9 +257,6 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
TORQUE_INSTANCE_CHECKERS_MULTIPLE_FULLY_DEFINED(MAKE_TORQUE_CASE)
#undef MAKE_TORQUE_CASE
- case FOREIGN_TYPE:
- break; // No interesting fields.
-
case ALLOCATION_SITE_TYPE:
AllocationSite::cast(*this).AllocationSiteVerify(isolate);
break;
@@ -276,6 +268,16 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
case STORE_HANDLER_TYPE:
StoreHandler::cast(*this).StoreHandlerVerify(isolate);
break;
+
+ case JS_PROMISE_CONSTRUCTOR_TYPE:
+ case JS_REG_EXP_CONSTRUCTOR_TYPE:
+ case JS_ARRAY_CONSTRUCTOR_TYPE:
+#define TYPED_ARRAY_CONSTRUCTORS_SWITCH(Type, type, TYPE, Ctype) \
+ case TYPE##_TYPED_ARRAY_CONSTRUCTOR_TYPE:
+ TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTORS_SWITCH)
+#undef TYPED_ARRAY_CONSTRUCTORS_SWITCH
+ JSFunction::cast(*this).JSFunctionVerify(isolate);
+ break;
}
}
@@ -395,10 +397,6 @@ void JSObject::JSObjectVerify(Isolate* isolate) {
DCHECK_EQ(kData, details.kind());
Representation r = details.representation();
FieldIndex index = FieldIndex::ForDescriptor(map(), i);
- if (IsUnboxedDoubleField(index)) {
- DCHECK(r.IsDouble());
- continue;
- }
if (COMPRESS_POINTERS_BOOL && index.is_inobject()) {
VerifyObjectField(isolate, index.offset());
}
@@ -489,11 +487,10 @@ void Map::MapVerify(Isolate* isolate) {
TransitionsAccessor(isolate, *this, &no_gc).IsSortedNoDuplicates());
SLOW_DCHECK(TransitionsAccessor(isolate, *this, &no_gc)
.IsConsistentWithBackPointers());
- SLOW_DCHECK(!FLAG_unbox_double_fields ||
- layout_descriptor(kAcquireLoad).IsConsistentWithMap(*this));
// Only JSFunction maps have has_prototype_slot() bit set and constructible
// JSFunction objects must have prototype slot.
- CHECK_IMPLIES(has_prototype_slot(), instance_type() == JS_FUNCTION_TYPE);
+ CHECK_IMPLIES(has_prototype_slot(),
+ InstanceTypeChecker::IsJSFunction(instance_type()));
if (!may_have_interesting_symbols()) {
CHECK(!has_named_interceptor());
CHECK(!is_dictionary_map());
@@ -581,6 +578,20 @@ void Context::ContextVerify(Isolate* isolate) {
}
}
+void ScopeInfo::ScopeInfoVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::ScopeInfoVerify(*this, isolate);
+
+ // Make sure that the FixedArray-style length matches the length that we would
+ // compute based on the Torque indexed fields.
+ CHECK_EQ(FixedArray::SizeFor(length()), AllocatedSize());
+
+ // Code that treats ScopeInfo like a FixedArray expects all values to be
+ // tagged.
+ for (int i = 0; i < length(); ++i) {
+ Object::VerifyPointer(isolate, get(isolate, i));
+ }
+}
+
void NativeContext::NativeContextVerify(Isolate* isolate) {
ContextVerify(isolate);
CHECK_EQ(length(), NativeContext::NATIVE_CONTEXT_SLOTS);
@@ -860,8 +871,8 @@ void SharedFunctionInfo::SharedFunctionInfoVerify(ReadOnlyRoots roots) {
CHECK(feedback_metadata().IsFeedbackMetadata());
}
- int expected_map_index = Context::FunctionMapIndex(
- language_mode(), kind(), HasSharedName(), needs_home_object());
+ int expected_map_index =
+ Context::FunctionMapIndex(language_mode(), kind(), HasSharedName());
CHECK_EQ(expected_map_index, function_map_index());
if (!scope_info().IsEmpty()) {
@@ -949,11 +960,9 @@ void Oddball::OddballVerify(Isolate* isolate) {
}
void PropertyCell::PropertyCellVerify(Isolate* isolate) {
- // TODO(torque): replace with USE_TORQUE_VERIFIER(PropertyCell) once
- // it supports UniqueName type.
TorqueGeneratedClassVerifiers::PropertyCellVerify(*this, isolate);
-
CHECK(name().IsUniqueName());
+ CheckDataIsCompatible(property_details(), value());
}
void CodeDataContainer::CodeDataContainerVerify(Isolate* isolate) {
@@ -1238,6 +1247,81 @@ void SmallOrderedNameDictionary::SmallOrderedNameDictionaryVerify(
}
}
+void SwissNameDictionary::SwissNameDictionaryVerify(Isolate* isolate) {
+ this->SwissNameDictionaryVerify(isolate, false);
+}
+
+void SwissNameDictionary::SwissNameDictionaryVerify(Isolate* isolate,
+ bool slow_checks) {
+ DisallowHeapAllocation no_gc;
+
+ CHECK(IsValidCapacity(Capacity()));
+
+ meta_table().ByteArrayVerify(isolate);
+
+ int seen_deleted = 0;
+ int seen_present = 0;
+
+ for (int i = 0; i < Capacity(); i++) {
+ ctrl_t ctrl = GetCtrl(i);
+
+ if (IsFull(ctrl) || slow_checks) {
+ Object key = KeyAt(i);
+ Object value = ValueAtRaw(i);
+
+ if (IsFull(ctrl)) {
+ ++seen_present;
+
+ Name name = Name::cast(key);
+ if (slow_checks) {
+ CHECK_EQ(swiss_table::H2(name.hash()), ctrl);
+ }
+
+ CHECK(!key.IsTheHole());
+ CHECK(!value.IsTheHole());
+ name.NameVerify(isolate);
+ value.ObjectVerify(isolate);
+ } else if (IsDeleted(ctrl)) {
+ ++seen_deleted;
+ CHECK(key.IsTheHole());
+ CHECK(value.IsTheHole());
+ } else if (IsEmpty(ctrl)) {
+ CHECK(key.IsTheHole());
+ CHECK(value.IsTheHole());
+ } else {
+ // Something unexpected. Note that we don't use kSentinel at the moment.
+ UNREACHABLE();
+ }
+ }
+ }
+
+ CHECK_EQ(seen_present, NumberOfElements());
+ if (slow_checks) {
+ CHECK_EQ(seen_deleted, NumberOfDeletedElements());
+
+ // Verify copy of first group at end (= after Capacity() slots) of control
+ // table.
+ for (int i = 0; i < std::min(static_cast<int>(Group::kWidth), Capacity());
+ ++i) {
+ CHECK_EQ(CtrlTable()[i], CtrlTable()[Capacity() + i]);
+ }
+ // If 2 * capacity is smaller than the capacity plus group width, the slots
+ // after that must be empty.
+ for (int i = 2 * Capacity(); i < Capacity() + kGroupWidth; ++i) {
+ CHECK_EQ(Ctrl::kEmpty, CtrlTable()[i]);
+ }
+
+ for (int enum_index = 0; enum_index < UsedCapacity(); ++enum_index) {
+ int entry = EntryForEnumerationIndex(enum_index);
+ CHECK_LT(entry, Capacity());
+ ctrl_t ctrl = GetCtrl(entry);
+
+ // Enum table must not point to empty slots.
+ CHECK(IsFull(ctrl) || IsDeleted(ctrl));
+ }
+ }
+}
+
void JSRegExp::JSRegExpVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSRegExpVerify(*this, isolate);
switch (TypeTag()) {
@@ -1399,6 +1483,11 @@ void Module::ModuleVerify(Isolate* isolate) {
CHECK_EQ(JSModuleNamespace::cast(module_namespace()).module(), *this);
}
+ if (!(status() == kErrored || status() == kEvaluating ||
+ status() == kEvaluated)) {
+ CHECK(top_level_capability().IsUndefined());
+ }
+
CHECK_NE(hash(), 0);
}
@@ -1432,7 +1521,6 @@ void SourceTextModule::SourceTextModuleVerify(Isolate* isolate) {
} else if (status() == kUninstantiated) {
CHECK(code().IsSharedFunctionInfo());
}
- CHECK(top_level_capability().IsUndefined());
CHECK(!AsyncParentModuleCount());
CHECK(!pending_async_dependencies());
CHECK(!async_evaluating());
@@ -1526,6 +1614,11 @@ void WasmInstanceObject::WasmInstanceObjectVerify(Isolate* isolate) {
}
}
+void WasmValueObject::WasmValueObjectVerify(Isolate* isolate) {
+ JSObjectVerify(isolate);
+ CHECK(IsWasmValueObject());
+}
+
void WasmExportedFunctionData::WasmExportedFunctionDataVerify(
Isolate* isolate) {
TorqueGeneratedClassVerifiers::WasmExportedFunctionDataVerify(*this, isolate);
@@ -1633,6 +1726,16 @@ void PreparseData::PreparseDataVerify(Isolate* isolate) {
USE_TORQUE_VERIFIER(InterpreterData)
+void StackFrameInfo::StackFrameInfoVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::StackFrameInfoVerify(*this, isolate);
+ CHECK_IMPLIES(IsAsmJsWasm(), IsWasm());
+ CHECK_IMPLIES(IsWasm(), receiver_or_instance().IsWasmInstanceObject());
+ CHECK_IMPLIES(IsWasm(), function().IsSmi());
+ CHECK_IMPLIES(!IsWasm(), function().IsJSFunction());
+ CHECK_IMPLIES(IsAsync(), !IsWasm());
+ CHECK_IMPLIES(IsConstructor(), !IsWasm());
+}
+
#endif // VERIFY_HEAP
#ifdef DEBUG
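SwissNameDictionaryVerify above leans on the Swiss-table control-byte layout: Capacity() control bytes classify each bucket as empty, deleted, or full (a full bucket stores the H2 byte of the key's hash), and the first Group::kWidth bytes are copied again after the regular slots so that group-wide probes need not wrap. A small standalone model of that layout and of the two slow checks (mirrored first group, empty tail past 2 * capacity); kGroupWidth, kEmpty and kDeleted are illustrative values, not V8's constants:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

constexpr int kGroupWidth = 8;       // assumed group size for this sketch
constexpr uint8_t kEmpty = 0x80;     // illustrative control values,
constexpr uint8_t kDeleted = 0xFE;   // not V8's actual constants

int main() {
  const int capacity = 4;  // deliberately smaller than kGroupWidth
  std::vector<uint8_t> ctrl(capacity + kGroupWidth, kEmpty);

  ctrl[1] = 0x2A;      // pretend bucket 1 is full and 0x2A is H2 of its key
  ctrl[2] = kDeleted;  // pretend bucket 2 held an entry that was deleted

  // Maintain the invariant the verifier checks: the first group is copied
  // to the end of the control table.
  for (int i = 0; i < std::min(kGroupWidth, capacity); ++i) {
    ctrl[capacity + i] = ctrl[i];
  }

  // Check 1: mirror of the first group after the Capacity() regular slots.
  for (int i = 0; i < std::min(kGroupWidth, capacity); ++i) {
    assert(ctrl[i] == ctrl[capacity + i]);
  }
  // Check 2: if 2 * capacity < capacity + kGroupWidth, the rest stays empty.
  for (int i = 2 * capacity; i < capacity + kGroupWidth; ++i) {
    assert(ctrl[i] == kEmpty);
  }
  return 0;
}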
diff --git a/deps/v8/src/diagnostics/objects-printer.cc b/deps/v8/src/diagnostics/objects-printer.cc
index 94acb12d09..bd03a837a8 100644
--- a/deps/v8/src/diagnostics/objects-printer.cc
+++ b/deps/v8/src/diagnostics/objects-printer.cc
@@ -6,7 +6,7 @@
#include <memory>
#include "src/common/globals.h"
-#include "src/compiler/node.h"
+#include "src/debug/debug-wasm-objects-inl.h"
#include "src/diagnostics/disasm.h"
#include "src/diagnostics/disassembler.h"
#include "src/heap/heap-inl.h" // For InOldSpace.
@@ -100,9 +100,6 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
}
switch (instance_type) {
- case FIXED_ARRAY_TYPE:
- FixedArray::cast(*this).FixedArrayPrint(os);
- break;
case AWAIT_CONTEXT_TYPE:
case BLOCK_CONTEXT_TYPE:
case CATCH_CONTEXT_TYPE:
@@ -166,7 +163,6 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_ITERATOR_PROTOTYPE_TYPE:
case JS_MAP_ITERATOR_PROTOTYPE_TYPE:
case JS_OBJECT_PROTOTYPE_TYPE:
- case JS_OBJECT_TYPE:
case JS_PROMISE_PROTOTYPE_TYPE:
case JS_REG_EXP_PROTOTYPE_TYPE:
case JS_SET_ITERATOR_PROTOTYPE_TYPE:
@@ -179,8 +175,8 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case WASM_INSTANCE_OBJECT_TYPE:
WasmInstanceObject::cast(*this).WasmInstanceObjectPrint(os);
break;
- case JS_GENERATOR_OBJECT_TYPE:
- JSGeneratorObject::cast(*this).JSGeneratorObjectPrint(os);
+ case WASM_VALUE_OBJECT_TYPE:
+ WasmValueObject::cast(*this).WasmValueObjectPrint(os);
break;
case CODE_TYPE:
Code::cast(*this).CodePrint(os);
@@ -207,9 +203,6 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
TORQUE_INSTANCE_CHECKERS_MULTIPLE_FULLY_DEFINED(MAKE_TORQUE_CASE)
#undef MAKE_TORQUE_CASE
- case FOREIGN_TYPE:
- Foreign::cast(*this).ForeignPrint(os);
- break;
case ALLOCATION_SITE_TYPE:
AllocationSite::cast(*this).AllocationSitePrint(os);
break;
@@ -219,19 +212,24 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case STORE_HANDLER_TYPE:
StoreHandler::cast(*this).StoreHandlerPrint(os);
break;
- case SCOPE_INFO_TYPE:
- ScopeInfo::cast(*this).ScopeInfoPrint(os);
- break;
case FEEDBACK_METADATA_TYPE:
FeedbackMetadata::cast(*this).FeedbackMetadataPrint(os);
break;
- case WEAK_FIXED_ARRAY_TYPE:
- WeakFixedArray::cast(*this).WeakFixedArrayPrint(os);
+ case JS_PROMISE_CONSTRUCTOR_TYPE:
+ case JS_REG_EXP_CONSTRUCTOR_TYPE:
+ case JS_ARRAY_CONSTRUCTOR_TYPE:
+#define TYPED_ARRAY_CONSTRUCTORS_SWITCH(Type, type, TYPE, Ctype) \
+ case TYPE##_TYPED_ARRAY_CONSTRUCTOR_TYPE:
+ TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTORS_SWITCH)
+#undef TYPED_ARRAY_CONSTRUCTORS_SWITCH
+ JSFunction::cast(*this).JSFunctionPrint(os);
break;
case INTERNALIZED_STRING_TYPE:
case EXTERNAL_INTERNALIZED_STRING_TYPE:
case ONE_BYTE_INTERNALIZED_STRING_TYPE:
case EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
+ case UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
case STRING_TYPE:
case CONS_STRING_TYPE:
case EXTERNAL_STRING_TYPE:
@@ -279,11 +277,7 @@ bool JSObject::PrintProperties(std::ostream& os) { // NOLINT
switch (details.location()) {
case kField: {
FieldIndex field_index = FieldIndex::ForDescriptor(map(), i);
- if (IsUnboxedDoubleField(field_index)) {
- os << "<unboxed double> " << RawFastDoublePropertyAt(field_index);
- } else {
- os << Brief(RawFastPropertyAt(field_index));
- }
+ os << Brief(RawFastPropertyAt(field_index));
break;
}
case kDescriptor:
@@ -917,18 +911,18 @@ void GlobalDictionary::GlobalDictionaryPrint(std::ostream& os) {
void SmallOrderedHashSet::SmallOrderedHashSetPrint(std::ostream& os) {
PrintHeader(os, "SmallOrderedHashSet");
- // TODO(tebbi): Print all fields.
+ // TODO(turbofan): Print all fields.
}
void SmallOrderedHashMap::SmallOrderedHashMapPrint(std::ostream& os) {
PrintHeader(os, "SmallOrderedHashMap");
- // TODO(tebbi): Print all fields.
+ // TODO(turbofan): Print all fields.
}
void SmallOrderedNameDictionary::SmallOrderedNameDictionaryPrint(
std::ostream& os) {
PrintHeader(os, "SmallOrderedNameDictionary");
- // TODO(tebbi): Print all fields.
+ // TODO(turbofan): Print all fields.
}
void OrderedHashSet::OrderedHashSetPrint(std::ostream& os) {
@@ -946,6 +940,74 @@ void OrderedNameDictionary::OrderedNameDictionaryPrint(std::ostream& os) {
PrintDictionaryContentsFull(os, *this);
}
+void print_hex_byte(std::ostream& os, int value) {
+ os << "0x" << std::setfill('0') << std::setw(2) << std::right << std::hex
+ << (value & 0xff) << std::setfill(' ');
+}
+
+void SwissNameDictionary::SwissNameDictionaryPrint(std::ostream& os) {
+ this->PrintHeader(os, "SwissNameDictionary");
+ os << "\n - meta table ByteArray: "
+ << reinterpret_cast<void*>(this->meta_table().ptr());
+ os << "\n - capacity: " << this->Capacity();
+ os << "\n - elements: " << this->NumberOfElements();
+ os << "\n - deleted: " << this->NumberOfDeletedElements();
+
+ std::ios_base::fmtflags sav_flags = os.flags();
+ os << "\n - ctrl table (omitting buckets where key is hole value): {";
+ for (int i = 0; i < this->Capacity() + kGroupWidth; i++) {
+ ctrl_t ctrl = CtrlTable()[i];
+
+ if (ctrl == Ctrl::kEmpty) continue;
+
+ os << "\n " << std::setw(12) << std::dec << i << ": ";
+ switch (ctrl) {
+ case Ctrl::kEmpty:
+ UNREACHABLE();
+ break;
+ case Ctrl::kDeleted:
+ print_hex_byte(os, ctrl);
+ os << " (= kDeleted)";
+ break;
+ case Ctrl::kSentinel:
+ print_hex_byte(os, ctrl);
+ os << " (= kSentinel)";
+ break;
+ default:
+ print_hex_byte(os, ctrl);
+ os << " (= H2 of a key)";
+ break;
+ }
+ }
+ os << "\n }";
+
+ os << "\n - enumeration table: {";
+ for (int enum_index = 0; enum_index < this->UsedCapacity(); enum_index++) {
+ int entry = EntryForEnumerationIndex(enum_index);
+ os << "\n " << std::setw(12) << std::dec << enum_index << ": " << entry;
+ }
+ os << "\n }";
+
+ os << "\n - data table (omitting slots where key is the hole): {";
+ for (int bucket = 0; bucket < this->Capacity(); ++bucket) {
+ Object k;
+ if (!this->ToKey(this->GetReadOnlyRoots(), bucket, &k)) continue;
+
+ Object value = this->ValueAtRaw(bucket);
+ PropertyDetails details = this->DetailsAt(bucket);
+ os << "\n " << std::setw(12) << std::dec << bucket << ": ";
+ if (k.IsString()) {
+ String::cast(k).PrintUC16(os);
+ } else {
+ os << Brief(k);
+ }
+ os << " -> " << Brief(value);
+ details.PrintAsSlowTo(os, false);
+ }
+ os << "\n }\n";
+ os.flags(sav_flags);
+}
+
void PropertyArray::PropertyArrayPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "PropertyArray");
os << "\n - length: " << length();
@@ -1059,6 +1121,8 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) { // NOLINT
os << "\n - optimization tier: " << optimization_tier();
os << "\n - invocation count: " << invocation_count();
os << "\n - profiler ticks: " << profiler_ticks();
+ os << "\n - closure feedback cell array: ";
+ closure_feedback_cell_array().ClosureFeedbackCellArrayPrint(os);
FeedbackMetadataIterator iter(metadata());
while (iter.HasNext()) {
@@ -1426,6 +1490,9 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
os << "feedback metadata is not available in SFI\n";
} else if (has_feedback_vector()) {
feedback_vector().FeedbackVectorPrint(os);
+ } else if (has_closure_feedback_cell_array()) {
+ os << "No feedback vector, but we have a closure feedback cell array\n";
+ closure_feedback_cell_array().ClosureFeedbackCellArrayPrint(os);
} else {
os << "not available\n";
}
@@ -1462,16 +1529,13 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
}
os << "\n - kind: " << kind();
os << "\n - syntax kind: " << syntax_kind();
- if (needs_home_object()) {
- os << "\n - needs_home_object";
- }
os << "\n - function_map_index: " << function_map_index();
os << "\n - formal_parameter_count: " << internal_formal_parameter_count();
os << "\n - expected_nof_properties: " << expected_nof_properties();
os << "\n - language_mode: " << language_mode();
os << "\n - data: " << Brief(function_data(kAcquireLoad));
os << "\n - code (from data): ";
- os << Brief(GetCode());
+ os << Brief(GetCode());
PrintSourceCode(os);
// Script files are often large, thus only print their {Brief} representation.
os << "\n - script: " << Brief(script());
@@ -1518,11 +1582,11 @@ void PropertyCell::PropertyCellPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "PropertyCell");
os << "\n - name: ";
name().NamePrint(os);
- os << "\n - value: " << Brief(value());
+ os << "\n - value: " << Brief(value(kAcquireLoad));
os << "\n - details: ";
- property_details().PrintAsSlowTo(os, true);
- PropertyCellType cell_type = property_details().cell_type();
- os << "\n - cell_type: " << cell_type;
+ PropertyDetails details = property_details(kAcquireLoad);
+ details.PrintAsSlowTo(os, true);
+ os << "\n - cell_type: " << details.cell_type();
os << "\n";
}
@@ -1709,6 +1773,15 @@ void ArrayBoilerplateDescription::ArrayBoilerplateDescriptionPrint(
os << "\n";
}
+void RegExpBoilerplateDescription::RegExpBoilerplateDescriptionPrint(
+ std::ostream& os) { // NOLINT
+ PrintHeader(os, "RegExpBoilerplateDescription");
+ os << "\n - data: " << Brief(data());
+ os << "\n - source: " << Brief(source());
+ os << "\n - flags: " << flags();
+ os << "\n";
+}
+
void AsmWasmData::AsmWasmDataPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "AsmWasmData");
os << "\n - native module: " << Brief(managed_native_module());
@@ -1720,7 +1793,6 @@ void AsmWasmData::AsmWasmDataPrint(std::ostream& os) { // NOLINT
void WasmTypeInfo::WasmTypeInfoPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "WasmTypeInfo");
os << "\n - type address: " << reinterpret_cast<void*>(foreign_address());
- os << "\n - parent: " << Brief(parent());
os << "\n";
}
@@ -1734,26 +1806,27 @@ void WasmStruct::WasmStructPrint(std::ostream& os) { // NOLINT
uint32_t field_offset = struct_type->field_offset(i);
Address field_address = RawField(field_offset).address();
switch (field.kind()) {
- case wasm::ValueType::kI32:
+ case wasm::kI32:
os << base::ReadUnalignedValue<int32_t>(field_address);
break;
- case wasm::ValueType::kI64:
+ case wasm::kI64:
os << base::ReadUnalignedValue<int64_t>(field_address);
break;
- case wasm::ValueType::kF32:
+ case wasm::kF32:
os << base::ReadUnalignedValue<float>(field_address);
break;
- case wasm::ValueType::kF64:
+ case wasm::kF64:
os << base::ReadUnalignedValue<double>(field_address);
break;
- case wasm::ValueType::kI8:
- case wasm::ValueType::kI16:
- case wasm::ValueType::kS128:
- case wasm::ValueType::kRef:
- case wasm::ValueType::kOptRef:
- case wasm::ValueType::kRtt:
- case wasm::ValueType::kBottom:
- case wasm::ValueType::kStmt:
+ case wasm::kI8:
+ case wasm::kI16:
+ case wasm::kS128:
+ case wasm::kRef:
+ case wasm::kOptRef:
+ case wasm::kRtt:
+ case wasm::kRttWithDepth:
+ case wasm::kBottom:
+ case wasm::kStmt:
os << "UNIMPLEMENTED"; // TODO(7748): Implement.
break;
}
@@ -1769,30 +1842,31 @@ void WasmArray::WasmArrayPrint(std::ostream& os) { // NOLINT
os << "\n - length: " << len;
Address data_ptr = ptr() + WasmArray::kHeaderSize - kHeapObjectTag;
switch (array_type->element_type().kind()) {
- case wasm::ValueType::kI32:
+ case wasm::kI32:
PrintTypedArrayElements(os, reinterpret_cast<int32_t*>(data_ptr), len,
true);
break;
- case wasm::ValueType::kI64:
+ case wasm::kI64:
PrintTypedArrayElements(os, reinterpret_cast<int64_t*>(data_ptr), len,
true);
break;
- case wasm::ValueType::kF32:
+ case wasm::kF32:
PrintTypedArrayElements(os, reinterpret_cast<float*>(data_ptr), len,
true);
break;
- case wasm::ValueType::kF64:
+ case wasm::kF64:
PrintTypedArrayElements(os, reinterpret_cast<double*>(data_ptr), len,
true);
break;
- case wasm::ValueType::kI8:
- case wasm::ValueType::kI16:
- case wasm::ValueType::kS128:
- case wasm::ValueType::kRef:
- case wasm::ValueType::kOptRef:
- case wasm::ValueType::kRtt:
- case wasm::ValueType::kBottom:
- case wasm::ValueType::kStmt:
+ case wasm::kI8:
+ case wasm::kI16:
+ case wasm::kS128:
+ case wasm::kRef:
+ case wasm::kOptRef:
+ case wasm::kRtt:
+ case wasm::kRttWithDepth:
+ case wasm::kBottom:
+ case wasm::kStmt:
os << "\n Printing elements of this type is unimplemented, sorry";
// TODO(7748): Implement.
break;
@@ -1888,6 +1962,12 @@ void WasmTableObject::WasmTableObjectPrint(std::ostream& os) { // NOLINT
os << "\n";
}
+void WasmValueObject::WasmValueObjectPrint(std::ostream& os) { // NOLINT
+ PrintHeader(os, "WasmValueObject");
+ os << "\n - value: " << Brief(value());
+ os << "\n";
+}
+
void WasmGlobalObject::WasmGlobalObjectPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "WasmGlobalObject");
if (type().is_reference_type()) {
@@ -2261,67 +2341,15 @@ void ScopeInfo::ScopeInfoPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void StackTraceFrame::StackTraceFramePrint(std::ostream& os) { // NOLINT
- PrintHeader(os, "StackTraceFrame");
- os << "\n - frame_index: " << frame_index();
- os << "\n - frame_info: " << Brief(frame_info());
- os << "\n";
-}
-
void StackFrameInfo::StackFrameInfoPrint(std::ostream& os) { // NOLINT
- PrintHeader(os, "StackFrame");
- os << "\n - line_number: " << line_number();
- os << "\n - column_number: " << column_number();
- os << "\n - script_id: " << script_id();
- os << "\n - script_name: " << Brief(script_name());
- os << "\n - script_name_or_source_url: "
- << Brief(script_name_or_source_url());
- os << "\n - function_name: " << Brief(function_name());
- os << "\n - is_eval: " << (is_eval() ? "true" : "false");
- os << "\n - is_constructor: " << (is_constructor() ? "true" : "false");
- os << "\n";
-}
-
-static void PrintBitMask(std::ostream& os, uint32_t value) { // NOLINT
- for (int i = 0; i < 32; i++) {
- if ((i & 7) == 0) os << " ";
- os << (((value & 1) == 0) ? "_" : "x");
- value >>= 1;
- }
-}
-
-void LayoutDescriptor::Print() {
- StdoutStream os;
- this->Print(os);
- os << std::flush;
-}
-
-void LayoutDescriptor::ShortPrint(std::ostream& os) {
- if (IsSmi()) {
- // Print tagged value for easy use with "jld" gdb macro.
- os << reinterpret_cast<void*>(ptr());
- } else {
- os << Brief(*this);
- }
-}
-
-void LayoutDescriptor::Print(std::ostream& os) { // NOLINT
- os << "Layout descriptor: ";
- if (IsFastPointerLayout()) {
- os << "<all tagged>";
- } else if (IsSmi()) {
- os << "fast";
- PrintBitMask(os, static_cast<uint32_t>(Smi::ToInt(*this)));
- } else if (IsOddball() && IsUninitialized()) {
- os << "<uninitialized>";
- } else {
- os << "slow";
- int num_words = number_of_layout_words();
- for (int i = 0; i < num_words; i++) {
- if (i > 0) os << " |";
- PrintBitMask(os, get_layout_word(i));
- }
- }
+ PrintHeader(os, "StackFrameInfo");
+ os << "\n - receiver_or_instance: " << Brief(receiver_or_instance());
+ os << "\n - function: " << Brief(function());
+ os << "\n - code_object: " << Brief(code_object());
+ os << "\n - code_offset_or_source_position: "
+ << code_offset_or_source_position();
+ os << "\n - flags: " << flags();
+ os << "\n - parameters: " << Brief(parameters());
os << "\n";
}
@@ -2488,10 +2516,6 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
os << "\n - instance descriptors " << (owns_descriptors() ? "(own) " : "")
<< "#" << NumberOfOwnDescriptors() << ": "
<< Brief(instance_descriptors(kRelaxedLoad));
- if (FLAG_unbox_double_fields) {
- os << "\n - layout descriptor: ";
- layout_descriptor(kAcquireLoad).ShortPrint(os);
- }
// Read-only maps can't have transitions, which is fortunate because we need
// the isolate to iterate over the transitions.
@@ -2747,12 +2771,12 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_Code(void* object) {
}
if (!isolate->heap()->InSpaceSlow(address, i::CODE_SPACE) &&
- !isolate->heap()->InSpaceSlow(address, i::LO_SPACE) &&
+ !isolate->heap()->InSpaceSlow(address, i::CODE_LO_SPACE) &&
!i::InstructionStream::PcIsOffHeap(isolate, address) &&
!i::ReadOnlyHeap::Contains(address)) {
i::PrintF(
- "%p is not within the current isolate's large object, code, read_only "
- "or embedded spaces\n",
+ "%p is not within the current isolate's code, read_only or embedded "
+ "spaces\n",
object);
return;
}
@@ -2770,16 +2794,6 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_Code(void* object) {
#endif // ENABLE_DISASSEMBLER
}
-V8_EXPORT_PRIVATE extern void _v8_internal_Print_LayoutDescriptor(
- void* object) {
- i::Object o(GetObjectFromRaw(object));
- if (!o.IsLayoutDescriptor()) {
- printf("Please provide a layout descriptor\n");
- } else {
- i::LayoutDescriptor::cast(o).Print();
- }
-}
-
V8_EXPORT_PRIVATE extern void _v8_internal_Print_StackTrace() {
i::Isolate* isolate = i::Isolate::Current();
isolate->PrintStack(stdout);
@@ -2798,7 +2812,3 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_TransitionTree(void* object) {
#endif
}
}
-
-V8_EXPORT_PRIVATE extern void _v8_internal_Node_Print(void* object) {
- reinterpret_cast<i::compiler::Node*>(object)->Print();
-}
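SwissNameDictionaryPrint above temporarily switches the stream into zero-padded hex via print_hex_byte and restores the caller's formatting at the end with os.flags(sav_flags). A standalone sketch of that save/print/restore pattern, with the same manipulators and made-up values:

#include <iomanip>
#include <iostream>

// Same shape as print_hex_byte: two-digit, zero-padded hex, then the fill
// character is put back so later output is unaffected.
void PrintHexByte(std::ostream& os, int value) {
  os << "0x" << std::setfill('0') << std::setw(2) << std::right << std::hex
     << (value & 0xff) << std::setfill(' ');
}

int main() {
  std::ios_base::fmtflags saved = std::cout.flags();  // like sav_flags above
  PrintHexByte(std::cout, 0x2A);
  std::cout << " (= H2 of a key)\n";
  std::cout.flags(saved);  // restore: the next number prints in decimal again
  std::cout << 42 << "\n";
  return 0;
}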
diff --git a/deps/v8/src/diagnostics/perf-jit.cc b/deps/v8/src/diagnostics/perf-jit.cc
index 822e690bb0..5a2650131f 100644
--- a/deps/v8/src/diagnostics/perf-jit.cc
+++ b/deps/v8/src/diagnostics/perf-jit.cc
@@ -27,6 +27,8 @@
#include "src/diagnostics/perf-jit.h"
+#include "src/common/assert-scope.h"
+
// Only compile the {PerfJitLogger} on Linux.
#if V8_OS_LINUX
@@ -211,7 +213,8 @@ void PerfJitLogger::LogRecordedBuffer(
(abstract_code->kind() != CodeKind::INTERPRETED_FUNCTION &&
abstract_code->kind() != CodeKind::TURBOFAN &&
abstract_code->kind() != CodeKind::NATIVE_CONTEXT_INDEPENDENT &&
- abstract_code->kind() != CodeKind::TURBOPROP)) {
+ abstract_code->kind() != CodeKind::TURBOPROP &&
+ abstract_code->kind() != CodeKind::BASELINE)) {
return;
}
@@ -335,9 +338,17 @@ SourcePositionInfo GetSourcePositionInfo(Handle<Code> code,
void PerfJitLogger::LogWriteDebugInfo(Handle<Code> code,
Handle<SharedFunctionInfo> shared) {
+ DisallowGarbageCollection no_gc;
+ // TODO(v8:11429,cbruni): add proper baseline source position iterator
+ bool is_baseline = code->kind() == CodeKind::BASELINE;
+ ByteArray source_position_table = code->SourcePositionTable();
+ if (is_baseline) {
+ source_position_table =
+ shared->GetBytecodeArray(shared->GetIsolate()).SourcePositionTable();
+ }
// Compute the entry count and get the name of the script.
uint32_t entry_count = 0;
- for (SourcePositionTableIterator iterator(code->SourcePositionTable());
+ for (SourcePositionTableIterator iterator(source_position_table);
!iterator.done(); iterator.Advance()) {
entry_count++;
}
@@ -358,7 +369,7 @@ void PerfJitLogger::LogWriteDebugInfo(Handle<Code> code,
size += entry_count * sizeof(PerfJitDebugEntry);
// Add the size of the name after each entry.
- for (SourcePositionTableIterator iterator(code->SourcePositionTable());
+ for (SourcePositionTableIterator iterator(source_position_table);
!iterator.done(); iterator.Advance()) {
SourcePositionInfo info(
GetSourcePositionInfo(code, shared, iterator.source_position()));
@@ -371,7 +382,7 @@ void PerfJitLogger::LogWriteDebugInfo(Handle<Code> code,
Address code_start = code->InstructionStart();
- for (SourcePositionTableIterator iterator(code->SourcePositionTable());
+ for (SourcePositionTableIterator iterator(source_position_table);
!iterator.done(); iterator.Advance()) {
SourcePositionInfo info(
GetSourcePositionInfo(code, shared, iterator.source_position()));
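LogWriteDebugInfo above sizes its output in two passes over the source position table: the first pass counts the entries, then the record size is entry_count * sizeof(PerfJitDebugEntry) plus the length of the name written after each entry (for baseline code the table now comes from the bytecode array rather than the Code object). A standalone model of that sizing logic; the struct and names are stand-ins, not the perf jitdump format:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

struct DebugEntry {  // stand-in for PerfJitDebugEntry
  uint64_t address;
  int line;
  int column;
};

int main() {
  // Stand-in for the source position table: (code offset, file name) pairs.
  std::vector<std::pair<int, std::string>> positions = {
      {0x00, "a.js"}, {0x10, "a.js"}, {0x24, "b.js"}};

  size_t entry_count = positions.size();     // first pass: count the entries
  size_t size = sizeof(uint32_t);            // fixed header, illustrative only
  size += entry_count * sizeof(DebugEntry);  // fixed part of every entry
  for (const auto& p : positions) {
    size += p.second.size() + 1;             // name plus terminating NUL
  }
  std::cout << "entries: " << entry_count << ", bytes: " << size << "\n";
  return 0;
}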
diff --git a/deps/v8/src/diagnostics/perf-jit.h b/deps/v8/src/diagnostics/perf-jit.h
index dbe78ddf2d..1c9112299e 100644
--- a/deps/v8/src/diagnostics/perf-jit.h
+++ b/deps/v8/src/diagnostics/perf-jit.h
@@ -86,6 +86,7 @@ class PerfJitLogger : public CodeEventLogger {
static const uint32_t kElfMachARM64 = 183;
static const uint32_t kElfMachS390x = 22;
static const uint32_t kElfMachPPC64 = 21;
+ static const uint32_t kElfMachRISCV = 243;
uint32_t GetElfMach() {
#if V8_TARGET_ARCH_IA32
@@ -104,6 +105,8 @@ class PerfJitLogger : public CodeEventLogger {
return kElfMachS390x;
#elif V8_TARGET_ARCH_PPC64
return kElfMachPPC64;
+#elif V8_TARGET_ARCH_RISCV64
+ return kElfMachRISCV;
#else
UNIMPLEMENTED();
return 0;
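The perf-jit.h hunk above adds kElfMachRISCV = 243 (ELF's EM_RISCV) and returns it from GetElfMach() when V8 targets riscv64. A compile-time selection along the same lines, with the V8_TARGET_ARCH_* macros replaced by a local demo define:

#include <cstdint>
#include <cstdio>

// e_machine constants as in the header above (standard ELF values).
static const uint32_t kElfMachPPC64 = 21;
static const uint32_t kElfMachARM64 = 183;
static const uint32_t kElfMachRISCV = 243;

#define DEMO_TARGET_RISCV64 1  // stand-in for V8_TARGET_ARCH_RISCV64

uint32_t GetElfMach() {
#if DEMO_TARGET_RISCV64
  return kElfMachRISCV;
#elif DEMO_TARGET_PPC64
  return kElfMachPPC64;
#else
  return kElfMachARM64;
#endif
}

int main() {
  std::printf("ELF e_machine = %u\n", static_cast<unsigned>(GetElfMach()));
  return 0;
}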
diff --git a/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc b/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc
new file mode 100644
index 0000000000..a39261555d
--- /dev/null
+++ b/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc
@@ -0,0 +1,1862 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A Disassembler object is used to disassemble a block of code instruction by
+// instruction. The default implementation of the NameConverter object can be
+// overridden to modify register names or to do symbol lookup on addresses.
+//
+// The example below will disassemble a block of code and print it to stdout.
+//
+// NameConverter converter;
+// Disassembler d(converter);
+// for (byte* pc = begin; pc < end;) {
+// v8::internal::EmbeddedVector<char, 256> buffer;
+// byte* prev_pc = pc;
+// pc += d.InstructionDecode(buffer, pc);
+// printf("%p %08x %s\n",
+// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
+// }
+//
+// The Disassembler class also has a convenience method to disassemble a block
+// of code into a FILE*, meaning that the above functionality could also be
+// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+
+#if V8_TARGET_ARCH_RISCV64
+
+#include "src/base/platform/platform.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/riscv64/constants-riscv64.h"
+#include "src/diagnostics/disasm.h"
+
+namespace v8 {
+namespace internal {
+
+//------------------------------------------------------------------------------
+
+// Decoder decodes and disassembles instructions into an output buffer.
+// It uses the converter to convert register names and call destinations into
+// more informative description.
+class Decoder {
+ public:
+ Decoder(const disasm::NameConverter& converter,
+ v8::internal::Vector<char> out_buffer)
+ : converter_(converter), out_buffer_(out_buffer), out_buffer_pos_(0) {
+ out_buffer_[out_buffer_pos_] = '\0';
+ }
+
+ ~Decoder() {}
+ Decoder(const Decoder&) = delete;
+ Decoder& operator=(const Decoder&) = delete;
+
+ // Writes one disassembled instruction into 'buffer' (0-terminated).
+ // Returns the length of the disassembled machine instruction in bytes.
+ int InstructionDecode(byte* instruction);
+
+ private:
+ // Bottleneck functions to print into the out_buffer.
+ void PrintChar(const char ch);
+ void Print(const char* str);
+
+ // Printing of common values.
+ void PrintRegister(int reg);
+ void PrintFPURegister(int freg);
+ void PrintFPUStatusRegister(int freg);
+ void PrintRs1(Instruction* instr);
+ void PrintRs2(Instruction* instr);
+ void PrintRd(Instruction* instr);
+ void PrintVs1(Instruction* instr);
+ void PrintFRs1(Instruction* instr);
+ void PrintFRs2(Instruction* instr);
+ void PrintFRs3(Instruction* instr);
+ void PrintFRd(Instruction* instr);
+ void PrintImm12(Instruction* instr);
+ void PrintImm12X(Instruction* instr);
+ void PrintImm20U(Instruction* instr);
+ void PrintImm20J(Instruction* instr);
+ void PrintShamt(Instruction* instr);
+ void PrintShamt32(Instruction* instr);
+ void PrintRvcImm6(Instruction* instr);
+ void PrintRvcImm6U(Instruction* instr);
+ void PrintRvcImm6Addi16sp(Instruction* instr);
+ void PrintRvcShamt(Instruction* instr);
+ void PrintRvcImm6Ldsp(Instruction* instr);
+ void PrintRvcImm6Lwsp(Instruction* instr);
+ void PrintRvcImm6Sdsp(Instruction* instr);
+ void PrintRvcImm6Swsp(Instruction* instr);
+ void PrintRvcImm5W(Instruction* instr);
+ void PrintRvcImm5D(Instruction* instr);
+ void PrintRvcImm8Addi4spn(Instruction* instr);
+ void PrintRvcImm11CJ(Instruction* instr);
+ void PrintAcquireRelease(Instruction* instr);
+ void PrintBranchOffset(Instruction* instr);
+ void PrintStoreOffset(Instruction* instr);
+ void PrintCSRReg(Instruction* instr);
+ void PrintRoundingMode(Instruction* instr);
+ void PrintMemoryOrder(Instruction* instr, bool is_pred);
+
+ // Each of these functions decodes one particular instruction type.
+ void DecodeRType(Instruction* instr);
+ void DecodeR4Type(Instruction* instr);
+ void DecodeRAType(Instruction* instr);
+ void DecodeRFPType(Instruction* instr);
+ void DecodeIType(Instruction* instr);
+ void DecodeSType(Instruction* instr);
+ void DecodeBType(Instruction* instr);
+ void DecodeUType(Instruction* instr);
+ void DecodeJType(Instruction* instr);
+ void DecodeCRType(Instruction* instr);
+ void DecodeCAType(Instruction* instr);
+ void DecodeCIType(Instruction* instr);
+ void DecodeCIWType(Instruction* instr);
+ void DecodeCSSType(Instruction* instr);
+ void DecodeCLType(Instruction* instr);
+ void DecodeCSType(Instruction* instr);
+ void DecodeCJType(Instruction* instr);
+
+ // Printing of instruction name.
+ void PrintInstructionName(Instruction* instr);
+
+ // Handle formatting of instructions and their options.
+ int FormatRegister(Instruction* instr, const char* option);
+ int FormatFPURegisterOrRoundMode(Instruction* instr, const char* option);
+ int FormatRvcRegister(Instruction* instr, const char* option);
+ int FormatRvcImm(Instruction* instr, const char* option);
+ int FormatOption(Instruction* instr, const char* option);
+ void Format(Instruction* instr, const char* format);
+ void Unknown(Instruction* instr);
+
+ const disasm::NameConverter& converter_;
+ v8::internal::Vector<char> out_buffer_;
+ int out_buffer_pos_;
+};
+
+// Support for assertions in the Decoder formatting functions.
+#define STRING_STARTS_WITH(string, compare_string) \
+ (strncmp(string, compare_string, strlen(compare_string)) == 0)
+
+// Append the ch to the output buffer.
+void Decoder::PrintChar(const char ch) { out_buffer_[out_buffer_pos_++] = ch; }
+
+// Append the str to the output buffer.
+void Decoder::Print(const char* str) {
+ char cur = *str++;
+ while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+ PrintChar(cur);
+ cur = *str++;
+ }
+ out_buffer_[out_buffer_pos_] = 0;
+}
+
+// Print the register name according to the active name converter.
+void Decoder::PrintRegister(int reg) {
+ Print(converter_.NameOfCPURegister(reg));
+}
+
+void Decoder::PrintRs1(Instruction* instr) {
+ int reg = instr->Rs1Value();
+ PrintRegister(reg);
+}
+
+void Decoder::PrintRs2(Instruction* instr) {
+ int reg = instr->Rs2Value();
+ PrintRegister(reg);
+}
+
+void Decoder::PrintRd(Instruction* instr) {
+ int reg = instr->RdValue();
+ PrintRegister(reg);
+}
+
+void Decoder::PrintVs1(Instruction* instr) {
+ int val = instr->Rs1Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", val);
+}
+
+// Print the FPU register name according to the active name converter.
+void Decoder::PrintFPURegister(int freg) {
+ Print(converter_.NameOfXMMRegister(freg));
+}
+
+void Decoder::PrintFRs1(Instruction* instr) {
+ int reg = instr->Rs1Value();
+ PrintFPURegister(reg);
+}
+
+void Decoder::PrintFRs2(Instruction* instr) {
+ int reg = instr->Rs2Value();
+ PrintFPURegister(reg);
+}
+
+void Decoder::PrintFRs3(Instruction* instr) {
+ int reg = instr->Rs3Value();
+ PrintFPURegister(reg);
+}
+
+void Decoder::PrintFRd(Instruction* instr) {
+ int reg = instr->RdValue();
+ PrintFPURegister(reg);
+}
+
+void Decoder::PrintImm12X(Instruction* instr) {
+ int32_t imm = instr->Imm12Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+}
+
+void Decoder::PrintImm12(Instruction* instr) {
+ int32_t imm = instr->Imm12Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintBranchOffset(Instruction* instr) {
+ int32_t imm = instr->BranchOffset();
+ const char* target =
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + imm);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%d -> %s", imm, target);
+}
+
+void Decoder::PrintStoreOffset(Instruction* instr) {
+ int32_t imm = instr->StoreOffset();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintImm20U(Instruction* instr) {
+ int32_t imm = instr->Imm20UValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+}
+
+void Decoder::PrintImm20J(Instruction* instr) {
+ int32_t imm = instr->Imm20JValue();
+ const char* target =
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + imm);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%d -> %s", imm, target);
+}
+
+void Decoder::PrintShamt(Instruction* instr) {
+ int32_t imm = instr->Shamt();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintShamt32(Instruction* instr) {
+ int32_t imm = instr->Shamt32();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcImm6(Instruction* instr) {
+ int32_t imm = instr->RvcImm6Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcImm6U(Instruction* instr) {
+ int32_t imm = instr->RvcImm6Value() & 0xFFFFF;
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+}
+
+void Decoder::PrintRvcImm6Addi16sp(Instruction* instr) {
+ int32_t imm = instr->RvcImm6Addi16spValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcShamt(Instruction* instr) {
+ int32_t imm = instr->RvcShamt6();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcImm6Ldsp(Instruction* instr) {
+ int32_t imm = instr->RvcImm6LdspValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcImm6Lwsp(Instruction* instr) {
+ int32_t imm = instr->RvcImm6LwspValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcImm6Swsp(Instruction* instr) {
+ int32_t imm = instr->RvcImm6SwspValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcImm6Sdsp(Instruction* instr) {
+ int32_t imm = instr->RvcImm6SdspValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcImm5W(Instruction* instr) {
+ int32_t imm = instr->RvcImm5WValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcImm5D(Instruction* instr) {
+ int32_t imm = instr->RvcImm5DValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcImm8Addi4spn(Instruction* instr) {
+ int32_t imm = instr->RvcImm8Addi4spnValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintRvcImm11CJ(Instruction* instr) {
+ int32_t imm = instr->RvcImm11CJValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+void Decoder::PrintAcquireRelease(Instruction* instr) {
+ bool aq = instr->AqValue();
+ bool rl = instr->RlValue();
+ if (aq || rl) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, ".");
+ }
+ if (aq) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "aq");
+ }
+ if (rl) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "rl");
+ }
+}
+
+void Decoder::PrintCSRReg(Instruction* instr) {
+ int32_t csr_reg = instr->CsrValue();
+ std::string s;
+ switch (csr_reg) {
+ case csr_fflags: // Floating-Point Accrued Exceptions (RW)
+ s = "csr_fflags";
+ break;
+ case csr_frm: // Floating-Point Dynamic Rounding Mode (RW)
+ s = "csr_frm";
+ break;
+ case csr_fcsr: // Floating-Point Control and Status Register (RW)
+ s = "csr_fcsr";
+ break;
+ case csr_cycle:
+ s = "csr_cycle";
+ break;
+ case csr_time:
+ s = "csr_time";
+ break;
+ case csr_instret:
+ s = "csr_instret";
+ break;
+ case csr_cycleh:
+ s = "csr_cycleh";
+ break;
+ case csr_timeh:
+ s = "csr_timeh";
+ break;
+ case csr_instreth:
+ s = "csr_instreth";
+ break;
+ default:
+ UNREACHABLE();
+ }
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%s", s.c_str());
+}
+
+void Decoder::PrintRoundingMode(Instruction* instr) {
+ int frm = instr->RoundMode();
+ std::string s;
+ switch (frm) {
+ case RNE:
+ s = "RNE";
+ break;
+ case RTZ:
+ s = "RTZ";
+ break;
+ case RDN:
+ s = "RDN";
+ break;
+ case RUP:
+ s = "RUP";
+ break;
+ case RMM:
+ s = "RMM";
+ break;
+ case DYN:
+ s = "DYN";
+ break;
+ default:
+ UNREACHABLE();
+ }
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%s", s.c_str());
+}
+
+void Decoder::PrintMemoryOrder(Instruction* instr, bool is_pred) {
+ int memOrder = instr->MemoryOrder(is_pred);
+ std::string s;
+ if ((memOrder & PSI) == PSI) {
+ s += "i";
+ }
+ if ((memOrder & PSO) == PSO) {
+ s += "o";
+ }
+ if ((memOrder & PSR) == PSR) {
+ s += "r";
+ }
+ if ((memOrder & PSW) == PSW) {
+ s += "w";
+ }
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%s", s.c_str());
+}
+
+// Printing of instruction name.
+void Decoder::PrintInstructionName(Instruction* instr) {}
+
+// Handle all register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatRegister(Instruction* instr, const char* format) {
+ DCHECK_EQ(format[0], 'r');
+ if (format[1] == 's') { // 'rs[12]: Rs register.
+ if (format[2] == '1') {
+ int reg = instr->Rs1Value();
+ PrintRegister(reg);
+ return 3;
+ } else if (format[2] == '2') {
+ int reg = instr->Rs2Value();
+ PrintRegister(reg);
+ return 3;
+ }
+ UNREACHABLE();
+ } else if (format[1] == 'd') { // 'rd: rd register.
+ int reg = instr->RdValue();
+ PrintRegister(reg);
+ return 2;
+ }
+ UNREACHABLE();
+}
+
+// Handle all FPU register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatFPURegisterOrRoundMode(Instruction* instr,
+ const char* format) {
+ DCHECK_EQ(format[0], 'f');
+ if (format[1] == 's') { // 'fs[1-3]: Rs register.
+ if (format[2] == '1') {
+ int reg = instr->Rs1Value();
+ PrintFPURegister(reg);
+ return 3;
+ } else if (format[2] == '2') {
+ int reg = instr->Rs2Value();
+ PrintFPURegister(reg);
+ return 3;
+ } else if (format[2] == '3') {
+ int reg = instr->Rs3Value();
+ PrintFPURegister(reg);
+ return 3;
+ }
+ UNREACHABLE();
+ } else if (format[1] == 'd') { // 'fd: fd register.
+ int reg = instr->RdValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 'r') { // 'frm
+ DCHECK(STRING_STARTS_WITH(format, "frm"));
+ PrintRoundingMode(instr);
+ return 3;
+ }
+ UNREACHABLE();
+}
+
+// Handle all C extension register based formatting in this function to reduce
+// the complexity of FormatOption.
+int Decoder::FormatRvcRegister(Instruction* instr, const char* format) {
+ DCHECK_EQ(format[0], 'C');
+ DCHECK(format[1] == 'r' || format[1] == 'f');
+ if (format[2] == 's') { // 'Crs[12]: Rs register.
+ if (format[3] == '1') {
+ if (format[4] == 's') { // 'Crs1s: 3-bits register
+ int reg = instr->RvcRs1sValue();
+ if (format[1] == 'r') {
+ PrintRegister(reg);
+ } else if (format[1] == 'f') {
+ PrintFPURegister(reg);
+ }
+ return 5;
+ }
+ int reg = instr->RvcRs1Value();
+ if (format[1] == 'r') {
+ PrintRegister(reg);
+ } else if (format[1] == 'f') {
+ PrintFPURegister(reg);
+ }
+ return 4;
+ } else if (format[3] == '2') {
+ if (format[4] == 's') { // 'Crs2s: 3-bits register
+ int reg = instr->RvcRs2sValue();
+ if (format[1] == 'r') {
+ PrintRegister(reg);
+ } else if (format[1] == 'f') {
+ PrintFPURegister(reg);
+ }
+ return 5;
+ }
+ int reg = instr->RvcRs2Value();
+ if (format[1] == 'r') {
+ PrintRegister(reg);
+ } else if (format[1] == 'f') {
+ PrintFPURegister(reg);
+ }
+ return 4;
+ }
+ UNREACHABLE();
+ } else if (format[2] == 'd') { // 'Crd: rd register.
+ int reg = instr->RvcRdValue();
+ if (format[1] == 'r') {
+ PrintRegister(reg);
+ } else if (format[1] == 'f') {
+ PrintFPURegister(reg);
+ }
+ return 3;
+ }
+ UNREACHABLE();
+}
+
+// Handle all C extension immediate-based formatting in this function to reduce
+// the complexity of FormatOption.
+int Decoder::FormatRvcImm(Instruction* instr, const char* format) {
+ // TODO(riscv): add other rvc imm format
+ DCHECK(STRING_STARTS_WITH(format, "Cimm"));
+ if (format[4] == '6') {
+ if (format[5] == 'U') {
+ DCHECK(STRING_STARTS_WITH(format, "Cimm6U"));
+ PrintRvcImm6U(instr);
+ return 6;
+ } else if (format[5] == 'A') {
+ if (format[9] == '1' && format[10] == '6') {
+ DCHECK(STRING_STARTS_WITH(format, "Cimm6Addi16sp"));
+ PrintRvcImm6Addi16sp(instr);
+ return 13;
+ }
+ UNREACHABLE();
+ } else if (format[5] == 'L') {
+ if (format[6] == 'd') {
+ if (format[7] == 's') {
+ DCHECK(STRING_STARTS_WITH(format, "Cimm6Ldsp"));
+ PrintRvcImm6Ldsp(instr);
+ return 9;
+ }
+ } else if (format[6] == 'w') {
+ if (format[7] == 's') {
+ DCHECK(STRING_STARTS_WITH(format, "Cimm6Lwsp"));
+ PrintRvcImm6Lwsp(instr);
+ return 9;
+ }
+ }
+ UNREACHABLE();
+ } else if (format[5] == 'S') {
+ if (format[6] == 'w') {
+ DCHECK(STRING_STARTS_WITH(format, "Cimm6Swsp"));
+ PrintRvcImm6Swsp(instr);
+ return 9;
+ } else if (format[6] == 'd') {
+ DCHECK(STRING_STARTS_WITH(format, "Cimm6Sdsp"));
+ PrintRvcImm6Sdsp(instr);
+ return 9;
+ }
+ UNREACHABLE();
+ }
+ PrintRvcImm6(instr);
+ return 5;
+ } else if (format[4] == '5') {
+ DCHECK(STRING_STARTS_WITH(format, "Cimm5"));
+ if (format[5] == 'W') {
+ DCHECK(STRING_STARTS_WITH(format, "Cimm5W"));
+ PrintRvcImm5W(instr);
+ return 6;
+ } else if (format[5] == 'D') {
+ DCHECK(STRING_STARTS_WITH(format, "Cimm5D"));
+ PrintRvcImm5D(instr);
+ return 6;
+ }
+ UNREACHABLE();
+ } else if (format[4] == '8') {
+ DCHECK(STRING_STARTS_WITH(format, "Cimm8"));
+ if (format[5] == 'A') {
+ DCHECK(STRING_STARTS_WITH(format, "Cimm8Addi4spn"));
+ PrintRvcImm8Addi4spn(instr);
+ return 13;
+ }
+ UNREACHABLE();
+ } else if (format[4] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "Cimm1"));
+ if (format[5] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "Cimm11CJ"));
+ PrintRvcImm11CJ(instr);
+ return 8;
+ }
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+}
+
+// FormatOption takes a formatting string and interprets it based on
+// the current instructions. The format string points to the first
+// character of the option string (the option escape has already been
+// consumed by the caller.) FormatOption returns the number of
+// characters that were consumed from the formatting string.
+int Decoder::FormatOption(Instruction* instr, const char* format) {
+ switch (format[0]) {
+ case 'C': { // `C extension
+ if (format[1] == 'r' || format[1] == 'f') {
+ return FormatRvcRegister(instr, format);
+ } else if (format[1] == 'i') {
+ return FormatRvcImm(instr, format);
+ } else if (format[1] == 's') {
+ DCHECK(STRING_STARTS_WITH(format, "Cshamt"));
+ PrintRvcShamt(instr);
+ return 6;
+ }
+ UNREACHABLE();
+ }
+ case 'c': { // `csr: CSR registers
+ if (format[1] == 's') {
+ if (format[2] == 'r') {
+ PrintCSRReg(instr);
+ return 3;
+ }
+ }
+ UNREACHABLE();
+ }
+ case 'i': { // 'imm12, 'imm12x, 'imm20U, or 'imm20J: Immediates.
+ if (format[3] == '1') {
+ if (format[4] == '2') {
+ DCHECK(STRING_STARTS_WITH(format, "imm12"));
+ if (format[5] == 'x') {
+ PrintImm12X(instr);
+ return 6;
+ }
+ PrintImm12(instr);
+ return 5;
+ }
+ } else if (format[3] == '2' && format[4] == '0') {
+ DCHECK(STRING_STARTS_WITH(format, "imm20"));
+ switch (format[5]) {
+ case 'U':
+ DCHECK(STRING_STARTS_WITH(format, "imm20U"));
+ PrintImm20U(instr);
+ break;
+ case 'J':
+ DCHECK(STRING_STARTS_WITH(format, "imm20J"));
+ PrintImm20J(instr);
+ break;
+ }
+ return 6;
+ }
+ UNREACHABLE();
+ }
+ case 'o': { // 'offB or 'offS: Offsets.
+ if (format[3] == 'B') {
+ DCHECK(STRING_STARTS_WITH(format, "offB"));
+ PrintBranchOffset(instr);
+ return 4;
+ } else if (format[3] == 'S') {
+ DCHECK(STRING_STARTS_WITH(format, "offS"));
+ PrintStoreOffset(instr);
+ return 4;
+ }
+ UNREACHABLE();
+ }
+ case 'r': { // 'r: registers.
+ return FormatRegister(instr, format);
+ }
+ case 'f': { // 'f: FPUregisters or `frm
+ return FormatFPURegisterOrRoundMode(instr, format);
+ }
+ case 'a': { // 'a: Atomic acquire and release.
+ PrintAcquireRelease(instr);
+ return 1;
+ }
+ case 'p': { // `pre
+ DCHECK(STRING_STARTS_WITH(format, "pre"));
+ PrintMemoryOrder(instr, true);
+ return 3;
+ }
+ case 's': { // 's32 or 's64: Shift amount.
+ if (format[1] == '3') {
+ DCHECK(STRING_STARTS_WITH(format, "s32"));
+ PrintShamt32(instr);
+ return 3;
+ } else if (format[1] == '6') {
+ DCHECK(STRING_STARTS_WITH(format, "s64"));
+ PrintShamt(instr);
+ return 3;
+ } else if (format[1] == 'u') {
+ DCHECK(STRING_STARTS_WITH(format, "suc"));
+ PrintMemoryOrder(instr, false);
+ return 3;
+ }
+ UNREACHABLE();
+ }
+ case 'v': { // 'vs1: Raw values from register fields
+ DCHECK(STRING_STARTS_WITH(format, "vs1"));
+ PrintVs1(instr);
+ return 3;
+ }
+ }
+ UNREACHABLE();
+}
+
+// Format takes a formatting string for a whole instruction and prints it into
+// the output buffer. All escaped options are handed to FormatOption to be
+// parsed further.
+void Decoder::Format(Instruction* instr, const char* format) {
+ char cur = *format++;
+ while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+ if (cur == '\'') { // Single quote is used as the formatting escape.
+ format += FormatOption(instr, format);
+ } else {
+ out_buffer_[out_buffer_pos_++] = cur;
+ }
+ cur = *format++;
+ }
+ out_buffer_[out_buffer_pos_] = '\0';
+}
+
+// For currently unimplemented decodings the disassembler calls Unknown(instr),
+// which just prints "unknown" for the instruction bits.
+void Decoder::Unknown(Instruction* instr) { Format(instr, "unknown"); }
+
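Format() and FormatOption() above implement a small format-string language: literal characters are copied to the output, and a single quote escapes an operand specifier (for example 'rd, 'rs1, 'frm, 'Cimm6Ldsp) whose printer consumes a known number of characters from the format string and returns that length. A standalone model of the dispatch, with made-up register names and only the 'rd/'rs1/'rs2 specifiers:

#include <cstring>
#include <iostream>
#include <string>

struct Insn {  // toy stand-in for Instruction: fixed register numbers
  int rd = 10, rs1 = 11, rs2 = 12;
};

std::string Reg(int n) { return "x" + std::to_string(n); }

// Returns how many characters of `option` were consumed, mirroring the
// contract of the real FormatOption.
int FormatOption(const Insn& insn, const char* option, std::string* out) {
  if (std::strncmp(option, "rs1", 3) == 0) { *out += Reg(insn.rs1); return 3; }
  if (std::strncmp(option, "rs2", 3) == 0) { *out += Reg(insn.rs2); return 3; }
  if (std::strncmp(option, "rd", 2) == 0)  { *out += Reg(insn.rd);  return 2; }
  *out += "?";
  return 1;
}

std::string Format(const Insn& insn, const char* format) {
  std::string out;
  for (const char* p = format; *p != '\0'; ++p) {
    if (*p == '\'') {  // single quote is the formatting escape
      p += FormatOption(insn, p + 1, &out);
    } else {
      out += *p;
    }
  }
  return out;
}

int main() {
  Insn insn;
  std::cout << Format(insn, "add 'rd, 'rs1, 'rs2") << "\n";  // add x10, x11, x12
  return 0;
}

The per-specifier return value matters because some specifiers are long; FormatRvcImm above, for instance, returns 13 after printing a 'Cimm6Addi16sp operand.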
+// RISCV Instruction Decode Routine
+void Decoder::DecodeRType(Instruction* instr) {
+ switch (instr->InstructionBits() & kRTypeMask) {
+ case RO_ADD:
+ Format(instr, "add 'rd, 'rs1, 'rs2");
+ break;
+ case RO_SUB:
+ if (instr->Rs1Value() == zero_reg.code())
+ Format(instr, "neg 'rd, rs2");
+ else
+ Format(instr, "sub 'rd, 'rs1, 'rs2");
+ break;
+ case RO_SLL:
+ Format(instr, "sll 'rd, 'rs1, 'rs2");
+ break;
+ case RO_SLT:
+ if (instr->Rs2Value() == zero_reg.code())
+ Format(instr, "sltz 'rd, 'rs1");
+ else if (instr->Rs1Value() == zero_reg.code())
+ Format(instr, "sgtz 'rd, 'rs2");
+ else
+ Format(instr, "slt 'rd, 'rs1, 'rs2");
+ break;
+ case RO_SLTU:
+ if (instr->Rs1Value() == zero_reg.code())
+ Format(instr, "snez 'rd, 'rs2");
+ else
+ Format(instr, "sltu 'rd, 'rs1, 'rs2");
+ break;
+ case RO_XOR:
+ Format(instr, "xor 'rd, 'rs1, 'rs2");
+ break;
+ case RO_SRL:
+ Format(instr, "srl 'rd, 'rs1, 'rs2");
+ break;
+ case RO_SRA:
+ Format(instr, "sra 'rd, 'rs1, 'rs2");
+ break;
+ case RO_OR:
+ Format(instr, "or 'rd, 'rs1, 'rs2");
+ break;
+ case RO_AND:
+ Format(instr, "and 'rd, 'rs1, 'rs2");
+ break;
+#ifdef V8_TARGET_ARCH_64_BIT
+ case RO_ADDW:
+ Format(instr, "addw 'rd, 'rs1, 'rs2");
+ break;
+ case RO_SUBW:
+ if (instr->Rs1Value() == zero_reg.code())
+ Format(instr, "negw 'rd, 'rs2");
+ else
+ Format(instr, "subw 'rd, 'rs1, 'rs2");
+ break;
+ case RO_SLLW:
+ Format(instr, "sllw 'rd, 'rs1, 'rs2");
+ break;
+ case RO_SRLW:
+ Format(instr, "srlw 'rd, 'rs1, 'rs2");
+ break;
+ case RO_SRAW:
+ Format(instr, "sraw 'rd, 'rs1, 'rs2");
+ break;
+#endif /* V8_TARGET_ARCH_64_BIT */
+ // TODO(riscv): Add RISCV M extension macro
+ case RO_MUL:
+ Format(instr, "mul 'rd, 'rs1, 'rs2");
+ break;
+ case RO_MULH:
+ Format(instr, "mulh 'rd, 'rs1, 'rs2");
+ break;
+ case RO_MULHSU:
+ Format(instr, "mulhsu 'rd, 'rs1, 'rs2");
+ break;
+ case RO_MULHU:
+ Format(instr, "mulhu 'rd, 'rs1, 'rs2");
+ break;
+ case RO_DIV:
+ Format(instr, "div 'rd, 'rs1, 'rs2");
+ break;
+ case RO_DIVU:
+ Format(instr, "divu 'rd, 'rs1, 'rs2");
+ break;
+ case RO_REM:
+ Format(instr, "rem 'rd, 'rs1, 'rs2");
+ break;
+ case RO_REMU:
+ Format(instr, "remu 'rd, 'rs1, 'rs2");
+ break;
+#ifdef V8_TARGET_ARCH_64_BIT
+ case RO_MULW:
+ Format(instr, "mulw 'rd, 'rs1, 'rs2");
+ break;
+ case RO_DIVW:
+ Format(instr, "divw 'rd, 'rs1, 'rs2");
+ break;
+ case RO_DIVUW:
+ Format(instr, "divuw 'rd, 'rs1, 'rs2");
+ break;
+ case RO_REMW:
+ Format(instr, "remw 'rd, 'rs1, 'rs2");
+ break;
+ case RO_REMUW:
+ Format(instr, "remuw 'rd, 'rs1, 'rs2");
+ break;
+#endif /*V8_TARGET_ARCH_64_BIT*/
+ // TODO(riscv): End Add RISCV M extension macro
+ default: {
+ switch (instr->BaseOpcode()) {
+ case AMO:
+ DecodeRAType(instr);
+ break;
+ case OP_FP:
+ DecodeRFPType(instr);
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ }
+ }
+}
+
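
The R-type dispatch above works by masking the raw instruction word down to the fields that identify the operation (opcode, funct3, funct7) and switching on the result; anything not matched falls through to the base-opcode switch for AMO and OP_FP. The masks and patterns below are hand-written from the standard RISC-V R-type layout, not V8's constants, and are only meant to make the mechanism concrete.

// Sketch of mask-based R-type dispatch (not V8 code).
#include <cstdint>
#include <cstdio>

namespace {

constexpr uint32_t kOpcodeMask = 0x7F;         // bits 6..0
constexpr uint32_t kFunct3Mask = 0x7u << 12;   // bits 14..12
constexpr uint32_t kFunct7Mask = 0x7Fu << 25;  // bits 31..25
constexpr uint32_t kRTypeSelector = kOpcodeMask | kFunct3Mask | kFunct7Mask;

// add: opcode=0110011, funct3=000, funct7=0000000.
constexpr uint32_t kAddPattern = 0x33;
// sub: same opcode/funct3, funct7=0100000.
constexpr uint32_t kSubPattern = 0x33 | (0x20u << 25);

const char* Mnemonic(uint32_t instruction_bits) {
  switch (instruction_bits & kRTypeSelector) {
    case kAddPattern:
      return "add";
    case kSubPattern:
      return "sub";
    default:
      return "unsupported";
  }
}

}  // namespace

int main() {
  std::printf("%s\n", Mnemonic(0x003100B3));  // add x1, x2, x3
  std::printf("%s\n", Mnemonic(0x403100B3));  // sub x1, x2, x3
  return 0;
}
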
+void Decoder::DecodeRAType(Instruction* instr) {
+ // TODO(riscv): Add macro for RISCV A extension
+ // Special handling for A extension instructions because they use funct5
+ // rather than the full funct7. In the V8 simulator, all A extension
+ // instructions execute purely sequentially; there is no memory address
+ // locking or other synchronization behavior.
+ switch (instr->InstructionBits() & kRATypeMask) {
+ case RO_LR_W:
+ Format(instr, "lr.w'a 'rd, ('rs1)");
+ break;
+ case RO_SC_W:
+ Format(instr, "sc.w'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOSWAP_W:
+ Format(instr, "amoswap.w'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOADD_W:
+ Format(instr, "amoadd.w'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOXOR_W:
+ Format(instr, "amoxor.w'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOAND_W:
+ Format(instr, "amoand.w'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOOR_W:
+ Format(instr, "amoor.w'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOMIN_W:
+ Format(instr, "amomin.w'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOMAX_W:
+ Format(instr, "amomax.w'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOMINU_W:
+ Format(instr, "amominu.w'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOMAXU_W:
+ Format(instr, "amomaxu.w'a 'rd, 'rs2, ('rs1)");
+ break;
+#ifdef V8_TARGET_ARCH_64_BIT
+ case RO_LR_D:
+ Format(instr, "lr.d'a 'rd, ('rs1)");
+ break;
+ case RO_SC_D:
+ Format(instr, "sc.d'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOSWAP_D:
+ Format(instr, "amoswap.d'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOADD_D:
+ Format(instr, "amoadd.d'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOXOR_D:
+ Format(instr, "amoxor.d'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOAND_D:
+ Format(instr, "amoand.d'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOOR_D:
+ Format(instr, "amoor.d'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOMIN_D:
+ Format(instr, "amomin.d'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOMAX_D:
+ Format(instr, "amoswap.d'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOMINU_D:
+ Format(instr, "amominu.d'a 'rd, 'rs2, ('rs1)");
+ break;
+ case RO_AMOMAXU_D:
+ Format(instr, "amomaxu.d'a 'rd, 'rs2, ('rs1)");
+ break;
+#endif /*V8_TARGET_ARCH_64_BIT*/
+ // TODO(riscv): End Add macro for RISCV A extension
+ default: {
+ UNSUPPORTED_RISCV();
+ }
+ }
+}
+
+void Decoder::DecodeRFPType(Instruction* instr) {
+ // OP_FP instructions (F/D extensions) dispatch on funct7 first; some cases
+ // are further disambiguated by funct3 or rs2.
+ // Note that kRFPTypeMask only covers funct7.
+ switch (instr->InstructionBits() & kRFPTypeMask) {
+ // TODO(riscv): Add macro for RISCV F extension
+ case RO_FADD_S:
+ Format(instr, "fadd.s 'fd, 'fs1, 'fs2");
+ break;
+ case RO_FSUB_S:
+ Format(instr, "fsub.s 'fd, 'fs1, 'fs2");
+ break;
+ case RO_FMUL_S:
+ Format(instr, "fmul.s 'fd, 'fs1, 'fs2");
+ break;
+ case RO_FDIV_S:
+ Format(instr, "fdiv.s 'fd, 'fs1, 'fs2");
+ break;
+ case RO_FSQRT_S:
+ Format(instr, "fsqrt.s 'fd, 'fs1");
+ break;
+ case RO_FSGNJ_S: { // RO_FSGNJN_S RO_FSGNJX_S
+ switch (instr->Funct3Value()) {
+ case 0b000: // RO_FSGNJ_S
+ if (instr->Rs1Value() == instr->Rs2Value())
+ Format(instr, "fmv.s 'fd, 'fs1");
+ else
+ Format(instr, "fsgnj.s 'fd, 'fs1, 'fs2");
+ break;
+ case 0b001: // RO_FSGNJN_S
+ if (instr->Rs1Value() == instr->Rs2Value())
+ Format(instr, "fneg.s 'fd, 'fs1");
+ else
+ Format(instr, "fsgnjn.s 'fd, 'fs1, 'fs2");
+ break;
+ case 0b010: // RO_FSGNJX_S
+ if (instr->Rs1Value() == instr->Rs2Value())
+ Format(instr, "fabs.s 'fd, 'fs1");
+ else
+ Format(instr, "fsgnjx.s 'fd, 'fs1, 'fs2");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case RO_FMIN_S: { // RO_FMAX_S
+ switch (instr->Funct3Value()) {
+ case 0b000: // RO_FMIN_S
+ Format(instr, "fmin.s 'fd, 'fs1, 'fs2");
+ break;
+ case 0b001: // RO_FMAX_S
+ Format(instr, "fmax.s 'fd, 'fs1, 'fs2");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case RO_FCVT_W_S: { // RO_FCVT_WU_S , 64F RO_FCVT_L_S RO_FCVT_LU_S
+ switch (instr->Rs2Value()) {
+ case 0b00000: // RO_FCVT_W_S
+ Format(instr, "fcvt.w.s ['frm] 'rd, 'fs1");
+ break;
+ case 0b00001: // RO_FCVT_WU_S
+ Format(instr, "fcvt.wu.s ['frm] 'rd, 'fs1");
+ break;
+#ifdef V8_TARGET_ARCH_64_BIT
+ case 0b00010: // RO_FCVT_L_S
+ Format(instr, "fcvt.l.s ['frm] 'rd, 'fs1");
+ break;
+ case 0b00011: // RO_FCVT_LU_S
+ Format(instr, "fcvt.lu.s ['frm] 'rd, 'fs1");
+ break;
+#endif /* V8_TARGET_ARCH_64_BIT */
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case RO_FMV: { // RO_FCLASS_S
+ if (instr->Rs2Value() != 0b00000) {
+ UNSUPPORTED_RISCV();
+ }
+ switch (instr->Funct3Value()) {
+ case 0b000: // RO_FMV_X_W
+ Format(instr, "fmv.x.w 'rd, 'fs1");
+ break;
+ case 0b001: // RO_FCLASS_S
+ Format(instr, "fclass.s 'rd, 'fs1");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case RO_FLE_S: { // RO_FEQ_S RO_FLT_S RO_FLE_S
+ switch (instr->Funct3Value()) {
+ case 0b010: // RO_FEQ_S
+ Format(instr, "feq.s 'rd, 'fs1, 'fs2");
+ break;
+ case 0b001: // RO_FLT_S
+ Format(instr, "flt.s 'rd, 'fs1, 'fs2");
+ break;
+ case 0b000: // RO_FLE_S
+ Format(instr, "fle.s 'rd, 'fs1, 'fs2");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case RO_FCVT_S_W: { // RO_FCVT_S_WU , 64F RO_FCVT_S_L RO_FCVT_S_LU
+ switch (instr->Rs2Value()) {
+ case 0b00000: // RO_FCVT_S_W
+ Format(instr, "fcvt.s.w 'fd, 'rs1");
+ break;
+ case 0b00001: // RO_FCVT_S_WU
+ Format(instr, "fcvt.s.wu 'fd, 'rs1");
+ break;
+#ifdef V8_TARGET_ARCH_64_BIT
+ case 0b00010: // RO_FCVT_S_L
+ Format(instr, "fcvt.s.l 'fd, 'rs1");
+ break;
+ case 0b00011: // RO_FCVT_S_LU
+ Format(instr, "fcvt.s.lu 'fd, 'rs1");
+ break;
+#endif /* V8_TARGET_ARCH_64_BIT */
+ default: {
+ UNSUPPORTED_RISCV();
+ }
+ }
+ break;
+ }
+ case RO_FMV_W_X: {
+ if (instr->Funct3Value() == 0b000) {
+ Format(instr, "fmv.w.x 'fd, 'rs1");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ // TODO(riscv): Add macro for RISCV D extension
+ case RO_FADD_D:
+ Format(instr, "fadd.d 'fd, 'fs1, 'fs2");
+ break;
+ case RO_FSUB_D:
+ Format(instr, "fsub.d 'fd, 'fs1, 'fs2");
+ break;
+ case RO_FMUL_D:
+ Format(instr, "fmul.d 'fd, 'fs1, 'fs2");
+ break;
+ case RO_FDIV_D:
+ Format(instr, "fdiv.d 'fd, 'fs1, 'fs2");
+ break;
+ case RO_FSQRT_D: {
+ if (instr->Rs2Value() == 0b00000) {
+ Format(instr, "fsqrt.d 'fd, 'fs1");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case RO_FSGNJ_D: { // RO_FSGNJN_D RO_FSGNJX_D
+ switch (instr->Funct3Value()) {
+ case 0b000: // RO_FSGNJ_D
+ if (instr->Rs1Value() == instr->Rs2Value())
+ Format(instr, "fmv.d 'fd, 'fs1");
+ else
+ Format(instr, "fsgnj.d 'fd, 'fs1, 'fs2");
+ break;
+ case 0b001: // RO_FSGNJN_D
+ if (instr->Rs1Value() == instr->Rs2Value())
+ Format(instr, "fneg.d 'fd, 'fs1");
+ else
+ Format(instr, "fsgnjn.d 'fd, 'fs1, 'fs2");
+ break;
+ case 0b010: // RO_FSGNJX_D
+ if (instr->Rs1Value() == instr->Rs2Value())
+ Format(instr, "fabs.d 'fd, 'fs1");
+ else
+ Format(instr, "fsgnjx.d 'fd, 'fs1, 'fs2");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case RO_FMIN_D: { // RO_FMAX_D
+ switch (instr->Funct3Value()) {
+ case 0b000: // RO_FMIN_D
+ Format(instr, "fmin.d 'fd, 'fs1, 'fs2");
+ break;
+ case 0b001: // RO_FMAX_D
+ Format(instr, "fmax.d 'fd, 'fs1, 'fs2");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case (RO_FCVT_S_D & kRFPTypeMask): {
+ if (instr->Rs2Value() == 0b00001) {
+ Format(instr, "fcvt.s.d ['frm] 'fd, 'rs1");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case RO_FCVT_D_S: {
+ if (instr->Rs2Value() == 0b00000) {
+ Format(instr, "fcvt.d.s 'fd, 'fs1");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case RO_FLE_D: { // RO_FEQ_D RO_FLT_D RO_FLE_D
+ switch (instr->Funct3Value()) {
+ case 0b010: // RO_FEQ_D
+ Format(instr, "feq.d 'rd, 'fs1, 'fs2");
+ break;
+ case 0b001: // RO_FLT_D
+ Format(instr, "flt.d 'rd, 'fs1, 'fs2");
+ break;
+ case 0b000: // RO_FLE_D
+ Format(instr, "fle.d 'rd, 'fs1, 'fs2");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case (RO_FCLASS_D & kRFPTypeMask): { // RO_FCLASS_D , 64D RO_FMV_X_D
+ if (instr->Rs2Value() != 0b00000) {
+ UNSUPPORTED_RISCV();
+ break;
+ }
+ switch (instr->Funct3Value()) {
+ case 0b001: // RO_FCLASS_D
+ Format(instr, "fclass.d 'rd, 'fs1");
+ break;
+#ifdef V8_TARGET_ARCH_64_BIT
+ case 0b000: // RO_FMV_X_D
+ Format(instr, "fmv.x.d 'rd, 'fs1");
+ break;
+#endif /* V8_TARGET_ARCH_64_BIT */
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case RO_FCVT_W_D: { // RO_FCVT_WU_D , 64F RO_FCVT_L_D RO_FCVT_LU_D
+ switch (instr->Rs2Value()) {
+ case 0b00000: // RO_FCVT_W_D
+ Format(instr, "fcvt.w.d ['frm] 'rd, 'fs1");
+ break;
+ case 0b00001: // RO_FCVT_WU_D
+ Format(instr, "fcvt.wu.d ['frm] 'rd, 'fs1");
+ break;
+#ifdef V8_TARGET_ARCH_64_BIT
+ case 0b00010: // RO_FCVT_L_D
+ Format(instr, "fcvt.l.d ['frm] 'rd, 'fs1");
+ break;
+ case 0b00011: // RO_FCVT_LU_D
+ Format(instr, "fcvt.lu.d ['frm] 'rd, 'fs1");
+ break;
+#endif /* V8_TARGET_ARCH_64_BIT */
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ case RO_FCVT_D_W: { // RO_FCVT_D_WU , 64F RO_FCVT_D_L RO_FCVT_D_LU
+ switch (instr->Rs2Value()) {
+ case 0b00000: // RO_FCVT_D_W
+ Format(instr, "fcvt.d.w 'fd, 'rs1");
+ break;
+ case 0b00001: // RO_FCVT_D_WU
+ Format(instr, "fcvt.d.wu 'fd, 'rs1");
+ break;
+#ifdef V8_TARGET_ARCH_64_BIT
+ case 0b00010: // RO_FCVT_D_L
+ Format(instr, "fcvt.d.l 'fd, 'rs1");
+ break;
+ case 0b00011: // RO_FCVT_D_LU
+ Format(instr, "fcvt.d.lu 'fd, 'rs1");
+ break;
+#endif /* V8_TARGET_ARCH_64_BIT */
+ default:
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+#ifdef V8_TARGET_ARCH_64_BIT
+ case RO_FMV_D_X: {
+ if (instr->Funct3Value() == 0b000 && instr->Rs2Value() == 0b00000) {
+ Format(instr, "fmv.d.x 'fd, 'rs1");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+#endif /* V8_TARGET_ARCH_64_BIT */
+ default: {
+ UNSUPPORTED_RISCV();
+ }
+ }
+}
+
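
The OP_FP path above is a two-level dispatch: funct7 selects the operation family, then either funct3 (the fsgnj/fsgnjn/fsgnjx group) or rs2 (the fcvt.* variants) picks the concrete instruction. A self-contained sketch of that shape, restricted to the single-precision sign-injection family and using hand-written field helpers rather than V8's accessors:

// Sketch of the funct7-then-funct3 dispatch used for OP_FP (not V8 code).
#include <cstdint>
#include <cstdio>

namespace {

constexpr uint32_t Funct7(uint32_t bits) { return bits >> 25; }
constexpr uint32_t Funct3(uint32_t bits) { return (bits >> 12) & 0x7; }

const char* DecodeSignInjection(uint32_t bits) {
  if (Funct7(bits) != 0x10) return "not an fsgnj.s-family instruction";
  switch (Funct3(bits)) {
    case 0b000:
      return "fsgnj.s";
    case 0b001:
      return "fsgnjn.s";
    case 0b010:
      return "fsgnjx.s";
    default:
      return "unsupported";
  }
}

}  // namespace

int main() {
  // fsgnjn.s f1, f2, f3: funct7=0010000, rs2=3, rs1=2, funct3=001, rd=1,
  // opcode=1010011 (OP-FP).
  uint32_t bits = (0x10u << 25) | (3u << 20) | (2u << 15) | (0b001u << 12) |
                  (1u << 7) | 0x53;
  std::printf("%s\n", DecodeSignInjection(bits));  // fsgnjn.s
  return 0;
}
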
+void Decoder::DecodeR4Type(Instruction* instr) {
+ switch (instr->InstructionBits() & kR4TypeMask) {
+ // TODO(riscv): use F Extension macro block
+ case RO_FMADD_S:
+ Format(instr, "fmadd.s 'fd, 'fs1, 'fs2, 'fs3");
+ break;
+ case RO_FMSUB_S:
+ Format(instr, "fmsub.s 'fd, 'fs1, 'fs2, 'fs3");
+ break;
+ case RO_FNMSUB_S:
+ Format(instr, "fnmsub.s 'fd, 'fs1, 'fs2, 'fs3");
+ break;
+ case RO_FNMADD_S:
+ Format(instr, "fnmadd.s 'fd, 'fs1, 'fs2, 'fs3");
+ break;
+ // TODO(riscv): use F Extension macro block
+ case RO_FMADD_D:
+ Format(instr, "fmadd.d 'fd, 'fs1, 'fs2, 'fs3");
+ break;
+ case RO_FMSUB_D:
+ Format(instr, "fmsub.d 'fd, 'fs1, 'fs2, 'fs3");
+ break;
+ case RO_FNMSUB_D:
+ Format(instr, "fnmsub.d 'fd, 'fs1, 'fs2, 'fs3");
+ break;
+ case RO_FNMADD_D:
+ Format(instr, "fnmadd.d 'fd, 'fs1, 'fs2, 'fs3");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+void Decoder::DecodeIType(Instruction* instr) {
+ switch (instr->InstructionBits() & kITypeMask) {
+ case RO_JALR:
+ if (instr->RdValue() == zero_reg.code() &&
+ instr->Rs1Value() == ra.code() && instr->Imm12Value() == 0)
+ Format(instr, "ret");
+ else if (instr->RdValue() == zero_reg.code() && instr->Imm12Value() == 0)
+ Format(instr, "jr 'rs1");
+ else if (instr->RdValue() == ra.code() && instr->Imm12Value() == 0)
+ Format(instr, "jalr 'rs1");
+ else
+ Format(instr, "jalr 'rd, 'imm12('rs1)");
+ break;
+ case RO_LB:
+ Format(instr, "lb 'rd, 'imm12('rs1)");
+ break;
+ case RO_LH:
+ Format(instr, "lh 'rd, 'imm12('rs1)");
+ break;
+ case RO_LW:
+ Format(instr, "lw 'rd, 'imm12('rs1)");
+ break;
+ case RO_LBU:
+ Format(instr, "lbu 'rd, 'imm12('rs1)");
+ break;
+ case RO_LHU:
+ Format(instr, "lhu 'rd, 'imm12('rs1)");
+ break;
+#ifdef V8_TARGET_ARCH_64_BIT
+ case RO_LWU:
+ Format(instr, "lwu 'rd, 'imm12('rs1)");
+ break;
+ case RO_LD:
+ Format(instr, "ld 'rd, 'imm12('rs1)");
+ break;
+#endif /*V8_TARGET_ARCH_64_BIT*/
+ case RO_ADDI:
+ if (instr->Imm12Value() == 0) {
+ if (instr->RdValue() == zero_reg.code() &&
+ instr->Rs1Value() == zero_reg.code())
+ Format(instr, "nop");
+ else
+ Format(instr, "mv 'rd, 'rs1");
+ } else if (instr->Rs1Value() == zero_reg.code()) {
+ Format(instr, "li 'rd, 'imm12");
+ } else {
+ Format(instr, "addi 'rd, 'rs1, 'imm12");
+ }
+ break;
+ case RO_SLTI:
+ Format(instr, "slti 'rd, 'rs1, 'imm12");
+ break;
+ case RO_SLTIU:
+ if (instr->Imm12Value() == 1)
+ Format(instr, "seqz 'rd, 'rs1");
+ else
+ Format(instr, "sltiu 'rd, 'rs1, 'imm12");
+ break;
+ case RO_XORI:
+ if (instr->Imm12Value() == -1)
+ Format(instr, "not 'rd, 'rs1");
+ else
+ Format(instr, "xori 'rd, 'rs1, 'imm12x");
+ break;
+ case RO_ORI:
+ Format(instr, "ori 'rd, 'rs1, 'imm12x");
+ break;
+ case RO_ANDI:
+ Format(instr, "andi 'rd, 'rs1, 'imm12x");
+ break;
+ case RO_SLLI:
+ Format(instr, "slli 'rd, 'rs1, 's64");
+ break;
+ case RO_SRLI: { // RO_SRAI
+ if (!instr->IsArithShift()) {
+ Format(instr, "srli 'rd, 'rs1, 's64");
+ } else {
+ Format(instr, "srai 'rd, 'rs1, 's64");
+ }
+ break;
+ }
+#ifdef V8_TARGET_ARCH_64_BIT
+ case RO_ADDIW:
+ if (instr->Imm12Value() == 0)
+ Format(instr, "sext.w 'rd, 'rs1");
+ else
+ Format(instr, "addiw 'rd, 'rs1, 'imm12");
+ break;
+ case RO_SLLIW:
+ Format(instr, "slliw 'rd, 'rs1, 's32");
+ break;
+ case RO_SRLIW: { // RO_SRAIW
+ if (!instr->IsArithShift()) {
+ Format(instr, "srliw 'rd, 'rs1, 's32");
+ } else {
+ Format(instr, "sraiw 'rd, 'rs1, 's32");
+ }
+ break;
+ }
+#endif /*V8_TARGET_ARCH_64_BIT*/
+ case RO_FENCE:
+ if (instr->MemoryOrder(true) == PSIORW &&
+ instr->MemoryOrder(false) == PSIORW)
+ Format(instr, "fence");
+ else
+ Format(instr, "fence 'pre, 'suc");
+ break;
+ case RO_ECALL: { // RO_EBREAK
+ if (instr->Imm12Value() == 0) { // ECALL
+ Format(instr, "ecall");
+ } else if (instr->Imm12Value() == 1) { // EBREAK
+ Format(instr, "ebreak");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
+ // TODO(riscv): use Zifencei Standard Extension macro block
+ case RO_FENCE_I:
+ Format(instr, "fence.i");
+ break;
+ // TODO(riscv): use Zicsr Standard Extension macro block
+ case RO_CSRRW:
+ if (instr->CsrValue() == csr_fcsr) {
+ if (instr->RdValue() == zero_reg.code())
+ Format(instr, "fscsr 'rs1");
+ else
+ Format(instr, "fscsr 'rd, 'rs1");
+ } else if (instr->CsrValue() == csr_frm) {
+ if (instr->RdValue() == zero_reg.code())
+ Format(instr, "fsrm 'rs1");
+ else
+ Format(instr, "fsrm 'rd, 'rs1");
+ } else if (instr->CsrValue() == csr_fflags) {
+ if (instr->RdValue() == zero_reg.code())
+ Format(instr, "fsflags 'rs1");
+ else
+ Format(instr, "fsflags 'rd, 'rs1");
+ } else if (instr->RdValue() == zero_reg.code()) {
+ Format(instr, "csrw 'csr, 'rs1");
+ } else {
+ Format(instr, "csrrw 'rd, 'csr, 'rs1");
+ }
+ break;
+ case RO_CSRRS:
+ if (instr->Rs1Value() == zero_reg.code()) {
+ switch (instr->CsrValue()) {
+ case csr_instret:
+ Format(instr, "rdinstret 'rd");
+ break;
+ case csr_instreth:
+ Format(instr, "rdinstreth 'rd");
+ break;
+ case csr_time:
+ Format(instr, "rdtime 'rd");
+ break;
+ case csr_timeh:
+ Format(instr, "rdtimeh 'rd");
+ break;
+ case csr_cycle:
+ Format(instr, "rdcycle 'rd");
+ break;
+ case csr_cycleh:
+ Format(instr, "rdcycleh 'rd");
+ break;
+ case csr_fflags:
+ Format(instr, "frflags 'rd");
+ break;
+ case csr_frm:
+ Format(instr, "frrm 'rd");
+ break;
+ case csr_fcsr:
+ Format(instr, "frcsr 'rd");
+ break;
+ default:
+ // An unrecognized CSR read with rs1 == zero still disassembles
+ // as the generic csrr form.
+ Format(instr, "csrr 'rd, 'csr");
+ break;
+ }
+ } else if (instr->RdValue() == zero_reg.code()) {
+ Format(instr, "csrs 'csr, 'rs1");
+ } else {
+ Format(instr, "csrrs 'rd, 'csr, 'rs1");
+ }
+ break;
+ case RO_CSRRC:
+ if (instr->RdValue() == zero_reg.code())
+ Format(instr, "csrc 'csr, 'rs1");
+ else
+ Format(instr, "csrrc 'rd, 'csr, 'rs1");
+ break;
+ case RO_CSRRWI:
+ if (instr->RdValue() == zero_reg.code())
+ Format(instr, "csrwi 'csr, 'vs1");
+ else
+ Format(instr, "csrrwi 'rd, 'csr, 'vs1");
+ break;
+ case RO_CSRRSI:
+ if (instr->RdValue() == zero_reg.code())
+ Format(instr, "csrsi 'csr, 'vs1");
+ else
+ Format(instr, "csrrsi 'rd, 'csr, 'vs1");
+ break;
+ case RO_CSRRCI:
+ if (instr->RdValue() == zero_reg.code())
+ Format(instr, "csrci 'csr, 'vs1");
+ else
+ Format(instr, "csrrci 'rd, 'csr, 'vs1");
+ break;
+ // TODO(riscv): use F Extension macro block
+ case RO_FLW:
+ Format(instr, "flw 'fd, 'imm12('rs1)");
+ break;
+ // TODO(riscv): use D Extension macro block
+ case RO_FLD:
+ Format(instr, "fld 'fd, 'imm12('rs1)");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+void Decoder::DecodeSType(Instruction* instr) {
+ switch (instr->InstructionBits() & kSTypeMask) {
+ case RO_SB:
+ Format(instr, "sb 'rs2, 'offS('rs1)");
+ break;
+ case RO_SH:
+ Format(instr, "sh 'rs2, 'offS('rs1)");
+ break;
+ case RO_SW:
+ Format(instr, "sw 'rs2, 'offS('rs1)");
+ break;
+#ifdef V8_TARGET_ARCH_64_BIT
+ case RO_SD:
+ Format(instr, "sd 'rs2, 'offS('rs1)");
+ break;
+#endif /*V8_TARGET_ARCH_64_BIT*/
+ // TODO(riscv): use F Extension macro block
+ case RO_FSW:
+ Format(instr, "fsw 'fs2, 'offS('rs1)");
+ break;
+ // TODO(riscv): use D Extension macro block
+ case RO_FSD:
+ Format(instr, "fsd 'fs2, 'offS('rs1)");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+void Decoder::DecodeBType(Instruction* instr) {
+ switch (instr->InstructionBits() & kBTypeMask) {
+ case RO_BEQ:
+ Format(instr, "beq 'rs1, 'rs2, 'offB");
+ break;
+ case RO_BNE:
+ Format(instr, "bne 'rs1, 'rs2, 'offB");
+ break;
+ case RO_BLT:
+ Format(instr, "blt 'rs1, 'rs2, 'offB");
+ break;
+ case RO_BGE:
+ Format(instr, "bge 'rs1, 'rs2, 'offB");
+ break;
+ case RO_BLTU:
+ Format(instr, "bltu 'rs1, 'rs2, 'offB");
+ break;
+ case RO_BGEU:
+ Format(instr, "bgeu 'rs1, 'rs2, 'offB");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+void Decoder::DecodeUType(Instruction* instr) {
+ // U Type doesn't need an additional mask
+ switch (instr->BaseOpcodeFieldRaw()) {
+ case RO_LUI:
+ Format(instr, "lui 'rd, 'imm20U");
+ break;
+ case RO_AUIPC:
+ Format(instr, "auipc 'rd, 'imm20U");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+void Decoder::DecodeJType(Instruction* instr) {
+ // J Type doesn't need an additional mask
+ switch (instr->BaseOpcodeValue()) {
+ case RO_JAL:
+ if (instr->RdValue() == zero_reg.code())
+ Format(instr, "j 'imm20J");
+ else if (instr->RdValue() == ra.code())
+ Format(instr, "jal 'imm20J");
+ else
+ Format(instr, "jal 'rd, 'imm20J");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+void Decoder::DecodeCRType(Instruction* instr) {
+ switch (instr->RvcFunct4Value()) {
+ case 0b1000:
+ if (instr->RvcRs1Value() != 0 && instr->RvcRs2Value() == 0)
+ Format(instr, "jr 'Crs1");
+ else if (instr->RvcRdValue() != 0 && instr->RvcRs2Value() != 0)
+ Format(instr, "mv 'Crd, 'Crs2");
+ else
+ UNSUPPORTED_RISCV();
+ break;
+ case 0b1001:
+ if (instr->RvcRs1Value() == 0 && instr->RvcRs2Value() == 0)
+ Format(instr, "ebreak");
+ else if (instr->RvcRdValue() != 0 && instr->RvcRs2Value() == 0)
+ Format(instr, "jalr 'Crs1");
+ else if (instr->RvcRdValue() != 0 && instr->RvcRs2Value() != 0)
+ Format(instr, "add 'Crd, 'Crd, 'Crs2");
+ else
+ UNSUPPORTED_RISCV();
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+void Decoder::DecodeCAType(Instruction* instr) {
+ switch (instr->InstructionBits() & kCATypeMask) {
+ case RO_C_SUB:
+ Format(instr, "sub 'Crs1s, 'Crs1s, 'Crs2s");
+ break;
+ case RO_C_XOR:
+ Format(instr, "xor 'Crs1s, 'Crs1s, 'Crs2s");
+ break;
+ case RO_C_OR:
+ Format(instr, "or 'Crs1s, 'Crs1s, 'Crs2s");
+ break;
+ case RO_C_AND:
+ Format(instr, "and 'Crs1s, 'Crs1s, 'Crs2s");
+ break;
+ case RO_C_SUBW:
+ Format(instr, "subw 'Crs1s, 'Crs1s, 'Crs2s");
+ break;
+ case RO_C_ADDW:
+ Format(instr, "addw 'Crs1s, 'Crs1s, 'Crs2s");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+void Decoder::DecodeCIType(Instruction* instr) {
+ switch (instr->RvcOpcode()) {
+ case RO_C_NOP_ADDI:
+ if (instr->RvcRdValue() == 0)
+ Format(instr, "nop");
+ else
+ Format(instr, "addi 'Crd, 'Crd, 'Cimm6");
+ break;
+ case RO_C_ADDIW:
+ Format(instr, "addiw 'Crd, 'Crd, 'Cimm6");
+ break;
+ case RO_C_LI:
+ Format(instr, "li 'Crd, 'Cimm6");
+ break;
+ case RO_C_LUI_ADD:
+ if (instr->RvcRdValue() == 2)
+ Format(instr, "addi sp, sp, 'Cimm6Addi16sp");
+ else if (instr->RvcRdValue() != 0 && instr->RvcRdValue() != 2)
+ Format(instr, "lui 'Crd, 'Cimm6U");
+ else
+ UNSUPPORTED_RISCV();
+ break;
+ case RO_C_SLLI:
+ Format(instr, "slli 'Crd, 'Crd, 'Cshamt");
+ break;
+ case RO_C_FLDSP:
+ Format(instr, "fld 'Cfd, 'Cimm6Ldsp(sp)");
+ break;
+ case RO_C_LWSP:
+ Format(instr, "lw 'Crd, 'Cimm6Lwsp(sp)");
+ break;
+ case RO_C_LDSP:
+ Format(instr, "ld 'Crd, 'Cimm6Ldsp(sp)");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+void Decoder::DecodeCIWType(Instruction* instr) {
+ switch (instr->RvcOpcode()) {
+ case RO_C_ADDI4SPN:
+ Format(instr, "addi 'Crs2s, sp, 'Cimm8Addi4spn");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+void Decoder::DecodeCSSType(Instruction* instr) {
+ switch (instr->RvcOpcode()) {
+ case RO_C_SWSP:
+ Format(instr, "sw 'Crs2, 'Cimm6Swsp(sp)");
+ break;
+ case RO_C_SDSP:
+ Format(instr, "sd 'Crs2, 'Cimm6Sdsp(sp)");
+ break;
+ case RO_C_FSDSP:
+ Format(instr, "fsd 'Cfs2, 'Cimm6Sdsp(sp)");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+void Decoder::DecodeCLType(Instruction* instr) {
+ switch (instr->RvcOpcode()) {
+ case RO_C_FLD:
+ Format(instr, "fld 'Cfs2s, 'Cimm5D('Crs1s)");
+ break;
+ case RO_C_LW:
+ Format(instr, "lw 'Crs2s, 'Cimm5W('Crs1s)");
+ break;
+ case RO_C_LD:
+ Format(instr, "ld 'Crs2s, 'Cimm5D('Crs1s)");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+void Decoder::DecodeCSType(Instruction* instr) {
+ switch (instr->RvcOpcode()) {
+ case RO_C_FSD:
+ Format(instr, "fsd 'Cfs2s, 'Cimm5D('Crs1s)");
+ break;
+ case RO_C_SW:
+ Format(instr, "sw 'Crs2s, 'Cimm5W('Crs1s)");
+ break;
+ case RO_C_SD:
+ Format(instr, "sd 'Crs2s, 'Cimm5D('Crs1s)");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+void Decoder::DecodeCJType(Instruction* instr) {
+ switch (instr->RvcOpcode()) {
+ case RO_C_J:
+ Format(instr, "j 'Cimm11CJ");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
+}
+
+// Disassemble the instruction at *instr_ptr into the output buffer.
+// Returns the number of bytes decoded: 4 for standard instructions and 2 for
+// compressed (RVC) instructions, as reported by InstructionSize().
+int Decoder::InstructionDecode(byte* instr_ptr) {
+ Instruction* instr = Instruction::At(instr_ptr);
+ // Print raw instruction bytes.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x ",
+ instr->InstructionBits());
+ switch (instr->InstructionType()) {
+ case Instruction::kRType:
+ DecodeRType(instr);
+ break;
+ case Instruction::kR4Type:
+ DecodeR4Type(instr);
+ break;
+ case Instruction::kIType:
+ DecodeIType(instr);
+ break;
+ case Instruction::kSType:
+ DecodeSType(instr);
+ break;
+ case Instruction::kBType:
+ DecodeBType(instr);
+ break;
+ case Instruction::kUType:
+ DecodeUType(instr);
+ break;
+ case Instruction::kJType:
+ DecodeJType(instr);
+ break;
+ case Instruction::kCRType:
+ DecodeCRType(instr);
+ break;
+ case Instruction::kCAType:
+ DecodeCAType(instr);
+ break;
+ case Instruction::kCJType:
+ DecodeCJType(instr);
+ break;
+ case Instruction::kCIType:
+ DecodeCIType(instr);
+ break;
+ case Instruction::kCIWType:
+ DecodeCIWType(instr);
+ break;
+ case Instruction::kCSSType:
+ DecodeCSSType(instr);
+ break;
+ case Instruction::kCLType:
+ DecodeCLType(instr);
+ break;
+ case Instruction::kCSType:
+ DecodeCSType(instr);
+ break;
+ default:
+ Format(instr, "UNSUPPORTED");
+ UNSUPPORTED_RISCV();
+ }
+ return instr->InstructionSize();
+}
+
+} // namespace internal
+} // namespace v8
+
+//------------------------------------------------------------------------------
+
+namespace disasm {
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+ v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
+ return tmp_buffer_.begin();
+}
+
+const char* NameConverter::NameOfConstant(byte* addr) const {
+ return NameOfAddress(addr);
+}
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+ return v8::internal::Registers::Name(reg);
+}
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+ return v8::internal::FPURegisters::Name(reg);
+}
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+ UNREACHABLE(); // RISC-V does not have the concept of a byte register.
+ return "nobytereg";
+}
+
+const char* NameConverter::NameInCode(byte* addr) const {
+ // The default name converter is called for unknown code. So we will not try
+ // to access any memory.
+ return "";
+}
+
+//------------------------------------------------------------------------------
+
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+ byte* instruction) {
+ v8::internal::Decoder d(converter_, buffer);
+ return d.InstructionDecode(instruction);
+}
+
+// The RISC-V assembler does not currently use constant pools.
+int Disassembler::ConstantPoolSizeAt(byte* instruction) {
+ return v8::internal::Assembler::ConstantPoolSizeAt(
+ reinterpret_cast<v8::internal::Instruction*>(instruction));
+}
+
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
+ UnimplementedOpcodeAction unimplemented_action) {
+ NameConverter converter;
+ Disassembler d(converter, unimplemented_action);
+ for (byte* pc = begin; pc < end;) {
+ v8::internal::EmbeddedVector<char, 128> buffer;
+ buffer[0] = '\0';
+ byte* prev_pc = pc;
+ pc += d.InstructionDecode(buffer, pc);
+ v8::internal::PrintF(f, "%p %08x %s\n", static_cast<void*>(prev_pc),
+ *reinterpret_cast<uint32_t*>(prev_pc), buffer.begin());
+ }
+}
+
+#undef STRING_STARTS_WITH
+
+} // namespace disasm
+
+#endif // V8_TARGET_ARCH_RISCV64
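
Taken together, the driver pattern behind Disassembler::Disassemble above is: decode one instruction, print it, and advance by however many bytes the decoder reports, which is what lets 4-byte standard instructions and 2-byte compressed (RVC) instructions coexist in one loop. The sketch below shows only that loop; the decode step is a stub keyed on the RISC-V length encoding (low two bits == 0b11 means a 32-bit instruction) and is not V8's decoder.

// Sketch of a variable-length disassembly driver loop (not V8 code).
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

namespace {

int DecodeOne(const uint8_t* pc, char* buffer, std::size_t buffer_size) {
  uint16_t low_half = static_cast<uint16_t>(pc[0]) |
                      (static_cast<uint16_t>(pc[1]) << 8);
  bool compressed = (low_half & 0x3) != 0x3;
  std::snprintf(buffer, buffer_size, "%s",
                compressed ? "<rvc instr>" : "<instr>");
  return compressed ? 2 : 4;
}

}  // namespace

int main() {
  // Arbitrary bytes: one 32-bit slot followed by two 16-bit slots.
  std::vector<uint8_t> code = {0xB3, 0x00, 0x31, 0x00, 0x01, 0x45, 0x82, 0x80};
  for (std::size_t offset = 0; offset < code.size();) {
    char buffer[32];
    int size = DecodeOne(code.data() + offset, buffer, sizeof(buffer));
    std::printf("+%zu: %s (%d bytes)\n", offset, buffer, size);
    offset += size;
  }
  return 0;
}
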
diff --git a/deps/v8/src/diagnostics/riscv64/unwinder-riscv64.cc b/deps/v8/src/diagnostics/riscv64/unwinder-riscv64.cc
new file mode 100644
index 0000000000..ccfb9268ea
--- /dev/null
+++ b/deps/v8/src/diagnostics/riscv64/unwinder-riscv64.cc
@@ -0,0 +1,12 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/diagnostics/unwinder.h"
+
+namespace v8 {
+
+void GetCalleeSavedRegistersFromEntryFrame(void* fp,
+ RegisterState* register_state) {}
+
+} // namespace v8
diff --git a/deps/v8/src/diagnostics/unwinding-info-win64.cc b/deps/v8/src/diagnostics/unwinding-info-win64.cc
index 54978eeb74..9a5f7069e7 100644
--- a/deps/v8/src/diagnostics/unwinding-info-win64.cc
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.cc
@@ -17,37 +17,6 @@
#error "Unsupported OS"
#endif // V8_OS_WIN_X64
-// Forward declaration to keep this independent of Win8
-NTSYSAPI
-DWORD
-NTAPI
-RtlAddGrowableFunctionTable(
- _Out_ PVOID* DynamicTable,
- _In_reads_(MaximumEntryCount) PRUNTIME_FUNCTION FunctionTable,
- _In_ DWORD EntryCount,
- _In_ DWORD MaximumEntryCount,
- _In_ ULONG_PTR RangeBase,
- _In_ ULONG_PTR RangeEnd
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlGrowFunctionTable(
- _Inout_ PVOID DynamicTable,
- _In_ DWORD NewEntryCount
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlDeleteGrowableFunctionTable(
- _In_ PVOID DynamicTable
- );
-
-
namespace v8 {
namespace internal {
namespace win64_unwindinfo {
diff --git a/deps/v8/src/diagnostics/x64/disasm-x64.cc b/deps/v8/src/diagnostics/x64/disasm-x64.cc
index 25b16df6df..f988741518 100644
--- a/deps/v8/src/diagnostics/x64/disasm-x64.cc
+++ b/deps/v8/src/diagnostics/x64/disasm-x64.cc
@@ -1173,6 +1173,10 @@ int DisassemblerX64::AVXInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
break;
+ case 0xE6:
+ AppendToBuffer("vcvtdq2pd %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
default:
UnimplementedInstruction();
}
@@ -1441,45 +1445,6 @@ int DisassemblerX64::AVXInstruction(byte* data) {
AppendToBuffer("vmovmskps %s,", NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
break;
- case 0x51:
- case 0x52:
- case 0x53: {
- const char* const pseudo_op[] = {"vsqrtps", "vrsqrtps", "vrcpps"};
-
- AppendToBuffer("%s %s,", pseudo_op[opcode - 0x51],
- NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- break;
- }
- case 0x5A:
- case 0x5B: {
- const char* const pseudo_op[] = {"vcvtps2pd", "vcvtdq2ps"};
-
- AppendToBuffer("%s %s,", pseudo_op[opcode - 0x5A],
- NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- break;
- }
- case 0x54:
- case 0x55:
- case 0x56:
- case 0x57:
- case 0x58:
- case 0x59:
- case 0x5C:
- case 0x5D:
- case 0x5E:
- case 0x5F: {
- const char* const pseudo_op[] = {
- "vandps", "vandnps", "vorps", "vxorps", "vaddps", "vmulps",
- "", "", "vsubps", "vminps", "vdivps", "vmaxps",
- };
-
- AppendToBuffer("%s %s,%s,", pseudo_op[opcode - 0x54],
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- }
case 0xC2: {
AppendToBuffer("vcmpps %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -1495,6 +1460,21 @@ int DisassemblerX64::AVXInstruction(byte* data) {
AppendToBuffer(",0x%x", *current++);
break;
}
+#define SSE_UNOP_CASE(instruction, unused, code) \
+ case 0x##code: \
+ AppendToBuffer("v" #instruction " %s,", NameOfXMMRegister(regop)); \
+ current += PrintRightXMMOperand(current); \
+ break;
+ SSE_UNOP_INSTRUCTION_LIST(SSE_UNOP_CASE)
+#undef SSE_UNOP_CASE
+#define SSE_BINOP_CASE(instruction, unused, code) \
+ case 0x##code: \
+ AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop), \
+ NameOfXMMRegister(vvvv)); \
+ current += PrintRightXMMOperand(current); \
+ break;
+ SSE_BINOP_INSTRUCTION_LIST(SSE_BINOP_CASE)
+#undef SSE_BINOP_CASE
default:
UnimplementedInstruction();
}
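
The hunk above replaces hand-maintained opcode tables with cases generated from V8's SSE_UNOP_INSTRUCTION_LIST and SSE_BINOP_INSTRUCTION_LIST macros. Those are list ("X") macros: each applies a caller-supplied macro to a series of (mnemonic, ..., opcode) entries, so switch cases and any related tables are generated from a single list. The sketch below uses an invented list with the three opcodes visible in the removed code (0x51-0x53); the real lists and their entries live in V8's SSE instruction headers.

// Sketch of the X-macro pattern used by SSE_*_INSTRUCTION_LIST (not V8 code).
#include <cstdint>
#include <cstdio>

#define DEMO_UNOP_INSTRUCTION_LIST(V) \
  V(sqrtps, unused, 51)               \
  V(rsqrtps, unused, 52)              \
  V(rcpps, unused, 53)

const char* MnemonicForOpcode(uint8_t opcode) {
  switch (opcode) {
#define DEMO_CASE(instruction, unused, code) \
  case 0x##code:                             \
    return #instruction;
    DEMO_UNOP_INSTRUCTION_LIST(DEMO_CASE)
#undef DEMO_CASE
    default:
      return "unimplemented";
  }
}

int main() {
  std::printf("%s\n", MnemonicForOpcode(0x52));  // rsqrtps
  std::printf("%s\n", MnemonicForOpcode(0x60));  // unimplemented
  return 0;
}
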
@@ -2071,6 +2051,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
AppendToBuffer("cmp%sss %s,%s", cmp_pseudo_op[current[1]],
NameOfXMMRegister(regop), NameOfXMMRegister(rm));
current += 2;
+ } else if (opcode == 0xE6) {
+ current += PrintOperands("cvtdq2pd", XMMREG_XMMOPER_OP_ORDER, current);
} else {
UnimplementedInstruction();
}
@@ -2127,14 +2109,6 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
const InstructionDesc& idesc = cmov_instructions[condition];
byte_size_operand_ = idesc.byte_size_operation;
current += PrintOperands(idesc.mnem, idesc.op_order_, current);
- } else if (opcode >= 0x51 && opcode <= 0x5F) {
- const char* const pseudo_op[] = {
- "sqrtps", "rsqrtps", "rcpps", "andps", "andnps",
- "orps", "xorps", "addps", "mulps", "cvtps2pd",
- "cvtdq2ps", "subps", "minps", "divps", "maxps",
- };
- current += PrintOperands(pseudo_op[opcode - 0x51], XMMREG_XMMOPER_OP_ORDER,
- current);
} else if (opcode == 0xC0) {
byte_size_operand_ = true;
current += PrintOperands("xadd", OPER_REG_OP_ORDER, current);
@@ -2207,6 +2181,14 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0xAE && (data[2] & 0xF8) == 0xE8) {
AppendToBuffer("lfence");
current = data + 3;
+ // clang-format off
+#define SSE_DISASM_CASE(instruction, unused, code) \
+ } else if (opcode == 0x##code) { \
+ current += PrintOperands(#instruction, XMMREG_XMMOPER_OP_ORDER, current);
+ SSE_UNOP_INSTRUCTION_LIST(SSE_DISASM_CASE)
+ SSE_BINOP_INSTRUCTION_LIST(SSE_DISASM_CASE)
+#undef SSE_DISASM_CASE
+ // clang-format on
} else {
UnimplementedInstruction();
}
diff --git a/deps/v8/src/execution/arm/frame-constants-arm.cc b/deps/v8/src/execution/arm/frame-constants-arm.cc
index 602242ac97..7a72dab870 100644
--- a/deps/v8/src/execution/arm/frame-constants-arm.cc
+++ b/deps/v8/src/execution/arm/frame-constants-arm.cc
@@ -17,7 +17,7 @@ Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
-int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
+int UnoptimizedFrameConstants::RegisterStackSlotCount(int register_count) {
return register_count;
}
diff --git a/deps/v8/src/execution/arm/frame-constants-arm.h b/deps/v8/src/execution/arm/frame-constants-arm.h
index 47e901ea99..b4c4e013b7 100644
--- a/deps/v8/src/execution/arm/frame-constants-arm.h
+++ b/deps/v8/src/execution/arm/frame-constants-arm.h
@@ -16,20 +16,17 @@ namespace internal {
// The layout of an EntryFrame is as follows:
// TOP OF THE STACK LOWEST ADDRESS
// +---------------------+-----------------------
-// 0 | bad frame pointer | <-- frame ptr
-// | (0xFFF.. FF) |
+// 0 | saved fp (r11) | <-- frame ptr
// |- - - - - - - - - - -|
-// 1..2 | saved register d8 |
-// ... | ... |
-// 15..16 | saved register d15 |
+// 1 | saved lr (r14) |
// |- - - - - - - - - - -|
-// 17 | saved register r4 |
+// 2..3 | saved register d8 |
// ... | ... |
-// 23 | saved register r10 |
-// |- - - - - - - - - - -|
-// 24 | saved fp (r11) |
+// 16..17 | saved register d15 |
// |- - - - - - - - - - -|
-// 25 | saved lr (r14) |
+// 18 | saved register r4 |
+// ... | ... |
+// 24 | saved register r10 |
// -----+---------------------+-----------------------
// BOTTOM OF THE STACK HIGHEST ADDRESS
class EntryFrameConstants : public AllStatic {
@@ -43,19 +40,19 @@ class EntryFrameConstants : public AllStatic {
static constexpr int kArgvOffset = +1 * kSystemPointerSize;
// These offsets refer to the immediate caller (i.e a native frame).
- static constexpr int kDirectCallerRRegistersOffset =
- /* bad frame pointer (-1) */
- kPointerSize +
- /* d8...d15 */
- kNumDoubleCalleeSaved * kDoubleSize;
- static constexpr int kDirectCallerFPOffset =
- kDirectCallerRRegistersOffset +
- /* r4...r10 (i.e. callee saved without fp) */
- (kNumCalleeSaved - 1) * kPointerSize;
+ static constexpr int kDirectCallerFPOffset = 0;
static constexpr int kDirectCallerPCOffset =
kDirectCallerFPOffset + 1 * kSystemPointerSize;
+ static constexpr int kDirectCallerGeneralRegistersOffset =
+ kDirectCallerPCOffset +
+ /* saved caller PC */
+ kSystemPointerSize +
+ /* d8...d15 */
+ kNumDoubleCalleeSaved * kDoubleSize;
static constexpr int kDirectCallerSPOffset =
- kDirectCallerPCOffset + 1 * kSystemPointerSize;
+ kDirectCallerGeneralRegistersOffset +
+ /* r4...r10 (i.e. callee saved without fp) */
+ (kNumCalleeSaved - 1) * kSystemPointerSize;
};
class WasmCompileLazyFrameConstants : public TypedFrameConstants {
@@ -64,7 +61,10 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
static constexpr int kNumberOfSavedFpParamRegs = 8;
// FP-relative.
- static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ // The instance is pushed as part of the saved registers. Being in {r3}, it is
+ // at position 1 in the list [r0, r2, r3, r6] (kGpParamRegisters sorted by
+ // number and indexed zero-based from the back).
+ static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
static constexpr int kFixedFrameSizeFromFp =
TypedFrameConstants::kFixedFrameSizeFromFp +
kNumberOfSavedGpParamRegs * kPointerSize +
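
As a quick check of the index arithmetic in the comment above: the ARM wasm GP parameter registers are {r0, r2, r3, r6}, and indexing that list zero-based from the back after sorting by register number gives r6 -> 0, r3 -> 1, r2 -> 2, r0 -> 3, which is why the instance (in r3) lands at pushed-value offset 1. The register numbers below are hard-coded purely to spell that out.

// Spelled-out index arithmetic for the comment above (not V8 code).
#include <algorithm>
#include <array>
#include <cstdio>

int main() {
  std::array<int, 4> gp_param_regs = {0, 2, 3, 6};  // r0, r2, r3, r6
  std::sort(gp_param_regs.begin(), gp_param_regs.end());
  const int instance_reg = 3;  // the wasm instance is passed in r3
  int index_from_back = 0;
  for (auto it = gp_param_regs.rbegin(); it != gp_param_regs.rend();
       ++it, ++index_from_back) {
    if (*it == instance_reg) break;
  }
  std::printf("instance slot index: %d\n", index_from_back);  // prints 1
  return 0;
}
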
diff --git a/deps/v8/src/execution/arm/simulator-arm.cc b/deps/v8/src/execution/arm/simulator-arm.cc
index dc2dc7952d..a013deb418 100644
--- a/deps/v8/src/execution/arm/simulator-arm.cc
+++ b/deps/v8/src/execution/arm/simulator-arm.cc
@@ -3930,19 +3930,6 @@ U Widen(T value) {
}
template <typename T, typename U>
-U Narrow(T value) {
- static_assert(sizeof(int8_t) < sizeof(T), "T must be int16_t or larger");
- static_assert(sizeof(U) < sizeof(T), "T must larger than U");
- static_assert(std::is_unsigned<T>() == std::is_unsigned<U>(),
- "Signed-ness of T and U must match");
- // Make sure value can be expressed in the smaller type; otherwise, the
- // casted result is implementation defined.
- DCHECK_LE(std::numeric_limits<T>::min(), value);
- DCHECK_GE(std::numeric_limits<T>::max(), value);
- return static_cast<U>(value);
-}
-
-template <typename T, typename U>
void Widen(Simulator* simulator, int Vd, int Vm) {
static const int kLanes = 8 / sizeof(T);
T src[kLanes];
@@ -3974,19 +3961,7 @@ void SaturatingNarrow(Simulator* simulator, int Vd, int Vm) {
U dst[kLanes];
simulator->get_neon_register(Vm, src);
for (int i = 0; i < kLanes; i++) {
- dst[i] = Narrow<T, U>(Saturate<U>(src[i]));
- }
- simulator->set_neon_register<U, kDoubleSize>(Vd, dst);
-}
-
-template <typename T, typename U>
-void SaturatingUnsignedNarrow(Simulator* simulator, int Vd, int Vm) {
- static const int kLanes = 16 / sizeof(T);
- T src[kLanes];
- U dst[kLanes];
- simulator->get_neon_register(Vm, src);
- for (int i = 0; i < kLanes; i++) {
- dst[i] = Saturate<U>(src[i]);
+ dst[i] = base::saturated_cast<U>(src[i]);
}
simulator->set_neon_register<U, kDoubleSize>(Vd, dst);
}
@@ -4529,6 +4504,25 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
get_neon_register(vm, q_data);
for (int i = 0; i < 4; i++) q_data[i] = ~q_data[i];
set_neon_register(vd, q_data);
+ } else if (opc1 == 0b01 && opc2 == 0b0010) {
+ // vceq.<dt> Qd, Qm, #0 (signed integers).
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ switch (size) {
+ case Neon8:
+ Unop<int8_t>(this, Vd, Vm, [](int8_t x) { return x == 0 ? -1 : 0; });
+ break;
+ case Neon16:
+ Unop<int16_t>(this, Vd, Vm,
+ [](int16_t x) { return x == 0 ? -1 : 0; });
+ break;
+ case Neon32:
+ Unop<int32_t>(this, Vd, Vm,
+ [](int32_t x) { return x == 0 ? -1 : 0; });
+ break;
+ case Neon64:
+ UNREACHABLE();
+ }
} else if (opc1 == 0b01 && opc2 == 0b0100) {
// vclt.<dt> Qd, Qm, #0 (signed integers).
int Vd = instr->VFPDRegValue(kSimd128Precision);
@@ -4546,7 +4540,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
case Neon64:
UNREACHABLE();
}
-
} else if (opc1 == 0b01 && (opc2 & 0b0111) == 0b110) {
// vabs<type>.<size> Qd, Qm
int Vd = instr->VFPDRegValue(kSimd128Precision);
@@ -4731,7 +4724,7 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
if (src_unsigned) {
SaturatingNarrow<uint16_t, uint8_t>(this, Vd, Vm);
} else if (dst_unsigned) {
- SaturatingUnsignedNarrow<int16_t, uint8_t>(this, Vd, Vm);
+ SaturatingNarrow<int16_t, uint8_t>(this, Vd, Vm);
} else {
SaturatingNarrow<int16_t, int8_t>(this, Vd, Vm);
}
@@ -4741,7 +4734,7 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
if (src_unsigned) {
SaturatingNarrow<uint32_t, uint16_t>(this, Vd, Vm);
} else if (dst_unsigned) {
- SaturatingUnsignedNarrow<int32_t, uint16_t>(this, Vd, Vm);
+ SaturatingNarrow<int32_t, uint16_t>(this, Vd, Vm);
} else {
SaturatingNarrow<int32_t, int16_t>(this, Vd, Vm);
}
@@ -4751,7 +4744,7 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
if (src_unsigned) {
SaturatingNarrow<uint64_t, uint32_t>(this, Vd, Vm);
} else if (dst_unsigned) {
- SaturatingUnsignedNarrow<int64_t, uint32_t>(this, Vd, Vm);
+ SaturatingNarrow<int64_t, uint32_t>(this, Vd, Vm);
} else {
SaturatingNarrow<int64_t, int32_t>(this, Vd, Vm);
}
diff --git a/deps/v8/src/execution/arm64/frame-constants-arm64.cc b/deps/v8/src/execution/arm64/frame-constants-arm64.cc
index 94d12f058f..07aebe4867 100644
--- a/deps/v8/src/execution/arm64/frame-constants-arm64.cc
+++ b/deps/v8/src/execution/arm64/frame-constants-arm64.cc
@@ -19,7 +19,7 @@ Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
-int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
+int UnoptimizedFrameConstants::RegisterStackSlotCount(int register_count) {
STATIC_ASSERT(InterpreterFrameConstants::kFixedFrameSize % 16 == 8);
// Interpreter frame header size is not 16-bytes aligned, so we'll need at
// least one register slot to make the frame a multiple of 16 bytes. The code
diff --git a/deps/v8/src/execution/arm64/frame-constants-arm64.h b/deps/v8/src/execution/arm64/frame-constants-arm64.h
index fba69f917d..a01c15d348 100644
--- a/deps/v8/src/execution/arm64/frame-constants-arm64.h
+++ b/deps/v8/src/execution/arm64/frame-constants-arm64.h
@@ -18,20 +18,17 @@ namespace internal {
// BOTTOM OF THE STACK HIGHEST ADDRESS
// slot Entry frame
// +---------------------+-----------------------
-// -20 | saved register d15 |
+// -19 | saved register d15 |
// ... | ... |
-// -13 | saved register d8 |
+// -12 | saved register d8 |
// |- - - - - - - - - - -|
-// -12 | saved lr (x30) |
-// |- - - - - - - - - - -|
-// -11 | saved fp (x29) |
-// |- - - - - - - - - - -|
-// -10 | saved register x28 |
+// -11 | saved register x28 |
// ... | ... |
-// -1 | saved register x19 |
+// -2 | saved register x19 |
// |- - - - - - - - - - -|
-// 0 | bad frame pointer | <-- frame ptr
-// | (0xFFF.. FF) |
+// -1 | saved lr (x30) |
+// |- - - - - - - - - - -|
+// 0 | saved fp (x29) | <-- frame ptr
// |- - - - - - - - - - -|
// 1 | stack frame marker |
// | (ENTRY) |
@@ -41,10 +38,8 @@ namespace internal {
// |- - - - - - - - - - -|
// 3 | C entry FP |
// |- - - - - - - - - - -|
-// 4 | JS entry frame |
+// 4 | JS entry frame | <-- stack ptr
// | marker |
-// |- - - - - - - - - - -|
-// 5 | padding | <-- stack ptr
// -----+---------------------+-----------------------
// TOP OF THE STACK LOWEST ADDRESS
//
@@ -53,16 +48,15 @@ class EntryFrameConstants : public AllStatic {
// This is the offset to where JSEntry pushes the current value of
// Isolate::c_entry_fp onto the stack.
static constexpr int kCallerFPOffset = -3 * kSystemPointerSize;
- static constexpr int kFixedFrameSize = 6 * kSystemPointerSize;
+ static constexpr int kFixedFrameSize = 4 * kSystemPointerSize;
// The following constants are defined so we can static-assert their values
// near the relevant JSEntry assembly code, not because they're actually very
// useful.
static constexpr int kCalleeSavedRegisterBytesPushedBeforeFpLrPair =
- 8 * kSystemPointerSize;
- static constexpr int kCalleeSavedRegisterBytesPushedAfterFpLrPair =
- 10 * kSystemPointerSize;
- static constexpr int kOffsetToCalleeSavedRegisters = 1 * kSystemPointerSize;
+ 18 * kSystemPointerSize;
+ static constexpr int kCalleeSavedRegisterBytesPushedAfterFpLrPair = 0;
+ static constexpr int kOffsetToCalleeSavedRegisters = 0;
// These offsets refer to the immediate caller (a native frame), not to the
// previous JS exit frame like kCallerFPOffset above.
@@ -82,12 +76,16 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
static constexpr int kNumberOfSavedFpParamRegs = 8;
// FP-relative.
+ // The instance is pushed as part of the saved registers. Being in {x7}, it is
+ // the first register pushed (highest register code in
+ // {wasm::kGpParamRegisters}). Because of padding of the frame header, it is
+ // actually one word further down the stack though (thus at position {1}).
static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
static constexpr int kFixedFrameSizeFromFp =
// Header is padded to 16 byte (see {MacroAssembler::EnterFrame}).
RoundUp<16>(TypedFrameConstants::kFixedFrameSizeFromFp) +
kNumberOfSavedGpParamRegs * kSystemPointerSize +
- kNumberOfSavedFpParamRegs * kDoubleSize;
+ kNumberOfSavedFpParamRegs * kSimd128Size;
};
// Frame constructed by the {WasmDebugBreak} builtin.
diff --git a/deps/v8/src/execution/arm64/simulator-arm64.cc b/deps/v8/src/execution/arm64/simulator-arm64.cc
index 7b49ee4673..78ea638e0d 100644
--- a/deps/v8/src/execution/arm64/simulator-arm64.cc
+++ b/deps/v8/src/execution/arm64/simulator-arm64.cc
@@ -4,6 +4,8 @@
#include "src/execution/arm64/simulator-arm64.h"
+#include "src/execution/isolate.h"
+
#if defined(USE_SIMULATOR)
#include <stdlib.h>
@@ -378,7 +380,6 @@ Simulator::~Simulator() {
delete[] reinterpret_cast<byte*>(stack_);
delete disassembler_decoder_;
delete print_disasm_;
- DeleteArray(last_debugger_input_);
delete decoder_;
}
@@ -3291,6 +3292,17 @@ bool Simulator::PrintValue(const char* desc) {
}
void Simulator::Debug() {
+ bool done = false;
+ while (!done) {
+ // Disassemble the next instruction to execute before doing anything else.
+ PrintInstructionsAt(pc_, 1);
+ // Read the command line.
+ ArrayUniquePtr<char> line(ReadLine("sim> "));
+ done = ExecDebugCommand(std::move(line));
+ }
+}
+
+bool Simulator::ExecDebugCommand(ArrayUniquePtr<char> line_ptr) {
#define COMMAND_SIZE 63
#define ARG_SIZE 255
@@ -3307,291 +3319,313 @@ void Simulator::Debug() {
arg1[ARG_SIZE] = 0;
arg2[ARG_SIZE] = 0;
- bool done = false;
bool cleared_log_disasm_bit = false;
- while (!done) {
- // Disassemble the next instruction to execute before doing anything else.
- PrintInstructionsAt(pc_, 1);
- // Read the command line.
- char* line = ReadLine("sim> ");
- if (line == nullptr) {
- break;
+ if (line_ptr == nullptr) return false;
+
+ // Repeat last command by default.
+ const char* line = line_ptr.get();
+ const char* last_input = last_debugger_input();
+ if (strcmp(line, "\n") == 0 && (last_input != nullptr)) {
+ line_ptr.reset();
+ line = last_input;
+ } else {
+ // Update the latest command ran
+ set_last_debugger_input(std::move(line_ptr));
+ }
+
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = SScanF(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+
+ // stepi / si ------------------------------------------------------------
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ // We are about to execute instructions, after which by default we
+ // should increment the pc_. If it was set when reaching this debug
+ // instruction, it has not been cleared because this instruction has not
+ // completed yet. So clear it manually.
+ pc_modified_ = false;
+
+ if (argc == 1) {
+ ExecuteInstruction();
} else {
- // Repeat last command by default.
- char* last_input = last_debugger_input();
- if (strcmp(line, "\n") == 0 && (last_input != nullptr)) {
- DeleteArray(line);
- line = last_input;
- } else {
- // Update the latest command ran
- set_last_debugger_input(line);
+ int64_t number_of_instructions_to_execute = 1;
+ GetValue(arg1, &number_of_instructions_to_execute);
+
+ set_log_parameters(log_parameters() | LOG_DISASM);
+ while (number_of_instructions_to_execute-- > 0) {
+ ExecuteInstruction();
}
+ set_log_parameters(log_parameters() & ~LOG_DISASM);
+ PrintF("\n");
+ }
- // Use sscanf to parse the individual parts of the command line. At the
- // moment no command expects more than two parameters.
- int argc = SScanF(line,
- "%" XSTR(COMMAND_SIZE) "s "
- "%" XSTR(ARG_SIZE) "s "
- "%" XSTR(ARG_SIZE) "s",
- cmd, arg1, arg2);
-
- // stepi / si ------------------------------------------------------------
- if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
- // We are about to execute instructions, after which by default we
- // should increment the pc_. If it was set when reaching this debug
- // instruction, it has not been cleared because this instruction has not
- // completed yet. So clear it manually.
- pc_modified_ = false;
-
- if (argc == 1) {
- ExecuteInstruction();
- } else {
- int64_t number_of_instructions_to_execute = 1;
- GetValue(arg1, &number_of_instructions_to_execute);
+ // If it was necessary, the pc has already been updated or incremented
+ // when executing the instruction. So we do not want it to be updated
+ // again. It will be cleared when exiting.
+ pc_modified_ = true;
+
+ // next / n
+ // --------------------------------------------------------------
+ } else if ((strcmp(cmd, "next") == 0) || (strcmp(cmd, "n") == 0)) {
+ // Tell the simulator to break after the next executed BL.
+ break_on_next_ = true;
+ // Continue.
+ return true;
- set_log_parameters(log_parameters() | LOG_DISASM);
- while (number_of_instructions_to_execute-- > 0) {
- ExecuteInstruction();
- }
- set_log_parameters(log_parameters() & ~LOG_DISASM);
- PrintF("\n");
- }
+ // continue / cont / c
+ // ---------------------------------------------------
+ } else if ((strcmp(cmd, "continue") == 0) || (strcmp(cmd, "cont") == 0) ||
+ (strcmp(cmd, "c") == 0)) {
+ // Leave the debugger shell.
+ return true;
- // If it was necessary, the pc has already been updated or incremented
- // when executing the instruction. So we do not want it to be updated
- // again. It will be cleared when exiting.
- pc_modified_ = true;
-
- // next / n
- // --------------------------------------------------------------
- } else if ((strcmp(cmd, "next") == 0) || (strcmp(cmd, "n") == 0)) {
- // Tell the simulator to break after the next executed BL.
- break_on_next_ = true;
- // Continue.
- done = true;
-
- // continue / cont / c
- // ---------------------------------------------------
- } else if ((strcmp(cmd, "continue") == 0) || (strcmp(cmd, "cont") == 0) ||
- (strcmp(cmd, "c") == 0)) {
- // Leave the debugger shell.
- done = true;
-
- // disassemble / disasm / di
- // ---------------------------------------------
- } else if (strcmp(cmd, "disassemble") == 0 ||
- strcmp(cmd, "disasm") == 0 || strcmp(cmd, "di") == 0) {
- int64_t n_of_instrs_to_disasm = 10; // default value.
- int64_t address = reinterpret_cast<int64_t>(pc_); // default value.
- if (argc >= 2) { // disasm <n of instrs>
- GetValue(arg1, &n_of_instrs_to_disasm);
- }
- if (argc >= 3) { // disasm <n of instrs> <address>
- GetValue(arg2, &address);
- }
+ // disassemble / disasm / di
+ // ---------------------------------------------
+ } else if (strcmp(cmd, "disassemble") == 0 || strcmp(cmd, "disasm") == 0 ||
+ strcmp(cmd, "di") == 0) {
+ int64_t n_of_instrs_to_disasm = 10; // default value.
+ int64_t address = reinterpret_cast<int64_t>(pc_); // default value.
+ if (argc >= 2) { // disasm <n of instrs>
+ GetValue(arg1, &n_of_instrs_to_disasm);
+ }
+ if (argc >= 3) { // disasm <n of instrs> <address>
+ GetValue(arg2, &address);
+ }
- // Disassemble.
- PrintInstructionsAt(reinterpret_cast<Instruction*>(address),
- n_of_instrs_to_disasm);
- PrintF("\n");
-
- // print / p
- // -------------------------------------------------------------
- } else if ((strcmp(cmd, "print") == 0) || (strcmp(cmd, "p") == 0)) {
- if (argc == 2) {
- if (strcmp(arg1, "all") == 0) {
- PrintRegisters();
- PrintVRegisters();
- } else {
- if (!PrintValue(arg1)) {
- PrintF("%s unrecognized\n", arg1);
- }
- }
- } else {
- PrintF(
- "print <register>\n"
- " Print the content of a register. (alias 'p')\n"
- " 'print all' will print all registers.\n"
- " Use 'printobject' to get more details about the value.\n");
+ // Disassemble.
+ PrintInstructionsAt(reinterpret_cast<Instruction*>(address),
+ n_of_instrs_to_disasm);
+ PrintF("\n");
+
+ // print / p
+ // -------------------------------------------------------------
+ } else if ((strcmp(cmd, "print") == 0) || (strcmp(cmd, "p") == 0)) {
+ if (argc == 2) {
+ if (strcmp(arg1, "all") == 0) {
+ PrintRegisters();
+ PrintVRegisters();
+ } else {
+ if (!PrintValue(arg1)) {
+ PrintF("%s unrecognized\n", arg1);
}
+ }
+ } else {
+ PrintF(
+ "print <register>\n"
+ " Print the content of a register. (alias 'p')\n"
+ " 'print all' will print all registers.\n"
+ " Use 'printobject' to get more details about the value.\n");
+ }
- // printobject / po
- // ------------------------------------------------------
- } else if ((strcmp(cmd, "printobject") == 0) ||
- (strcmp(cmd, "po") == 0)) {
- if (argc == 2) {
- int64_t value;
- StdoutStream os;
- if (GetValue(arg1, &value)) {
- Object obj(value);
- os << arg1 << ": \n";
+ // printobject / po
+ // ------------------------------------------------------
+ } else if ((strcmp(cmd, "printobject") == 0) || (strcmp(cmd, "po") == 0)) {
+ if (argc == 2) {
+ int64_t value;
+ StdoutStream os;
+ if (GetValue(arg1, &value)) {
+ Object obj(value);
+ os << arg1 << ": \n";
#ifdef DEBUG
- obj.Print(os);
- os << "\n";
+ obj.Print(os);
+ os << "\n";
#else
- os << Brief(obj) << "\n";
+ os << Brief(obj) << "\n";
#endif
- } else {
- os << arg1 << " unrecognized\n";
- }
- } else {
- PrintF(
- "printobject <value>\n"
- "printobject <register>\n"
- " Print details about the value. (alias 'po')\n");
- }
-
- // stack / mem
- // ----------------------------------------------------------
- } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0 ||
- strcmp(cmd, "dump") == 0) {
- int64_t* cur = nullptr;
- int64_t* end = nullptr;
- int next_arg = 1;
-
- if (strcmp(cmd, "stack") == 0) {
- cur = reinterpret_cast<int64_t*>(sp());
-
- } else { // "mem"
- int64_t value;
- if (!GetValue(arg1, &value)) {
- PrintF("%s unrecognized\n", arg1);
- continue;
- }
- cur = reinterpret_cast<int64_t*>(value);
- next_arg++;
- }
-
- int64_t words = 0;
- if (argc == next_arg) {
- words = 10;
- } else if (argc == next_arg + 1) {
- if (!GetValue(argv[next_arg], &words)) {
- PrintF("%s unrecognized\n", argv[next_arg]);
- PrintF("Printing 10 double words by default");
- words = 10;
- }
- } else {
- UNREACHABLE();
- }
- end = cur + words;
-
- bool skip_obj_print = (strcmp(cmd, "dump") == 0);
- while (cur < end) {
- PrintF(" 0x%016" PRIx64 ": 0x%016" PRIx64 " %10" PRId64,
- reinterpret_cast<uint64_t>(cur), *cur, *cur);
- if (!skip_obj_print) {
- Object obj(*cur);
- Heap* current_heap = isolate_->heap();
- if (obj.IsSmi() ||
- IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
- PrintF(" (");
- if (obj.IsSmi()) {
- PrintF("smi %" PRId32, Smi::ToInt(obj));
- } else {
- obj.ShortPrint();
- }
- PrintF(")");
- }
- }
- PrintF("\n");
- cur++;
- }
+ } else {
+ os << arg1 << " unrecognized\n";
+ }
+ } else {
+ PrintF(
+ "printobject <value>\n"
+ "printobject <register>\n"
+ " Print details about the value. (alias 'po')\n");
+ }
- // trace / t
- // -------------------------------------------------------------
- } else if (strcmp(cmd, "trace") == 0 || strcmp(cmd, "t") == 0) {
- if ((log_parameters() & LOG_ALL) != LOG_ALL) {
- PrintF("Enabling disassembly, registers and memory write tracing\n");
- set_log_parameters(log_parameters() | LOG_ALL);
- } else {
- PrintF("Disabling disassembly, registers and memory write tracing\n");
- set_log_parameters(log_parameters() & ~LOG_ALL);
- }
+ // stack / mem
+ // ----------------------------------------------------------
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0 ||
+ strcmp(cmd, "dump") == 0) {
+ int64_t* cur = nullptr;
+ int64_t* end = nullptr;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int64_t*>(sp());
+
+ } else { // "mem"
+ int64_t value;
+ if (!GetValue(arg1, &value)) {
+ PrintF("%s unrecognized\n", arg1);
+ return false;
+ }
+ cur = reinterpret_cast<int64_t*>(value);
+ next_arg++;
+ }
- // break / b
- // -------------------------------------------------------------
- } else if (strcmp(cmd, "break") == 0 || strcmp(cmd, "b") == 0) {
- if (argc == 2) {
- int64_t value;
- if (GetValue(arg1, &value)) {
- SetBreakpoint(reinterpret_cast<Instruction*>(value));
+ int64_t words = 0;
+ if (argc == next_arg) {
+ words = 10;
+ } else if (argc == next_arg + 1) {
+ if (!GetValue(argv[next_arg], &words)) {
+ PrintF("%s unrecognized\n", argv[next_arg]);
+        PrintF("Printing 10 double words by default\n");
+ words = 10;
+ }
+ } else {
+ UNREACHABLE();
+ }
+ end = cur + words;
+
+ bool skip_obj_print = (strcmp(cmd, "dump") == 0);
+ while (cur < end) {
+ PrintF(" 0x%016" PRIx64 ": 0x%016" PRIx64 " %10" PRId64,
+ reinterpret_cast<uint64_t>(cur), *cur, *cur);
+ if (!skip_obj_print) {
+ Object obj(*cur);
+ Heap* current_heap = isolate_->heap();
+ if (obj.IsSmi() ||
+ IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
+ PrintF(" (");
+ if (obj.IsSmi()) {
+ PrintF("smi %" PRId32, Smi::ToInt(obj));
} else {
- PrintF("%s unrecognized\n", arg1);
+ obj.ShortPrint();
}
- } else {
- ListBreakpoints();
- PrintF("Use `break <address>` to set or disable a breakpoint\n");
+ PrintF(")");
}
+ }
+ PrintF("\n");
+ cur++;
+ }
- // gdb
- // -------------------------------------------------------------------
- } else if (strcmp(cmd, "gdb") == 0) {
- PrintF("Relinquishing control to gdb.\n");
- base::OS::DebugBreak();
- PrintF("Regaining control from gdb.\n");
-
- // sysregs
- // ---------------------------------------------------------------
- } else if (strcmp(cmd, "sysregs") == 0) {
- PrintSystemRegisters();
-
- // help / h
- // --------------------------------------------------------------
- } else if (strcmp(cmd, "help") == 0 || strcmp(cmd, "h") == 0) {
- PrintF(
- "stepi / si\n"
- " stepi <n>\n"
- " Step <n> instructions.\n"
- "next / n\n"
- " Continue execution until a BL instruction is reached.\n"
- " At this point a breakpoint is set just after this BL.\n"
- " Then execution is resumed. It will probably later hit the\n"
- " breakpoint just set.\n"
- "continue / cont / c\n"
- " Continue execution from here.\n"
- "disassemble / disasm / di\n"
- " disassemble <n> <address>\n"
- " Disassemble <n> instructions from current <address>.\n"
- " By default <n> is 20 and <address> is the current pc.\n"
- "print / p\n"
- " print <register>\n"
- " Print the content of a register.\n"
- " 'print all' will print all registers.\n"
- " Use 'printobject' to get more details about the value.\n"
- "printobject / po\n"
- " printobject <value>\n"
- " printobject <register>\n"
- " Print details about the value.\n"
- "stack\n"
- " stack [<words>]\n"
- " Dump stack content, default dump 10 words\n"
- "mem\n"
- " mem <address> [<words>]\n"
- " Dump memory content, default dump 10 words\n"
- "dump\n"
- " dump <address> [<words>]\n"
- " Dump memory content without pretty printing JS objects, "
- "default dump 10 words\n"
- "trace / t\n"
- " Toggle disassembly and register tracing\n"
- "break / b\n"
- " break : list all breakpoints\n"
- " break <address> : set / enable / disable a breakpoint.\n"
- "gdb\n"
- " Enter gdb.\n"
- "sysregs\n"
- " Print all system registers (including NZCV).\n");
+ // trace / t
+ // -------------------------------------------------------------
+ } else if (strcmp(cmd, "trace") == 0 || strcmp(cmd, "t") == 0) {
+ if ((log_parameters() & LOG_ALL) != LOG_ALL) {
+ PrintF("Enabling disassembly, registers and memory write tracing\n");
+ set_log_parameters(log_parameters() | LOG_ALL);
+ } else {
+ PrintF("Disabling disassembly, registers and memory write tracing\n");
+ set_log_parameters(log_parameters() & ~LOG_ALL);
+ }
+
+ // break / b
+ // -------------------------------------------------------------
+ } else if (strcmp(cmd, "break") == 0 || strcmp(cmd, "b") == 0) {
+ if (argc == 2) {
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ SetBreakpoint(reinterpret_cast<Instruction*>(value));
} else {
- PrintF("Unknown command: %s\n", cmd);
- PrintF("Use 'help' for more information.\n");
+ PrintF("%s unrecognized\n", arg1);
}
+ } else {
+ ListBreakpoints();
+ PrintF("Use `break <address>` to set or disable a breakpoint\n");
}
- if (cleared_log_disasm_bit == true) {
- set_log_parameters(log_parameters_ | LOG_DISASM);
+
+ // backtrace / bt
+ // ---------------------------------------------------------------
+ } else if (strcmp(cmd, "backtrace") == 0 || strcmp(cmd, "bt") == 0) {
+ Address pc = reinterpret_cast<Address>(pc_);
+ Address lr = reinterpret_cast<Address>(this->lr());
+ Address sp = static_cast<Address>(this->sp());
+ Address fp = static_cast<Address>(this->fp());
+
+ int i = 0;
+ while (true) {
+ PrintF("#%d: " V8PRIxPTR_FMT " (sp=" V8PRIxPTR_FMT ", fp=" V8PRIxPTR_FMT
+ ")\n",
+ i, pc, sp, fp);
+ pc = lr;
+ sp = fp;
+ if (pc == reinterpret_cast<Address>(kEndOfSimAddress)) {
+ break;
+ }
+ lr = *(reinterpret_cast<Address*>(fp) + 1);
+ fp = *reinterpret_cast<Address*>(fp);
+ i++;
+ if (i > 100) {
+ PrintF("Too many frames\n");
+ break;
+ }
}
+
+ // gdb
+ // -------------------------------------------------------------------
+ } else if (strcmp(cmd, "gdb") == 0) {
+ PrintF("Relinquishing control to gdb.\n");
+ base::OS::DebugBreak();
+ PrintF("Regaining control from gdb.\n");
+
+ // sysregs
+ // ---------------------------------------------------------------
+ } else if (strcmp(cmd, "sysregs") == 0) {
+ PrintSystemRegisters();
+
+ // help / h
+ // --------------------------------------------------------------
+ } else if (strcmp(cmd, "help") == 0 || strcmp(cmd, "h") == 0) {
+ PrintF(
+ "stepi / si\n"
+ " stepi <n>\n"
+ " Step <n> instructions.\n"
+ "next / n\n"
+ " Continue execution until a BL instruction is reached.\n"
+ " At this point a breakpoint is set just after this BL.\n"
+ " Then execution is resumed. It will probably later hit the\n"
+ " breakpoint just set.\n"
+ "continue / cont / c\n"
+ " Continue execution from here.\n"
+ "disassemble / disasm / di\n"
+ " disassemble <n> <address>\n"
+ " Disassemble <n> instructions from current <address>.\n"
+ " By default <n> is 20 and <address> is the current pc.\n"
+ "print / p\n"
+ " print <register>\n"
+ " Print the content of a register.\n"
+ " 'print all' will print all registers.\n"
+ " Use 'printobject' to get more details about the value.\n"
+ "printobject / po\n"
+ " printobject <value>\n"
+ " printobject <register>\n"
+ " Print details about the value.\n"
+ "stack\n"
+ " stack [<words>]\n"
+ " Dump stack content, default dump 10 words\n"
+ "mem\n"
+ " mem <address> [<words>]\n"
+ " Dump memory content, default dump 10 words\n"
+ "dump\n"
+ " dump <address> [<words>]\n"
+ " Dump memory content without pretty printing JS objects, "
+ "default dump 10 words\n"
+ "trace / t\n"
+ " Toggle disassembly and register tracing\n"
+ "break / b\n"
+ " break : list all breakpoints\n"
+ " break <address> : set / enable / disable a breakpoint.\n"
+ "backtrace / bt\n"
+ " Walk the frame pointers, dumping the pc/sp/fp for each frame.\n"
+ "gdb\n"
+ " Enter gdb.\n"
+ "sysregs\n"
+ " Print all system registers (including NZCV).\n");
+ } else {
+ PrintF("Unknown command: %s\n", cmd);
+ PrintF("Use 'help' for more information.\n");
}
+
+ if (cleared_log_disasm_bit == true) {
+ set_log_parameters(log_parameters_ | LOG_DISASM);
+ }
+ return false;
}
void Simulator::VisitException(Instruction* instr) {
@@ -6137,4 +6171,26 @@ void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
} // namespace internal
} // namespace v8
+//
+// The following functions are used by our gdb macros.
+//
+V8_EXPORT_PRIVATE extern bool _v8_internal_Simulator_ExecDebugCommand(
+ const char* command) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (!isolate) {
+ fprintf(stderr, "No V8 Isolate found\n");
+ return false;
+ }
+ i::Simulator* simulator = i::Simulator::current(isolate);
+ if (!simulator) {
+ fprintf(stderr, "No Arm64 simulator found\n");
+ return false;
+ }
+ // Copy the command so that the simulator can take ownership of it.
+ size_t len = strlen(command);
+ i::ArrayUniquePtr<char> command_copy(i::NewArray<char>(len + 1));
+ i::MemCopy(command_copy.get(), command, len + 1);
+ return simulator->ExecDebugCommand(std::move(command_copy));
+}
+
#endif // USE_SIMULATOR
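
The new "backtrace" command above walks AArch64 frame records: each record stored at fp is the pair {caller fp, saved lr}, so following fp recovers the call chain until kEndOfSimAddress (or the 100-frame cap) is reached. Below is a minimal standalone sketch of that walk, independent of the simulator; FrameRecord, WalkFrames and end_marker are illustrative names, not V8 API.

#include <cinttypes>
#include <cstdint>
#include <cstdio>

// On AArch64, a frame record is the pair {saved fp, saved lr} stored at the
// address the frame pointer points to.
struct FrameRecord {
  const FrameRecord* caller_fp;  // [fp]     saved frame pointer
  uintptr_t return_address;      // [fp + 8] saved link register
};

void WalkFrames(uintptr_t pc, uintptr_t lr, const FrameRecord* fp,
                uintptr_t end_marker) {
  for (int i = 0; i < 100; ++i) {  // depth cap, mirroring "Too many frames"
    std::printf("#%d: pc=0x%" PRIxPTR " fp=%p\n", i, pc,
                static_cast<const void*>(fp));
    pc = lr;  // the caller resumes at the saved link register
    if (pc == end_marker || fp == nullptr) break;
    lr = fp->return_address;  // climb one frame record
    fp = fp->caller_fp;
  }
}
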
diff --git a/deps/v8/src/execution/arm64/simulator-arm64.h b/deps/v8/src/execution/arm64/simulator-arm64.h
index 6098a74776..2dbbef6ff6 100644
--- a/deps/v8/src/execution/arm64/simulator-arm64.h
+++ b/deps/v8/src/execution/arm64/simulator-arm64.h
@@ -731,6 +731,11 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
// Start the debugging command line.
void Debug();
+ // Executes a single debug command. Takes ownership of the command (so that it
+ // can store it for repeat executions), and returns true if the debugger
+ // should resume execution after this command completes.
+ bool ExecDebugCommand(ArrayUniquePtr<char> command);
+
bool GetValue(const char* desc, int64_t* value);
bool PrintValue(const char* desc);
@@ -2327,12 +2332,11 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
static const char* vreg_names[];
// Debugger input.
- void set_last_debugger_input(char* input) {
- DeleteArray(last_debugger_input_);
- last_debugger_input_ = input;
+ void set_last_debugger_input(ArrayUniquePtr<char> input) {
+ last_debugger_input_ = std::move(input);
}
- char* last_debugger_input() { return last_debugger_input_; }
- char* last_debugger_input_;
+ const char* last_debugger_input() { return last_debugger_input_.get(); }
+ ArrayUniquePtr<char> last_debugger_input_;
// Synchronization primitives. See ARM DDI 0487A.a, B2.10. Pair types not
// implemented.
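
The header change above moves last_debugger_input_ (and the new ExecDebugCommand entry point) onto ArrayUniquePtr<char>, so the simulator owns the most recent command without the manual DeleteArray the old setter needed. A rough sketch of the same ownership pattern using only the standard library; DebuggerSketch and CopyCommand are illustrative names, and std::unique_ptr<char[]> stands in for V8's internal ArrayUniquePtr<char>.

#include <cstring>
#include <memory>
#include <utility>

class DebuggerSketch {
 public:
  // Takes ownership of the command string; keeping it lets an empty input line
  // re-run the previous command. Returns false to stay in the debugger.
  bool ExecDebugCommand(std::unique_ptr<char[]> command) {
    // ... parse and execute ...
    last_debugger_input_ = std::move(command);
    return false;
  }
  const char* last_debugger_input() const { return last_debugger_input_.get(); }

 private:
  std::unique_ptr<char[]> last_debugger_input_;
};

// Mirrors the copy made in _v8_internal_Simulator_ExecDebugCommand() before
// handing the string over to the simulator.
std::unique_ptr<char[]> CopyCommand(const char* command) {
  size_t len = std::strlen(command) + 1;  // include the terminating NUL
  auto copy = std::make_unique<char[]>(len);
  std::memcpy(copy.get(), command, len);
  return copy;
}
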
diff --git a/deps/v8/src/execution/arm64/simulator-logic-arm64.cc b/deps/v8/src/execution/arm64/simulator-logic-arm64.cc
index db39408a49..f294c58b8a 100644
--- a/deps/v8/src/execution/arm64/simulator-logic-arm64.cc
+++ b/deps/v8/src/execution/arm64/simulator-logic-arm64.cc
@@ -3856,6 +3856,7 @@ LogicVRegister Simulator::fcvtn(VectorFormat vform, LogicVRegister dst,
dst.SetFloat(i, FPToFloat(src.Float<double>(i), FPTieEven));
}
}
+ dst.ClearForWrite(vform);
return dst;
}
diff --git a/deps/v8/src/execution/frame-constants.h b/deps/v8/src/execution/frame-constants.h
index 1c0a1f65f0..6903ae0032 100644
--- a/deps/v8/src/execution/frame-constants.h
+++ b/deps/v8/src/execution/frame-constants.h
@@ -71,7 +71,7 @@ class CommonFrameConstants : public AllStatic {
-(kCPSlotSize + kContextOrFrameTypeSize);
};
-// StandardFrames are used for interpreted and optimized JavaScript
+// StandardFrames are used for both unoptimized and optimized JavaScript
// frames. They always have a context below the saved fp/constant
// pool, below that the JSFunction of the executing function and below that an
// integer (not a Smi) containing the actual number of arguments passed to the
@@ -196,15 +196,6 @@ class TypedFrameConstants : public CommonFrameConstants {
#define DEFINE_TYPED_FRAME_SIZES(count) \
DEFINE_FRAME_SIZES(TypedFrameConstants, count)
-class ArgumentsAdaptorFrameConstants : public TypedFrameConstants {
- public:
- // FP-relative.
- static constexpr int kFunctionOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
- static constexpr int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
- static constexpr int kPaddingOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
- DEFINE_TYPED_FRAME_SIZES(3);
-};
-
class BuiltinFrameConstants : public TypedFrameConstants {
public:
// FP-relative.
@@ -293,12 +284,52 @@ class BuiltinExitFrameConstants : public ExitFrameConstants {
static constexpr int kNumExtraArgsWithReceiver = 5;
};
-class InterpreterFrameConstants : public StandardFrameConstants {
+// Unoptimized frames are used for interpreted and baseline-compiled JavaScript
+// frames. They are a "standard" frame, with an additional fixed header for the
+// BytecodeArray, bytecode offset (if running interpreted), feedback vector (if
+// running baseline code), and then the interpreter register file.
+//
+// slot JS frame
+// +-----------------+--------------------------------
+// -n-1 | parameter n | ^
+// |- - - - - - - - -| |
+// -n | parameter n-1 | Caller
+// ... | ... | frame slots
+// -2 | parameter 1 | (slot < 0)
+// |- - - - - - - - -| |
+// -1 | parameter 0 | v
+// -----+-----------------+--------------------------------
+// 0 | return addr | ^ ^
+// |- - - - - - - - -| | |
+// 1 | saved frame ptr | Fixed |
+// |- - - - - - - - -| Header <-- frame ptr |
+// 2 | [Constant Pool] | | |
+// |- - - - - - - - -| | |
+// 2+cp | Context | | if a constant pool |
+// |- - - - - - - - -| | is used, cp = 1, |
+// 3+cp | JSFunction | | otherwise, cp = 0 |
+// |- - - - - - - - -| | |
+// 4+cp | argc | v |
+// +-----------------+---- |
+// 5+cp | BytecodeArray | ^ |
+// |- - - - - - - - -| Unoptimized code header |
+// 6+cp | offset or FBV | v |
+// +-----------------+---- |
+// 7+cp | register 0 | ^ Callee
+// |- - - - - - - - -| | frame slots
+// 8+cp | register 1 | Register file (slot >= 0)
+// ... | ... | | |
+// | register n-1 | | |
+// |- - - - - - - - -| | |
+// 8+cp+n| register n | v v
+// -----+-----------------+----- <-- stack ptr -------------
+//
+class UnoptimizedFrameConstants : public StandardFrameConstants {
public:
// FP-relative.
static constexpr int kBytecodeArrayFromFp =
STANDARD_FRAME_EXTRA_PUSHED_VALUE_OFFSET(0);
- static constexpr int kBytecodeOffsetFromFp =
+ static constexpr int kBytecodeOffsetOrFeedbackVectorFromFp =
STANDARD_FRAME_EXTRA_PUSHED_VALUE_OFFSET(1);
DEFINE_STANDARD_FRAME_SIZES(2);
@@ -310,7 +341,7 @@ class InterpreterFrameConstants : public StandardFrameConstants {
// Expression index for {JavaScriptFrame::GetExpressionAddress}.
static constexpr int kBytecodeArrayExpressionIndex = -2;
- static constexpr int kBytecodeOffsetExpressionIndex = -1;
+ static constexpr int kBytecodeOffsetOrFeedbackVectorExpressionIndex = -1;
static constexpr int kRegisterFileExpressionIndex = 0;
// Returns the number of stack slots needed for 'register_count' registers.
@@ -319,6 +350,30 @@ class InterpreterFrameConstants : public StandardFrameConstants {
static int RegisterStackSlotCount(int register_count);
};
+// Interpreter frames are unoptimized frames that are being executed by the
+// interpreter. In this case, the "offset or FBV" slot contains the bytecode
+// offset of the currently executing bytecode.
+class InterpreterFrameConstants : public UnoptimizedFrameConstants {
+ public:
+ static constexpr int kBytecodeOffsetExpressionIndex =
+ kBytecodeOffsetOrFeedbackVectorExpressionIndex;
+
+ static constexpr int kBytecodeOffsetFromFp =
+ kBytecodeOffsetOrFeedbackVectorFromFp;
+};
+
+// Sparkplug frames are unoptimized frames that are being executed by
+// sparkplug-compiled baseline code. In this case, the "offset or FBV"
+// slot contains a cached pointer to the feedback vector.
+class BaselineFrameConstants : public UnoptimizedFrameConstants {
+ public:
+ static constexpr int kFeedbackVectorExpressionIndex =
+ kBytecodeOffsetOrFeedbackVectorExpressionIndex;
+
+ static constexpr int kFeedbackVectorFromFp =
+ kBytecodeOffsetOrFeedbackVectorFromFp;
+};
+
inline static int FPOffsetToFrameSlot(int frame_offset) {
return StandardFrameConstants::kFixedSlotCountAboveFp - 1 -
frame_offset / kSystemPointerSize;
@@ -348,6 +403,8 @@ inline static int FrameSlotToFPOffset(int slot) {
#include "src/execution/mips64/frame-constants-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_S390
#include "src/execution/s390/frame-constants-s390.h" // NOLINT
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/execution/riscv64/frame-constants-riscv64.h" // NOLINT
#else
#error Unsupported target architecture.
#endif
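
Reading the UnoptimizedFrameConstants diagram above with cp = 0 (no embedded constant pool) and 8-byte pointers: the frame pointer points at the saved-fp slot, so slot k of the diagram lives at fp - (k - 1) * kSystemPointerSize. A small sketch of that arithmetic; the helper name and the static_asserts are illustrative, not the real V8 constants.

#include <cstdint>

constexpr int kSystemPointerSize = 8;  // 64-bit target assumed

// Slot numbers follow the diagram: slot 1 is the saved frame pointer (where fp
// points), and larger slot numbers sit further down the stack (lower addresses).
constexpr intptr_t SlotToFpOffset(int slot) {
  return -static_cast<intptr_t>(slot - 1) * kSystemPointerSize;
}

static_assert(SlotToFpOffset(2) == -8, "Context");
static_assert(SlotToFpOffset(3) == -16, "JSFunction");
static_assert(SlotToFpOffset(4) == -24, "argc");
static_assert(SlotToFpOffset(5) == -32, "BytecodeArray");
static_assert(SlotToFpOffset(6) == -40, "bytecode offset or feedback vector");

Under those assumptions, an interpreted frame reads its bytecode-offset Smi from the slot at fp - 40, while a baseline (Sparkplug) frame keeps its cached feedback vector in that same slot.
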
diff --git a/deps/v8/src/execution/frames-inl.h b/deps/v8/src/execution/frames-inl.h
index b394c7b614..a5d60f825f 100644
--- a/deps/v8/src/execution/frames-inl.h
+++ b/deps/v8/src/execution/frames-inl.h
@@ -47,7 +47,6 @@ inline Address StackHandler::address() const {
return reinterpret_cast<Address>(const_cast<StackHandler*>(this));
}
-
inline StackHandler* StackHandler::next() const {
const int offset = StackHandlerConstants::kNextOffset;
return FromAddress(base::Memory<Address>(address() + offset));
@@ -61,10 +60,8 @@ inline StackHandler* StackHandler::FromAddress(Address address) {
return reinterpret_cast<StackHandler*>(address);
}
-
inline StackFrame::StackFrame(StackFrameIteratorBase* iterator)
- : iterator_(iterator), isolate_(iterator_->isolate()) {
-}
+ : iterator_(iterator), isolate_(iterator_->isolate()) {}
inline StackHandler* StackFrame::top_handler() const {
return iterator_->handler();
@@ -89,9 +86,8 @@ inline Address* StackFrame::ResolveReturnAddressLocation(Address* pc_address) {
if (return_address_location_resolver_ == nullptr) {
return pc_address;
} else {
- return reinterpret_cast<Address*>(
- return_address_location_resolver_(
- reinterpret_cast<uintptr_t>(pc_address)));
+ return reinterpret_cast<Address*>(return_address_location_resolver_(
+ reinterpret_cast<uintptr_t>(pc_address)));
}
}
@@ -178,12 +174,6 @@ inline Address CommonFrame::ComputeConstantPoolAddress(Address fp) {
return fp + StandardFrameConstants::kConstantPoolOffset;
}
-inline bool CommonFrame::IsArgumentsAdaptorFrame(Address fp) {
- intptr_t frame_type =
- base::Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
- return frame_type == StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR);
-}
-
inline bool CommonFrameWithJSLinkage::IsConstructFrame(Address fp) {
intptr_t frame_type =
base::Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
@@ -195,31 +185,20 @@ inline JavaScriptFrame::JavaScriptFrame(StackFrameIteratorBase* iterator)
Address CommonFrameWithJSLinkage::GetParameterSlot(int index) const {
DCHECK_LE(-1, index);
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
DCHECK_LT(index,
std::max(GetActualArgumentCount(), ComputeParametersCount()));
-#else
- DCHECK(index < ComputeParametersCount() ||
- ComputeParametersCount() == kDontAdaptArgumentsSentinel);
-#endif
int parameter_offset = (index + 1) * kSystemPointerSize;
return caller_sp() + parameter_offset;
}
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
inline int CommonFrameWithJSLinkage::GetActualArgumentCount() const {
return 0;
}
-#endif
inline void JavaScriptFrame::set_receiver(Object value) {
base::Memory<Address>(GetParameterSlot(-1)) = value.ptr();
}
-inline bool JavaScriptFrame::has_adapted_arguments() const {
- return IsArgumentsAdaptorFrame(caller_fp());
-}
-
inline Object JavaScriptFrame::function_slot_object() const {
const int offset = StandardFrameConstants::kFunctionOffset;
return Object(base::Memory<Address>(fp() + offset));
@@ -229,16 +208,16 @@ inline StubFrame::StubFrame(StackFrameIteratorBase* iterator)
: TypedFrame(iterator) {}
inline OptimizedFrame::OptimizedFrame(StackFrameIteratorBase* iterator)
- : JavaScriptFrame(iterator) {
-}
+ : JavaScriptFrame(iterator) {}
-inline InterpretedFrame::InterpretedFrame(StackFrameIteratorBase* iterator)
+inline UnoptimizedFrame::UnoptimizedFrame(StackFrameIteratorBase* iterator)
: JavaScriptFrame(iterator) {}
+inline InterpretedFrame::InterpretedFrame(StackFrameIteratorBase* iterator)
+ : UnoptimizedFrame(iterator) {}
-inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
- StackFrameIteratorBase* iterator) : JavaScriptFrame(iterator) {
-}
+inline BaselineFrame::BaselineFrame(StackFrameIteratorBase* iterator)
+ : UnoptimizedFrame(iterator) {}
inline BuiltinFrame::BuiltinFrame(StackFrameIteratorBase* iterator)
: TypedFrameWithJSLinkage(iterator) {}
@@ -270,8 +249,7 @@ inline InternalFrame::InternalFrame(StackFrameIteratorBase* iterator)
: TypedFrame(iterator) {}
inline ConstructFrame::ConstructFrame(StackFrameIteratorBase* iterator)
- : InternalFrame(iterator) {
-}
+ : InternalFrame(iterator) {}
inline BuiltinContinuationFrame::BuiltinContinuationFrame(
StackFrameIteratorBase* iterator)
@@ -286,35 +264,38 @@ inline JavaScriptBuiltinContinuationWithCatchFrame::
StackFrameIteratorBase* iterator)
: JavaScriptBuiltinContinuationFrame(iterator) {}
-inline JavaScriptFrameIterator::JavaScriptFrameIterator(
- Isolate* isolate)
+inline JavaScriptFrameIterator::JavaScriptFrameIterator(Isolate* isolate)
: iterator_(isolate) {
if (!done()) Advance();
}
-inline JavaScriptFrameIterator::JavaScriptFrameIterator(
- Isolate* isolate, ThreadLocalTop* top)
+inline JavaScriptFrameIterator::JavaScriptFrameIterator(Isolate* isolate,
+ ThreadLocalTop* top)
: iterator_(isolate, top) {
if (!done()) Advance();
}
inline JavaScriptFrame* JavaScriptFrameIterator::frame() const {
- // TODO(1233797): The frame hierarchy needs to change. It's
- // problematic that we can't use the safe-cast operator to cast to
- // the JavaScript frame type, because we may encounter arguments
- // adaptor frames.
StackFrame* frame = iterator_.frame();
- DCHECK(frame->is_java_script() || frame->is_arguments_adaptor());
- return static_cast<JavaScriptFrame*>(frame);
+ return JavaScriptFrame::cast(frame);
+}
+
+inline JavaScriptFrame* JavaScriptFrameIterator::Reframe() {
+ StackFrame* frame = iterator_.Reframe();
+ return JavaScriptFrame::cast(frame);
}
inline CommonFrame* StackTraceFrameIterator::frame() const {
StackFrame* frame = iterator_.frame();
- DCHECK(frame->is_java_script() || frame->is_arguments_adaptor() ||
- frame->is_wasm());
+ DCHECK(frame->is_java_script() || frame->is_wasm());
return static_cast<CommonFrame*>(frame);
}
+inline CommonFrame* StackTraceFrameIterator::Reframe() {
+ iterator_.Reframe();
+ return frame();
+}
+
bool StackTraceFrameIterator::is_javascript() const {
return frame()->is_java_script();
}
@@ -333,7 +314,6 @@ inline StackFrame* SafeStackFrameIterator::frame() const {
return frame_;
}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/frames.cc b/deps/v8/src/execution/frames.cc
index 64ac998c28..6ee597572e 100644
--- a/deps/v8/src/execution/frames.cc
+++ b/deps/v8/src/execution/frames.cc
@@ -122,6 +122,12 @@ void StackFrameIterator::Advance() {
DCHECK(!done() || handler_ == nullptr);
}
+StackFrame* StackFrameIterator::Reframe() {
+ StackFrame::Type type = frame_->ComputeType(this, &frame_->state_);
+ frame_ = SingletonFor(type, &frame_->state_);
+ return frame();
+}
+
void StackFrameIterator::Reset(ThreadLocalTop* top) {
StackFrame::State state;
StackFrame::Type type =
@@ -434,15 +440,6 @@ bool SafeStackFrameIterator::IsValidCaller(StackFrame* frame) {
Address caller_fp =
Memory<Address>(frame->fp() + EntryFrameConstants::kCallerFPOffset);
if (!IsValidExitFrame(caller_fp)) return false;
- } else if (frame->is_arguments_adaptor()) {
- // See ArgumentsAdaptorFrame::GetCallerStackPointer. It assumes that
- // the number of arguments is stored on stack as Smi. We need to check
- // that it really an Smi.
- Object number_of_args =
- reinterpret_cast<ArgumentsAdaptorFrame*>(frame)->GetExpression(0);
- if (!number_of_args.IsSmi()) {
- return false;
- }
}
frame->ComputeCallerState(&state);
return IsValidStackAddress(state.sp) && IsValidStackAddress(state.fp) &&
@@ -590,6 +587,9 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
if (code_obj.is_interpreter_trampoline_builtin()) {
return INTERPRETED;
}
+ if (code_obj.is_baseline_leave_frame_builtin()) {
+ return BASELINE;
+ }
if (code_obj.is_turbofanned()) {
// TODO(bmeurer): We treat frames for BUILTIN Code objects as
// OptimizedFrame for now (all the builtins with JavaScript
@@ -602,6 +602,8 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
case CodeKind::NATIVE_CONTEXT_INDEPENDENT:
case CodeKind::TURBOPROP:
return OPTIMIZED;
+ case CodeKind::BASELINE:
+ return Type::BASELINE;
case CodeKind::JS_TO_WASM_FUNCTION:
return JS_TO_WASM;
case CodeKind::JS_TO_JS_FUNCTION:
@@ -636,7 +638,6 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
case STUB:
case INTERNAL:
case CONSTRUCT:
- case ARGUMENTS_ADAPTOR:
case WASM_TO_JS:
case WASM:
case WASM_COMPILE_LAZY:
@@ -864,8 +865,8 @@ Address CommonFrame::GetExpressionAddress(int n) const {
return fp() + offset - n * kSystemPointerSize;
}
-Address InterpretedFrame::GetExpressionAddress(int n) const {
- const int offset = InterpreterFrameConstants::kExpressionsOffset;
+Address UnoptimizedFrame::GetExpressionAddress(int n) const {
+ const int offset = UnoptimizedFrameConstants::kExpressionsOffset;
return fp() + offset - n * kSystemPointerSize;
}
@@ -915,14 +916,15 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
SafepointEntry safepoint_entry;
uint32_t stack_slots;
Code code;
- bool has_tagged_params = false;
+ bool has_tagged_outgoing_params = false;
uint32_t tagged_parameter_slots = 0;
if (wasm_code != nullptr) {
SafepointTable table(wasm_code);
safepoint_entry = table.FindEntry(inner_pointer);
stack_slots = wasm_code->stack_slots();
- has_tagged_params = wasm_code->kind() != wasm::WasmCode::kFunction &&
- wasm_code->kind() != wasm::WasmCode::kWasmToCapiWrapper;
+ has_tagged_outgoing_params =
+ wasm_code->kind() != wasm::WasmCode::kFunction &&
+ wasm_code->kind() != wasm::WasmCode::kWasmToCapiWrapper;
tagged_parameter_slots = wasm_code->tagged_parameter_slots();
} else {
InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry =
@@ -938,7 +940,16 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
code = entry->code;
safepoint_entry = entry->safepoint_entry;
stack_slots = code.stack_slots();
- has_tagged_params = code.has_tagged_params();
+
+ // With inlined JS-to-Wasm calls, we can be in an OptimizedFrame and
+ // directly call a Wasm function from JavaScript. In this case the
+ // parameters we pass to the callee are not tagged.
+ wasm::WasmCode* wasm_callee =
+ isolate()->wasm_engine()->code_manager()->LookupCode(callee_pc());
+ bool is_wasm_call = (wasm_callee != nullptr);
+
+ has_tagged_outgoing_params =
+ !is_wasm_call && code.has_tagged_outgoing_params();
}
uint32_t slot_space = stack_slots * kSystemPointerSize;
@@ -957,7 +968,6 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
case JAVA_SCRIPT_BUILTIN_CONTINUATION:
case JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH:
case BUILTIN_EXIT:
- case ARGUMENTS_ADAPTOR:
case STUB:
case INTERNAL:
case CONSTRUCT:
@@ -981,6 +991,7 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
break;
case OPTIMIZED:
case INTERPRETED:
+ case BASELINE:
case BUILTIN:
// These frame types have a context, but they are actually stored
// in the place on the stack that one finds the frame type.
@@ -1004,7 +1015,7 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
FullObjectSlot parameters_limit(frame_header_base.address() - slot_space);
// Visit the rest of the parameters if they are tagged.
- if (has_tagged_params) {
+ if (has_tagged_outgoing_params) {
v->VisitRootPointers(Root::kTop, nullptr, parameters_base,
parameters_limit);
}
@@ -1086,12 +1097,7 @@ void JavaScriptFrame::SetParameterValue(int index, Object value) const {
}
bool JavaScriptFrame::IsConstructor() const {
- Address fp = caller_fp();
- if (has_adapted_arguments()) {
- // Skip the arguments adaptor frame and look at the real caller.
- fp = Memory<Address>(fp + StandardFrameConstants::kCallerFPOffset);
- }
- return IsConstructFrame(fp);
+ return IsConstructFrame(caller_fp());
}
bool JavaScriptFrame::HasInlinedFrames() const {
@@ -1181,7 +1187,8 @@ Script JavaScriptFrame::script() const {
int CommonFrameWithJSLinkage::LookupExceptionHandlerInTable(
int* stack_depth, HandlerTable::CatchPrediction* prediction) {
DCHECK(!LookupCode().has_handler_table());
- DCHECK(!LookupCode().is_optimized_code());
+ DCHECK(!LookupCode().is_optimized_code() ||
+ LookupCode().kind() == CodeKind::BASELINE);
return -1;
}
@@ -1283,12 +1290,10 @@ int CommonFrameWithJSLinkage::ComputeParametersCount() const {
return function().shared().internal_formal_parameter_count();
}
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
int JavaScriptFrame::GetActualArgumentCount() const {
return static_cast<int>(
Memory<intptr_t>(fp() + StandardFrameConstants::kArgCOffset));
}
-#endif
Handle<FixedArray> CommonFrameWithJSLinkage::GetParameters() const {
if (V8_LIKELY(!FLAG_detailed_error_stack_trace)) {
@@ -1542,7 +1547,7 @@ void OptimizedFrame::Summarize(std::vector<FrameSummary>* frames) const {
// in the deoptimization translation are ordered bottom-to-top.
bool is_constructor = IsConstructor();
for (auto it = translated.begin(); it != translated.end(); it++) {
- if (it->kind() == TranslatedFrame::kInterpretedFunction ||
+ if (it->kind() == TranslatedFrame::kUnoptimizedFunction ||
it->kind() == TranslatedFrame::kJavaScriptBuiltinContinuation ||
it->kind() ==
TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch) {
@@ -1569,13 +1574,13 @@ void OptimizedFrame::Summarize(std::vector<FrameSummary>* frames) const {
it->kind() ==
TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch) {
code_offset = 0;
- abstract_code =
- handle(AbstractCode::cast(isolate()->builtins()->builtin(
- Builtins::GetBuiltinFromBailoutId(it->node_id()))),
- isolate());
+ abstract_code = handle(
+ AbstractCode::cast(isolate()->builtins()->builtin(
+ Builtins::GetBuiltinFromBytecodeOffset(it->bytecode_offset()))),
+ isolate());
} else {
- DCHECK_EQ(it->kind(), TranslatedFrame::kInterpretedFunction);
- code_offset = it->node_id().ToInt(); // Points to current bytecode.
+ DCHECK_EQ(it->kind(), TranslatedFrame::kUnoptimizedFunction);
+ code_offset = it->bytecode_offset().ToInt();
abstract_code =
handle(shared_info->abstract_code(isolate()), isolate());
}
@@ -1660,10 +1665,10 @@ void OptimizedFrame::GetFunctions(
DCHECK_NE(Safepoint::kNoDeoptimizationIndex, deopt_index);
FixedArray const literal_array = data.LiteralArray();
- TranslationIterator it(data.TranslationByteArray(),
- data.TranslationIndex(deopt_index).value());
- Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
- DCHECK_EQ(Translation::BEGIN, opcode);
+ TranslationArrayIterator it(data.TranslationByteArray(),
+ data.TranslationIndex(deopt_index).value());
+ TranslationOpcode opcode = TranslationOpcodeFromInt(it.Next());
+ DCHECK_EQ(TranslationOpcode::BEGIN, opcode);
it.Next(); // Skip frame count.
int jsframe_count = it.Next();
it.Next(); // Skip update feedback count.
@@ -1671,11 +1676,11 @@ void OptimizedFrame::GetFunctions(
// We insert the frames in reverse order because the frames
// in the deoptimization translation are ordered bottom-to-top.
while (jsframe_count != 0) {
- opcode = static_cast<Translation::Opcode>(it.Next());
- if (opcode == Translation::INTERPRETED_FRAME ||
- opcode == Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME ||
- opcode ==
- Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME) {
+ opcode = TranslationOpcodeFromInt(it.Next());
+ if (opcode == TranslationOpcode::INTERPRETED_FRAME ||
+ opcode == TranslationOpcode::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME ||
+ opcode == TranslationOpcode::
+ JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME) {
it.Next(); // Skip bailout id.
jsframe_count--;
@@ -1684,10 +1689,10 @@ void OptimizedFrame::GetFunctions(
functions->push_back(SharedFunctionInfo::cast(shared));
// Skip over remaining operands to advance to the next opcode.
- it.Skip(Translation::NumberOfOperandsFor(opcode) - 2);
+ it.Skip(TranslationOpcodeOperandCount(opcode) - 2);
} else {
// Skip over operands to advance to the next opcode.
- it.Skip(Translation::NumberOfOperandsFor(opcode));
+ it.Skip(TranslationOpcodeOperandCount(opcode));
}
}
}
@@ -1701,18 +1706,45 @@ Object OptimizedFrame::StackSlotAt(int index) const {
return Object(Memory<Address>(fp() + StackSlotOffsetRelativeToFp(index)));
}
-int InterpretedFrame::position() const {
+int UnoptimizedFrame::position() const {
AbstractCode code = AbstractCode::cast(GetBytecodeArray());
int code_offset = GetBytecodeOffset();
return code.SourcePosition(code_offset);
}
-int InterpretedFrame::LookupExceptionHandlerInTable(
+int UnoptimizedFrame::LookupExceptionHandlerInTable(
int* context_register, HandlerTable::CatchPrediction* prediction) {
HandlerTable table(GetBytecodeArray());
return table.LookupRange(GetBytecodeOffset(), context_register, prediction);
}
+BytecodeArray UnoptimizedFrame::GetBytecodeArray() const {
+ const int index = UnoptimizedFrameConstants::kBytecodeArrayExpressionIndex;
+ DCHECK_EQ(UnoptimizedFrameConstants::kBytecodeArrayFromFp,
+ UnoptimizedFrameConstants::kExpressionsOffset -
+ index * kSystemPointerSize);
+ return BytecodeArray::cast(GetExpression(index));
+}
+
+Object UnoptimizedFrame::ReadInterpreterRegister(int register_index) const {
+ const int index = UnoptimizedFrameConstants::kRegisterFileExpressionIndex;
+ DCHECK_EQ(UnoptimizedFrameConstants::kRegisterFileFromFp,
+ UnoptimizedFrameConstants::kExpressionsOffset -
+ index * kSystemPointerSize);
+ return GetExpression(index + register_index);
+}
+
+void UnoptimizedFrame::Summarize(std::vector<FrameSummary>* functions) const {
+ DCHECK(functions->empty());
+ Handle<AbstractCode> abstract_code(AbstractCode::cast(GetBytecodeArray()),
+ isolate());
+ Handle<FixedArray> params = GetParameters();
+ FrameSummary::JavaScriptFrameSummary summary(
+ isolate(), receiver(), function(), *abstract_code, GetBytecodeOffset(),
+ IsConstructor(), *params);
+ functions->push_back(summary);
+}
+
int InterpretedFrame::GetBytecodeOffset() const {
const int index = InterpreterFrameConstants::kBytecodeOffsetExpressionIndex;
DCHECK_EQ(InterpreterFrameConstants::kBytecodeOffsetFromFp,
@@ -1722,6 +1754,7 @@ int InterpretedFrame::GetBytecodeOffset() const {
return raw_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
}
+// static
int InterpretedFrame::GetBytecodeOffset(Address fp) {
const int offset = InterpreterFrameConstants::kExpressionsOffset;
const int index = InterpreterFrameConstants::kBytecodeOffsetExpressionIndex;
@@ -1742,14 +1775,6 @@ void InterpretedFrame::PatchBytecodeOffset(int new_offset) {
SetExpression(index, Smi::FromInt(raw_offset));
}
-BytecodeArray InterpretedFrame::GetBytecodeArray() const {
- const int index = InterpreterFrameConstants::kBytecodeArrayExpressionIndex;
- DCHECK_EQ(InterpreterFrameConstants::kBytecodeArrayFromFp,
- InterpreterFrameConstants::kExpressionsOffset -
- index * kSystemPointerSize);
- return BytecodeArray::cast(GetExpression(index));
-}
-
void InterpretedFrame::PatchBytecodeArray(BytecodeArray bytecode_array) {
const int index = InterpreterFrameConstants::kBytecodeArrayExpressionIndex;
DCHECK_EQ(InterpreterFrameConstants::kBytecodeArrayFromFp,
@@ -1758,41 +1783,17 @@ void InterpretedFrame::PatchBytecodeArray(BytecodeArray bytecode_array) {
SetExpression(index, bytecode_array);
}
-Object InterpretedFrame::ReadInterpreterRegister(int register_index) const {
- const int index = InterpreterFrameConstants::kRegisterFileExpressionIndex;
- DCHECK_EQ(InterpreterFrameConstants::kRegisterFileFromFp,
- InterpreterFrameConstants::kExpressionsOffset -
- index * kSystemPointerSize);
- return GetExpression(index + register_index);
-}
-
-void InterpretedFrame::WriteInterpreterRegister(int register_index,
- Object value) {
- const int index = InterpreterFrameConstants::kRegisterFileExpressionIndex;
- DCHECK_EQ(InterpreterFrameConstants::kRegisterFileFromFp,
- InterpreterFrameConstants::kExpressionsOffset -
- index * kSystemPointerSize);
- return SetExpression(index + register_index, value);
+int BaselineFrame::GetBytecodeOffset() const {
+ return LookupCode().GetBytecodeOffsetForBaselinePC(this->pc());
}
-void InterpretedFrame::Summarize(std::vector<FrameSummary>* functions) const {
- DCHECK(functions->empty());
- Handle<AbstractCode> abstract_code(AbstractCode::cast(GetBytecodeArray()),
- isolate());
- Handle<FixedArray> params = GetParameters();
- FrameSummary::JavaScriptFrameSummary summary(
- isolate(), receiver(), function(), *abstract_code, GetBytecodeOffset(),
- IsConstructor(), *params);
- functions->push_back(summary);
+intptr_t BaselineFrame::GetPCForBytecodeOffset(int bytecode_offset) const {
+ return LookupCode().GetBaselinePCForBytecodeOffset(bytecode_offset);
}
-int ArgumentsAdaptorFrame::ComputeParametersCount() const {
- const int offset = ArgumentsAdaptorFrameConstants::kLengthOffset;
- return Smi::ToInt(Object(base::Memory<Address>(fp() + offset)));
-}
-
-Code ArgumentsAdaptorFrame::unchecked_code() const {
- return isolate()->builtins()->builtin(Builtins::kArgumentsAdaptorTrampoline);
+void BaselineFrame::PatchContext(Context value) {
+ base::Memory<Address>(fp() + BaselineFrameConstants::kContextOffset) =
+ value.ptr();
}
JSFunction BuiltinFrame::function() const {
@@ -1805,8 +1806,6 @@ int BuiltinFrame::ComputeParametersCount() const {
return Smi::ToInt(Object(base::Memory<Address>(fp() + offset)));
}
-Code InternalFrame::unchecked_code() const { return Code(); }
-
void WasmFrame::Print(StringStream* accumulator, PrintMode mode,
int index) const {
PrintIndex(accumulator, mode, index);
@@ -1835,10 +1834,6 @@ void WasmFrame::Print(StringStream* accumulator, PrintMode mode,
if (mode != OVERVIEW) accumulator->Add("\n");
}
-Code WasmFrame::unchecked_code() const {
- return isolate()->FindCodeObject(pc());
-}
-
wasm::WasmCode* WasmFrame::wasm_code() const {
return isolate()->wasm_engine()->code_manager()->LookupCode(pc());
}
@@ -1858,7 +1853,8 @@ WasmModuleObject WasmFrame::module_object() const {
}
uint32_t WasmFrame::function_index() const {
- return FrameSummary::GetSingle(this).AsWasm().function_index();
+ wasm::WasmCodeRefScope code_ref_scope;
+ return wasm_code()->index();
}
Script WasmFrame::script() const { return module_object().script(); }
@@ -2030,8 +2026,7 @@ void JavaScriptFrame::Print(StringStream* accumulator, PrintMode mode,
accumulator->PrintName(script.name());
if (is_interpreted()) {
- const InterpretedFrame* iframe =
- reinterpret_cast<const InterpretedFrame*>(this);
+ const InterpretedFrame* iframe = InterpretedFrame::cast(this);
BytecodeArray bytecodes = iframe->GetBytecodeArray();
int offset = iframe->GetBytecodeOffset();
int source_pos = AbstractCode::cast(bytecodes).SourcePosition(offset);
@@ -2116,34 +2111,6 @@ void JavaScriptFrame::Print(StringStream* accumulator, PrintMode mode,
accumulator->Add("}\n\n");
}
-void ArgumentsAdaptorFrame::Print(StringStream* accumulator, PrintMode mode,
- int index) const {
- int actual = ComputeParametersCount();
- int expected = -1;
- JSFunction function = this->function();
- expected = function.shared().internal_formal_parameter_count();
-
- PrintIndex(accumulator, mode, index);
- accumulator->Add("arguments adaptor frame: %d->%d", actual, expected);
- if (mode == OVERVIEW) {
- accumulator->Add("\n");
- return;
- }
- accumulator->Add(" {\n");
-
- // Print actual arguments.
- if (actual > 0) accumulator->Add(" // actual arguments\n");
- for (int i = 0; i < actual; i++) {
- accumulator->Add(" [%02d] : %o", i, GetParameter(i));
- if (expected != -1 && i >= expected) {
- accumulator->Add(" // not passed to callee");
- }
- accumulator->Add("\n");
- }
-
- accumulator->Add("}\n\n");
-}
-
void EntryFrame::Iterate(RootVisitor* v) const {
IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
}
@@ -2176,11 +2143,11 @@ void InternalFrame::Iterate(RootVisitor* v) const {
IteratePc(v, pc_address(), constant_pool_address(), code);
// Internal frames typically do not receive any arguments, hence their stack
// only contains tagged pointers.
- // We are misusing the has_tagged_params flag here to tell us whether
+ // We are misusing the has_tagged_outgoing_params flag here to tell us whether
// the full stack frame contains only tagged pointers or only raw values.
// This is used for the WasmCompileLazy builtin, where we actually pass
// untagged arguments and also store untagged values on the stack.
- if (code.has_tagged_params()) IterateExpressions(v);
+ if (code.has_tagged_outgoing_params()) IterateExpressions(v);
}
// -------------------------------------------------------------------------
@@ -2249,14 +2216,14 @@ bool BuiltinContinuationModeIsWithCatch(BuiltinContinuationMode mode) {
} // namespace
-InterpretedFrameInfo::InterpretedFrameInfo(int parameters_count_with_receiver,
+UnoptimizedFrameInfo::UnoptimizedFrameInfo(int parameters_count_with_receiver,
int translation_height,
bool is_topmost, bool pad_arguments,
FrameInfoKind frame_info_kind) {
const int locals_count = translation_height;
register_stack_slot_count_ =
- InterpreterFrameConstants::RegisterStackSlotCount(locals_count);
+ UnoptimizedFrameConstants::RegisterStackSlotCount(locals_count);
static constexpr int kTheAccumulator = 1;
static constexpr int kTopOfStackPadding = TopOfStackRegisterPaddingSlots();
@@ -2280,17 +2247,6 @@ InterpretedFrameInfo::InterpretedFrameInfo(int parameters_count_with_receiver,
frame_size_in_bytes_ = frame_size_in_bytes_without_fixed_ + fixed_frame_size;
}
-ArgumentsAdaptorFrameInfo::ArgumentsAdaptorFrameInfo(int translation_height) {
- // Note: This is according to the Translation's notion of 'parameters' which
- // differs to that of the SharedFunctionInfo, e.g. by including the receiver.
- const int parameters_count = translation_height;
- frame_size_in_bytes_without_fixed_ =
- (parameters_count + ArgumentPaddingSlots(parameters_count)) *
- kSystemPointerSize;
- frame_size_in_bytes_ = frame_size_in_bytes_without_fixed_ +
- ArgumentsAdaptorFrameConstants::kFixedFrameSize;
-}
-
ConstructStubFrameInfo::ConstructStubFrameInfo(int translation_height,
bool is_topmost,
FrameInfoKind frame_info_kind) {
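
One detail worth calling out from the frames.cc changes: InterpretedFrame::GetBytecodeOffset() and PatchBytecodeOffset() store the offset in the frame relative to a different base than the one used by handler and source-position tables (effectively an offset into the BytecodeArray object), and rebase it via "- BytecodeArray::kHeaderSize + kHeapObjectTag". A toy round-trip of that rebasing follows; kHeaderSizeExample and kHeapObjectTagExample are made-up stand-ins, not the real constants.

// Illustrative only: these values are hypothetical stand-ins for
// BytecodeArray::kHeaderSize and kHeapObjectTag.
constexpr int kHeaderSizeExample = 40;
constexpr int kHeapObjectTagExample = 1;

constexpr int ToTableOffset(int raw_frame_offset) {
  return raw_frame_offset - kHeaderSizeExample + kHeapObjectTagExample;
}
constexpr int ToRawFrameOffset(int table_offset) {
  return table_offset + kHeaderSizeExample - kHeapObjectTagExample;
}

static_assert(ToTableOffset(ToRawFrameOffset(12)) == 12, "round trip");
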
diff --git a/deps/v8/src/execution/frames.h b/deps/v8/src/execution/frames.h
index 7a86dfa641..eef201914b 100644
--- a/deps/v8/src/execution/frames.h
+++ b/deps/v8/src/execution/frames.h
@@ -5,6 +5,7 @@
#ifndef V8_EXECUTION_FRAMES_H_
#define V8_EXECUTION_FRAMES_H_
+#include "src/base/bounds.h"
#include "src/codegen/safepoint-table.h"
#include "src/common/globals.h"
#include "src/handles/handles.h"
@@ -16,9 +17,10 @@
// - CommonFrame
// - CommonFrameWithJSLinkage
// - JavaScriptFrame (aka StandardFrame)
-// - InterpretedFrame
+// - UnoptimizedFrame
+// - InterpretedFrame
+// - BaselineFrame
// - OptimizedFrame
-// - ArgumentsAdaptorFrame (technically a TypedFrame)
// - TypedFrameWithJSLinkage
// - BuiltinFrame
// - JavaScriptBuiltinContinuationFrame
@@ -94,7 +96,6 @@ class StackHandler {
V(ENTRY, EntryFrame) \
V(CONSTRUCT_ENTRY, ConstructEntryFrame) \
V(EXIT, ExitFrame) \
- V(OPTIMIZED, OptimizedFrame) \
V(WASM, WasmFrame) \
V(WASM_TO_JS, WasmToJsFrame) \
V(JS_TO_WASM, JsToWasmFrame) \
@@ -103,6 +104,8 @@ class StackHandler {
V(WASM_EXIT, WasmExitFrame) \
V(WASM_COMPILE_LAZY, WasmCompileLazyFrame) \
V(INTERPRETED, InterpretedFrame) \
+ V(BASELINE, BaselineFrame) \
+ V(OPTIMIZED, OptimizedFrame) \
V(STUB, StubFrame) \
V(BUILTIN_CONTINUATION, BuiltinContinuationFrame) \
V(JAVA_SCRIPT_BUILTIN_CONTINUATION, JavaScriptBuiltinContinuationFrame) \
@@ -110,7 +113,6 @@ class StackHandler {
JavaScriptBuiltinContinuationWithCatchFrame) \
V(INTERNAL, InternalFrame) \
V(CONSTRUCT, ConstructFrame) \
- V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame) \
V(BUILTIN, BuiltinFrame) \
V(BUILTIN_EXIT, BuiltinExitFrame) \
V(NATIVE, NativeFrame)
@@ -208,11 +210,15 @@ class StackFrame {
bool is_construct_entry() const { return type() == CONSTRUCT_ENTRY; }
bool is_exit() const { return type() == EXIT; }
bool is_optimized() const { return type() == OPTIMIZED; }
+ bool is_unoptimized() const {
+ STATIC_ASSERT(BASELINE == INTERPRETED + 1);
+ return base::IsInRange(type(), INTERPRETED, BASELINE);
+ }
bool is_interpreted() const { return type() == INTERPRETED; }
+ bool is_baseline() const { return type() == BASELINE; }
bool is_wasm() const { return this->type() == WASM; }
bool is_wasm_compile_lazy() const { return type() == WASM_COMPILE_LAZY; }
bool is_wasm_debug_break() const { return type() == WASM_DEBUG_BREAK; }
- bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
bool is_builtin() const { return type() == BUILTIN; }
bool is_internal() const { return type() == INTERNAL; }
bool is_builtin_continuation() const {
@@ -227,10 +233,12 @@ class StackFrame {
bool is_construct() const { return type() == CONSTRUCT; }
bool is_builtin_exit() const { return type() == BUILTIN_EXIT; }
- bool is_java_script() const {
- Type type = this->type();
- return (type == OPTIMIZED) || (type == INTERPRETED);
+ static bool IsJavaScript(Type t) {
+ STATIC_ASSERT(INTERPRETED + 1 == BASELINE);
+ STATIC_ASSERT(BASELINE + 1 == OPTIMIZED);
+ return t >= INTERPRETED && t <= OPTIMIZED;
}
+ bool is_java_script() const { return IsJavaScript(type()); }
bool is_wasm_to_js() const { return type() == WASM_TO_JS; }
bool is_js_to_wasm() const { return type() == JS_TO_WASM; }
@@ -509,10 +517,6 @@ class CommonFrame : public StackFrame {
// Returns the address of the n'th expression stack element.
virtual Address GetExpressionAddress(int n) const;
- // Determines if the standard frame for the given frame pointer is
- // an arguments adaptor frame.
- static inline bool IsArgumentsAdaptorFrame(Address fp);
-
// Used by OptimizedFrames and StubFrames.
void IterateCompiledFrame(RootVisitor* v) const;
@@ -523,7 +527,7 @@ class CommonFrame : public StackFrame {
class TypedFrame : public CommonFrame {
public:
- Code unchecked_code() const override { return Code(); }
+ Code unchecked_code() const override { return {}; }
void Iterate(RootVisitor* v) const override { IterateCompiledFrame(v); }
protected:
@@ -540,9 +544,7 @@ class CommonFrameWithJSLinkage : public CommonFrame {
virtual Object GetParameter(int index) const;
virtual int ComputeParametersCount() const;
Handle<FixedArray> GetParameters() const;
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
virtual int GetActualArgumentCount() const;
-#endif
// Determine the code for the frame.
Code unchecked_code() const override;
@@ -550,7 +552,7 @@ class CommonFrameWithJSLinkage : public CommonFrame {
// Lookup exception handler for current {pc}, returns -1 if none found. Also
// returns data associated with the handler site specific to the frame type:
// - OptimizedFrame : Data is not used and will not return a value.
- // - InterpretedFrame: Data is the register index holding the context.
+ // - UnoptimizedFrame: Data is the register index holding the context.
virtual int LookupExceptionHandlerInTable(
int* data, HandlerTable::CatchPrediction* prediction);
@@ -586,10 +588,7 @@ class JavaScriptFrame : public CommonFrameWithJSLinkage {
Object unchecked_function() const;
Script script() const;
Object context() const override;
-
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
int GetActualArgumentCount() const override;
-#endif
inline void set_receiver(Object value);
@@ -603,11 +602,6 @@ class JavaScriptFrame : public CommonFrameWithJSLinkage {
// about the inlined frames use {GetFunctions} and {Summarize}.
bool HasInlinedFrames() const;
- // Check if this frame has "adapted" arguments in the sense that the
- // actual passed arguments are available in an arguments adaptor
- // frame below it on the stack.
- inline bool has_adapted_arguments() const;
-
// Garbage collection support.
void Iterate(RootVisitor* v) const override;
@@ -780,7 +774,7 @@ class BuiltinExitFrame : public ExitFrame {
inline Object new_target_slot_object() const;
friend class StackFrameIteratorBase;
- friend class FrameArrayBuilder;
+ friend class StackTraceBuilder;
};
class StubFrame : public TypedFrame {
@@ -834,10 +828,11 @@ class OptimizedFrame : public JavaScriptFrame {
Object StackSlotAt(int index) const;
};
-class InterpretedFrame : public JavaScriptFrame {
+// An unoptimized frame is a JavaScript frame that is executing bytecode. It
+// may be executing it using the interpreter, or via baseline code compiled from
+// the bytecode.
+class UnoptimizedFrame : public JavaScriptFrame {
public:
- Type type() const override { return INTERPRETED; }
-
// Accessors.
int position() const override;
@@ -846,69 +841,82 @@ class InterpretedFrame : public JavaScriptFrame {
int* data, HandlerTable::CatchPrediction* prediction) override;
// Returns the current offset into the bytecode stream.
- int GetBytecodeOffset() const;
-
- // Updates the current offset into the bytecode stream, mainly used for stack
- // unwinding to continue execution at a different bytecode offset.
- void PatchBytecodeOffset(int new_offset);
+ virtual int GetBytecodeOffset() const = 0;
// Returns the frame's current bytecode array.
BytecodeArray GetBytecodeArray() const;
- // Updates the frame's BytecodeArray with |bytecode_array|. Used by the
- // debugger to swap execution onto a BytecodeArray patched with breakpoints.
- void PatchBytecodeArray(BytecodeArray bytecode_array);
-
// Access to the interpreter register file for this frame.
Object ReadInterpreterRegister(int register_index) const;
- void WriteInterpreterRegister(int register_index, Object value);
// Build a list with summaries for this frame including all inlined frames.
void Summarize(std::vector<FrameSummary>* frames) const override;
- static int GetBytecodeOffset(Address fp);
+ static UnoptimizedFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_unoptimized());
+ return static_cast<UnoptimizedFrame*>(frame);
+ }
+
+ protected:
+ inline explicit UnoptimizedFrame(StackFrameIteratorBase* iterator);
+
+ Address GetExpressionAddress(int n) const override;
+
+ private:
+ friend class StackFrameIteratorBase;
+};
+
+class InterpretedFrame : public UnoptimizedFrame {
+ public:
+ Type type() const override { return INTERPRETED; }
+
+ // Returns the current offset into the bytecode stream.
+ int GetBytecodeOffset() const override;
+
+ // Updates the current offset into the bytecode stream, mainly used for stack
+ // unwinding to continue execution at a different bytecode offset.
+ void PatchBytecodeOffset(int new_offset);
+
+ // Updates the frame's BytecodeArray with |bytecode_array|. Used by the
+ // debugger to swap execution onto a BytecodeArray patched with breakpoints.
+ void PatchBytecodeArray(BytecodeArray bytecode_array);
static InterpretedFrame* cast(StackFrame* frame) {
DCHECK(frame->is_interpreted());
return static_cast<InterpretedFrame*>(frame);
}
+ static const InterpretedFrame* cast(const StackFrame* frame) {
+ DCHECK(frame->is_interpreted());
+ return static_cast<const InterpretedFrame*>(frame);
+ }
+
+ static int GetBytecodeOffset(Address fp);
protected:
inline explicit InterpretedFrame(StackFrameIteratorBase* iterator);
- Address GetExpressionAddress(int n) const override;
-
private:
friend class StackFrameIteratorBase;
};
-// Arguments adaptor frames are automatically inserted below
-// JavaScript frames when the actual number of parameters does not
-// match the formal number of parameters.
-// NOTE: this inheritance is wrong, an ArgumentsAdaptorFrame should be
-// of type TypedFrame, but due to FrameInspector::javascript_frame(),
-// it needs to be seen as JavaScriptFrame.
-// This frame will however be deleted soon.
-class ArgumentsAdaptorFrame : public JavaScriptFrame {
+class BaselineFrame : public UnoptimizedFrame {
public:
- Type type() const override { return ARGUMENTS_ADAPTOR; }
+ Type type() const override { return BASELINE; }
- // Determine the code for the frame.
- Code unchecked_code() const override;
+ // Returns the current offset into the bytecode stream.
+ int GetBytecodeOffset() const override;
- static ArgumentsAdaptorFrame* cast(StackFrame* frame) {
- DCHECK(frame->is_arguments_adaptor());
- return static_cast<ArgumentsAdaptorFrame*>(frame);
- }
+ intptr_t GetPCForBytecodeOffset(int lookup_offset) const;
- int ComputeParametersCount() const override;
+ void PatchContext(Context value);
- // Printing support.
- void Print(StringStream* accumulator, PrintMode mode,
- int index) const override;
+ static BaselineFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_baseline());
+ return static_cast<BaselineFrame*>(frame);
+ }
protected:
- inline explicit ArgumentsAdaptorFrame(StackFrameIteratorBase* iterator);
+ inline explicit BaselineFrame(StackFrameIteratorBase* iterator);
private:
friend class StackFrameIteratorBase;
@@ -946,9 +954,6 @@ class WasmFrame : public TypedFrame {
// Lookup exception handler for current {pc}, returns -1 if none found.
int LookupExceptionHandlerInTable();
- // Determine the code for the frame.
- Code unchecked_code() const override;
-
// Accessors.
V8_EXPORT_PRIVATE WasmInstanceObject wasm_instance() const;
V8_EXPORT_PRIVATE wasm::NativeModule* native_module() const;
@@ -1077,9 +1082,6 @@ class InternalFrame : public TypedFrame {
// Garbage collection support.
void Iterate(RootVisitor* v) const override;
- // Determine the code for the frame.
- Code unchecked_code() const override;
-
static InternalFrame* cast(StackFrame* frame) {
DCHECK(frame->is_internal());
return static_cast<InternalFrame*>(frame);
@@ -1223,6 +1225,7 @@ class StackFrameIterator : public StackFrameIteratorBase {
return frame_;
}
V8_EXPORT_PRIVATE void Advance();
+ StackFrame* Reframe();
private:
// Go back to the first frame.
@@ -1240,6 +1243,7 @@ class JavaScriptFrameIterator {
bool done() const { return iterator_.done(); }
V8_EXPORT_PRIVATE void Advance();
void AdvanceOneFrame() { iterator_.Advance(); }
+ inline JavaScriptFrame* Reframe();
private:
StackFrameIterator iterator_;
@@ -1259,6 +1263,7 @@ class V8_EXPORT_PRIVATE StackTraceFrameIterator {
int FrameFunctionCount() const;
inline CommonFrame* frame() const;
+ inline CommonFrame* Reframe();
inline bool is_javascript() const;
inline bool is_wasm() const;
@@ -1334,16 +1339,16 @@ enum class BuiltinContinuationMode {
JAVASCRIPT_HANDLE_EXCEPTION // JavaScriptBuiltinContinuationWithCatchFrame
};
-class InterpretedFrameInfo {
+class UnoptimizedFrameInfo {
public:
- static InterpretedFrameInfo Precise(int parameters_count_with_receiver,
+ static UnoptimizedFrameInfo Precise(int parameters_count_with_receiver,
int translation_height, bool is_topmost,
bool pad_arguments) {
return {parameters_count_with_receiver, translation_height, is_topmost,
pad_arguments, FrameInfoKind::kPrecise};
}
- static InterpretedFrameInfo Conservative(int parameters_count_with_receiver,
+ static UnoptimizedFrameInfo Conservative(int parameters_count_with_receiver,
int locals_count) {
return {parameters_count_with_receiver, locals_count, false, true,
FrameInfoKind::kConservative};
@@ -1358,7 +1363,7 @@ class InterpretedFrameInfo {
uint32_t frame_size_in_bytes() const { return frame_size_in_bytes_; }
private:
- InterpretedFrameInfo(int parameters_count_with_receiver,
+ UnoptimizedFrameInfo(int parameters_count_with_receiver,
int translation_height, bool is_topmost,
bool pad_arguments, FrameInfoKind frame_info_kind);
@@ -1367,28 +1372,6 @@ class InterpretedFrameInfo {
uint32_t frame_size_in_bytes_;
};
-class ArgumentsAdaptorFrameInfo {
- public:
- static ArgumentsAdaptorFrameInfo Precise(int translation_height) {
- return ArgumentsAdaptorFrameInfo{translation_height};
- }
-
- static ArgumentsAdaptorFrameInfo Conservative(int parameters_count) {
- return ArgumentsAdaptorFrameInfo{parameters_count};
- }
-
- uint32_t frame_size_in_bytes_without_fixed() const {
- return frame_size_in_bytes_without_fixed_;
- }
- uint32_t frame_size_in_bytes() const { return frame_size_in_bytes_; }
-
- private:
- explicit ArgumentsAdaptorFrameInfo(int translation_height);
-
- uint32_t frame_size_in_bytes_without_fixed_;
- uint32_t frame_size_in_bytes_;
-};
-
class ConstructStubFrameInfo {
public:
static ConstructStubFrameInfo Precise(int translation_height,
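
Earlier in this frames.h diff, is_unoptimized(), is_java_script() and the new static IsJavaScript(Type) each reduce to a single range comparison, guarded by STATIC_ASSERTs that INTERPRETED, BASELINE and OPTIMIZED stay adjacent in the frame type list. A self-contained sketch of that pattern; the enum below is illustrative, not the real STACK_FRAME_TYPE_LIST.

// If the relevant enumerators are declared contiguously, "is one of these
// kinds" becomes one range check; the static_asserts fail loudly if someone
// reorders the list.
enum FrameType { INTERPRETED, BASELINE, OPTIMIZED, STUB };

static_assert(BASELINE == INTERPRETED + 1, "is_unoptimized() relies on this order");
static_assert(OPTIMIZED == BASELINE + 1, "IsJavaScript() relies on this order");

constexpr bool IsUnoptimized(FrameType t) {
  return t >= INTERPRETED && t <= BASELINE;
}
constexpr bool IsJavaScript(FrameType t) {
  return t >= INTERPRETED && t <= OPTIMIZED;
}

static_assert(IsJavaScript(BASELINE) && !IsJavaScript(STUB), "sanity check");
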
diff --git a/deps/v8/src/execution/ia32/frame-constants-ia32.cc b/deps/v8/src/execution/ia32/frame-constants-ia32.cc
index 7faecdb858..16e9e75a44 100644
--- a/deps/v8/src/execution/ia32/frame-constants-ia32.cc
+++ b/deps/v8/src/execution/ia32/frame-constants-ia32.cc
@@ -18,7 +18,7 @@ Register JavaScriptFrame::fp_register() { return ebp; }
Register JavaScriptFrame::context_register() { return esi; }
Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
-int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
+int UnoptimizedFrameConstants::RegisterStackSlotCount(int register_count) {
return register_count;
}
diff --git a/deps/v8/src/execution/isolate-data.h b/deps/v8/src/execution/isolate-data.h
index 7315837b7e..a6610c12f0 100644
--- a/deps/v8/src/execution/isolate-data.h
+++ b/deps/v8/src/execution/isolate-data.h
@@ -13,7 +13,7 @@
#include "src/execution/thread-local-top.h"
#include "src/roots/roots.h"
#include "src/utils/utils.h"
-#include "testing/gtest/include/gtest/gtest_prod.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/execution/isolate-inl.h b/deps/v8/src/execution/isolate-inl.h
index aa477913b6..7a81bf7d24 100644
--- a/deps/v8/src/execution/isolate-inl.h
+++ b/deps/v8/src/execution/isolate-inl.h
@@ -114,6 +114,11 @@ Isolate::ExceptionScope::~ExceptionScope() {
isolate_->set_pending_exception(*pending_exception_);
}
+bool Isolate::IsAnyInitialArrayPrototype(JSArray array) {
+ DisallowGarbageCollection no_gc;
+ return IsInAnyContext(array, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
+}
+
#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
Handle<type> Isolate::name() { \
return Handle<type>(raw_native_context().name(), this); \
diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc
index f6f47a75cc..248cba7a20 100644
--- a/deps/v8/src/execution/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -35,6 +35,7 @@
#include "src/debug/debug-frames.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
+#include "src/deoptimizer/materialized-object-store.h"
#include "src/diagnostics/basic-block-profiler.h"
#include "src/diagnostics/compilation-statistics.h"
#include "src/execution/frames-inl.h"
@@ -63,7 +64,6 @@
#include "src/objects/backing-store.h"
#include "src/objects/elements.h"
#include "src/objects/feedback-vector.h"
-#include "src/objects/frame-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-generator-inl.h"
@@ -92,6 +92,7 @@
#include "src/utils/version.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
+#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
#include "src/zone/accounting-allocator.h"
#include "src/zone/type-stats.h"
@@ -614,85 +615,69 @@ StackTraceFailureMessage::StackTraceFailureMessage(Isolate* isolate, void* ptr1,
}
}
-class FrameArrayBuilder {
+class StackTraceBuilder {
public:
enum FrameFilterMode { ALL, CURRENT_SECURITY_CONTEXT };
- FrameArrayBuilder(Isolate* isolate, FrameSkipMode mode, int limit,
+ StackTraceBuilder(Isolate* isolate, FrameSkipMode mode, int limit,
Handle<Object> caller, FrameFilterMode filter_mode)
: isolate_(isolate),
mode_(mode),
limit_(limit),
caller_(caller),
+ skip_next_frame_(mode != SKIP_NONE),
check_security_context_(filter_mode == CURRENT_SECURITY_CONTEXT) {
- switch (mode_) {
- case SKIP_FIRST:
- skip_next_frame_ = true;
- break;
- case SKIP_UNTIL_SEEN:
- DCHECK(caller_->IsJSFunction());
- skip_next_frame_ = true;
- break;
- case SKIP_NONE:
- skip_next_frame_ = false;
- break;
- }
-
- elements_ = isolate->factory()->NewFrameArray(std::min(limit, 10));
+ DCHECK_IMPLIES(mode_ == SKIP_UNTIL_SEEN, caller_->IsJSFunction());
+ // Modern web applications are usually built with multiple layers of
+ // framework and library code, and stack depth tends to be more than
+ // a dozen frames, so we over-allocate a bit here to avoid growing
+ // the elements array in the common case.
+ elements_ = isolate->factory()->NewFixedArray(std::min(64, limit));
}
void AppendAsyncFrame(Handle<JSGeneratorObject> generator_object) {
- if (full()) return;
Handle<JSFunction> function(generator_object->function(), isolate_);
if (!IsVisibleInStackTrace(function)) return;
- int flags = FrameArray::kIsAsync;
- if (IsStrictFrame(function)) flags |= FrameArray::kIsStrict;
+ int flags = StackFrameInfo::kIsAsync;
+ if (IsStrictFrame(function)) flags |= StackFrameInfo::kIsStrict;
Handle<Object> receiver(generator_object->receiver(), isolate_);
- Handle<AbstractCode> code(
- AbstractCode::cast(function->shared().GetBytecodeArray(isolate_)),
- isolate_);
- int offset = Smi::ToInt(generator_object->input_or_debug_pos());
+ Handle<BytecodeArray> code(function->shared().GetBytecodeArray(isolate_),
+ isolate_);
// The stored bytecode offset is relative to a different base than what
// is used in the source position table, hence the subtraction.
- offset -= BytecodeArray::kHeaderSize - kHeapObjectTag;
+ int offset = Smi::ToInt(generator_object->input_or_debug_pos()) -
+ (BytecodeArray::kHeaderSize - kHeapObjectTag);
Handle<FixedArray> parameters = isolate_->factory()->empty_fixed_array();
if (V8_UNLIKELY(FLAG_detailed_error_stack_trace)) {
- int param_count = function->shared().internal_formal_parameter_count();
- parameters = isolate_->factory()->NewFixedArray(param_count);
- for (int i = 0; i < param_count; i++) {
- parameters->set(i, generator_object->parameters_and_registers().get(i));
- }
+ parameters = isolate_->factory()->CopyFixedArrayUpTo(
+ handle(generator_object->parameters_and_registers(), isolate_),
+ function->shared().internal_formal_parameter_count());
}
- elements_ = FrameArray::AppendJSFrame(elements_, receiver, function, code,
- offset, flags, parameters);
+ AppendFrame(receiver, function, code, offset, flags, parameters);
}
void AppendPromiseCombinatorFrame(Handle<JSFunction> element_function,
- Handle<JSFunction> combinator,
- FrameArray::Flag combinator_flag,
- Handle<Context> context) {
- if (full()) return;
- int flags = FrameArray::kIsAsync | combinator_flag;
-
- Handle<Context> native_context(context->native_context(), isolate_);
+ Handle<JSFunction> combinator) {
if (!IsVisibleInStackTrace(combinator)) return;
+ int flags =
+ StackFrameInfo::kIsAsync | StackFrameInfo::kIsSourcePositionComputed;
- Handle<Object> receiver(native_context->promise_function(), isolate_);
- Handle<AbstractCode> code(AbstractCode::cast(combinator->code()), isolate_);
+ Handle<Object> receiver(combinator->native_context().promise_function(),
+ isolate_);
+ Handle<Code> code(combinator->code(), isolate_);
// TODO(mmarchini) save Promises list from the Promise combinator
Handle<FixedArray> parameters = isolate_->factory()->empty_fixed_array();
// We store the offset of the promise into the element function's
// hash field for element callbacks.
- int const offset =
+ int promise_index =
Smi::ToInt(Smi::cast(element_function->GetIdentityHash())) - 1;
- elements_ = FrameArray::AppendJSFrame(elements_, receiver, combinator, code,
- offset, flags, parameters);
+ AppendFrame(receiver, combinator, code, promise_index, flags, parameters);
}
void AppendJavaScriptFrame(
@@ -700,48 +685,37 @@ class FrameArrayBuilder {
// Filter out internal frames that we do not want to show.
if (!IsVisibleInStackTrace(summary.function())) return;
- Handle<AbstractCode> abstract_code = summary.abstract_code();
- const int offset = summary.code_offset();
-
- const bool is_constructor = summary.is_constructor();
-
int flags = 0;
Handle<JSFunction> function = summary.function();
- if (IsStrictFrame(function)) flags |= FrameArray::kIsStrict;
- if (is_constructor) flags |= FrameArray::kIsConstructor;
+ if (IsStrictFrame(function)) flags |= StackFrameInfo::kIsStrict;
+ if (summary.is_constructor()) flags |= StackFrameInfo::kIsConstructor;
- Handle<FixedArray> parameters = isolate_->factory()->empty_fixed_array();
- if (V8_UNLIKELY(FLAG_detailed_error_stack_trace)) {
- parameters = summary.parameters();
- }
-
- elements_ = FrameArray::AppendJSFrame(
- elements_, TheHoleToUndefined(isolate_, summary.receiver()), function,
- abstract_code, offset, flags, parameters);
+ AppendFrame(summary.receiver(), function, summary.abstract_code(),
+ summary.code_offset(), flags, summary.parameters());
}
void AppendWasmFrame(FrameSummary::WasmFrameSummary const& summary) {
if (summary.code()->kind() != wasm::WasmCode::kFunction) return;
Handle<WasmInstanceObject> instance = summary.wasm_instance();
- int flags = 0;
+ int flags = StackFrameInfo::kIsWasm;
if (instance->module_object().is_asm_js()) {
- flags |= FrameArray::kIsAsmJsWasmFrame;
+ flags |= StackFrameInfo::kIsAsmJsWasm;
if (summary.at_to_number_conversion()) {
- flags |= FrameArray::kAsmJsAtNumberConversion;
+ flags |= StackFrameInfo::kIsAsmJsAtNumberConversion;
}
- } else {
- flags |= FrameArray::kIsWasmFrame;
}
- elements_ = FrameArray::AppendWasmFrame(
- elements_, instance, summary.function_index(), summary.code(),
- summary.code_offset(), flags);
+ auto code = Managed<wasm::GlobalWasmCodeRef>::Allocate(
+ isolate_, 0, summary.code(),
+ instance->module_object().shared_native_module());
+ AppendFrame(instance,
+ handle(Smi::FromInt(summary.function_index()), isolate_), code,
+ summary.code_offset(), flags,
+ isolate_->factory()->empty_fixed_array());
}
void AppendBuiltinExitFrame(BuiltinExitFrame* exit_frame) {
- Handle<JSFunction> function = handle(exit_frame->function(), isolate_);
-
- // Filter out internal frames that we do not want to show.
+ Handle<JSFunction> function(exit_frame->function(), isolate_);
if (!IsVisibleInStackTrace(function)) return;
// TODO(szuend): Remove this check once the flag is enabled
@@ -757,8 +731,8 @@ class FrameArrayBuilder {
static_cast<int>(exit_frame->pc() - code->InstructionStart());
int flags = 0;
- if (IsStrictFrame(function)) flags |= FrameArray::kIsStrict;
- if (exit_frame->IsConstructor()) flags |= FrameArray::kIsConstructor;
+ if (IsStrictFrame(function)) flags |= StackFrameInfo::kIsStrict;
+ if (exit_frame->IsConstructor()) flags |= StackFrameInfo::kIsConstructor;
Handle<FixedArray> parameters = isolate_->factory()->empty_fixed_array();
if (V8_UNLIKELY(FLAG_detailed_error_stack_trace)) {
@@ -769,31 +743,13 @@ class FrameArrayBuilder {
}
}
- elements_ = FrameArray::AppendJSFrame(elements_, receiver, function,
- Handle<AbstractCode>::cast(code),
- offset, flags, parameters);
+ AppendFrame(receiver, function, code, offset, flags, parameters);
}
- bool full() { return elements_->FrameCount() >= limit_; }
+ bool Full() { return index_ >= limit_; }
- Handle<FrameArray> GetElements() {
- elements_->ShrinkToFit(isolate_);
- return elements_;
- }
-
- // Creates a StackTraceFrame object for each frame in the FrameArray.
- Handle<FixedArray> GetElementsAsStackTraceFrameArray() {
- elements_->ShrinkToFit(isolate_);
- const int frame_count = elements_->FrameCount();
- Handle<FixedArray> stack_trace =
- isolate_->factory()->NewFixedArray(frame_count);
-
- for (int i = 0; i < frame_count; ++i) {
- Handle<StackTraceFrame> frame =
- isolate_->factory()->NewStackTraceFrame(elements_, i);
- stack_trace->set(i, *frame);
- }
- return stack_trace;
+ Handle<FixedArray> Build() {
+ return FixedArray::ShrinkOrEmpty(isolate_, elements_, index_);
}
private:
@@ -855,22 +811,34 @@ class FrameArrayBuilder {
return isolate_->context().HasSameSecurityTokenAs(function->context());
}
- // TODO(jgruber): Fix all cases in which frames give us a hole value (e.g. the
- // receiver in RegExp constructor frames.
- Handle<Object> TheHoleToUndefined(Isolate* isolate, Handle<Object> in) {
- return (in->IsTheHole(isolate))
- ? Handle<Object>::cast(isolate->factory()->undefined_value())
- : in;
+ void AppendFrame(Handle<Object> receiver_or_instance, Handle<Object> function,
+ Handle<HeapObject> code, int offset, int flags,
+ Handle<FixedArray> parameters) {
+ DCHECK_LE(index_, elements_->length());
+ DCHECK_LE(elements_->length(), limit_);
+ if (index_ == elements_->length()) {
+ elements_ = isolate_->factory()->CopyFixedArrayAndGrow(
+ elements_, std::min(16, limit_ - elements_->length()));
+ }
+ if (receiver_or_instance->IsTheHole(isolate_)) {
+ // TODO(jgruber): Fix all cases in which frames give us a hole value
+ // (e.g. the receiver in RegExp constructor frames).
+ receiver_or_instance = isolate_->factory()->undefined_value();
+ }
+ auto info = isolate_->factory()->NewStackFrameInfo(
+ receiver_or_instance, function, code, offset, flags, parameters);
+ elements_->set(index_++, *info);
}
Isolate* isolate_;
const FrameSkipMode mode_;
- int limit_;
+ int index_ = 0;
+ const int limit_;
const Handle<Object> caller_;
- bool skip_next_frame_ = true;
+ bool skip_next_frame_;
bool encountered_strict_function_ = false;
const bool check_security_context_;
- Handle<FrameArray> elements_;
+ Handle<FixedArray> elements_;
};
bool GetStackTraceLimit(Isolate* isolate, int* result) {
@@ -892,6 +860,8 @@ bool GetStackTraceLimit(Isolate* isolate, int* result) {
bool NoExtension(const v8::FunctionCallbackInfo<v8::Value>&) { return false; }
+namespace {
+
bool IsBuiltinFunction(Isolate* isolate, HeapObject object,
Builtins::Name builtin_index) {
if (!object.IsJSFunction()) return false;
@@ -900,8 +870,8 @@ bool IsBuiltinFunction(Isolate* isolate, HeapObject object,
}
void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
- FrameArrayBuilder* builder) {
- while (!builder->full()) {
+ StackTraceBuilder* builder) {
+ while (!builder->Full()) {
// Check that the {promise} is not settled.
if (promise->status() != Promise::kPending) return;
@@ -952,8 +922,7 @@ void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
Handle<Context> context(function->context(), isolate);
Handle<JSFunction> combinator(context->native_context().promise_all(),
isolate);
- builder->AppendPromiseCombinatorFrame(function, combinator,
- FrameArray::kIsPromiseAll, context);
+ builder->AppendPromiseCombinatorFrame(function, combinator);
// Now peek into the Promise.all() resolve element context to
// find the promise capability that's being resolved when all
@@ -971,8 +940,7 @@ void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
Handle<Context> context(function->context(), isolate);
Handle<JSFunction> combinator(context->native_context().promise_any(),
isolate);
- builder->AppendPromiseCombinatorFrame(function, combinator,
- FrameArray::kIsPromiseAny, context);
+ builder->AppendPromiseCombinatorFrame(function, combinator);
// Now peek into the Promise.any() reject element context to
// find the promise capability that's being resolved when any of
@@ -1012,36 +980,34 @@ void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
}
}
-namespace {
-
struct CaptureStackTraceOptions {
int limit;
// 'filter_mode' and 'skip_mode' are somewhat orthogonal. 'filter_mode'
// specifies whether to capture all frames, or just frames in the same
// security context, while 'skip_mode' allows skipping the first frame.
FrameSkipMode skip_mode;
- FrameArrayBuilder::FrameFilterMode filter_mode;
+ StackTraceBuilder::FrameFilterMode filter_mode;
bool capture_builtin_exit_frames;
bool capture_only_frames_subject_to_debugging;
bool async_stack_trace;
};
-Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
- CaptureStackTraceOptions options) {
+Handle<FixedArray> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
+ CaptureStackTraceOptions options) {
DisallowJavascriptExecution no_js(isolate);
TRACE_EVENT_BEGIN1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
"CaptureStackTrace", "maxFrameCount", options.limit);
wasm::WasmCodeRefScope code_ref_scope;
- FrameArrayBuilder builder(isolate, options.skip_mode, options.limit, caller,
+ StackTraceBuilder builder(isolate, options.skip_mode, options.limit, caller,
options.filter_mode);
// Build the regular stack trace, and remember the last relevant
// frame ID and inlined index (for the async stack trace handling
// below, which starts from this last frame).
- for (StackFrameIterator it(isolate); !it.done() && !builder.full();
+ for (StackFrameIterator it(isolate); !it.done() && !builder.Full();
it.Advance()) {
StackFrame* const frame = it.frame();
switch (frame->type()) {
@@ -1049,13 +1015,14 @@ Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH:
case StackFrame::OPTIMIZED:
case StackFrame::INTERPRETED:
+ case StackFrame::BASELINE:
case StackFrame::BUILTIN:
case StackFrame::WASM: {
// A standard frame may include many summarized frames (due to
// inlining).
std::vector<FrameSummary> frames;
CommonFrame::cast(frame)->Summarize(&frames);
- for (size_t i = frames.size(); i-- != 0 && !builder.full();) {
+ for (size_t i = frames.size(); i-- != 0 && !builder.Full();) {
auto& summary = frames[i];
if (options.capture_only_frames_subject_to_debugging &&
!summary.is_subject_to_debugging()) {
@@ -1155,7 +1122,7 @@ Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
}
}
- Handle<FixedArray> stack_trace = builder.GetElementsAsStackTraceFrameArray();
+ Handle<FixedArray> stack_trace = builder.Build();
TRACE_EVENT_END1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
"CaptureStackTrace", "frameCount", stack_trace->length());
return stack_trace;
@@ -1174,7 +1141,7 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
options.skip_mode = mode;
options.capture_builtin_exit_frames = true;
options.async_stack_trace = FLAG_async_stack_traces;
- options.filter_mode = FrameArrayBuilder::CURRENT_SECURITY_CONTEXT;
+ options.filter_mode = StackTraceBuilder::CURRENT_SECURITY_CONTEXT;
options.capture_only_frames_subject_to_debugging = false;
return CaptureStackTrace(this, caller, options);
@@ -1249,8 +1216,8 @@ Address Isolate::GetAbstractPC(int* line, int* column) {
*column = -1;
}
- if (frame->is_interpreted()) {
- InterpretedFrame* iframe = static_cast<InterpretedFrame*>(frame);
+ if (frame->is_unoptimized()) {
+ UnoptimizedFrame* iframe = static_cast<UnoptimizedFrame*>(frame);
Address bytecode_start =
iframe->GetBytecodeArray().GetFirstBytecodeAddress();
return bytecode_start + iframe->GetBytecodeOffset();
@@ -1268,12 +1235,11 @@ Handle<FixedArray> Isolate::CaptureCurrentStackTrace(
options.async_stack_trace = false;
options.filter_mode =
(stack_trace_options & StackTrace::kExposeFramesAcrossSecurityOrigins)
- ? FrameArrayBuilder::ALL
- : FrameArrayBuilder::CURRENT_SECURITY_CONTEXT;
+ ? StackTraceBuilder::ALL
+ : StackTraceBuilder::CURRENT_SECURITY_CONTEXT;
options.capture_only_frames_subject_to_debugging = true;
- return Handle<FixedArray>::cast(
- CaptureStackTrace(this, factory()->undefined_value(), options));
+ return CaptureStackTrace(this, factory()->undefined_value(), options);
}
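CaptureCurrentStackTrace is the internal path behind the public v8::StackTrace::CurrentStackTrace API, so the FixedArray of StackFrameInfo objects built by StackTraceBuilder above is what an embedder ultimately observes through it. A minimal embedder-side sketch, using only long-standing v8.h declarations (v8::StackTrace, v8::StackFrame) that are not part of this change; the include path and printing are illustrative:

#include <cstdio>

#include "v8.h"

// Prints the current JS stack, capped at 10 frames, with full details.
void PrintCurrentStack(v8::Isolate* isolate) {
  v8::Local<v8::StackTrace> trace = v8::StackTrace::CurrentStackTrace(
      isolate, /*frame_limit=*/10, v8::StackTrace::kDetailed);
  for (int i = 0; i < trace->GetFrameCount(); ++i) {
    v8::Local<v8::StackFrame> frame =
        trace->GetFrame(isolate, static_cast<uint32_t>(i));
    v8::String::Utf8Value fn(isolate, frame->GetFunctionName());
    v8::String::Utf8Value script(isolate, frame->GetScriptName());
    std::printf("  at %s (%s:%d:%d)\n", *fn ? *fn : "<anonymous>",
                *script ? *script : "<unknown>", frame->GetLineNumber(),
                frame->GetColumn());
  }
}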
void Isolate::PrintStack(FILE* out, PrintStackMode mode) {
@@ -1597,6 +1563,8 @@ Handle<JSMessageObject> Isolate::CreateMessageOrAbort(
Object Isolate::ThrowInternal(Object raw_exception, MessageLocation* location) {
DCHECK(!has_pending_exception());
+ DCHECK_IMPLIES(trap_handler::IsTrapHandlerEnabled(),
+ !trap_handler::IsThreadInWasm());
HandleScope scope(this);
Handle<Object> exception(raw_exception, this);
@@ -1692,6 +1660,8 @@ Object Isolate::ReThrow(Object exception) {
Object Isolate::UnwindAndFindHandler() {
Object exception = pending_exception();
+ DCHECK_IMPLIES(trap_handler::IsTrapHandlerEnabled(),
+ !trap_handler::IsThreadInWasm());
auto FoundHandler = [&](Context context, Address instruction_start,
intptr_t handler_offset,
@@ -1765,10 +1735,6 @@ Object Isolate::UnwindAndFindHandler() {
}
case StackFrame::WASM: {
- if (trap_handler::IsThreadInWasm()) {
- trap_handler::ClearThreadInWasm();
- }
-
if (!catchable_by_wasm) break;
// For WebAssembly frames we perform a lookup in the handler table.
@@ -1781,14 +1747,14 @@ Object Isolate::UnwindAndFindHandler() {
wasm_engine()->code_manager()->LookupCode(frame->pc());
int offset = wasm_frame->LookupExceptionHandlerInTable();
if (offset < 0) break;
+ wasm_engine()->SampleCatchEvent(this);
// Compute the stack pointer from the frame pointer. This ensures that
// argument slots on the stack are dropped as returning would.
Address return_sp = frame->fp() +
StandardFrameConstants::kFixedFrameSizeAboveFp -
wasm_code->stack_slots() * kSystemPointerSize;
- // This is going to be handled by Wasm, so we need to set the TLS flag
- // again. It was cleared above assuming the frame would be unwound.
+ // This is going to be handled by Wasm, so we need to set the TLS flag.
trap_handler::SetThreadInWasm();
return FoundHandler(Context(), wasm_code->instruction_start(), offset,
@@ -1798,10 +1764,7 @@ Object Isolate::UnwindAndFindHandler() {
case StackFrame::WASM_COMPILE_LAZY: {
// Can only fail directly on invocation. This happens if an invalid
// function was validated lazily.
- DCHECK_IMPLIES(trap_handler::IsTrapHandlerEnabled(),
- trap_handler::IsThreadInWasm());
DCHECK(FLAG_wasm_lazy_validation);
- trap_handler::ClearThreadInWasm();
break;
}
@@ -1860,11 +1823,12 @@ Object Isolate::UnwindAndFindHandler() {
code.constant_pool(), return_sp, frame->fp());
}
- case StackFrame::INTERPRETED: {
+ case StackFrame::INTERPRETED:
+ case StackFrame::BASELINE: {
// For interpreted and baseline frames we perform a range lookup in the
// handler table.
if (!catchable_by_js) break;
- InterpretedFrame* js_frame = static_cast<InterpretedFrame*>(frame);
- int register_slots = InterpreterFrameConstants::RegisterStackSlotCount(
+ UnoptimizedFrame* js_frame = UnoptimizedFrame::cast(frame);
+ int register_slots = UnoptimizedFrameConstants::RegisterStackSlotCount(
js_frame->GetBytecodeArray().register_count());
int context_reg = 0; // Will contain register index holding context.
int offset =
@@ -1885,12 +1849,26 @@ Object Isolate::UnwindAndFindHandler() {
// the correct context for the handler from the interpreter register.
Context context =
Context::cast(js_frame->ReadInterpreterRegister(context_reg));
- js_frame->PatchBytecodeOffset(static_cast<int>(offset));
+ DCHECK(context.IsContext());
+
+ if (frame->is_baseline()) {
+ BaselineFrame* sp_frame = BaselineFrame::cast(js_frame);
+ Code code = sp_frame->LookupCode();
+ intptr_t pc_offset = sp_frame->GetPCForBytecodeOffset(offset);
+ // Patch the context register directly on the frame, so that we don't
+ // need to have a context read + write in the baseline code.
+ sp_frame->PatchContext(context);
+ return FoundHandler(Context(), code.InstructionStart(), pc_offset,
+ code.constant_pool(), return_sp, sp_frame->fp());
+ } else {
+ InterpretedFrame::cast(js_frame)->PatchBytecodeOffset(
+ static_cast<int>(offset));
- Code code =
- builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
- return FoundHandler(context, code.InstructionStart(), 0,
- code.constant_pool(), return_sp, frame->fp());
+ Code code =
+ builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
+ return FoundHandler(context, code.InstructionStart(), 0,
+ code.constant_pool(), return_sp, frame->fp());
+ }
}
case StackFrame::BUILTIN:
@@ -2013,6 +1991,7 @@ Isolate::CatchType Isolate::PredictExceptionCatcher() {
// For JavaScript frames we perform a lookup in the handler table.
case StackFrame::OPTIMIZED:
case StackFrame::INTERPRETED:
+ case StackFrame::BASELINE:
case StackFrame::BUILTIN: {
JavaScriptFrame* js_frame = JavaScriptFrame::cast(frame);
Isolate::CatchType prediction = ToCatchType(PredictException(js_frame));
@@ -2109,17 +2088,16 @@ void Isolate::PrintCurrentStackTrace(FILE* out) {
options.skip_mode = SKIP_NONE;
options.capture_builtin_exit_frames = true;
options.async_stack_trace = FLAG_async_stack_traces;
- options.filter_mode = FrameArrayBuilder::CURRENT_SECURITY_CONTEXT;
+ options.filter_mode = StackTraceBuilder::CURRENT_SECURITY_CONTEXT;
options.capture_only_frames_subject_to_debugging = false;
- Handle<FixedArray> frames = Handle<FixedArray>::cast(
- CaptureStackTrace(this, this->factory()->undefined_value(), options));
+ Handle<FixedArray> frames =
+ CaptureStackTrace(this, this->factory()->undefined_value(), options);
IncrementalStringBuilder builder(this);
for (int i = 0; i < frames->length(); ++i) {
- Handle<StackTraceFrame> frame(StackTraceFrame::cast(frames->get(i)), this);
-
- SerializeStackTraceFrame(this, frame, &builder);
+ Handle<StackFrameInfo> frame(StackFrameInfo::cast(frames->get(i)), this);
+ SerializeStackFrameInfo(this, frame, &builder);
}
Handle<String> stack_trace = builder.Finish().ToHandleChecked();
@@ -2191,57 +2169,10 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
Handle<Object> property =
JSReceiver::GetDataProperty(Handle<JSObject>::cast(exception), key);
if (!property->IsFixedArray()) return false;
-
- Handle<FrameArray> elements =
- GetFrameArrayFromStackTrace(this, Handle<FixedArray>::cast(property));
-
- const int frame_count = elements->FrameCount();
- for (int i = 0; i < frame_count; i++) {
- if (elements->IsWasmFrame(i) || elements->IsAsmJsWasmFrame(i)) {
- int func_index = elements->WasmFunctionIndex(i).value();
- int offset = elements->Offset(i).value();
- bool is_at_number_conversion =
- elements->IsAsmJsWasmFrame(i) &&
- elements->Flags(i).value() & FrameArray::kAsmJsAtNumberConversion;
- if (elements->IsWasmFrame(i) || elements->IsAsmJsWasmFrame(i)) {
- // WasmCode* held alive by the {GlobalWasmCodeRef}.
- wasm::WasmCode* code =
- Managed<wasm::GlobalWasmCodeRef>::cast(elements->WasmCodeObject(i))
- .get()
- ->code();
- offset = code->GetSourcePositionBefore(offset);
- }
- Handle<WasmInstanceObject> instance(elements->WasmInstance(i), this);
- const wasm::WasmModule* module = elements->WasmInstance(i).module();
- int pos = GetSourcePosition(module, func_index, offset,
- is_at_number_conversion);
- Handle<Script> script(instance->module_object().script(), this);
-
- *target = MessageLocation(script, pos, pos + 1);
- return true;
- }
-
- Handle<JSFunction> fun = handle(elements->Function(i), this);
- if (!fun->shared().IsSubjectToDebugging()) continue;
-
- Object script = fun->shared().script();
- if (script.IsScript() &&
- !(Script::cast(script).source().IsUndefined(this))) {
- Handle<SharedFunctionInfo> shared = handle(fun->shared(), this);
-
- AbstractCode abstract_code = elements->Code(i);
- const int code_offset = elements->Offset(i).value();
- Handle<Script> casted_script(Script::cast(script), this);
- if (shared->HasBytecodeArray() &&
- shared->GetBytecodeArray(this).HasSourcePositionTable()) {
- int pos = abstract_code.SourcePosition(code_offset);
- *target = MessageLocation(casted_script, pos, pos + 1, shared);
- } else {
- *target = MessageLocation(casted_script, shared, code_offset);
- }
-
- return true;
- }
+ Handle<FixedArray> stack = Handle<FixedArray>::cast(property);
+ for (int i = 0; i < stack->length(); i++) {
+ Handle<StackFrameInfo> frame(StackFrameInfo::cast(stack->get(i)), this);
+ if (StackFrameInfo::ComputeLocation(frame, target)) return true;
}
return false;
}
@@ -2596,14 +2527,6 @@ void Isolate::SetAbortOnUncaughtExceptionCallback(
abort_on_uncaught_exception_callback_ = callback;
}
-bool Isolate::AreWasmThreadsEnabled(Handle<Context> context) {
- if (wasm_threads_enabled_callback()) {
- v8::Local<v8::Context> api_context = v8::Utils::ToLocal(context);
- return wasm_threads_enabled_callback()(api_context);
- }
- return FLAG_experimental_wasm_threads;
-}
-
bool Isolate::IsWasmSimdEnabled(Handle<Context> context) {
if (wasm_simd_enabled_callback()) {
v8::Local<v8::Context> api_context = v8::Utils::ToLocal(context);
@@ -2612,6 +2535,14 @@ bool Isolate::IsWasmSimdEnabled(Handle<Context> context) {
return FLAG_experimental_wasm_simd;
}
+bool Isolate::AreWasmExceptionsEnabled(Handle<Context> context) {
+ if (wasm_exceptions_enabled_callback()) {
+ v8::Local<v8::Context> api_context = v8::Utils::ToLocal(context);
+ return wasm_exceptions_enabled_callback()(api_context);
+ }
+ return FLAG_experimental_wasm_eh;
+}
+
Handle<Context> Isolate::GetIncumbentContext() {
JavaScriptFrameIterator it(this);
@@ -3885,7 +3816,8 @@ bool Isolate::NeedsDetailedOptimizedCodeLineInfo() const {
bool Isolate::NeedsSourcePositionsForProfiling() const {
return FLAG_trace_deopt || FLAG_trace_turbo || FLAG_trace_turbo_graph ||
FLAG_turbo_profiling || FLAG_perf_prof || is_profiling() ||
- debug_->is_active() || logger_->is_logging() || FLAG_trace_maps;
+ debug_->is_active() || logger_->is_logging() || FLAG_log_maps ||
+ FLAG_log_ic;
}
void Isolate::SetFeedbackVectorsForProfilingTools(Object value) {
@@ -3977,11 +3909,6 @@ void Isolate::UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object) {
Protectors::InvalidateNoElements(this);
}
-bool Isolate::IsAnyInitialArrayPrototype(Handle<JSArray> array) {
- DisallowGarbageCollection no_gc;
- return IsInAnyContext(*array, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
-}
-
static base::RandomNumberGenerator* ensure_rng_exists(
base::RandomNumberGenerator** rng, int seed) {
if (*rng == nullptr) {
@@ -4151,11 +4078,17 @@ MaybeHandle<JSPromise> NewRejectedPromise(Isolate* isolate,
} // namespace
MaybeHandle<JSPromise> Isolate::RunHostImportModuleDynamicallyCallback(
- Handle<Script> referrer, Handle<Object> specifier) {
+ Handle<Script> referrer, Handle<Object> specifier,
+ MaybeHandle<Object> maybe_import_assertions_argument) {
v8::Local<v8::Context> api_context =
v8::Utils::ToLocal(Handle<Context>(native_context()));
+ DCHECK(host_import_module_dynamically_callback_ == nullptr ||
+ host_import_module_dynamically_with_import_assertions_callback_ ==
+ nullptr);
- if (host_import_module_dynamically_callback_ == nullptr) {
+ if (host_import_module_dynamically_callback_ == nullptr &&
+ host_import_module_dynamically_with_import_assertions_callback_ ==
+ nullptr) {
Handle<Object> exception =
factory()->NewError(error_function(), MessageTemplate::kUnsupported);
return NewRejectedPromise(this, api_context, exception);
@@ -4172,22 +4105,131 @@ MaybeHandle<JSPromise> Isolate::RunHostImportModuleDynamicallyCallback(
DCHECK(!has_pending_exception());
v8::Local<v8::Promise> promise;
- ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
- this, promise,
- host_import_module_dynamically_callback_(
- api_context, v8::Utils::ScriptOrModuleToLocal(referrer),
- v8::Utils::ToLocal(specifier_str)),
- MaybeHandle<JSPromise>());
- return v8::Utils::OpenHandle(*promise);
+
+ if (host_import_module_dynamically_with_import_assertions_callback_) {
+ Handle<FixedArray> import_assertions_array;
+ if (GetImportAssertionsFromArgument(maybe_import_assertions_argument)
+ .ToHandle(&import_assertions_array)) {
+ ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
+ this, promise,
+ host_import_module_dynamically_with_import_assertions_callback_(
+ api_context, v8::Utils::ScriptOrModuleToLocal(referrer),
+ v8::Utils::ToLocal(specifier_str),
+ ToApiHandle<v8::FixedArray>(import_assertions_array)),
+ MaybeHandle<JSPromise>());
+ return v8::Utils::OpenHandle(*promise);
+ } else {
+ Handle<Object> exception(pending_exception(), this);
+ clear_pending_exception();
+
+ return NewRejectedPromise(this, api_context, exception);
+ }
+
+ } else {
+ DCHECK_NOT_NULL(host_import_module_dynamically_callback_);
+ ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
+ this, promise,
+ host_import_module_dynamically_callback_(
+ api_context, v8::Utils::ScriptOrModuleToLocal(referrer),
+ v8::Utils::ToLocal(specifier_str)),
+ MaybeHandle<JSPromise>());
+ return v8::Utils::OpenHandle(*promise);
+ }
+}
+
+MaybeHandle<FixedArray> Isolate::GetImportAssertionsFromArgument(
+ MaybeHandle<Object> maybe_import_assertions_argument) {
+ Handle<FixedArray> import_assertions_array = factory()->empty_fixed_array();
+ Handle<Object> import_assertions_argument;
+ if (!maybe_import_assertions_argument.ToHandle(&import_assertions_argument) ||
+ import_assertions_argument->IsUndefined()) {
+ return import_assertions_array;
+ }
+
+ // The parser shouldn't have allowed the second argument to import() if
+ // the flag wasn't enabled.
+ DCHECK(FLAG_harmony_import_assertions);
+
+ if (!import_assertions_argument->IsJSReceiver()) {
+ this->Throw(
+ *factory()->NewTypeError(MessageTemplate::kNonObjectImportArgument));
+ return MaybeHandle<FixedArray>();
+ }
+
+ Handle<JSReceiver> import_assertions_argument_receiver =
+ Handle<JSReceiver>::cast(import_assertions_argument);
+ Handle<Name> key = factory()->assert_string();
+
+ Handle<Object> import_assertions_object;
+ if (!JSReceiver::GetProperty(this, import_assertions_argument_receiver, key)
+ .ToHandle(&import_assertions_object)) {
+ // This can happen if the property has a getter function that throws
+ // an error.
+ return MaybeHandle<FixedArray>();
+ }
+
+ // If there is no 'assert' option in the options bag, it's not an error. Just
+ // do the import() as if no assertions were provided.
+ if (import_assertions_object->IsUndefined()) return import_assertions_array;
+
+ if (!import_assertions_object->IsJSReceiver()) {
+ this->Throw(
+ *factory()->NewTypeError(MessageTemplate::kNonObjectAssertOption));
+ return MaybeHandle<FixedArray>();
+ }
+
+ Handle<JSReceiver> import_assertions_object_receiver =
+ Handle<JSReceiver>::cast(import_assertions_object);
+
+ Handle<FixedArray> assertion_keys =
+ KeyAccumulator::GetKeys(import_assertions_object_receiver,
+ KeyCollectionMode::kOwnOnly, ENUMERABLE_STRINGS,
+ GetKeysConversion::kConvertToString)
+ .ToHandleChecked();
+
+ // The assertions will be passed to the host in the form: [key1,
+ // value1, key2, value2, ...].
+ constexpr size_t kAssertionEntrySizeForDynamicImport = 2;
+ import_assertions_array = factory()->NewFixedArray(static_cast<int>(
+ assertion_keys->length() * kAssertionEntrySizeForDynamicImport));
+ for (int i = 0; i < assertion_keys->length(); i++) {
+ Handle<String> assertion_key(String::cast(assertion_keys->get(i)), this);
+ Handle<Object> assertion_value;
+ if (!JSReceiver::GetProperty(this, import_assertions_object_receiver,
+ assertion_key)
+ .ToHandle(&assertion_value)) {
+ // This can happen if the property has a getter function that throws
+ // an error.
+ return MaybeHandle<FixedArray>();
+ }
+
+ if (!assertion_value->IsString()) {
+ this->Throw(*factory()->NewTypeError(
+ MessageTemplate::kNonStringImportAssertionValue));
+ return MaybeHandle<FixedArray>();
+ }
+
+ import_assertions_array->set((i * kAssertionEntrySizeForDynamicImport),
+ *assertion_key);
+ import_assertions_array->set((i * kAssertionEntrySizeForDynamicImport) + 1,
+ *assertion_value);
+ }
+
+ return import_assertions_array;
}
void Isolate::ClearKeptObjects() { heap()->ClearKeptObjects(); }
void Isolate::SetHostImportModuleDynamicallyCallback(
- HostImportModuleDynamicallyCallback callback) {
+ DeprecatedHostImportModuleDynamicallyCallback callback) {
host_import_module_dynamically_callback_ = callback;
}
+void Isolate::SetHostImportModuleDynamicallyCallback(
+ HostImportModuleDynamicallyWithImportAssertionsCallback callback) {
+ host_import_module_dynamically_with_import_assertions_callback_ = callback;
+}
+
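+ The two overloads above keep the old dynamic-import callback working while the import-assertions-aware one is adopted. A rough sketch of the embedder side of the new path, assuming the HostImportModuleDynamicallyWithImportAssertionsCallback and v8::FixedArray declarations that ship in v8.h alongside this change; for dynamic import() the assertions arrive flattened as [key1, value1, key2, value2, ...], as documented in GetImportAssertionsFromArgument above:
+
+ v8::MaybeLocal<v8::Promise> OnDynamicImport(
+     v8::Local<v8::Context> context, v8::Local<v8::ScriptOrModule> referrer,
+     v8::Local<v8::String> specifier,
+     v8::Local<v8::FixedArray> import_assertions) {
+   // Walk the flattened [key, value] pairs (stride 2 for dynamic import()).
+   for (int i = 0; i + 1 < import_assertions->Length(); i += 2) {
+     v8::Local<v8::String> key =
+         import_assertions->Get(context, i).As<v8::String>();
+     v8::Local<v8::String> value =
+         import_assertions->Get(context, i + 1).As<v8::String>();
+     // e.g. honor { assert: { type: "json" } } by checking key/value here.
+     (void)key;
+     (void)value;
+   }
+   // Return a promise; the embedder resolves it with the module namespace
+   // once resolution and instantiation have finished (elided here).
+   v8::Local<v8::Promise::Resolver> resolver =
+       v8::Promise::Resolver::New(context).ToLocalChecked();
+   return resolver->GetPromise();
+ }
+
+ // Registered through the overload this change adds internally:
+ //   isolate->SetHostImportModuleDynamicallyCallback(OnDynamicImport);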
MaybeHandle<JSObject> Isolate::RunHostInitializeImportMetaObjectCallback(
Handle<SourceTextModule> module) {
CHECK(module->import_meta().IsTheHole(this));
@@ -4257,8 +4299,7 @@ void Isolate::PrepareBuiltinLabelInfoMap() {
if (embedded_file_writer_ != nullptr) {
embedded_file_writer_->PrepareBuiltinLabelInfoMap(
heap()->construct_stub_create_deopt_pc_offset().value(),
- heap()->construct_stub_invoke_deopt_pc_offset().value(),
- heap()->arguments_adaptor_deopt_pc_offset().value());
+ heap()->construct_stub_invoke_deopt_pc_offset().value());
}
}
diff --git a/deps/v8/src/execution/isolate.h b/deps/v8/src/execution/isolate.h
index 9dbe897ffd..6c458860e8 100644
--- a/deps/v8/src/execution/isolate.h
+++ b/deps/v8/src/execution/isolate.h
@@ -118,6 +118,7 @@ class Interpreter;
} // namespace interpreter
namespace compiler {
+class NodeObserver;
class PerIsolateCompilerCache;
} // namespace compiler
@@ -416,55 +417,55 @@ V8_EXPORT_PRIVATE void FreeCurrentEmbeddedBlob();
using DebugObjectCache = std::vector<Handle<HeapObject>>;
-#define ISOLATE_INIT_LIST(V) \
- /* Assembler state. */ \
- V(FatalErrorCallback, exception_behavior, nullptr) \
- V(OOMErrorCallback, oom_behavior, nullptr) \
- V(LogEventCallback, event_logger, nullptr) \
- V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
- V(ModifyCodeGenerationFromStringsCallback, modify_code_gen_callback, \
- nullptr) \
- V(ModifyCodeGenerationFromStringsCallback2, modify_code_gen_callback2, \
- nullptr) \
- V(AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback, nullptr) \
- V(ExtensionCallback, wasm_module_callback, &NoExtension) \
- V(ExtensionCallback, wasm_instance_callback, &NoExtension) \
- V(WasmStreamingCallback, wasm_streaming_callback, nullptr) \
- V(WasmThreadsEnabledCallback, wasm_threads_enabled_callback, nullptr) \
- V(WasmLoadSourceMapCallback, wasm_load_source_map_callback, nullptr) \
- V(WasmSimdEnabledCallback, wasm_simd_enabled_callback, nullptr) \
- /* State for Relocatable. */ \
- V(Relocatable*, relocatable_top, nullptr) \
- V(DebugObjectCache*, string_stream_debug_object_cache, nullptr) \
- V(Object, string_stream_current_security_token, Object()) \
- V(const intptr_t*, api_external_references, nullptr) \
- V(AddressToIndexHashMap*, external_reference_map, nullptr) \
- V(HeapObjectToIndexHashMap*, root_index_map, nullptr) \
- V(MicrotaskQueue*, default_microtask_queue, nullptr) \
- V(CompilationStatistics*, turbo_statistics, nullptr) \
- V(CodeTracer*, code_tracer, nullptr) \
- V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu) \
- V(PromiseRejectCallback, promise_reject_callback, nullptr) \
- V(const v8::StartupData*, snapshot_blob, nullptr) \
- V(int, code_and_metadata_size, 0) \
- V(int, bytecode_and_metadata_size, 0) \
- V(int, external_script_source_size, 0) \
- /* Number of CPU profilers running on the isolate. */ \
- V(size_t, num_cpu_profilers, 0) \
- /* true if a trace is being formatted through Error.prepareStackTrace. */ \
- V(bool, formatting_stack_trace, false) \
- /* Perform side effect checks on function call and API callbacks. */ \
- V(DebugInfo::ExecutionMode, debug_execution_mode, DebugInfo::kBreakpoints) \
- /* Current code coverage mode */ \
- V(debug::CoverageMode, code_coverage_mode, debug::CoverageMode::kBestEffort) \
- V(debug::TypeProfileMode, type_profile_mode, debug::TypeProfileMode::kNone) \
- V(int, last_console_context_id, 0) \
- V(v8_inspector::V8Inspector*, inspector, nullptr) \
- V(bool, next_v8_call_is_safe_for_termination, false) \
- V(bool, only_terminate_in_safe_scope, false) \
- V(bool, detailed_source_positions_for_profiling, FLAG_detailed_line_info) \
- V(int, embedder_wrapper_type_index, -1) \
- V(int, embedder_wrapper_object_index, -1)
+#define ISOLATE_INIT_LIST(V) \
+ /* Assembler state. */ \
+ V(FatalErrorCallback, exception_behavior, nullptr) \
+ V(OOMErrorCallback, oom_behavior, nullptr) \
+ V(LogEventCallback, event_logger, nullptr) \
+ V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
+ V(ModifyCodeGenerationFromStringsCallback, modify_code_gen_callback, \
+ nullptr) \
+ V(ModifyCodeGenerationFromStringsCallback2, modify_code_gen_callback2, \
+ nullptr) \
+ V(AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback, nullptr) \
+ V(ExtensionCallback, wasm_module_callback, &NoExtension) \
+ V(ExtensionCallback, wasm_instance_callback, &NoExtension) \
+ V(WasmStreamingCallback, wasm_streaming_callback, nullptr) \
+ V(WasmLoadSourceMapCallback, wasm_load_source_map_callback, nullptr) \
+ V(WasmSimdEnabledCallback, wasm_simd_enabled_callback, nullptr) \
+ V(WasmExceptionsEnabledCallback, wasm_exceptions_enabled_callback, nullptr) \
+ /* State for Relocatable. */ \
+ V(Relocatable*, relocatable_top, nullptr) \
+ V(DebugObjectCache*, string_stream_debug_object_cache, nullptr) \
+ V(Object, string_stream_current_security_token, Object()) \
+ V(const intptr_t*, api_external_references, nullptr) \
+ V(AddressToIndexHashMap*, external_reference_map, nullptr) \
+ V(HeapObjectToIndexHashMap*, root_index_map, nullptr) \
+ V(MicrotaskQueue*, default_microtask_queue, nullptr) \
+ V(CompilationStatistics*, turbo_statistics, nullptr) \
+ V(CodeTracer*, code_tracer, nullptr) \
+ V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu) \
+ V(PromiseRejectCallback, promise_reject_callback, nullptr) \
+ V(const v8::StartupData*, snapshot_blob, nullptr) \
+ V(int, code_and_metadata_size, 0) \
+ V(int, bytecode_and_metadata_size, 0) \
+ V(int, external_script_source_size, 0) \
+ /* Number of CPU profilers running on the isolate. */ \
+ V(size_t, num_cpu_profilers, 0) \
+ /* true if a trace is being formatted through Error.prepareStackTrace. */ \
+ V(bool, formatting_stack_trace, false) \
+ /* Perform side effect checks on function call and API callbacks. */ \
+ V(DebugInfo::ExecutionMode, debug_execution_mode, DebugInfo::kBreakpoints) \
+ V(debug::TypeProfileMode, type_profile_mode, debug::TypeProfileMode::kNone) \
+ V(bool, disable_bytecode_flushing, false) \
+ V(int, last_console_context_id, 0) \
+ V(v8_inspector::V8Inspector*, inspector, nullptr) \
+ V(bool, next_v8_call_is_safe_for_termination, false) \
+ V(bool, only_terminate_in_safe_scope, false) \
+ V(bool, detailed_source_positions_for_profiling, FLAG_detailed_line_info) \
+ V(int, embedder_wrapper_type_index, -1) \
+ V(int, embedder_wrapper_object_index, -1) \
+ V(compiler::NodeObserver*, node_observer, nullptr)
#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
inline void set_##name(type v) { thread_local_top()->name##_ = v; } \
@@ -671,8 +672,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
inline void set_pending_exception(Object exception_obj);
inline void clear_pending_exception();
- bool AreWasmThreadsEnabled(Handle<Context> context);
bool IsWasmSimdEnabled(Handle<Context> context);
+ bool AreWasmExceptionsEnabled(Handle<Context> context);
THREAD_LOCAL_TOP_ADDRESS(Object, pending_exception)
@@ -1089,6 +1090,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
isolate_root_bias());
}
+ THREAD_LOCAL_TOP_ADDRESS(Address, thread_in_wasm_flag_address)
+
MaterializedObjectStore* materialized_object_store() {
return materialized_object_store_;
}
@@ -1303,7 +1306,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
}
// Returns true if array is the initial array prototype in any native context.
- bool IsAnyInitialArrayPrototype(Handle<JSArray> array);
+ inline bool IsAnyInitialArrayPrototype(JSArray array);
std::unique_ptr<PersistentHandles> NewPersistentHandles();
@@ -1323,6 +1326,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
}
OptimizingCompileDispatcher* optimizing_compile_dispatcher() {
+ DCHECK_NOT_NULL(optimizing_compile_dispatcher_);
return optimizing_compile_dispatcher_;
}
// Flushes all pending concurrent optimization jobs from the optimizing
@@ -1522,10 +1526,23 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void ClearKeptObjects();
+ // While deprecating v8::HostImportModuleDynamicallyCallback in v8.h we still
+ // need to support the version of the API that uses it, but we can't directly
+ // reference the deprecated version because of the ensuing build warnings. So,
+ // we declare this matching type for temporary internal use.
+ // TODO(v8:10958) Delete this declaration and all references to it once
+ // v8::HostImportModuleDynamicallyCallback is removed.
+ typedef MaybeLocal<Promise> (*DeprecatedHostImportModuleDynamicallyCallback)(
+ v8::Local<v8::Context> context, v8::Local<v8::ScriptOrModule> referrer,
+ v8::Local<v8::String> specifier);
+
+ void SetHostImportModuleDynamicallyCallback(
+ DeprecatedHostImportModuleDynamicallyCallback callback);
void SetHostImportModuleDynamicallyCallback(
- HostImportModuleDynamicallyCallback callback);
+ HostImportModuleDynamicallyWithImportAssertionsCallback callback);
MaybeHandle<JSPromise> RunHostImportModuleDynamicallyCallback(
- Handle<Script> referrer, Handle<Object> specifier);
+ Handle<Script> referrer, Handle<Object> specifier,
+ MaybeHandle<Object> maybe_import_assertions_argument);
void SetHostInitializeImportMetaObjectCallback(
HostInitializeImportMetaObjectCallback callback);
@@ -1572,6 +1589,13 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
RAILMode rail_mode() { return rail_mode_.load(); }
+ void set_code_coverage_mode(debug::CoverageMode coverage_mode) {
+ code_coverage_mode_.store(coverage_mode, std::memory_order_relaxed);
+ }
+ debug::CoverageMode code_coverage_mode() const {
+ return code_coverage_mode_.load(std::memory_order_relaxed);
+ }
+
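+ These accessors replace the code_coverage_mode entry dropped from ISOLATE_INIT_LIST above: the mode now lives in a std::atomic (declared further down in this header) and is accessed with relaxed ordering, since readers only need to see the value eventually, not in any particular order relative to other writes. A standalone sketch of the same pattern in plain C++, with illustrative names rather than V8's types:
+
+ #include <atomic>
+
+ enum class CoverageMode { kBestEffort, kBlockCount };
+
+ class CoverageModeHolder {
+  public:
+   void set_mode(CoverageMode mode) {
+     // Relaxed store: atomicity is needed, ordering with other data is not.
+     mode_.store(mode, std::memory_order_relaxed);
+   }
+   CoverageMode mode() const {
+     return mode_.load(std::memory_order_relaxed);
+   }
+
+  private:
+   std::atomic<CoverageMode> mode_{CoverageMode::kBestEffort};
+ };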
double LoadStartTimeMs();
void IsolateInForegroundNotification();
@@ -1591,14 +1615,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void set_allow_atomics_wait(bool set) { allow_atomics_wait_ = set; }
bool allow_atomics_wait() { return allow_atomics_wait_; }
- void set_supported_import_assertions(
- const std::vector<std::string>& supported_import_assertions) {
- supported_import_assertions_ = supported_import_assertions;
- }
- const std::vector<std::string>& supported_import_assertions() const {
- return supported_import_assertions_;
- }
-
// Register a finalizer to be called at isolate teardown.
void RegisterManagedPtrDestructor(ManagedPtrDestructor* finalizer);
@@ -1646,6 +1662,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
return main_thread_local_isolate_.get();
}
+ LocalIsolate* AsLocalIsolate() { return main_thread_local_isolate(); }
+
LocalHeap* main_thread_local_heap();
LocalHeap* CurrentLocalHeap();
@@ -1830,9 +1848,23 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
v8::Isolate::AtomicsWaitCallback atomics_wait_callback_ = nullptr;
void* atomics_wait_callback_data_ = nullptr;
PromiseHook promise_hook_ = nullptr;
- HostImportModuleDynamicallyCallback host_import_module_dynamically_callback_ =
- nullptr;
- std::vector<std::string> supported_import_assertions_;
+ DeprecatedHostImportModuleDynamicallyCallback
+ host_import_module_dynamically_callback_ = nullptr;
+ HostImportModuleDynamicallyWithImportAssertionsCallback
+ host_import_module_dynamically_with_import_assertions_callback_ = nullptr;
+ std::atomic<debug::CoverageMode> code_coverage_mode_{
+ debug::CoverageMode::kBestEffort};
+
+ // Helper function for RunHostImportModuleDynamicallyCallback.
+ // Unpacks import assertions, if present, from the second argument to dynamic
+ // import() and returns them in a FixedArray, sorted by code point order of
+ // the keys, in the form [key1, value1, key2, value2, ...]. Returns an empty
+ // MaybeHandle if an error was thrown. In this case, the host callback should
+ // not be called and instead the caller should use the pending exception to
+ // reject the import() call's Promise.
+ MaybeHandle<FixedArray> GetImportAssertionsFromArgument(
+ MaybeHandle<Object> maybe_import_assertions_argument);
+
HostInitializeImportMetaObjectCallback
host_initialize_import_meta_object_callback_ = nullptr;
base::Mutex rail_mutex_;
diff --git a/deps/v8/src/execution/local-isolate.cc b/deps/v8/src/execution/local-isolate.cc
index c79c8ac75c..77733907f8 100644
--- a/deps/v8/src/execution/local-isolate.cc
+++ b/deps/v8/src/execution/local-isolate.cc
@@ -20,8 +20,7 @@ LocalIsolate::LocalIsolate(Isolate* isolate, ThreadKind kind)
thread_id_(ThreadId::Current()),
stack_limit_(kind == ThreadKind::kMain
? isolate->stack_guard()->real_climit()
- : GetCurrentStackPosition() - FLAG_stack_size * KB),
- supported_import_assertions_(isolate->supported_import_assertions()) {}
+ : GetCurrentStackPosition() - FLAG_stack_size * KB) {}
LocalIsolate::~LocalIsolate() = default;
diff --git a/deps/v8/src/execution/local-isolate.h b/deps/v8/src/execution/local-isolate.h
index f978c1a2dc..c55f8dc65e 100644
--- a/deps/v8/src/execution/local-isolate.h
+++ b/deps/v8/src/execution/local-isolate.h
@@ -85,9 +85,7 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
bool is_main_thread() const { return heap_.is_main_thread(); }
- const std::vector<std::string>& supported_import_assertions() const {
- return supported_import_assertions_;
- }
+ LocalIsolate* AsLocalIsolate() { return this; }
private:
friend class v8::internal::LocalFactory;
@@ -101,7 +99,6 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
std::unique_ptr<LocalLogger> logger_;
ThreadId const thread_id_;
Address const stack_limit_;
- std::vector<std::string> supported_import_assertions_;
};
template <base::MutexSharedType kIsShared>
@@ -110,8 +107,7 @@ class V8_NODISCARD SharedMutexGuardIfOffThread<LocalIsolate, kIsShared> final {
SharedMutexGuardIfOffThread(base::SharedMutex* mutex, LocalIsolate* isolate) {
DCHECK_NOT_NULL(mutex);
DCHECK_NOT_NULL(isolate);
- DCHECK(!isolate->is_main_thread());
- mutex_guard_.emplace(mutex);
+ if (!isolate->is_main_thread()) mutex_guard_.emplace(mutex);
}
SharedMutexGuardIfOffThread(const SharedMutexGuardIfOffThread&) = delete;
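With this change the guard no longer insists that it is only constructed off the main thread; it simply skips taking the shared mutex when the LocalIsolate is the main-thread one. The same shape in standard C++, as a rough standalone illustration (std::optional and std::shared_mutex stand in for the base:: equivalents used here):

#include <optional>
#include <shared_mutex>

class GuardIfOffMainThread {
 public:
  GuardIfOffMainThread(std::shared_mutex* mutex, bool is_main_thread) {
    // Only off-main-thread users need the lock; the main thread proceeds
    // without it.
    if (!is_main_thread) guard_.emplace(*mutex);
  }

 private:
  std::optional<std::shared_lock<std::shared_mutex>> guard_;
};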
diff --git a/deps/v8/src/execution/messages.cc b/deps/v8/src/execution/messages.cc
index 75426223b6..b0ac822298 100644
--- a/deps/v8/src/execution/messages.cc
+++ b/deps/v8/src/execution/messages.cc
@@ -16,9 +16,7 @@
#include "src/execution/isolate-inl.h"
#include "src/logging/counters.h"
#include "src/objects/foreign-inl.h"
-#include "src/objects/frame-array-inl.h"
#include "src/objects/js-array-inl.h"
-#include "src/objects/keys.h"
#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/struct-inl.h"
#include "src/parsing/parse-info.h"
@@ -220,586 +218,30 @@ std::unique_ptr<char[]> MessageHandler::GetLocalizedMessage(
namespace {
-Object EvalFromFunctionName(Isolate* isolate, Handle<Script> script) {
- if (!script->has_eval_from_shared()) {
- return ReadOnlyRoots(isolate).undefined_value();
- }
-
- Handle<SharedFunctionInfo> shared(script->eval_from_shared(), isolate);
- // Find the name of the function calling eval.
- if (shared->Name().BooleanValue(isolate)) {
- return shared->Name();
- }
-
- return shared->inferred_name();
-}
-
-MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
- Handle<Object> sourceURL(script->GetNameOrSourceURL(), isolate);
- if (!sourceURL->IsUndefined(isolate)) {
- DCHECK(sourceURL->IsString());
- return Handle<String>::cast(sourceURL);
- }
-
- IncrementalStringBuilder builder(isolate);
- builder.AppendCString("eval at ");
-
- Handle<Object> eval_from_function_name =
- handle(EvalFromFunctionName(isolate, script), isolate);
- if (eval_from_function_name->BooleanValue(isolate)) {
- Handle<String> str;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, str, Object::ToString(isolate, eval_from_function_name),
- String);
- builder.AppendString(str);
- } else {
- builder.AppendCString("<anonymous>");
- }
-
- if (script->has_eval_from_shared()) {
- Handle<SharedFunctionInfo> eval_from_shared(script->eval_from_shared(),
- isolate);
- if (eval_from_shared->script().IsScript()) {
- Handle<Script> eval_from_script =
- handle(Script::cast(eval_from_shared->script()), isolate);
- builder.AppendCString(" (");
- if (eval_from_script->compilation_type() ==
- Script::COMPILATION_TYPE_EVAL) {
- // Eval script originated from another eval.
- Handle<String> str;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, str, FormatEvalOrigin(isolate, eval_from_script), String);
- builder.AppendString(str);
- } else {
- DCHECK(eval_from_script->compilation_type() !=
- Script::COMPILATION_TYPE_EVAL);
- // eval script originated from "real" source.
- Handle<Object> name_obj = handle(eval_from_script->name(), isolate);
- if (eval_from_script->name().IsString()) {
- builder.AppendString(Handle<String>::cast(name_obj));
-
- Script::PositionInfo info;
-
- if (Script::GetPositionInfo(eval_from_script,
- Script::GetEvalPosition(isolate, script),
- &info, Script::NO_OFFSET)) {
- builder.AppendCString(":");
-
- Handle<String> str = isolate->factory()->NumberToString(
- handle(Smi::FromInt(info.line + 1), isolate));
- builder.AppendString(str);
-
- builder.AppendCString(":");
-
- str = isolate->factory()->NumberToString(
- handle(Smi::FromInt(info.column + 1), isolate));
- builder.AppendString(str);
- }
- } else {
- DCHECK(!eval_from_script->name().IsString());
- builder.AppendCString("unknown source");
- }
- }
- }
- builder.AppendCString(")");
- }
-
- Handle<String> result;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, result, builder.Finish(), String);
- return result;
-}
-
-} // namespace
-
-Handle<PrimitiveHeapObject> StackFrameBase::GetEvalOrigin() {
- if (!HasScript() || !IsEval()) return isolate_->factory()->undefined_value();
- return FormatEvalOrigin(isolate_, GetScript()).ToHandleChecked();
-}
-
-Handle<PrimitiveHeapObject> StackFrameBase::GetWasmModuleName() {
- return isolate_->factory()->undefined_value();
-}
-
-int StackFrameBase::GetWasmFunctionIndex() { return StackFrameBase::kNone; }
-
-Handle<HeapObject> StackFrameBase::GetWasmInstance() {
- return isolate_->factory()->undefined_value();
-}
-
-int StackFrameBase::GetScriptId() const {
- if (!HasScript()) return kNone;
- return GetScript()->id();
-}
-
-bool StackFrameBase::IsEval() {
- return HasScript() &&
- GetScript()->compilation_type() == Script::COMPILATION_TYPE_EVAL;
-}
-
-void JSStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
- int frame_ix) {
- DCHECK(!array->IsWasmFrame(frame_ix));
- isolate_ = isolate;
- receiver_ = handle(array->Receiver(frame_ix), isolate);
- function_ = handle(array->Function(frame_ix), isolate);
- code_ = handle(array->Code(frame_ix), isolate);
- offset_ = array->Offset(frame_ix).value();
- cached_position_ = base::nullopt;
-
- const int flags = array->Flags(frame_ix).value();
- is_constructor_ = (flags & FrameArray::kIsConstructor) != 0;
- is_strict_ = (flags & FrameArray::kIsStrict) != 0;
- is_async_ = (flags & FrameArray::kIsAsync) != 0;
- is_promise_all_ = (flags & FrameArray::kIsPromiseAll) != 0;
- is_promise_any_ = (flags & FrameArray::kIsPromiseAny) != 0;
-}
-
-JSStackFrame::JSStackFrame(Isolate* isolate, Handle<Object> receiver,
- Handle<JSFunction> function,
- Handle<AbstractCode> code, int offset)
- : StackFrameBase(isolate),
- receiver_(receiver),
- function_(function),
- code_(code),
- offset_(offset),
- cached_position_(base::nullopt),
- is_async_(false),
- is_constructor_(false),
- is_strict_(false) {}
-
-Handle<Object> JSStackFrame::GetFunction() const {
- return Handle<Object>::cast(function_);
-}
-
-Handle<Object> JSStackFrame::GetFileName() {
- if (!HasScript()) return isolate_->factory()->null_value();
- return handle(GetScript()->name(), isolate_);
-}
-
-Handle<PrimitiveHeapObject> JSStackFrame::GetFunctionName() {
- Handle<String> result = JSFunction::GetDebugName(function_);
- if (result->length() != 0) return result;
-
- if (HasScript() &&
- GetScript()->compilation_type() == Script::COMPILATION_TYPE_EVAL) {
- return isolate_->factory()->eval_string();
- }
- return isolate_->factory()->null_value();
-}
-
-namespace {
-
-bool CheckMethodName(Isolate* isolate, Handle<JSReceiver> receiver,
- Handle<Name> name, Handle<JSFunction> fun,
- LookupIterator::Configuration config) {
- LookupIterator::Key key(isolate, name);
- LookupIterator iter(isolate, receiver, key, config);
- if (iter.state() == LookupIterator::DATA) {
- return iter.GetDataValue().is_identical_to(fun);
- } else if (iter.state() == LookupIterator::ACCESSOR) {
- Handle<Object> accessors = iter.GetAccessors();
- if (accessors->IsAccessorPair()) {
- Handle<AccessorPair> pair = Handle<AccessorPair>::cast(accessors);
- return pair->getter() == *fun || pair->setter() == *fun;
- }
- }
- return false;
-}
-
-Handle<Object> ScriptNameOrSourceUrl(Handle<Script> script, Isolate* isolate) {
- Object name_or_url = script->source_url();
- if (!name_or_url.IsString()) name_or_url = script->name();
- return handle(name_or_url, isolate);
-}
-
-} // namespace
-
-Handle<Object> JSStackFrame::GetScriptNameOrSourceUrl() {
- if (!HasScript()) return isolate_->factory()->null_value();
- return ScriptNameOrSourceUrl(GetScript(), isolate_);
-}
-
-Handle<PrimitiveHeapObject> JSStackFrame::GetMethodName() {
- if (receiver_->IsNullOrUndefined(isolate_)) {
- return isolate_->factory()->null_value();
- }
-
- Handle<JSReceiver> receiver;
- if (!Object::ToObject(isolate_, receiver_).ToHandle(&receiver)) {
- DCHECK(isolate_->has_pending_exception());
- isolate_->clear_pending_exception();
- isolate_->set_external_caught_exception(false);
- return isolate_->factory()->null_value();
- }
-
- Handle<String> name(function_->shared().Name(), isolate_);
- name = String::Flatten(isolate_, name);
-
- // The static initializer function is not a method, so don't add a
- // class name, just return the function name.
- if (name->HasOneBytePrefix(CStrVector("<static_fields_initializer>"))) {
- return name;
- }
-
- // ES2015 gives getters and setters name prefixes which must
- // be stripped to find the property name.
- if (name->HasOneBytePrefix(CStrVector("get ")) ||
- name->HasOneBytePrefix(CStrVector("set "))) {
- name = isolate_->factory()->NewProperSubString(name, 4, name->length());
- }
- if (CheckMethodName(isolate_, receiver, name, function_,
- LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR)) {
- return name;
- }
-
- HandleScope outer_scope(isolate_);
- Handle<PrimitiveHeapObject> result;
- for (PrototypeIterator iter(isolate_, receiver, kStartAtReceiver);
- !iter.IsAtEnd(); iter.Advance()) {
- Handle<Object> current = PrototypeIterator::GetCurrent(iter);
- if (!current->IsJSObject()) break;
- Handle<JSObject> current_obj = Handle<JSObject>::cast(current);
- if (current_obj->IsAccessCheckNeeded()) break;
- Handle<FixedArray> keys =
- KeyAccumulator::GetOwnEnumPropertyKeys(isolate_, current_obj);
- for (int i = 0; i < keys->length(); i++) {
- HandleScope inner_scope(isolate_);
- if (!keys->get(i).IsName()) continue;
- Handle<Name> name_key(Name::cast(keys->get(i)), isolate_);
- if (!CheckMethodName(isolate_, current_obj, name_key, function_,
- LookupIterator::OWN_SKIP_INTERCEPTOR))
- continue;
- // Return null in case of duplicates to avoid confusion.
- if (!result.is_null()) return isolate_->factory()->null_value();
- result = inner_scope.CloseAndEscape(name_key);
- }
- }
-
- if (!result.is_null()) return outer_scope.CloseAndEscape(result);
- return isolate_->factory()->null_value();
-}
-
-Handle<PrimitiveHeapObject> JSStackFrame::GetTypeName() {
- // TODO(jgruber): Check for strict/constructor here as in
- // CallSitePrototypeGetThis.
-
- if (receiver_->IsNullOrUndefined(isolate_)) {
- return isolate_->factory()->null_value();
- } else if (receiver_->IsJSProxy()) {
- return isolate_->factory()->Proxy_string();
- }
-
- Handle<JSReceiver> receiver;
- if (!Object::ToObject(isolate_, receiver_).ToHandle(&receiver)) {
- DCHECK(isolate_->has_pending_exception());
- isolate_->clear_pending_exception();
- isolate_->set_external_caught_exception(false);
- return isolate_->factory()->null_value();
- }
-
- return JSReceiver::GetConstructorName(receiver);
-}
-
-int JSStackFrame::GetLineNumber() {
- DCHECK_LE(0, GetPosition());
- if (HasScript()) return Script::GetLineNumber(GetScript(), GetPosition()) + 1;
- return kNone;
-}
-
-int JSStackFrame::GetColumnNumber() {
- DCHECK_LE(0, GetPosition());
- if (HasScript()) {
- return Script::GetColumnNumber(GetScript(), GetPosition()) + 1;
- }
- return kNone;
-}
-
-int JSStackFrame::GetEnclosingLineNumber() {
- if (HasScript()) {
- Handle<SharedFunctionInfo> shared = handle(function_->shared(), isolate_);
- return Script::GetLineNumber(GetScript(),
- shared->function_token_position()) + 1;
- } else {
- return kNone;
- }
-}
-
-int JSStackFrame::GetEnclosingColumnNumber() {
- if (HasScript()) {
- Handle<SharedFunctionInfo> shared = handle(function_->shared(), isolate_);
- return Script::GetColumnNumber(GetScript(),
- shared->function_token_position()) + 1;
- } else {
- return kNone;
- }
-}
-
-int JSStackFrame::GetPromiseIndex() const {
- return (is_promise_all_ || is_promise_any_) ? offset_ : kNone;
-}
-
-bool JSStackFrame::IsNative() {
- return HasScript() && GetScript()->type() == Script::TYPE_NATIVE;
-}
-
-bool JSStackFrame::IsToplevel() {
- return receiver_->IsJSGlobalProxy() || receiver_->IsNullOrUndefined(isolate_);
-}
-
-int JSStackFrame::GetPosition() const {
- if (cached_position_) return *cached_position_;
-
- Handle<SharedFunctionInfo> shared = handle(function_->shared(), isolate_);
- SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate_, shared);
- cached_position_ = code_->SourcePosition(offset_);
- return *cached_position_;
-}
-
-bool JSStackFrame::HasScript() const {
- return function_->shared().script().IsScript();
-}
-
-Handle<Script> JSStackFrame::GetScript() const {
- return handle(Script::cast(function_->shared().script()), isolate_);
-}
-
-void WasmStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
- int frame_ix) {
- // This function is called for compiled and interpreted wasm frames, and for
- // asm.js->wasm frames.
- DCHECK(array->IsWasmFrame(frame_ix) ||
- array->IsAsmJsWasmFrame(frame_ix));
- isolate_ = isolate;
- wasm_instance_ = handle(array->WasmInstance(frame_ix), isolate);
- wasm_func_index_ = array->WasmFunctionIndex(frame_ix).value();
- // The {WasmCode*} is held alive by the {GlobalWasmCodeRef}.
- auto global_wasm_code_ref =
- Managed<wasm::GlobalWasmCodeRef>::cast(array->WasmCodeObject(frame_ix));
- code_ = global_wasm_code_ref.get()->code();
- offset_ = array->Offset(frame_ix).value();
-}
-
-Handle<Object> WasmStackFrame::GetReceiver() const { return wasm_instance_; }
-
-Handle<Object> WasmStackFrame::GetFunction() const {
- return handle(Smi::FromInt(wasm_func_index_), isolate_);
-}
-
-Handle<PrimitiveHeapObject> WasmStackFrame::GetFunctionName() {
- Handle<PrimitiveHeapObject> name;
- Handle<WasmModuleObject> module_object(wasm_instance_->module_object(),
- isolate_);
- if (!WasmModuleObject::GetFunctionNameOrNull(isolate_, module_object,
- wasm_func_index_)
- .ToHandle(&name)) {
- name = isolate_->factory()->null_value();
- }
- return name;
-}
-
-Handle<Object> WasmStackFrame::GetScriptNameOrSourceUrl() {
- Handle<Script> script = GetScript();
- DCHECK_EQ(Script::TYPE_WASM, script->type());
- return ScriptNameOrSourceUrl(script, isolate_);
-}
-
-Handle<PrimitiveHeapObject> WasmStackFrame::GetWasmModuleName() {
- Handle<PrimitiveHeapObject> module_name;
- Handle<WasmModuleObject> module_object(wasm_instance_->module_object(),
- isolate_);
- if (!WasmModuleObject::GetModuleNameOrNull(isolate_, module_object)
- .ToHandle(&module_name)) {
- module_name = isolate_->factory()->null_value();
- }
- return module_name;
-}
-
-Handle<HeapObject> WasmStackFrame::GetWasmInstance() { return wasm_instance_; }
-
-int WasmStackFrame::GetPosition() const {
- return IsInterpreted() ? offset_ : code_->GetSourcePositionBefore(offset_);
-}
-
-int WasmStackFrame::GetColumnNumber() { return GetModuleOffset(); }
-
-int WasmStackFrame::GetEnclosingColumnNumber() {
- const int function_offset =
- GetWasmFunctionOffset(wasm_instance_->module(), wasm_func_index_);
- return function_offset;
-}
-
-int WasmStackFrame::GetModuleOffset() const {
- const int function_offset =
- GetWasmFunctionOffset(wasm_instance_->module(), wasm_func_index_);
- return function_offset + GetPosition();
-}
-
-Handle<Object> WasmStackFrame::GetFileName() { return Null(); }
-
-Handle<PrimitiveHeapObject> WasmStackFrame::Null() const {
- return isolate_->factory()->null_value();
-}
-
-bool WasmStackFrame::HasScript() const { return true; }
-
-Handle<Script> WasmStackFrame::GetScript() const {
- return handle(wasm_instance_->module_object().script(), isolate_);
-}
-
-void AsmJsWasmStackFrame::FromFrameArray(Isolate* isolate,
- Handle<FrameArray> array,
- int frame_ix) {
- DCHECK(array->IsAsmJsWasmFrame(frame_ix));
- WasmStackFrame::FromFrameArray(isolate, array, frame_ix);
- is_at_number_conversion_ =
- array->Flags(frame_ix).value() & FrameArray::kAsmJsAtNumberConversion;
-}
-
-Handle<Object> AsmJsWasmStackFrame::GetReceiver() const {
- return isolate_->global_proxy();
-}
-
-Handle<Object> AsmJsWasmStackFrame::GetFunction() const {
- // TODO(clemensb): Return lazily created JSFunction.
- return Null();
-}
-
-Handle<Object> AsmJsWasmStackFrame::GetFileName() {
- Handle<Script> script(wasm_instance_->module_object().script(), isolate_);
- DCHECK(script->IsUserJavaScript());
- return handle(script->name(), isolate_);
-}
-
-Handle<Object> AsmJsWasmStackFrame::GetScriptNameOrSourceUrl() {
- Handle<Script> script(wasm_instance_->module_object().script(), isolate_);
- DCHECK_EQ(Script::TYPE_NORMAL, script->type());
- return ScriptNameOrSourceUrl(script, isolate_);
-}
-
-int AsmJsWasmStackFrame::GetPosition() const {
- DCHECK_LE(0, offset_);
- int byte_offset = code_->GetSourcePositionBefore(offset_);
- const wasm::WasmModule* module = wasm_instance_->module();
- return GetSourcePosition(module, wasm_func_index_, byte_offset,
- is_at_number_conversion_);
-}
-
-int AsmJsWasmStackFrame::GetLineNumber() {
- DCHECK_LE(0, GetPosition());
- Handle<Script> script(wasm_instance_->module_object().script(), isolate_);
- DCHECK(script->IsUserJavaScript());
- return Script::GetLineNumber(script, GetPosition()) + 1;
-}
-
-int AsmJsWasmStackFrame::GetColumnNumber() {
- DCHECK_LE(0, GetPosition());
- Handle<Script> script(wasm_instance_->module_object().script(), isolate_);
- DCHECK(script->IsUserJavaScript());
- return Script::GetColumnNumber(script, GetPosition()) + 1;
-}
-
-int AsmJsWasmStackFrame::GetEnclosingLineNumber() {
- DCHECK_LE(0, GetPosition());
- Handle<Script> script(wasm_instance_->module_object().script(), isolate_);
- DCHECK(script->IsUserJavaScript());
- int byte_offset = GetSourcePosition(wasm_instance_->module(),
- wasm_func_index_, 0,
- is_at_number_conversion_);
- return Script::GetLineNumber(script, byte_offset) + 1;
-}
-
-int AsmJsWasmStackFrame::GetEnclosingColumnNumber() {
- DCHECK_LE(0, GetPosition());
- Handle<Script> script(wasm_instance_->module_object().script(), isolate_);
- DCHECK(script->IsUserJavaScript());
- int byte_offset = GetSourcePosition(wasm_instance_->module(),
- wasm_func_index_, 0,
- is_at_number_conversion_);
- return Script::GetColumnNumber(script, byte_offset) + 1;
-}
-
-FrameArrayIterator::FrameArrayIterator(Isolate* isolate,
- Handle<FrameArray> array, int frame_ix)
- : isolate_(isolate), array_(array), frame_ix_(frame_ix) {}
-
-bool FrameArrayIterator::HasFrame() const {
- return (frame_ix_ < array_->FrameCount());
-}
-
-void FrameArrayIterator::Advance() { frame_ix_++; }
-
-StackFrameBase* FrameArrayIterator::Frame() {
- DCHECK(HasFrame());
- const int flags = array_->Flags(frame_ix_).value();
- int flag_mask = FrameArray::kIsWasmFrame | FrameArray::kIsAsmJsWasmFrame;
- switch (flags & flag_mask) {
- case 0:
- js_frame_.FromFrameArray(isolate_, array_, frame_ix_);
- return &js_frame_;
- case FrameArray::kIsWasmFrame:
- wasm_frame_.FromFrameArray(isolate_, array_, frame_ix_);
- return &wasm_frame_;
- case FrameArray::kIsAsmJsWasmFrame:
- asm_wasm_frame_.FromFrameArray(isolate_, array_, frame_ix_);
- return &asm_wasm_frame_;
- default:
- UNREACHABLE();
- }
-}
-
-namespace {
-
-MaybeHandle<Object> ConstructCallSite(Isolate* isolate,
- Handle<StackTraceFrame> frame) {
- Handle<JSFunction> target =
- handle(isolate->native_context()->callsite_function(), isolate);
-
- Handle<JSObject> obj;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, obj,
- JSObject::New(target, target, Handle<AllocationSite>::null()), Object);
-
- // TODO(szuend): Introduce a new symbol "call_site_frame_symbol" and set
- // it to the StackTraceFrame. The CallSite API builtins can then
- // be implemented using StackFrameInfo objects.
-
- Handle<FrameArray> frame_array(FrameArray::cast(frame->frame_array()),
- isolate);
- int frame_index = frame->frame_index();
-
- Handle<Symbol> key = isolate->factory()->call_site_frame_array_symbol();
- RETURN_ON_EXCEPTION(isolate,
- JSObject::SetOwnPropertyIgnoreAttributes(
- obj, key, frame_array, DONT_ENUM),
- Object);
-
- key = isolate->factory()->call_site_frame_index_symbol();
- Handle<Object> value(Smi::FromInt(frame_index), isolate);
- RETURN_ON_EXCEPTION(
- isolate,
- JSObject::SetOwnPropertyIgnoreAttributes(obj, key, value, DONT_ENUM),
- Object);
-
- return obj;
-}
-
// Convert the raw frames as written by Isolate::CaptureSimpleStackTrace into
// a JSArray of JSCallSite objects.
MaybeHandle<JSArray> GetStackFrames(Isolate* isolate,
- Handle<FixedArray> elems) {
- const int frame_count = elems->length();
-
- Handle<FixedArray> frames = isolate->factory()->NewFixedArray(frame_count);
- for (int i = 0; i < frame_count; i++) {
- Handle<Object> site;
- Handle<StackTraceFrame> frame(StackTraceFrame::cast(elems->get(i)),
- isolate);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, site, ConstructCallSite(isolate, frame),
- JSArray);
- frames->set(i, *site);
+ Handle<FixedArray> frames) {
+ int frame_count = frames->length();
+ Handle<JSFunction> constructor = isolate->callsite_function();
+ Handle<FixedArray> sites = isolate->factory()->NewFixedArray(frame_count);
+ for (int i = 0; i < frame_count; ++i) {
+ Handle<StackFrameInfo> frame(StackFrameInfo::cast(frames->get(i)), isolate);
+ Handle<JSObject> site;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, site,
+ JSObject::New(constructor, constructor, Handle<AllocationSite>::null()),
+ JSArray);
+ RETURN_ON_EXCEPTION(
+ isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ site, isolate->factory()->call_site_frame_info_symbol(), frame,
+ DONT_ENUM),
+ JSArray);
+ sites->set(i, *site);
}
- return isolate->factory()->NewJSArrayWithElements(frames);
+ return isolate->factory()->NewJSArrayWithElements(sites);
}
MaybeHandle<Object> AppendErrorString(Isolate* isolate, Handle<Object> error,
@@ -862,8 +304,8 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
Handle<FixedArray> elems = Handle<FixedArray>::cast(raw_stack);
const bool in_recursion = isolate->formatting_stack_trace();
- if (!in_recursion) {
- Handle<Context> error_context = error->GetCreationContext();
+ Handle<Context> error_context;
+ if (!in_recursion && error->GetCreationContext().ToHandle(&error_context)) {
DCHECK(error_context->IsNativeContext());
if (isolate->HasPrepareStackTraceCallback()) {
@@ -931,9 +373,8 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
for (int i = 0; i < elems->length(); ++i) {
builder.AppendCString("\n at ");
- Handle<StackTraceFrame> frame(StackTraceFrame::cast(elems->get(i)),
- isolate);
- SerializeStackTraceFrame(isolate, frame, &builder);
+ Handle<StackFrameInfo> frame(StackFrameInfo::cast(elems->get(i)), isolate);
+ SerializeStackFrameInfo(isolate, frame, &builder);
if (isolate->has_pending_exception()) {
// CallSite.toString threw. Parts of the current frame might have been
diff --git a/deps/v8/src/execution/messages.h b/deps/v8/src/execution/messages.h
index 162d4e71bb..43f99d1797 100644
--- a/deps/v8/src/execution/messages.h
+++ b/deps/v8/src/execution/messages.h
@@ -24,7 +24,6 @@ class WasmCode;
// Forward declarations.
class AbstractCode;
-class FrameArray;
class JSMessageObject;
class LookupIterator;
class PrimitiveHeapObject;
@@ -60,217 +59,6 @@ class V8_EXPORT_PRIVATE MessageLocation {
Handle<SharedFunctionInfo> shared_;
};
-class StackFrameBase {
- public:
- virtual ~StackFrameBase() = default;
-
- virtual Handle<Object> GetReceiver() const = 0;
- virtual Handle<Object> GetFunction() const = 0;
-
- virtual Handle<Object> GetFileName() = 0;
- virtual Handle<PrimitiveHeapObject> GetFunctionName() = 0;
- virtual Handle<Object> GetScriptNameOrSourceUrl() = 0;
- virtual Handle<PrimitiveHeapObject> GetMethodName() = 0;
- virtual Handle<PrimitiveHeapObject> GetTypeName() = 0;
- virtual Handle<PrimitiveHeapObject> GetEvalOrigin();
- virtual Handle<PrimitiveHeapObject> GetWasmModuleName();
- virtual Handle<HeapObject> GetWasmInstance();
-
- // Returns the script ID if one is attached, -1 otherwise.
- int GetScriptId() const;
-
- virtual int GetPosition() const = 0;
- // Return 1-based line number, including line offset.
- virtual int GetLineNumber() = 0;
- // Return 1-based column number, including column offset if first line.
- virtual int GetColumnNumber() = 0;
- // Return 0-based Wasm function index. Returns -1 for non-Wasm frames.
- virtual int GetWasmFunctionIndex();
-
- virtual int GetEnclosingColumnNumber() = 0;
- virtual int GetEnclosingLineNumber() = 0;
-
- // Returns the index of the rejected promise in the Promise combinator input,
- // or -1 if this frame is not a Promise combinator frame.
- virtual int GetPromiseIndex() const = 0;
-
- virtual bool IsNative() = 0;
- virtual bool IsToplevel() = 0;
- virtual bool IsEval();
- virtual bool IsAsync() const = 0;
- virtual bool IsPromiseAll() const = 0;
- virtual bool IsPromiseAny() const = 0;
- virtual bool IsConstructor() = 0;
- virtual bool IsStrict() const = 0;
-
- // Used to signal that the requested field is unknown.
- static const int kNone = -1;
-
- protected:
- StackFrameBase() = default;
- explicit StackFrameBase(Isolate* isolate) : isolate_(isolate) {}
- Isolate* isolate_;
-
- private:
- virtual bool HasScript() const = 0;
- virtual Handle<Script> GetScript() const = 0;
-};
-
-class JSStackFrame : public StackFrameBase {
- public:
- JSStackFrame(Isolate* isolate, Handle<Object> receiver,
- Handle<JSFunction> function, Handle<AbstractCode> code,
- int offset);
- ~JSStackFrame() override = default;
-
- Handle<Object> GetReceiver() const override { return receiver_; }
- Handle<Object> GetFunction() const override;
-
- Handle<Object> GetFileName() override;
- Handle<PrimitiveHeapObject> GetFunctionName() override;
- Handle<Object> GetScriptNameOrSourceUrl() override;
- Handle<PrimitiveHeapObject> GetMethodName() override;
- Handle<PrimitiveHeapObject> GetTypeName() override;
-
- int GetPosition() const override;
- int GetLineNumber() override;
- int GetColumnNumber() override;
-
- int GetEnclosingColumnNumber() override;
- int GetEnclosingLineNumber() override;
-
- int GetPromiseIndex() const override;
-
- bool IsNative() override;
- bool IsToplevel() override;
- bool IsAsync() const override { return is_async_; }
- bool IsPromiseAll() const override { return is_promise_all_; }
- bool IsPromiseAny() const override { return is_promise_any_; }
- bool IsConstructor() override { return is_constructor_; }
- bool IsStrict() const override { return is_strict_; }
-
- private:
- JSStackFrame() = default;
- void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
-
- bool HasScript() const override;
- Handle<Script> GetScript() const override;
-
- Handle<Object> receiver_;
- Handle<JSFunction> function_;
- Handle<AbstractCode> code_;
- int offset_;
- mutable base::Optional<int> cached_position_;
-
- bool is_async_ : 1;
- bool is_constructor_ : 1;
- bool is_promise_all_ : 1;
- bool is_promise_any_ : 1;
- bool is_strict_ : 1;
-
- friend class FrameArrayIterator;
-};
-
-class WasmStackFrame : public StackFrameBase {
- public:
- ~WasmStackFrame() override = default;
-
- Handle<Object> GetReceiver() const override;
- Handle<Object> GetFunction() const override;
-
- Handle<Object> GetFileName() override;
- Handle<PrimitiveHeapObject> GetFunctionName() override;
- Handle<Object> GetScriptNameOrSourceUrl() override;
- Handle<PrimitiveHeapObject> GetMethodName() override { return Null(); }
- Handle<PrimitiveHeapObject> GetTypeName() override { return Null(); }
- Handle<PrimitiveHeapObject> GetWasmModuleName() override;
- Handle<HeapObject> GetWasmInstance() override;
-
- int GetPosition() const override;
- int GetLineNumber() override { return 0; }
- int GetColumnNumber() override;
- int GetEnclosingColumnNumber() override;
- int GetEnclosingLineNumber() override { return 0; }
- int GetWasmFunctionIndex() override { return wasm_func_index_; }
-
- int GetPromiseIndex() const override { return GetPosition(); }
-
- bool IsNative() override { return false; }
- bool IsToplevel() override { return false; }
- bool IsAsync() const override { return false; }
- bool IsPromiseAll() const override { return false; }
- bool IsPromiseAny() const override { return false; }
- bool IsConstructor() override { return false; }
- bool IsStrict() const override { return false; }
- bool IsInterpreted() const { return code_ == nullptr; }
-
- protected:
- Handle<PrimitiveHeapObject> Null() const;
-
- bool HasScript() const override;
- Handle<Script> GetScript() const override;
-
- Handle<WasmInstanceObject> wasm_instance_;
- uint32_t wasm_func_index_;
- wasm::WasmCode* code_; // null for interpreted frames.
- int offset_;
-
- private:
- int GetModuleOffset() const;
-
- WasmStackFrame() = default;
- void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
-
- friend class FrameArrayIterator;
- friend class AsmJsWasmStackFrame;
-};
-
-class AsmJsWasmStackFrame : public WasmStackFrame {
- public:
- ~AsmJsWasmStackFrame() override = default;
-
- Handle<Object> GetReceiver() const override;
- Handle<Object> GetFunction() const override;
-
- Handle<Object> GetFileName() override;
- Handle<Object> GetScriptNameOrSourceUrl() override;
-
- int GetPosition() const override;
- int GetLineNumber() override;
- int GetColumnNumber() override;
-
- int GetEnclosingColumnNumber() override;
- int GetEnclosingLineNumber() override;
-
- private:
- friend class FrameArrayIterator;
- AsmJsWasmStackFrame() = default;
- void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
-
- bool is_at_number_conversion_;
-};
-
-class FrameArrayIterator {
- public:
- FrameArrayIterator(Isolate* isolate, Handle<FrameArray> array,
- int frame_ix = 0);
-
- StackFrameBase* Frame();
-
- bool HasFrame() const;
- void Advance();
-
- private:
- Isolate* isolate_;
-
- Handle<FrameArray> array_;
- int frame_ix_;
-
- WasmStackFrame wasm_frame_;
- AsmJsWasmStackFrame asm_wasm_frame_;
- JSStackFrame js_frame_;
-};
-
// Determines how stack trace collection skips frames.
enum FrameSkipMode {
// Unconditionally skips the first frame. Used e.g. when the Error constructor
diff --git a/deps/v8/src/execution/mips/frame-constants-mips.cc b/deps/v8/src/execution/mips/frame-constants-mips.cc
index 4c930e71a9..1c593c05bc 100644
--- a/deps/v8/src/execution/mips/frame-constants-mips.cc
+++ b/deps/v8/src/execution/mips/frame-constants-mips.cc
@@ -17,7 +17,7 @@ Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
-int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
+int UnoptimizedFrameConstants::RegisterStackSlotCount(int register_count) {
return register_count;
}
diff --git a/deps/v8/src/execution/mips64/frame-constants-mips64.cc b/deps/v8/src/execution/mips64/frame-constants-mips64.cc
index 97ef183592..cfe899730c 100644
--- a/deps/v8/src/execution/mips64/frame-constants-mips64.cc
+++ b/deps/v8/src/execution/mips64/frame-constants-mips64.cc
@@ -17,7 +17,7 @@ Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
-int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
+int UnoptimizedFrameConstants::RegisterStackSlotCount(int register_count) {
return register_count;
}
diff --git a/deps/v8/src/execution/mips64/frame-constants-mips64.h b/deps/v8/src/execution/mips64/frame-constants-mips64.h
index 0f20f55d76..096c7dbac4 100644
--- a/deps/v8/src/execution/mips64/frame-constants-mips64.h
+++ b/deps/v8/src/execution/mips64/frame-constants-mips64.h
@@ -29,10 +29,11 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
// See Generate_WasmCompileLazy in builtins-mips64.cc.
static constexpr int kWasmInstanceOffset =
TYPED_FRAME_PUSHED_VALUE_OFFSET(kNumberOfSavedAllParamRegs);
+
static constexpr int kFixedFrameSizeFromFp =
TypedFrameConstants::kFixedFrameSizeFromFp +
kNumberOfSavedGpParamRegs * kPointerSize +
- kNumberOfSavedFpParamRegs * kDoubleSize;
+ kNumberOfSavedFpParamRegs * kSimd128Size;
};
// Frame constructed by the {WasmDebugBreak} builtin.
diff --git a/deps/v8/src/execution/ppc/frame-constants-ppc.cc b/deps/v8/src/execution/ppc/frame-constants-ppc.cc
index 97bef56a56..7ef3ea12bf 100644
--- a/deps/v8/src/execution/ppc/frame-constants-ppc.cc
+++ b/deps/v8/src/execution/ppc/frame-constants-ppc.cc
@@ -20,7 +20,7 @@ Register JavaScriptFrame::constant_pool_pointer_register() {
return kConstantPoolRegister;
}
-int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
+int UnoptimizedFrameConstants::RegisterStackSlotCount(int register_count) {
return register_count;
}
diff --git a/deps/v8/src/execution/ppc/frame-constants-ppc.h b/deps/v8/src/execution/ppc/frame-constants-ppc.h
index d29bd8c450..71596c561d 100644
--- a/deps/v8/src/execution/ppc/frame-constants-ppc.h
+++ b/deps/v8/src/execution/ppc/frame-constants-ppc.h
@@ -26,11 +26,15 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
static constexpr int kNumberOfSavedFpParamRegs = 8;
// FP-relative.
+ // The instance is pushed as part of the saved registers. Being in {r10}, it
+ // is the first register pushed (highest register code in
+ // {wasm::kGpParamRegisters}).
static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static constexpr int kFixedFrameSizeFromFp =
TypedFrameConstants::kFixedFrameSizeFromFp +
kNumberOfSavedGpParamRegs * kSystemPointerSize +
- kNumberOfSavedFpParamRegs * kDoubleSize;
+ kNumberOfSavedFpParamRegs * kDoubleSize +
+ kNumberOfSavedFpParamRegs * kSimd128Size;
};
// Frame constructed by the {WasmDebugBreak} builtin.
diff --git a/deps/v8/src/execution/protectors-inl.h b/deps/v8/src/execution/protectors-inl.h
index 8fe8bed107..c4d1a3c681 100644
--- a/deps/v8/src/execution/protectors-inl.h
+++ b/deps/v8/src/execution/protectors-inl.h
@@ -6,7 +6,6 @@
#define V8_EXECUTION_PROTECTORS_INL_H_
#include "src/execution/protectors.h"
-#include "src/objects/contexts-inl.h"
#include "src/objects/property-cell-inl.h"
#include "src/objects/smi.h"
diff --git a/deps/v8/src/execution/protectors.cc b/deps/v8/src/execution/protectors.cc
index 9d3afd1ded..cbc6ce0b28 100644
--- a/deps/v8/src/execution/protectors.cc
+++ b/deps/v8/src/execution/protectors.cc
@@ -53,9 +53,7 @@ DECLARED_PROTECTORS_ON_ISOLATE(V)
TraceProtectorInvalidation(#name); \
} \
isolate->CountUsage(v8::Isolate::kInvalidated##name##Protector); \
- PropertyCell::SetValueWithInvalidation( \
- isolate, #cell, isolate->factory()->cell(), \
- handle(Smi::FromInt(kProtectorInvalid), isolate)); \
+ isolate->factory()->cell()->InvalidateProtector(); \
DCHECK(!Is##name##Intact(isolate)); \
}
DECLARED_PROTECTORS_ON_ISOLATE(INVALIDATE_PROTECTOR_ON_ISOLATE_DEFINITION)
diff --git a/deps/v8/src/execution/riscv64/frame-constants-riscv64.cc b/deps/v8/src/execution/riscv64/frame-constants-riscv64.cc
new file mode 100644
index 0000000000..13e91639c9
--- /dev/null
+++ b/deps/v8/src/execution/riscv64/frame-constants-riscv64.cc
@@ -0,0 +1,32 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_RISCV64
+
+#include "src/execution/riscv64/frame-constants-riscv64.h"
+
+#include "src/codegen/riscv64/assembler-riscv64-inl.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
+Register JavaScriptFrame::context_register() { return cp; }
+Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
+
+int UnoptimizedFrameConstants::RegisterStackSlotCount(int register_count) {
+ return register_count;
+}
+
+int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
+ USE(register_count);
+ return 0;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/execution/riscv64/frame-constants-riscv64.h b/deps/v8/src/execution/riscv64/frame-constants-riscv64.h
new file mode 100644
index 0000000000..f5cb13c4e6
--- /dev/null
+++ b/deps/v8/src/execution/riscv64/frame-constants-riscv64.h
@@ -0,0 +1,86 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_RISCV64_FRAME_CONSTANTS_RISCV64_H_
+#define V8_EXECUTION_RISCV64_FRAME_CONSTANTS_RISCV64_H_
+
+#include "src/base/bits.h"
+#include "src/base/macros.h"
+#include "src/execution/frame-constants.h"
+#include "src/wasm/baseline/liftoff-assembler-defs.h"
+#include "src/wasm/wasm-linkage.h"
+
+namespace v8 {
+namespace internal {
+
+class EntryFrameConstants : public AllStatic {
+ public:
+ // This is the offset to where JSEntry pushes the current value of
+ // Isolate::c_entry_fp onto the stack.
+ static constexpr int kCallerFPOffset = -3 * kSystemPointerSize;
+};
+
+class WasmCompileLazyFrameConstants : public TypedFrameConstants {
+ public:
+ static constexpr int kNumberOfSavedGpParamRegs =
+ arraysize(wasm::kGpParamRegisters);
+ static constexpr int kNumberOfSavedFpParamRegs =
+ arraysize(wasm::kFpParamRegisters);
+ static constexpr int kNumberOfSavedAllParamRegs =
+ kNumberOfSavedGpParamRegs + kNumberOfSavedFpParamRegs;
+
+ // FP-relative.
+  // See Generate_WasmCompileLazy in builtins-riscv64.cc.
+ static constexpr int kWasmInstanceOffset =
+ TYPED_FRAME_PUSHED_VALUE_OFFSET(kNumberOfSavedAllParamRegs);
+ static constexpr int kFixedFrameSizeFromFp =
+ TypedFrameConstants::kFixedFrameSizeFromFp +
+ kNumberOfSavedGpParamRegs * kPointerSize +
+ kNumberOfSavedFpParamRegs * kDoubleSize;
+};
+
+// Frame constructed by the {WasmDebugBreak} builtin.
+// After pushing the frame type marker, the builtin pushes all Liftoff cache
+// registers (see liftoff-assembler-defs.h).
+class WasmDebugBreakFrameConstants : public TypedFrameConstants {
+ public:
+ // constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ // Register::ListOf(a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, s7);
+ static constexpr uint32_t kPushedGpRegs = wasm::kLiftoffAssemblerGpCacheRegs;
+
+ // constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
+ // ft0, ft1, ft2, ft3, ft4, ft5, ft6, ft7, fa0, fa1, fa2, fa3, fa4, fa5,
+ // fa6, fa7, ft8, ft9, ft10, ft11);
+ static constexpr uint32_t kPushedFpRegs = wasm::kLiftoffAssemblerFpCacheRegs;
+
+ static constexpr int kNumPushedGpRegisters =
+ base::bits::CountPopulation(kPushedGpRegs);
+ static constexpr int kNumPushedFpRegisters =
+ base::bits::CountPopulation(kPushedFpRegs);
+
+ static constexpr int kLastPushedGpRegisterOffset =
+ -kFixedFrameSizeFromFp - kNumPushedGpRegisters * kSystemPointerSize;
+ static constexpr int kLastPushedFpRegisterOffset =
+ kLastPushedGpRegisterOffset - kNumPushedFpRegisters * kDoubleSize;
+
+ // Offsets are fp-relative.
+ static int GetPushedGpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedGpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
+ }
+
+ static int GetPushedFpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedFpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kDoubleSize;
+ }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_RISCV64_FRAME_CONSTANTS_RISCV64_H_
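
The `WasmDebugBreakFrameConstants` helpers in the header above recover a register's spill slot purely from the bit mask of pushed registers: all pushed registers sit in one block below the fixed frame, and a register's slot index is the number of pushed registers with a smaller code. The standalone sketch below reproduces that arithmetic with plain integers; the register mask, pointer size, and fixed frame size are made-up illustration values, not the real RISC-V Liftoff register lists.

```cpp
#include <cassert>
#include <cstdint>
#include <iostream>

namespace {

// Hypothetical illustration values; the real code takes these from the
// RISC-V Liftoff register lists and V8's pointer-size constants.
constexpr int kSystemPointerSize = 8;
constexpr int kFixedFrameSizeFromFp = 16;
// Pretend registers with codes 1, 2, 5 and 7 were pushed by WasmDebugBreak.
constexpr uint32_t kPushedGpRegs =
    (1u << 1) | (1u << 2) | (1u << 5) | (1u << 7);
constexpr int kNumPushedGpRegisters = 4;  // popcount of kPushedGpRegs
constexpr int kLastPushedGpRegisterOffset =
    -kFixedFrameSizeFromFp - kNumPushedGpRegisters * kSystemPointerSize;

int CountPopulation(uint32_t value) {
  int bits = 0;
  for (; value != 0; value >>= 1) bits += value & 1;
  return bits;
}

// Same shape as GetPushedGpRegisterOffset: the slot index of reg_code is the
// number of pushed registers with a smaller code.
int GetPushedGpRegisterOffset(int reg_code) {
  assert(kPushedGpRegs & (1u << reg_code));
  uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
  return kLastPushedGpRegisterOffset +
         CountPopulation(lower_regs) * kSystemPointerSize;
}

}  // namespace

int main() {
  // Register code 1 has no lower pushed registers, so it occupies the lowest
  // slot: -16 - 4 * 8 == -48.
  std::cout << GetPushedGpRegisterOffset(1) << "\n";  // -48
  // Register code 5 has two lower pushed registers (1 and 2), so it sits two
  // slots higher: -48 + 2 * 8 == -32.
  std::cout << GetPushedGpRegisterOffset(5) << "\n";  // -32
  return 0;
}
```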
diff --git a/deps/v8/src/execution/riscv64/simulator-riscv64.cc b/deps/v8/src/execution/riscv64/simulator-riscv64.cc
new file mode 100644
index 0000000000..1d38d8c0ca
--- /dev/null
+++ b/deps/v8/src/execution/riscv64/simulator-riscv64.cc
@@ -0,0 +1,3750 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Copyright (c) 2010-2017,
+// The Regents of the University of California (Regents). All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// 1. Redistributions of source code must retain the above copyright notice,
+//    this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. Neither the name of the Regents nor the names of its contributors may
+//    be used to endorse or promote products derived from this software
+//    without specific prior written permission.
+//
+// IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
+// SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS,
+// ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
+// REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED
+// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
+// HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
+// MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+
+// The original source code covered by the above license has been modified
+// significantly by the v8 project authors.
+
+#include "src/execution/riscv64/simulator-riscv64.h"
+
+// Only build the simulator if not compiling for real RISCV hardware.
+#if defined(USE_SIMULATOR)
+
+#include <limits.h>
+#include <math.h>
+#include <stdarg.h>
+#include <stdlib.h>
+
+#include "src/base/bits.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/riscv64/constants-riscv64.h"
+#include "src/diagnostics/disasm.h"
+#include "src/heap/combined-heap.h"
+#include "src/runtime/runtime-utils.h"
+#include "src/utils/ostreams.h"
+#include "src/utils/vector.h"
+
+namespace v8 {
+namespace internal {
+
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
+ Simulator::GlobalMonitor::Get)
+
+// Util functions.
+inline bool HaveSameSign(int64_t a, int64_t b) { return ((a ^ b) >= 0); }
+
+uint32_t get_fcsr_condition_bit(uint32_t cc) {
+ if (cc == 0) {
+ return 23;
+ } else {
+ return 24 + cc;
+ }
+}
+
+// For an ebreak generated by Assembler::break_()/stop(), the stop code is
+// passed as the immediate field of a subsequent LUI instruction; returns that
+// code, or -1 if no such LUI follows.
+static inline int32_t get_ebreak_code(Instruction* instr) {
+ DCHECK(instr->InstructionBits() == kBreakInstr);
+ byte* cur = reinterpret_cast<byte*>(instr);
+ Instruction* next_instr = reinterpret_cast<Instruction*>(cur + kInstrSize);
+ if (next_instr->BaseOpcodeFieldRaw() == RO_LUI)
+ return (next_instr->Imm20UValue());
+ else
+ return -1;
+}
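
To make the ebreak/LUI convention used by get_ebreak_code() concrete, here is a small, self-contained sketch that encodes a stop code into the U-immediate of a LUI and reads it back from the raw instruction words. The EncodeLui/GetStopCode helpers are illustrative stand-ins only; the simulator itself goes through the Instruction accessors (BaseOpcodeFieldRaw, Imm20UValue).

```cpp
#include <cstdint>
#include <iostream>

// Standard RV64 encodings (not V8 constants): ebreak, and the LUI major
// opcode whose 20-bit U-immediate occupies bits 31..12.
constexpr uint32_t kEbreak = 0x00100073;
constexpr uint32_t kLuiOpcode = 0x37;

uint32_t EncodeLui(uint32_t rd, uint32_t imm20) {
  return (imm20 << 12) | (rd << 7) | kLuiOpcode;
}

int32_t GetStopCode(const uint32_t* pc) {
  if (pc[0] != kEbreak) return -1;             // not a break instruction
  uint32_t next = pc[1];
  if ((next & 0x7F) != kLuiOpcode) return -1;  // no LUI follows: plain ebreak
  return static_cast<int32_t>(next >> 12);     // the U-immediate is the code
}

int main() {
  // A stop emitted as "ebreak; lui zero, 42" carries the code 42.
  const uint32_t code[2] = {kEbreak, EncodeLui(/*rd=*/0, /*imm20=*/42)};
  std::cout << GetStopCode(code) << "\n";  // 42
  return 0;
}
```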
+
+// This macro provides a platform independent use of sscanf. The reason for
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS in the same way as SNPrintF is that the Windows C
+// Run-Time Library does not provide vsscanf.
+#define SScanF sscanf // NOLINT
+
+// The RiscvDebugger class is used by the simulator while debugging simulated
+// code.
+class RiscvDebugger {
+ public:
+ explicit RiscvDebugger(Simulator* sim) : sim_(sim) {}
+
+ void Debug();
+ // Print all registers with a nice formatting.
+ void PrintRegs(char name_prefix, int start_index, int end_index);
+ void PrintAllRegs();
+ void PrintAllRegsIncludingFPU();
+
+ static const Instr kNopInstr = 0x0;
+
+ private:
+ Simulator* sim_;
+
+ int64_t GetRegisterValue(int regnum);
+ int64_t GetFPURegisterValue(int regnum);
+ float GetFPURegisterValueFloat(int regnum);
+ double GetFPURegisterValueDouble(int regnum);
+ bool GetValue(const char* desc, int64_t* value);
+};
+
+inline void UNSUPPORTED() {
+ printf("Sim: Unsupported instruction.\n");
+ base::OS::Abort();
+}
+
+int64_t RiscvDebugger::GetRegisterValue(int regnum) {
+ if (regnum == kNumSimuRegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_register(regnum);
+ }
+}
+
+int64_t RiscvDebugger::GetFPURegisterValue(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register(regnum);
+ }
+}
+
+float RiscvDebugger::GetFPURegisterValueFloat(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register_float(regnum);
+ }
+}
+
+double RiscvDebugger::GetFPURegisterValueDouble(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register_double(regnum);
+ }
+}
+
+bool RiscvDebugger::GetValue(const char* desc, int64_t* value) {
+ int regnum = Registers::Number(desc);
+ int fpuregnum = FPURegisters::Number(desc);
+
+ if (regnum != kInvalidRegister) {
+ *value = GetRegisterValue(regnum);
+ return true;
+ } else if (fpuregnum != kInvalidFPURegister) {
+ *value = GetFPURegisterValue(fpuregnum);
+ return true;
+ } else if (strncmp(desc, "0x", 2) == 0) {
+ return SScanF(desc + 2, "%" SCNx64, reinterpret_cast<uint64_t*>(value)) ==
+ 1;
+ } else {
+ return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
+ }
+ return false;
+}
+
+#define REG_INFO(name) \
+ name, GetRegisterValue(Registers::Number(name)), \
+ GetRegisterValue(Registers::Number(name))
+
+void RiscvDebugger::PrintRegs(char name_prefix, int start_index,
+ int end_index) {
+ EmbeddedVector<char, 10> name1, name2;
+ DCHECK(name_prefix == 'a' || name_prefix == 't' || name_prefix == 's');
+ DCHECK(start_index >= 0 && end_index <= 99);
+ int num_registers = (end_index - start_index) + 1;
+ for (int i = 0; i < num_registers / 2; i++) {
+ SNPrintF(name1, "%c%d", name_prefix, start_index + 2 * i);
+ SNPrintF(name2, "%c%d", name_prefix, start_index + 2 * i + 1);
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \n",
+ REG_INFO(name1.begin()), REG_INFO(name2.begin()));
+ }
+ if (num_registers % 2 == 1) {
+ SNPrintF(name1, "%c%d", name_prefix, end_index);
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \n", REG_INFO(name1.begin()));
+ }
+}
+
+void RiscvDebugger::PrintAllRegs() {
+ PrintF("\n");
+ // ra, sp, gp
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 "\t%3s: 0x%016" PRIx64 " %14" PRId64
+ "\t%3s: 0x%016" PRIx64 " %14" PRId64 "\n",
+ REG_INFO("ra"), REG_INFO("sp"), REG_INFO("gp"));
+
+ // tp, fp, pc
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 "\t%3s: 0x%016" PRIx64 " %14" PRId64
+ "\t%3s: 0x%016" PRIx64 " %14" PRId64 "\n",
+ REG_INFO("tp"), REG_INFO("fp"), REG_INFO("pc"));
+
+ // print register a0, .., a7
+ PrintRegs('a', 0, 7);
+ // print registers s1, ..., s11
+ PrintRegs('s', 1, 11);
+ // print registers t0, ..., t6
+ PrintRegs('t', 0, 6);
+}
+
+#undef REG_INFO
+
+void RiscvDebugger::PrintAllRegsIncludingFPU() {
+#define FPU_REG_INFO(n) \
+ FPURegisters::Name(n), GetFPURegisterValue(n), GetFPURegisterValueDouble(n)
+
+ PrintAllRegs();
+
+ PrintF("\n\n");
+ // f0, f1, f2, ... f31.
+ DCHECK_EQ(kNumFPURegisters % 2, 0);
+ for (int i = 0; i < kNumFPURegisters; i += 2)
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e \t%3s: 0x%016" PRIx64 " %16.4e\n",
+ FPU_REG_INFO(i), FPU_REG_INFO(i + 1));
+#undef FPU_REG_INFO
+}
+
+void RiscvDebugger::Debug() {
+ intptr_t last_pc = -1;
+ bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = {cmd, arg1, arg2};
+
+ // Make sure to have a proper terminating character if reaching the limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
+ if (last_pc != sim_->get_pc()) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ v8::internal::EmbeddedVector<char, 256> buffer;
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(sim_->get_pc()));
+ PrintF(" 0x%016" PRIx64 " %s\n", sim_->get_pc(), buffer.begin());
+ last_pc = sim_->get_pc();
+ }
+ char* line = ReadLine("sim> ");
+ if (line == nullptr) {
+ break;
+ } else {
+ char* last_input = sim_->last_debugger_input();
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
+ line = last_input;
+ } else {
+ // Ownership is transferred to sim_;
+ sim_->set_last_debugger_input(line);
+ }
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = SScanF(
+ line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ Instruction* instr = reinterpret_cast<Instruction*>(sim_->get_pc());
+ if (!(instr->IsTrap()) ||
+ instr->InstructionBits() == rtCallRedirInstr) {
+ sim_->InstructionDecode(
+ reinterpret_cast<Instruction*>(sim_->get_pc()));
+ } else {
+ // Allow si to jump over generated breakpoints.
+ PrintF("/!\\ Jumping over generated breakpoint.\n");
+ sim_->set_pc(sim_->get_pc() + kInstrSize);
+ }
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // Execute the one instruction we broke at with breakpoints disabled.
+ sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
+ // Leave the debugger shell.
+ done = true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (argc == 2) {
+ int64_t value;
+ double dvalue;
+ if (strcmp(arg1, "all") == 0) {
+ PrintAllRegs();
+ } else if (strcmp(arg1, "allf") == 0) {
+ PrintAllRegsIncludingFPU();
+ } else {
+ int regnum = Registers::Number(arg1);
+ int fpuregnum = FPURegisters::Number(arg1);
+
+ if (regnum != kInvalidRegister) {
+ value = GetRegisterValue(regnum);
+ PrintF("%s: 0x%08" PRIx64 " %" PRId64 " \n", arg1, value,
+ value);
+ } else if (fpuregnum != kInvalidFPURegister) {
+ value = GetFPURegisterValue(fpuregnum);
+ dvalue = GetFPURegisterValueDouble(fpuregnum);
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n",
+ FPURegisters::Name(fpuregnum), value, dvalue);
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ if (argc == 3) {
+ if (strcmp(arg2, "single") == 0) {
+ int64_t value;
+ float fvalue;
+ int fpuregnum = FPURegisters::Number(arg1);
+
+ if (fpuregnum != kInvalidFPURegister) {
+ value = GetFPURegisterValue(fpuregnum);
+ value &= 0xFFFFFFFFUL;
+ fvalue = GetFPURegisterValueFloat(fpuregnum);
+ PrintF("%s: 0x%08" PRIx64 " %11.4e\n", arg1, value, fvalue);
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("print <fpu register> single\n");
+ }
+ } else {
+ PrintF("print <register> or print <fpu register> single\n");
+ }
+ }
+ } else if ((strcmp(cmd, "po") == 0) ||
+ (strcmp(cmd, "printobject") == 0)) {
+ if (argc == 2) {
+ int64_t value;
+ StdoutStream os;
+ if (GetValue(arg1, &value)) {
+ Object obj(value);
+ os << arg1 << ": \n";
+#ifdef DEBUG
+ obj.Print(os);
+ os << "\n";
+#else
+ os << Brief(obj) << "\n";
+#endif
+ } else {
+ os << arg1 << " unrecognized\n";
+ }
+ } else {
+ PrintF("printobject <value>\n");
+ }
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ int64_t* cur = nullptr;
+ int64_t* end = nullptr;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int64_t*>(sim_->get_register(Simulator::sp));
+ } else { // Command "mem".
+ if (argc < 2) {
+ PrintF("Need to specify <address> to mem command\n");
+ continue;
+ }
+ int64_t value;
+ if (!GetValue(arg1, &value)) {
+ PrintF("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int64_t*>(value);
+ next_arg++;
+ }
+
+ int64_t words;
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!GetValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ PrintF(" 0x%012" PRIxPTR " : 0x%016" PRIx64 " %14" PRId64 " ",
+ reinterpret_cast<intptr_t>(cur), *cur, *cur);
+ Object obj(*cur);
+ Heap* current_heap = sim_->isolate_->heap();
+ if (obj.IsSmi() ||
+ IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
+ PrintF(" (");
+ if (obj.IsSmi()) {
+ PrintF("smi %d", Smi::ToInt(obj));
+ } else {
+ obj.ShortPrint();
+ }
+ PrintF(")");
+ }
+ PrintF("\n");
+ cur++;
+ }
+
+ } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0) ||
+ (strcmp(cmd, "di") == 0)) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ v8::internal::EmbeddedVector<char, 256> buffer;
+
+ byte* cur = nullptr;
+ byte* end = nullptr;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ end = cur + (10 * kInstrSize);
+ } else if (argc == 2) {
+ int regnum = Registers::Number(arg1);
+ if (regnum != kInvalidRegister || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * kInstrSize);
+ }
+ } else {
+ // The argument is the number of instructions.
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * kInstrSize);
+ }
+ }
+ } else {
+ int64_t value1;
+ int64_t value2;
+ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+ cur = reinterpret_cast<byte*>(value1);
+ end = cur + (value2 * kInstrSize);
+ }
+ }
+
+ while (cur < end) {
+ dasm.InstructionDecode(buffer, cur);
+ PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
+ buffer.begin());
+ cur += kInstrSize;
+ }
+ } else if (strcmp(cmd, "gdb") == 0) {
+ PrintF("relinquishing control to gdb\n");
+ v8::base::OS::DebugBreak();
+ PrintF("regaining control from gdb\n");
+ } else if (strcmp(cmd, "break") == 0 || strcmp(cmd, "b") == 0 ||
+ strcmp(cmd, "tbreak") == 0) {
+ bool is_tbreak = strcmp(cmd, "tbreak") == 0;
+ if (argc == 2) {
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ sim_->SetBreakpoint(reinterpret_cast<Instruction*>(value),
+ is_tbreak);
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ sim_->ListBreakpoints();
+ PrintF("Use `break <address>` to set or disable a breakpoint\n");
+ PrintF(
+ "Use `tbreak <address>` to set or disable a temporary "
+ "breakpoint\n");
+ }
+ } else if (strcmp(cmd, "flags") == 0) {
+ PrintF("No flags on RISC-V !\n");
+ } else if (strcmp(cmd, "stop") == 0) {
+ int64_t value;
+ if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ PrintF("Stop information:\n");
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->PrintStopInfo(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->PrintStopInfo(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->EnableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->EnableStop(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->DisableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->DisableStop(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ PrintF("Wrong usage. Use help command for more information.\n");
+ }
+ } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) {
+ // Print registers and disassemble.
+ PrintAllRegs();
+ PrintF("\n");
+
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ v8::internal::EmbeddedVector<char, 256> buffer;
+
+ byte* cur = nullptr;
+ byte* end = nullptr;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ end = cur + (10 * kInstrSize);
+ } else if (argc == 2) {
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(value);
+ // no length parameter passed, assume 10 instructions
+ end = cur + (10 * kInstrSize);
+ }
+ } else {
+ int64_t value1;
+ int64_t value2;
+ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+ cur = reinterpret_cast<byte*>(value1);
+ end = cur + (value2 * kInstrSize);
+ }
+ }
+
+ while (cur < end) {
+ dasm.InstructionDecode(buffer, cur);
+ PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
+ buffer.begin());
+ cur += kInstrSize;
+ }
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ PrintF("cont (alias 'c')\n");
+ PrintF(" Continue execution\n");
+ PrintF("stepi (alias 'si')\n");
+ PrintF(" Step one instruction\n");
+ PrintF("print (alias 'p')\n");
+ PrintF(" print <register>\n");
+ PrintF(" Print register content\n");
+ PrintF(" Use register name 'all' to print all GPRs\n");
+ PrintF(" Use register name 'allf' to print all GPRs and FPRs\n");
+ PrintF("printobject (alias 'po')\n");
+ PrintF(" printobject <register>\n");
+ PrintF(" Print an object from a register\n");
+ PrintF("stack\n");
+ PrintF(" stack [<words>]\n");
+ PrintF(" Dump stack content (default 10 words)\n");
+ PrintF("mem\n");
+ PrintF(" mem <address> [<words>]\n");
+ PrintF(" Dump memory content (default 10 words)\n");
+ PrintF("flags\n");
+ PrintF(" print flags\n");
+ PrintF("disasm (alias 'di')\n");
+ PrintF(" disasm [<instructions>]\n");
+ PrintF(" disasm [<address/register>] (e.g., disasm pc) \n");
+ PrintF(" disasm [[<address/register>] <instructions>]\n");
+ PrintF(" Disassemble code, default is 10 instructions\n");
+ PrintF(" from pc\n");
+ PrintF("gdb \n");
+ PrintF(" Return to gdb if the simulator was started with gdb\n");
+ PrintF("break (alias 'b')\n");
+ PrintF(" break : list all breakpoints\n");
+ PrintF(" break <address> : set / enable / disable a breakpoint.\n");
+ PrintF("tbreak\n");
+ PrintF(" tbreak : list all breakpoints\n");
+ PrintF(
+ " tbreak <address> : set / enable / disable a temporary "
+ "breakpoint.\n");
+ PrintF(" Set a breakpoint enabled only for one stop. \n");
+ PrintF("stop feature:\n");
+ PrintF(" Description:\n");
+ PrintF(" Stops are debug instructions inserted by\n");
+ PrintF(" the Assembler::stop() function.\n");
+ PrintF(" When hitting a stop, the Simulator will\n");
+ PrintF(" stop and give control to the Debugger.\n");
+ PrintF(" All stop codes are watched:\n");
+ PrintF(" - They can be enabled / disabled: the Simulator\n");
+ PrintF(" will / won't stop when hitting them.\n");
+ PrintF(" - The Simulator keeps track of how many times they \n");
+ PrintF(" are met. (See the info command.) Going over a\n");
+ PrintF(" disabled stop still increases its counter. \n");
+ PrintF(" Commands:\n");
+ PrintF(" stop info all/<code> : print infos about number <code>\n");
+ PrintF(" or all stop(s).\n");
+ PrintF(" stop enable/disable all/<code> : enables / disables\n");
+ PrintF(" all or number <code> stop(s)\n");
+ } else {
+ PrintF("Unknown command: %s\n", cmd);
+ }
+ }
+ }
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+void Simulator::SetBreakpoint(Instruction* location, bool is_tbreak) {
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ if (breakpoints_.at(i).location == location) {
+ if (breakpoints_.at(i).is_tbreak != is_tbreak) {
+ PrintF("Change breakpoint at %p to %s breakpoint\n",
+ reinterpret_cast<void*>(location),
+ is_tbreak ? "temporary" : "regular");
+ breakpoints_.at(i).is_tbreak = is_tbreak;
+ return;
+ }
+ PrintF("Existing breakpoint at %p was %s\n",
+ reinterpret_cast<void*>(location),
+ breakpoints_.at(i).enabled ? "disabled" : "enabled");
+ breakpoints_.at(i).enabled = !breakpoints_.at(i).enabled;
+ return;
+ }
+ }
+ Breakpoint new_breakpoint = {location, true, is_tbreak};
+ breakpoints_.push_back(new_breakpoint);
+ PrintF("Set a %sbreakpoint at %p\n", is_tbreak ? "temporary " : "",
+ reinterpret_cast<void*>(location));
+}
+
+void Simulator::ListBreakpoints() {
+ PrintF("Breakpoints:\n");
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ PrintF("%p : %s %s\n",
+ reinterpret_cast<void*>(breakpoints_.at(i).location),
+ breakpoints_.at(i).enabled ? "enabled" : "disabled",
+ breakpoints_.at(i).is_tbreak ? ": temporary" : "");
+ }
+}
+
+void Simulator::CheckBreakpoints() {
+ bool hit_a_breakpoint = false;
+ bool is_tbreak = false;
+ Instruction* pc_ = reinterpret_cast<Instruction*>(get_pc());
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ if ((breakpoints_.at(i).location == pc_) && breakpoints_.at(i).enabled) {
+ hit_a_breakpoint = true;
+ if (breakpoints_.at(i).is_tbreak) {
+ // Disable a temporary breakpoint.
+ is_tbreak = true;
+ breakpoints_.at(i).enabled = false;
+ }
+ break;
+ }
+ }
+ if (hit_a_breakpoint) {
+ PrintF("Hit %sa breakpoint at %p.\n", is_tbreak ? "and disabled " : "",
+ reinterpret_cast<void*>(pc_));
+ RiscvDebugger dbg(this);
+ dbg.Debug();
+ }
+}
+
+bool Simulator::ICacheMatch(void* one, void* two) {
+ DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
+ return one == two;
+}
+
+static uint32_t ICacheHash(void* key) {
+ return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
+}
+
+static bool AllOnOnePage(uintptr_t start, size_t size) {
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
+
+void Simulator::set_last_debugger_input(char* input) {
+ DeleteArray(last_debugger_input_);
+ last_debugger_input_ = input;
+}
+
+void Simulator::SetRedirectInstruction(Instruction* instruction) {
+ instruction->SetInstructionBits(rtCallRedirInstr);
+}
+
+void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
+ void* start_addr, size_t size) {
+ int64_t start = reinterpret_cast<int64_t>(start_addr);
+ int64_t intra_line = (start & CachePage::kLineMask);
+ start -= intra_line;
+ size += intra_line;
+ size = ((size - 1) | CachePage::kLineMask) + 1;
+ int offset = (start & CachePage::kPageMask);
+ while (!AllOnOnePage(start, size - 1)) {
+ int bytes_to_flush = CachePage::kPageSize - offset;
+ FlushOnePage(i_cache, start, bytes_to_flush);
+ start += bytes_to_flush;
+ size -= bytes_to_flush;
+ DCHECK_EQ((int64_t)0, start & CachePage::kPageMask);
+ offset = 0;
+ }
+ if (size != 0) {
+ FlushOnePage(i_cache, start, size);
+ }
+}
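
FlushICache above first widens the requested [start, start + size) range to whole cache lines before walking it page by page. The standalone sketch below shows just that rounding step with plain integers; the 32-byte line size and the addresses are made-up values for illustration (the simulator takes the real masks from CachePage).

```cpp
#include <cstdint>
#include <iostream>

int main() {
  constexpr int64_t kLineSize = 32;  // assumption for this sketch
  constexpr int64_t kLineMask = kLineSize - 1;

  int64_t start = 0x1005;  // hypothetical, not line-aligned
  int64_t size = 70;       // hypothetical byte count to flush

  // Pull start back to its cache-line boundary and grow size by that amount,
  // exactly as FlushICache does with CachePage::kLineMask.
  int64_t intra_line = start & kLineMask;  // 0x1005 & 0x1F == 5
  start -= intra_line;                     // 0x1000
  size += intra_line;                      // 75

  // Round size up to a whole number of lines: ((size - 1) | mask) + 1.
  size = ((size - 1) | kLineMask) + 1;     // 96 == three 32-byte lines

  std::cout << std::hex << start << " " << std::dec << size << "\n";
  // Prints "1000 96": the flushed range now covers whole cache lines.
  return 0;
}
```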
+
+CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
+ void* page) {
+ base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
+ if (entry->value == nullptr) {
+ CachePage* new_page = new CachePage();
+ entry->value = new_page;
+ }
+ return reinterpret_cast<CachePage*>(entry->value);
+}
+
+// Flush from start up to and not including start + size.
+void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
+ intptr_t start, size_t size) {
+ DCHECK_LE(size, CachePage::kPageSize);
+ DCHECK(AllOnOnePage(start, size - 1));
+ DCHECK_EQ(start & CachePage::kLineMask, 0);
+ DCHECK_EQ(size & CachePage::kLineMask, 0);
+ void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+ int offset = (start & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePage(i_cache, page);
+ char* valid_bytemap = cache_page->ValidityByte(offset);
+ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
+void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
+ Instruction* instr) {
+ int64_t address = reinterpret_cast<int64_t>(instr);
+ void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+ void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+ int offset = (address & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePage(i_cache, page);
+ char* cache_valid_byte = cache_page->ValidityByte(offset);
+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+ char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
+ if (cache_hit) {
+ // Check that the data in memory matches the contents of the I-cache.
+ CHECK_EQ(0, memcmp(reinterpret_cast<void*>(instr),
+ cache_page->CachedData(offset), kInstrSize));
+ } else {
+ // Cache miss. Load memory into the cache.
+ memcpy(cached_line, line, CachePage::kLineLength);
+ *cache_valid_byte = CachePage::LINE_VALID;
+ }
+}
+
+Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
+ // Set up simulator support first. Some of this information is needed to
+ // set up the architecture state.
+ stack_size_ = FLAG_sim_stack_size * KB;
+ stack_ = reinterpret_cast<char*>(malloc(stack_size_));
+ pc_modified_ = false;
+ icount_ = 0;
+ break_count_ = 0;
+ // Reset debug helpers.
+ breakpoints_.clear();
+ // TODO(riscv): 'next' command
+ // break_on_next_ = false;
+
+ // Set up architecture state.
+ // All registers are initialized to zero to start with.
+ for (int i = 0; i < kNumSimuRegisters; i++) {
+ registers_[i] = 0;
+ }
+
+ for (int i = 0; i < kNumFPURegisters; i++) {
+ FPUregisters_[i] = 0;
+ }
+
+ FCSR_ = 0;
+
+ // The sp is initialized to point to the bottom (high address) of the
+ // allocated stack area. To be safe against potential stack underflows we
+ // leave some buffer below.
+ registers_[sp] = reinterpret_cast<int64_t>(stack_) + stack_size_ - 64;
+ // The ra and pc are initialized to a known bad value that will cause an
+ // access violation if the simulator ever tries to execute it.
+ registers_[pc] = bad_ra;
+ registers_[ra] = bad_ra;
+
+ last_debugger_input_ = nullptr;
+}
+
+Simulator::~Simulator() {
+ GlobalMonitor::Get()->RemoveLinkedAddress(&global_monitor_thread_);
+ free(stack_);
+}
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::current(Isolate* isolate) {
+ v8::internal::Isolate::PerIsolateThreadData* isolate_data =
+ isolate->FindOrAllocatePerThreadDataForThisThread();
+ DCHECK_NOT_NULL(isolate_data);
+
+ Simulator* sim = isolate_data->simulator();
+ if (sim == nullptr) {
+ // TODO(146): delete the simulator object when a thread/isolate goes away.
+ sim = new Simulator(isolate);
+ isolate_data->set_simulator(sim);
+ }
+ return sim;
+}
+
+// Sets the register in the architecture state. It will also deal with
+// updating Simulator internal state for special registers such as PC.
+void Simulator::set_register(int reg, int64_t value) {
+ DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
+ if (reg == pc) {
+ pc_modified_ = true;
+ }
+
+ // Zero register always holds 0.
+ registers_[reg] = (reg == 0) ? 0 : value;
+}
+
+void Simulator::set_dw_register(int reg, const int* dbl) {
+ DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
+ registers_[reg] = dbl[1];
+ registers_[reg] = registers_[reg] << 32;
+ registers_[reg] += dbl[0];
+}
+
+void Simulator::set_fpu_register(int fpureg, int64_t value) {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ FPUregisters_[fpureg] = value;
+}
+
+void Simulator::set_fpu_register_word(int fpureg, int32_t value) {
+ // Set ONLY lower 32-bits, leaving upper bits untouched.
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ int32_t* pword;
+ if (kArchEndian == kLittle) {
+ pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]);
+ } else {
+ pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]) + 1;
+ }
+ *pword = value;
+}
+
+void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) {
+ // Set ONLY upper 32-bits, leaving lower bits untouched.
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ int32_t* phiword;
+ if (kArchEndian == kLittle) {
+ phiword = (reinterpret_cast<int32_t*>(&FPUregisters_[fpureg])) + 1;
+ } else {
+ phiword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]);
+ }
+ *phiword = value;
+}
+
+void Simulator::set_fpu_register_float(int fpureg, float value) {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ FPUregisters_[fpureg] = box_float(value);
+}
+
+void Simulator::set_fpu_register_double(int fpureg, double value) {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ *bit_cast<double*>(&FPUregisters_[fpureg]) = value;
+}
+
+// Get the register from the architecture state. This function does handle
+// the special case of accessing the PC register.
+int64_t Simulator::get_register(int reg) const {
+ DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
+ if (reg == 0)
+ return 0;
+ else
+ return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0);
+}
+
+double Simulator::get_double_from_register_pair(int reg) {
+ // TODO(plind): bad ABI stuff, refactor or remove.
+ DCHECK((reg >= 0) && (reg < kNumSimuRegisters) && ((reg % 2) == 0));
+
+ double dm_val = 0.0;
+ // Read the bits from the unsigned integer register_[] array
+ // into the double precision floating point value and return it.
+ char buffer[sizeof(registers_[0])];
+ memcpy(buffer, &registers_[reg], sizeof(registers_[0]));
+ memcpy(&dm_val, buffer, sizeof(registers_[0]));
+ return (dm_val);
+}
+
+int64_t Simulator::get_fpu_register(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return FPUregisters_[fpureg];
+}
+
+int32_t Simulator::get_fpu_register_word(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return static_cast<int32_t>(FPUregisters_[fpureg] & 0xFFFFFFFF);
+}
+
+int32_t Simulator::get_fpu_register_signed_word(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return static_cast<int32_t>(FPUregisters_[fpureg] & 0xFFFFFFFF);
+}
+
+int32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return static_cast<int32_t>((FPUregisters_[fpureg] >> 32) & 0xFFFFFFFF);
+}
+
+float Simulator::get_fpu_register_float(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ if (!is_boxed_float(FPUregisters_[fpureg])) {
+ return std::numeric_limits<float>::quiet_NaN();
+ }
+ return *bit_cast<float*>(const_cast<int64_t*>(&FPUregisters_[fpureg]));
+}
+
+double Simulator::get_fpu_register_double(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return *bit_cast<double*>(&FPUregisters_[fpureg]);
+}
+
+// Runtime FP routines take up to two double arguments and zero
+// or one integer argument. All are constructed here,
+// from fa0, fa1, and a0.
+void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
+ *x = get_fpu_register_double(fa0);
+ *y = get_fpu_register_double(fa1);
+ *z = static_cast<int32_t>(get_register(a0));
+}
+
+// The return value is in fa0.
+void Simulator::SetFpResult(const double& result) {
+ set_fpu_register_double(fa0, result);
+}
+
+// Helper functions to read/write/set/clear CSR values/bits.
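+// In fcsr, the accrued exception flags (fflags) occupy bits [4:0] and the
+// dynamic rounding mode (frm) occupies bits [7:5]; the kFcsr* masks and
+// shifts used below are assumed to encode that layout.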
+uint32_t Simulator::read_csr_value(uint32_t csr) {
+ switch (csr) {
+ case csr_fflags: // Floating-Point Accrued Exceptions (RW)
+ return (FCSR_ & kFcsrFlagsMask);
+ case csr_frm: // Floating-Point Dynamic Rounding Mode (RW)
+ return (FCSR_ & kFcsrFrmMask) >> kFcsrFrmShift;
+ case csr_fcsr: // Floating-Point Control and Status Register (RW)
+ return (FCSR_ & kFcsrMask);
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+uint32_t Simulator::get_dynamic_rounding_mode() {
+ return read_csr_value(csr_frm);
+}
+
+void Simulator::write_csr_value(uint32_t csr, uint64_t val) {
+ uint32_t value = (uint32_t)val;
+ switch (csr) {
+ case csr_fflags: // Floating-Point Accrued Exceptions (RW)
+ DCHECK(value <= ((1 << kFcsrFlagsBits) - 1));
+ FCSR_ = (FCSR_ & (~kFcsrFlagsMask)) | value;
+ break;
+ case csr_frm: // Floating-Point Dynamic Rounding Mode (RW)
+ DCHECK(value <= ((1 << kFcsrFrmBits) - 1));
+ FCSR_ = (FCSR_ & (~kFcsrFrmMask)) | (value << kFcsrFrmShift);
+ break;
+ case csr_fcsr: // Floating-Point Control and Status Register (RW)
+ DCHECK(value <= ((1 << kFcsrBits) - 1));
+ FCSR_ = (FCSR_ & (~kFcsrMask)) | value;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void Simulator::set_csr_bits(uint32_t csr, uint64_t val) {
+ uint32_t value = (uint32_t)val;
+ switch (csr) {
+ case csr_fflags: // Floating-Point Accrued Exceptions (RW)
+ DCHECK(value <= ((1 << kFcsrFlagsBits) - 1));
+ FCSR_ = FCSR_ | value;
+ break;
+ case csr_frm: // Floating-Point Dynamic Rounding Mode (RW)
+ DCHECK(value <= ((1 << kFcsrFrmBits) - 1));
+ FCSR_ = FCSR_ | (value << kFcsrFrmShift);
+ break;
+ case csr_fcsr: // Floating-Point Control and Status Register (RW)
+ DCHECK(value <= ((1 << kFcsrBits) - 1));
+ FCSR_ = FCSR_ | value;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void Simulator::clear_csr_bits(uint32_t csr, uint64_t val) {
+ uint32_t value = (uint32_t)val;
+ switch (csr) {
+ case csr_fflags: // Floating-Point Accrued Exceptions (RW)
+ DCHECK(value <= ((1 << kFcsrFlagsBits) - 1));
+ FCSR_ = FCSR_ & (~value);
+ break;
+ case csr_frm: // Floating-Point Dynamic Rounding Mode (RW)
+ DCHECK(value <= ((1 << kFcsrFrmBits) - 1));
+ FCSR_ = FCSR_ & (~(value << kFcsrFrmShift));
+ break;
+ case csr_fcsr: // Floating-Point Control and Status Register (RW)
+ DCHECK(value <= ((1 << kFcsrBits) - 1));
+ FCSR_ = FCSR_ & (~value);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+bool Simulator::test_fflags_bits(uint32_t mask) {
+ return (FCSR_ & kFcsrFlagsMask & mask) != 0;
+}
+
+template <typename T>
+T Simulator::FMaxMinHelper(T a, T b, MaxMinKind kind) {
+ // set invalid bit for signaling nan
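+  // Note: an IEEE-754 equality compare against a NaN is always false, so this
+  // check never fires for a signaling-NaN operand; a bit-pattern test (e.g.
+  // isSnan, used elsewhere in this file) would be needed to detect it.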
+ if ((a == std::numeric_limits<T>::signaling_NaN()) ||
+ (b == std::numeric_limits<T>::signaling_NaN())) {
+ set_csr_bits(csr_fflags, kInvalidOperation);
+ }
+
+ T result = 0;
+ if (std::isnan(a) && std::isnan(b)) {
+ result = a;
+ } else if (std::isnan(a)) {
+ result = b;
+ } else if (std::isnan(b)) {
+ result = a;
+ } else if (b == a) { // Handle -0.0 == 0.0 case.
+ if (kind == MaxMinKind::kMax) {
+ result = std::signbit(b) ? a : b;
+ } else {
+ result = std::signbit(b) ? b : a;
+ }
+ } else {
+ result = (kind == MaxMinKind::kMax) ? fmax(a, b) : fmin(a, b);
+ }
+
+ return result;
+}
+
+// Raw access to the PC register.
+void Simulator::set_pc(int64_t value) {
+ pc_modified_ = true;
+ registers_[pc] = value;
+ DCHECK(has_bad_pc() || ((value % kInstrSize) == 0) ||
+ ((value % kShortInstrSize) == 0));
+}
+
+bool Simulator::has_bad_pc() const {
+ return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
+}
+
+// Raw access to the PC register without the special adjustment when reading.
+int64_t Simulator::get_pc() const { return registers_[pc]; }
+
+// The RISC-V spec leaves it open to the implementation on how to handle
+// unaligned reads and writes. For now, we simply disallow unaligned accesses,
+// but at some point we may want to implement some other behavior.
+
+// TODO(plind): refactor this messy debug code when we do unaligned access.
+void Simulator::DieOrDebug() {
+ if ((1)) { // Flag for this was removed.
+ RiscvDebugger dbg(this);
+ dbg.Debug();
+ } else {
+ base::OS::Abort();
+ }
+}
+
+void Simulator::TraceRegWr(int64_t value, TraceType t) {
+ if (::v8::internal::FLAG_trace_sim) {
+ union {
+ int64_t fmt_int64;
+ int32_t fmt_int32[2];
+ float fmt_float[2];
+ double fmt_double;
+ } v;
+ v.fmt_int64 = value;
+
+ switch (t) {
+ case WORD:
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") int32:%" PRId32
+ " uint32:%" PRIu32,
+ v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0]);
+ break;
+ case DWORD:
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") int64:%" PRId64
+ " uint64:%" PRIu64,
+ value, icount_, value, value);
+ break;
+ case FLOAT:
+ SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") flt:%e",
+ v.fmt_int64, icount_, v.fmt_float[0]);
+ break;
+ case DOUBLE:
+ SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") dbl:%e",
+ v.fmt_int64, icount_, v.fmt_double);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+// TODO(plind): consider making icount_ printing a flag option.
+template <typename T>
+void Simulator::TraceMemRd(int64_t addr, T value, int64_t reg_value) {
+ if (::v8::internal::FLAG_trace_sim) {
+ if (std::is_integral<T>::value) {
+ switch (sizeof(T)) {
+ case 1:
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") int8:%" PRId8
+ " uint8:%" PRIu8 " <-- [addr: %" PRIx64 "]",
+ reg_value, icount_, static_cast<int8_t>(value),
+ static_cast<uint8_t>(value), addr);
+ break;
+ case 2:
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") int16:%" PRId16
+ " uint16:%" PRIu16 " <-- [addr: %" PRIx64 "]",
+ reg_value, icount_, static_cast<int16_t>(value),
+ static_cast<uint16_t>(value), addr);
+ break;
+ case 4:
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") int32:%" PRId32
+ " uint32:%" PRIu32 " <-- [addr: %" PRIx64 "]",
+ reg_value, icount_, static_cast<int32_t>(value),
+ static_cast<uint32_t>(value), addr);
+ break;
+ case 8:
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") int64:%" PRId64
+ " uint64:%" PRIu64 " <-- [addr: %" PRIx64 "]",
+ reg_value, icount_, static_cast<int64_t>(value),
+ static_cast<uint64_t>(value), addr);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else if (std::is_same<float, T>::value) {
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") flt:%e <-- [addr: %" PRIx64
+ "]",
+ reg_value, icount_, static_cast<float>(value), addr);
+ } else if (std::is_same<double, T>::value) {
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") dbl:%e <-- [addr: %" PRIx64
+ "]",
+ reg_value, icount_, static_cast<double>(value), addr);
+ } else {
+ UNREACHABLE();
+ }
+ }
+}
+
+template <typename T>
+void Simulator::TraceMemWr(int64_t addr, T value) {
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (sizeof(T)) {
+ case 1:
+ SNPrintF(trace_buf_,
+ " (%" PRIu64 ") int8:%" PRId8
+ " uint8:%" PRIu8 " --> [addr: %" PRIx64 "]",
+ icount_, static_cast<int8_t>(value),
+ static_cast<uint8_t>(value), addr);
+ break;
+ case 2:
+ SNPrintF(trace_buf_,
+ " (%" PRIu64 ") int16:%" PRId16
+ " uint16:%" PRIu16 " --> [addr: %" PRIx64 "]",
+ icount_, static_cast<int16_t>(value),
+ static_cast<uint16_t>(value), addr);
+ break;
+ case 4:
+ if (std::is_integral<T>::value) {
+ SNPrintF(trace_buf_,
+ " (%" PRIu64 ") int32:%" PRId32
+ " uint32:%" PRIu32 " --> [addr: %" PRIx64 "]",
+ icount_, static_cast<int32_t>(value),
+ static_cast<uint32_t>(value), addr);
+ } else {
+ SNPrintF(trace_buf_,
+ " (%" PRIu64
+ ") flt:%e --> [addr: %" PRIx64 "]",
+ icount_, static_cast<float>(value), addr);
+ }
+ break;
+ case 8:
+ if (std::is_integral<T>::value) {
+ SNPrintF(trace_buf_,
+ " (%" PRIu64 ") int64:%" PRId64
+ " uint64:%" PRIu64 " --> [addr: %" PRIx64 "]",
+ icount_, static_cast<int64_t>(value),
+ static_cast<uint64_t>(value), addr);
+ } else {
+ SNPrintF(trace_buf_,
+ " (%" PRIu64
+ ") dbl:%e --> [addr: %" PRIx64 "]",
+ icount_, static_cast<double>(value), addr);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+// RISCV Memory Read/Write functions
+
+// TODO(RISCV): check whether the specific board supports unaligned load/store
+// (determined by EEI). For now, we assume the board does not support unaligned
+// load/store (e.g., trapping)
+template <typename T>
+T Simulator::ReadMem(int64_t addr, Instruction* instr) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+
+ // check for natural alignment
+ if ((addr & (sizeof(T) - 1)) != 0) {
+ PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+
+ T* ptr = reinterpret_cast<T*>(addr);
+ T value = *ptr;
+ return value;
+}
+
+template <typename T>
+void Simulator::WriteMem(int64_t addr, T value, Instruction* instr) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+
+ // check for natural alignment
+ if ((addr & (sizeof(T) - 1)) != 0) {
+ PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+
+ T* ptr = reinterpret_cast<T*>(addr);
+ TraceMemWr(addr, value);
+ *ptr = value;
+}
+
+// Returns the limit of the stack area to enable checking for stack overflows.
+uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
+ // The simulator uses a separate JS stack. If we have exhausted the C stack,
+ // we also drop down the JS limit to reflect the exhaustion on the JS stack.
+ if (GetCurrentStackPosition() < c_limit) {
+ return reinterpret_cast<uintptr_t>(get_sp());
+ }
+
+ // Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes
+ // to prevent overrunning the stack when pushing values.
+ return reinterpret_cast<uintptr_t>(stack_) + 1024;
+}
+
+// Unsupported instructions use Format to print an error and stop execution.
+void Simulator::Format(Instruction* instr, const char* format) {
+ PrintF("Simulator found unsupported instruction:\n 0x%08" PRIxPTR " : %s\n",
+ reinterpret_cast<intptr_t>(instr), format);
+ UNIMPLEMENTED_RISCV();
+}
+
+// Calls into the V8 runtime are based on this very simple interface.
+// Note: To be able to return two values from some calls, the code in
+// runtime.cc uses the ObjectPair, which holds two pointer-sized values. With
+// the code below we assume that all runtime calls return their results in the
+// a0/a1 register pair. If a call only produces one value, the a1 result
+// register contains a bogus value, which is fine because it is caller-saved.
+
+using SimulatorRuntimeCall = ObjectPair (*)(int64_t arg0, int64_t arg1,
+ int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5,
+ int64_t arg6, int64_t arg7,
+ int64_t arg8, int64_t arg9);
+
+// These prototypes handle the four types of FP calls.
+using SimulatorRuntimeCompareCall = int64_t (*)(double darg0, double darg1);
+using SimulatorRuntimeFPFPCall = double (*)(double darg0, double darg1);
+using SimulatorRuntimeFPCall = double (*)(double darg0);
+using SimulatorRuntimeFPIntCall = double (*)(double darg0, int32_t arg0);
+
+// This signature supports direct call in to API function native callback
+// (refer to InvocationCallback in v8.h).
+using SimulatorRuntimeDirectApiCall = void (*)(int64_t arg0);
+using SimulatorRuntimeProfilingApiCall = void (*)(int64_t arg0, void* arg1);
+
+// This signature supports direct call to accessor getter callback.
+using SimulatorRuntimeDirectGetterCall = void (*)(int64_t arg0, int64_t arg1);
+using SimulatorRuntimeProfilingGetterCall = void (*)(int64_t arg0, int64_t arg1,
+ void* arg2);
+
+// Software interrupt instructions are used by the simulator to call into the
+// C-based V8 runtime. They are also used for debugging with the simulator.
+void Simulator::SoftwareInterrupt() {
+  // There are two instructions that could get us here: the ebreak and ecall
+  // instructions are "SYSTEM"-class opcodes, distinguished by the Imm12Value
+  // field, with the rest of the instruction fields being zero.
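+  // For a redirected ecall (rtCallRedirInstr below), the Redirection record
+  // recovered from the instruction stores the external function address and
+  // the call type, which selects the calling convention used to invoke it.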
+ int32_t func = instr_.Imm12Value();
+ // We first check if we met a call_rt_redirected.
+ if (instr_.InstructionBits() == rtCallRedirInstr) { // ECALL
+ Redirection* redirection = Redirection::FromInstruction(instr_.instr());
+
+ int64_t* stack_pointer = reinterpret_cast<int64_t*>(get_register(sp));
+
+ int64_t arg0 = get_register(a0);
+ int64_t arg1 = get_register(a1);
+ int64_t arg2 = get_register(a2);
+ int64_t arg3 = get_register(a3);
+ int64_t arg4 = get_register(a4);
+ int64_t arg5 = get_register(a5);
+ int64_t arg6 = get_register(a6);
+ int64_t arg7 = get_register(a7);
+ int64_t arg8 = stack_pointer[0];
+ int64_t arg9 = stack_pointer[1];
+ STATIC_ASSERT(kMaxCParameters == 10);
+
+ bool fp_call =
+ (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
+
+ // This is dodgy but it works because the C entry stubs are never moved.
+ // See comment in codegen-arm.cc and bug 1242173.
+ int64_t saved_ra = get_register(ra);
+
+ intptr_t external =
+ reinterpret_cast<intptr_t>(redirection->external_function());
+
+ if (fp_call) {
+ double dval0, dval1; // one or two double parameters
+ int32_t ival; // zero or one integer parameters
+ int64_t iresult = 0; // integer return value
+ double dresult = 0; // double return value
+ GetFpArgs(&dval0, &dval1, &ival);
+ SimulatorRuntimeCall generic_target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Call to host function at %p with args %f, %f",
+ reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
+ dval0, dval1);
+ break;
+ case ExternalReference::BUILTIN_FP_CALL:
+ PrintF("Call to host function at %p with arg %f",
+ reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
+ dval0);
+ break;
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Call to host function at %p with args %f, %d",
+ reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
+ dval0, ival);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL: {
+ SimulatorRuntimeCompareCall target =
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ iresult = target(dval0, dval1);
+ set_register(a0, static_cast<int64_t>(iresult));
+ // set_register(a1, static_cast<int64_t>(iresult >> 32));
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_FP_CALL: {
+ SimulatorRuntimeFPFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ dresult = target(dval0, dval1);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_CALL: {
+ SimulatorRuntimeFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ dresult = target(dval0);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_INT_CALL: {
+ SimulatorRuntimeFPIntCall target =
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ dresult = target(dval0, ival);
+ SetFpResult(dresult);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
+ break;
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_FP_CALL:
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Returned %f\n", dresult);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08" PRIx64 " \n",
+ reinterpret_cast<void*>(external), arg0);
+ }
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ target(arg0);
+ } else if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64
+ " \n",
+ reinterpret_cast<void*>(external), arg0, arg1);
+ }
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+ target(arg0, Redirection::ReverseRedirection(arg1));
+ } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64
+ " \n",
+ reinterpret_cast<void*>(external), arg0, arg1);
+ }
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ target(arg0, arg1);
+ } else if (redirection->type() ==
+ ExternalReference::PROFILING_GETTER_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64
+ " %08" PRIx64 " \n",
+ reinterpret_cast<void*>(external), arg0, arg1, arg2);
+ }
+ SimulatorRuntimeProfilingGetterCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
+ target(arg0, arg1, Redirection::ReverseRedirection(arg2));
+ } else {
+ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
+ redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF(
+ "Call to host function at %p "
+ "args %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
+ " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
+ " , %08" PRIx64 " , %08" PRIx64 " \n",
+ reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2,
+ arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+ }
+ ObjectPair result =
+ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+ set_register(a0, (int64_t)(result.x));
+ set_register(a1, (int64_t)(result.y));
+ }
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %08" PRIx64 " : %08" PRIx64 " \n", get_register(a1),
+ get_register(a0));
+ }
+ set_register(ra, saved_ra);
+ set_pc(get_register(ra));
+
+ } else if (func == 1) { // EBREAK
+ int32_t code = get_ebreak_code(instr_.instr());
+ set_pc(get_pc() + kInstrSize * 2);
+ if (code != -1 && static_cast<uint32_t>(code) <= kMaxStopCode) {
+ if (IsWatchpoint(code)) {
+ PrintWatchpoint(code);
+ } else {
+ IncreaseStopCounter(code);
+ HandleStop(code);
+ }
+ } else {
+ // All remaining break_ codes, and all traps are handled here.
+ RiscvDebugger dbg(this);
+ dbg.Debug();
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+// Stop helper functions.
+bool Simulator::IsWatchpoint(uint64_t code) {
+ return (code <= kMaxWatchpointCode);
+}
+
+void Simulator::PrintWatchpoint(uint64_t code) {
+ RiscvDebugger dbg(this);
+ ++break_count_;
+ PrintF("\n---- watchpoint %" PRId64 " marker: %3d (instr count: %8" PRId64
+ " ) ----------"
+ "----------------------------------",
+ code, break_count_, icount_);
+ dbg.PrintAllRegs(); // Print registers and continue running.
+}
+
+void Simulator::HandleStop(uint64_t code) {
+ // Stop if it is enabled, otherwise go on jumping over the stop
+ // and the message address.
+ if (IsEnabledStop(code)) {
+ RiscvDebugger dbg(this);
+ PrintF("Simulator hit stop (%" PRId64 ")\n", code);
+ dbg.Debug();
+ }
+}
+
+bool Simulator::IsStopInstruction(Instruction* instr) {
+ if (instr->InstructionBits() != kBreakInstr) return false;
+ int32_t code = get_ebreak_code(instr);
+ return code != -1 && static_cast<uint32_t>(code) > kMaxWatchpointCode &&
+ static_cast<uint32_t>(code) <= kMaxStopCode;
+}
+
+bool Simulator::IsEnabledStop(uint64_t code) {
+ DCHECK_LE(code, kMaxStopCode);
+ DCHECK_GT(code, kMaxWatchpointCode);
+ return !(watched_stops_[code].count & kStopDisabledBit);
+}
+
+void Simulator::EnableStop(uint64_t code) {
+ if (!IsEnabledStop(code)) {
+ watched_stops_[code].count &= ~kStopDisabledBit;
+ }
+}
+
+void Simulator::DisableStop(uint64_t code) {
+ if (IsEnabledStop(code)) {
+ watched_stops_[code].count |= kStopDisabledBit;
+ }
+}
+
+void Simulator::IncreaseStopCounter(uint64_t code) {
+ DCHECK_LE(code, kMaxStopCode);
+ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) {
+ PrintF("Stop counter for code %" PRId64
+ " has overflowed.\n"
+ "Enabling this code and reseting the counter to 0.\n",
+ code);
+ watched_stops_[code].count = 0;
+ EnableStop(code);
+ } else {
+ watched_stops_[code].count++;
+ }
+}
+
+// Print a stop status.
+void Simulator::PrintStopInfo(uint64_t code) {
+ if (code <= kMaxWatchpointCode) {
+ PrintF("That is a watchpoint, not a stop.\n");
+ return;
+ } else if (code > kMaxStopCode) {
+ PrintF("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
+ return;
+ }
+ const char* state = IsEnabledStop(code) ? "Enabled" : "Disabled";
+ int32_t count = watched_stops_[code].count & ~kStopDisabledBit;
+ // Don't print the state of unused breakpoints.
+ if (count != 0) {
+ if (watched_stops_[code].desc) {
+ PrintF("stop %" PRId64 " - 0x%" PRIx64 " : \t%s, \tcounter = %i, \t%s\n",
+ code, code, state, count, watched_stops_[code].desc);
+ } else {
+ PrintF("stop %" PRId64 " - 0x%" PRIx64 " : \t%s, \tcounter = %i\n", code,
+ code, state, count);
+ }
+ }
+}
+
+void Simulator::SignalException(Exception e) {
+ FATAL("Error: Exception %i raised.", static_cast<int>(e));
+}
+
+// RISCV Instruction Decode Routine
+void Simulator::DecodeRVRType() {
+ switch (instr_.InstructionBits() & kRTypeMask) {
+ case RO_ADD: {
+ set_rd(sext_xlen(rs1() + rs2()));
+ break;
+ }
+ case RO_SUB: {
+ set_rd(sext_xlen(rs1() - rs2()));
+ break;
+ }
+ case RO_SLL: {
+ set_rd(sext_xlen(rs1() << (rs2() & (xlen - 1))));
+ break;
+ }
+ case RO_SLT: {
+ set_rd(sreg_t(rs1()) < sreg_t(rs2()));
+ break;
+ }
+ case RO_SLTU: {
+ set_rd(reg_t(rs1()) < reg_t(rs2()));
+ break;
+ }
+ case RO_XOR: {
+ set_rd(rs1() ^ rs2());
+ break;
+ }
+ case RO_SRL: {
+ set_rd(sext_xlen(zext_xlen(rs1()) >> (rs2() & (xlen - 1))));
+ break;
+ }
+ case RO_SRA: {
+ set_rd(sext_xlen(sext_xlen(rs1()) >> (rs2() & (xlen - 1))));
+ break;
+ }
+ case RO_OR: {
+ set_rd(rs1() | rs2());
+ break;
+ }
+ case RO_AND: {
+ set_rd(rs1() & rs2());
+ break;
+ }
+#ifdef V8_TARGET_ARCH_64_BIT
+ case RO_ADDW: {
+ set_rd(sext32(rs1() + rs2()));
+ break;
+ }
+ case RO_SUBW: {
+ set_rd(sext32(rs1() - rs2()));
+ break;
+ }
+ case RO_SLLW: {
+ set_rd(sext32(rs1() << (rs2() & 0x1F)));
+ break;
+ }
+ case RO_SRLW: {
+ set_rd(sext32(uint32_t(rs1()) >> (rs2() & 0x1F)));
+ break;
+ }
+ case RO_SRAW: {
+ set_rd(sext32(int32_t(rs1()) >> (rs2() & 0x1F)));
+ break;
+ }
+#endif /* V8_TARGET_ARCH_64_BIT */
+ // TODO(riscv): Add RISCV M extension macro
+ case RO_MUL: {
+ set_rd(rs1() * rs2());
+ break;
+ }
+ case RO_MULH: {
+ set_rd(mulh(rs1(), rs2()));
+ break;
+ }
+ case RO_MULHSU: {
+ set_rd(mulhsu(rs1(), rs2()));
+ break;
+ }
+ case RO_MULHU: {
+ set_rd(mulhu(rs1(), rs2()));
+ break;
+ }
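+    // Per the RISC-V M extension, integer division does not trap: division by
+    // zero yields all ones (-1 for DIV, the maximum unsigned value for DIVU),
+    // signed overflow (INT_MIN / -1) yields the dividend, and the REM*
+    // variants return the dividend on division by zero (0 on signed overflow).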
+ case RO_DIV: {
+ sreg_t lhs = sext_xlen(rs1());
+ sreg_t rhs = sext_xlen(rs2());
+ if (rhs == 0) {
+ set_rd(-1);
+ } else if (lhs == INT64_MIN && rhs == -1) {
+ set_rd(lhs);
+ } else {
+ set_rd(sext_xlen(lhs / rhs));
+ }
+ break;
+ }
+ case RO_DIVU: {
+ reg_t lhs = zext_xlen(rs1());
+ reg_t rhs = zext_xlen(rs2());
+ if (rhs == 0) {
+ set_rd(UINT64_MAX);
+ } else {
+ set_rd(zext_xlen(lhs / rhs));
+ }
+ break;
+ }
+ case RO_REM: {
+ sreg_t lhs = sext_xlen(rs1());
+ sreg_t rhs = sext_xlen(rs2());
+ if (rhs == 0) {
+ set_rd(lhs);
+ } else if (lhs == INT64_MIN && rhs == -1) {
+ set_rd(0);
+ } else {
+ set_rd(sext_xlen(lhs % rhs));
+ }
+ break;
+ }
+ case RO_REMU: {
+ reg_t lhs = zext_xlen(rs1());
+ reg_t rhs = zext_xlen(rs2());
+ if (rhs == 0) {
+ set_rd(lhs);
+ } else {
+ set_rd(zext_xlen(lhs % rhs));
+ }
+ break;
+ }
+#ifdef V8_TARGET_ARCH_64_BIT
+ case RO_MULW: {
+ set_rd(sext32(sext32(rs1()) * sext32(rs2())));
+ break;
+ }
+ case RO_DIVW: {
+ sreg_t lhs = sext32(rs1());
+ sreg_t rhs = sext32(rs2());
+ if (rhs == 0) {
+ set_rd(-1);
+ } else if (lhs == INT32_MIN && rhs == -1) {
+ set_rd(lhs);
+ } else {
+ set_rd(sext32(lhs / rhs));
+ }
+ break;
+ }
+ case RO_DIVUW: {
+ reg_t lhs = zext32(rs1());
+ reg_t rhs = zext32(rs2());
+ if (rhs == 0) {
+ set_rd(UINT32_MAX);
+ } else {
+ set_rd(zext32(lhs / rhs));
+ }
+ break;
+ }
+ case RO_REMW: {
+ sreg_t lhs = sext32(rs1());
+ sreg_t rhs = sext32(rs2());
+ if (rhs == 0) {
+ set_rd(lhs);
+ } else if (lhs == INT32_MIN && rhs == -1) {
+ set_rd(0);
+ } else {
+ set_rd(sext32(lhs % rhs));
+ }
+ break;
+ }
+ case RO_REMUW: {
+ reg_t lhs = zext32(rs1());
+ reg_t rhs = zext32(rs2());
+ if (rhs == 0) {
+ set_rd(zext32(lhs));
+ } else {
+ set_rd(zext32(lhs % rhs));
+ }
+ break;
+ }
+#endif /*V8_TARGET_ARCH_64_BIT*/
+ // TODO(riscv): End Add RISCV M extension macro
+ default: {
+ switch (instr_.BaseOpcode()) {
+ case AMO:
+ DecodeRVRAType();
+ break;
+ case OP_FP:
+ DecodeRVRFPType();
+ break;
+ default:
+ UNSUPPORTED();
+ }
+ }
+ }
+}
+
+float Simulator::RoundF2FHelper(float input_val, int rmode) {
+ if (rmode == DYN) rmode = get_dynamic_rounding_mode();
+
+ float rounded = 0;
+ switch (rmode) {
+    case RNE: {  // Round to Nearest, ties to Even
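+      // Worked example (ties-to-even): 2.5 -> floor 2, error 0.5, 2 is even,
+      // so keep 2; 3.5 -> floor 3, error 0.5, 3 is odd, so round up to 4.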
+ rounded = std::floorf(input_val);
+ float error = input_val - rounded;
+
+ // Take care of correctly handling the range [-0.5, -0.0], which must
+ // yield -0.0.
+ if ((-0.5 <= input_val) && (input_val < 0.0)) {
+ rounded = -0.0;
+
+ // If the error is greater than 0.5, or is equal to 0.5 and the integer
+ // result is odd, round up.
+ } else if ((error > 0.5) ||
+ ((error == 0.5) && (std::fmod(rounded, 2) != 0))) {
+ rounded++;
+ }
+ break;
+ }
+ case RTZ: // Round towards Zero
+ rounded = std::truncf(input_val);
+ break;
+ case RDN: // Round Down (towards -infinity)
+ rounded = floorf(input_val);
+ break;
+ case RUP: // Round Up (towards +infinity)
+ rounded = ceilf(input_val);
+ break;
+    case RMM:  // Round to Nearest, ties to Max Magnitude
+ rounded = std::roundf(input_val);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ return rounded;
+}
+
+double Simulator::RoundF2FHelper(double input_val, int rmode) {
+ if (rmode == DYN) rmode = get_dynamic_rounding_mode();
+
+ double rounded = 0;
+ switch (rmode) {
+    case RNE: {  // Round to Nearest, ties to Even
+ rounded = std::floor(input_val);
+ double error = input_val - rounded;
+
+ // Take care of correctly handling the range [-0.5, -0.0], which must
+ // yield -0.0.
+ if ((-0.5 <= input_val) && (input_val < 0.0)) {
+ rounded = -0.0;
+
+ // If the error is greater than 0.5, or is equal to 0.5 and the integer
+ // result is odd, round up.
+ } else if ((error > 0.5) ||
+ ((error == 0.5) && (std::fmod(rounded, 2) != 0))) {
+ rounded++;
+ }
+ break;
+ }
+ case RTZ: // Round towards Zero
+ rounded = std::trunc(input_val);
+ break;
+ case RDN: // Round Down (towards -infinity)
+ rounded = std::floor(input_val);
+ break;
+ case RUP: // Round Up (towards +infinity)
+ rounded = std::ceil(input_val);
+ break;
+    case RMM:  // Round to Nearest, ties to Max Magnitude
+ rounded = std::round(input_val);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return rounded;
+}
+
+// convert rounded floating-point to integer types, handle input values that
+// are out-of-range, underflow, or NaN, and set appropriate fflags
+template <typename I_TYPE, typename F_TYPE>
+I_TYPE Simulator::RoundF2IHelper(F_TYPE original, int rmode) {
+ DCHECK(std::is_integral<I_TYPE>::value);
+
+ DCHECK((std::is_same<F_TYPE, float>::value ||
+ std::is_same<F_TYPE, double>::value));
+
+ I_TYPE max_i = std::numeric_limits<I_TYPE>::max();
+ I_TYPE min_i = std::numeric_limits<I_TYPE>::min();
+
+ if (!std::isfinite(original)) {
+ set_fflags(kInvalidOperation);
+ if (std::isnan(original) ||
+ original == std::numeric_limits<F_TYPE>::infinity()) {
+ return max_i;
+ } else {
+ DCHECK(original == -std::numeric_limits<F_TYPE>::infinity());
+ return min_i;
+ }
+ }
+
+ F_TYPE rounded = RoundF2FHelper(original, rmode);
+ if (original != rounded) set_fflags(kInexact);
+
+ if (!std::isfinite(rounded)) {
+ set_fflags(kInvalidOperation);
+ if (std::isnan(rounded) ||
+ rounded == std::numeric_limits<F_TYPE>::infinity()) {
+ return max_i;
+ } else {
+ DCHECK(rounded == -std::numeric_limits<F_TYPE>::infinity());
+ return min_i;
+ }
+ }
+
+  // Since integer max values are either all 1s (for unsigned) or all 1s
+  // except for the sign bit (for signed), they may not be representable
+  // exactly in floating point. To tell precisely whether the rounded
+  // floating-point value is within the max range, we compare against
+  // (max_i + 1), which has a single 1 followed by trailing zeros and is
+  // therefore exactly representable.
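+  // For example (int32_t): max_i = 2^31 - 1 is not exact in float, but
+  // max_i + 1 = 2^31 is, so "rounded >= 2^31" cleanly detects overflow.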
+ float max_i_plus_1 =
+ std::is_same<uint64_t, I_TYPE>::value
+ ? 0x1p64f // uint64_t::max + 1 cannot be represented in integers,
+ // so use its float representation directly
+ : static_cast<float>(static_cast<uint64_t>(max_i) + 1);
+ if (rounded >= max_i_plus_1) {
+ set_fflags(kOverflow | kInvalidOperation);
+ return max_i;
+ }
+
+  // Since min_i (either 0 for unsigned, or -2^(n-1) for signed types) is
+  // represented exactly in floating point, we can compare rounded directly
+  // against min_i.
+ if (rounded <= min_i) {
+ if (rounded < min_i) set_fflags(kOverflow | kInvalidOperation);
+ return min_i;
+ }
+
+ F_TYPE underflow_fval =
+ std::is_same<F_TYPE, float>::value ? FLT_MIN : DBL_MIN;
+ if (rounded < underflow_fval && rounded > -underflow_fval && rounded != 0) {
+ set_fflags(kUnderflow);
+ }
+
+ return static_cast<I_TYPE>(rounded);
+}
+
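+// FCLASS.S/FCLASS.D produce a one-hot classification mask; the k* category
+// constants returned here are assumed to correspond to those RISC-V classes.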
+template <typename T>
+static int64_t FclassHelper(T value) {
+ switch (std::fpclassify(value)) {
+ case FP_INFINITE:
+ return (std::signbit(value) ? kNegativeInfinity : kPositiveInfinity);
+ case FP_NAN:
+ return (isSnan(value) ? kSignalingNaN : kQuietNaN);
+ case FP_NORMAL:
+ return (std::signbit(value) ? kNegativeNormalNumber
+ : kPositiveNormalNumber);
+ case FP_SUBNORMAL:
+ return (std::signbit(value) ? kNegativeSubnormalNumber
+ : kPositiveSubnormalNumber);
+ case FP_ZERO:
+ return (std::signbit(value) ? kNegativeZero : kPositiveZero);
+ default:
+ UNREACHABLE();
+ }
+}
+
+template <typename T>
+bool Simulator::CompareFHelper(T input1, T input2, FPUCondition cc) {
+ DCHECK(std::is_floating_point<T>::value);
+ bool result = false;
+ switch (cc) {
+ case LT:
+ case LE:
+ // FLT, FLE are signaling compares
+ if (std::isnan(input1) || std::isnan(input2)) {
+ set_fflags(kInvalidOperation);
+ result = false;
+ } else {
+ result = (cc == LT) ? (input1 < input2) : (input1 <= input2);
+ }
+ break;
+
+ case EQ:
+ if (std::numeric_limits<T>::signaling_NaN() == input1 ||
+ std::numeric_limits<T>::signaling_NaN() == input2) {
+ set_fflags(kInvalidOperation);
+ }
+ if (std::isnan(input1) || std::isnan(input2)) {
+ result = false;
+ } else {
+ result = (input1 == input2);
+ }
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ return result;
+}
+
+template <typename T>
+static inline bool is_invalid_fmul(T src1, T src2) {
+ return (isinf(src1) && src2 == static_cast<T>(0.0)) ||
+ (src1 == static_cast<T>(0.0) && isinf(src2));
+}
+
+template <typename T>
+static inline bool is_invalid_fadd(T src1, T src2) {
+ return (isinf(src1) && isinf(src2) &&
+ std::signbit(src1) != std::signbit(src2));
+}
+
+template <typename T>
+static inline bool is_invalid_fsub(T src1, T src2) {
+ return (isinf(src1) && isinf(src2) &&
+ std::signbit(src1) == std::signbit(src2));
+}
+
+template <typename T>
+static inline bool is_invalid_fdiv(T src1, T src2) {
+ return ((src1 == 0 && src2 == 0) || (isinf(src1) && isinf(src2)));
+}
+
+template <typename T>
+static inline bool is_invalid_fsqrt(T src1) {
+ return (src1 < 0);
+}
+
+void Simulator::DecodeRVRAType() {
+ // TODO(riscv): Add macro for RISCV A extension
+  // Special handling for A-extension instructions because they use func5.
+  // For all A-extension instructions, the V8 simulator is purely sequential:
+  // there is no memory-address locking or other synchronization behavior.
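+  // LR/SC are emulated via the local and global monitors: LR records a
+  // reservation for the address, and SC performs the store and writes 0 to rd
+  // only if that reservation is still valid; otherwise it writes 1 and does
+  // not store.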
+ switch (instr_.InstructionBits() & kRATypeMask) {
+ case RO_LR_W: {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ int64_t addr = rs1();
+ auto val = ReadMem<int32_t>(addr, instr_.instr());
+ set_rd(sext32(val), false);
+ TraceMemRd(addr, val, get_register(rd_reg()));
+ local_monitor_.NotifyLoadLinked(addr, TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(addr,
+ &global_monitor_thread_);
+ break;
+ }
+ case RO_SC_W: {
+ int64_t addr = rs1();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ if (local_monitor_.NotifyStoreConditional(addr, TransactionSize::Word) &&
+ GlobalMonitor::Get()->NotifyStoreConditional_Locked(
+ addr, &global_monitor_thread_)) {
+ local_monitor_.NotifyStore();
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ WriteMem<int32_t>(rs1(), (int32_t)rs2(), instr_.instr());
+ set_rd(0, false);
+ } else {
+ set_rd(1, false);
+ }
+ break;
+ }
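+    // The AMO* cases below use the amo<T> helper with a lambda computing the
+    // new memory value from the old one; per the RISC-V A extension, rd
+    // receives the original (for the *_W forms, sign-extended) memory value.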
+ case RO_AMOSWAP_W: {
+ set_rd(sext32(amo<uint32_t>(
+ rs1(), [&](uint32_t lhs) { return (uint32_t)rs2(); }, instr_.instr(),
+ WORD)));
+ break;
+ }
+ case RO_AMOADD_W: {
+ set_rd(sext32(amo<uint32_t>(
+ rs1(), [&](uint32_t lhs) { return lhs + (uint32_t)rs2(); },
+ instr_.instr(), WORD)));
+ break;
+ }
+ case RO_AMOXOR_W: {
+ set_rd(sext32(amo<uint32_t>(
+ rs1(), [&](uint32_t lhs) { return lhs ^ (uint32_t)rs2(); },
+ instr_.instr(), WORD)));
+ break;
+ }
+ case RO_AMOAND_W: {
+ set_rd(sext32(amo<uint32_t>(
+ rs1(), [&](uint32_t lhs) { return lhs & (uint32_t)rs2(); },
+ instr_.instr(), WORD)));
+ break;
+ }
+ case RO_AMOOR_W: {
+ set_rd(sext32(amo<uint32_t>(
+ rs1(), [&](uint32_t lhs) { return lhs | (uint32_t)rs2(); },
+ instr_.instr(), WORD)));
+ break;
+ }
+ case RO_AMOMIN_W: {
+ set_rd(sext32(amo<int32_t>(
+ rs1(), [&](int32_t lhs) { return std::min(lhs, (int32_t)rs2()); },
+ instr_.instr(), WORD)));
+ break;
+ }
+ case RO_AMOMAX_W: {
+ set_rd(sext32(amo<int32_t>(
+ rs1(), [&](int32_t lhs) { return std::max(lhs, (int32_t)rs2()); },
+ instr_.instr(), WORD)));
+ break;
+ }
+ case RO_AMOMINU_W: {
+ set_rd(sext32(amo<uint32_t>(
+ rs1(), [&](uint32_t lhs) { return std::min(lhs, (uint32_t)rs2()); },
+ instr_.instr(), WORD)));
+ break;
+ }
+ case RO_AMOMAXU_W: {
+ set_rd(sext32(amo<uint32_t>(
+ rs1(), [&](uint32_t lhs) { return std::max(lhs, (uint32_t)rs2()); },
+ instr_.instr(), WORD)));
+ break;
+ }
+#ifdef V8_TARGET_ARCH_64_BIT
+ case RO_LR_D: {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ int64_t addr = rs1();
+ auto val = ReadMem<int64_t>(addr, instr_.instr());
+ set_rd(val, false);
+ TraceMemRd(addr, val, get_register(rd_reg()));
+ local_monitor_.NotifyLoadLinked(addr, TransactionSize::DoubleWord);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(addr,
+ &global_monitor_thread_);
+ break;
+ }
+ case RO_SC_D: {
+ int64_t addr = rs1();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ if (local_monitor_.NotifyStoreConditional(addr,
+ TransactionSize::DoubleWord) &&
+ (GlobalMonitor::Get()->NotifyStoreConditional_Locked(
+ addr, &global_monitor_thread_))) {
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ WriteMem<int64_t>(rs1(), rs2(), instr_.instr());
+ set_rd(0, false);
+ } else {
+ set_rd(1, false);
+ }
+ break;
+ }
+ case RO_AMOSWAP_D: {
+ set_rd(amo<int64_t>(
+ rs1(), [&](int64_t lhs) { return rs2(); }, instr_.instr(), DWORD));
+ break;
+ }
+ case RO_AMOADD_D: {
+ set_rd(amo<int64_t>(
+ rs1(), [&](int64_t lhs) { return lhs + rs2(); }, instr_.instr(),
+ DWORD));
+ break;
+ }
+ case RO_AMOXOR_D: {
+ set_rd(amo<int64_t>(
+ rs1(), [&](int64_t lhs) { return lhs ^ rs2(); }, instr_.instr(),
+ DWORD));
+ break;
+ }
+ case RO_AMOAND_D: {
+ set_rd(amo<int64_t>(
+ rs1(), [&](int64_t lhs) { return lhs & rs2(); }, instr_.instr(),
+ DWORD));
+ break;
+ }
+ case RO_AMOOR_D: {
+ set_rd(amo<int64_t>(
+ rs1(), [&](int64_t lhs) { return lhs | rs2(); }, instr_.instr(),
+ DWORD));
+ break;
+ }
+ case RO_AMOMIN_D: {
+ set_rd(amo<int64_t>(
+ rs1(), [&](int64_t lhs) { return std::min(lhs, rs2()); },
+ instr_.instr(), DWORD));
+ break;
+ }
+ case RO_AMOMAX_D: {
+ set_rd(amo<int64_t>(
+ rs1(), [&](int64_t lhs) { return std::max(lhs, rs2()); },
+ instr_.instr(), DWORD));
+ break;
+ }
+ case RO_AMOMINU_D: {
+ set_rd(amo<uint64_t>(
+ rs1(), [&](uint64_t lhs) { return std::min(lhs, (uint64_t)rs2()); },
+ instr_.instr(), DWORD));
+ break;
+ }
+ case RO_AMOMAXU_D: {
+ set_rd(amo<uint64_t>(
+ rs1(), [&](uint64_t lhs) { return std::max(lhs, (uint64_t)rs2()); },
+ instr_.instr(), DWORD));
+ break;
+ }
+#endif /*V8_TARGET_ARCH_64_BIT*/
+ // TODO(riscv): End Add macro for RISCV A extension
+ default: {
+ UNSUPPORTED();
+ }
+ }
+}
+
+void Simulator::DecodeRVRFPType() {
+  // OP_FP instructions (F/D) use func7 first. Some further use func3 and
+  // rs2().
+
+  // kRFPTypeMask covers only func7.
+ switch (instr_.InstructionBits() & kRFPTypeMask) {
+ // TODO(riscv): Add macro for RISCV F extension
+ case RO_FADD_S: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](float frs1, float frs2) {
+ if (is_invalid_fadd(frs1, frs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else {
+ return frs1 + frs2;
+ }
+ };
+ set_frd(CanonicalizeFPUOp2<float>(fn));
+ break;
+ }
+ case RO_FSUB_S: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](float frs1, float frs2) {
+ if (is_invalid_fsub(frs1, frs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else {
+ return frs1 - frs2;
+ }
+ };
+ set_frd(CanonicalizeFPUOp2<float>(fn));
+ break;
+ }
+ case RO_FMUL_S: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](float frs1, float frs2) {
+ if (is_invalid_fmul(frs1, frs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else {
+ return frs1 * frs2;
+ }
+ };
+ set_frd(CanonicalizeFPUOp2<float>(fn));
+ break;
+ }
+ case RO_FDIV_S: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](float frs1, float frs2) {
+ if (is_invalid_fdiv(frs1, frs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else if (frs2 == 0.0f) {
+ this->set_fflags(kDivideByZero);
+ return (std::signbit(frs1) == std::signbit(frs2)
+ ? std::numeric_limits<float>::infinity()
+ : -std::numeric_limits<float>::infinity());
+ } else {
+ return frs1 / frs2;
+ }
+ };
+ set_frd(CanonicalizeFPUOp2<float>(fn));
+ break;
+ }
+ case RO_FSQRT_S: {
+ if (instr_.Rs2Value() == 0b00000) {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](float frs) {
+ if (is_invalid_fsqrt(frs)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else {
+ return std::sqrt(frs);
+ }
+ };
+ set_frd(CanonicalizeFPUOp1<float>(fn));
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ case RO_FSGNJ_S: { // RO_FSGNJN_S RO_FSQNJX_S
+ switch (instr_.Funct3Value()) {
+ case 0b000: { // RO_FSGNJ_S
+ set_frd(fsgnj32(frs1(), frs2(), false, false));
+ break;
+ }
+ case 0b001: { // RO_FSGNJN_S
+ set_frd(fsgnj32(frs1(), frs2(), true, false));
+ break;
+ }
+ case 0b010: { // RO_FSQNJX_S
+ set_frd(fsgnj32(frs1(), frs2(), false, true));
+ break;
+ }
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+ case RO_FMIN_S: { // RO_FMAX_S
+ switch (instr_.Funct3Value()) {
+ case 0b000: { // RO_FMIN_S
+ set_frd(FMaxMinHelper(frs1(), frs2(), MaxMinKind::kMin));
+ break;
+ }
+ case 0b001: { // RO_FMAX_S
+ set_frd(FMaxMinHelper(frs1(), frs2(), MaxMinKind::kMax));
+ break;
+ }
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+ case RO_FCVT_W_S: { // RO_FCVT_WU_S , 64F RO_FCVT_L_S RO_FCVT_LU_S
+ float original_val = frs1();
+ switch (instr_.Rs2Value()) {
+ case 0b00000: { // RO_FCVT_W_S
+ set_rd(RoundF2IHelper<int32_t>(original_val, instr_.RoundMode()));
+ break;
+ }
+ case 0b00001: { // RO_FCVT_WU_S
+ set_rd(RoundF2IHelper<uint32_t>(original_val, instr_.RoundMode()));
+ break;
+ }
+#ifdef V8_TARGET_ARCH_64_BIT
+ case 0b00010: { // RO_FCVT_L_S
+ set_rd(RoundF2IHelper<int64_t>(original_val, instr_.RoundMode()));
+ break;
+ }
+ case 0b00011: { // RO_FCVT_LU_S
+ set_rd(RoundF2IHelper<uint64_t>(original_val, instr_.RoundMode()));
+ break;
+ }
+#endif /* V8_TARGET_ARCH_64_BIT */
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+ case RO_FMV: { // RO_FCLASS_S
+ switch (instr_.Funct3Value()) {
+ case 0b000: {
+ if (instr_.Rs2Value() == 0b00000) {
+ // RO_FMV_X_W
+ set_rd(sext_xlen(get_fpu_register_word(rs1_reg())));
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ case 0b001: { // RO_FCLASS_S
+ set_rd(FclassHelper(frs1()));
+ break;
+ }
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+ // TODO(RISCV): Implement handling of NaN (quiet and signalling).
+ case RO_FLE_S: { // RO_FEQ_S RO_FLT_S RO_FLE_S
+ switch (instr_.Funct3Value()) {
+ case 0b010: { // RO_FEQ_S
+ set_rd(CompareFHelper(frs1(), frs2(), EQ));
+ break;
+ }
+ case 0b001: { // RO_FLT_S
+ set_rd(CompareFHelper(frs1(), frs2(), LT));
+ break;
+ }
+ case 0b000: { // RO_FLE_S
+ set_rd(CompareFHelper(frs1(), frs2(), LE));
+ break;
+ }
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+ case RO_FCVT_S_W: { // RO_FCVT_S_WU , 64F RO_FCVT_S_L RO_FCVT_S_LU
+ switch (instr_.Rs2Value()) {
+ case 0b00000: { // RO_FCVT_S_W
+ set_frd(static_cast<float>((int32_t)rs1()));
+ break;
+ }
+ case 0b00001: { // RO_FCVT_S_WU
+ set_frd(static_cast<float>((uint32_t)rs1()));
+ break;
+ }
+#ifdef V8_TARGET_ARCH_64_BIT
+ case 0b00010: { // RO_FCVT_S_L
+ set_frd(static_cast<float>((int64_t)rs1()));
+ break;
+ }
+ case 0b00011: { // RO_FCVT_S_LU
+ set_frd(static_cast<float>((uint64_t)rs1()));
+ break;
+ }
+#endif /* V8_TARGET_ARCH_64_BIT */
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+ case RO_FMV_W_X: {
+ if (instr_.Funct3Value() == 0b000) {
+        // Since FMV preserves the source bit pattern, no need to canonicalize.
+ set_frd(bit_cast<float>((uint32_t)rs1()));
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ // TODO(riscv): Add macro for RISCV D extension
+ case RO_FADD_D: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double drs1, double drs2) {
+ if (is_invalid_fadd(drs1, drs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return drs1 + drs2;
+ }
+ };
+ set_drd(CanonicalizeFPUOp2<double>(fn));
+ break;
+ }
+ case RO_FSUB_D: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double drs1, double drs2) {
+ if (is_invalid_fsub(drs1, drs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return drs1 - drs2;
+ }
+ };
+ set_drd(CanonicalizeFPUOp2<double>(fn));
+ break;
+ }
+ case RO_FMUL_D: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double drs1, double drs2) {
+ if (is_invalid_fmul(drs1, drs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return drs1 * drs2;
+ }
+ };
+ set_drd(CanonicalizeFPUOp2<double>(fn));
+ break;
+ }
+ case RO_FDIV_D: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double drs1, double drs2) {
+ if (is_invalid_fdiv(drs1, drs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else if (drs2 == 0.0) {
+ this->set_fflags(kDivideByZero);
+ return (std::signbit(drs1) == std::signbit(drs2)
+ ? std::numeric_limits<double>::infinity()
+ : -std::numeric_limits<double>::infinity());
+ } else {
+ return drs1 / drs2;
+ }
+ };
+ set_drd(CanonicalizeFPUOp2<double>(fn));
+ break;
+ }
+ case RO_FSQRT_D: {
+ if (instr_.Rs2Value() == 0b00000) {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double drs) {
+ if (is_invalid_fsqrt(drs)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return std::sqrt(drs);
+ }
+ };
+ set_drd(CanonicalizeFPUOp1<double>(fn));
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ case RO_FSGNJ_D: { // RO_FSGNJN_D RO_FSQNJX_D
+ switch (instr_.Funct3Value()) {
+ case 0b000: { // RO_FSGNJ_D
+ set_drd(fsgnj64(drs1(), drs2(), false, false));
+ break;
+ }
+ case 0b001: { // RO_FSGNJN_D
+ set_drd(fsgnj64(drs1(), drs2(), true, false));
+ break;
+ }
+ case 0b010: { // RO_FSQNJX_D
+ set_drd(fsgnj64(drs1(), drs2(), false, true));
+ break;
+ }
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+ case RO_FMIN_D: { // RO_FMAX_D
+ switch (instr_.Funct3Value()) {
+ case 0b000: { // RO_FMIN_D
+ set_drd(FMaxMinHelper(drs1(), drs2(), MaxMinKind::kMin));
+ break;
+ }
+ case 0b001: { // RO_FMAX_D
+ set_drd(FMaxMinHelper(drs1(), drs2(), MaxMinKind::kMax));
+ break;
+ }
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+ case (RO_FCVT_S_D & kRFPTypeMask): {
+ if (instr_.Rs2Value() == 0b00001) {
+ auto fn = [](double drs) { return static_cast<float>(drs); };
+ set_frd(CanonicalizeDoubleToFloatOperation(fn));
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ case RO_FCVT_D_S: {
+ if (instr_.Rs2Value() == 0b00000) {
+ auto fn = [](float frs) { return static_cast<double>(frs); };
+ set_drd(CanonicalizeFloatToDoubleOperation(fn));
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ case RO_FLE_D: { // RO_FEQ_D RO_FLT_D RO_FLE_D
+ switch (instr_.Funct3Value()) {
+      case 0b010: {  // RO_FEQ_D
+ set_rd(CompareFHelper(drs1(), drs2(), EQ));
+ break;
+ }
+ case 0b001: { // RO_FLT_D
+ set_rd(CompareFHelper(drs1(), drs2(), LT));
+ break;
+ }
+ case 0b000: { // RO_FLE_D
+ set_rd(CompareFHelper(drs1(), drs2(), LE));
+ break;
+ }
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+ case (RO_FCLASS_D & kRFPTypeMask): { // RO_FCLASS_D , 64D RO_FMV_X_D
+ if (instr_.Rs2Value() != 0b00000) {
+ UNSUPPORTED();
+ break;
+ }
+ switch (instr_.Funct3Value()) {
+ case 0b001: { // RO_FCLASS_D
+ set_rd(FclassHelper(drs1()));
+ break;
+ }
+#ifdef V8_TARGET_ARCH_64_BIT
+ case 0b000: { // RO_FMV_X_D
+ set_rd(bit_cast<int64_t>(drs1()));
+ break;
+ }
+#endif /* V8_TARGET_ARCH_64_BIT */
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+ case RO_FCVT_W_D: { // RO_FCVT_WU_D , 64F RO_FCVT_L_D RO_FCVT_LU_D
+ double original_val = drs1();
+ switch (instr_.Rs2Value()) {
+ case 0b00000: { // RO_FCVT_W_D
+ set_rd(RoundF2IHelper<int32_t>(original_val, instr_.RoundMode()));
+ break;
+ }
+ case 0b00001: { // RO_FCVT_WU_D
+ set_rd(RoundF2IHelper<uint32_t>(original_val, instr_.RoundMode()));
+ break;
+ }
+#ifdef V8_TARGET_ARCH_64_BIT
+ case 0b00010: { // RO_FCVT_L_D
+ set_rd(RoundF2IHelper<int64_t>(original_val, instr_.RoundMode()));
+ break;
+ }
+ case 0b00011: { // RO_FCVT_LU_D
+ set_rd(RoundF2IHelper<uint64_t>(original_val, instr_.RoundMode()));
+ break;
+ }
+#endif /* V8_TARGET_ARCH_64_BIT */
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+ case RO_FCVT_D_W: { // RO_FCVT_D_WU , 64F RO_FCVT_D_L RO_FCVT_D_LU
+ switch (instr_.Rs2Value()) {
+ case 0b00000: { // RO_FCVT_D_W
+ set_drd((int32_t)rs1());
+ break;
+ }
+ case 0b00001: { // RO_FCVT_D_WU
+ set_drd((uint32_t)rs1());
+ break;
+ }
+#ifdef V8_TARGET_ARCH_64_BIT
+ case 0b00010: { // RO_FCVT_D_L
+ set_drd((int64_t)rs1());
+ break;
+ }
+ case 0b00011: { // RO_FCVT_D_LU
+ set_drd((uint64_t)rs1());
+ break;
+ }
+#endif /* V8_TARGET_ARCH_64_BIT */
+ default: {
+ UNSUPPORTED();
+ }
+ }
+ break;
+ }
+#ifdef V8_TARGET_ARCH_64_BIT
+ case RO_FMV_D_X: {
+ if (instr_.Funct3Value() == 0b000 && instr_.Rs2Value() == 0b00000) {
+        // Since FMV preserves the source bit pattern, no need to canonicalize.
+ set_drd(bit_cast<double>(rs1()));
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+#endif /* V8_TARGET_ARCH_64_BIT */
+ default: {
+ UNSUPPORTED();
+ }
+ }
+}
+
+void Simulator::DecodeRVR4Type() {
+ switch (instr_.InstructionBits() & kR4TypeMask) {
+ // TODO(riscv): use F Extension macro block
+ case RO_FMADD_S: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](float frs1, float frs2, float frs3) {
+ if (is_invalid_fmul(frs1, frs2) || is_invalid_fadd(frs1 * frs2, frs3)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else {
+ return std::fma(frs1, frs2, frs3);
+ }
+ };
+ set_frd(CanonicalizeFPUOp3<float>(fn));
+ break;
+ }
+ case RO_FMSUB_S: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](float frs1, float frs2, float frs3) {
+ if (is_invalid_fmul(frs1, frs2) || is_invalid_fsub(frs1 * frs2, frs3)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else {
+ return std::fma(frs1, frs2, -frs3);
+ }
+ };
+ set_frd(CanonicalizeFPUOp3<float>(fn));
+ break;
+ }
+ case RO_FNMSUB_S: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](float frs1, float frs2, float frs3) {
+ if (is_invalid_fmul(frs1, frs2) || is_invalid_fsub(frs3, frs1 * frs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else {
+ return -std::fma(frs1, frs2, -frs3);
+ }
+ };
+ set_frd(CanonicalizeFPUOp3<float>(fn));
+ break;
+ }
+ case RO_FNMADD_S: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](float frs1, float frs2, float frs3) {
+ if (is_invalid_fmul(frs1, frs2) || is_invalid_fadd(frs1 * frs2, frs3)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else {
+ return -std::fma(frs1, frs2, frs3);
+ }
+ };
+ set_frd(CanonicalizeFPUOp3<float>(fn));
+ break;
+ }
+ // TODO(riscv): use F Extension macro block
+ case RO_FMADD_D: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double drs1, double drs2, double drs3) {
+ if (is_invalid_fmul(drs1, drs2) || is_invalid_fadd(drs1 * drs2, drs3)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return std::fma(drs1, drs2, drs3);
+ }
+ };
+ set_drd(CanonicalizeFPUOp3<double>(fn));
+ break;
+ }
+ case RO_FMSUB_D: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double drs1, double drs2, double drs3) {
+ if (is_invalid_fmul(drs1, drs2) || is_invalid_fsub(drs1 * drs2, drs3)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return std::fma(drs1, drs2, -drs3);
+ }
+ };
+ set_drd(CanonicalizeFPUOp3<double>(fn));
+ break;
+ }
+ case RO_FNMSUB_D: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double drs1, double drs2, double drs3) {
+ if (is_invalid_fmul(drs1, drs2) || is_invalid_fsub(drs3, drs1 * drs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return -std::fma(drs1, drs2, -drs3);
+ }
+ };
+ set_drd(CanonicalizeFPUOp3<double>(fn));
+ break;
+ }
+ case RO_FNMADD_D: {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double drs1, double drs2, double drs3) {
+ if (is_invalid_fmul(drs1, drs2) || is_invalid_fadd(drs1 * drs2, drs3)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return -std::fma(drs1, drs2, drs3);
+ }
+ };
+ set_drd(CanonicalizeFPUOp3<double>(fn));
+ break;
+ }
+ default:
+ UNSUPPORTED();
+ }
+}
+
+void Simulator::DecodeRVIType() {
+ switch (instr_.InstructionBits() & kITypeMask) {
+ case RO_JALR: {
+ set_rd(get_pc() + kInstrSize);
+ // Note: No need to shift 2 for JALR's imm12, but set lowest bit to 0.
+ int64_t next_pc = (rs1() + imm12()) & ~reg_t(1);
+ set_pc(next_pc);
+ break;
+ }
+ case RO_LB: {
+ int64_t addr = rs1() + imm12();
+ int8_t val = ReadMem<int8_t>(addr, instr_.instr());
+ set_rd(sext_xlen(val), false);
+ TraceMemRd(addr, val, get_register(rd_reg()));
+ break;
+ }
+ case RO_LH: {
+ int64_t addr = rs1() + imm12();
+ int16_t val = ReadMem<int16_t>(addr, instr_.instr());
+ set_rd(sext_xlen(val), false);
+ TraceMemRd(addr, val, get_register(rd_reg()));
+ break;
+ }
+ case RO_LW: {
+ int64_t addr = rs1() + imm12();
+ int32_t val = ReadMem<int32_t>(addr, instr_.instr());
+ set_rd(sext_xlen(val), false);
+ TraceMemRd(addr, val, get_register(rd_reg()));
+ break;
+ }
+ case RO_LBU: {
+ int64_t addr = rs1() + imm12();
+ uint8_t val = ReadMem<uint8_t>(addr, instr_.instr());
+ set_rd(zext_xlen(val), false);
+ TraceMemRd(addr, val, get_register(rd_reg()));
+ break;
+ }
+ case RO_LHU: {
+ int64_t addr = rs1() + imm12();
+ uint16_t val = ReadMem<uint16_t>(addr, instr_.instr());
+ set_rd(zext_xlen(val), false);
+ TraceMemRd(addr, val, get_register(rd_reg()));
+ break;
+ }
+#ifdef V8_TARGET_ARCH_64_BIT
+ case RO_LWU: {
+ int64_t addr = rs1() + imm12();
+ uint32_t val = ReadMem<uint32_t>(addr, instr_.instr());
+ set_rd(zext_xlen(val), false);
+ TraceMemRd(addr, val, get_register(rd_reg()));
+ break;
+ }
+ case RO_LD: {
+ int64_t addr = rs1() + imm12();
+ int64_t val = ReadMem<int64_t>(addr, instr_.instr());
+ set_rd(sext_xlen(val), false);
+ TraceMemRd(addr, val, get_register(rd_reg()));
+ break;
+ }
+#endif /*V8_TARGET_ARCH_64_BIT*/
+ case RO_ADDI: {
+ set_rd(sext_xlen(rs1() + imm12()));
+ break;
+ }
+ case RO_SLTI: {
+ set_rd(sreg_t(rs1()) < sreg_t(imm12()));
+ break;
+ }
+ case RO_SLTIU: {
+ set_rd(reg_t(rs1()) < reg_t(imm12()));
+ break;
+ }
+ case RO_XORI: {
+ set_rd(imm12() ^ rs1());
+ break;
+ }
+ case RO_ORI: {
+ set_rd(imm12() | rs1());
+ break;
+ }
+ case RO_ANDI: {
+ set_rd(imm12() & rs1());
+ break;
+ }
+ case RO_SLLI: {
+ require(shamt6() < xlen);
+ set_rd(sext_xlen(rs1() << shamt6()));
+ break;
+ }
+ case RO_SRLI: { // RO_SRAI
+ if (!instr_.IsArithShift()) {
+ require(shamt6() < xlen);
+ set_rd(sext_xlen(zext_xlen(rs1()) >> shamt6()));
+ } else {
+ require(shamt6() < xlen);
+ set_rd(sext_xlen(sext_xlen(rs1()) >> shamt6()));
+ }
+ break;
+ }
+#ifdef V8_TARGET_ARCH_64_BIT
+ case RO_ADDIW: {
+ set_rd(sext32(rs1() + imm12()));
+ break;
+ }
+ case RO_SLLIW: {
+ set_rd(sext32(rs1() << shamt5()));
+ break;
+ }
+ case RO_SRLIW: { // RO_SRAIW
+ if (!instr_.IsArithShift()) {
+ set_rd(sext32(uint32_t(rs1()) >> shamt5()));
+ } else {
+ set_rd(sext32(int32_t(rs1()) >> shamt5()));
+ }
+ break;
+ }
+#endif /*V8_TARGET_ARCH_64_BIT*/
+ case RO_FENCE: {
+      // Do nothing in the simulator.
+ break;
+ }
+ case RO_ECALL: { // RO_EBREAK
+ if (instr_.Imm12Value() == 0) { // ECALL
+ SoftwareInterrupt();
+ } else if (instr_.Imm12Value() == 1) { // EBREAK
+ SoftwareInterrupt();
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ // TODO(riscv): use Zifencei Standard Extension macro block
+ case RO_FENCE_I: {
+ // spike: flush icache.
+ break;
+ }
+ // TODO(riscv): use Zicsr Standard Extension macro block
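+    // Zicsr semantics: CSRRW swaps rs1 into the CSR (the read is skipped when
+    // rd is x0); CSRRS/CSRRC always read the CSR and set/clear the bits given
+    // by rs1, skipping the write when rs1 is x0. The *I forms use a 5-bit
+    // immediate instead of rs1.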
+ case RO_CSRRW: {
+ if (rd_reg() != zero_reg) {
+ set_rd(zext_xlen(read_csr_value(csr_reg())));
+ }
+ write_csr_value(csr_reg(), rs1());
+ break;
+ }
+ case RO_CSRRS: {
+ set_rd(zext_xlen(read_csr_value(csr_reg())));
+ if (rs1_reg() != zero_reg) {
+ set_csr_bits(csr_reg(), rs1());
+ }
+ break;
+ }
+ case RO_CSRRC: {
+ set_rd(zext_xlen(read_csr_value(csr_reg())));
+ if (rs1_reg() != zero_reg) {
+ clear_csr_bits(csr_reg(), rs1());
+ }
+ break;
+ }
+ case RO_CSRRWI: {
+ if (rd_reg() != zero_reg) {
+ set_rd(zext_xlen(read_csr_value(csr_reg())));
+ }
+ write_csr_value(csr_reg(), imm5CSR());
+ break;
+ }
+ case RO_CSRRSI: {
+ set_rd(zext_xlen(read_csr_value(csr_reg())));
+ if (imm5CSR() != 0) {
+ set_csr_bits(csr_reg(), imm5CSR());
+ }
+ break;
+ }
+ case RO_CSRRCI: {
+ set_rd(zext_xlen(read_csr_value(csr_reg())));
+ if (imm5CSR() != 0) {
+ clear_csr_bits(csr_reg(), imm5CSR());
+ }
+ break;
+ }
+ // TODO(riscv): use F Extension macro block
+ case RO_FLW: {
+ int64_t addr = rs1() + imm12();
+ float val = ReadMem<float>(addr, instr_.instr());
+ set_frd(val, false);
+ TraceMemRd(addr, val, get_fpu_register(frd_reg()));
+ break;
+ }
+ // TODO(riscv): use D Extension macro block
+ case RO_FLD: {
+ int64_t addr = rs1() + imm12();
+ double val = ReadMem<double>(addr, instr_.instr());
+ set_drd(val, false);
+ TraceMemRd(addr, val, get_fpu_register(frd_reg()));
+ break;
+ }
+ default:
+ UNSUPPORTED();
+ }
+}
+
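+// Decodes S-type stores: SB/SH/SW/SD and the FSW/FSD floating-point stores
+// write rs2 (or frs2) to memory at rs1 + s_imm12.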
+void Simulator::DecodeRVSType() {
+ switch (instr_.InstructionBits() & kSTypeMask) {
+ case RO_SB:
+ WriteMem<uint8_t>(rs1() + s_imm12(), (uint8_t)rs2(), instr_.instr());
+ break;
+ case RO_SH:
+ WriteMem<uint16_t>(rs1() + s_imm12(), (uint16_t)rs2(), instr_.instr());
+ break;
+ case RO_SW:
+ WriteMem<uint32_t>(rs1() + s_imm12(), (uint32_t)rs2(), instr_.instr());
+ break;
+#ifdef V8_TARGET_ARCH_64_BIT
+ case RO_SD:
+ WriteMem<uint64_t>(rs1() + s_imm12(), (uint64_t)rs2(), instr_.instr());
+ break;
+#endif /*V8_TARGET_ARCH_64_BIT*/
+ // TODO(riscv): use F Extension macro block
+ case RO_FSW: {
+ WriteMem<uint32_t>(rs1() + s_imm12(),
+ (uint32_t)get_fpu_register_word(rs2_reg()),
+ instr_.instr());
+ break;
+ }
+ // TODO(riscv): use D Extension macro block
+ case RO_FSD: {
+ WriteMem<double>(rs1() + s_imm12(), drs2(), instr_.instr());
+ break;
+ }
+ default:
+ UNSUPPORTED();
+ }
+}
+
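+// Decodes B-type conditional branches: a signed or unsigned comparison of rs1
+// and rs2 decides whether the sign-extended branch offset is added to pc.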
+void Simulator::DecodeRVBType() {
+ switch (instr_.InstructionBits() & kBTypeMask) {
+ case RO_BEQ:
+ if (rs1() == rs2()) {
+ int64_t next_pc = get_pc() + boffset();
+ set_pc(next_pc);
+ }
+ break;
+ case RO_BNE:
+ if (rs1() != rs2()) {
+ int64_t next_pc = get_pc() + boffset();
+ set_pc(next_pc);
+ }
+ break;
+ case RO_BLT:
+ if (rs1() < rs2()) {
+ int64_t next_pc = get_pc() + boffset();
+ set_pc(next_pc);
+ }
+ break;
+ case RO_BGE:
+ if (rs1() >= rs2()) {
+ int64_t next_pc = get_pc() + boffset();
+ set_pc(next_pc);
+ }
+ break;
+ case RO_BLTU:
+ if ((reg_t)rs1() < (reg_t)rs2()) {
+ int64_t next_pc = get_pc() + boffset();
+ set_pc(next_pc);
+ }
+ break;
+ case RO_BGEU:
+ if ((reg_t)rs1() >= (reg_t)rs2()) {
+ int64_t next_pc = get_pc() + boffset();
+ set_pc(next_pc);
+ }
+ break;
+ default:
+ UNSUPPORTED();
+ }
+}
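+// Decodes U-type instructions: LUI places the 20-bit immediate in the upper
+// bits of rd, AUIPC adds it to the current pc.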
+void Simulator::DecodeRVUType() {
+  // U Type doesn't have additional mask
+ switch (instr_.BaseOpcodeFieldRaw()) {
+ case RO_LUI:
+ set_rd(u_imm20());
+ break;
+ case RO_AUIPC:
+ set_rd(sext_xlen(u_imm20() + get_pc()));
+ break;
+ default:
+ UNSUPPORTED();
+ }
+}
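+// Decodes the J-type JAL instruction: the return address (pc + 4) goes into
+// rd and control transfers to pc + imm20.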
+void Simulator::DecodeRVJType() {
+ // J Type doesn't have additional mask
+ switch (instr_.BaseOpcodeValue()) {
+ case RO_JAL: {
+ set_rd(get_pc() + kInstrSize);
+ int64_t next_pc = get_pc() + imm20J();
+ set_pc(next_pc);
+ break;
+ }
+ default:
+ UNSUPPORTED();
+ }
+}
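+// Decodes compressed CR-format instructions (c.jr, c.mv, c.ebreak, c.jalr,
+// c.add), distinguished by funct4 and the rs1/rs2 fields.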
+void Simulator::DecodeCRType() {
+ switch (instr_.RvcFunct4Value()) {
+ case 0b1000:
+ if (instr_.RvcRs1Value() != 0 && instr_.RvcRs2Value() == 0) { // c.jr
+ set_pc(rvc_rs1());
+ } else if (instr_.RvcRdValue() != 0 &&
+ instr_.RvcRs2Value() != 0) { // c.mv
+ set_rvc_rd(sext_xlen(rvc_rs2()));
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ case 0b1001:
+ if (instr_.RvcRs1Value() == 0 && instr_.RvcRs2Value() == 0) { // c.ebreak
+ RiscvDebugger dbg(this);
+ dbg.Debug();
+ } else if (instr_.RvcRdValue() != 0 &&
+ instr_.RvcRs2Value() == 0) { // c.jalr
+ set_register(ra, get_pc() + kShortInstrSize);
+ set_pc(rvc_rs1());
+ } else if (instr_.RvcRdValue() != 0 &&
+ instr_.RvcRs2Value() != 0) { // c.add
+ set_rvc_rd(sext_xlen(rvc_rs1() + rvc_rs2()));
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ default:
+ UNSUPPORTED();
+ }
+}
+
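+// Decodes compressed CA-format register-register ALU ops (c.sub, c.xor,
+// c.or, c.and, c.subw, c.addw) on the x8-x15 register subset.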
+void Simulator::DecodeCAType() {
+ switch (instr_.InstructionBits() & kCATypeMask) {
+ case RO_C_SUB:
+ set_rvc_rs1s(sext_xlen(rvc_rs1s() - rvc_rs2s()));
+ break;
+ case RO_C_XOR:
+ set_rvc_rs1s(sext_xlen(rvc_rs1s() ^ rvc_rs2s()));
+ break;
+ case RO_C_OR:
+ set_rvc_rs1s(sext_xlen(rvc_rs1s() | rvc_rs2s()));
+ break;
+ case RO_C_AND:
+ set_rvc_rs1s(sext_xlen(rvc_rs1s() & rvc_rs2s()));
+ break;
+ case RO_C_SUBW:
+ set_rvc_rs1s(sext32(rvc_rs1s() - rvc_rs2s()));
+ break;
+ case RO_C_ADDW:
+ set_rvc_rs1s(sext32(rvc_rs1s() + rvc_rs2s()));
+ break;
+ default:
+ UNSUPPORTED();
+ }
+}
+
+void Simulator::DecodeCIType() {
+ switch (instr_.RvcOpcode()) {
+ case RO_C_NOP_ADDI:
+ if (instr_.RvcRdValue() == 0) // c.nop
+ break;
+ else // c.addi
+ set_rvc_rd(sext_xlen(rvc_rs1() + rvc_imm6()));
+ break;
+ case RO_C_ADDIW:
+ set_rvc_rd(sext32(rvc_rs1() + rvc_imm6()));
+ break;
+ case RO_C_LI:
+ set_rvc_rd(sext_xlen(rvc_imm6()));
+ break;
+ case RO_C_LUI_ADD:
+ if (instr_.RvcRdValue() == 2) {
+ // c.addi16sp
+ int64_t value = get_register(sp) + rvc_imm6_addi16sp();
+ set_register(sp, value);
+ } else if (instr_.RvcRdValue() != 0 && instr_.RvcRdValue() != 2) {
+ // c.lui
+ set_rvc_rd(rvc_u_imm6());
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ case RO_C_SLLI:
+ set_rvc_rd(sext_xlen(rvc_rs1() << rvc_shamt6()));
+ break;
+ case RO_C_FLDSP: {
+ int64_t addr = get_register(sp) + rvc_imm6_ldsp();
+ double val = ReadMem<double>(addr, instr_.instr());
+ set_rvc_drd(val, false);
+ TraceMemRd(addr, val, get_fpu_register(rvc_frd_reg()));
+ break;
+ }
+ case RO_C_LWSP: {
+ int64_t addr = get_register(sp) + rvc_imm6_lwsp();
+ int64_t val = ReadMem<int32_t>(addr, instr_.instr());
+ set_rvc_rd(sext_xlen(val), false);
+ TraceMemRd(addr, val, get_register(rvc_rd_reg()));
+ break;
+ }
+ case RO_C_LDSP: {
+ int64_t addr = get_register(sp) + rvc_imm6_ldsp();
+ int64_t val = ReadMem<int64_t>(addr, instr_.instr());
+ set_rvc_rd(sext_xlen(val), false);
+ TraceMemRd(addr, val, get_register(rvc_rd_reg()));
+ break;
+ }
+ default:
+ UNSUPPORTED();
+ }
+}
+
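+// Decodes the CIW-format c.addi4spn, which materializes sp plus a
+// zero-extended, scaled immediate into a compressed destination register.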
+void Simulator::DecodeCIWType() {
+ switch (instr_.RvcOpcode()) {
+ case RO_C_ADDI4SPN: {
+ set_rvc_rs2s(get_register(sp) + rvc_imm8_addi4spn());
+      break;
+    }
+    default:
+      UNSUPPORTED();
+  }
+}
+
+void Simulator::DecodeCSSType() {
+ switch (instr_.RvcOpcode()) {
+ case RO_C_FSDSP: {
+ int64_t addr = get_register(sp) + rvc_imm6_sdsp();
+ WriteMem<double>(addr, static_cast<double>(rvc_drs2()), instr_.instr());
+ break;
+ }
+ case RO_C_SWSP: {
+ int64_t addr = get_register(sp) + rvc_imm6_swsp();
+ WriteMem<int32_t>(addr, (int32_t)rvc_rs2(), instr_.instr());
+ break;
+ }
+ case RO_C_SDSP: {
+ int64_t addr = get_register(sp) + rvc_imm6_sdsp();
+ WriteMem<int64_t>(addr, (int64_t)rvc_rs2(), instr_.instr());
+ break;
+ }
+ default:
+ UNSUPPORTED();
+ }
+}
+
+void Simulator::DecodeCLType() {
+ switch (instr_.RvcOpcode()) {
+ case RO_C_LW: {
+ int64_t addr = rvc_rs1s() + rvc_imm5_w();
+ auto val = ReadMem<int32_t>(addr, instr_.instr());
+ set_rvc_rs2s(sext_xlen(val), false);
+ break;
+ }
+ case RO_C_LD: {
+ int64_t addr = rvc_rs1s() + rvc_imm5_d();
+ auto val = ReadMem<int64_t>(addr, instr_.instr());
+ set_rvc_rs2s(sext_xlen(val), false);
+ break;
+ }
+ case RO_C_FLD: {
+ int64_t addr = rvc_rs1s() + rvc_imm5_d();
+ auto val = ReadMem<double>(addr, instr_.instr());
+ set_rvc_drs2s(sext_xlen(val), false);
+ break;
+ }
+ default:
+ UNSUPPORTED();
+ }
+}
+
+void Simulator::DecodeCSType() {
+ switch (instr_.RvcOpcode()) {
+ case RO_C_SW: {
+ int64_t addr = rvc_rs1s() + rvc_imm5_w();
+ WriteMem<int32_t>(addr, (int32_t)rvc_rs2s(), instr_.instr());
+ break;
+ }
+ case RO_C_SD: {
+ int64_t addr = rvc_rs1s() + rvc_imm5_d();
+ WriteMem<int64_t>(addr, (int64_t)rvc_rs2s(), instr_.instr());
+ break;
+ }
+ case RO_C_FSD: {
+ int64_t addr = rvc_rs1s() + rvc_imm5_d();
+ WriteMem<double>(addr, static_cast<double>(rvc_drs2s()), instr_.instr());
+ break;
+ }
+ default:
+ UNSUPPORTED();
+ }
+}
+
+void Simulator::DecodeCJType() {
+ switch (instr_.RvcOpcode()) {
+ case RO_C_J: {
+ set_pc(get_pc() + instr_.RvcImm11CJValue());
+ break;
+ }
+ default:
+ UNSUPPORTED();
+ }
+}
+
+// Executes the current instruction.
+void Simulator::InstructionDecode(Instruction* instr) {
+ if (v8::internal::FLAG_check_icache) {
+ CheckICache(i_cache(), instr);
+ }
+ pc_modified_ = false;
+
+ v8::internal::EmbeddedVector<char, 256> buffer;
+
+ if (::v8::internal::FLAG_trace_sim) {
+ SNPrintF(trace_buf_, " ");
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
+
+ // PrintF("EXECUTING 0x%08" PRIxPTR " %-44s\n",
+ // reinterpret_cast<intptr_t>(instr), buffer.begin());
+ }
+
+ instr_ = instr;
+ switch (instr_.InstructionType()) {
+ case Instruction::kRType:
+ DecodeRVRType();
+ break;
+ case Instruction::kR4Type:
+ DecodeRVR4Type();
+ break;
+ case Instruction::kIType:
+ DecodeRVIType();
+ break;
+ case Instruction::kSType:
+ DecodeRVSType();
+ break;
+ case Instruction::kBType:
+ DecodeRVBType();
+ break;
+ case Instruction::kUType:
+ DecodeRVUType();
+ break;
+ case Instruction::kJType:
+ DecodeRVJType();
+ break;
+ case Instruction::kCRType:
+ DecodeCRType();
+ break;
+ case Instruction::kCAType:
+ DecodeCAType();
+ break;
+ case Instruction::kCJType:
+ DecodeCJType();
+ break;
+ case Instruction::kCIType:
+ DecodeCIType();
+ break;
+ case Instruction::kCIWType:
+ DecodeCIWType();
+ break;
+ case Instruction::kCSSType:
+ DecodeCSSType();
+ break;
+ case Instruction::kCLType:
+ DecodeCLType();
+ break;
+ case Instruction::kCSType:
+ DecodeCSType();
+ break;
+ default:
+ if (::v8::internal::FLAG_trace_sim) {
+ std::cout << "Unrecognized instruction [@pc=0x" << std::hex
+ << registers_[pc] << "]: 0x" << instr->InstructionBits()
+ << std::endl;
+ }
+ UNSUPPORTED();
+ }
+
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF(" 0x%012" PRIxPTR " %ld %-44s %s\n",
+ reinterpret_cast<intptr_t>(instr), icount_, buffer.begin(),
+ trace_buf_.begin());
+ }
+
+ if (!pc_modified_) {
+ set_register(pc,
+ reinterpret_cast<int64_t>(instr) + instr->InstructionSize());
+ }
+}
+
+void Simulator::Execute() {
+ // Get the PC to simulate. Cannot use the accessor here as we need the
+ // raw PC value and not the one used as input to arithmetic instructions.
+ int64_t program_counter = get_pc();
+ while (program_counter != end_sim_pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+ icount_++;
+ if (icount_ == static_cast<int64_t>(::v8::internal::FLAG_stop_sim_at)) {
+ RiscvDebugger dbg(this);
+ dbg.Debug();
+ } else {
+ InstructionDecode(instr);
+ }
+ CheckBreakpoints();
+ program_counter = get_pc();
+ }
+}
+
+void Simulator::CallInternal(Address entry) {
+ // Adjust JS-based stack limit to C-based stack limit.
+ isolate_->stack_guard()->AdjustStackLimitForSimulator();
+
+ // Prepare to execute the code at entry.
+ set_register(pc, static_cast<int64_t>(entry));
+ // Put down marker for end of simulation. The simulator will stop simulation
+ // when the PC reaches this value. By saving the "end simulation" value into
+ // the LR the simulation stops when returning to this call point.
+ set_register(ra, end_sim_pc);
+
+  // Remember the values of callee-saved registers.
+ int64_t s0_val = get_register(s0);
+ int64_t s1_val = get_register(s1);
+ int64_t s2_val = get_register(s2);
+ int64_t s3_val = get_register(s3);
+ int64_t s4_val = get_register(s4);
+ int64_t s5_val = get_register(s5);
+ int64_t s6_val = get_register(s6);
+ int64_t s7_val = get_register(s7);
+ int64_t gp_val = get_register(gp);
+ int64_t sp_val = get_register(sp);
+ int64_t fp_val = get_register(fp);
+
+ // Set up the callee-saved registers with a known value. To be able to check
+ // that they are preserved properly across JS execution.
+ int64_t callee_saved_value = icount_;
+ set_register(s0, callee_saved_value);
+ set_register(s1, callee_saved_value);
+ set_register(s2, callee_saved_value);
+ set_register(s3, callee_saved_value);
+ set_register(s4, callee_saved_value);
+ set_register(s5, callee_saved_value);
+ set_register(s6, callee_saved_value);
+ set_register(s7, callee_saved_value);
+ set_register(gp, callee_saved_value);
+ set_register(fp, callee_saved_value);
+
+ // Start the simulation.
+ Execute();
+
+ // Check that the callee-saved registers have been preserved.
+ CHECK_EQ(callee_saved_value, get_register(s0));
+ CHECK_EQ(callee_saved_value, get_register(s1));
+ CHECK_EQ(callee_saved_value, get_register(s2));
+ CHECK_EQ(callee_saved_value, get_register(s3));
+ CHECK_EQ(callee_saved_value, get_register(s4));
+ CHECK_EQ(callee_saved_value, get_register(s5));
+ CHECK_EQ(callee_saved_value, get_register(s6));
+ CHECK_EQ(callee_saved_value, get_register(s7));
+ CHECK_EQ(callee_saved_value, get_register(gp));
+ CHECK_EQ(callee_saved_value, get_register(fp));
+
+ // Restore callee-saved registers with the original value.
+ set_register(s0, s0_val);
+ set_register(s1, s1_val);
+ set_register(s2, s2_val);
+ set_register(s3, s3_val);
+ set_register(s4, s4_val);
+ set_register(s5, s5_val);
+ set_register(s6, s6_val);
+ set_register(s7, s7_val);
+ set_register(gp, gp_val);
+ set_register(sp, sp_val);
+ set_register(fp, fp_val);
+}
+
+intptr_t Simulator::CallImpl(Address entry, int argument_count,
+ const intptr_t* arguments) {
+ constexpr int kRegisterPassedArguments = 8;
+ // Set up arguments.
+
+  // The first four arguments are passed in registers a0-a3.
+ int reg_arg_count = std::min(kRegisterPassedArguments, argument_count);
+ if (reg_arg_count > 0) set_register(a0, arguments[0]);
+ if (reg_arg_count > 1) set_register(a1, arguments[1]);
+ if (reg_arg_count > 2) set_register(a2, arguments[2]);
+ if (reg_arg_count > 3) set_register(a3, arguments[3]);
+
+  // Up to eight integer arguments are passed in registers (a0-a7) in the
+  // RISC-V calling convention.
+ if (reg_arg_count > 4) set_register(a4, arguments[4]);
+ if (reg_arg_count > 5) set_register(a5, arguments[5]);
+ if (reg_arg_count > 6) set_register(a6, arguments[6]);
+ if (reg_arg_count > 7) set_register(a7, arguments[7]);
+
+ if (::v8::internal::FLAG_trace_sim) {
+ std::cout << "CallImpl: reg_arg_count = " << reg_arg_count << std::hex
+ << " entry-pc (JSEntry) = 0x" << entry << " a0 (Isolate) = 0x"
+ << get_register(a0) << " a1 (orig_func/new_target) = 0x"
+ << get_register(a1) << " a2 (func/target) = 0x"
+ << get_register(a2) << " a3 (receiver) = 0x" << get_register(a3)
+ << " a4 (argc) = 0x" << get_register(a4) << " a5 (argv) = 0x"
+ << get_register(a5) << std::endl;
+ }
+
+ // Remaining arguments passed on stack.
+ int64_t original_stack = get_register(sp);
+ // Compute position of stack on entry to generated code.
+ int stack_args_count = argument_count - reg_arg_count;
+ int stack_args_size = stack_args_count * sizeof(*arguments) + kCArgsSlotsSize;
+ int64_t entry_stack = original_stack - stack_args_size;
+
+ if (base::OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -base::OS::ActivationFrameAlignment();
+ }
+ // Store remaining arguments on stack, from low to high memory.
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+ memcpy(stack_argument + kCArgSlotCount, arguments + reg_arg_count,
+ stack_args_count * sizeof(*arguments));
+ set_register(sp, entry_stack);
+
+ CallInternal(entry);
+
+ // Pop stack passed arguments.
+ CHECK_EQ(entry_stack, get_register(sp));
+ set_register(sp, original_stack);
+
+  // RISC-V uses a0 to return the result.
+  return get_register(a0);
+}
+
+double Simulator::CallFP(Address entry, double d0, double d1) {
+ set_fpu_register_double(fa0, d0);
+ set_fpu_register_double(fa1, d1);
+ CallInternal(entry);
+ return get_fpu_register_double(fa0);
+}
+
+uintptr_t Simulator::PushAddress(uintptr_t address) {
+ int64_t new_sp = get_register(sp) - sizeof(uintptr_t);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ set_register(sp, new_sp);
+ return new_sp;
+}
+
+uintptr_t Simulator::PopAddress() {
+ int64_t current_sp = get_register(sp);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ set_register(sp, current_sp + sizeof(uintptr_t));
+ return address;
+}
+
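+// The local and global monitors below model the reservation set used by the
+// LR/SC (load-reserved/store-conditional) instructions: the local monitor
+// tracks the current thread's reservation, while the global monitor
+// arbitrates between simulated threads.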
+Simulator::LocalMonitor::LocalMonitor()
+ : access_state_(MonitorAccess::Open),
+ tagged_addr_(0),
+ size_(TransactionSize::None) {}
+
+void Simulator::LocalMonitor::Clear() {
+ access_state_ = MonitorAccess::Open;
+ tagged_addr_ = 0;
+ size_ = TransactionSize::None;
+}
+
+void Simulator::LocalMonitor::NotifyLoad() {
+ if (access_state_ == MonitorAccess::RMW) {
+    // A non-linked load could clear the local monitor, so the most
+    // conservative choice is to unconditionally clear it on every load.
+ Clear();
+ }
+}
+
+void Simulator::LocalMonitor::NotifyLoadLinked(uintptr_t addr,
+ TransactionSize size) {
+ access_state_ = MonitorAccess::RMW;
+ tagged_addr_ = addr;
+ size_ = size;
+}
+
+void Simulator::LocalMonitor::NotifyStore() {
+ if (access_state_ == MonitorAccess::RMW) {
+    // A non-exclusive store could clear the local monitor, so the most
+    // conservative choice is to unconditionally clear it on every store.
+ Clear();
+ }
+}
+
+bool Simulator::LocalMonitor::NotifyStoreConditional(uintptr_t addr,
+ TransactionSize size) {
+ if (access_state_ == MonitorAccess::RMW) {
+ if (addr == tagged_addr_ && size_ == size) {
+ Clear();
+ return true;
+ } else {
+ return false;
+ }
+ } else {
+ DCHECK(access_state_ == MonitorAccess::Open);
+ return false;
+ }
+}
+
+Simulator::GlobalMonitor::LinkedAddress::LinkedAddress()
+ : access_state_(MonitorAccess::Open),
+ tagged_addr_(0),
+ next_(nullptr),
+ prev_(nullptr),
+ failure_counter_(0) {}
+
+void Simulator::GlobalMonitor::LinkedAddress::Clear_Locked() {
+ access_state_ = MonitorAccess::Open;
+ tagged_addr_ = 0;
+}
+
+void Simulator::GlobalMonitor::LinkedAddress::NotifyLoadLinked_Locked(
+ uintptr_t addr) {
+ access_state_ = MonitorAccess::RMW;
+ tagged_addr_ = addr;
+}
+
+void Simulator::GlobalMonitor::LinkedAddress::NotifyStore_Locked() {
+ if (access_state_ == MonitorAccess::RMW) {
+    // A non-exclusive store could clear the global monitor, so the most
+    // conservative choice is to unconditionally clear the global monitors on
+    // every store.
+ Clear_Locked();
+ }
+}
+
+bool Simulator::GlobalMonitor::LinkedAddress::NotifyStoreConditional_Locked(
+ uintptr_t addr, bool is_requesting_thread) {
+ if (access_state_ == MonitorAccess::RMW) {
+ if (is_requesting_thread) {
+ if (addr == tagged_addr_) {
+ Clear_Locked();
+ // Introduce occasional sc/scd failures. This is to simulate the
+ // behavior of hardware, which can randomly fail due to background
+ // cache evictions.
+ if (failure_counter_++ >= kMaxFailureCounter) {
+ failure_counter_ = 0;
+ return false;
+ } else {
+ return true;
+ }
+ }
+ } else if ((addr & kExclusiveTaggedAddrMask) ==
+ (tagged_addr_ & kExclusiveTaggedAddrMask)) {
+ // Check the masked addresses when responding to a successful lock by
+ // another thread so the implementation is more conservative (i.e. the
+ // granularity of locking is as large as possible.)
+ Clear_Locked();
+ return false;
+ }
+ }
+ return false;
+}
+
+void Simulator::GlobalMonitor::NotifyLoadLinked_Locked(
+ uintptr_t addr, LinkedAddress* linked_address) {
+ linked_address->NotifyLoadLinked_Locked(addr);
+ PrependProcessor_Locked(linked_address);
+}
+
+void Simulator::GlobalMonitor::NotifyStore_Locked(
+ LinkedAddress* linked_address) {
+ // Notify each thread of the store operation.
+ for (LinkedAddress* iter = head_; iter; iter = iter->next_) {
+ iter->NotifyStore_Locked();
+ }
+}
+
+bool Simulator::GlobalMonitor::NotifyStoreConditional_Locked(
+ uintptr_t addr, LinkedAddress* linked_address) {
+ DCHECK(IsProcessorInLinkedList_Locked(linked_address));
+ if (linked_address->NotifyStoreConditional_Locked(addr, true)) {
+ // Notify the other processors that this StoreConditional succeeded.
+ for (LinkedAddress* iter = head_; iter; iter = iter->next_) {
+ if (iter != linked_address) {
+ iter->NotifyStoreConditional_Locked(addr, false);
+ }
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked(
+ LinkedAddress* linked_address) const {
+ return head_ == linked_address || linked_address->next_ ||
+ linked_address->prev_;
+}
+
+void Simulator::GlobalMonitor::PrependProcessor_Locked(
+ LinkedAddress* linked_address) {
+ if (IsProcessorInLinkedList_Locked(linked_address)) {
+ return;
+ }
+
+ if (head_) {
+ head_->prev_ = linked_address;
+ }
+ linked_address->prev_ = nullptr;
+ linked_address->next_ = head_;
+ head_ = linked_address;
+}
+
+void Simulator::GlobalMonitor::RemoveLinkedAddress(
+ LinkedAddress* linked_address) {
+ base::MutexGuard lock_guard(&mutex);
+ if (!IsProcessorInLinkedList_Locked(linked_address)) {
+ return;
+ }
+
+ if (linked_address->prev_) {
+ linked_address->prev_->next_ = linked_address->next_;
+ } else {
+ head_ = linked_address->next_;
+ }
+ if (linked_address->next_) {
+ linked_address->next_->prev_ = linked_address->prev_;
+ }
+ linked_address->prev_ = nullptr;
+ linked_address->next_ = nullptr;
+}
+
+#undef SScanF
+
+} // namespace internal
+} // namespace v8
+
+#endif // USE_SIMULATOR
diff --git a/deps/v8/src/execution/riscv64/simulator-riscv64.h b/deps/v8/src/execution/riscv64/simulator-riscv64.h
new file mode 100644
index 0000000000..e51ec6472c
--- /dev/null
+++ b/deps/v8/src/execution/riscv64/simulator-riscv64.h
@@ -0,0 +1,820 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Copyright (c) 2010 - 2017,
+// The Regents of the University of California (Regents). All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// 1. Redistributions of source code must retain the above copyright notice,
+//    this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+// 3. Neither the name of the Regents nor the names of its contributors may
+//    be used to endorse or promote products derived from this software
+//    without specific prior written permission.
+//
+// IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
+// SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS,
+// ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
+// REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED
+// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
+// HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
+// MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+
+// The original source code covered by the above license has been modified
+// significantly by the v8 project authors.
+
+// Declares a Simulator for RISC-V instructions if we are not generating a
+// native RISC-V binary. This Simulator allows us to run and debug RISC-V code
+// generation on regular desktop machines. V8 calls into generated code via the
+// GeneratedCode wrapper, which will start execution in the Simulator or
+// forward to the real entry on a RISC-V HW platform.
+
+#ifndef V8_EXECUTION_RISCV64_SIMULATOR_RISCV64_H_
+#define V8_EXECUTION_RISCV64_SIMULATOR_RISCV64_H_
+
+// globals.h defines USE_SIMULATOR.
+#include "src/common/globals.h"
+
+template <typename T>
+int Compare(const T& a, const T& b) {
+ if (a == b)
+ return 0;
+ else if (a < b)
+ return -1;
+ else
+ return 1;
+}
+
+// Returns the negative absolute value of its argument.
+template <typename T,
+ typename = typename std::enable_if<std::is_signed<T>::value>::type>
+T Nabs(T a) {
+ return a < 0 ? a : -a;
+}
+
+#if defined(USE_SIMULATOR)
+// Running with a simulator.
+
+#include "src/base/hashmap.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/riscv64/constants-riscv64.h"
+#include "src/execution/simulator-base.h"
+#include "src/utils/allocation.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Utility types and functions for RISCV
+#ifdef V8_TARGET_ARCH_32_BIT
+using sreg_t = int32_t;
+using reg_t = uint32_t;
+#define xlen 32
+#elif V8_TARGET_ARCH_64_BIT
+using sreg_t = int64_t;
+using reg_t = uint64_t;
+#define xlen 64
+#else
+#error "Cannot detect Riscv's bitwidth"
+#endif
+
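+// Sign-/zero-extension helpers: sext_xlen/zext_xlen truncate a value to XLEN
+// bits and then sign- or zero-extend it back to the full register width.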
+#define sext32(x) ((sreg_t)(int32_t)(x))
+#define zext32(x) ((reg_t)(uint32_t)(x))
+#define sext_xlen(x) (((sreg_t)(x) << (64 - xlen)) >> (64 - xlen))
+#define zext_xlen(x) (((reg_t)(x) << (64 - xlen)) >> (64 - xlen))
+
+#define BIT(n) (0x1LL << n)
+#define QUIET_BIT_S(nan) (bit_cast<int32_t>(nan) & BIT(22))
+#define QUIET_BIT_D(nan) (bit_cast<int64_t>(nan) & BIT(51))
+static inline bool isSnan(float fp) { return !QUIET_BIT_S(fp); }
+static inline bool isSnan(double fp) { return !QUIET_BIT_D(fp); }
+#undef QUIET_BIT_S
+#undef QUIET_BIT_D
+
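+// The mulh* helpers compute the upper 64 bits of the full 128-bit product,
+// as required by the MULH, MULHU, and MULHSU instructions.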
+inline uint64_t mulhu(uint64_t a, uint64_t b) {
+ __uint128_t full_result = ((__uint128_t)a) * ((__uint128_t)b);
+ return full_result >> 64;
+}
+
+inline int64_t mulh(int64_t a, int64_t b) {
+ __int128_t full_result = ((__int128_t)a) * ((__int128_t)b);
+ return full_result >> 64;
+}
+
+inline int64_t mulhsu(int64_t a, uint64_t b) {
+ __int128_t full_result = ((__int128_t)a) * ((__uint128_t)b);
+ return full_result >> 64;
+}
+
+// Floating point helpers
+#define F32_SIGN ((uint32_t)1 << 31)
+union u32_f32 {
+ uint32_t u;
+ float f;
+};
+inline float fsgnj32(float rs1, float rs2, bool n, bool x) {
+ u32_f32 a = {.f = rs1}, b = {.f = rs2};
+ u32_f32 res;
+ res.u =
+ (a.u & ~F32_SIGN) | ((((x) ? a.u : (n) ? F32_SIGN : 0) ^ b.u) & F32_SIGN);
+ return res.f;
+}
+#define F64_SIGN ((uint64_t)1 << 63)
+union u64_f64 {
+ uint64_t u;
+ double d;
+};
+inline double fsgnj64(double rs1, double rs2, bool n, bool x) {
+ u64_f64 a = {.d = rs1}, b = {.d = rs2};
+ u64_f64 res;
+ res.u =
+ (a.u & ~F64_SIGN) | ((((x) ? a.u : (n) ? F64_SIGN : 0) ^ b.u) & F64_SIGN);
+ return res.d;
+}
+
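+// Single-precision values live NaN-boxed in the 64-bit FPU registers: a
+// properly boxed float has all upper 32 bits set to 1.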
+inline bool is_boxed_float(int64_t v) { return (uint32_t)((v >> 32) + 1) == 0; }
+inline int64_t box_float(float v) {
+ return (0xFFFFFFFF00000000 | bit_cast<int32_t>(v));
+}
+
+// -----------------------------------------------------------------------------
+// Utility functions
+
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
+
+ CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
+
+ char* ValidityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+
+ char* CachedData(int offset) { return &data_[offset]; }
+
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
+
+class SimInstructionBase : public InstructionBase {
+ public:
+ Type InstructionType() const { return type_; }
+ inline Instruction* instr() const { return instr_; }
+ inline int32_t operand() const { return operand_; }
+
+ protected:
+ SimInstructionBase() : operand_(-1), instr_(nullptr), type_(kUnsupported) {}
+ explicit SimInstructionBase(Instruction* instr) {}
+
+ int32_t operand_;
+ Instruction* instr_;
+ Type type_;
+
+ private:
+ DISALLOW_ASSIGN(SimInstructionBase);
+};
+
+class SimInstruction : public InstructionGetters<SimInstructionBase> {
+ public:
+ SimInstruction() {}
+
+ explicit SimInstruction(Instruction* instr) { *this = instr; }
+
+ SimInstruction& operator=(Instruction* instr) {
+ operand_ = *reinterpret_cast<const int32_t*>(instr);
+ instr_ = instr;
+ type_ = InstructionBase::InstructionType();
+ DCHECK(reinterpret_cast<void*>(&operand_) == this);
+ return *this;
+ }
+};
+
+class Simulator : public SimulatorBase {
+ public:
+ friend class RiscvDebugger;
+
+ // Registers are declared in order. See SMRL chapter 2.
+ enum Register {
+ no_reg = -1,
+ zero_reg = 0,
+ ra,
+ sp,
+ gp,
+ tp,
+ t0,
+ t1,
+ t2,
+ s0,
+ s1,
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ s2,
+ s3,
+ s4,
+ s5,
+ s6,
+ s7,
+ s8,
+ s9,
+ s10,
+ s11,
+ t3,
+ t4,
+ t5,
+ t6,
+ pc, // pc must be the last register.
+ kNumSimuRegisters,
+ // aliases
+ fp = s0
+ };
+
+ // Coprocessor registers.
+  // FPU registers are declared below in the standard RISC-V f-register order.
+ enum FPURegister {
+ ft0,
+ ft1,
+ ft2,
+ ft3,
+ ft4,
+ ft5,
+ ft6,
+ ft7,
+ fs0,
+ fs1,
+ fa0,
+ fa1,
+ fa2,
+ fa3,
+ fa4,
+ fa5,
+ fa6,
+ fa7,
+ fs2,
+ fs3,
+ fs4,
+ fs5,
+ fs6,
+ fs7,
+ fs8,
+ fs9,
+ fs10,
+ fs11,
+ ft8,
+ ft9,
+ ft10,
+ ft11,
+ kNumFPURegisters
+ };
+
+ explicit Simulator(Isolate* isolate);
+ ~Simulator();
+
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate);
+
+  // Accessors for register state. Reading the pc value adheres to the RISC-V
+  // architecture specification and is off by 8 from the currently executing
+  // instruction.
+ void set_register(int reg, int64_t value);
+ void set_register_word(int reg, int32_t value);
+ void set_dw_register(int dreg, const int* dbl);
+ int64_t get_register(int reg) const;
+ double get_double_from_register_pair(int reg);
+
+ // Same for FPURegisters.
+ void set_fpu_register(int fpureg, int64_t value);
+ void set_fpu_register_word(int fpureg, int32_t value);
+ void set_fpu_register_hi_word(int fpureg, int32_t value);
+ void set_fpu_register_float(int fpureg, float value);
+ void set_fpu_register_double(int fpureg, double value);
+
+ int64_t get_fpu_register(int fpureg) const;
+ int32_t get_fpu_register_word(int fpureg) const;
+ int32_t get_fpu_register_signed_word(int fpureg) const;
+ int32_t get_fpu_register_hi_word(int fpureg) const;
+ float get_fpu_register_float(int fpureg) const;
+ double get_fpu_register_double(int fpureg) const;
+
+ // RV CSR manipulation
+ uint32_t read_csr_value(uint32_t csr);
+ void write_csr_value(uint32_t csr, uint64_t value);
+ void set_csr_bits(uint32_t csr, uint64_t flags);
+ void clear_csr_bits(uint32_t csr, uint64_t flags);
+
+ void set_fflags(uint32_t flags) { set_csr_bits(csr_fflags, flags); }
+ void clear_fflags(int32_t flags) { clear_csr_bits(csr_fflags, flags); }
+
+ inline uint32_t get_dynamic_rounding_mode();
+ inline bool test_fflags_bits(uint32_t mask);
+
+ float RoundF2FHelper(float input_val, int rmode);
+ double RoundF2FHelper(double input_val, int rmode);
+ template <typename I_TYPE, typename F_TYPE>
+ I_TYPE RoundF2IHelper(F_TYPE original, int rmode);
+
+ template <typename T>
+ T FMaxMinHelper(T a, T b, MaxMinKind kind);
+
+ template <typename T>
+ bool CompareFHelper(T input1, T input2, FPUCondition cc);
+
+ // Special case of set_register and get_register to access the raw PC value.
+ void set_pc(int64_t value);
+ int64_t get_pc() const;
+
+ Address get_sp() const { return static_cast<Address>(get_register(sp)); }
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t StackLimit(uintptr_t c_limit) const;
+
+ // Executes RISC-V instructions until the PC reaches end_sim_pc.
+ void Execute();
+
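+  // Calls generated code at |entry| with the given arguments, e.g.
+  //   int64_t r = Simulator::current(isolate)->Call<int64_t>(entry, a, b);
+  // Integer arguments beyond the first eight are passed on the stack.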
+ template <typename Return, typename... Args>
+ Return Call(Address entry, Args... args) {
+ return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
+ }
+
+ // Alternative: call a 2-argument double function.
+ double CallFP(Address entry, double d0, double d1);
+
+ // Push an address onto the JS stack.
+ uintptr_t PushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t PopAddress();
+
+ // Debugger input.
+ void set_last_debugger_input(char* input);
+ char* last_debugger_input() { return last_debugger_input_; }
+
+ // Redirection support.
+ static void SetRedirectInstruction(Instruction* instruction);
+
+ // ICache checking.
+ static bool ICacheMatch(void* one, void* two);
+ static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
+ size_t size);
+
+ // Returns true if pc register contains one of the 'special_values' defined
+ // below (bad_ra, end_sim_pc).
+ bool has_bad_pc() const;
+
+ private:
+ enum special_values {
+ // Known bad pc value to ensure that the simulator does not execute
+ // without being properly setup.
+ bad_ra = -1,
+ // A pc value used to signal the simulator to stop execution. Generally
+ // the ra is set to this value on transition from native C code to
+ // simulated execution, so that the simulator can "return" to the native
+ // C code.
+ end_sim_pc = -2,
+ // Unpredictable value.
+ Unpredictable = 0xbadbeaf
+ };
+
+ V8_EXPORT_PRIVATE intptr_t CallImpl(Address entry, int argument_count,
+ const intptr_t* arguments);
+
+ // Unsupported instructions use Format to print an error and stop execution.
+ void Format(Instruction* instr, const char* format);
+
+ // Helpers for data value tracing.
+ enum TraceType {
+ BYTE,
+ HALF,
+ WORD,
+ DWORD,
+ FLOAT,
+ DOUBLE,
+ // FLOAT_DOUBLE,
+ // WORD_DWORD
+ };
+
+ // RISCV Memory read/write methods
+ template <typename T>
+ T ReadMem(int64_t addr, Instruction* instr);
+ template <typename T>
+ void WriteMem(int64_t addr, T value, Instruction* instr);
+ template <typename T, typename OP>
+ T amo(int64_t addr, OP f, Instruction* instr, TraceType t) {
+ auto lhs = ReadMem<T>(addr, instr);
+ // TODO(RISCV): trace memory read for AMO
+ WriteMem<T>(addr, (T)f(lhs), instr);
+ return lhs;
+ }
+
+ // Helper for debugging memory access.
+ inline void DieOrDebug();
+
+ void TraceRegWr(int64_t value, TraceType t = DWORD);
+ void TraceMemWr(int64_t addr, int64_t value, TraceType t);
+ template <typename T>
+ void TraceMemRd(int64_t addr, T value, int64_t reg_value);
+ template <typename T>
+ void TraceMemWr(int64_t addr, T value);
+
+ SimInstruction instr_;
+
+  // RISCV utility API to access register values
+ inline int32_t rs1_reg() const { return instr_.Rs1Value(); }
+ inline int64_t rs1() const { return get_register(rs1_reg()); }
+ inline float frs1() const { return get_fpu_register_float(rs1_reg()); }
+ inline double drs1() const { return get_fpu_register_double(rs1_reg()); }
+ inline int32_t rs2_reg() const { return instr_.Rs2Value(); }
+ inline int64_t rs2() const { return get_register(rs2_reg()); }
+ inline float frs2() const { return get_fpu_register_float(rs2_reg()); }
+ inline double drs2() const { return get_fpu_register_double(rs2_reg()); }
+ inline int32_t rs3_reg() const { return instr_.Rs3Value(); }
+ inline int64_t rs3() const { return get_register(rs3_reg()); }
+ inline float frs3() const { return get_fpu_register_float(rs3_reg()); }
+ inline double drs3() const { return get_fpu_register_double(rs3_reg()); }
+ inline int32_t rd_reg() const { return instr_.RdValue(); }
+ inline int32_t frd_reg() const { return instr_.RdValue(); }
+ inline int32_t rvc_rs1_reg() const { return instr_.RvcRs1Value(); }
+ inline int64_t rvc_rs1() const { return get_register(rvc_rs1_reg()); }
+ inline int32_t rvc_rs2_reg() const { return instr_.RvcRs2Value(); }
+ inline int64_t rvc_rs2() const { return get_register(rvc_rs2_reg()); }
+ inline double rvc_drs2() const {
+ return get_fpu_register_double(rvc_rs2_reg());
+ }
+ inline int32_t rvc_rs1s_reg() const { return instr_.RvcRs1sValue(); }
+ inline int64_t rvc_rs1s() const { return get_register(rvc_rs1s_reg()); }
+ inline int32_t rvc_rs2s_reg() const { return instr_.RvcRs2sValue(); }
+ inline int64_t rvc_rs2s() const { return get_register(rvc_rs2s_reg()); }
+ inline double rvc_drs2s() const {
+ return get_fpu_register_double(rvc_rs2s_reg());
+ }
+ inline int32_t rvc_rd_reg() const { return instr_.RvcRdValue(); }
+ inline int32_t rvc_frd_reg() const { return instr_.RvcRdValue(); }
+ inline int16_t boffset() const { return instr_.BranchOffset(); }
+ inline int16_t imm12() const { return instr_.Imm12Value(); }
+ inline int32_t imm20J() const { return instr_.Imm20JValue(); }
+ inline int32_t imm5CSR() const { return instr_.Rs1Value(); }
+ inline int16_t csr_reg() const { return instr_.CsrValue(); }
+ inline int16_t rvc_imm6() const { return instr_.RvcImm6Value(); }
+ inline int16_t rvc_imm6_addi16sp() const {
+ return instr_.RvcImm6Addi16spValue();
+ }
+ inline int16_t rvc_imm8_addi4spn() const {
+ return instr_.RvcImm8Addi4spnValue();
+ }
+ inline int16_t rvc_imm6_lwsp() const { return instr_.RvcImm6LwspValue(); }
+ inline int16_t rvc_imm6_ldsp() const { return instr_.RvcImm6LdspValue(); }
+ inline int16_t rvc_imm6_swsp() const { return instr_.RvcImm6SwspValue(); }
+ inline int16_t rvc_imm6_sdsp() const { return instr_.RvcImm6SdspValue(); }
+ inline int16_t rvc_imm5_w() const { return instr_.RvcImm5WValue(); }
+ inline int16_t rvc_imm5_d() const { return instr_.RvcImm5DValue(); }
+
+ inline void set_rd(int64_t value, bool trace = true) {
+ set_register(rd_reg(), value);
+ if (trace) TraceRegWr(get_register(rd_reg()), DWORD);
+ }
+ inline void set_frd(float value, bool trace = true) {
+ set_fpu_register_float(rd_reg(), value);
+ if (trace) TraceRegWr(get_fpu_register_word(rd_reg()), FLOAT);
+ }
+ inline void set_drd(double value, bool trace = true) {
+ set_fpu_register_double(rd_reg(), value);
+ if (trace) TraceRegWr(get_fpu_register(rd_reg()), DOUBLE);
+ }
+ inline void set_rvc_rd(int64_t value, bool trace = true) {
+ set_register(rvc_rd_reg(), value);
+ if (trace) TraceRegWr(get_register(rvc_rd_reg()), DWORD);
+ }
+ inline void set_rvc_rs1s(int64_t value, bool trace = true) {
+ set_register(rvc_rs1s_reg(), value);
+ if (trace) TraceRegWr(get_register(rvc_rs1s_reg()), DWORD);
+ }
+ inline void set_rvc_drd(double value, bool trace = true) {
+ set_fpu_register_double(rvc_rd_reg(), value);
+ if (trace) TraceRegWr(get_fpu_register(rvc_rd_reg()), DOUBLE);
+ }
+ inline void set_rvc_rs2s(double value, bool trace = true) {
+ set_register(rvc_rs2s_reg(), value);
+ if (trace) TraceRegWr(get_register(rvc_rs2s_reg()), DWORD);
+ }
+ inline void set_rvc_drs2s(double value, bool trace = true) {
+ set_fpu_register_double(rvc_rs2s_reg(), value);
+ if (trace) TraceRegWr(get_fpu_register(rvc_rs2s_reg()), DOUBLE);
+ }
+ inline int16_t shamt6() const { return (imm12() & 0x3F); }
+ inline int16_t shamt5() const { return (imm12() & 0x1F); }
+ inline int16_t rvc_shamt6() const { return instr_.RvcShamt6(); }
+ inline int32_t s_imm12() const { return instr_.StoreOffset(); }
+ inline int32_t u_imm20() const { return instr_.Imm20UValue() << 12; }
+ inline int32_t rvc_u_imm6() const { return instr_.RvcImm6Value() << 12; }
+ inline void require(bool check) {
+ if (!check) {
+ SignalException(kIllegalInstruction);
+ }
+ }
+
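+  // The CanonicalizeFPUOp* helpers apply RISC-V NaN handling to an FP
+  // operation: if any input or the result is NaN, the result becomes the
+  // canonical quiet NaN, and a signaling NaN additionally raises the
+  // invalid-operation flag.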
+ template <typename T, typename Func>
+ inline T CanonicalizeFPUOp3(Func fn) {
+ DCHECK(std::is_floating_point<T>::value);
+ T src1 = std::is_same<float, T>::value ? frs1() : drs1();
+ T src2 = std::is_same<float, T>::value ? frs2() : drs2();
+ T src3 = std::is_same<float, T>::value ? frs3() : drs3();
+ auto alu_out = fn(src1, src2, src3);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(src1) || std::isnan(src2) ||
+ std::isnan(src3)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(src1) || isSnan(src2) || isSnan(src3))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<T>::quiet_NaN();
+ }
+ return alu_out;
+ }
+
+ template <typename T, typename Func>
+ inline T CanonicalizeFPUOp2(Func fn) {
+ DCHECK(std::is_floating_point<T>::value);
+ T src1 = std::is_same<float, T>::value ? frs1() : drs1();
+ T src2 = std::is_same<float, T>::value ? frs2() : drs2();
+ auto alu_out = fn(src1, src2);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(src1) || std::isnan(src2)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(src1) || isSnan(src2))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<T>::quiet_NaN();
+ }
+ return alu_out;
+ }
+
+ template <typename T, typename Func>
+ inline T CanonicalizeFPUOp1(Func fn) {
+ DCHECK(std::is_floating_point<T>::value);
+ T src1 = std::is_same<float, T>::value ? frs1() : drs1();
+ auto alu_out = fn(src1);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(src1)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(src1)) set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<T>::quiet_NaN();
+ }
+ return alu_out;
+ }
+
+ template <typename Func>
+ inline float CanonicalizeDoubleToFloatOperation(Func fn) {
+ float alu_out = fn(drs1());
+ if (std::isnan(alu_out) || std::isnan(drs1()))
+ alu_out = std::numeric_limits<float>::quiet_NaN();
+ return alu_out;
+ }
+
+ template <typename Func>
+ inline float CanonicalizeFloatToDoubleOperation(Func fn) {
+ double alu_out = fn(frs1());
+ if (std::isnan(alu_out) || std::isnan(frs1()))
+ alu_out = std::numeric_limits<double>::quiet_NaN();
+ return alu_out;
+ }
+
+ // RISCV decoding routine
+ void DecodeRVRType();
+ void DecodeRVR4Type();
+ void DecodeRVRFPType(); // Special routine for R/OP_FP type
+ void DecodeRVRAType(); // Special routine for R/AMO type
+ void DecodeRVIType();
+ void DecodeRVSType();
+ void DecodeRVBType();
+ void DecodeRVUType();
+ void DecodeRVJType();
+ void DecodeCRType();
+ void DecodeCAType();
+ void DecodeCIType();
+ void DecodeCIWType();
+ void DecodeCSSType();
+ void DecodeCLType();
+ void DecodeCSType();
+ void DecodeCJType();
+
+ // Used for breakpoints and traps.
+ void SoftwareInterrupt();
+
+ // Debug helpers
+
+ // Simulator breakpoints.
+ struct Breakpoint {
+ Instruction* location;
+ bool enabled;
+ bool is_tbreak;
+ };
+ std::vector<Breakpoint> breakpoints_;
+ void SetBreakpoint(Instruction* breakpoint, bool is_tbreak);
+ void ListBreakpoints();
+ void CheckBreakpoints();
+
+ // Stop helper functions.
+ bool IsWatchpoint(uint64_t code);
+ void PrintWatchpoint(uint64_t code);
+ void HandleStop(uint64_t code);
+ bool IsStopInstruction(Instruction* instr);
+ bool IsEnabledStop(uint64_t code);
+ void EnableStop(uint64_t code);
+ void DisableStop(uint64_t code);
+ void IncreaseStopCounter(uint64_t code);
+ void PrintStopInfo(uint64_t code);
+
+ // Executes one instruction.
+ void InstructionDecode(Instruction* instr);
+
+ // ICache.
+ static void CheckICache(base::CustomMatcherHashMap* i_cache,
+ Instruction* instr);
+ static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
+ size_t size);
+ static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
+ void* page);
+
+ enum Exception {
+ none,
+ kIntegerOverflow,
+ kIntegerUnderflow,
+ kDivideByZero,
+ kNumExceptions,
+    // RISCV illegal instruction exception
+ kIllegalInstruction,
+ };
+
+ // Exceptions.
+ void SignalException(Exception e);
+
+ // Handle arguments and return value for runtime FP functions.
+ void GetFpArgs(double* x, double* y, int32_t* z);
+ void SetFpResult(const double& result);
+
+ void CallInternal(Address entry);
+
+ // Architecture state.
+ // Registers.
+ int64_t registers_[kNumSimuRegisters];
+ // Coprocessor Registers.
+ int64_t FPUregisters_[kNumFPURegisters];
+ // Floating-point control and status register.
+ uint32_t FCSR_;
+
+ // Simulator support.
+ // Allocate 1MB for stack.
+ size_t stack_size_;
+ char* stack_;
+ bool pc_modified_;
+ int64_t icount_;
+ int break_count_;
+ EmbeddedVector<char, 128> trace_buf_;
+
+ // Debugger input.
+ char* last_debugger_input_;
+
+ v8::internal::Isolate* isolate_;
+
+ // Stop is disabled if bit 31 is set.
+ static const uint32_t kStopDisabledBit = 1 << 31;
+
+ // A stop is enabled, meaning the simulator will stop when meeting the
+ // instruction, if bit 31 of watched_stops_[code].count is unset.
+ // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
+ // the breakpoint was hit or gone through.
+ struct StopCountAndDesc {
+ uint32_t count;
+ char* desc;
+ };
+ StopCountAndDesc watched_stops_[kMaxStopCode + 1];
+
+ // Synchronization primitives.
+ enum class MonitorAccess {
+ Open,
+ RMW,
+ };
+
+ enum class TransactionSize {
+ None = 0,
+ Word = 4,
+ DoubleWord = 8,
+ };
+
+ // The least-significant bits of the address are ignored. The number of bits
+ // is implementation-defined, between 3 and minimum page size.
+ static const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 3) - 1);
+
+ class LocalMonitor {
+ public:
+ LocalMonitor();
+
+ // These functions manage the state machine for the local monitor, but do
+ // not actually perform loads and stores. NotifyStoreConditional only
+ // returns true if the store conditional is allowed; the global monitor will
+ // still have to be checked to see whether the memory should be updated.
+ void NotifyLoad();
+ void NotifyLoadLinked(uintptr_t addr, TransactionSize size);
+ void NotifyStore();
+ bool NotifyStoreConditional(uintptr_t addr, TransactionSize size);
+
+ private:
+ void Clear();
+
+ MonitorAccess access_state_;
+ uintptr_t tagged_addr_;
+ TransactionSize size_;
+ };
+
+ class GlobalMonitor {
+ public:
+ class LinkedAddress {
+ public:
+ LinkedAddress();
+
+ private:
+ friend class GlobalMonitor;
+ // These functions manage the state machine for the global monitor, but do
+ // not actually perform loads and stores.
+ void Clear_Locked();
+ void NotifyLoadLinked_Locked(uintptr_t addr);
+ void NotifyStore_Locked();
+ bool NotifyStoreConditional_Locked(uintptr_t addr,
+ bool is_requesting_thread);
+
+ MonitorAccess access_state_;
+ uintptr_t tagged_addr_;
+ LinkedAddress* next_;
+ LinkedAddress* prev_;
+      // A store conditional can fail due to background cache evictions.
+      // Rather than simulating this, we'll just occasionally introduce cases
+      // where a store conditional fails. This will happen once after every
+      // kMaxFailureCounter exclusive stores.
+ static const int kMaxFailureCounter = 5;
+ int failure_counter_;
+ };
+
+ // Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
+ base::Mutex mutex;
+
+ void NotifyLoadLinked_Locked(uintptr_t addr, LinkedAddress* linked_address);
+ void NotifyStore_Locked(LinkedAddress* linked_address);
+ bool NotifyStoreConditional_Locked(uintptr_t addr,
+ LinkedAddress* linked_address);
+
+ // Called when the simulator is destroyed.
+ void RemoveLinkedAddress(LinkedAddress* linked_address);
+
+ static GlobalMonitor* Get();
+
+ private:
+ // Private constructor. Call {GlobalMonitor::Get()} to get the singleton.
+ GlobalMonitor() = default;
+ friend class base::LeakyObject<GlobalMonitor>;
+
+ bool IsProcessorInLinkedList_Locked(LinkedAddress* linked_address) const;
+ void PrependProcessor_Locked(LinkedAddress* linked_address);
+
+ LinkedAddress* head_ = nullptr;
+ };
+
+ LocalMonitor local_monitor_;
+ GlobalMonitor::LinkedAddress global_monitor_thread_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // defined(USE_SIMULATOR)
+#endif // V8_EXECUTION_RISCV64_SIMULATOR_RISCV64_H_
diff --git a/deps/v8/src/execution/runtime-profiler.cc b/deps/v8/src/execution/runtime-profiler.cc
index 8ee053da75..013d7c0e9f 100644
--- a/deps/v8/src/execution/runtime-profiler.cc
+++ b/deps/v8/src/execution/runtime-profiler.cc
@@ -27,16 +27,16 @@ static const int kProfilerTicksBeforeOptimization = 3;
// The number of ticks required for optimizing a function increases with
// the size of the bytecode. This is in addition to the
// kProfilerTicksBeforeOptimization required for any function.
-static const int kBytecodeSizeAllowancePerTick = 1200;
+static const int kBytecodeSizeAllowancePerTick = 1100;
// Maximum size in bytes of generate code for a function to allow OSR.
-static const int kOSRBytecodeSizeAllowanceBase = 132;
+static const int kOSRBytecodeSizeAllowanceBase = 119;
-static const int kOSRBytecodeSizeAllowancePerTick = 48;
+static const int kOSRBytecodeSizeAllowancePerTick = 44;
// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
-static const int kMaxBytecodeSizeForEarlyOpt = 90;
+static const int kMaxBytecodeSizeForEarlyOpt = 81;
// Number of times a function has to be seen on the stack before it is
// OSRed in TurboProp
@@ -48,6 +48,19 @@ static const int kMaxBytecodeSizeForEarlyOpt = 90;
// FLAG_ticks_scale_factor_for_top_tier.
static const int kProfilerTicksForTurboPropOSR = 4 * 10;
+// These are used to decide when we tier up to Turboprop.
+// The number of ticks required for tiering up to Turboprop is based on how
+// "soon" the function becomes hot. We use kMidTierGlobalTicksScaleFactor to
+// scale the difference in global ticks since the last time a function saw a
+// tick. The scaled difference is used to increase the number of ticks
+// required for tiering up to Turboprop.
+static const int kMidTierGlobalTicksScaleFactor = 100;
+
+// This is used to limit the number of additional ticks that the
+// kMidTierGlobalTicksScaleFactor can add to the threshold for mid-tier
+// tier-up.
+static const int kMaxAdditionalMidTierGlobalTicks = 10;
+
#define OPTIMIZATION_REASON_LIST(V) \
V(DoNotOptimize, "do not optimize") \
V(HotAndStable, "hot and stable") \
@@ -121,7 +134,7 @@ void TraceRecompile(JSFunction function, OptimizationReason reason,
} // namespace
RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
- : isolate_(isolate), any_ic_changed_(false) {}
+ : isolate_(isolate), any_ic_changed_(false), current_global_ticks_(0) {}
void RuntimeProfiler::Optimize(JSFunction function, OptimizationReason reason,
CodeKind code_kind) {
@@ -130,7 +143,7 @@ void RuntimeProfiler::Optimize(JSFunction function, OptimizationReason reason,
function.MarkForOptimization(ConcurrencyMode::kConcurrent);
}
-void RuntimeProfiler::AttemptOnStackReplacement(InterpretedFrame* frame,
+void RuntimeProfiler::AttemptOnStackReplacement(UnoptimizedFrame* frame,
int loop_nesting_levels) {
JSFunction function = frame->function();
SharedFunctionInfo shared = function.shared();
@@ -151,7 +164,7 @@ void RuntimeProfiler::AttemptOnStackReplacement(InterpretedFrame* frame,
PrintF(scope.file(), "]\n");
}
- DCHECK_EQ(StackFrame::INTERPRETED, frame->type());
+ DCHECK(frame->is_unoptimized());
int level = frame->GetBytecodeArray().osr_loop_nesting_level();
frame->GetBytecodeArray().set_osr_loop_nesting_level(std::min(
{level + loop_nesting_levels, AbstractCode::kMaxLoopNestingMarker}));
@@ -160,7 +173,6 @@ void RuntimeProfiler::AttemptOnStackReplacement(InterpretedFrame* frame,
void RuntimeProfiler::MaybeOptimizeFrame(JSFunction function,
JavaScriptFrame* frame,
CodeKind code_kind) {
- DCHECK(CodeKindCanTierUp(code_kind));
if (function.IsInOptimizationQueue()) {
TraceInOptimizationQueue(function);
return;
@@ -177,13 +189,12 @@ void RuntimeProfiler::MaybeOptimizeFrame(JSFunction function,
// Note: We currently do not trigger OSR compilation from NCI or TP code.
// TODO(jgruber,v8:8888): But we should.
- if (frame->is_interpreted()) {
- DCHECK_EQ(code_kind, CodeKind::INTERPRETED_FUNCTION);
+ if (frame->is_unoptimized()) {
if (FLAG_always_osr) {
- AttemptOnStackReplacement(InterpretedFrame::cast(frame),
+ AttemptOnStackReplacement(UnoptimizedFrame::cast(frame),
AbstractCode::kMaxLoopNestingMarker);
// Fall through and do a normal optimized compile as well.
- } else if (MaybeOSR(function, InterpretedFrame::cast(frame))) {
+ } else if (MaybeOSR(function, UnoptimizedFrame::cast(frame))) {
return;
}
}
@@ -194,9 +205,12 @@ void RuntimeProfiler::MaybeOptimizeFrame(JSFunction function,
if (reason != OptimizationReason::kDoNotOptimize) {
Optimize(function, reason, code_kind);
}
+ function.feedback_vector()
+ .set_global_ticks_at_last_runtime_profiler_interrupt(
+ current_global_ticks_);
}
-bool RuntimeProfiler::MaybeOSR(JSFunction function, InterpretedFrame* frame) {
+bool RuntimeProfiler::MaybeOSR(JSFunction function, UnoptimizedFrame* frame) {
int ticks = function.feedback_vector().profiler_ticks();
// TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
// than kMaxToplevelSourceSize.
@@ -220,7 +234,7 @@ bool RuntimeProfiler::MaybeOSR(JSFunction function, InterpretedFrame* frame) {
// OSR should happen roughly at the same with or without FLAG_turboprop.
// Turboprop has much lower interrupt budget so scale the ticks accordingly.
int scale_factor =
- FLAG_turboprop ? FLAG_ticks_scale_factor_for_top_tier : 1;
+ FLAG_turboprop ? FLAG_interrupt_budget_scale_factor_for_top_tier : 1;
int64_t scaled_ticks = static_cast<int64_t>(ticks) / scale_factor;
int64_t allowance = kOSRBytecodeSizeAllowanceBase +
scaled_ticks * kOSRBytecodeSizeAllowancePerTick;
@@ -237,18 +251,10 @@ namespace {
bool ShouldOptimizeAsSmallFunction(int bytecode_size, int ticks,
bool any_ic_changed,
bool active_tier_is_turboprop) {
+ if (FLAG_turboprop) return false;
if (any_ic_changed || bytecode_size >= kMaxBytecodeSizeForEarlyOpt)
return false;
- // Without turboprop we always allow early optimizations for small functions
- if (!FLAG_turboprop) return true;
- // For turboprop, we only do small function optimizations when tiering up from
- // TP-> TF. We should also scale the ticks, so we optimize small functions
- // when reaching one tick for top tier.
- // TODO(turboprop, mythria): Investigate if small function optimization is
- // required at all and avoid this if possible by changing the heuristics to
- // take function size into account.
- return active_tier_is_turboprop &&
- ticks > FLAG_ticks_scale_factor_for_top_tier;
+ return true;
}
} // namespace
@@ -263,12 +269,20 @@ OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction function,
}
int ticks = function.feedback_vector().profiler_ticks();
bool active_tier_is_turboprop = function.ActiveTierIsMidtierTurboprop();
- int scale_factor =
- active_tier_is_turboprop ? FLAG_ticks_scale_factor_for_top_tier : 1;
int ticks_for_optimization =
kProfilerTicksBeforeOptimization +
(bytecode.length() / kBytecodeSizeAllowancePerTick);
- ticks_for_optimization *= scale_factor;
+ if (FLAG_turboprop && !active_tier_is_turboprop) {
+ DCHECK_EQ(function.NextTier(), CodeKind::TURBOPROP);
+ int global_ticks_diff =
+ (current_global_ticks_ -
+ function.feedback_vector()
+ .global_ticks_at_last_runtime_profiler_interrupt());
+ ticks_for_optimization =
+ ticks_for_optimization +
+ std::min(global_ticks_diff / kMidTierGlobalTicksScaleFactor,
+ kMaxAdditionalMidTierGlobalTicks);
+ }
if (ticks >= ticks_for_optimization) {
return OptimizationReason::kHotAndStable;
} else if (ShouldOptimizeAsSmallFunction(bytecode.length(), ticks,
@@ -296,6 +310,10 @@ RuntimeProfiler::MarkCandidatesForOptimizationScope::
: handle_scope_(profiler->isolate_), profiler_(profiler) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.MarkCandidatesForOptimization");
+ if (profiler_->current_global_ticks_ <
+ FeedbackVector::GlobalTicksAtLastRuntimeProfilerInterruptBits::kMax - 1) {
+ profiler_->current_global_ticks_ += 1;
+ }
}
RuntimeProfiler::MarkCandidatesForOptimizationScope::
@@ -308,8 +326,7 @@ void RuntimeProfiler::MarkCandidatesForOptimization(JavaScriptFrame* frame) {
MarkCandidatesForOptimizationScope scope(this);
JSFunction function = frame->function();
- CodeKind code_kind = frame->is_interpreted() ? CodeKind::INTERPRETED_FUNCTION
- : function.code().kind();
+ CodeKind code_kind = function.GetActiveTier();
DCHECK(function.shared().is_compiled());
DCHECK(function.shared().IsInterpreted());
@@ -324,7 +341,7 @@ void RuntimeProfiler::MarkCandidatesForOptimization(JavaScriptFrame* frame) {
void RuntimeProfiler::MarkCandidatesForOptimizationFromBytecode() {
JavaScriptFrameIterator it(isolate_);
- DCHECK(it.frame()->is_interpreted());
+ DCHECK(it.frame()->is_unoptimized());
MarkCandidatesForOptimization(it.frame());
}
diff --git a/deps/v8/src/execution/runtime-profiler.h b/deps/v8/src/execution/runtime-profiler.h
index c985a3d1fd..60234d12be 100644
--- a/deps/v8/src/execution/runtime-profiler.h
+++ b/deps/v8/src/execution/runtime-profiler.h
@@ -14,7 +14,7 @@ namespace internal {
class BytecodeArray;
class Isolate;
-class InterpretedFrame;
+class UnoptimizedFrame;
class JavaScriptFrame;
class JSFunction;
enum class CodeKind;
@@ -31,7 +31,7 @@ class RuntimeProfiler {
void NotifyICChanged() { any_ic_changed_ = true; }
- void AttemptOnStackReplacement(InterpretedFrame* frame,
+ void AttemptOnStackReplacement(UnoptimizedFrame* frame,
int nesting_levels = 1);
private:
@@ -45,7 +45,7 @@ class RuntimeProfiler {
// Potentially attempts OSR from the given frame and returns whether no other
// optimization attempts should be made.
- bool MaybeOSR(JSFunction function, InterpretedFrame* frame);
+ bool MaybeOSR(JSFunction function, UnoptimizedFrame* frame);
OptimizationReason ShouldOptimize(JSFunction function,
BytecodeArray bytecode_array);
void Optimize(JSFunction function, OptimizationReason reason,
@@ -65,6 +65,7 @@ class RuntimeProfiler {
Isolate* isolate_;
bool any_ic_changed_;
+ unsigned int current_global_ticks_;
};
} // namespace internal
diff --git a/deps/v8/src/execution/s390/frame-constants-s390.cc b/deps/v8/src/execution/s390/frame-constants-s390.cc
index ea36f6b370..50f3445556 100644
--- a/deps/v8/src/execution/s390/frame-constants-s390.cc
+++ b/deps/v8/src/execution/s390/frame-constants-s390.cc
@@ -17,7 +17,7 @@ Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
-int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
+int UnoptimizedFrameConstants::RegisterStackSlotCount(int register_count) {
return register_count;
}
diff --git a/deps/v8/src/execution/s390/frame-constants-s390.h b/deps/v8/src/execution/s390/frame-constants-s390.h
index 282bd673cd..9b8bbec9a1 100644
--- a/deps/v8/src/execution/s390/frame-constants-s390.h
+++ b/deps/v8/src/execution/s390/frame-constants-s390.h
@@ -30,11 +30,14 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
#endif
// FP-relative.
+ // The instance is pushed as part of the saved registers. Being in {r6}, it is
+ // the first register pushed (highest register code in
+ // {wasm::kGpParamRegisters}).
static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static constexpr int kFixedFrameSizeFromFp =
TypedFrameConstants::kFixedFrameSizeFromFp +
kNumberOfSavedGpParamRegs * kSystemPointerSize +
- kNumberOfSavedFpParamRegs * kDoubleSize;
+ kNumberOfSavedFpParamRegs * kSimd128Size;
};
// Frame constructed by the {WasmDebugBreak} builtin.
diff --git a/deps/v8/src/execution/s390/simulator-s390.cc b/deps/v8/src/execution/s390/simulator-s390.cc
index 774cd75e22..30ad95c47b 100644
--- a/deps/v8/src/execution/s390/simulator-s390.cc
+++ b/deps/v8/src/execution/s390/simulator-s390.cc
@@ -778,20 +778,25 @@ void Simulator::EvalTableInit() {
V(vuplh, VUPLH, 0xE7D5) /* type = VRR_A VECTOR UNPACK LOGICAL HIGH */ \
V(vupl, VUPL, 0xE7D6) /* type = VRR_A VECTOR UNPACK LOW */ \
V(vuph, VUPH, 0xE7D7) /* type = VRR_A VECTOR UNPACK HIGH */ \
- V(vmnl, VMNL, 0xE7FC) /* type = VRR_C VECTOR MINIMUM LOGICAL */ \
- V(vmxl, VMXL, 0xE7FD) /* type = VRR_C VECTOR MAXIMUM LOGICAL */ \
- V(vmn, VMN, 0xE7FE) /* type = VRR_C VECTOR MINIMUM */ \
- V(vmx, VMX, 0xE7FF) /* type = VRR_C VECTOR MAXIMUM */ \
- V(vceq, VCEQ, 0xE7F8) /* type = VRR_B VECTOR COMPARE EQUAL */ \
- V(vx, VX, 0xE76D) /* type = VRR_C VECTOR EXCLUSIVE OR */ \
- V(vchl, VCHL, 0xE7F9) /* type = VRR_B VECTOR COMPARE HIGH LOGICAL */ \
- V(vch, VCH, 0xE7FB) /* type = VRR_B VECTOR COMPARE HIGH */ \
- V(vo, VO, 0xE76A) /* type = VRR_C VECTOR OR */ \
- V(vn, VN, 0xE768) /* type = VRR_C VECTOR AND */ \
- V(vno, VNO, 0xE768B) /* type = VRR_C VECTOR NOR */ \
- V(vlc, VLC, 0xE7DE) /* type = VRR_A VECTOR LOAD COMPLEMENT */ \
- V(vsel, VSEL, 0xE78D) /* type = VRR_E VECTOR SELECT */ \
- V(vperm, VPERM, 0xE78C) /* type = VRR_E VECTOR PERMUTE */ \
+ V(vpopct, VPOPCT, 0xE750) /* type = VRR_A VECTOR POPULATION COUNT */ \
+ V(vcdg, VCDG, 0xE7C3) /* VECTOR FP CONVERT FROM FIXED */ \
+ V(vcdlg, VCDLG, 0xE7C1) /* VECTOR FP CONVERT FROM LOGICAL */ \
+ V(vcgd, VCGD, 0xE7C2) /* VECTOR FP CONVERT TO FIXED */ \
+ V(vclgd, VCLGD, 0xE7C0) /* VECTOR FP CONVERT TO LOGICAL */ \
+ V(vmnl, VMNL, 0xE7FC) /* type = VRR_C VECTOR MINIMUM LOGICAL */ \
+ V(vmxl, VMXL, 0xE7FD) /* type = VRR_C VECTOR MAXIMUM LOGICAL */ \
+ V(vmn, VMN, 0xE7FE) /* type = VRR_C VECTOR MINIMUM */ \
+ V(vmx, VMX, 0xE7FF) /* type = VRR_C VECTOR MAXIMUM */ \
+ V(vceq, VCEQ, 0xE7F8) /* type = VRR_B VECTOR COMPARE EQUAL */ \
+ V(vx, VX, 0xE76D) /* type = VRR_C VECTOR EXCLUSIVE OR */ \
+ V(vchl, VCHL, 0xE7F9) /* type = VRR_B VECTOR COMPARE HIGH LOGICAL */ \
+ V(vch, VCH, 0xE7FB) /* type = VRR_B VECTOR COMPARE HIGH */ \
+ V(vo, VO, 0xE76A) /* type = VRR_C VECTOR OR */ \
+ V(vn, VN, 0xE768) /* type = VRR_C VECTOR AND */ \
+ V(vno, VNO, 0xE768B) /* type = VRR_C VECTOR NOR */ \
+ V(vlc, VLC, 0xE7DE) /* type = VRR_A VECTOR LOAD COMPLEMENT */ \
+ V(vsel, VSEL, 0xE78D) /* type = VRR_E VECTOR SELECT */ \
+ V(vperm, VPERM, 0xE78C) /* type = VRR_E VECTOR PERMUTE */ \
V(vbperm, VBPERM, 0xE785) /* type = VRR_C VECTOR BIT PERMUTE */ \
V(vtm, VTM, 0xE7D8) /* type = VRR_A VECTOR TEST UNDER MASK */ \
V(vesl, VESL, 0xE730) /* type = VRS_A VECTOR ELEMENT SHIFT LEFT */ \
@@ -1655,6 +1660,56 @@ T Simulator::get_high_register(int reg) const {
return static_cast<T>(registers_[reg] >> 32);
}
+template <class T, class R>
+static R ComputeSignedRoundingResult(T a, T n) {
+ constexpr T NINF = -std::numeric_limits<T>::infinity();
+ constexpr T PINF = std::numeric_limits<T>::infinity();
+ constexpr long double MN =
+ static_cast<long double>(std::numeric_limits<R>::min());
+ constexpr long double MP =
+ static_cast<long double>(std::numeric_limits<R>::max());
+
+ if (NINF <= a && a < MN && n < MN) {
+ return std::numeric_limits<R>::min();
+ } else if (NINF < a && a < MN && n == MN) {
+ return std::numeric_limits<R>::min();
+ } else if (MN <= a && a < 0.0) {
+ return static_cast<R>(n);
+ } else if (a == 0.0) {
+ return 0;
+ } else if (0.0 < a && a <= MP) {
+ return static_cast<R>(n);
+ } else if (MP < a && a <= PINF && n == MP) {
+ return std::numeric_limits<R>::max();
+ } else if (MP < a && a <= PINF && n > MP) {
+ return std::numeric_limits<R>::max();
+ } else if (std::isnan(a)) {
+ return std::numeric_limits<R>::min();
+ }
+ UNIMPLEMENTED();
+ return 0;
+}
+
+template <class T, class R>
+static R ComputeLogicalRoundingResult(T a, T n) {
+ constexpr T NINF = -std::numeric_limits<T>::infinity();
+ constexpr T PINF = std::numeric_limits<T>::infinity();
+ constexpr long double MP =
+ static_cast<long double>(std::numeric_limits<R>::max());
+
+ if (NINF <= a && a <= 0.0) {
+ return 0;
+ } else if (0.0 < a && a <= MP) {
+ return static_cast<R>(n);
+ } else if (MP < a && a <= PINF) {
+ return std::numeric_limits<R>::max();
+ } else if (std::isnan(a)) {
+ return 0;
+ }
+ UNIMPLEMENTED();
+ return 0;
+}
+
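As a standalone illustration (not V8 code) of the saturation rules these helpers encode for the unsigned case, assuming n is the value already rounded according to the requested mode: NaN and values at or below zero map to 0, values above the destination maximum clamp to that maximum, and everything else is a plain cast of n.

#include <cmath>
#include <cstdint>
#include <limits>

// Example: a = 3.7, n = 4.0 -> 4; a = -1.5 -> 0; a = 5e9 -> 4294967295;
// a = NaN -> 0.
uint32_t SaturatingToUint32(double a, double n) {
  if (std::isnan(a) || a <= 0.0) return 0;
  if (a > static_cast<double>(std::numeric_limits<uint32_t>::max())) {
    return std::numeric_limits<uint32_t>::max();
  }
  return static_cast<uint32_t>(n);
}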
void Simulator::set_low_register(int reg, uint32_t value) {
uint64_t shifted_val = static_cast<uint64_t>(value);
uint64_t orig_val = static_cast<uint64_t>(registers_[reg]);
@@ -3418,11 +3473,10 @@ EVALUATE(VPKLS) {
template <class S, class D>
void VectorUnpackHigh(Simulator* sim, int dst, int src) {
constexpr size_t kItemCount = kSimd128Size / sizeof(D);
- D value = 0;
- for (size_t i = 0; i < kItemCount; i++) {
- value = sim->get_simd_register_by_lane<S>(src, i + kItemCount);
- sim->set_simd_register_by_lane<D>(dst, i, value);
- }
+ D temps[kItemCount] = {0};
+ // Read lanes into temporaries first in case src and dst are the same register.
+ FOR_EACH_LANE(i, D) { temps[i] = sim->get_simd_register_by_lane<S>(src, i); }
+ FOR_EACH_LANE(i, D) { sim->set_simd_register_by_lane<D>(dst, i, temps[i]); }
}
#define CASE(i, S, D) \
@@ -3461,13 +3515,121 @@ EVALUATE(VUPLH) {
}
#undef CASE
+template <class S>
+void VectorPopulationCount(Simulator* sim, int dst, int src) {
+ FOR_EACH_LANE(i, S) {
+ sim->set_simd_register_by_lane<S>(
+ dst, i,
+ base::bits::CountPopulation(sim->get_simd_register_by_lane<S>(src, i)));
+ }
+}
+
+#define CASE(i, S) \
+ case i: \
+ VectorPopulationCount<S>(this, r1, r2); \
+ break;
+EVALUATE(VPOPCT) {
+ DCHECK_OPCODE(VPOPCT);
+ DECODE_VRR_A_INSTRUCTION(r1, r2, m5, m4, m3);
+ USE(m5);
+ USE(m4);
+ switch (m3) {
+ CASE(0, uint8_t);
+ default:
+ UNREACHABLE();
+ }
+ return length;
+}
+#undef CASE
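A minimal standalone sketch of what the byte-element case (m3 == 0) computes: one population count per 8-bit lane of the 128-bit register.

#include <array>
#include <bitset>
#include <cstddef>
#include <cstdint>

// Each destination byte receives the number of set bits in the corresponding
// source byte, e.g. 0xF0 -> 4, 0xFF -> 8, 0x00 -> 0.
std::array<uint8_t, 16> PopcountBytes(const std::array<uint8_t, 16>& src) {
  std::array<uint8_t, 16> dst{};
  for (std::size_t i = 0; i < src.size(); ++i) {
    dst[i] = static_cast<uint8_t>(std::bitset<8>(src[i]).count());
  }
  return dst;
}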
+
+#define CASE(i, S, D) \
+ case i: { \
+ FOR_EACH_LANE(index, S) { \
+ set_simd_register_by_lane<D>( \
+ r1, index, static_cast<D>(get_simd_register_by_lane<S>(r2, index))); \
+ } \
+ break; \
+ }
+EVALUATE(VCDG) {
+ DCHECK_OPCODE(VCDG);
+ DECODE_VRR_A_INSTRUCTION(r1, r2, m5, m4, m3);
+ USE(m4);
+ USE(m5);
+ switch (m3) {
+ CASE(2, int32_t, float);
+ CASE(3, int64_t, double);
+ default:
+ UNREACHABLE();
+ }
+ return length;
+}
+
+EVALUATE(VCDLG) {
+ DCHECK_OPCODE(VCDLG);
+ DECODE_VRR_A_INSTRUCTION(r1, r2, m5, m4, m3);
+ USE(m4);
+ USE(m5);
+ switch (m3) {
+ CASE(2, uint32_t, float);
+ CASE(3, uint64_t, double);
+ default:
+ UNREACHABLE();
+ }
+ return length;
+}
+#undef CASE
+
+#define CASE(i, S, D, type) \
+ case i: { \
+ FOR_EACH_LANE(index, S) { \
+ S a = get_simd_register_by_lane<S>(r2, index); \
+ S n = ComputeRounding<S>(a, m5); \
+ set_simd_register_by_lane<D>( \
+ r1, index, \
+ static_cast<D>(Compute##type##RoundingResult<S, D>(a, n))); \
+ } \
+ break; \
+ }
+EVALUATE(VCGD) {
+ DCHECK_OPCODE(VCGD);
+ DECODE_VRR_A_INSTRUCTION(r1, r2, m5, m4, m3);
+ USE(m4);
+ switch (m3) {
+ CASE(2, float, int32_t, Signed);
+ CASE(3, double, int64_t, Signed);
+ default:
+ UNREACHABLE();
+ }
+ return length;
+}
+
+EVALUATE(VCLGD) {
+ DCHECK_OPCODE(VCLGD);
+ DECODE_VRR_A_INSTRUCTION(r1, r2, m5, m4, m3);
+ USE(m4);
+ switch (m3) {
+ CASE(2, float, uint32_t, Logical);
+ CASE(3, double, uint64_t, Logical);
+ default:
+ UNREACHABLE();
+ }
+ return length;
+}
+#undef CASE
+
template <class S, class D>
void VectorUnpackLow(Simulator* sim, int dst, int src) {
constexpr size_t kItemCount = kSimd128Size / sizeof(D);
D temps[kItemCount] = {0};
// Read lanes into temporaries first in case src and dst are the same register.
- FOR_EACH_LANE(i, D) { temps[i] = sim->get_simd_register_by_lane<S>(src, i); }
- FOR_EACH_LANE(i, D) { sim->set_simd_register_by_lane<D>(dst, i, temps[i]); }
+ // Using the "false" argument here to make sure we use the "Low" side of the
+ // Simd register, being simulated by the LSB in memory.
+ FOR_EACH_LANE(i, D) {
+ temps[i] = sim->get_simd_register_by_lane<S>(src, i, false);
+ }
+ FOR_EACH_LANE(i, D) {
+ sim->set_simd_register_by_lane<D>(dst, i, temps[i], false);
+ }
}
#define CASE(i, S, D) \
@@ -3714,6 +3876,7 @@ EVALUATE(VPERM) {
DECODE_VRR_E_INSTRUCTION(r1, r2, r3, r4, m6, m5);
USE(m5);
USE(m6);
+ int8_t temp[kSimd128Size] = {0};
for (int i = 0; i < kSimd128Size; i++) {
int8_t lane_num = get_simd_register_by_lane<int8_t>(r4, i);
// Get the five least significant bits.
@@ -3723,8 +3886,10 @@ EVALUATE(VPERM) {
lane_num = lane_num - kSimd128Size;
reg = r3;
}
- int8_t result = get_simd_register_by_lane<int8_t>(reg, lane_num);
- set_simd_register_by_lane<int8_t>(r1, i, result);
+ temp[i] = get_simd_register_by_lane<int8_t>(reg, lane_num);
+ }
+ for (int i = 0; i < kSimd128Size; i++) {
+ set_simd_register_by_lane<int8_t>(r1, i, temp[i]);
}
return length;
}
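For reference, a standalone sketch of the permute with the staging buffer this change introduces: results are gathered into a temporary before any write, so the destination may alias either source. Names here are illustrative only.

#include <array>
#include <cstdint>

using Bytes16 = std::array<int8_t, 16>;

void PermuteBytes(Bytes16* dst, const Bytes16& src_a, const Bytes16& src_b,
                  const Bytes16& selector) {
  Bytes16 temp{};
  for (int i = 0; i < 16; ++i) {
    int lane = selector[i] & 0x1F;  // only the five least significant bits
    temp[i] = lane < 16 ? src_a[lane] : src_b[lane - 16];
  }
  *dst = temp;  // written only after every source lane has been read
}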
@@ -5382,11 +5547,13 @@ EVALUATE(SRL) {
DCHECK_OPCODE(SRL);
DECODE_RS_A_INSTRUCTION_NO_R3(r1, b2, d2);
// only takes rightmost 6bits
- int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
- int shiftBits = (b2_val + d2) & 0x3F;
+ uint32_t b2_val = b2 == 0 ? 0 : get_low_register<uint32_t>(b2);
+ uint32_t shiftBits = (b2_val + d2) & 0x3F;
uint32_t r1_val = get_low_register<uint32_t>(r1);
uint32_t alu_out = 0;
- alu_out = r1_val >> shiftBits;
+ if (shiftBits < 32u) {
+ alu_out = r1_val >> shiftBits;
+ }
set_low_register(r1, alu_out);
return length;
}
@@ -5395,11 +5562,13 @@ EVALUATE(SLL) {
DCHECK_OPCODE(SLL);
DECODE_RS_A_INSTRUCTION_NO_R3(r1, b2, d2)
// only takes rightmost 6bits
- int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
- int shiftBits = (b2_val + d2) & 0x3F;
+ uint32_t b2_val = b2 == 0 ? 0 : get_low_register<uint32_t>(b2);
+ uint32_t shiftBits = (b2_val + d2) & 0x3F;
uint32_t r1_val = get_low_register<uint32_t>(r1);
uint32_t alu_out = 0;
- alu_out = r1_val << shiftBits;
+ if (shiftBits < 32u) {
+ alu_out = r1_val << shiftBits;
+ }
set_low_register(r1, alu_out);
return length;
}
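The guards added above avoid relying on C++ shift behaviour, which is undefined for counts greater than or equal to the operand width, while keeping the result of 0 that the patched handlers produce for logical shifts of 32 to 63 bits. A minimal standalone sketch of the same pattern:

#include <cstdint>

// 32-bit logical right shift driven by a 6-bit amount; amounts of 32..63
// yield 0 explicitly instead of invoking undefined behaviour.
uint32_t LogicalShiftRight32(uint32_t value, uint32_t amount) {
  uint32_t shift = amount & 0x3F;  // only the rightmost six bits are used
  return shift < 32u ? (value >> shift) : 0u;
}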
@@ -5411,9 +5580,11 @@ EVALUATE(SRA) {
int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
int shiftBits = (b2_val + d2) & 0x3F;
int32_t r1_val = get_low_register<int32_t>(r1);
- int32_t alu_out = 0;
+ int32_t alu_out = -1;
bool isOF = false;
- alu_out = r1_val >> shiftBits;
+ if (shiftBits < 32) {
+ alu_out = r1_val >> shiftBits;
+ }
set_low_register(r1, alu_out);
SetS390ConditionCode<int32_t>(alu_out, 0);
SetS390OverflowCode(isOF);
@@ -5430,7 +5601,9 @@ EVALUATE(SLA) {
int32_t alu_out = 0;
bool isOF = false;
isOF = CheckOverflowForShiftLeft(r1_val, shiftBits);
- alu_out = r1_val << shiftBits;
+ if (shiftBits < 32) {
+ alu_out = r1_val << shiftBits;
+ }
set_low_register(r1, alu_out);
SetS390ConditionCode<int32_t>(alu_out, 0);
SetS390OverflowCode(isOF);
@@ -7411,36 +7584,6 @@ static int ComputeSignedRoundingConditionCode(T a, T n) {
return 0;
}
-template <class T, class R>
-static R ComputeSignedRoundingResult(T a, T n) {
- constexpr T NINF = -std::numeric_limits<T>::infinity();
- constexpr T PINF = std::numeric_limits<T>::infinity();
- constexpr long double MN =
- static_cast<long double>(std::numeric_limits<R>::min());
- constexpr long double MP =
- static_cast<long double>(std::numeric_limits<R>::max());
-
- if (NINF <= a && a < MN && n < MN) {
- return std::numeric_limits<R>::min();
- } else if (NINF < a && a < MN && n == MN) {
- return std::numeric_limits<R>::min();
- } else if (MN <= a && a < 0.0) {
- return static_cast<R>(n);
- } else if (a == 0.0) {
- return 0;
- } else if (0.0 < a && a <= MP) {
- return static_cast<R>(n);
- } else if (MP < a && a <= PINF && n == MP) {
- return std::numeric_limits<R>::max();
- } else if (MP < a && a <= PINF && n > MP) {
- return std::numeric_limits<R>::max();
- } else if (std::isnan(a)) {
- return std::numeric_limits<R>::min();
- }
- UNIMPLEMENTED();
- return 0;
-}
-
EVALUATE(CFDBRA) {
DCHECK_OPCODE(CFDBRA);
DECODE_RRF_E_INSTRUCTION(r1, r2, m3, m4);
@@ -7531,26 +7674,6 @@ static int ComputeLogicalRoundingConditionCode(T a, T n) {
return 0;
}
-template <class T, class R>
-static R ComputeLogicalRoundingResult(T a, T n) {
- constexpr T NINF = -std::numeric_limits<T>::infinity();
- constexpr T PINF = std::numeric_limits<T>::infinity();
- constexpr long double MP =
- static_cast<long double>(std::numeric_limits<R>::max());
-
- if (NINF <= a && a <= 0.0) {
- return 0;
- } else if (0.0 < a && a <= MP) {
- return static_cast<R>(n);
- } else if (MP < a && a <= PINF) {
- return std::numeric_limits<R>::max();
- } else if (std::isnan(a)) {
- return 0;
- }
- UNIMPLEMENTED();
- return 0;
-}
-
EVALUATE(CLFEBR) {
DCHECK_OPCODE(CLFEBR);
DECODE_RRF_E_INSTRUCTION(r1, r2, m3, m4);
@@ -10418,9 +10541,11 @@ EVALUATE(SRAK) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int shiftBits = (b2_val + d2) & 0x3F;
int32_t r3_val = get_low_register<int32_t>(r3);
- int32_t alu_out = 0;
+ int32_t alu_out = -1;
bool isOF = false;
- alu_out = r3_val >> shiftBits;
+ if (shiftBits < 32) {
+ alu_out = r3_val >> shiftBits;
+ }
set_low_register(r1, alu_out);
SetS390ConditionCode<int32_t>(alu_out, 0);
SetS390OverflowCode(isOF);
@@ -10438,7 +10563,9 @@ EVALUATE(SLAK) {
int32_t alu_out = 0;
bool isOF = false;
isOF = CheckOverflowForShiftLeft(r3_val, shiftBits);
- alu_out = r3_val << shiftBits;
+ if (shiftBits < 32) {
+ alu_out = r3_val << shiftBits;
+ }
set_low_register(r1, alu_out);
SetS390ConditionCode<int32_t>(alu_out, 0);
SetS390OverflowCode(isOF);
@@ -10454,12 +10581,14 @@ EVALUATE(SRLK) {
// unchanged in general register R3.
DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
// only takes rightmost 6 bits
- int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
- int shiftBits = (b2_val + d2) & 0x3F;
+ uint32_t b2_val = b2 == 0 ? 0 : get_low_register<uint32_t>(b2);
+ uint32_t shiftBits = (b2_val + d2) & 0x3F;
// unsigned
uint32_t r3_val = get_low_register<uint32_t>(r3);
uint32_t alu_out = 0;
- alu_out = r3_val >> shiftBits;
+ if (shiftBits < 32u) {
+ alu_out = r3_val >> shiftBits;
+ }
set_low_register(r1, alu_out);
return length;
}
@@ -10473,12 +10602,14 @@ EVALUATE(SLLK) {
// unchanged in general register R3.
DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
// only takes rightmost 6 bits
- int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
- int shiftBits = (b2_val + d2) & 0x3F;
+ uint32_t b2_val = b2 == 0 ? 0 : get_low_register<uint32_t>(b2);
+ uint32_t shiftBits = (b2_val + d2) & 0x3F;
// unsigned
uint32_t r3_val = get_low_register<uint32_t>(r3);
uint32_t alu_out = 0;
- alu_out = r3_val << shiftBits;
+ if (shiftBits < 32u) {
+ alu_out = r3_val << shiftBits;
+ }
set_low_register(r1, alu_out);
return length;
}
diff --git a/deps/v8/src/execution/s390/simulator-s390.h b/deps/v8/src/execution/s390/simulator-s390.h
index b509b9f926..6420c82273 100644
--- a/deps/v8/src/execution/s390/simulator-s390.h
+++ b/deps/v8/src/execution/s390/simulator-s390.h
@@ -137,26 +137,21 @@ class Simulator : public SimulatorBase {
void set_high_register(int reg, uint32_t value);
double get_double_from_register_pair(int reg);
+
+ // Unlike Integer values, Floating Point values are located on the left most
+ // side of a native 64 bit register. As FP registers are a subset of vector
+ // registers, 64 and 32 bit FP values need to be located on the first lane
+ // (lane number 0) of a vector register.
template <class T>
T get_fpr(int dreg) {
DCHECK(dreg >= 0 && dreg < kNumFPRs);
- if (sizeof(T) == 8) {
- return get_simd_register_by_lane<T>(dreg, 0);
- } else {
- DCHECK_EQ(sizeof(T), 4);
- return get_simd_register_by_lane<T>(dreg, 1);
- }
+ return get_simd_register_by_lane<T>(dreg, 0);
}
template <class T>
void set_fpr(int dreg, const T val) {
DCHECK(dreg >= 0 && dreg < kNumFPRs);
- if (sizeof(T) == 8) {
- set_simd_register_by_lane(dreg, 0, val);
- } else {
- DCHECK_EQ(sizeof(T), 4);
- set_simd_register_by_lane(dreg, 1, val);
- }
+ set_simd_register_by_lane<T>(dreg, 0, val);
}
// Special case of set_register and get_register to access the raw PC value.
@@ -412,8 +407,27 @@ class Simulator : public SimulatorBase {
set_simd_register_by_lane(reg, 0, v);
}
+ // Vector register lane numbers on IBM machines are reversed compared to
+ // x64. For example, doing an I32x4 extract_lane with lane number 0 on x64
+ // will be equal to lane number 3 on IBM machines. Vector registers are only
+ // used for compiling Wasm code at the moment, and Wasm enforces little-endian
+ // byte order. On s390 native, we manually reverse bytes whenever values are
+ // loaded/stored between memory and a Simd register. On the simulator however, we
+ // do not reverse the bytes and data is just copied as is from one memory
+ // location to another location which represents a register. To keep the Wasm
+ // simulation accurate, we need to make sure accessing a lane is correctly
+ // simulated and as such we reverse the lane number on the getters and setters
+ // below. We need to be careful when getting/setting values on the Low or High
+ // side of a simulated register. In the simulation, "Low" is equal to the MSB
+ // and "High" is equal to the LSB on memory. "force_ibm_lane_numbering" could
+ // be used to disable automatic lane number reversal and help with accessing
+ // the Low or High side of a simulated register.
template <class T>
- T get_simd_register_by_lane(int reg, int lane) {
+ T get_simd_register_by_lane(int reg, int lane,
+ bool force_ibm_lane_numbering = true) {
+ if (force_ibm_lane_numbering) {
+ lane = (kSimd128Size / sizeof(T)) - 1 - lane;
+ }
CHECK_LE(lane, kSimd128Size / sizeof(T));
CHECK_LT(reg, kNumFPRs);
CHECK_GE(lane, 0);
@@ -422,7 +436,11 @@ class Simulator : public SimulatorBase {
}
template <class T>
- void set_simd_register_by_lane(int reg, int lane, const T& value) {
+ void set_simd_register_by_lane(int reg, int lane, const T& value,
+ bool force_ibm_lane_numbering = true) {
+ if (force_ibm_lane_numbering) {
+ lane = (kSimd128Size / sizeof(T)) - 1 - lane;
+ }
CHECK_LE(lane, kSimd128Size / sizeof(T));
CHECK_LT(reg, kNumFPRs);
CHECK_GE(lane, 0);
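As a concrete illustration of the reversal described in the comment above: with four 32-bit lanes, x64 lane 0 corresponds to IBM lane 3 and lane 1 to lane 2. A standalone sketch of the index mapping:

#include <cstddef>

constexpr std::size_t kSimd128Bytes = 16;

// Maps an x64-style lane index to the IBM lane index for elements of the
// given size, mirroring lane = (kSimd128Size / sizeof(T)) - 1 - lane.
constexpr std::size_t ReverseLane(std::size_t lane, std::size_t elem_size) {
  return kSimd128Bytes / elem_size - 1 - lane;
}

static_assert(ReverseLane(0, 4) == 3, "I32x4 lane 0 maps to IBM lane 3");
static_assert(ReverseLane(3, 4) == 0, "and lane 3 maps back to lane 0");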
diff --git a/deps/v8/src/execution/simulator-base.h b/deps/v8/src/execution/simulator-base.h
index af56e9d967..9edc60a3f3 100644
--- a/deps/v8/src/execution/simulator-base.h
+++ b/deps/v8/src/execution/simulator-base.h
@@ -88,9 +88,9 @@ class SimulatorBase {
static typename std::enable_if<std::is_integral<T>::value, intptr_t>::type
ConvertArg(T arg) {
static_assert(sizeof(T) <= sizeof(intptr_t), "type bigger than ptrsize");
-#if V8_TARGET_ARCH_MIPS64
- // The MIPS64 calling convention is to sign extend all values, even unsigned
- // ones.
+#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64
+ // The MIPS64 and RISCV64 calling convention is to sign extend all values,
+ // even unsigned ones.
using signed_t = typename std::make_signed<T>::type;
return static_cast<intptr_t>(static_cast<signed_t>(arg));
#else
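To make the convention concrete: even an unsigned 32-bit argument is reinterpreted as signed before widening, so its top bit is replicated into the upper half of the 64-bit slot. A standalone sketch, assuming typical two's-complement behaviour:

#include <cstdint>

// Sign-extends a 32-bit argument into a pointer-sized slot, regardless of
// the declared signedness of the C++ type.
intptr_t WidenArg(uint32_t arg) {
  return static_cast<intptr_t>(static_cast<int32_t>(arg));
}
// WidenArg(0xFFFFFFFFu) == -1 on a 64-bit target (all upper bits set),
// rather than 0x00000000FFFFFFFF.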
@@ -124,6 +124,7 @@ class SimulatorBase {
// - V8_TARGET_ARCH_PPC: svc (Supervisor Call)
// - V8_TARGET_ARCH_PPC64: svc (Supervisor Call)
// - V8_TARGET_ARCH_S390: svc (Supervisor Call)
+// - V8_TARGET_ARCH_RISCV64: ecall (Supervisor Call)
class Redirection {
public:
Redirection(Address external_function, ExternalReference::Type type);
diff --git a/deps/v8/src/execution/simulator.h b/deps/v8/src/execution/simulator.h
index 74763474c6..de322780b0 100644
--- a/deps/v8/src/execution/simulator.h
+++ b/deps/v8/src/execution/simulator.h
@@ -26,6 +26,8 @@
#include "src/execution/mips64/simulator-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/execution/s390/simulator-s390.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/execution/riscv64/simulator-riscv64.h"
#else
#error Unsupported target architecture.
#endif
diff --git a/deps/v8/src/execution/x64/frame-constants-x64.cc b/deps/v8/src/execution/x64/frame-constants-x64.cc
index 716a6d7082..fb242505dd 100644
--- a/deps/v8/src/execution/x64/frame-constants-x64.cc
+++ b/deps/v8/src/execution/x64/frame-constants-x64.cc
@@ -17,7 +17,7 @@ Register JavaScriptFrame::fp_register() { return rbp; }
Register JavaScriptFrame::context_register() { return rsi; }
Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
-int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
+int UnoptimizedFrameConstants::RegisterStackSlotCount(int register_count) {
return register_count;
}
diff --git a/deps/v8/src/flags/flag-definitions.h b/deps/v8/src/flags/flag-definitions.h
index b98d6afbb4..1aa25fa832 100644
--- a/deps/v8/src/flags/flag-definitions.h
+++ b/deps/v8/src/flags/flag-definitions.h
@@ -163,6 +163,13 @@ struct MaybeBoolFlag {
#define ENABLE_CONTROL_FLOW_INTEGRITY_BOOL false
#endif
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#define ENABLE_SPARKPLUG true
+#else
+// TODO(v8:11421): Enable Sparkplug for other architectures
+#define ENABLE_SPARKPLUG false
+#endif
+
// Supported ARM configurations are:
// "armv6": ARMv6 + VFPv2
// "armv7": ARMv7 + VFPv3-D32 + NEON
@@ -237,11 +244,8 @@ DEFINE_BOOL(allow_overwriting_for_next_flag, false,
// Flags for language modes and experimental language features.
DEFINE_BOOL(use_strict, false, "enforce strict mode")
-DEFINE_BOOL(es_staging, false,
- "enable test-worthy harmony features (for internal use only)")
DEFINE_BOOL(harmony, false, "enable all completed harmony features")
DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
-DEFINE_IMPLICATION(es_staging, harmony)
// Enabling FinalizationRegistry#cleanupSome also enables weak refs
DEFINE_IMPLICATION(harmony_weak_refs_with_cleanup_some, harmony_weak_refs)
@@ -252,21 +256,22 @@ DEFINE_IMPLICATION(harmony_weak_refs_with_cleanup_some, harmony_weak_refs)
V(harmony_regexp_sequence, "RegExp Unicode sequence properties") \
V(harmony_weak_refs_with_cleanup_some, \
"harmony weak references with FinalizationRegistry.prototype.cleanupSome") \
- V(harmony_regexp_match_indices, "harmony regexp match indices") \
V(harmony_import_assertions, "harmony import assertions")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_INPROGRESS(V) \
- HARMONY_INPROGRESS_BASE(V) \
+#define HARMONY_INPROGRESS(V) \
+ HARMONY_INPROGRESS_BASE(V) \
V(harmony_intl_displaynames_date_types, "Intl.DisplayNames date types")
#else
#define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V)
#endif
// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED_BASE(V) \
- V(harmony_top_level_await, "harmony top level await") \
- V(harmony_relative_indexing_methods, "harmony relative indexing methods")
+#define HARMONY_STAGED_BASE(V) \
+ V(harmony_top_level_await, "harmony top level await") \
+ V(harmony_relative_indexing_methods, "harmony relative indexing methods") \
+ V(harmony_private_brand_checks, "harmony private brand checks") \
+ V(harmony_class_static_blocks, "harmony static initializer blocks")
#ifdef V8_INTL_SUPPORT
#define HARMONY_STAGED(V) \
@@ -284,7 +289,8 @@ DEFINE_IMPLICATION(harmony_weak_refs_with_cleanup_some, harmony_weak_refs)
V(harmony_weak_refs, "harmony weak references") \
V(harmony_string_replaceall, "harmony String.prototype.replaceAll") \
V(harmony_logical_assignment, "harmony logical assignment") \
- V(harmony_atomics_waitasync, "harmony Atomics.waitAsync")
+ V(harmony_atomics_waitasync, "harmony Atomics.waitAsync") \
+ V(harmony_regexp_match_indices, "harmony regexp match indices")
#ifdef V8_INTL_SUPPORT
#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
@@ -424,6 +430,10 @@ DEFINE_BOOL(future, FUTURE_BOOL,
DEFINE_WEAK_IMPLICATION(future, write_protect_code_memory)
DEFINE_WEAK_IMPLICATION(future, finalize_streaming_on_background)
DEFINE_WEAK_IMPLICATION(future, super_ic)
+DEFINE_WEAK_IMPLICATION(future, turbo_inline_js_wasm_calls)
+#if ENABLE_SPARKPLUG
+DEFINE_WEAK_IMPLICATION(future, sparkplug)
+#endif
// Flags for jitless
DEFINE_BOOL(jitless, V8_LITE_BOOL,
@@ -436,6 +446,11 @@ DEFINE_NEG_IMPLICATION(jitless, track_field_types)
DEFINE_NEG_IMPLICATION(jitless, track_heap_object_fields)
// Regexps are interpreted.
DEFINE_IMPLICATION(jitless, regexp_interpret_all)
+#if ENABLE_SPARKPLUG
+// No Sparkplug compilation.
+DEFINE_NEG_IMPLICATION(jitless, sparkplug)
+DEFINE_NEG_IMPLICATION(jitless, always_sparkplug)
+#endif
// asm.js validation is disabled since it triggers wasm code generation.
DEFINE_NEG_IMPLICATION(jitless, validate_asm)
// --jitless also implies --no-expose-wasm, see InitializeOncePerProcessImpl.
@@ -497,14 +512,20 @@ DEFINE_BOOL_READONLY(enable_sealed_frozen_elements_kind, true,
DEFINE_BOOL(unbox_double_arrays, true, "automatically unbox arrays of doubles")
DEFINE_BOOL_READONLY(string_slices, true, "use string slices")
-DEFINE_INT(interrupt_budget, 144 * KB,
+DEFINE_INT(interrupt_budget, 132 * KB,
"interrupt budget which should be used for the profiler counter")
// Flags for inline caching and feedback vectors.
DEFINE_BOOL(use_ic, true, "use inline caching")
-DEFINE_INT(budget_for_feedback_vector_allocation, 1 * KB,
+DEFINE_INT(budget_for_feedback_vector_allocation, 940,
"The budget in amount of bytecode executed by a function before we "
"decide to allocate feedback vectors")
+DEFINE_INT(scale_factor_for_feedback_allocation, 4,
+ "scale bytecode size for feedback vector allocation.")
+DEFINE_BOOL(feedback_allocation_on_bytecode_size, false,
+ "Instead of a fixed budget for lazy feedback vector allocation, "
+ "scale it based in the bytecode size.")
+DEFINE_IMPLICATION(sparkplug, feedback_allocation_on_bytecode_size)
DEFINE_BOOL(lazy_feedback_allocation, true, "Allocate feedback vectors lazily")
// Flags for Ignition.
@@ -525,9 +546,15 @@ DEFINE_BOOL(stress_lazy_source_positions, false,
"collect lazy source positions immediately after lazy compile")
DEFINE_STRING(print_bytecode_filter, "*",
"filter for selecting which functions to print bytecode")
-#ifdef V8_TRACE_IGNITION
+#ifdef V8_TRACE_UNOPTIMIZED
+DEFINE_BOOL(trace_unoptimized, false,
+ "trace the bytecodes executed by all unoptimized execution")
DEFINE_BOOL(trace_ignition, false,
"trace the bytecodes executed by the ignition interpreter")
+DEFINE_BOOL(trace_baseline_exec, false,
+ "trace the bytecodes executed by the baseline code")
+DEFINE_WEAK_IMPLICATION(trace_unoptimized, trace_ignition)
+DEFINE_WEAK_IMPLICATION(trace_unoptimized, trace_baseline_exec)
#endif
#ifdef V8_TRACE_FEEDBACK_UPDATES
DEFINE_BOOL(
@@ -543,7 +570,6 @@ DEFINE_STRING(trace_ignition_dispatches_output_file, nullptr,
"the file to which the bytecode handler dispatch table is "
"written (by default, the table is not written to a file)")
-DEFINE_BOOL(fast_math, true, "faster (but maybe less accurate) math functions")
DEFINE_BOOL(trace_track_allocation_sites, false,
"trace the tracking of allocation sites")
DEFINE_BOOL(trace_migration, false, "trace object migration")
@@ -554,19 +580,42 @@ DEFINE_BOOL(turboprop, false, "enable experimental turboprop mid-tier compiler")
DEFINE_IMPLICATION(turboprop, turbo_direct_heap_access)
DEFINE_BOOL(turboprop_mid_tier_reg_alloc, true,
"enable mid-tier register allocator for turboprop")
-DEFINE_BOOL(turboprop_as_midtier, false,
- "enable experimental turboprop mid-tier compiler")
-DEFINE_IMPLICATION(turboprop_as_midtier, turboprop)
-DEFINE_VALUE_IMPLICATION(turboprop, interrupt_budget, 15 * KB)
+DEFINE_BOOL(
+ turboprop_as_toptier, false,
+ "enable experimental turboprop compiler without further tierup to turbofan")
+DEFINE_IMPLICATION(turboprop_as_toptier, turboprop)
+DEFINE_VALUE_IMPLICATION(turboprop, interrupt_budget, 14 * KB)
DEFINE_VALUE_IMPLICATION(turboprop, reuse_opt_code_count, 2)
DEFINE_UINT_READONLY(max_minimorphic_map_checks, 4,
"max number of map checks to perform in minimorphic state")
-// Since Turboprop uses much lower value for interrupt budget, we need to wait
-// for a higher number of ticks to tierup to Turbofan roughly match the default.
-// The default of 10 is approximately the ration of TP to TF interrupt budget.
-DEFINE_INT(ticks_scale_factor_for_top_tier, 10,
+// The scale factor determines the interrupt budget when tiering up from
+// Turboprop to TurboFan. The default of 10 is approximately the ratio of
+// Turboprop to the TurboFan interrupt budget.
+DEFINE_INT(interrupt_budget_scale_factor_for_top_tier, 10,
"scale factor for profiler ticks when tiering up from midtier")
+// Flags for Sparkplug
+#undef FLAG
+#if ENABLE_SPARKPLUG
+#define FLAG FLAG_FULL
+#else
+#define FLAG FLAG_READONLY
+#endif
+DEFINE_BOOL(sparkplug, false, "enable experimental Sparkplug baseline compiler")
+DEFINE_BOOL(always_sparkplug, false, "directly tier up to Sparkplug code")
+#if ENABLE_SPARKPLUG
+DEFINE_IMPLICATION(always_sparkplug, sparkplug)
+#endif
+DEFINE_STRING(sparkplug_filter, "*", "filter for Sparkplug baseline compiler")
+DEFINE_BOOL(trace_baseline, false, "trace baseline compilation")
+#if !defined(V8_OS_MACOSX) || !defined(V8_HOST_ARCH_ARM64)
+// Don't disable --write-protect-code-memory on Apple Silicon.
+DEFINE_NEG_IMPLICATION(sparkplug, write_protect_code_memory)
+#endif
+
+#undef FLAG
+#define FLAG FLAG_FULL
+
// Flags for concurrent recompilation.
DEFINE_BOOL(concurrent_recompilation, true,
"optimizing hot functions asynchronously on a separate thread")
@@ -580,8 +629,14 @@ DEFINE_BOOL(block_concurrent_recompilation, false,
"block queued jobs until released")
DEFINE_BOOL(concurrent_inlining, false,
"run optimizing compiler's inlining phase on a separate thread")
+DEFINE_BOOL(stress_concurrent_inlining, false,
+ "makes concurrent inlining more likely to trigger in tests")
DEFINE_BOOL(turbo_direct_heap_access, false,
"access kNeverSerialized objects directly from the heap")
+DEFINE_IMPLICATION(stress_concurrent_inlining, concurrent_inlining)
+DEFINE_NEG_IMPLICATION(stress_concurrent_inlining, lazy_feedback_allocation)
+DEFINE_WEAK_VALUE_IMPLICATION(stress_concurrent_inlining, interrupt_budget,
+ 15 * KB)
DEFINE_IMPLICATION(concurrent_inlining, turbo_direct_heap_access)
DEFINE_INT(max_serializer_nesting, 25,
"maximum levels for nesting child serializers")
@@ -660,15 +715,16 @@ DEFINE_BOOL(turbo_splitting, true, "split nodes during scheduling in TurboFan")
DEFINE_BOOL(function_context_specialization, false,
"enable function context specialization in TurboFan")
DEFINE_BOOL(turbo_inlining, true, "enable inlining in TurboFan")
-DEFINE_INT(max_inlined_bytecode_size, 500,
+DEFINE_INT(max_inlined_bytecode_size, 460,
"maximum size of bytecode for a single inlining")
-DEFINE_INT(max_inlined_bytecode_size_cumulative, 1000,
- "maximum cumulative size of bytecode considered for inlining")
-DEFINE_INT(max_inlined_bytecode_size_absolute, 5000,
+DEFINE_INT(max_inlined_bytecode_size_cumulative, 920,
"maximum cumulative size of bytecode considered for inlining")
-DEFINE_FLOAT(reserve_inline_budget_scale_factor, 1.2,
- "maximum cumulative size of bytecode considered for inlining")
-DEFINE_INT(max_inlined_bytecode_size_small, 30,
+DEFINE_INT(max_inlined_bytecode_size_absolute, 4600,
+ "maximum absolute size of bytecode considered for inlining")
+DEFINE_FLOAT(
+ reserve_inline_budget_scale_factor, 1.2,
+ "scale factor of bytecode size used to calculate the inlining budget")
+DEFINE_INT(max_inlined_bytecode_size_small, 27,
"maximum size of bytecode considered for small function inlining")
DEFINE_INT(max_optimized_bytecode_size, 60 * KB,
"maximum bytecode size to "
@@ -735,6 +791,9 @@ DEFINE_INT(reuse_opt_code_count, 0,
DEFINE_BOOL(turbo_dynamic_map_checks, true,
"use dynamic map checks when generating code for property accesses "
"if all handlers in an IC are the same for turboprop and NCI")
+DEFINE_BOOL(turbo_compress_translation_arrays, false,
+ "compress translation arrays (experimental)")
+DEFINE_BOOL(turbo_inline_js_wasm_calls, false, "inline JS->Wasm calls")
// Native context independent (NCI) code.
DEFINE_BOOL(turbo_nci, false,
@@ -742,20 +801,10 @@ DEFINE_BOOL(turbo_nci, false,
// TODO(v8:8888): Temporary until NCI caching is implemented or
// feedback collection is made unconditional.
DEFINE_IMPLICATION(turbo_nci, turbo_collect_feedback_in_generic_lowering)
-DEFINE_BOOL(turbo_nci_as_midtier, false,
- "insert NCI as a midtier compiler for testing purposes.")
DEFINE_BOOL(print_nci_code, false, "print native context independent code.")
DEFINE_BOOL(trace_turbo_nci, false, "trace native context independent code.")
DEFINE_BOOL(turbo_collect_feedback_in_generic_lowering, true,
"enable experimental feedback collection in generic lowering.")
-// TODO(jgruber,v8:8888): Remove this flag once we've settled on a codegen
-// strategy.
-DEFINE_BOOL(turbo_nci_delayed_codegen, true,
- "delay NCI codegen to reduce useless compilation work.")
-// TODO(jgruber,v8:8888): Remove this flag once we've settled on an ageing
-// strategy.
-DEFINE_BOOL(turbo_nci_cache_ageing, false,
- "enable ageing of the NCI code cache.")
// TODO(jgruber,v8:8888): Remove this flag once we've settled on an ageing
// strategy.
DEFINE_BOOL(isolate_script_cache_ageing, true,
@@ -780,7 +829,11 @@ DEFINE_BOOL(untrusted_code_mitigations, V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS,
DEFINE_BOOL(wasm_generic_wrapper, true,
"allow use of the generic js-to-wasm wrapper instead of "
"per-signature wrappers")
+#ifdef V8_ENABLE_WEBASSEMBLY
DEFINE_BOOL(expose_wasm, true, "expose wasm interface to JavaScript")
+#else
+DEFINE_BOOL_READONLY(expose_wasm, false, "expose wasm interface to JavaScript")
+#endif
DEFINE_INT(wasm_num_compilation_tasks, 128,
"maximum number of parallel compilation tasks for wasm")
DEFINE_DEBUG_BOOL(trace_wasm_native_heap, false,
@@ -820,8 +873,11 @@ DEFINE_BOOL(liftoff_only, false,
DEFINE_IMPLICATION(liftoff_only, liftoff)
DEFINE_NEG_IMPLICATION(liftoff_only, wasm_tier_up)
DEFINE_NEG_IMPLICATION(fuzzing, liftoff_only)
-DEFINE_BOOL(experimental_liftoff_extern_ref, false,
+DEFINE_BOOL(experimental_liftoff_extern_ref, true,
"enable support for externref in Liftoff")
+DEFINE_DEBUG_BOOL(
+ enable_testing_opcode_in_wasm, false,
+ "enables a testing opcode in wasm that is only implemented in TurboFan")
// We can't tier up (from Liftoff to TurboFan) in single-threaded mode, hence
// disable tier up in that configuration for now.
DEFINE_NEG_IMPLICATION(single_threaded, wasm_tier_up)
@@ -837,9 +893,9 @@ DEFINE_INT(wasm_tier_mask_for_testing, 0,
DEFINE_BOOL(validate_asm, true, "validate asm.js modules before compiling")
DEFINE_BOOL(suppress_asm_messages, false,
"don't emit asm.js related messages (for golden file testing)")
-DEFINE_BOOL(trace_asm_time, false, "log asm.js timing info to the console")
+DEFINE_BOOL(trace_asm_time, false, "print asm.js timing info to the console")
DEFINE_BOOL(trace_asm_scanner, false,
- "log tokens encountered by asm.js scanner")
+ "print tokens encountered by asm.js scanner")
DEFINE_BOOL(trace_asm_parser, false, "verbose logging of asm.js parse failures")
DEFINE_BOOL(stress_validate_asm, false, "try to validate everything as asm.js")
@@ -877,6 +933,9 @@ DEFINE_BOOL(wasm_stack_checks, true,
DEFINE_BOOL(wasm_math_intrinsics, true,
"intrinsify some Math imports into wasm")
+DEFINE_BOOL(wasm_loop_unrolling, false,
+ "generate and then remove loop exits in wasm turbofan code "
+ "(placeholder for future loop unrolling feature)")
DEFINE_BOOL(wasm_trap_handler, true,
"use signal handlers to catch out of bounds memory access in wasm"
" (currently Linux x86_64 only)")
@@ -896,9 +955,6 @@ DEFINE_DEBUG_BOOL(trace_wasm_lazy_compilation, false,
"trace lazy compilation of wasm functions")
DEFINE_BOOL(wasm_lazy_validation, false,
"enable lazy validation for lazily compiled wasm functions")
-
-DEFINE_BOOL(wasm_grow_shared_memory, true,
- "allow growing shared WebAssembly memory objects")
DEFINE_BOOL(wasm_simd_post_mvp, false,
"allow experimental SIMD operations for prototyping that are not "
"included in the current proposal")
@@ -1017,26 +1073,14 @@ DEFINE_BOOL(concurrent_marking, V8_CONCURRENT_MARKING_BOOL,
"use concurrent marking")
DEFINE_BOOL(concurrent_array_buffer_sweeping, true,
"concurrently sweep array buffers")
-DEFINE_BOOL(concurrent_allocation, true, "concurrently allocate in old space")
DEFINE_BOOL(stress_concurrent_allocation, false,
"start background threads that allocate memory")
-DEFINE_BOOL(local_heaps, true, "allow heap access from background tasks")
-// Since the local_heaps flag is enabled by default, we defined reverse
-// implications to simplify disabling the flag.
-DEFINE_NEG_NEG_IMPLICATION(local_heaps, turbo_direct_heap_access)
-DEFINE_NEG_NEG_IMPLICATION(local_heaps, concurrent_inlining)
-DEFINE_NEG_NEG_IMPLICATION(local_heaps, concurrent_allocation)
-DEFINE_NEG_NEG_IMPLICATION(concurrent_allocation,
- finalize_streaming_on_background)
-DEFINE_NEG_NEG_IMPLICATION(concurrent_allocation, stress_concurrent_allocation)
DEFINE_BOOL(parallel_marking, V8_CONCURRENT_MARKING_BOOL,
"use parallel marking in atomic pause")
DEFINE_INT(ephemeron_fixpoint_iterations, 10,
"number of fixpoint iterations it takes to switch to linear "
"ephemeron algorithm")
DEFINE_BOOL(trace_concurrent_marking, false, "trace concurrent marking")
-DEFINE_BOOL(concurrent_store_buffer, true,
- "use concurrent store buffer processing")
DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
DEFINE_BOOL(parallel_compaction, true, "use parallel compaction")
DEFINE_BOOL(parallel_pointer_update, true,
@@ -1067,8 +1111,6 @@ DEFINE_GENERIC_IMPLICATION(
DEFINE_BOOL(track_retaining_path, false,
"enable support for tracking retaining path")
DEFINE_DEBUG_BOOL(trace_backing_store, false, "trace backing store events")
-DEFINE_BOOL(concurrent_array_buffer_freeing, true,
- "free array buffer allocations on a background thread")
DEFINE_INT(gc_stats, 0, "Used by tracing internally to enable gc statistics")
DEFINE_IMPLICATION(trace_gc_object_stats, track_gc_object_stats)
DEFINE_GENERIC_IMPLICATION(
@@ -1143,12 +1185,8 @@ DEFINE_BOOL(
"reclaim otherwise unreachable unmodified wrapper objects when possible")
// These flags will be removed after experiments. Do not rely on them.
-DEFINE_BOOL(gc_experiment_background_schedule, false,
- "new background GC schedule heuristics")
DEFINE_BOOL(gc_experiment_less_compaction, false,
"less compaction in non-memory reducing mode")
-DEFINE_BOOL(gc_experiment_reduce_concurrent_marking_tasks, false,
- "reduce the number of concurrent marking tasks")
DEFINE_BOOL(disable_abortjs, false, "disables AbortJS runtime function")
@@ -1202,6 +1240,10 @@ DEFINE_BOOL(partial_constant_pool, true,
DEFINE_STRING(sim_arm64_optional_features, "none",
"enable optional features on the simulator for testing: none or "
"all")
+DEFINE_BOOL(debug_riscv, false, "enable debug prints")
+// TODO(RISCV): https://github.com/v8-riscv/v8/issues/330
+DEFINE_BOOL(disable_riscv_constant_pool, true,
+ "disable constant pool (RISCV only)")
// Controlling source positions for Torque/CSA code.
DEFINE_BOOL(enable_source_at_csa_bind, false,
@@ -1280,6 +1322,7 @@ DEFINE_BOOL(trace_opt_verbose, false,
DEFINE_IMPLICATION(trace_opt_verbose, trace_opt)
DEFINE_BOOL(trace_opt_stats, false, "trace optimized compilation statistics")
DEFINE_BOOL(trace_deopt, false, "trace deoptimization")
+DEFINE_BOOL(log_deopt, false, "log deoptimization")
DEFINE_BOOL(trace_deopt_verbose, false, "extra verbose deoptimization tracing")
DEFINE_IMPLICATION(trace_deopt_verbose, trace_deopt)
DEFINE_BOOL(trace_file_names, false,
@@ -1317,6 +1360,10 @@ DEFINE_BOOL(
"print debug messages for side-effect-free debug-evaluate for testing")
DEFINE_BOOL(hard_abort, true, "abort by crashing")
+// disassembler
+DEFINE_BOOL(log_colour, ENABLE_LOG_COLOUR,
+ "When logging, try to use coloured output.")
+
// inspector
DEFINE_BOOL(expose_inspector_scripts, false,
"expose injected-script-source.js for debugging")
@@ -1344,6 +1391,8 @@ DEFINE_BOOL(heap_profiler_use_embedder_graph, true,
"Use the new EmbedderGraph API to get embedder nodes")
DEFINE_INT(heap_snapshot_string_limit, 1024,
"truncate strings to this length in the heap snapshot")
+DEFINE_BOOL(heap_profiler_show_hidden_objects, false,
+ "use 'native' rather than 'hidden' node type in snapshot")
// sampling-heap-profiler.cc
DEFINE_BOOL(sampling_heap_profiler_suppress_randomness, false,
@@ -1353,9 +1402,11 @@ DEFINE_BOOL(sampling_heap_profiler_suppress_randomness, false,
DEFINE_BOOL(use_idle_notification, true,
"Use idle notification to reduce memory footprint.")
// ic.cc
-DEFINE_BOOL(trace_ic, false,
- "trace inline cache state transitions for tools/ic-processor")
-DEFINE_IMPLICATION(trace_ic, log_code)
+DEFINE_BOOL(log_ic, false,
+ "Log inline cache state transitions for tools/ic-processor")
+DEFINE_BOOL(trace_ic, false, "See --log-ic")
+DEFINE_IMPLICATION(trace_ic, log_ic)
+DEFINE_IMPLICATION(log_ic, log_code)
DEFINE_GENERIC_IMPLICATION(
trace_ic, TracingFlags::ic_stats.store(
v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE))
@@ -1367,16 +1418,16 @@ DEFINE_INT(max_valid_polymorphic_map_count, 4,
DEFINE_BOOL(native_code_counters, DEBUG_BOOL,
"generate extra code for manipulating stats counters")
-DEFINE_BOOL(super_ic, false, "use an IC for super property loads")
+DEFINE_BOOL(super_ic, true, "use an IC for super property loads")
// objects.cc
DEFINE_BOOL(thin_strings, true, "Enable ThinString support")
DEFINE_BOOL(trace_prototype_users, false,
"Trace updates to prototype user tracking")
DEFINE_BOOL(trace_for_in_enumerate, false, "Trace for-in enumerate slow-paths")
-DEFINE_BOOL(trace_maps, false, "trace map creation")
-DEFINE_BOOL(trace_maps_details, true, "also log map details")
-DEFINE_IMPLICATION(trace_maps, log_code)
+DEFINE_BOOL(log_maps, false, "Log map creation")
+DEFINE_BOOL(log_maps_details, true, "Also log map details")
+DEFINE_IMPLICATION(log_maps, log_code)
// parser.cc
DEFINE_BOOL(allow_natives_syntax, false, "allow natives syntax")
@@ -1388,13 +1439,14 @@ DEFINE_IMPLICATION(allow_natives_for_differential_fuzzing, fuzzing)
DEFINE_BOOL(parse_only, false, "only parse the sources")
// simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
+#ifdef USE_SIMULATOR
DEFINE_BOOL(trace_sim, false, "Trace simulator execution")
DEFINE_BOOL(debug_sim, false, "Enable debugging the simulator")
DEFINE_BOOL(check_icache, false,
"Check icache flushes in ARM and MIPS simulator")
DEFINE_INT(stop_sim_at, 0, "Simulator stop after x number of instructions")
#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64) || \
- defined(V8_TARGET_ARCH_PPC64)
+ defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_RISCV64)
DEFINE_INT(sim_stack_alignment, 16,
"Stack alignment in bytes in simulator. This must be a power of two "
"and it must be at least 16. 16 is default.")
@@ -1405,16 +1457,15 @@ DEFINE_INT(sim_stack_alignment, 8,
DEFINE_INT(sim_stack_size, 2 * MB / KB,
"Stack size of the ARM64, MIPS, MIPS64 and PPC64 simulator "
"in kBytes (default is 2 MB)")
-DEFINE_BOOL(log_colour, ENABLE_LOG_COLOUR,
- "When logging, try to use coloured output.")
DEFINE_BOOL(trace_sim_messages, false,
"Trace simulator debug messages. Implied by --trace-sim.")
+#endif // USE_SIMULATOR
#if defined V8_TARGET_ARCH_ARM64
// pointer-auth-arm64.cc
-DEFINE_DEBUG_BOOL(sim_abort_on_bad_auth, false,
- "Stop execution when a pointer authentication fails in the "
- "ARM64 simulator.")
+DEFINE_BOOL(sim_abort_on_bad_auth, true,
+ "Stop execution when a pointer authentication fails in the "
+ "ARM64 simulator.")
#endif
// isolate.cc
@@ -1563,8 +1614,6 @@ DEFINE_NEG_NEG_IMPLICATION(text_is_readable, partial_constant_pool)
// Minor mark compact collector flags.
//
#ifdef ENABLE_MINOR_MC
-DEFINE_BOOL(minor_mc_parallel_marking, true,
- "use parallel marking for the young generation")
DEFINE_BOOL(trace_minor_mc_parallel_marking, false,
"trace parallel marking for the young generation")
DEFINE_BOOL(minor_mc, false, "perform young generation mark compact GCs")
@@ -1675,9 +1724,6 @@ DEFINE_BOOL(trace_normalization, false,
DEFINE_BOOL(trace_lazy, false, "trace lazy compilation")
// spaces.cc
-DEFINE_BOOL(collect_heap_spill_statistics, false,
- "report heap spill statistics along with heap_stats "
- "(requires heap_stats)")
DEFINE_BOOL(trace_isolates, false, "trace isolate state changes")
// Regexp
@@ -1698,6 +1744,10 @@ DEFINE_BOOL(trace_wasm_gdb_remote, false, "trace Webassembly GDB-remote server")
//
// Logging and profiling flags
//
+// Logging flag dependencies are also set separately in
+// V8::InitializeOncePerProcessImpl. Please add your flag to the log_all_flags
+// list in v8.cc to properly set FLAG_log and automatically enable it with
+// --log-all.
#undef FLAG
#define FLAG FLAG_FULL
@@ -1710,6 +1760,7 @@ DEFINE_BOOL(logfile_per_isolate, true, "Separate log files for each isolate.")
DEFINE_BOOL(log, false,
"Minimal logging (no API, code, GC, suspect, or handles samples).")
DEFINE_BOOL(log_all, false, "Log all events to the log file.")
+
DEFINE_BOOL(log_api, false, "Log API events to the log file.")
DEFINE_BOOL(log_code, false,
"Log code events to the log file without profiling.")
@@ -1723,14 +1774,6 @@ DEFINE_BOOL(log_function_events, false,
"Log function events "
"(parse, compile, execute) separately.")
-DEFINE_IMPLICATION(log_all, log_api)
-DEFINE_IMPLICATION(log_all, log_code)
-DEFINE_IMPLICATION(log_all, log_code_disassemble)
-DEFINE_IMPLICATION(log_all, log_suspect)
-DEFINE_IMPLICATION(log_all, log_handles)
-DEFINE_IMPLICATION(log_all, log_internal_timer_events)
-DEFINE_IMPLICATION(log_all, log_function_events)
-
DEFINE_BOOL(detailed_line_info, false,
"Always generate detailed line information for CPU profiling.")
@@ -1862,21 +1905,12 @@ DEFINE_BOOL(print_regexp_bytecode, false, "print generated regexp bytecode")
DEFINE_BOOL(print_builtin_size, false, "print code size for builtins")
#ifdef ENABLE_DISASSEMBLER
-DEFINE_BOOL(sodium, false,
- "print generated code output suitable for use with "
- "the Sodium code viewer")
-
-DEFINE_IMPLICATION(sodium, print_code)
-DEFINE_IMPLICATION(sodium, print_opt_code)
-DEFINE_IMPLICATION(sodium, code_comments)
-
DEFINE_BOOL(print_all_code, false, "enable all flags related to printing code")
DEFINE_IMPLICATION(print_all_code, print_code)
DEFINE_IMPLICATION(print_all_code, print_opt_code)
DEFINE_IMPLICATION(print_all_code, print_code_verbose)
DEFINE_IMPLICATION(print_all_code, print_builtin_code)
DEFINE_IMPLICATION(print_all_code, print_regexp_code)
-DEFINE_IMPLICATION(print_all_code, code_comments)
#endif
#undef FLAG
@@ -1908,6 +1942,7 @@ DEFINE_BOOL(single_threaded, false, "disable the use of background tasks")
DEFINE_IMPLICATION(single_threaded, single_threaded_gc)
DEFINE_NEG_IMPLICATION(single_threaded, concurrent_recompilation)
DEFINE_NEG_IMPLICATION(single_threaded, compiler_dispatcher)
+DEFINE_NEG_IMPLICATION(single_threaded, stress_concurrent_inlining)
//
// Parallel and concurrent GC (Orinoco) related flags.
@@ -1919,10 +1954,6 @@ DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_compaction)
DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_marking)
DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_pointer_update)
DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_scavenge)
-DEFINE_NEG_IMPLICATION(single_threaded_gc, concurrent_store_buffer)
-#ifdef ENABLE_MINOR_MC
-DEFINE_NEG_IMPLICATION(single_threaded_gc, minor_mc_parallel_marking)
-#endif // ENABLE_MINOR_MC
DEFINE_NEG_IMPLICATION(single_threaded_gc, concurrent_array_buffer_sweeping)
DEFINE_NEG_IMPLICATION(single_threaded_gc, stress_concurrent_allocation)
@@ -1949,10 +1980,6 @@ DEFINE_INT(dump_allocations_digest_at_alloc, -1,
DEFINE_BOOL(enable_embedded_constant_pool, V8_EMBEDDED_CONSTANT_POOL,
"enable use of embedded constant pools (PPC only)")
-DEFINE_BOOL(unbox_double_fields, V8_DOUBLE_FIELDS_UNBOXING,
- "enable in-object double fields unboxing (64-bit only)")
-DEFINE_IMPLICATION(unbox_double_fields, track_double_fields)
-
// Cleanup...
#undef FLAG_FULL
#undef FLAG_READONLY
diff --git a/deps/v8/src/handles/handles-inl.h b/deps/v8/src/handles/handles-inl.h
index 0215d13ddb..360da25cf6 100644
--- a/deps/v8/src/handles/handles-inl.h
+++ b/deps/v8/src/handles/handles-inl.h
@@ -178,6 +178,8 @@ Address* HandleScope::CreateHandle(Isolate* isolate, Address value) {
Address* HandleScope::GetHandle(Isolate* isolate, Address value) {
DCHECK(AllowHandleAllocation::IsAllowed());
+ DCHECK_WITH_MSG(isolate->thread_id() == ThreadId::Current(),
+ "main-thread handle can only be created on the main thread.");
HandleScopeData* data = isolate->handle_scope_data();
CanonicalHandleScope* canonical = data->canonical_scope;
return canonical ? canonical->Lookup(value) : CreateHandle(isolate, value);
diff --git a/deps/v8/src/handles/handles.cc b/deps/v8/src/handles/handles.cc
index 9a2b813263..392b1f8153 100644
--- a/deps/v8/src/handles/handles.cc
+++ b/deps/v8/src/handles/handles.cc
@@ -46,24 +46,22 @@ bool HandleBase::IsDereferenceAllowed() const {
if (isolate->IsBuiltinsTableHandleLocation(location_)) return true;
if (!AllowHandleDereference::IsAllowed()) return false;
- if (FLAG_local_heaps) {
- LocalHeap* local_heap = isolate->CurrentLocalHeap();
-
- // Local heap can't access handles when parked
- if (!local_heap->IsHandleDereferenceAllowed()) {
- StdoutStream{} << "Cannot dereference handle owned by "
- << "non-running local heap\n";
- return false;
- }
+ LocalHeap* local_heap = isolate->CurrentLocalHeap();
- // We are pretty strict with handle dereferences on background threads: A
- // background local heap is only allowed to dereference its own local or
- // persistent handles.
- if (!local_heap->is_main_thread()) {
- // The current thread owns the handle and thus can dereference it.
- return local_heap->ContainsPersistentHandle(location_) ||
- local_heap->ContainsLocalHandle(location_);
- }
+ // Local heap can't access handles when parked
+ if (!local_heap->IsHandleDereferenceAllowed()) {
+ StdoutStream{} << "Cannot dereference handle owned by "
+ << "non-running local heap\n";
+ return false;
+ }
+
+ // We are pretty strict with handle dereferences on background threads: A
+ // background local heap is only allowed to dereference its own local or
+ // persistent handles.
+ if (!local_heap->is_main_thread()) {
+ // The current thread owns the handle and thus can dereference it.
+ return local_heap->ContainsPersistentHandle(location_) ||
+ local_heap->ContainsLocalHandle(location_);
}
// If LocalHeap::Current() is null, we're on the main thread -- if we were to
// check main thread HandleScopes here, we should additionally check the
diff --git a/deps/v8/src/handles/handles.h b/deps/v8/src/handles/handles.h
index edc88a82eb..6bf60affca 100644
--- a/deps/v8/src/handles/handles.h
+++ b/deps/v8/src/handles/handles.h
@@ -32,6 +32,7 @@ class RootVisitor;
class SmallOrderedHashMap;
class SmallOrderedHashSet;
class SmallOrderedNameDictionary;
+class SwissNameDictionary;
class WasmExportedFunctionData;
// ----------------------------------------------------------------------------
diff --git a/deps/v8/src/handles/persistent-handles.cc b/deps/v8/src/handles/persistent-handles.cc
index df9a6d951c..c793d6aaa3 100644
--- a/deps/v8/src/handles/persistent-handles.cc
+++ b/deps/v8/src/handles/persistent-handles.cc
@@ -122,7 +122,7 @@ void PersistentHandlesList::Remove(PersistentHandles* persistent_handles) {
}
void PersistentHandlesList::Iterate(RootVisitor* visitor, Isolate* isolate) {
- DCHECK_IMPLIES(FLAG_local_heaps, isolate->heap()->safepoint()->IsActive());
+ DCHECK(isolate->heap()->safepoint()->IsActive());
base::MutexGuard guard(&persistent_handles_mutex_);
for (PersistentHandles* current = persistent_handles_head_; current;
current = current->next_) {
diff --git a/deps/v8/src/heap/base/asm/riscv64/push_registers_asm.cc b/deps/v8/src/heap/base/asm/riscv64/push_registers_asm.cc
new file mode 100644
index 0000000000..1de4055a28
--- /dev/null
+++ b/deps/v8/src/heap/base/asm/riscv64/push_registers_asm.cc
@@ -0,0 +1,45 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+//
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+asm(".global PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+ // Push all callee-saved registers and save return address.
+ " addi sp, sp, -96 \n"
+ " sd ra, 88(sp) \n"
+ " sd s8, 80(sp) \n"
+ " sd sp, 72(sp) \n"
+ " sd gp, 64(sp) \n"
+ " sd s7, 56(sp) \n"
+ " sd s6, 48(sp) \n"
+ " sd s5, 40(sp) \n"
+ " sd s4, 32(sp) \n"
+ " sd s3, 24(sp) \n"
+ " sd s2, 16(sp) \n"
+ " sd s1, 8(sp) \n"
+ " sd s0, 0(sp) \n"
+ // Maintain frame pointer.
+ " mv s8, sp \n"
+ // Pass 1st parameter (a0) unchanged (Stack*).
+ // Pass 2nd parameter (a1) unchanged (StackVisitor*).
+ // Save 3rd parameter (a2; IterateStackCallback).
+ " mv a3, a2 \n"
+ " mv a2, sp \n"
+ // Call the callback.
+ " jalr a3 \n"
+ // Load return address.
+ " ld ra, 88(sp) \n"
+ // Restore frame pointer.
+ " ld s8, 80(sp) \n"
+ " addi sp, sp, 96 \n"
+ " jr ra \n");
diff --git a/deps/v8/src/heap/collection-barrier.cc b/deps/v8/src/heap/collection-barrier.cc
index 51f04bb94d..a111e17e05 100644
--- a/deps/v8/src/heap/collection-barrier.cc
+++ b/deps/v8/src/heap/collection-barrier.cc
@@ -70,9 +70,13 @@ void CollectionBarrier::StopTimeToCollectionTimer() {
DCHECK(timer_.IsStarted());
base::TimeDelta delta = timer_.Elapsed();
TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
- "V8.TimeToCollection", TRACE_EVENT_SCOPE_THREAD,
- "duration", delta.InMillisecondsF());
- heap_->isolate()->counters()->time_to_collection()->AddTimedSample(delta);
+ "V8.GC.TimeToCollectionOnBackground",
+ TRACE_EVENT_SCOPE_THREAD, "duration",
+ delta.InMillisecondsF());
+ heap_->isolate()
+ ->counters()
+ ->gc_time_to_collection_on_background()
+ ->AddTimedSample(delta);
timer_.Stop();
} else {
DCHECK_EQ(old_state, RequestState::kDefault);
diff --git a/deps/v8/src/heap/concurrent-allocator-inl.h b/deps/v8/src/heap/concurrent-allocator-inl.h
index 116bc55ac1..c92b91ca47 100644
--- a/deps/v8/src/heap/concurrent-allocator-inl.h
+++ b/deps/v8/src/heap/concurrent-allocator-inl.h
@@ -21,8 +21,6 @@ AllocationResult ConcurrentAllocator::AllocateRaw(int object_size,
AllocationAlignment alignment,
AllocationOrigin origin) {
// TODO(dinfuehr): Add support for allocation observers
- CHECK(FLAG_concurrent_allocation);
-
#ifdef DEBUG
local_heap_->VerifyCurrent();
#endif
diff --git a/deps/v8/src/heap/concurrent-allocator.cc b/deps/v8/src/heap/concurrent-allocator.cc
index f49059d556..47cff165a4 100644
--- a/deps/v8/src/heap/concurrent-allocator.cc
+++ b/deps/v8/src/heap/concurrent-allocator.cc
@@ -61,7 +61,6 @@ void StressConcurrentAllocatorTask::RunInternal() {
// static
void StressConcurrentAllocatorTask::Schedule(Isolate* isolate) {
- CHECK(FLAG_local_heaps && FLAG_concurrent_allocation);
auto task = std::make_unique<StressConcurrentAllocatorTask>(isolate);
const double kDelayInSeconds = 0.1;
V8::GetCurrentPlatform()->CallDelayedOnWorkerThread(std::move(task),
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 03aefd67b9..f05024039b 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -209,7 +209,6 @@ class ConcurrentMarkingVisitor final
template <typename T>
int VisitJSObjectSubclassFast(Map map, T object) {
- DCHECK_IMPLIES(FLAG_unbox_double_fields, map.HasFastPointerLayout());
using TBodyDescriptor = typename T::FastBodyDescriptor;
return VisitJSObjectSubclass<T, TBodyDescriptor>(map, object);
}
@@ -358,9 +357,10 @@ StrongDescriptorArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
class ConcurrentMarking::JobTask : public v8::JobTask {
public:
JobTask(ConcurrentMarking* concurrent_marking, unsigned mark_compact_epoch,
- bool is_forced_gc)
+ BytecodeFlushMode bytecode_flush_mode, bool is_forced_gc)
: concurrent_marking_(concurrent_marking),
mark_compact_epoch_(mark_compact_epoch),
+ bytecode_flush_mode_(bytecode_flush_mode),
is_forced_gc_(is_forced_gc) {}
~JobTask() override = default;
@@ -369,7 +369,17 @@ class ConcurrentMarking::JobTask : public v8::JobTask {
// v8::JobTask overrides.
void Run(JobDelegate* delegate) override {
- concurrent_marking_->Run(delegate, mark_compact_epoch_, is_forced_gc_);
+ if (delegate->IsJoiningThread()) {
+ // TRACE_GC is not needed here because the caller opens the right scope.
+ concurrent_marking_->Run(delegate, bytecode_flush_mode_,
+ mark_compact_epoch_, is_forced_gc_);
+ } else {
+ TRACE_GC_EPOCH(concurrent_marking_->heap_->tracer(),
+ GCTracer::Scope::MC_BACKGROUND_MARKING,
+ ThreadKind::kBackground);
+ concurrent_marking_->Run(delegate, bytecode_flush_mode_,
+ mark_compact_epoch_, is_forced_gc_);
+ }
}
size_t GetMaxConcurrency(size_t worker_count) const override {
@@ -379,6 +389,7 @@ class ConcurrentMarking::JobTask : public v8::JobTask {
private:
ConcurrentMarking* concurrent_marking_;
const unsigned mark_compact_epoch_;
+ BytecodeFlushMode bytecode_flush_mode_;
const bool is_forced_gc_;
};
@@ -398,10 +409,9 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap,
#endif
}
-void ConcurrentMarking::Run(JobDelegate* delegate, unsigned mark_compact_epoch,
- bool is_forced_gc) {
- TRACE_GC_EPOCH(heap_->tracer(), GCTracer::Scope::MC_BACKGROUND_MARKING,
- ThreadKind::kBackground);
+void ConcurrentMarking::Run(JobDelegate* delegate,
+ BytecodeFlushMode bytecode_flush_mode,
+ unsigned mark_compact_epoch, bool is_forced_gc) {
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterrupCheck = 1000;
uint8_t task_id = delegate->GetTaskId() + 1;
@@ -409,7 +419,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate, unsigned mark_compact_epoch,
MarkingWorklists::Local local_marking_worklists(marking_worklists_);
ConcurrentMarkingVisitor visitor(
task_id, &local_marking_worklists, weak_objects_, heap_,
- mark_compact_epoch, Heap::GetBytecodeFlushMode(),
+ mark_compact_epoch, bytecode_flush_mode,
heap_->local_embedder_heap_tracer()->InUse(), is_forced_gc,
&task_state->memory_chunk_data);
NativeContextInferrer& native_context_inferrer =
@@ -537,9 +547,10 @@ void ConcurrentMarking::ScheduleJob(TaskPriority priority) {
DCHECK(!job_handle_ || !job_handle_->IsValid());
job_handle_ = V8::GetCurrentPlatform()->PostJob(
- priority,
- std::make_unique<JobTask>(this, heap_->mark_compact_collector()->epoch(),
- heap_->is_current_gc_forced()));
+ priority, std::make_unique<JobTask>(
+ this, heap_->mark_compact_collector()->epoch(),
+ heap_->mark_compact_collector()->bytecode_flush_mode(),
+ heap_->is_current_gc_forced()));
DCHECK(job_handle_->IsValid());
}
diff --git a/deps/v8/src/heap/concurrent-marking.h b/deps/v8/src/heap/concurrent-marking.h
index 86e161c8ab..c685f5cca6 100644
--- a/deps/v8/src/heap/concurrent-marking.h
+++ b/deps/v8/src/heap/concurrent-marking.h
@@ -105,8 +105,8 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
char cache_line_padding[64];
};
class JobTask;
- void Run(JobDelegate* delegate, unsigned mark_compact_epoch,
- bool is_forced_gc);
+ void Run(JobDelegate* delegate, BytecodeFlushMode bytecode_flush_mode,
+ unsigned mark_compact_epoch, bool is_forced_gc);
size_t GetMaxConcurrency(size_t worker_count);
std::unique_ptr<JobHandle> job_handle_;
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.cc b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
index 413e3ecdd1..c0683ef7dd 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
@@ -4,6 +4,9 @@
#include "src/heap/cppgc-js/cpp-heap.h"
+#include <cstdint>
+
+#include "include/cppgc/heap-consistency.h"
#include "include/cppgc/platform.h"
#include "include/v8-platform.h"
#include "include/v8.h"
@@ -27,6 +30,7 @@
#include "src/heap/cppgc/prefinalizer-handler.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/sweeper.h"
+#include "src/heap/embedder-tracing.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/sweeper.h"
#include "src/init/v8.h"
@@ -34,6 +38,16 @@
namespace v8 {
+// static
+constexpr uint16_t WrapperDescriptor::kUnknownEmbedderId;
+
+// static
+std::unique_ptr<CppHeap> CppHeap::Create(v8::Platform* platform,
+ const CppHeapCreateParams& params) {
+ return std::make_unique<internal::CppHeap>(platform, params.custom_spaces,
+ params.wrapper_descriptor);
+}
+
cppgc::AllocationHandle& CppHeap::GetAllocationHandle() {
return internal::CppHeap::From(this)->object_allocator();
}
@@ -42,20 +56,33 @@ cppgc::HeapHandle& CppHeap::GetHeapHandle() {
return *internal::CppHeap::From(this);
}
+void CppHeap::Terminate() { internal::CppHeap::From(this)->Terminate(); }
+
+cppgc::HeapStatistics CppHeap::CollectStatistics(
+ cppgc::HeapStatistics::DetailLevel detail_level) {
+ return internal::CppHeap::From(this)->AsBase().CollectStatistics(
+ detail_level);
+}
+
void JSHeapConsistency::DijkstraMarkingBarrierSlow(
cppgc::HeapHandle& heap_handle, const TracedReferenceBase& ref) {
auto& heap_base = cppgc::internal::HeapBase::From(heap_handle);
static_cast<JSVisitor*>(&heap_base.marker()->Visitor())->Trace(ref);
}
+void JSHeapConsistency::CheckWrapper(v8::Local<v8::Object>& wrapper,
+ int wrapper_index, const void* wrappable) {
+ CHECK_EQ(wrappable,
+ wrapper->GetAlignedPointerFromInternalField(wrapper_index));
+}
+
namespace internal {
namespace {
class CppgcPlatformAdapter final : public cppgc::Platform {
public:
- explicit CppgcPlatformAdapter(v8::Isolate* isolate)
- : platform_(V8::GetCurrentPlatform()), isolate_(isolate) {}
+ explicit CppgcPlatformAdapter(v8::Platform* platform) : platform_(platform) {}
CppgcPlatformAdapter(const CppgcPlatformAdapter&) = delete;
CppgcPlatformAdapter& operator=(const CppgcPlatformAdapter&) = delete;
@@ -81,9 +108,11 @@ class CppgcPlatformAdapter final : public cppgc::Platform {
return platform_->GetTracingController();
}
+ void SetIsolate(v8::Isolate* isolate) { isolate_ = isolate; }
+
private:
v8::Platform* platform_;
- v8::Isolate* isolate_;
+ v8::Isolate* isolate_ = nullptr;
};
class UnifiedHeapConcurrentMarker
@@ -160,25 +189,71 @@ void UnifiedHeapMarker::AddObject(void* object) {
} // namespace
CppHeap::CppHeap(
- v8::Isolate* isolate,
- const std::vector<std::unique_ptr<cppgc::CustomSpaceBase>>& custom_spaces)
- : cppgc::internal::HeapBase(std::make_shared<CppgcPlatformAdapter>(isolate),
- custom_spaces,
- cppgc::internal::HeapBase::StackSupport::
- kSupportsConservativeStackScan),
- isolate_(*reinterpret_cast<Isolate*>(isolate)) {
- CHECK(!FLAG_incremental_marking_wrappers);
- if (isolate_.heap_profiler()) {
- isolate_.heap_profiler()->AddBuildEmbedderGraphCallback(
+ v8::Platform* platform,
+ const std::vector<std::unique_ptr<cppgc::CustomSpaceBase>>& custom_spaces,
+ const v8::WrapperDescriptor& wrapper_descriptor,
+ std::unique_ptr<cppgc::internal::MetricRecorder> metric_recorder)
+ : cppgc::internal::HeapBase(
+ std::make_shared<CppgcPlatformAdapter>(platform), custom_spaces,
+ cppgc::internal::HeapBase::StackSupport::
+ kSupportsConservativeStackScan,
+ std::move(metric_recorder)),
+ wrapper_descriptor_(wrapper_descriptor) {
+ CHECK_NE(WrapperDescriptor::kUnknownEmbedderId,
+ wrapper_descriptor_.embedder_id_for_garbage_collected);
+ // Enter no GC scope. `AttachIsolate()` removes this and allows triggering
+ // garbage collections.
+ no_gc_scope_++;
+ stats_collector()->RegisterObserver(this);
+}
+
+CppHeap::~CppHeap() {
+ if (isolate_) {
+ isolate_->heap()->DetachCppHeap();
+ }
+}
+
+void CppHeap::Terminate() {
+ // Must not be attached to a heap when invoking termination GCs.
+ CHECK(!isolate_);
+ // Gracefully terminate the C++ heap invoking destructors.
+ HeapBase::Terminate();
+}
+
+void CppHeap::AttachIsolate(Isolate* isolate) {
+ CHECK_NULL(isolate_);
+ isolate_ = isolate;
+ static_cast<CppgcPlatformAdapter*>(platform())
+ ->SetIsolate(reinterpret_cast<v8::Isolate*>(isolate_));
+ if (isolate_->heap_profiler()) {
+ isolate_->heap_profiler()->AddBuildEmbedderGraphCallback(
&CppGraphBuilder::Run, this);
}
+ isolate_->heap()->SetEmbedderHeapTracer(this);
+ isolate_->heap()->local_embedder_heap_tracer()->SetWrapperDescriptor(
+ wrapper_descriptor_);
+ no_gc_scope_--;
}
-CppHeap::~CppHeap() {
- if (isolate_.heap_profiler()) {
- isolate_.heap_profiler()->RemoveBuildEmbedderGraphCallback(
+void CppHeap::DetachIsolate() {
+ // TODO(chromium:1056170): Investigate whether this can be enforced with a
+ // CHECK across all relevant embedders and setups.
+ if (!isolate_) return;
+
+ // Delegate to existing EmbedderHeapTracer API to finish any ongoing garbage
+ // collection.
+ FinalizeTracing();
+ sweeper_.FinishIfRunning();
+
+ if (isolate_->heap_profiler()) {
+ isolate_->heap_profiler()->RemoveBuildEmbedderGraphCallback(
&CppGraphBuilder::Run, this);
}
+ isolate_ = nullptr;
+ // Any future garbage collections will ignore the V8->C++ references.
+ isolate()->SetEmbedderHeapTracer(nullptr);
+ // Enter no GC scope.
+ no_gc_scope_++;
}
void CppHeap::RegisterV8References(
@@ -196,11 +271,12 @@ void CppHeap::TracePrologue(TraceFlags flags) {
// Finish sweeping in case it is still running.
sweeper_.FinishIfRunning();
+ current_flags_ = flags;
const UnifiedHeapMarker::MarkingConfig marking_config{
UnifiedHeapMarker::MarkingConfig::CollectionType::kMajor,
cppgc::Heap::StackState::kNoHeapPointers,
cppgc::Heap::MarkingType::kIncrementalAndConcurrent,
- flags == TraceFlags::kForced
+ flags & TraceFlags::kForced
? UnifiedHeapMarker::MarkingConfig::IsForcedGC::kForced
: UnifiedHeapMarker::MarkingConfig::IsForcedGC::kNotForced};
if ((flags == TraceFlags::kReduceMemory) || (flags == TraceFlags::kForced)) {
@@ -211,28 +287,41 @@ void CppHeap::TracePrologue(TraceFlags flags) {
}
marker_ =
cppgc::internal::MarkerFactory::CreateAndStartMarking<UnifiedHeapMarker>(
- *isolate_.heap(), AsBase(), platform_.get(), marking_config);
+ *isolate_->heap(), AsBase(), platform_.get(), marking_config);
marking_done_ = false;
}
bool CppHeap::AdvanceTracing(double deadline_in_ms) {
- v8::base::TimeDelta deadline =
- is_in_final_pause_
- ? v8::base::TimeDelta::Max()
- : v8::base::TimeDelta::FromMillisecondsD(deadline_in_ms);
+ // TODO(chromium:1154636): The kAtomicMark/kIncrementalMark scope below is
+ // needed for recording all cpp marking time. Note that it can lead to double
+ // accounting since this scope is also accounted under an outer v8 scope.
+ // Make sure to only account this scope once.
+ cppgc::internal::StatsCollector::EnabledScope stats_scope(
+ stats_collector(),
+ in_atomic_pause_ ? cppgc::internal::StatsCollector::kAtomicMark
+ : cppgc::internal::StatsCollector::kIncrementalMark);
+ const v8::base::TimeDelta deadline =
+ in_atomic_pause_ ? v8::base::TimeDelta::Max()
+ : v8::base::TimeDelta::FromMillisecondsD(deadline_in_ms);
+ const size_t marked_bytes_limit = in_atomic_pause_ ? SIZE_MAX : 0;
// TODO(chromium:1056170): Replace when unified heap transitions to
// bytes-based deadline.
- marking_done_ = marker_->AdvanceMarkingWithMaxDuration(deadline);
- DCHECK_IMPLIES(is_in_final_pause_, marking_done_);
+ marking_done_ =
+ marker_->AdvanceMarkingWithLimits(deadline, marked_bytes_limit);
+ DCHECK_IMPLIES(in_atomic_pause_, marking_done_);
return marking_done_;
}
bool CppHeap::IsTracingDone() { return marking_done_; }
void CppHeap::EnterFinalPause(EmbedderStackState stack_state) {
+ CHECK(!in_disallow_gc_scope());
cppgc::internal::StatsCollector::EnabledScope stats_scope(
- AsBase(), cppgc::internal::StatsCollector::kAtomicMark);
- is_in_final_pause_ = true;
+ stats_collector(), cppgc::internal::StatsCollector::kAtomicMark);
+ in_atomic_pause_ = true;
+ if (override_stack_state_) {
+ stack_state = *override_stack_state_;
+ }
marker_->EnterAtomicPause(stack_state);
if (compactor_.CancelIfShouldNotCompact(cppgc::Heap::MarkingType::kAtomic,
stack_state)) {
@@ -241,22 +330,16 @@ void CppHeap::EnterFinalPause(EmbedderStackState stack_state) {
}
void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
- CHECK(is_in_final_pause_);
+ CHECK(in_atomic_pause_);
CHECK(marking_done_);
{
cppgc::internal::StatsCollector::EnabledScope stats_scope(
- AsBase(), cppgc::internal::StatsCollector::kAtomicMark);
- cppgc::internal::ObjectAllocator::NoAllocationScope no_allocation_scope_(
- object_allocator_);
+ stats_collector(), cppgc::internal::StatsCollector::kAtomicMark);
+ cppgc::subtle::DisallowGarbageCollectionScope disallow_gc_scope(*this);
marker_->LeaveAtomicPause();
- is_in_final_pause_ = false;
- }
- {
- cppgc::internal::ObjectAllocator::NoAllocationScope no_allocation_scope_(
- object_allocator_);
- prefinalizer_handler()->InvokePreFinalizers();
}
marker_.reset();
+ ExecutePreFinalizers();
// TODO(chromium:1056170): replace build flag with dedicated flag.
#if DEBUG
UnifiedHeapMarkingVerifier verifier(*this);
@@ -264,17 +347,48 @@ void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
#endif
{
- NoGCScope no_gc(*this);
+ cppgc::subtle::NoGarbageCollectionScope no_gc(*this);
cppgc::internal::Sweeper::SweepingConfig::CompactableSpaceHandling
compactable_space_handling = compactor_.CompactSpacesIfEnabled();
const cppgc::internal::Sweeper::SweepingConfig sweeping_config{
- cppgc::internal::Sweeper::SweepingConfig::SweepingType::
- kIncrementalAndConcurrent,
+ // In case the GC was forced, also finalize sweeping right away.
+ current_flags_ & TraceFlags::kForced
+ ? cppgc::internal::Sweeper::SweepingConfig::SweepingType::kAtomic
+ : cppgc::internal::Sweeper::SweepingConfig::SweepingType::
+ kIncrementalAndConcurrent,
compactable_space_handling};
sweeper().Start(sweeping_config);
}
+ in_atomic_pause_ = false;
sweeper().NotifyDoneIfNeeded();
}
+void CppHeap::AllocatedObjectSizeIncreased(size_t bytes) {
+ buffered_allocated_bytes_ += static_cast<int64_t>(bytes);
+ ReportBufferedAllocationSizeIfPossible();
+}
+
+void CppHeap::AllocatedObjectSizeDecreased(size_t bytes) {
+ buffered_allocated_bytes_ -= static_cast<int64_t>(bytes);
+ ReportBufferedAllocationSizeIfPossible();
+}
+
+void CppHeap::ReportBufferedAllocationSizeIfPossible() {
+ // Avoid reporting to V8 in the following conditions as that may trigger GC
+ // finalizations where not allowed.
+ // - Recursive sweeping.
+ // - GC forbidden scope.
+ if (sweeper().IsSweepingOnMutatorThread() || in_no_gc_scope()) {
+ return;
+ }
+
+ if (buffered_allocated_bytes_ < 0) {
+ DecreaseAllocatedSize(static_cast<size_t>(-buffered_allocated_bytes_));
+ } else {
+ IncreaseAllocatedSize(static_cast<size_t>(buffered_allocated_bytes_));
+ }
+ buffered_allocated_bytes_ = 0;
+}
+
} // namespace internal
} // namespace v8
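
The buffered reporting added above can be illustrated in isolation. The following standalone sketch mirrors only the sign-split flush logic; IncreaseAllocatedSize and DecreaseAllocatedSize are hypothetical stand-ins for the V8-facing reporting hooks, not part of this patch.

#include <cstdint>
#include <cstdlib>

// Hypothetical stand-ins for the V8-facing reporting calls.
void IncreaseAllocatedSize(size_t bytes);
void DecreaseAllocatedSize(size_t bytes);

// Accumulates signed deltas and flushes them as a single unsigned report,
// following the pattern of CppHeap::ReportBufferedAllocationSizeIfPossible().
struct BufferedAllocationReporter {
  int64_t buffered_bytes = 0;

  void OnIncrease(size_t bytes) { buffered_bytes += static_cast<int64_t>(bytes); }
  void OnDecrease(size_t bytes) { buffered_bytes -= static_cast<int64_t>(bytes); }

  // `reporting_allowed` is false e.g. while sweeping on the mutator thread or
  // inside a GC-forbidden scope.
  void FlushIfPossible(bool reporting_allowed) {
    if (!reporting_allowed) return;
    if (buffered_bytes < 0) {
      DecreaseAllocatedSize(static_cast<size_t>(-buffered_bytes));
    } else {
      IncreaseAllocatedSize(static_cast<size_t>(buffered_bytes));
    }
    buffered_bytes = 0;
  }
};
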
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.h b/deps/v8/src/heap/cppgc-js/cpp-heap.h
index 44607e5897..47b63a5c5d 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.h
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.h
@@ -9,6 +9,7 @@
#include "include/v8.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/heap-base.h"
+#include "src/heap/cppgc/stats-collector.h"
namespace v8 {
@@ -17,9 +18,11 @@ class Isolate;
namespace internal {
// A C++ heap implementation used with V8 to implement unified heap.
-class V8_EXPORT_PRIVATE CppHeap final : public cppgc::internal::HeapBase,
- public v8::CppHeap,
- public v8::EmbedderHeapTracer {
+class V8_EXPORT_PRIVATE CppHeap final
+ : public cppgc::internal::HeapBase,
+ public v8::CppHeap,
+ public v8::EmbedderHeapTracer,
+ public cppgc::internal::StatsCollector::AllocationObserver {
public:
static CppHeap* From(v8::CppHeap* heap) {
return static_cast<CppHeap*>(heap);
@@ -28,9 +31,12 @@ class V8_EXPORT_PRIVATE CppHeap final : public cppgc::internal::HeapBase,
return static_cast<const CppHeap*>(heap);
}
- CppHeap(v8::Isolate* isolate,
- const std::vector<std::unique_ptr<cppgc::CustomSpaceBase>>&
- custom_spaces);
+ CppHeap(
+ v8::Platform* platform,
+ const std::vector<std::unique_ptr<cppgc::CustomSpaceBase>>& custom_spaces,
+ const v8::WrapperDescriptor& wrapper_descriptor,
+ std::unique_ptr<cppgc::internal::MetricRecorder> metric_recorder =
+ nullptr);
~CppHeap() final;
CppHeap(const CppHeap&) = delete;
@@ -39,6 +45,12 @@ class V8_EXPORT_PRIVATE CppHeap final : public cppgc::internal::HeapBase,
HeapBase& AsBase() { return *this; }
const HeapBase& AsBase() const { return *this; }
+ void AttachIsolate(Isolate* isolate);
+ void DetachIsolate();
+
+ void Terminate();
+
+ // v8::EmbedderHeapTracer interface.
void RegisterV8References(
const std::vector<std::pair<void*, void*> >& embedder_fields) final;
void TracePrologue(TraceFlags flags) final;
@@ -47,6 +59,11 @@ class V8_EXPORT_PRIVATE CppHeap final : public cppgc::internal::HeapBase,
void TraceEpilogue(TraceSummary* trace_summary) final;
void EnterFinalPause(EmbedderStackState stack_state) final;
+ // StatsCollector::AllocationObserver interface.
+ void AllocatedObjectSizeIncreased(size_t) final;
+ void AllocatedObjectSizeDecreased(size_t) final;
+ void ResetAllocatedObjectSize(size_t) final {}
+
private:
void FinalizeIncrementalGarbageCollectionIfNeeded(
cppgc::Heap::StackState) final {
@@ -54,11 +71,18 @@ class V8_EXPORT_PRIVATE CppHeap final : public cppgc::internal::HeapBase,
// finalization is not needed) thus this method is left empty.
}
- void PostGarbageCollection() final {}
+ void ReportBufferedAllocationSizeIfPossible();
- Isolate& isolate_;
+ Isolate* isolate_ = nullptr;
bool marking_done_ = false;
- bool is_in_final_pause_ = false;
+ TraceFlags current_flags_ = TraceFlags::kNoFlags;
+
+ // Buffered allocated bytes. Reporting allocated bytes to V8 can trigger a GC
+ // atomic pause. Allocated bytes are buffered in case this is temporarily
+ // prohibited.
+ int64_t buffered_allocated_bytes_ = 0;
+
+ v8::WrapperDescriptor wrapper_descriptor_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
index bbf9fff4a3..b89ff4f9a9 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
@@ -308,10 +308,10 @@ bool HasEmbedderDataBackref(Isolate* isolate, v8::Local<v8::Value> v8_value,
return false;
JSObject js_object = JSObject::cast(*v8_object);
- return js_object.GetEmbedderFieldCount() >= 2 &&
- LocalEmbedderHeapTracer::VerboseWrapperInfo(
- LocalEmbedderHeapTracer::ExtractWrapperInfo(isolate, js_object))
- .instance() == expected_backref;
+ return LocalEmbedderHeapTracer::VerboseWrapperInfo(
+ isolate->heap()->local_embedder_heap_tracer()->ExtractWrapperInfo(
+ isolate, js_object))
+ .instance() == expected_backref;
}
// The following implements a snapshotting algorithm for C++ objects that also
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
index 16543fe019..e9da1163e4 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
@@ -34,8 +34,9 @@ void UnifiedHeapMarkingVisitorBase::VisitWeak(const void* object,
}
void UnifiedHeapMarkingVisitorBase::VisitEphemeron(const void* key,
+ const void* value,
TraceDescriptor value_desc) {
- marking_state_.ProcessEphemeron(key, value_desc);
+ marking_state_.ProcessEphemeron(key, value, value_desc, *this);
}
void UnifiedHeapMarkingVisitorBase::VisitWeakContainer(
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h
index 1032a76006..721dbe5d98 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h
@@ -41,7 +41,7 @@ class V8_EXPORT_PRIVATE UnifiedHeapMarkingVisitorBase : public JSVisitor {
// C++ handling.
void Visit(const void*, TraceDescriptor) final;
void VisitWeak(const void*, TraceDescriptor, WeakCallback, const void*) final;
- void VisitEphemeron(const void*, TraceDescriptor) final;
+ void VisitEphemeron(const void*, const void*, TraceDescriptor) final;
void VisitWeakContainer(const void* self, TraceDescriptor strong_desc,
TraceDescriptor weak_desc, WeakCallback callback,
const void* data) final;
diff --git a/deps/v8/src/heap/cppgc/caged-heap.cc b/deps/v8/src/heap/cppgc/caged-heap.cc
index 1fdaaa733f..951fb0e853 100644
--- a/deps/v8/src/heap/cppgc/caged-heap.cc
+++ b/deps/v8/src/heap/cppgc/caged-heap.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "v8config.h" // NOLINT(build/include_directory)
+
#if !defined(CPPGC_CAGED_HEAP)
#error "Must be compiled with caged heap enabled"
#endif
diff --git a/deps/v8/src/heap/cppgc/compaction-worklists.h b/deps/v8/src/heap/cppgc/compaction-worklists.h
index 6222bd9a92..2c3ad147f4 100644
--- a/deps/v8/src/heap/cppgc/compaction-worklists.h
+++ b/deps/v8/src/heap/cppgc/compaction-worklists.h
@@ -12,8 +12,13 @@
namespace cppgc {
namespace internal {
-class CompactionWorklists {
+class CompactionWorklists final {
public:
+ CompactionWorklists() = default;
+
+ CompactionWorklists(const CompactionWorklists&) = delete;
+ CompactionWorklists& operator=(const CompactionWorklists&) = delete;
+
using MovableReference = const void*;
using MovableReferencesWorklist =
diff --git a/deps/v8/src/heap/cppgc/compactor.cc b/deps/v8/src/heap/cppgc/compactor.cc
index 8efadc6f12..4a05746b72 100644
--- a/deps/v8/src/heap/cppgc/compactor.cc
+++ b/deps/v8/src/heap/cppgc/compactor.cc
@@ -435,7 +435,7 @@ Compactor::Compactor(RawHeap& heap) : heap_(heap) {
bool Compactor::ShouldCompact(
GarbageCollector::Config::MarkingType marking_type,
- GarbageCollector::Config::StackState stack_state) {
+ GarbageCollector::Config::StackState stack_state) const {
if (compactable_spaces_.empty() ||
(marking_type == GarbageCollector::Config::MarkingType::kAtomic &&
stack_state ==
@@ -484,8 +484,8 @@ bool Compactor::CancelIfShouldNotCompact(
Compactor::CompactableSpaceHandling Compactor::CompactSpacesIfEnabled() {
if (!is_enabled_) return CompactableSpaceHandling::kSweep;
- StatsCollector::DisabledScope stats_scope(*heap_.heap(),
- StatsCollector::kAtomicCompact);
+ StatsCollector::EnabledScope stats_scope(heap_.heap()->stats_collector(),
+ StatsCollector::kAtomicCompact);
MovableReferences movable_references(*heap_.heap());
diff --git a/deps/v8/src/heap/cppgc/compactor.h b/deps/v8/src/heap/cppgc/compactor.h
index d354274a33..ec7fc950a9 100644
--- a/deps/v8/src/heap/cppgc/compactor.h
+++ b/deps/v8/src/heap/cppgc/compactor.h
@@ -20,6 +20,9 @@ class V8_EXPORT_PRIVATE Compactor final {
explicit Compactor(RawHeap&);
~Compactor() { DCHECK(!is_enabled_); }
+ Compactor(const Compactor&) = delete;
+ Compactor& operator=(const Compactor&) = delete;
+
void InitializeIfShouldCompact(GarbageCollector::Config::MarkingType,
GarbageCollector::Config::StackState);
// Returns true if compaction was cancelled.
@@ -32,12 +35,11 @@ class V8_EXPORT_PRIVATE Compactor final {
}
void EnableForNextGCForTesting() { enable_for_next_gc_for_testing_ = true; }
-
bool IsEnabledForTesting() const { return is_enabled_; }
private:
bool ShouldCompact(GarbageCollector::Config::MarkingType,
- GarbageCollector::Config::StackState);
+ GarbageCollector::Config::StackState) const;
RawHeap& heap_;
// Compactor does not own the compactable spaces. The heap owns all spaces.
@@ -46,7 +48,6 @@ class V8_EXPORT_PRIVATE Compactor final {
std::unique_ptr<CompactionWorklists> compaction_worklists_;
bool is_enabled_ = false;
-
bool enable_for_next_gc_for_testing_ = false;
};
diff --git a/deps/v8/src/heap/cppgc/concurrent-marker.cc b/deps/v8/src/heap/cppgc/concurrent-marker.cc
index 8627829aad..34953b9ec3 100644
--- a/deps/v8/src/heap/cppgc/concurrent-marker.cc
+++ b/deps/v8/src/heap/cppgc/concurrent-marker.cc
@@ -73,7 +73,8 @@ ConcurrentMarkingTask::ConcurrentMarkingTask(
void ConcurrentMarkingTask::Run(JobDelegate* job_delegate) {
StatsCollector::EnabledConcurrentScope stats_scope(
- concurrent_marker_.heap(), StatsCollector::kConcurrentMark);
+ concurrent_marker_.heap().stats_collector(),
+ StatsCollector::kConcurrentMark);
if (!HasWorkForConcurrentMarking(concurrent_marker_.marking_worklists()))
return;
@@ -150,17 +151,18 @@ void ConcurrentMarkingTask::ProcessWorklists(
{
StatsCollector::DisabledConcurrentScope stats_scope(
- concurrent_marker_.heap(),
+ concurrent_marker_.heap().stats_collector(),
StatsCollector::kConcurrentMarkProcessEphemerons);
if (!DrainWorklistWithYielding(
job_delegate, concurrent_marking_state,
concurrent_marker_.incremental_marking_schedule(),
concurrent_marking_state
.ephemeron_pairs_for_processing_worklist(),
- [&concurrent_marking_state](
+ [&concurrent_marking_state, &concurrent_marking_visitor](
const MarkingWorklists::EphemeronPairItem& item) {
- concurrent_marking_state.ProcessEphemeron(item.key,
- item.value_desc);
+ concurrent_marking_state.ProcessEphemeron(
+ item.key, item.value, item.value_desc,
+ concurrent_marking_visitor);
})) {
return;
}
diff --git a/deps/v8/src/heap/cppgc/default-platform.cc b/deps/v8/src/heap/cppgc/default-platform.cc
index fd0a55bd82..46884d42df 100644
--- a/deps/v8/src/heap/cppgc/default-platform.cc
+++ b/deps/v8/src/heap/cppgc/default-platform.cc
@@ -4,20 +4,11 @@
#include <include/cppgc/default-platform.h>
-#if !CPPGC_IS_STANDALONE
-#include <v8.h>
-#endif // !CPPGC_IS_STANDALONE
-
namespace cppgc {
// static
void DefaultPlatform::InitializeProcess(DefaultPlatform* platform) {
-#if CPPGC_IS_STANDALONE
cppgc::InitializeProcess(platform->GetPageAllocator());
-#else
- // v8::V8::InitializePlatform transitively calls cppgc::InitializeProcess.
- v8::V8::InitializePlatform(platform->v8_platform_.get());
-#endif // CPPGC_IS_STANDALONE
}
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/free-list.cc b/deps/v8/src/heap/cppgc/free-list.cc
index b7252eefe9..934aeaf3f8 100644
--- a/deps/v8/src/heap/cppgc/free-list.cc
+++ b/deps/v8/src/heap/cppgc/free-list.cc
@@ -191,5 +191,26 @@ bool FreeList::IsConsistent(size_t index) const {
!free_list_tails_[index]->Next());
}
+void FreeList::CollectStatistics(
+ HeapStatistics::FreeListStatistics& free_list_stats) {
+ std::vector<size_t>& bucket_size = free_list_stats.bucket_size;
+ std::vector<size_t>& free_count = free_list_stats.free_count;
+ std::vector<size_t>& free_size = free_list_stats.free_size;
+ DCHECK(bucket_size.empty());
+ DCHECK(free_count.empty());
+ DCHECK(free_size.empty());
+ for (size_t i = 0; i < kPageSizeLog2; ++i) {
+ size_t entry_count = 0;
+ size_t entry_size = 0;
+ for (Entry* entry = free_list_heads_[i]; entry; entry = entry->Next()) {
+ ++entry_count;
+ entry_size += entry->GetSize();
+ }
+ bucket_size.push_back(static_cast<size_t>(1) << i);
+ free_count.push_back(entry_count);
+ free_size.push_back(entry_size);
+ }
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/free-list.h b/deps/v8/src/heap/cppgc/free-list.h
index ba578f3820..6906952102 100644
--- a/deps/v8/src/heap/cppgc/free-list.h
+++ b/deps/v8/src/heap/cppgc/free-list.h
@@ -7,6 +7,7 @@
#include <array>
+#include "include/cppgc/heap-statistics.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
@@ -45,6 +46,8 @@ class V8_EXPORT_PRIVATE FreeList {
bool Contains(Block) const;
+ void CollectStatistics(HeapStatistics::FreeListStatistics&);
+
private:
class Entry;
diff --git a/deps/v8/src/heap/cppgc/garbage-collector.h b/deps/v8/src/heap/cppgc/garbage-collector.h
index 3ea88d76fc..7abf17df5f 100644
--- a/deps/v8/src/heap/cppgc/garbage-collector.h
+++ b/deps/v8/src/heap/cppgc/garbage-collector.h
@@ -42,6 +42,13 @@ class GarbageCollector {
MarkingType::kIncremental, SweepingType::kAtomic};
}
+ static constexpr Config
+ PreciseIncrementalMarkingConcurrentSweepingConfig() {
+ return {CollectionType::kMajor, StackState::kNoHeapPointers,
+ MarkingType::kIncremental,
+ SweepingType::kIncrementalAndConcurrent};
+ }
+
static constexpr Config MinorPreciseAtomicConfig() {
return {CollectionType::kMinor, StackState::kNoHeapPointers,
MarkingType::kAtomic, SweepingType::kAtomic};
diff --git a/deps/v8/src/heap/cppgc/gc-info-table.cc b/deps/v8/src/heap/cppgc/gc-info-table.cc
index 74ce4af874..384f8713ef 100644
--- a/deps/v8/src/heap/cppgc/gc-info-table.cc
+++ b/deps/v8/src/heap/cppgc/gc-info-table.cc
@@ -29,14 +29,7 @@ static_assert(v8::base::bits::IsPowerOfTwo(kEntrySize),
"GCInfoTable entries size must be power of "
"two");
-} // namespace
-
-GCInfoTable* GlobalGCInfoTable::global_table_ = nullptr;
-constexpr GCInfoIndex GCInfoTable::kMaxIndex;
-constexpr GCInfoIndex GCInfoTable::kMinIndex;
-constexpr GCInfoIndex GCInfoTable::kInitialWantedLimit;
-
-void GlobalGCInfoTable::Create(PageAllocator* page_allocator) {
+PageAllocator* GetAllocator(PageAllocator* page_allocator) {
if (!page_allocator) {
static v8::base::LeakyObject<v8::base::PageAllocator>
default_page_allocator;
@@ -44,9 +37,23 @@ void GlobalGCInfoTable::Create(PageAllocator* page_allocator) {
}
// TODO(chromium:1056170): Wrap page_allocator into LsanPageAllocator when
// running with LEAK_SANITIZER.
- static v8::base::LeakyObject<GCInfoTable> table(page_allocator);
+ return page_allocator;
+}
+
+} // namespace
+
+GCInfoTable* GlobalGCInfoTable::global_table_ = nullptr;
+constexpr GCInfoIndex GCInfoTable::kMaxIndex;
+constexpr GCInfoIndex GCInfoTable::kMinIndex;
+constexpr GCInfoIndex GCInfoTable::kInitialWantedLimit;
+
+// static
+void GlobalGCInfoTable::Initialize(PageAllocator* page_allocator) {
+ static v8::base::LeakyObject<GCInfoTable> table(GetAllocator(page_allocator));
if (!global_table_) {
global_table_ = table.get();
+ } else {
+ CHECK_EQ(page_allocator, global_table_->allocator());
}
}
diff --git a/deps/v8/src/heap/cppgc/gc-info-table.h b/deps/v8/src/heap/cppgc/gc-info-table.h
index addcd0bd38..61de294426 100644
--- a/deps/v8/src/heap/cppgc/gc-info-table.h
+++ b/deps/v8/src/heap/cppgc/gc-info-table.h
@@ -63,10 +63,13 @@ class V8_EXPORT GCInfoTable final {
return table_[index];
}
- GCInfoIndex NumberOfGCInfosForTesting() const { return current_index_; }
+ GCInfoIndex NumberOfGCInfos() const { return current_index_; }
+
GCInfoIndex LimitForTesting() const { return limit_; }
GCInfo& TableSlotForTesting(GCInfoIndex index) { return table_[index]; }
+ PageAllocator* allocator() const { return page_allocator_; }
+
private:
void Resize();
@@ -93,8 +96,10 @@ class V8_EXPORT GlobalGCInfoTable final {
GlobalGCInfoTable(const GlobalGCInfoTable&) = delete;
GlobalGCInfoTable& operator=(const GlobalGCInfoTable&) = delete;
- // Sets up a singleton table that can be acquired using Get().
- static void Create(PageAllocator* page_allocator);
+ // Sets up the table with the provided `page_allocator`. Will use an internal
+ // allocator in case no PageAllocator is provided. May be called multiple
+ // times with the same `page_allocator` argument.
+ static void Initialize(PageAllocator* page_allocator);
// Accessors for the singleton table.
static GCInfoTable& GetMutable() { return *global_table_; }
diff --git a/deps/v8/src/heap/cppgc/heap-base.cc b/deps/v8/src/heap/cppgc/heap-base.cc
index a882c0792a..05cfb7fb47 100644
--- a/deps/v8/src/heap/cppgc/heap-base.cc
+++ b/deps/v8/src/heap/cppgc/heap-base.cc
@@ -4,12 +4,14 @@
#include "src/heap/cppgc/heap-base.h"
+#include "include/cppgc/heap-consistency.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/platform/platform.h"
#include "src/heap/base/stack.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap-statistics-collector.h"
#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/marking-verifier.h"
@@ -56,7 +58,8 @@ class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
HeapBase::HeapBase(
std::shared_ptr<cppgc::Platform> platform,
const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces,
- StackSupport stack_support)
+ StackSupport stack_support,
+ std::unique_ptr<MetricRecorder> histogram_recorder)
: raw_heap_(this, custom_spaces),
platform_(std::move(platform)),
#if defined(CPPGC_CAGED_HEAP)
@@ -66,7 +69,8 @@ HeapBase::HeapBase(
page_backend_(
std::make_unique<PageBackend>(platform_->GetPageAllocator())),
#endif
- stats_collector_(std::make_unique<StatsCollector>()),
+ stats_collector_(std::make_unique<StatsCollector>(
+ std::move(histogram_recorder), platform_.get())),
stack_(std::make_unique<heap::base::Stack>(
v8::base::Stack::GetStackStart())),
prefinalizer_handler_(std::make_unique<PreFinalizerHandler>(*this)),
@@ -75,6 +79,8 @@ HeapBase::HeapBase(
stats_collector_.get()),
sweeper_(&raw_heap_, platform_.get(), stats_collector_.get()),
stack_support_(stack_support) {
+ stats_collector_->RegisterObserver(
+ &allocation_observer_for_PROCESS_HEAP_STATISTICS_);
}
HeapBase::~HeapBase() = default;
@@ -83,14 +89,62 @@ size_t HeapBase::ObjectPayloadSize() const {
return ObjectSizeCounter().GetSize(const_cast<RawHeap*>(&raw_heap()));
}
-HeapBase::NoGCScope::NoGCScope(HeapBase& heap) : heap_(heap) {
- heap_.no_gc_scope_++;
+void HeapBase::AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded() {
+ if (marker_) marker_->AdvanceMarkingOnAllocation();
+}
+void HeapBase::ExecutePreFinalizers() {
+ // Pre finalizers are forbidden from allocating objects.
+ cppgc::subtle::DisallowGarbageCollectionScope no_gc_scope(*this);
+ prefinalizer_handler_->InvokePreFinalizers();
+}
+
+void HeapBase::Terminate() {
+ DCHECK(!IsMarking());
+ CHECK(!in_disallow_gc_scope());
+
+ sweeper().FinishIfRunning();
+
+ constexpr size_t kMaxTerminationGCs = 20;
+ size_t gc_count = 0;
+ do {
+ CHECK_LT(gc_count++, kMaxTerminationGCs);
+
+ // Clear root sets.
+ strong_persistent_region_.ClearAllUsedNodes();
+ strong_cross_thread_persistent_region_.ClearAllUsedNodes();
+ // Clear weak root sets, as the GC below does not execute weakness
+ // callbacks.
+ weak_persistent_region_.ClearAllUsedNodes();
+ weak_cross_thread_persistent_region_.ClearAllUsedNodes();
+
+ stats_collector()->NotifyMarkingStarted(
+ GarbageCollector::Config::CollectionType::kMajor,
+ GarbageCollector::Config::IsForcedGC::kForced);
+ stats_collector()->NotifyMarkingCompleted(0);
+ object_allocator().ResetLinearAllocationBuffers();
+ ExecutePreFinalizers();
+ sweeper().Start(
+ {Sweeper::SweepingConfig::SweepingType::kAtomic,
+ Sweeper::SweepingConfig::CompactableSpaceHandling::kSweep});
+ sweeper().NotifyDoneIfNeeded();
+ } while (strong_persistent_region_.NodesInUse() > 0);
+
+ object_allocator().Terminate();
+ disallow_gc_scope_++;
}
-HeapBase::NoGCScope::~NoGCScope() { heap_.no_gc_scope_--; }
+HeapStatistics HeapBase::CollectStatistics(
+ HeapStatistics::DetailLevel detail_level) {
+ if (detail_level == HeapStatistics::DetailLevel::kBrief) {
+ return {stats_collector_->allocated_memory_size(),
+ stats_collector_->allocated_object_size(),
+ HeapStatistics::DetailLevel::kBrief,
+ {}};
+ }
-void HeapBase::AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded() {
- if (marker_) marker_->AdvanceMarkingOnAllocation();
+ sweeper_.FinishIfRunning();
+ object_allocator_.ResetLinearAllocationBuffers();
+ return HeapStatisticsCollector().CollectStatistics(this);
}
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/heap-base.h b/deps/v8/src/heap/cppgc/heap-base.h
index acff3f6936..16441a5993 100644
--- a/deps/v8/src/heap/cppgc/heap-base.h
+++ b/deps/v8/src/heap/cppgc/heap-base.h
@@ -8,15 +8,19 @@
#include <memory>
#include <set>
+#include "include/cppgc/heap-statistics.h"
#include "include/cppgc/heap.h"
#include "include/cppgc/internal/persistent-node.h"
#include "include/cppgc/macros.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/compactor.h"
#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/metric-recorder.h"
#include "src/heap/cppgc/object-allocator.h"
+#include "src/heap/cppgc/process-heap-statistics.h"
#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/sweeper.h"
+#include "v8config.h" // NOLINT(build/include_directory)
#if defined(CPPGC_CAGED_HEAP)
#include "src/heap/cppgc/caged-heap.h"
@@ -29,6 +33,14 @@ class Stack;
} // namespace heap
namespace cppgc {
+namespace subtle {
+class DisallowGarbageCollectionScope;
+class NoGarbageCollectionScope;
+} // namespace subtle
+
+namespace testing {
+class OverrideEmbedderStackStateScope;
+} // namespace testing
class Platform;
@@ -53,22 +65,6 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
public:
using StackSupport = cppgc::Heap::StackSupport;
- // NoGCScope allows going over limits and avoids triggering garbage
- // collection triggered through allocations or even explicitly.
- class V8_EXPORT_PRIVATE V8_NODISCARD NoGCScope final {
- CPPGC_STACK_ALLOCATED();
-
- public:
- explicit NoGCScope(HeapBase& heap);
- ~NoGCScope();
-
- NoGCScope(const NoGCScope&) = delete;
- NoGCScope& operator=(const NoGCScope&) = delete;
-
- private:
- HeapBase& heap_;
- };
-
static HeapBase& From(cppgc::HeapHandle& heap_handle) {
return static_cast<HeapBase&>(heap_handle);
}
@@ -78,7 +74,8 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
HeapBase(std::shared_ptr<cppgc::Platform> platform,
const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces,
- StackSupport stack_support);
+ StackSupport stack_support,
+ std::unique_ptr<MetricRecorder> histogram_recorder);
virtual ~HeapBase();
HeapBase(const HeapBase&) = delete;
@@ -114,8 +111,10 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
Compactor& compactor() { return compactor_; }
ObjectAllocator& object_allocator() { return object_allocator_; }
+ const ObjectAllocator& object_allocator() const { return object_allocator_; }
Sweeper& sweeper() { return sweeper_; }
+ const Sweeper& sweeper() const { return sweeper_; }
PersistentRegion& GetStrongPersistentRegion() {
return strong_persistent_region_;
@@ -152,8 +151,15 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
void AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded();
- // Notifies the heap that a GC is done.
- virtual void PostGarbageCollection() = 0;
+ // Termination drops all roots (clears them out) and runs garbage collections
+ // in a bounded fixed point loop until no new objects are created in
+ // destructors. Exceeding the loop bound results in a crash.
+ void Terminate();
+
+ bool in_disallow_gc_scope() const { return disallow_gc_scope_ > 0; }
+ bool in_atomic_pause() const { return in_atomic_pause_; }
+
+ HeapStatistics CollectStatistics(HeapStatistics::DetailLevel);
protected:
virtual void FinalizeIncrementalGarbageCollectionIfNeeded(
@@ -161,6 +167,10 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
bool in_no_gc_scope() const { return no_gc_scope_ > 0; }
+ bool IsMarking() const { return marker_.get(); }
+
+ void ExecutePreFinalizers();
+
RawHeap raw_heap_;
std::shared_ptr<cppgc::Platform> platform_;
#if defined(CPPGC_CAGED_HEAP)
@@ -182,16 +192,25 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
PersistentRegion strong_cross_thread_persistent_region_;
PersistentRegion weak_cross_thread_persistent_region_;
+ ProcessHeapStatisticsUpdater::AllocationObserverImpl
+ allocation_observer_for_PROCESS_HEAP_STATISTICS_;
#if defined(CPPGC_YOUNG_GENERATION)
std::set<void*> remembered_slots_;
#endif
size_t no_gc_scope_ = 0;
+ size_t disallow_gc_scope_ = 0;
const StackSupport stack_support_;
+ std::unique_ptr<EmbedderStackState> override_stack_state_;
+
+ bool in_atomic_pause_ = false;
friend class MarkerBase::IncrementalMarkingTask;
friend class testing::TestWithHeap;
+ friend class cppgc::subtle::DisallowGarbageCollectionScope;
+ friend class cppgc::subtle::NoGarbageCollectionScope;
+ friend class cppgc::testing::OverrideEmbedderStackStateScope;
};
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/heap-consistency.cc b/deps/v8/src/heap/cppgc/heap-consistency.cc
new file mode 100644
index 0000000000..504caa6f0d
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/heap-consistency.cc
@@ -0,0 +1,66 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/heap-consistency.h"
+
+#include "include/cppgc/heap.h"
+#include "src/base/logging.h"
+#include "src/heap/cppgc/heap-base.h"
+
+namespace cppgc {
+namespace subtle {
+
+// static
+bool DisallowGarbageCollectionScope::IsGarbageCollectionAllowed(
+ cppgc::HeapHandle& heap_handle) {
+ auto& heap_base = internal::HeapBase::From(heap_handle);
+ return !heap_base.in_disallow_gc_scope();
+}
+
+// static
+void DisallowGarbageCollectionScope::Enter(cppgc::HeapHandle& heap_handle) {
+ auto& heap_base = internal::HeapBase::From(heap_handle);
+ heap_base.disallow_gc_scope_++;
+}
+
+// static
+void DisallowGarbageCollectionScope::Leave(cppgc::HeapHandle& heap_handle) {
+ auto& heap_base = internal::HeapBase::From(heap_handle);
+ DCHECK_GT(heap_base.disallow_gc_scope_, 0);
+ heap_base.disallow_gc_scope_--;
+}
+
+DisallowGarbageCollectionScope::DisallowGarbageCollectionScope(
+ cppgc::HeapHandle& heap_handle)
+ : heap_handle_(heap_handle) {
+ Enter(heap_handle);
+}
+
+DisallowGarbageCollectionScope::~DisallowGarbageCollectionScope() {
+ Leave(heap_handle_);
+}
+
+// static
+void NoGarbageCollectionScope::Enter(cppgc::HeapHandle& heap_handle) {
+ auto& heap_base = internal::HeapBase::From(heap_handle);
+ heap_base.no_gc_scope_++;
+}
+
+// static
+void NoGarbageCollectionScope::Leave(cppgc::HeapHandle& heap_handle) {
+ auto& heap_base = internal::HeapBase::From(heap_handle);
+ DCHECK_GT(heap_base.no_gc_scope_, 0);
+ heap_base.no_gc_scope_--;
+}
+
+NoGarbageCollectionScope::NoGarbageCollectionScope(
+ cppgc::HeapHandle& heap_handle)
+ : heap_handle_(heap_handle) {
+ Enter(heap_handle);
+}
+
+NoGarbageCollectionScope::~NoGarbageCollectionScope() { Leave(heap_handle_); }
+
+} // namespace subtle
+} // namespace cppgc
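
On the embedder side these scopes are used through the public include/cppgc/heap-consistency.h header. A minimal usage sketch, assuming an already-initialized cppgc::Heap; the function name and include style are illustrative only.

#include "cppgc/heap-consistency.h"
#include "cppgc/heap.h"

void TouchObjectsWithoutFinalization(cppgc::Heap& heap) {
  // While this scope is alive, triggering a garbage collection is considered
  // a bug; the destructor leaves the scope again.
  cppgc::subtle::DisallowGarbageCollectionScope disallow_gc(
      heap.GetHeapHandle());
  // ... work that must not observe object finalization ...
}
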
diff --git a/deps/v8/src/heap/cppgc/heap-object-header.h b/deps/v8/src/heap/cppgc/heap-object-header.h
index ce850453b6..45ff4aa00c 100644
--- a/deps/v8/src/heap/cppgc/heap-object-header.h
+++ b/deps/v8/src/heap/cppgc/heap-object-header.h
@@ -73,6 +73,9 @@ class HeapObjectHeader {
inline void SetSize(size_t size);
template <AccessMode mode = AccessMode::kNonAtomic>
+ inline size_t PayloadSize() const;
+
+ template <AccessMode mode = AccessMode::kNonAtomic>
inline bool IsLargeObject() const;
template <AccessMode = AccessMode::kNonAtomic>
@@ -202,6 +205,11 @@ void HeapObjectHeader::SetSize(size_t size) {
}
template <AccessMode mode>
+size_t HeapObjectHeader::PayloadSize() const {
+ return GetSize<mode>() - sizeof(HeapObjectHeader);
+}
+
+template <AccessMode mode>
bool HeapObjectHeader::IsLargeObject() const {
return GetSize<mode>() == kLargeObjectSizeInHeader;
}
diff --git a/deps/v8/src/heap/cppgc/heap-page.cc b/deps/v8/src/heap/cppgc/heap-page.cc
index b2b3d83182..d573d675ee 100644
--- a/deps/v8/src/heap/cppgc/heap-page.cc
+++ b/deps/v8/src/heap/cppgc/heap-page.cc
@@ -15,6 +15,7 @@
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/page-memory.h"
#include "src/heap/cppgc/raw-heap.h"
+#include "src/heap/cppgc/stats-collector.h"
namespace cppgc {
namespace internal {
@@ -113,6 +114,7 @@ NormalPage* NormalPage::Create(PageBackend* page_backend,
void* memory = page_backend->AllocateNormalPageMemory(space->index());
auto* normal_page = new (memory) NormalPage(space->raw_heap()->heap(), space);
normal_page->SynchronizedStore();
+ normal_page->heap()->stats_collector()->NotifyAllocatedMemory(kPageSize);
return normal_page;
}
@@ -123,6 +125,7 @@ void NormalPage::Destroy(NormalPage* page) {
DCHECK_EQ(space->end(), std::find(space->begin(), space->end(), page));
page->~NormalPage();
PageBackend* backend = page->heap()->page_backend();
+ page->heap()->stats_collector()->NotifyFreedMemory(kPageSize);
backend->FreeNormalPageMemory(space->index(),
reinterpret_cast<Address>(page));
}
@@ -177,20 +180,26 @@ LargePage::LargePage(HeapBase* heap, BaseSpace* space, size_t size)
LargePage::~LargePage() = default;
// static
+size_t LargePage::AllocationSize(size_t payload_size) {
+ const size_t page_header_size =
+ RoundUp(sizeof(LargePage), kAllocationGranularity);
+ return page_header_size + payload_size;
+}
+
+// static
LargePage* LargePage::Create(PageBackend* page_backend, LargePageSpace* space,
size_t size) {
DCHECK_NOT_NULL(page_backend);
DCHECK_NOT_NULL(space);
DCHECK_LE(kLargeObjectSizeThreshold, size);
- const size_t page_header_size =
- RoundUp(sizeof(LargePage), kAllocationGranularity);
- const size_t allocation_size = page_header_size + size;
+ const size_t allocation_size = AllocationSize(size);
auto* heap = space->raw_heap()->heap();
void* memory = page_backend->AllocateLargePageMemory(allocation_size);
LargePage* page = new (memory) LargePage(heap, space, size);
page->SynchronizedStore();
+ page->heap()->stats_collector()->NotifyAllocatedMemory(allocation_size);
return page;
}
@@ -203,6 +212,8 @@ void LargePage::Destroy(LargePage* page) {
#endif
page->~LargePage();
PageBackend* backend = page->heap()->page_backend();
+ page->heap()->stats_collector()->NotifyFreedMemory(
+ AllocationSize(page->PayloadSize()));
backend->FreeLargePageMemory(reinterpret_cast<Address>(page));
}
diff --git a/deps/v8/src/heap/cppgc/heap-page.h b/deps/v8/src/heap/cppgc/heap-page.h
index bc3762b4ae..5e238e5bb7 100644
--- a/deps/v8/src/heap/cppgc/heap-page.h
+++ b/deps/v8/src/heap/cppgc/heap-page.h
@@ -33,8 +33,7 @@ class V8_EXPORT_PRIVATE BasePage {
BasePage(const BasePage&) = delete;
BasePage& operator=(const BasePage&) = delete;
- HeapBase* heap() { return heap_; }
- const HeapBase* heap() const { return heap_; }
+ HeapBase* heap() const { return heap_; }
BaseSpace* space() { return space_; }
const BaseSpace* space() const { return space_; }
@@ -186,6 +185,8 @@ class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
class V8_EXPORT_PRIVATE LargePage final : public BasePage {
public:
+ // Returns the allocation size required for a payload of size |size|.
+ static size_t AllocationSize(size_t size);
// Allocates a new page in the detached state.
static LargePage* Create(PageBackend*, LargePageSpace*, size_t);
// Destroys and frees the page. The page must be detached from the
diff --git a/deps/v8/src/heap/cppgc/heap-state.cc b/deps/v8/src/heap/cppgc/heap-state.cc
new file mode 100644
index 0000000000..32084697c1
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/heap-state.cc
@@ -0,0 +1,32 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/heap-state.h"
+
+#include "src/heap/cppgc/heap-base.h"
+
+namespace cppgc {
+namespace subtle {
+
+// static
+bool HeapState::IsMarking(const HeapHandle& heap_handle) {
+ const auto& heap_base = internal::HeapBase::From(heap_handle);
+ const internal::MarkerBase* marker = heap_base.marker();
+ return marker && marker->IsMarking();
+}
+
+// static
+bool HeapState::IsSweeping(const HeapHandle& heap_handle) {
+ const auto& heap_base = internal::HeapBase::From(heap_handle);
+ return heap_base.sweeper().IsSweepingInProgress();
+}
+
+// static
+bool HeapState::IsInAtomicPause(const HeapHandle& heap_handle) {
+ const auto& heap_base = internal::HeapBase::From(heap_handle);
+ return heap_base.in_atomic_pause();
+}
+
+} // namespace subtle
+} // namespace cppgc
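
These queries let embedders adapt behavior to the current GC phase. A small usage sketch follows; the policy encoded in the function is illustrative only.

#include "cppgc/heap-state.h"
#include "cppgc/heap.h"

bool IsHeapIdle(cppgc::Heap& heap) {
  cppgc::HeapHandle& handle = heap.GetHeapHandle();
  // The answers are snapshots; the state may change right after the calls.
  return !cppgc::subtle::HeapState::IsMarking(handle) &&
         !cppgc::subtle::HeapState::IsSweeping(handle) &&
         !cppgc::subtle::HeapState::IsInAtomicPause(handle);
}
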
diff --git a/deps/v8/src/heap/cppgc/heap-statistics-collector.cc b/deps/v8/src/heap/cppgc/heap-statistics-collector.cc
new file mode 100644
index 0000000000..961148babd
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/heap-statistics-collector.cc
@@ -0,0 +1,158 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/heap-statistics-collector.h"
+
+#include <string>
+
+#include "include/cppgc/name-provider.h"
+#include "src/heap/cppgc/free-list.h"
+#include "src/heap/cppgc/heap-base.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/raw-heap.h"
+#include "src/heap/cppgc/stats-collector.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+std::string GetNormalPageSpaceName(size_t index) {
+ // Check that space is not a large object space.
+ DCHECK_NE(RawHeap::kNumberOfRegularSpaces - 1, index);
+ // Handle regular normal page spaces.
+ if (index < RawHeap::kNumberOfRegularSpaces) {
+ return "NormalPageSpace" + std::to_string(index);
+ }
+ // Space is a custom space.
+ return "CustomSpace" +
+ std::to_string(index - RawHeap::kNumberOfRegularSpaces);
+}
+
+HeapStatistics::SpaceStatistics* InitializeSpace(HeapStatistics* stats,
+ std::string name) {
+ stats->space_stats.emplace_back();
+ HeapStatistics::SpaceStatistics* space_stats = &stats->space_stats.back();
+ space_stats->name = std::move(name);
+
+ if (!NameProvider::HideInternalNames()) {
+ const size_t num_types = GlobalGCInfoTable::Get().NumberOfGCInfos();
+ space_stats->object_stats.num_types = num_types;
+ space_stats->object_stats.type_name.resize(num_types);
+ space_stats->object_stats.type_count.resize(num_types);
+ space_stats->object_stats.type_bytes.resize(num_types);
+ }
+
+ return space_stats;
+}
+
+void FinalizePage(HeapStatistics::SpaceStatistics* space_stats,
+ HeapStatistics::PageStatistics** page_stats) {
+ if (*page_stats) {
+ DCHECK_NOT_NULL(space_stats);
+ space_stats->physical_size_bytes += (*page_stats)->physical_size_bytes;
+ space_stats->used_size_bytes += (*page_stats)->used_size_bytes;
+ }
+ *page_stats = nullptr;
+}
+
+void FinalizeSpace(HeapStatistics* stats,
+ HeapStatistics::SpaceStatistics** space_stats,
+ HeapStatistics::PageStatistics** page_stats) {
+ FinalizePage(*space_stats, page_stats);
+ if (*space_stats) {
+ DCHECK_NOT_NULL(stats);
+ stats->physical_size_bytes += (*space_stats)->physical_size_bytes;
+ stats->used_size_bytes += (*space_stats)->used_size_bytes;
+ }
+ *space_stats = nullptr;
+}
+
+void RecordObjectType(HeapStatistics::SpaceStatistics* space_stats,
+ HeapObjectHeader* header, size_t object_size) {
+ if (!NameProvider::HideInternalNames()) {
+ // Detailed names available.
+ GCInfoIndex gc_info_index = header->GetGCInfoIndex();
+ space_stats->object_stats.type_count[gc_info_index]++;
+ space_stats->object_stats.type_bytes[gc_info_index] += object_size;
+ if (space_stats->object_stats.type_name[gc_info_index].empty()) {
+ space_stats->object_stats.type_name[gc_info_index] =
+ header->GetName().value;
+ }
+ }
+}
+
+} // namespace
+
+HeapStatistics HeapStatisticsCollector::CollectStatistics(HeapBase* heap) {
+ HeapStatistics stats;
+ stats.detail_level = HeapStatistics::DetailLevel::kDetailed;
+ current_stats_ = &stats;
+
+ Traverse(&heap->raw_heap());
+ FinalizeSpace(current_stats_, &current_space_stats_, &current_page_stats_);
+
+ DCHECK_EQ(heap->stats_collector()->allocated_memory_size(),
+ stats.physical_size_bytes);
+ return stats;
+}
+
+bool HeapStatisticsCollector::VisitNormalPageSpace(NormalPageSpace* space) {
+ DCHECK_EQ(0u, space->linear_allocation_buffer().size());
+
+ FinalizeSpace(current_stats_, &current_space_stats_, &current_page_stats_);
+
+ current_space_stats_ =
+ InitializeSpace(current_stats_, GetNormalPageSpaceName(space->index()));
+
+ space->free_list().CollectStatistics(current_space_stats_->free_list_stats);
+
+ return false;
+}
+
+bool HeapStatisticsCollector::VisitLargePageSpace(LargePageSpace* space) {
+ FinalizeSpace(current_stats_, &current_space_stats_, &current_page_stats_);
+
+ current_space_stats_ = InitializeSpace(current_stats_, "LargePageSpace");
+
+ return false;
+}
+
+bool HeapStatisticsCollector::VisitNormalPage(NormalPage* page) {
+ DCHECK_NOT_NULL(current_space_stats_);
+ FinalizePage(current_space_stats_, &current_page_stats_);
+ current_space_stats_->page_stats.emplace_back(
+ HeapStatistics::PageStatistics{kPageSize, 0});
+ current_page_stats_ = &current_space_stats_->page_stats.back();
+ return false;
+}
+
+bool HeapStatisticsCollector::VisitLargePage(LargePage* page) {
+ DCHECK_NOT_NULL(current_space_stats_);
+ FinalizePage(current_space_stats_, &current_page_stats_);
+ HeapObjectHeader* object_header = page->ObjectHeader();
+ size_t object_size = page->PayloadSize();
+ RecordObjectType(current_space_stats_, object_header, object_size);
+ size_t allocated_size = LargePage::AllocationSize(object_size);
+ current_space_stats_->physical_size_bytes += allocated_size;
+ current_space_stats_->used_size_bytes += object_size;
+ current_space_stats_->page_stats.emplace_back(
+ HeapStatistics::PageStatistics{allocated_size, object_size});
+
+ return true;
+}
+
+bool HeapStatisticsCollector::VisitHeapObjectHeader(HeapObjectHeader* header) {
+ DCHECK(!header->IsLargeObject());
+ DCHECK_NOT_NULL(current_space_stats_);
+ DCHECK_NOT_NULL(current_page_stats_);
+ if (header->IsFree()) return true;
+ size_t object_size = header->GetSize();
+ RecordObjectType(current_space_stats_, header, object_size);
+ current_page_stats_->used_size_bytes += object_size;
+ return true;
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/heap-statistics-collector.h b/deps/v8/src/heap/cppgc/heap-statistics-collector.h
new file mode 100644
index 0000000000..52c92198a8
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/heap-statistics-collector.h
@@ -0,0 +1,35 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_HEAP_STATISTICS_COLLECTOR_H_
+#define V8_HEAP_CPPGC_HEAP_STATISTICS_COLLECTOR_H_
+
+#include "include/cppgc/heap-statistics.h"
+#include "src/heap/cppgc/heap-visitor.h"
+
+namespace cppgc {
+namespace internal {
+
+class HeapStatisticsCollector : private HeapVisitor<HeapStatisticsCollector> {
+ friend class HeapVisitor<HeapStatisticsCollector>;
+
+ public:
+ HeapStatistics CollectStatistics(HeapBase*);
+
+ private:
+ bool VisitNormalPageSpace(NormalPageSpace*);
+ bool VisitLargePageSpace(LargePageSpace*);
+ bool VisitNormalPage(NormalPage*);
+ bool VisitLargePage(LargePage*);
+ bool VisitHeapObjectHeader(HeapObjectHeader*);
+
+ HeapStatistics* current_stats_;
+ HeapStatistics::SpaceStatistics* current_space_stats_ = nullptr;
+ HeapStatistics::PageStatistics* current_page_stats_ = nullptr;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_HEAP_STATISTICS_COLLECTOR_H_
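
The collector leans on the CRTP-style HeapVisitor from src/heap/cppgc/heap-visitor.h: Traverse() walks the raw heap and dispatches into the private Visit* hooks, which is why the visitor base is declared a friend above. A minimal standalone sketch of that dispatch pattern, with simplified stand-in types rather than the actual cppgc heap structures or their exact return-value semantics:

#include <cstddef>
#include <cstdio>
#include <vector>

// Simplified stand-ins for the real heap layout.
struct Page { std::size_t used_bytes; };
struct Space { std::vector<Page> pages; };
struct Heap { std::vector<Space> spaces; };

// CRTP visitor: Traverse() calls the derived class' hooks without virtual
// dispatch, mirroring HeapVisitor<HeapStatisticsCollector>.
template <typename Derived>
class HeapVisitor {
 public:
  void Traverse(const Heap& heap) {
    for (const Space& space : heap.spaces) {
      // A hook returning true means "handled, do not descend further".
      if (derived().VisitSpace(space)) continue;
      for (const Page& page : space.pages) derived().VisitPage(page);
    }
  }

 protected:
  bool VisitSpace(const Space&) { return false; }
  void VisitPage(const Page&) {}

 private:
  Derived& derived() { return static_cast<Derived&>(*this); }
};

class UsedBytesCollector final : public HeapVisitor<UsedBytesCollector> {
  friend class HeapVisitor<UsedBytesCollector>;

 public:
  std::size_t used_bytes() const { return used_bytes_; }

 private:
  bool VisitSpace(const Space&) { return false; }  // descend into pages
  void VisitPage(const Page& page) { used_bytes_ += page.used_bytes; }

  std::size_t used_bytes_ = 0;
};

int main() {
  Heap heap{{Space{{Page{128}, Page{256}}}, Space{{Page{64}}}}};
  UsedBytesCollector collector;
  collector.Traverse(heap);
  std::printf("used: %zu bytes\n", collector.used_bytes());  // used: 448 bytes
}
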
diff --git a/deps/v8/src/heap/cppgc/heap.cc b/deps/v8/src/heap/cppgc/heap.cc
index 0a79e80820..875eb19889 100644
--- a/deps/v8/src/heap/cppgc/heap.cc
+++ b/deps/v8/src/heap/cppgc/heap.cc
@@ -4,6 +4,7 @@
#include "src/heap/cppgc/heap.h"
+#include "include/cppgc/heap-consistency.h"
#include "src/heap/base/stack.h"
#include "src/heap/cppgc/garbage-collector.h"
#include "src/heap/cppgc/gc-invoker.h"
@@ -86,7 +87,8 @@ void CheckConfig(Heap::Config config, Heap::MarkingType marking_support,
Heap::Heap(std::shared_ptr<cppgc::Platform> platform,
cppgc::Heap::HeapOptions options)
- : HeapBase(platform, options.custom_spaces, options.stack_support),
+ : HeapBase(platform, options.custom_spaces, options.stack_support,
+ nullptr /* metric_recorder */),
gc_invoker_(this, platform_.get(), options.stack_support),
growing_(&gc_invoker_, stats_collector_.get(),
options.resource_constraints, options.marking_support,
@@ -100,13 +102,11 @@ Heap::Heap(std::shared_ptr<cppgc::Platform> platform,
}
Heap::~Heap() {
- NoGCScope no_gc(*this);
+ subtle::NoGarbageCollectionScope no_gc(*this);
// Finish already running GC if any, but don't finalize live objects.
sweeper_.FinishIfRunning();
}
-bool Heap::IsMarking() const { return marker_.get(); }
-
void Heap::CollectGarbage(Config config) {
DCHECK_EQ(Config::MarkingType::kAtomic, config.marking_type);
CheckConfig(config, marking_support_, sweeping_support_);
@@ -149,7 +149,6 @@ void Heap::FinalizeIncrementalGarbageCollectionIfRunning(Config config) {
void Heap::StartGarbageCollection(Config config) {
DCHECK(!IsMarking());
-
DCHECK(!in_no_gc_scope());
// Finish sweeping in case it is still running.
@@ -172,41 +171,41 @@ void Heap::StartGarbageCollection(Config config) {
void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
DCHECK(IsMarking());
DCHECK(!in_no_gc_scope());
+ CHECK(!in_disallow_gc_scope());
config_.stack_state = stack_state;
+ if (override_stack_state_) {
+ config_.stack_state = *override_stack_state_;
+ }
+ in_atomic_pause_ = true;
{
// This guards atomic pause marking, meaning that no internal method or
// external callbacks are allowed to allocate new objects.
- ObjectAllocator::NoAllocationScope no_allocation_scope_(object_allocator_);
- marker_->FinishMarking(stack_state);
- }
- {
- // Pre finalizers are forbidden from allocating objects.
- ObjectAllocator::NoAllocationScope no_allocation_scope_(object_allocator_);
- prefinalizer_handler_->InvokePreFinalizers();
+ cppgc::subtle::DisallowGarbageCollectionScope no_gc_scope(*this);
+ marker_->FinishMarking(config_.stack_state);
}
marker_.reset();
+ ExecutePreFinalizers();
// TODO(chromium:1056170): replace build flag with dedicated flag.
#if DEBUG
MarkingVerifier verifier(*this);
- verifier.Run(stack_state);
+ verifier.Run(config_.stack_state);
#endif
- NoGCScope no_gc(*this);
+ subtle::NoGarbageCollectionScope no_gc(*this);
const Sweeper::SweepingConfig sweeping_config{
config_.sweeping_type,
Sweeper::SweepingConfig::CompactableSpaceHandling::kSweep};
sweeper_.Start(sweeping_config);
+ in_atomic_pause_ = false;
sweeper_.NotifyDoneIfNeeded();
}
-void Heap::PostGarbageCollection() {}
-
void Heap::DisableHeapGrowingForTesting() { growing_.DisableForTesting(); }
void Heap::FinalizeIncrementalGarbageCollectionIfNeeded(
Config::StackState stack_state) {
StatsCollector::EnabledScope stats_scope(
- *this, StatsCollector::kMarkIncrementalFinalize);
+ stats_collector(), StatsCollector::kMarkIncrementalFinalize);
FinalizeGarbageCollection(stack_state);
}
diff --git a/deps/v8/src/heap/cppgc/heap.h b/deps/v8/src/heap/cppgc/heap.h
index b718341af1..41ef0cfd1c 100644
--- a/deps/v8/src/heap/cppgc/heap.h
+++ b/deps/v8/src/heap/cppgc/heap.h
@@ -46,10 +46,6 @@ class V8_EXPORT_PRIVATE Heap final : public HeapBase,
void FinalizeIncrementalGarbageCollectionIfNeeded(Config::StackState) final;
- void PostGarbageCollection() final;
-
- bool IsMarking() const;
-
Config config_;
GCInvoker gc_invoker_;
HeapGrowing growing_;
diff --git a/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc b/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc
index cef34b1efe..0eae47e59d 100644
--- a/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc
+++ b/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc
@@ -23,10 +23,9 @@ void IncrementalMarkingSchedule::NotifyIncrementalMarkingStart() {
incremental_marking_start_time_ = v8::base::TimeTicks::Now();
}
-void IncrementalMarkingSchedule::UpdateIncrementalMarkedBytes(
+void IncrementalMarkingSchedule::UpdateMutatorThreadMarkedBytes(
size_t overall_marked_bytes) {
- DCHECK(!incremental_marking_start_time_.IsNull());
- incrementally_marked_bytes_ = overall_marked_bytes;
+ mutator_thread_marked_bytes_ = overall_marked_bytes;
}
void IncrementalMarkingSchedule::AddConcurrentlyMarkedBytes(
@@ -36,7 +35,7 @@ void IncrementalMarkingSchedule::AddConcurrentlyMarkedBytes(
}
size_t IncrementalMarkingSchedule::GetOverallMarkedBytes() const {
- return incrementally_marked_bytes_ + GetConcurrentlyMarkedBytes();
+ return mutator_thread_marked_bytes_ + GetConcurrentlyMarkedBytes();
}
size_t IncrementalMarkingSchedule::GetConcurrentlyMarkedBytes() const {
diff --git a/deps/v8/src/heap/cppgc/incremental-marking-schedule.h b/deps/v8/src/heap/cppgc/incremental-marking-schedule.h
index a9a0f7d840..22f023e620 100644
--- a/deps/v8/src/heap/cppgc/incremental-marking-schedule.h
+++ b/deps/v8/src/heap/cppgc/incremental-marking-schedule.h
@@ -23,7 +23,7 @@ class V8_EXPORT_PRIVATE IncrementalMarkingSchedule {
void NotifyIncrementalMarkingStart();
- void UpdateIncrementalMarkedBytes(size_t);
+ void UpdateMutatorThreadMarkedBytes(size_t);
void AddConcurrentlyMarkedBytes(size_t);
size_t GetOverallMarkedBytes() const;
@@ -42,7 +42,7 @@ class V8_EXPORT_PRIVATE IncrementalMarkingSchedule {
v8::base::TimeTicks incremental_marking_start_time_;
- size_t incrementally_marked_bytes_ = 0;
+ size_t mutator_thread_marked_bytes_ = 0;
std::atomic_size_t concurrently_marked_bytes_{0};
// Using -1 as sentinel to denote
diff --git a/deps/v8/src/heap/cppgc/marker.cc b/deps/v8/src/heap/cppgc/marker.cc
index f6c0e74f62..b4f8cf2366 100644
--- a/deps/v8/src/heap/cppgc/marker.cc
+++ b/deps/v8/src/heap/cppgc/marker.cc
@@ -4,10 +4,12 @@
#include "src/heap/cppgc/marker.h"
+#include <cstdint>
#include <memory>
-#include "include/cppgc/internal/process-heap.h"
+#include "include/cppgc/heap-consistency.h"
#include "include/cppgc/platform.h"
+#include "src/base/platform/time.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-visitor.h"
@@ -17,6 +19,7 @@
#include "src/heap/cppgc/marking-visitor.h"
#include "src/heap/cppgc/process-heap.h"
#include "src/heap/cppgc/stats-collector.h"
+#include "src/heap/cppgc/write-barrier.h"
#if defined(CPPGC_CAGED_HEAP)
#include "include/cppgc/internal/caged-heap-local-data.h"
@@ -32,9 +35,9 @@ bool EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
config.marking_type ==
Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
- ProcessHeap::EnterIncrementalOrConcurrentMarking();
+ WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater::Enter();
#if defined(CPPGC_CAGED_HEAP)
- heap.caged_heap().local_data().is_marking_in_progress = true;
+ heap.caged_heap().local_data().is_incremental_marking_in_progress = true;
#endif
return true;
}
@@ -46,9 +49,9 @@ bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
config.marking_type ==
Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
- ProcessHeap::ExitIncrementalOrConcurrentMarking();
+ WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater::Exit();
#if defined(CPPGC_CAGED_HEAP)
- heap.caged_heap().local_data().is_marking_in_progress = false;
+ heap.caged_heap().local_data().is_incremental_marking_in_progress = false;
#endif
return true;
}
@@ -60,7 +63,7 @@ void VisitRememberedSlots(HeapBase& heap,
MutatorMarkingState& mutator_marking_state) {
#if defined(CPPGC_YOUNG_GENERATION)
StatsCollector::EnabledScope stats_scope(
- heap, StatsCollector::kMarkVisitRememberedSets);
+ heap.stats_collector(), StatsCollector::kMarkVisitRememberedSets);
for (void* slot : heap.remembered_slots()) {
auto& slot_header = BasePage::FromInnerAddress(&heap, slot)
->ObjectHeaderFromInnerAddress(slot);
@@ -148,7 +151,7 @@ MarkerBase::IncrementalMarkingTask::Post(cppgc::TaskRunner* runner,
void MarkerBase::IncrementalMarkingTask::Run() {
if (handle_.IsCanceled()) return;
- StatsCollector::EnabledScope stats_scope(marker_->heap(),
+ StatsCollector::EnabledScope stats_scope(marker_->heap().stats_collector(),
StatsCollector::kIncrementalMark);
if (marker_->IncrementalMarkingStep(stack_state_)) {
@@ -199,19 +202,20 @@ MarkerBase::~MarkerBase() {
}
void MarkerBase::StartMarking() {
- DCHECK(!is_marking_started_);
+ DCHECK(!is_marking_);
StatsCollector::EnabledScope stats_scope(
- heap(), config_.marking_type == MarkingConfig::MarkingType::kAtomic
- ? StatsCollector::kAtomicMark
- : StatsCollector::kIncrementalMark);
+ heap().stats_collector(),
+ config_.marking_type == MarkingConfig::MarkingType::kAtomic
+ ? StatsCollector::kAtomicMark
+ : StatsCollector::kIncrementalMark);
heap().stats_collector()->NotifyMarkingStarted(config_.collection_type,
config_.is_forced_gc);
- is_marking_started_ = true;
+ is_marking_ = true;
if (EnterIncrementalMarkingIfNeeded(config_, heap())) {
StatsCollector::EnabledScope stats_scope(
- heap(), StatsCollector::kMarkIncrementalStart);
+ heap().stats_collector(), StatsCollector::kMarkIncrementalStart);
// Performing incremental or concurrent marking.
schedule_.NotifyIncrementalMarkingStart();
@@ -227,7 +231,7 @@ void MarkerBase::StartMarking() {
}
void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
- StatsCollector::EnabledScope stats_scope(heap(),
+ StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
StatsCollector::kMarkAtomicPrologue);
if (ExitIncrementalMarkingIfNeeded(config_, heap())) {
@@ -257,29 +261,28 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
}
void MarkerBase::LeaveAtomicPause() {
- StatsCollector::EnabledScope stats_scope(heap(),
+ StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
StatsCollector::kMarkAtomicEpilogue);
DCHECK(!incremental_marking_handle_);
ResetRememberedSet(heap());
heap().stats_collector()->NotifyMarkingCompleted(
// GetOverallMarkedBytes also includes concurrently marked bytes.
schedule_.GetOverallMarkedBytes());
- is_marking_started_ = false;
+ is_marking_ = false;
{
// Weakness callbacks are forbidden from allocating objects.
- ObjectAllocator::NoAllocationScope no_allocation_scope_(
- heap_.object_allocator());
+ cppgc::subtle::DisallowGarbageCollectionScope disallow_gc_scope(heap_);
ProcessWeakness();
}
g_process_mutex.Pointer()->Unlock();
}
void MarkerBase::FinishMarking(MarkingConfig::StackState stack_state) {
- DCHECK(is_marking_started_);
- StatsCollector::EnabledScope stats_scope(heap(), StatsCollector::kAtomicMark);
+ DCHECK(is_marking_);
+ StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
+ StatsCollector::kAtomicMark);
EnterAtomicPause(stack_state);
- CHECK(ProcessWorklistsWithDeadline(std::numeric_limits<size_t>::max(),
- v8::base::TimeTicks::Max()));
+ CHECK(AdvanceMarkingWithLimits(v8::base::TimeDelta::Max(), SIZE_MAX));
mutator_marking_state_.Publish();
LeaveAtomicPause();
}
@@ -287,8 +290,8 @@ void MarkerBase::FinishMarking(MarkingConfig::StackState stack_state) {
void MarkerBase::ProcessWeakness() {
DCHECK_EQ(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
- StatsCollector::DisabledScope stats_scope(
- heap(), StatsCollector::kWeakInvokeCallbacks);
+ StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
+ StatsCollector::kAtomicWeak);
heap().GetWeakPersistentRegion().Trace(&visitor());
// Processing cross-thread handles requires taking the process lock.
@@ -309,7 +312,7 @@ void MarkerBase::ProcessWeakness() {
}
void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
- StatsCollector::EnabledScope stats_scope(heap(),
+ StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
StatsCollector::kMarkVisitRoots);
// Reset LABs before scanning roots. LABs are cleared to allow
@@ -319,12 +322,13 @@ void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
{
{
StatsCollector::DisabledScope inner_stats_scope(
- heap(), StatsCollector::kMarkVisitPersistents);
+ heap().stats_collector(), StatsCollector::kMarkVisitPersistents);
heap().GetStrongPersistentRegion().Trace(&visitor());
}
if (config_.marking_type == MarkingConfig::MarkingType::kAtomic) {
StatsCollector::DisabledScope inner_stats_scope(
- heap(), StatsCollector::kMarkVisitCrossThreadPersistents);
+ heap().stats_collector(),
+ StatsCollector::kMarkVisitCrossThreadPersistents);
g_process_mutex.Get().AssertHeld();
heap().GetStrongCrossThreadPersistentRegion().Trace(&visitor());
}
@@ -332,7 +336,7 @@ void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
if (stack_state != MarkingConfig::StackState::kNoHeapPointers) {
StatsCollector::DisabledScope stack_stats_scope(
- heap(), StatsCollector::kMarkVisitStack);
+ heap().stats_collector(), StatsCollector::kMarkVisitStack);
heap().stack()->IteratePointers(&stack_visitor());
}
if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
@@ -358,33 +362,31 @@ bool MarkerBase::IncrementalMarkingStep(MarkingConfig::StackState stack_state) {
}
config_.stack_state = stack_state;
- return AdvanceMarkingWithDeadline();
+ return AdvanceMarkingWithLimits();
}
void MarkerBase::AdvanceMarkingOnAllocation() {
- if (AdvanceMarkingWithDeadline()) {
+ if (AdvanceMarkingWithLimits()) {
// Schedule another incremental task for finalizing without a stack.
ScheduleIncrementalMarkingTask();
}
}
-bool MarkerBase::AdvanceMarkingWithMaxDuration(
- v8::base::TimeDelta max_duration) {
- return AdvanceMarkingWithDeadline(max_duration);
-}
-
-bool MarkerBase::AdvanceMarkingWithDeadline(v8::base::TimeDelta max_duration) {
+bool MarkerBase::AdvanceMarkingWithLimits(v8::base::TimeDelta max_duration,
+ size_t marked_bytes_limit) {
bool is_done = false;
- if (!incremental_marking_disabled_for_testing_) {
- size_t step_size_in_bytes =
- GetNextIncrementalStepDuration(schedule_, heap_);
+ if (!main_marking_disabled_for_testing_) {
+ if (marked_bytes_limit == 0) {
+ marked_bytes_limit = mutator_marking_state_.marked_bytes() +
+ GetNextIncrementalStepDuration(schedule_, heap_);
+ }
StatsCollector::EnabledScope deadline_scope(
- heap(), StatsCollector::kMarkTransitiveClosureWithDeadline,
- "deadline_ms", max_duration.InMillisecondsF());
+ heap().stats_collector(),
+ StatsCollector::kMarkTransitiveClosureWithDeadline, "deadline_ms",
+ max_duration.InMillisecondsF());
is_done = ProcessWorklistsWithDeadline(
- mutator_marking_state_.marked_bytes() + step_size_in_bytes,
- v8::base::TimeTicks::Now() + max_duration);
- schedule_.UpdateIncrementalMarkedBytes(
+ marked_bytes_limit, v8::base::TimeTicks::Now() + max_duration);
+ schedule_.UpdateMutatorThreadMarkedBytes(
mutator_marking_state_.marked_bytes());
}
mutator_marking_state_.Publish();
@@ -403,7 +405,7 @@ bool MarkerBase::AdvanceMarkingWithDeadline(v8::base::TimeDelta max_duration) {
bool MarkerBase::ProcessWorklistsWithDeadline(
size_t marked_bytes_deadline, v8::base::TimeTicks time_deadline) {
StatsCollector::EnabledScope stats_scope(
- heap(), StatsCollector::kMarkTransitiveClosure);
+ heap().stats_collector(), StatsCollector::kMarkTransitiveClosure);
do {
if ((config_.marking_type == MarkingConfig::MarkingType::kAtomic) ||
schedule_.ShouldFlushEphemeronPairs()) {
@@ -415,7 +417,7 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
// checks to guarantee the deadline is not exceeded.
{
StatsCollector::EnabledScope inner_scope(
- heap(), StatsCollector::kMarkProcessBailOutObjects);
+ heap().stats_collector(), StatsCollector::kMarkProcessBailOutObjects);
if (!DrainWorklistWithBytesAndTimeDeadline<kDefaultDeadlineCheckInterval /
5>(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
@@ -431,7 +433,8 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
{
StatsCollector::EnabledScope inner_scope(
- heap(), StatsCollector::kMarkProcessNotFullyconstructedWorklist);
+ heap().stats_collector(),
+ StatsCollector::kMarkProcessNotFullyconstructedWorklist);
if (!DrainWorklistWithBytesAndTimeDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_
@@ -447,7 +450,8 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
{
StatsCollector::EnabledScope inner_scope(
- heap(), StatsCollector::kMarkProcessMarkingWorklist);
+ heap().stats_collector(),
+ StatsCollector::kMarkProcessMarkingWorklist);
if (!DrainWorklistWithBytesAndTimeDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_.marking_worklist(),
@@ -465,7 +469,8 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
{
StatsCollector::EnabledScope inner_scope(
- heap(), StatsCollector::kMarkProcessWriteBarrierWorklist);
+ heap().stats_collector(),
+ StatsCollector::kMarkProcessWriteBarrierWorklist);
if (!DrainWorklistWithBytesAndTimeDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_.write_barrier_worklist(),
@@ -480,13 +485,13 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
{
StatsCollector::EnabledScope stats_scope(
- heap(), StatsCollector::kMarkProcessEphemerons);
+ heap().stats_collector(), StatsCollector::kMarkProcessEphemerons);
if (!DrainWorklistWithBytesAndTimeDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_.ephemeron_pairs_for_processing_worklist(),
[this](const MarkingWorklists::EphemeronPairItem& item) {
- mutator_marking_state_.ProcessEphemeron(item.key,
- item.value_desc);
+ mutator_marking_state_.ProcessEphemeron(
+ item.key, item.value, item.value_desc, visitor());
})) {
return false;
}
@@ -497,7 +502,8 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
void MarkerBase::MarkNotFullyConstructedObjects() {
StatsCollector::DisabledScope stats_scope(
- heap(), StatsCollector::kMarkVisitNotFullyConstructedObjects);
+ heap().stats_collector(),
+ StatsCollector::kMarkVisitNotFullyConstructedObjects);
std::unordered_set<HeapObjectHeader*> objects =
mutator_marking_state_.not_fully_constructed_worklist().Extract();
for (HeapObjectHeader* object : objects) {
@@ -515,8 +521,8 @@ void MarkerBase::ClearAllWorklistsForTesting() {
if (compaction_worklists) compaction_worklists->ClearForTesting();
}
-void MarkerBase::DisableIncrementalMarkingForTesting() {
- incremental_marking_disabled_for_testing_ = true;
+void MarkerBase::SetMainThreadMarkingDisabledForTesting(bool value) {
+ main_marking_disabled_for_testing_ = value;
}
void MarkerBase::WaitForConcurrentMarkingForTesting() {
diff --git a/deps/v8/src/heap/cppgc/marker.h b/deps/v8/src/heap/cppgc/marker.h
index 97792b7504..50288bd0cb 100644
--- a/deps/v8/src/heap/cppgc/marker.h
+++ b/deps/v8/src/heap/cppgc/marker.h
@@ -30,9 +30,9 @@ class MarkerFactory;
// phase:
// 1. StartMarking() [Called implicitly when creating a Marker using
// MarkerFactory]
-// 2. AdvanceMarkingWithDeadline() [Optional, depending on environment.]
+// 2. AdvanceMarkingWithLimits() [Optional, depending on environment.]
// 3. EnterAtomicPause()
-// 4. AdvanceMarkingWithDeadline()
+// 4. AdvanceMarkingWithLimits()
// 5. LeaveAtomicPause()
//
// Alternatively, FinishMarking combines steps 3.-5.
@@ -69,10 +69,14 @@ class V8_EXPORT_PRIVATE MarkerBase {
// - Updates the MarkingConfig if the stack state has changed;
void EnterAtomicPause(MarkingConfig::StackState);
- // Makes marking progress.
+ // Makes marking progress. A `marked_bytes_limit` of 0 means that the limit
+ // is determined by the internal marking scheduler.
+ //
// TODO(chromium:1056170): Remove TimeDelta argument when unified heap no
// longer uses it.
- bool AdvanceMarkingWithMaxDuration(v8::base::TimeDelta);
+ bool AdvanceMarkingWithLimits(
+ v8::base::TimeDelta = kMaximumIncrementalStepDuration,
+ size_t marked_bytes_limit = 0);
  // Makes marking progress when allocating a new lab.
void AdvanceMarkingOnAllocation();
@@ -83,7 +87,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
// Combines:
// - EnterAtomicPause()
- // - AdvanceMarkingWithDeadline()
+ // - AdvanceMarkingWithLimits()
// - ProcessWeakness()
// - LeaveAtomicPause()
void FinishMarking(MarkingConfig::StackState);
@@ -121,12 +125,14 @@ class V8_EXPORT_PRIVATE MarkerBase {
Handle handle_;
};
- void DisableIncrementalMarkingForTesting();
+ void SetMainThreadMarkingDisabledForTesting(bool);
void WaitForConcurrentMarkingForTesting();
void NotifyCompactionCancelled();
+ bool IsMarking() const { return is_marking_; }
+
protected:
static constexpr v8::base::TimeDelta kMaximumIncrementalStepDuration =
v8::base::TimeDelta::FromMilliseconds(2);
@@ -147,12 +153,6 @@ class V8_EXPORT_PRIVATE MarkerBase {
virtual ConservativeTracingVisitor& conservative_visitor() = 0;
virtual heap::base::StackVisitor& stack_visitor() = 0;
- // Makes marking progress.
- // TODO(chromium:1056170): Remove TimeDelta argument when unified heap no
- // longer uses it.
- bool AdvanceMarkingWithDeadline(
- v8::base::TimeDelta = kMaximumIncrementalStepDuration);
-
bool ProcessWorklistsWithDeadline(size_t, v8::base::TimeTicks);
void VisitRoots(MarkingConfig::StackState);
@@ -172,13 +172,13 @@ class V8_EXPORT_PRIVATE MarkerBase {
MarkingWorklists marking_worklists_;
MutatorMarkingState mutator_marking_state_;
- bool is_marking_started_{false};
+ bool is_marking_{false};
IncrementalMarkingSchedule schedule_;
std::unique_ptr<ConcurrentMarkerBase> concurrent_marker_{nullptr};
- bool incremental_marking_disabled_for_testing_{false};
+ bool main_marking_disabled_for_testing_{false};
friend class MarkerFactory;
};
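
The phase list in the class comment maps onto a short driver sequence. A minimal sketch, assuming the methods named there stay publicly callable on MarkerBase; in the tree, real callers go through MarkerFactory and the heap's garbage-collection entry points rather than a helper like this:

#include "src/base/platform/time.h"
#include "src/heap/cppgc/marker.h"

namespace cppgc {
namespace internal {

// Drives an incremental cycle to completion on an already-created marker.
void DriveMarkingSketch(MarkerBase& marker) {
  // 1. StartMarking() already ran when the marker was created via
  //    MarkerFactory.
  // 2. Incremental steps; a marked_bytes_limit of 0 defers to the internal
  //    marking scheduler. AdvanceMarkingWithLimits() returns true once all
  //    worklists are drained.
  while (!marker.AdvanceMarkingWithLimits(
      v8::base::TimeDelta::FromMilliseconds(2), /*marked_bytes_limit=*/0)) {
    // Yield back to the mutator between steps.
  }
  // 3.-5. FinishMarking() combines EnterAtomicPause(), a final
  //       AdvanceMarkingWithLimits(), ProcessWeakness(), and
  //       LeaveAtomicPause().
  marker.FinishMarking(MarkerBase::MarkingConfig::StackState::kNoHeapPointers);
}

}  // namespace internal
}  // namespace cppgc
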
diff --git a/deps/v8/src/heap/cppgc/marking-state.cc b/deps/v8/src/heap/cppgc/marking-state.cc
index ec2d80b2f2..e8f99c5936 100644
--- a/deps/v8/src/heap/cppgc/marking-state.cc
+++ b/deps/v8/src/heap/cppgc/marking-state.cc
@@ -6,6 +6,7 @@
#include <unordered_set>
+#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/stats-collector.h"
namespace cppgc {
@@ -22,7 +23,7 @@ void MutatorMarkingState::FlushNotFullyConstructedObjects() {
void MutatorMarkingState::FlushDiscoveredEphemeronPairs() {
StatsCollector::EnabledScope stats_scope(
- heap_, StatsCollector::kMarkFlushEphemerons);
+ heap_.stats_collector(), StatsCollector::kMarkFlushEphemerons);
discovered_ephemeron_pairs_worklist_.Publish();
if (!discovered_ephemeron_pairs_worklist_.IsGlobalEmpty()) {
ephemeron_pairs_for_processing_worklist_.Merge(
diff --git a/deps/v8/src/heap/cppgc/marking-state.h b/deps/v8/src/heap/cppgc/marking-state.h
index 65185c36e2..777b396f00 100644
--- a/deps/v8/src/heap/cppgc/marking-state.h
+++ b/deps/v8/src/heap/cppgc/marking-state.h
@@ -8,6 +8,7 @@
#include <algorithm>
#include "include/cppgc/trace-trait.h"
+#include "include/cppgc/visitor.h"
#include "src/heap/cppgc/compaction-worklists.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
@@ -48,7 +49,8 @@ class MarkingStateBase {
inline void ProcessWeakContainer(const void*, TraceDescriptor, WeakCallback,
const void*);
- inline void ProcessEphemeron(const void*, TraceDescriptor);
+ inline void ProcessEphemeron(const void*, const void*, TraceDescriptor,
+ Visitor&);
inline void AccountMarkedBytes(const HeapObjectHeader&);
inline void AccountMarkedBytes(size_t);
@@ -265,16 +267,23 @@ void MarkingStateBase::ProcessWeakContainer(const void* object,
if (desc.callback) PushMarked(header, desc);
}
-void MarkingStateBase::ProcessEphemeron(const void* key,
- TraceDescriptor value_desc) {
+void MarkingStateBase::ProcessEphemeron(const void* key, const void* value,
+ TraceDescriptor value_desc,
+ Visitor& visitor) {
// Filter out already marked keys. The write barrier for WeakMember
// ensures that any newly set value after this point is kept alive and does
// not require the callback.
if (HeapObjectHeader::FromPayload(key).IsMarked<AccessMode::kAtomic>()) {
- MarkAndPush(value_desc.base_object_payload, value_desc);
+ if (value_desc.base_object_payload) {
+ MarkAndPush(value_desc.base_object_payload, value_desc);
+ } else {
+ // If value_desc.base_object_payload is nullptr, the value is not GCed and
+ // should be immediately traced.
+ value_desc.callback(&visitor, value);
+ }
return;
}
- discovered_ephemeron_pairs_worklist_.Push({key, value_desc});
+ discovered_ephemeron_pairs_worklist_.Push({key, value, value_desc});
}
void MarkingStateBase::AccountMarkedBytes(const HeapObjectHeader& header) {
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.cc b/deps/v8/src/heap/cppgc/marking-verifier.cc
index 009228a8ff..76f39230ed 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.cc
+++ b/deps/v8/src/heap/cppgc/marking-verifier.cc
@@ -45,6 +45,9 @@ void VerificationState::VerifyMarked(const void* base_object_payload) const {
void MarkingVerifierBase::VisitInConstructionConservatively(
HeapObjectHeader& header, TraceConservativelyCallback callback) {
CHECK(header.IsMarked());
+ if (in_construction_objects_->find(&header) !=
+ in_construction_objects_->end())
+ return;
in_construction_objects_->insert(&header);
callback(this, header);
}
diff --git a/deps/v8/src/heap/cppgc/marking-visitor.cc b/deps/v8/src/heap/cppgc/marking-visitor.cc
index 896b12fc6c..fb51ccc303 100644
--- a/deps/v8/src/heap/cppgc/marking-visitor.cc
+++ b/deps/v8/src/heap/cppgc/marking-visitor.cc
@@ -25,9 +25,9 @@ void MarkingVisitorBase::VisitWeak(const void* object, TraceDescriptor desc,
weak_member);
}
-void MarkingVisitorBase::VisitEphemeron(const void* key,
+void MarkingVisitorBase::VisitEphemeron(const void* key, const void* value,
TraceDescriptor value_desc) {
- marking_state_.ProcessEphemeron(key, value_desc);
+ marking_state_.ProcessEphemeron(key, value, value_desc, *this);
}
void MarkingVisitorBase::VisitWeakContainer(const void* object,
@@ -65,7 +65,9 @@ void ConservativeMarkingVisitor::VisitFullyConstructedConservatively(
void ConservativeMarkingVisitor::VisitInConstructionConservatively(
HeapObjectHeader& header, TraceConservativelyCallback callback) {
DCHECK(!marking_state_.IsMarkedWeakContainer(header));
- marking_state_.MarkNoPush(header);
+  // In-construction objects found through conservative tracing can already be
+  // marked if they hold a reference to themselves.
+ if (!marking_state_.MarkNoPush(header)) return;
marking_state_.AccountMarkedBytes(header);
callback(this, header);
}
diff --git a/deps/v8/src/heap/cppgc/marking-visitor.h b/deps/v8/src/heap/cppgc/marking-visitor.h
index 91cca87dd9..4692b32025 100644
--- a/deps/v8/src/heap/cppgc/marking-visitor.h
+++ b/deps/v8/src/heap/cppgc/marking-visitor.h
@@ -28,7 +28,7 @@ class V8_EXPORT_PRIVATE MarkingVisitorBase : public VisitorBase {
protected:
void Visit(const void*, TraceDescriptor) final;
void VisitWeak(const void*, TraceDescriptor, WeakCallback, const void*) final;
- void VisitEphemeron(const void*, TraceDescriptor) final;
+ void VisitEphemeron(const void*, const void*, TraceDescriptor) final;
void VisitWeakContainer(const void* self, TraceDescriptor strong_desc,
TraceDescriptor weak_desc, WeakCallback callback,
const void* data) final;
diff --git a/deps/v8/src/heap/cppgc/marking-worklists.h b/deps/v8/src/heap/cppgc/marking-worklists.h
index de7e44e4da..4ad136b353 100644
--- a/deps/v8/src/heap/cppgc/marking-worklists.h
+++ b/deps/v8/src/heap/cppgc/marking-worklists.h
@@ -63,6 +63,7 @@ class MarkingWorklists {
struct EphemeronPairItem {
const void* key;
+ const void* value;
TraceDescriptor value_desc;
};
diff --git a/deps/v8/src/heap/cppgc/metric-recorder.h b/deps/v8/src/heap/cppgc/metric-recorder.h
new file mode 100644
index 0000000000..6e9d4d0787
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/metric-recorder.h
@@ -0,0 +1,69 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_METRIC_RECORDER_H_
+#define V8_HEAP_CPPGC_METRIC_RECORDER_H_
+
+#include <cstdint>
+
+namespace cppgc {
+namespace internal {
+
+class StatsCollector;
+
+/**
+ * Base class used for reporting GC statistics histograms. Embedders interested
+ * in collecting histograms should implement the virtual AddMainThreadEvent
+ * methods below and pass an instance of the implementation during Heap
+ * creation.
+ */
+class MetricRecorder {
+ public:
+ struct CppGCFullCycle {
+ struct IncrementalPhases {
+ int64_t mark_duration_us;
+ int64_t sweep_duration_us;
+ };
+ struct Phases : public IncrementalPhases {
+ int64_t weak_duration_us;
+ int64_t compact_duration_us;
+ };
+ struct Sizes {
+ int64_t before_bytes;
+ int64_t after_bytes;
+ int64_t freed_bytes;
+ };
+
+ Phases total;
+ Phases main_thread;
+ Phases main_thread_atomic;
+ IncrementalPhases main_thread_incremental;
+ Sizes objects;
+ Sizes memory;
+ double collection_rate_in_percent;
+ double efficiency_in_bytes_per_us;
+ double main_thread_efficiency_in_bytes_per_us;
+ };
+
+ struct CppGCMainThreadIncrementalMark {
+ int64_t duration_us;
+ };
+
+ struct CppGCMainThreadIncrementalSweep {
+ int64_t duration_us;
+ };
+
+ virtual ~MetricRecorder() = default;
+
+ virtual void AddMainThreadEvent(const CppGCFullCycle& event) {}
+ virtual void AddMainThreadEvent(const CppGCMainThreadIncrementalMark& event) {
+ }
+ virtual void AddMainThreadEvent(
+ const CppGCMainThreadIncrementalSweep& event) {}
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_METRIC_RECORDER_H_
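
Since StatsCollector now owns a std::unique_ptr<MetricRecorder> (see stats-collector.cc below), an embedder-provided subclass receives the aggregated events at the end of each cycle and after each incremental step. A minimal sketch of such a recorder; the printf sink is purely illustrative:

#include <cstdio>

#include "src/heap/cppgc/metric-recorder.h"

namespace {

class LoggingMetricRecorder final : public cppgc::internal::MetricRecorder {
 public:
  void AddMainThreadEvent(const CppGCFullCycle& event) override {
    std::printf("full GC: mark %lld us, sweep %lld us, freed %lld bytes\n",
                static_cast<long long>(event.main_thread.mark_duration_us),
                static_cast<long long>(event.main_thread.sweep_duration_us),
                static_cast<long long>(event.objects.freed_bytes));
  }
  void AddMainThreadEvent(
      const CppGCMainThreadIncrementalMark& event) override {
    std::printf("incremental mark step: %lld us\n",
                static_cast<long long>(event.duration_us));
  }
  void AddMainThreadEvent(
      const CppGCMainThreadIncrementalSweep& event) override {
    std::printf("incremental sweep step: %lld us\n",
                static_cast<long long>(event.duration_us));
  }
};

}  // namespace
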
diff --git a/deps/v8/src/heap/cppgc/object-allocator.cc b/deps/v8/src/heap/cppgc/object-allocator.cc
index 1f63c1a104..60ad19a984 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.cc
+++ b/deps/v8/src/heap/cppgc/object-allocator.cc
@@ -118,6 +118,8 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace* space,
size_t size, GCInfoIndex gcinfo) {
DCHECK_EQ(0, size & kAllocationMask);
DCHECK_LE(kFreeListEntrySize, size);
+  // Out-of-line allocation allows for checking this in all situations.
+ CHECK(!in_disallow_gc_scope());
// 1. If this allocation is big enough, allocate a large object.
if (size >= kLargeObjectSizeThreshold) {
@@ -134,14 +136,26 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace* space,
// 3. Lazily sweep pages of this heap until we find a freed area for
// this allocation or we finish sweeping all pages of this heap.
- // {
- // StatsCollector::EnabledScope stats_scope(
- // *space->raw_heap()->heap(), StatsCollector::kSweepOnAllocation);
- // // TODO(chromium:1056170): Add lazy sweep.
- // }
+ Sweeper& sweeper = raw_heap_->heap()->sweeper();
+ // TODO(chromium:1056170): Investigate whether this should be a loop which
+  // would result in more aggressive re-use of memory at the expense of
+ // potentially larger allocation time.
+ if (sweeper.SweepForAllocationIfRunning(space, size)) {
+    // Sweeper found a block of at least `size` bytes. Allocation from the free
+    // list may still fail because the buckets are not exhaustively searched
+    // for a suitable block. Instead, buckets are tried from larger sizes that
+    // are guaranteed to fit the allocation down to smaller bucket sizes that
+    // may only potentially fit it. For the bucket that may exactly fit the
+    // allocation of `size` bytes (no overallocation), only the first entry is
+    // checked.
+ if (void* result = AllocateFromFreeList(space, size, gcinfo)) {
+ return result;
+ }
+ }
// 4. Complete sweeping.
- raw_heap_->heap()->sweeper().FinishIfRunning();
+ sweeper.FinishIfRunning();
+ // TODO(chromium:1056170): Make use of the synchronously freed memory.
// 5. Add a new page to this heap.
auto* new_page = NormalPage::Create(page_backend_, space);
@@ -189,14 +203,12 @@ void ObjectAllocator::ResetLinearAllocationBuffers() {
visitor.Traverse(raw_heap_);
}
-ObjectAllocator::NoAllocationScope::NoAllocationScope(
- ObjectAllocator& allocator)
- : allocator_(allocator) {
- allocator.no_allocation_scope_++;
+void ObjectAllocator::Terminate() {
+ ResetLinearAllocationBuffers();
}
-ObjectAllocator::NoAllocationScope::~NoAllocationScope() {
- allocator_.no_allocation_scope_--;
+bool ObjectAllocator::in_disallow_gc_scope() const {
+ return raw_heap_->heap()->in_disallow_gc_scope();
}
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/object-allocator.h b/deps/v8/src/heap/cppgc/object-allocator.h
index 7516c7576e..1768a638ea 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.h
+++ b/deps/v8/src/heap/cppgc/object-allocator.h
@@ -31,22 +31,6 @@ class PageBackend;
class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
public:
- // NoAllocationScope is used in debug mode to catch unwanted allocations. E.g.
- // allocations during GC.
- class V8_EXPORT_PRIVATE V8_NODISCARD NoAllocationScope final {
- CPPGC_STACK_ALLOCATED();
-
- public:
- explicit NoAllocationScope(ObjectAllocator&);
- ~NoAllocationScope();
-
- NoAllocationScope(const NoAllocationScope&) = delete;
- NoAllocationScope& operator=(const NoAllocationScope&) = delete;
-
- private:
- ObjectAllocator& allocator_;
- };
-
ObjectAllocator(RawHeap* heap, PageBackend* page_backend,
StatsCollector* stats_collector);
@@ -56,14 +40,17 @@ class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
void ResetLinearAllocationBuffers();
+ // Terminate the allocator. Subsequent allocation calls result in a crash.
+ void Terminate();
+
private:
+ bool in_disallow_gc_scope() const;
+
// Returns the initially tried SpaceType to allocate an object of |size| bytes
// on. Returns the largest regular object size bucket for large objects.
inline static RawHeap::RegularSpaceType GetInitialSpaceIndexForSize(
size_t size);
- bool is_allocation_allowed() const { return no_allocation_scope_ == 0; }
-
inline void* AllocateObjectOnSpace(NormalPageSpace* space, size_t size,
GCInfoIndex gcinfo);
void* OutOfLineAllocate(NormalPageSpace*, size_t, GCInfoIndex);
@@ -73,11 +60,10 @@ class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
RawHeap* raw_heap_;
PageBackend* page_backend_;
StatsCollector* stats_collector_;
- size_t no_allocation_scope_ = 0;
};
void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo) {
- DCHECK(is_allocation_allowed());
+ DCHECK(!in_disallow_gc_scope());
const size_t allocation_size =
RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));
const RawHeap::RegularSpaceType type =
@@ -88,7 +74,7 @@ void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo) {
void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo,
CustomSpaceIndex space_index) {
- DCHECK(is_allocation_allowed());
+ DCHECK(!in_disallow_gc_scope());
const size_t allocation_size =
RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));
return AllocateObjectOnSpace(
diff --git a/deps/v8/src/heap/cppgc/object-size-trait.cc b/deps/v8/src/heap/cppgc/object-size-trait.cc
new file mode 100644
index 0000000000..bd0dd3d640
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/object-size-trait.cc
@@ -0,0 +1,36 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/object-size-trait.h"
+
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-page.h"
+
+namespace cppgc {
+namespace internal {
+
+// static
+size_t BaseObjectSizeTrait::GetObjectSizeForGarbageCollected(
+ const void* object) {
+ const auto& header = HeapObjectHeader::FromPayload(object);
+ return header.IsLargeObject()
+ ? static_cast<const LargePage*>(BasePage::FromPayload(&header))
+ ->PayloadSize()
+ : header.PayloadSize();
+}
+
+// static
+size_t BaseObjectSizeTrait::GetObjectSizeForGarbageCollectedMixin(
+ const void* address) {
+ // `address` is guaranteed to be on a normal page because large object mixins
+ // are not supported.
+ const auto& header =
+ BasePage::FromPayload(address)
+ ->ObjectHeaderFromInnerAddress<AccessMode::kAtomic>(address);
+ DCHECK(!header.IsLargeObject());
+ return header.PayloadSize();
+}
+
+} // namespace internal
+} // namespace cppgc
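
A minimal usage sketch of the public wrapper over these helpers, assuming include/cppgc/object-size-trait.h exposes cppgc::subtle::ObjectSizeTrait<T>::GetSize for fully constructed objects as sketched here; Node is a hypothetical embedder type:

#include <cstddef>

#include "include/cppgc/allocation.h"
#include "include/cppgc/garbage-collected.h"
#include "include/cppgc/object-size-trait.h"
#include "include/cppgc/visitor.h"

class Node final : public cppgc::GarbageCollected<Node> {
 public:
  void Trace(cppgc::Visitor*) const {}

 private:
  int payload_[4] = {};
};

std::size_t AllocatedSizeOf(cppgc::AllocationHandle& handle) {
  Node* node = cppgc::MakeGarbageCollected<Node>(handle);
  // Reports the payload size recorded in the HeapObjectHeader (or the
  // large-page payload size for large objects), not sizeof(Node).
  return cppgc::subtle::ObjectSizeTrait<Node>::GetSize(*node);
}
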
diff --git a/deps/v8/src/heap/cppgc/object-start-bitmap.h b/deps/v8/src/heap/cppgc/object-start-bitmap.h
index 38ba5ca886..da5df3932e 100644
--- a/deps/v8/src/heap/cppgc/object-start-bitmap.h
+++ b/deps/v8/src/heap/cppgc/object-start-bitmap.h
@@ -10,7 +10,7 @@
#include <array>
-#include "include/cppgc/internal/process-heap.h"
+#include "include/cppgc/internal/write-barrier.h"
#include "src/base/atomic-utils.h"
#include "src/base/bits.h"
#include "src/base/macros.h"
@@ -217,7 +217,7 @@ bool PlatformAwareObjectStartBitmap::ShouldForceNonAtomic() {
#if defined(V8_TARGET_ARCH_ARM)
// Use non-atomic accesses on ARMv7 when marking is not active.
if (mode == AccessMode::kAtomic) {
- if (V8_LIKELY(!ProcessHeap::IsAnyIncrementalOrConcurrentMarking()))
+ if (V8_LIKELY(!WriteBarrier::IsAnyIncrementalOrConcurrentMarking()))
return true;
}
#endif // defined(V8_TARGET_ARCH_ARM)
diff --git a/deps/v8/src/heap/cppgc/persistent-node.cc b/deps/v8/src/heap/cppgc/persistent-node.cc
index b9585f4be7..db3a01cafd 100644
--- a/deps/v8/src/heap/cppgc/persistent-node.cc
+++ b/deps/v8/src/heap/cppgc/persistent-node.cc
@@ -13,24 +13,37 @@
namespace cppgc {
namespace internal {
-PersistentRegion::~PersistentRegion() {
+PersistentRegion::~PersistentRegion() { ClearAllUsedNodes(); }
+
+void PersistentRegion::ClearAllUsedNodes() {
for (auto& slots : nodes_) {
for (auto& node : *slots) {
if (node.IsUsed()) {
static_cast<PersistentBase*>(node.owner())->ClearFromGC();
+ // Add nodes back to the free list to allow reusing for subsequent
+ // creation calls.
+ node.InitializeAsFreeNode(free_list_head_);
+ free_list_head_ = &node;
+ CPPGC_DCHECK(nodes_in_use_ > 0);
+ nodes_in_use_--;
}
}
}
+ CPPGC_DCHECK(0u == nodes_in_use_);
}
size_t PersistentRegion::NodesInUse() const {
- return std::accumulate(
+#ifdef DEBUG
+ const size_t accumulated_nodes_in_use_ = std::accumulate(
nodes_.cbegin(), nodes_.cend(), 0u, [](size_t acc, const auto& slots) {
return acc + std::count_if(slots->cbegin(), slots->cend(),
[](const PersistentNode& node) {
return node.IsUsed();
});
});
+ DCHECK_EQ(accumulated_nodes_in_use_, nodes_in_use_);
+#endif // DEBUG
+ return nodes_in_use_;
}
void PersistentRegion::EnsureNodeSlots() {
@@ -77,5 +90,10 @@ PersistentRegionLock::~PersistentRegionLock() {
g_process_mutex.Pointer()->Unlock();
}
+// static
+void PersistentRegionLock::AssertLocked() {
+ return g_process_mutex.Pointer()->AssertHeld();
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/platform.cc b/deps/v8/src/heap/cppgc/platform.cc
index 4d64951588..90516d6065 100644
--- a/deps/v8/src/heap/cppgc/platform.cc
+++ b/deps/v8/src/heap/cppgc/platform.cc
@@ -10,16 +10,22 @@
namespace cppgc {
+namespace {
+PageAllocator* g_page_allocator = nullptr;
+} // namespace
+
TracingController* Platform::GetTracingController() {
static v8::base::LeakyObject<TracingController> tracing_controller;
return tracing_controller.get();
}
void InitializeProcess(PageAllocator* page_allocator) {
- internal::GlobalGCInfoTable::Create(page_allocator);
+ CHECK(!g_page_allocator);
+ internal::GlobalGCInfoTable::Initialize(page_allocator);
+ g_page_allocator = page_allocator;
}
-void ShutdownProcess() {}
+void ShutdownProcess() { g_page_allocator = nullptr; }
namespace internal {
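
The added CHECK makes a second InitializeProcess() without an intervening ShutdownProcess() fatal, so embedders pair the two calls around all heap usage. A minimal lifecycle sketch, assuming cppgc::DefaultPlatform from include/cppgc/default-platform.h as the Platform implementation:

#include <memory>

#include "include/cppgc/default-platform.h"
#include "include/cppgc/heap.h"

void RunWithCppgc() {
  auto platform = std::make_shared<cppgc::DefaultPlatform>();
  cppgc::InitializeProcess(platform->GetPageAllocator());
  {
    std::unique_ptr<cppgc::Heap> heap = cppgc::Heap::Create(platform);
    // ... allocate on the heap, let it collect ...
  }
  cppgc::ShutdownProcess();  // resets the allocator, allowing re-initialization
}
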
diff --git a/deps/v8/src/heap/cppgc/pointer-policies.cc b/deps/v8/src/heap/cppgc/pointer-policies.cc
index 4fc5abb279..4544763bf3 100644
--- a/deps/v8/src/heap/cppgc/pointer-policies.cc
+++ b/deps/v8/src/heap/cppgc/pointer-policies.cc
@@ -21,24 +21,26 @@ void EnabledCheckingPolicy::CheckPointer(const void* ptr) {
// TODO(chromium:1056170): Provide implementation.
}
-PersistentRegion& StrongPersistentPolicy::GetPersistentRegion(void* object) {
+PersistentRegion& StrongPersistentPolicy::GetPersistentRegion(
+ const void* object) {
auto* heap = BasePage::FromPayload(object)->heap();
return heap->GetStrongPersistentRegion();
}
-PersistentRegion& WeakPersistentPolicy::GetPersistentRegion(void* object) {
+PersistentRegion& WeakPersistentPolicy::GetPersistentRegion(
+ const void* object) {
auto* heap = BasePage::FromPayload(object)->heap();
return heap->GetWeakPersistentRegion();
}
PersistentRegion& StrongCrossThreadPersistentPolicy::GetPersistentRegion(
- void* object) {
+ const void* object) {
auto* heap = BasePage::FromPayload(object)->heap();
return heap->GetStrongCrossThreadPersistentRegion();
}
PersistentRegion& WeakCrossThreadPersistentPolicy::GetPersistentRegion(
- void* object) {
+ const void* object) {
auto* heap = BasePage::FromPayload(object)->heap();
return heap->GetWeakCrossThreadPersistentRegion();
}
diff --git a/deps/v8/src/heap/cppgc/prefinalizer-handler.cc b/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
index 1cfa5c7163..993ba54854 100644
--- a/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
+++ b/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
@@ -49,7 +49,7 @@ void PreFinalizerHandler::RegisterPrefinalizer(PreFinalizer pre_finalizer) {
void PreFinalizerHandler::InvokePreFinalizers() {
StatsCollector::DisabledScope stats_scope(
- heap_, StatsCollector::kSweepInvokePreFinalizers);
+ heap_.stats_collector(), StatsCollector::kSweepInvokePreFinalizers);
DCHECK(CurrentThreadIsCreationThread());
LivenessBroker liveness_broker = LivenessBrokerFactory::Create();
diff --git a/deps/v8/src/heap/cppgc/process-heap-statistics.cc b/deps/v8/src/heap/cppgc/process-heap-statistics.cc
new file mode 100644
index 0000000000..9d38d694b9
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/process-heap-statistics.cc
@@ -0,0 +1,12 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/process-heap-statistics.h"
+
+namespace cppgc {
+
+std::atomic_size_t ProcessHeapStatistics::total_allocated_space_{0};
+std::atomic_size_t ProcessHeapStatistics::total_allocated_object_size_{0};
+
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/process-heap-statistics.h b/deps/v8/src/heap/cppgc/process-heap-statistics.h
new file mode 100644
index 0000000000..2d7bfa117f
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/process-heap-statistics.h
@@ -0,0 +1,73 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_PROCESS_HEAP_STATISTICS_H_
+#define V8_HEAP_CPPGC_PROCESS_HEAP_STATISTICS_H_
+
+#include "include/cppgc/process-heap-statistics.h"
+#include "src/heap/cppgc/stats-collector.h"
+
+namespace cppgc {
+namespace internal {
+
+class ProcessHeapStatisticsUpdater {
+ public:
+  // Allocation observer implementation that heaps should register to
+  // contribute to ProcessHeapStatistics. The heap is responsible for
+  // allocating and registering the observer impl with its stats collector.
+ class AllocationObserverImpl final
+ : public StatsCollector::AllocationObserver {
+ public:
+ void AllocatedObjectSizeIncreased(size_t bytes) final {
+ ProcessHeapStatisticsUpdater::IncreaseTotalAllocatedObjectSize(bytes);
+ object_size_changes_since_last_reset_ += bytes;
+ }
+
+ void AllocatedObjectSizeDecreased(size_t bytes) final {
+ ProcessHeapStatisticsUpdater::DecreaseTotalAllocatedObjectSize(bytes);
+ object_size_changes_since_last_reset_ -= bytes;
+ }
+
+ void ResetAllocatedObjectSize(size_t bytes) final {
+ ProcessHeapStatisticsUpdater::DecreaseTotalAllocatedObjectSize(
+ object_size_changes_since_last_reset_);
+ ProcessHeapStatisticsUpdater::IncreaseTotalAllocatedObjectSize(bytes);
+ object_size_changes_since_last_reset_ = bytes;
+ }
+
+ void AllocatedSizeIncreased(size_t bytes) final {
+ ProcessHeapStatisticsUpdater::IncreaseTotalAllocatedSpace(bytes);
+ }
+
+ void AllocatedSizeDecreased(size_t bytes) final {
+ ProcessHeapStatisticsUpdater::DecreaseTotalAllocatedSpace(bytes);
+ }
+
+ private:
+ size_t object_size_changes_since_last_reset_ = 0;
+ };
+
+ // For cppgc::ProcessHeapStatistics
+ static void IncreaseTotalAllocatedObjectSize(size_t delta) {
+ ::cppgc::ProcessHeapStatistics::total_allocated_object_size_.fetch_add(
+ delta, std::memory_order_relaxed);
+ }
+ static void DecreaseTotalAllocatedObjectSize(size_t delta) {
+ ::cppgc::ProcessHeapStatistics::total_allocated_object_size_.fetch_sub(
+ delta, std::memory_order_relaxed);
+ }
+ static void IncreaseTotalAllocatedSpace(size_t delta) {
+ ::cppgc::ProcessHeapStatistics::total_allocated_space_.fetch_add(
+ delta, std::memory_order_relaxed);
+ }
+ static void DecreaseTotalAllocatedSpace(size_t delta) {
+ ::cppgc::ProcessHeapStatistics::total_allocated_space_.fetch_sub(
+ delta, std::memory_order_relaxed);
+ }
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_PROCESS_HEAP_STATISTICS_H_
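
A minimal wiring sketch: the heap registers one AllocationObserverImpl with its StatsCollector so that every per-heap size notification also lands in the process-wide atomic counters. RegisterObserver matches stats-collector.cc below; the TotalAllocatedSpace() getter on cppgc::ProcessHeapStatistics is assumed from the public header, and unregistration is omitted here:

#include "src/heap/cppgc/process-heap-statistics.h"
#include "src/heap/cppgc/stats-collector.h"

namespace cppgc {
namespace internal {

class ProcessStatsWiringSketch final {
 public:
  explicit ProcessStatsWiringSketch(StatsCollector* stats_collector) {
    // Object-size and page-size deltas reported to the collector are now
    // mirrored into cppgc::ProcessHeapStatistics.
    stats_collector->RegisterObserver(&observer_);
  }

  static size_t ProcessWideAllocatedSpace() {
    return ::cppgc::ProcessHeapStatistics::TotalAllocatedSpace();
  }

 private:
  ProcessHeapStatisticsUpdater::AllocationObserverImpl observer_;
};

}  // namespace internal
}  // namespace cppgc
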
diff --git a/deps/v8/src/heap/cppgc/process-heap.cc b/deps/v8/src/heap/cppgc/process-heap.cc
index 76a4a5dff5..e084ea1264 100644
--- a/deps/v8/src/heap/cppgc/process-heap.cc
+++ b/deps/v8/src/heap/cppgc/process-heap.cc
@@ -4,13 +4,9 @@
#include "src/heap/cppgc/process-heap.h"
-#include "include/cppgc/internal/process-heap.h"
-
namespace cppgc {
namespace internal {
-AtomicEntryFlag ProcessHeap::concurrent_marking_flag_;
-
v8::base::LazyMutex g_process_mutex = LAZY_MUTEX_INITIALIZER;
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/stats-collector.cc b/deps/v8/src/heap/cppgc/stats-collector.cc
index 8753d96bdd..677216f772 100644
--- a/deps/v8/src/heap/cppgc/stats-collector.cc
+++ b/deps/v8/src/heap/cppgc/stats-collector.cc
@@ -8,6 +8,7 @@
#include <cmath>
#include "src/base/logging.h"
+#include "src/heap/cppgc/metric-recorder.h"
namespace cppgc {
namespace internal {
@@ -15,6 +16,12 @@ namespace internal {
// static
constexpr size_t StatsCollector::kAllocationThresholdBytes;
+StatsCollector::StatsCollector(
+ std::unique_ptr<MetricRecorder> histogram_recorder, Platform* platform)
+ : metric_recorder_(std::move(histogram_recorder)), platform_(platform) {
+ USE(platform_);
+}
+
void StatsCollector::RegisterObserver(AllocationObserver* observer) {
DCHECK_EQ(allocation_observers_.end(),
std::find(allocation_observers_.begin(),
@@ -88,9 +95,18 @@ void StatsCollector::NotifyMarkingCompleted(size_t marked_bytes) {
DCHECK_EQ(GarbageCollectionState::kMarking, gc_state_);
gc_state_ = GarbageCollectionState::kSweeping;
current_.marked_bytes = marked_bytes;
+ current_.object_size_before_sweep_bytes =
+ previous_.marked_bytes + allocated_bytes_since_end_of_marking_ +
+ allocated_bytes_since_safepoint_ -
+ explicitly_freed_bytes_since_safepoint_;
allocated_bytes_since_safepoint_ = 0;
explicitly_freed_bytes_since_safepoint_ = 0;
+ DCHECK_LE(memory_freed_bytes_since_end_of_marking_, memory_allocated_bytes_);
+ memory_allocated_bytes_ -= memory_freed_bytes_since_end_of_marking_;
+ current_.memory_size_before_sweep_bytes = memory_allocated_bytes_;
+ memory_freed_bytes_since_end_of_marking_ = 0;
+
ForAllAllocationObservers([marked_bytes](AllocationObserver* observer) {
observer->ResetAllocatedObjectSize(marked_bytes);
});
@@ -109,11 +125,100 @@ double StatsCollector::GetRecentAllocationSpeedInBytesPerMs() const {
(current_time - time_of_last_end_of_marking_).InMillisecondsF();
}
+namespace {
+
+int64_t SumPhases(const MetricRecorder::CppGCFullCycle::Phases& phases) {
+ return phases.mark_duration_us + phases.weak_duration_us +
+ phases.compact_duration_us + phases.sweep_duration_us;
+}
+
+MetricRecorder::CppGCFullCycle GetFullCycleEventForMetricRecorder(
+ int64_t atomic_mark_us, int64_t atomic_weak_us, int64_t atomic_compact_us,
+ int64_t atomic_sweep_us, int64_t incremental_mark_us,
+ int64_t incremental_sweep_us, int64_t concurrent_mark_us,
+ int64_t concurrent_sweep_us, int64_t objects_before_bytes,
+ int64_t objects_after_bytes, int64_t objects_freed_bytes,
+ int64_t memory_before_bytes, int64_t memory_after_bytes,
+ int64_t memory_freed_bytes) {
+ MetricRecorder::CppGCFullCycle event;
+ // MainThread.Incremental:
+ event.main_thread_incremental.mark_duration_us = incremental_mark_us;
+ event.main_thread_incremental.sweep_duration_us = incremental_sweep_us;
+ // MainThread.Atomic:
+ event.main_thread_atomic.mark_duration_us = atomic_mark_us;
+ event.main_thread_atomic.weak_duration_us = atomic_weak_us;
+ event.main_thread_atomic.compact_duration_us = atomic_compact_us;
+ event.main_thread_atomic.sweep_duration_us = atomic_sweep_us;
+ // MainThread:
+ event.main_thread.mark_duration_us =
+ event.main_thread_atomic.mark_duration_us +
+ event.main_thread_incremental.mark_duration_us;
+ event.main_thread.weak_duration_us =
+ event.main_thread_atomic.weak_duration_us;
+ event.main_thread.compact_duration_us =
+ event.main_thread_atomic.compact_duration_us;
+ event.main_thread.sweep_duration_us =
+ event.main_thread_atomic.sweep_duration_us +
+ event.main_thread_incremental.sweep_duration_us;
+ // Total:
+ event.total.mark_duration_us =
+ event.main_thread.mark_duration_us + concurrent_mark_us;
+ event.total.weak_duration_us = event.main_thread.weak_duration_us;
+ event.total.compact_duration_us = event.main_thread.compact_duration_us;
+ event.total.sweep_duration_us =
+ event.main_thread.sweep_duration_us + concurrent_sweep_us;
+ // Objects:
+ event.objects.before_bytes = objects_before_bytes;
+ event.objects.after_bytes = objects_after_bytes;
+ event.objects.freed_bytes = objects_freed_bytes;
+ // Memory:
+ event.memory.before_bytes = memory_before_bytes;
+ event.memory.after_bytes = memory_after_bytes;
+ event.memory.freed_bytes = memory_freed_bytes;
+ // Collection Rate:
+ event.collection_rate_in_percent =
+ static_cast<double>(event.objects.after_bytes) /
+ event.objects.before_bytes;
+ // Efficiency:
+ event.efficiency_in_bytes_per_us =
+ static_cast<double>(event.objects.freed_bytes) / SumPhases(event.total);
+ event.main_thread_efficiency_in_bytes_per_us =
+ static_cast<double>(event.objects.freed_bytes) /
+ SumPhases(event.main_thread);
+ return event;
+}
+
+} // namespace
+
void StatsCollector::NotifySweepingCompleted() {
DCHECK_EQ(GarbageCollectionState::kSweeping, gc_state_);
gc_state_ = GarbageCollectionState::kNotRunning;
previous_ = std::move(current_);
current_ = Event();
+ if (metric_recorder_) {
+ MetricRecorder::CppGCFullCycle event = GetFullCycleEventForMetricRecorder(
+ previous_.scope_data[kAtomicMark].InMicroseconds(),
+ previous_.scope_data[kAtomicWeak].InMicroseconds(),
+ previous_.scope_data[kAtomicCompact].InMicroseconds(),
+ previous_.scope_data[kAtomicSweep].InMicroseconds(),
+ previous_.scope_data[kIncrementalMark].InMicroseconds(),
+ previous_.scope_data[kIncrementalSweep].InMicroseconds(),
+ previous_.concurrent_scope_data[kConcurrentMark],
+ previous_.concurrent_scope_data[kConcurrentSweep],
+ previous_.object_size_before_sweep_bytes /* objects_before */,
+ previous_.marked_bytes /* objects_after */,
+ previous_.object_size_before_sweep_bytes -
+ previous_.marked_bytes /* objects_freed */,
+ previous_.memory_size_before_sweep_bytes /* memory_before */,
+ previous_.memory_size_before_sweep_bytes -
+ memory_freed_bytes_since_end_of_marking_ /* memory_after */,
+ memory_freed_bytes_since_end_of_marking_ /* memory_freed */);
+ metric_recorder_->AddMainThreadEvent(event);
+ }
+}
+
+size_t StatsCollector::allocated_memory_size() const {
+ return memory_allocated_bytes_;
}
size_t StatsCollector::allocated_object_size() const {
@@ -129,5 +234,39 @@ size_t StatsCollector::allocated_object_size() const {
allocated_bytes_since_end_of_marking_);
}
+void StatsCollector::NotifyAllocatedMemory(int64_t size) {
+ memory_allocated_bytes_ += size;
+ ForAllAllocationObservers([size](AllocationObserver* observer) {
+ observer->AllocatedSizeIncreased(static_cast<size_t>(size));
+ });
+}
+
+void StatsCollector::NotifyFreedMemory(int64_t size) {
+ memory_freed_bytes_since_end_of_marking_ += size;
+ ForAllAllocationObservers([size](AllocationObserver* observer) {
+ observer->AllocatedSizeDecreased(static_cast<size_t>(size));
+ });
+}
+
+void StatsCollector::RecordHistogramSample(ScopeId scope_id_,
+ v8::base::TimeDelta time) {
+ switch (scope_id_) {
+ case kIncrementalMark: {
+ MetricRecorder::CppGCMainThreadIncrementalMark event{
+ time.InMicroseconds()};
+ metric_recorder_->AddMainThreadEvent(event);
+ break;
+ }
+ case kIncrementalSweep: {
+ MetricRecorder::CppGCMainThreadIncrementalSweep event{
+ time.InMicroseconds()};
+ metric_recorder_->AddMainThreadEvent(event);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
} // namespace internal
} // namespace cppgc
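
Plugging illustrative numbers into GetFullCycleEventForMetricRecorder makes the derived figures concrete: with objects.before_bytes = 10 MiB, objects.after_bytes = 6 MiB, objects.freed_bytes = 4 MiB, and SumPhases(event.total) = 8000 us, collection_rate_in_percent comes out as 6 MiB / 10 MiB = 0.6 (despite the name, a 0..1 survival ratio in this version), and efficiency_in_bytes_per_us as 4,194,304 / 8000 ≈ 524 bytes per microsecond; main_thread_efficiency_in_bytes_per_us divides the same freed bytes by SumPhases(event.main_thread) only.
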
diff --git a/deps/v8/src/heap/cppgc/stats-collector.h b/deps/v8/src/heap/cppgc/stats-collector.h
index 29e9c63f88..c9945e28d5 100644
--- a/deps/v8/src/heap/cppgc/stats-collector.h
+++ b/deps/v8/src/heap/cppgc/stats-collector.h
@@ -10,21 +10,28 @@
#include <vector>
+#include "include/cppgc/platform.h"
+#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/time.h"
#include "src/heap/cppgc/garbage-collector.h"
-#include "src/heap/cppgc/heap-base.h"
+#include "src/heap/cppgc/metric-recorder.h"
#include "src/heap/cppgc/trace-event.h"
namespace cppgc {
namespace internal {
+// Histogram scopes contribute to histograms as well as to traces and metrics.
+// Other scopes contribute only to traces and metrics.
+#define CPPGC_FOR_ALL_HISTOGRAM_SCOPES(V) \
+ V(AtomicMark) \
+ V(AtomicWeak) \
+ V(AtomicCompact) \
+ V(AtomicSweep) \
+ V(IncrementalMark) \
+ V(IncrementalSweep)
+
#define CPPGC_FOR_ALL_SCOPES(V) \
- V(AtomicMark) \
- V(AtomicSweep) \
- V(AtomicCompact) \
- V(IncrementalMark) \
- V(IncrementalSweep) \
V(MarkIncrementalStart) \
V(MarkIncrementalFinalize) \
V(MarkAtomicPrologue) \
@@ -43,17 +50,17 @@ namespace internal {
V(MarkVisitCrossThreadPersistents) \
V(MarkVisitStack) \
V(MarkVisitRememberedSets) \
- V(WeakInvokeCallbacks) \
V(SweepInvokePreFinalizers) \
V(SweepIdleStep) \
V(SweepOnAllocation) \
V(SweepFinalize)
-#define CPPGC_FOR_ALL_CONCURRENT_SCOPES(V) \
- V(ConcurrentMarkProcessEphemerons) \
- V(ConcurrentMark) \
+#define CPPGC_FOR_ALL_HISTOGRAM_CONCURRENT_SCOPES(V) \
+ V(ConcurrentMark) \
V(ConcurrentSweep)
+#define CPPGC_FOR_ALL_CONCURRENT_SCOPES(V) V(ConcurrentMarkProcessEphemerons)
+
// Sink for various time and memory statistics.
class V8_EXPORT_PRIVATE StatsCollector final {
using CollectionType = GarbageCollector::Config::CollectionType;
@@ -66,6 +73,8 @@ class V8_EXPORT_PRIVATE StatsCollector final {
enum ScopeId {
#define CPPGC_DECLARE_ENUM(name) k##name,
+ CPPGC_FOR_ALL_HISTOGRAM_SCOPES(CPPGC_DECLARE_ENUM)
+ kNumHistogramScopeIds,
CPPGC_FOR_ALL_SCOPES(CPPGC_DECLARE_ENUM)
#undef CPPGC_DECLARE_ENUM
kNumScopeIds,
@@ -73,6 +82,8 @@ class V8_EXPORT_PRIVATE StatsCollector final {
enum ConcurrentScopeId {
#define CPPGC_DECLARE_ENUM(name) k##name,
+ CPPGC_FOR_ALL_HISTOGRAM_CONCURRENT_SCOPES(CPPGC_DECLARE_ENUM)
+ kNumHistogramConcurrentScopeIds,
CPPGC_FOR_ALL_CONCURRENT_SCOPES(CPPGC_DECLARE_ENUM)
#undef CPPGC_DECLARE_ENUM
kNumConcurrentScopeIds
@@ -85,14 +96,17 @@ class V8_EXPORT_PRIVATE StatsCollector final {
struct Event final {
V8_EXPORT_PRIVATE explicit Event();
- v8::base::TimeDelta scope_data[kNumScopeIds];
- v8::base::Atomic32 concurrent_scope_data[kNumConcurrentScopeIds]{0};
+ v8::base::TimeDelta scope_data[kNumHistogramScopeIds];
+ v8::base::Atomic32 concurrent_scope_data[kNumHistogramConcurrentScopeIds]{
+ 0};
size_t epoch = -1;
CollectionType collection_type = CollectionType::kMajor;
IsForcedGC is_forced_gc = IsForcedGC::kNotForced;
// Marked bytes collected during marking.
size_t marked_bytes = 0;
+ size_t object_size_before_sweep_bytes = -1;
+ size_t memory_size_before_sweep_bytes = -1;
};
private:
@@ -106,6 +120,7 @@ class V8_EXPORT_PRIVATE StatsCollector final {
case k##name: \
return type == CollectionType::kMajor ? "CppGC." #name \
: "CppGC." #name ".Minor";
+ CPPGC_FOR_ALL_HISTOGRAM_SCOPES(CPPGC_CASE)
CPPGC_FOR_ALL_SCOPES(CPPGC_CASE)
#undef CPPGC_CASE
default:
@@ -120,6 +135,7 @@ class V8_EXPORT_PRIVATE StatsCollector final {
case k##name: \
return type == CollectionType::kMajor ? "CppGC." #name \
: "CppGC." #name ".Minor";
+ CPPGC_FOR_ALL_HISTOGRAM_CONCURRENT_SCOPES(CPPGC_CASE)
CPPGC_FOR_ALL_CONCURRENT_SCOPES(CPPGC_CASE)
#undef CPPGC_CASE
default:
@@ -139,9 +155,9 @@ class V8_EXPORT_PRIVATE StatsCollector final {
public:
template <typename... Args>
- InternalScope(HeapBase& heap, ScopeIdType scope_id, Args... args)
- : heap_(heap),
- stats_collector_(heap_.stats_collector()),
+ InternalScope(StatsCollector* stats_collector, ScopeIdType scope_id,
+ Args... args)
+ : stats_collector_(stats_collector),
start_time_(v8::base::TimeTicks::Now()),
scope_id_(scope_id) {
DCHECK_LE(0, scope_id_);
@@ -149,6 +165,10 @@ class V8_EXPORT_PRIVATE StatsCollector final {
scope_category == kMutatorThread
? static_cast<int>(kNumScopeIds)
: static_cast<int>(kNumConcurrentScopeIds));
+ DCHECK_NE(static_cast<int>(scope_id_),
+ scope_category == kMutatorThread
+ ? static_cast<int>(kNumHistogramScopeIds)
+ : static_cast<int>(kNumHistogramConcurrentScopeIds));
StartTrace(args...);
}
@@ -160,6 +180,10 @@ class V8_EXPORT_PRIVATE StatsCollector final {
InternalScope(const InternalScope&) = delete;
InternalScope& operator=(const InternalScope&) = delete;
+ void DecreaseStartTimeForTesting(v8::base::TimeDelta delta) {
+ start_time_ -= delta;
+ }
+
private:
void* operator new(size_t, void*) = delete;
void* operator new(size_t) = delete;
@@ -180,9 +204,8 @@ class V8_EXPORT_PRIVATE StatsCollector final {
inline void IncreaseScopeTime();
- HeapBase& heap_;
StatsCollector* const stats_collector_;
- const v8::base::TimeTicks start_time_;
+ v8::base::TimeTicks start_time_;
const ScopeIdType scope_id_;
};
@@ -203,21 +226,28 @@ class V8_EXPORT_PRIVATE StatsCollector final {
// the deltas is interesting.
//
// May trigger GC.
- virtual void AllocatedObjectSizeIncreased(size_t) = 0;
- virtual void AllocatedObjectSizeDecreased(size_t) = 0;
+ virtual void AllocatedObjectSizeIncreased(size_t) {}
+ virtual void AllocatedObjectSizeDecreased(size_t) {}
// Called when the exact size of allocated object size is known. In
// practice, this is after marking when marked bytes == allocated bytes.
//
// Must not trigger GC synchronously.
- virtual void ResetAllocatedObjectSize(size_t) = 0;
+ virtual void ResetAllocatedObjectSize(size_t) {}
+
+ // Called upon allocating/releasing chunks of memory (e.g. pages) that can
+ // contain objects.
+ //
+ // Must not trigger GC.
+ virtual void AllocatedSizeIncreased(size_t) {}
+ virtual void AllocatedSizeDecreased(size_t) {}
};
// Observers are implemented using virtual calls. Avoid notifications below
// reasonably interesting sizes.
static constexpr size_t kAllocationThresholdBytes = 1024;
- StatsCollector() = default;
+ StatsCollector(std::unique_ptr<MetricRecorder>, Platform*);
StatsCollector(const StatsCollector&) = delete;
StatsCollector& operator=(const StatsCollector&) = delete;
@@ -240,6 +270,7 @@ class V8_EXPORT_PRIVATE StatsCollector final {
// is finished at this point.
void NotifySweepingCompleted();
+ size_t allocated_memory_size() const;
// Size of live objects in bytes on the heap. Based on the most recent marked
// bytes and the bytes allocated since last marking.
size_t allocated_object_size() const;
@@ -248,6 +279,14 @@ class V8_EXPORT_PRIVATE StatsCollector final {
const Event& GetPreviousEventForTesting() const { return previous_; }
+ void NotifyAllocatedMemory(int64_t);
+ void NotifyFreedMemory(int64_t);
+
+ void SetMetricRecorderForTesting(
+ std::unique_ptr<MetricRecorder> histogram_recorder) {
+ metric_recorder_ = std::move(histogram_recorder);
+ }
+
private:
enum class GarbageCollectionState : uint8_t {
kNotRunning,
@@ -255,6 +294,9 @@ class V8_EXPORT_PRIVATE StatsCollector final {
kSweeping
};
+ void RecordHistogramSample(ScopeId, v8::base::TimeDelta);
+ void RecordHistogramSample(ConcurrentScopeId, v8::base::TimeDelta) {}
+
// Invokes |callback| for all registered observers.
template <typename Callback>
void ForAllAllocationObservers(Callback callback);
@@ -274,6 +316,9 @@ class V8_EXPORT_PRIVATE StatsCollector final {
int64_t allocated_bytes_since_safepoint_ = 0;
int64_t explicitly_freed_bytes_since_safepoint_ = 0;
+ int64_t memory_allocated_bytes_ = 0;
+ int64_t memory_freed_bytes_since_end_of_marking_ = 0;
+
// vector to allow fast iteration of observers. Register/Unregisters only
// happens on startup/teardown.
std::vector<AllocationObserver*> allocation_observers_;
@@ -285,6 +330,10 @@ class V8_EXPORT_PRIVATE StatsCollector final {
Event current_;
// The previous GC event which is populated at NotifySweepingFinished.
Event previous_;
+
+ std::unique_ptr<MetricRecorder> metric_recorder_;
+
+ Platform* platform_;
};
template <typename Callback>
@@ -311,6 +360,12 @@ template <StatsCollector::TraceCategory trace_category,
template <typename... Args>
void StatsCollector::InternalScope<trace_category, scope_category>::StartTrace(
Args... args) {
+ // Top level scopes that contribute to histogram should always be enabled.
+ DCHECK_IMPLIES(static_cast<int>(scope_id_) <
+ (scope_category == kMutatorThread
+ ? static_cast<int>(kNumHistogramScopeIds)
+ : static_cast<int>(kNumHistogramConcurrentScopeIds)),
+ trace_category == StatsCollector::TraceCategory::kEnabled);
if (trace_category == StatsCollector::TraceCategory::kEnabled)
StartTraceImpl(args...);
}
@@ -371,18 +426,26 @@ template <StatsCollector::TraceCategory trace_category,
void StatsCollector::InternalScope<trace_category,
scope_category>::IncreaseScopeTime() {
DCHECK_NE(GarbageCollectionState::kNotRunning, stats_collector_->gc_state_);
+ // Only record top level scopes.
+ if (static_cast<int>(scope_id_) >=
+ (scope_category == kMutatorThread
+ ? static_cast<int>(kNumHistogramScopeIds)
+ : static_cast<int>(kNumHistogramConcurrentScopeIds)))
+ return;
v8::base::TimeDelta time = v8::base::TimeTicks::Now() - start_time_;
if (scope_category == StatsCollector::ScopeContext::kMutatorThread) {
stats_collector_->current_.scope_data[scope_id_] += time;
+ if (stats_collector_->metric_recorder_)
+ stats_collector_->RecordHistogramSample(scope_id_, time);
return;
}
// scope_category == StatsCollector::ScopeContext::kConcurrentThread
using Atomic32 = v8::base::Atomic32;
- const int64_t ms = time.InMicroseconds();
- DCHECK(ms <= std::numeric_limits<Atomic32>::max());
+ const int64_t us = time.InMicroseconds();
+ DCHECK_LE(us, std::numeric_limits<Atomic32>::max());
v8::base::Relaxed_AtomicIncrement(
&stats_collector_->current_.concurrent_scope_data[scope_id_],
- static_cast<Atomic32>(ms));
+ static_cast<Atomic32>(us));
}
} // namespace internal
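
The stats-collector.h changes above reorder the scope X-macros so that histogram-reported scope IDs are declared first, with kNumHistogramScopeIds / kNumHistogramConcurrentScopeIds acting as sentinels; IncreaseScopeTime() compares scope_id_ against the sentinel to decide whether a sample is also forwarded to the MetricRecorder. The following standalone sketch shows the same X-macro-plus-sentinel pattern; all macro and enum names in it are illustrative and not part of this commit.

// Illustrative sketch only (not part of this commit): the X-macro + sentinel
// pattern used above to split histogram-reported scopes from trace-only ones.
#include <iostream>

#define FOR_ALL_HISTOGRAM_SCOPES(V) \
  V(AtomicMark)                     \
  V(AtomicSweep)

#define FOR_ALL_OTHER_SCOPES(V) \
  V(MarkVisitStack)             \
  V(SweepFinalize)

enum ScopeId {
#define DECLARE_ENUM(name) k##name,
  FOR_ALL_HISTOGRAM_SCOPES(DECLARE_ENUM)
  kNumHistogramScopeIds,  // Sentinel: everything below is trace-only.
  FOR_ALL_OTHER_SCOPES(DECLARE_ENUM)
#undef DECLARE_ENUM
  kNumScopeIds
};

// Mirrors the "scope_id_ < kNumHistogramScopeIds" checks above.
bool ContributesToHistogram(ScopeId id) {
  return static_cast<int>(id) < static_cast<int>(kNumHistogramScopeIds);
}

int main() {
  std::cout << ContributesToHistogram(kAtomicMark) << "\n";     // 1
  std::cout << ContributesToHistogram(kSweepFinalize) << "\n";  // 0
  return 0;
}
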
diff --git a/deps/v8/src/heap/cppgc/sweeper.cc b/deps/v8/src/heap/cppgc/sweeper.cc
index d4880d72dd..573838b4c4 100644
--- a/deps/v8/src/heap/cppgc/sweeper.cc
+++ b/deps/v8/src/heap/cppgc/sweeper.cc
@@ -104,6 +104,7 @@ struct SpaceState {
FreeList cached_free_list;
std::vector<FreeList::Block> unfinalized_free_list;
bool is_empty = false;
+ size_t largest_new_free_list_entry = 0;
};
ThreadSafeStack<BasePage*> unswept_pages;
@@ -122,7 +123,10 @@ void StickyUnmark(HeapObjectHeader* header) {
// Builder that finalizes objects and adds freelist entries right away.
class InlinedFinalizationBuilder final {
public:
- using ResultType = bool;
+ struct ResultType {
+ bool is_empty = false;
+ size_t largest_new_free_list_entry = 0;
+ };
explicit InlinedFinalizationBuilder(BasePage* page) : page_(page) {}
@@ -136,7 +140,9 @@ class InlinedFinalizationBuilder final {
space->free_list().Add({start, size});
}
- ResultType GetResult(bool is_empty) { return is_empty; }
+ ResultType GetResult(bool is_empty, size_t largest_new_free_list_entry) {
+ return {is_empty, largest_new_free_list_entry};
+ }
private:
BasePage* page_;
@@ -167,8 +173,9 @@ class DeferredFinalizationBuilder final {
found_finalizer_ = false;
}
- ResultType&& GetResult(bool is_empty) {
+ ResultType&& GetResult(bool is_empty, size_t largest_new_free_list_entry) {
result_.is_empty = is_empty;
+ result_.largest_new_free_list_entry = largest_new_free_list_entry;
return std::move(result_);
}
@@ -185,6 +192,8 @@ typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
PlatformAwareObjectStartBitmap& bitmap = page->object_start_bitmap();
bitmap.Clear();
+ size_t largest_new_free_list_entry = 0;
+
Address start_of_gap = page->PayloadStart();
for (Address begin = page->PayloadStart(), end = page->PayloadEnd();
begin != end;) {
@@ -205,8 +214,11 @@ typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
// The object is alive.
const Address header_address = reinterpret_cast<Address>(header);
if (start_of_gap != header_address) {
- builder.AddFreeListEntry(
- start_of_gap, static_cast<size_t>(header_address - start_of_gap));
+ size_t new_free_list_entry_size =
+ static_cast<size_t>(header_address - start_of_gap);
+ builder.AddFreeListEntry(start_of_gap, new_free_list_entry_size);
+ largest_new_free_list_entry =
+ std::max(largest_new_free_list_entry, new_free_list_entry_size);
bitmap.SetBit(start_of_gap);
}
StickyUnmark(header);
@@ -223,7 +235,7 @@ typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
}
const bool is_empty = (start_of_gap == page->PayloadStart());
- return builder.GetResult(is_empty);
+ return builder.GetResult(is_empty, largest_new_free_list_entry);
}
// SweepFinalizer is responsible for heap/space/page finalization. Finalization
@@ -297,12 +309,20 @@ class SweepFinalizer final {
space_freelist.Add(std::move(entry));
}
+ largest_new_free_list_entry_ = std::max(
+ page_state->largest_new_free_list_entry, largest_new_free_list_entry_);
+
// Add the page to the space.
page->space()->AddPage(page);
}
+ size_t largest_new_free_list_entry() const {
+ return largest_new_free_list_entry_;
+ }
+
private:
cppgc::Platform* platform_;
+ size_t largest_new_free_list_entry_ = 0;
};
class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
@@ -315,11 +335,13 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
void Sweep() {
for (SpaceState& state : *states_) {
while (auto page = state.unswept_pages.Pop()) {
- Traverse(*page);
+ SweepPage(*page);
}
}
}
+ void SweepPage(BasePage* page) { Traverse(page); }
+
bool SweepWithDeadline(double deadline_in_seconds) {
DCHECK(platform_);
static constexpr double kSlackInSeconds = 0.001;
@@ -345,6 +367,10 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
return true;
}
+ size_t largest_new_free_list_entry() const {
+ return largest_new_free_list_entry_;
+ }
+
private:
bool SweepSpaceWithDeadline(SpaceState* state, double deadline_in_seconds) {
static constexpr size_t kDeadlineCheckInterval = 8;
@@ -362,11 +388,14 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
}
bool VisitNormalPage(NormalPage* page) {
- const bool is_empty = SweepNormalPage<InlinedFinalizationBuilder>(page);
- if (is_empty) {
+ const InlinedFinalizationBuilder::ResultType result =
+ SweepNormalPage<InlinedFinalizationBuilder>(page);
+ if (result.is_empty) {
NormalPage::Destroy(page);
} else {
page->space()->AddPage(page);
+ largest_new_free_list_entry_ = std::max(
+ result.largest_new_free_list_entry, largest_new_free_list_entry_);
}
return true;
}
@@ -385,6 +414,7 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
SpaceStates* states_;
cppgc::Platform* platform_;
+ size_t largest_new_free_list_entry_ = 0;
};
class ConcurrentSweepTask final : public cppgc::JobTask,
@@ -397,7 +427,7 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
void Run(cppgc::JobDelegate* delegate) final {
StatsCollector::EnabledConcurrentScope stats_scope(
- heap_, StatsCollector::kConcurrentSweep);
+ heap_.stats_collector(), StatsCollector::kConcurrentSweep);
for (SpaceState& state : *states_) {
while (auto page = state.unswept_pages.Pop()) {
@@ -496,13 +526,12 @@ class Sweeper::SweeperImpl final {
: heap_(heap),
stats_collector_(stats_collector),
space_states_(heap->size()),
- platform_(platform),
- foreground_task_runner_(platform_->GetForegroundTaskRunner()) {}
+ platform_(platform) {}
~SweeperImpl() { CancelSweepers(); }
void Start(SweepingConfig config) {
- StatsCollector::EnabledScope stats_scope(*heap_->heap(),
+ StatsCollector::EnabledScope stats_scope(heap_->heap()->stats_collector(),
StatsCollector::kAtomicSweep);
is_in_progress_ = true;
#if DEBUG
@@ -522,13 +551,54 @@ class Sweeper::SweeperImpl final {
}
}
+ bool SweepForAllocationIfRunning(NormalPageSpace* space, size_t size) {
+ if (!is_in_progress_) return false;
+
+ // Bail out for recursive sweeping calls. This can happen when finalizers
+ // allocate new memory.
+ if (is_sweeping_on_mutator_thread_) return false;
+
+ StatsCollector::EnabledScope stats_scope(heap_->heap()->stats_collector(),
+ StatsCollector::kIncrementalSweep);
+ StatsCollector::EnabledScope inner_scope(
+ heap_->heap()->stats_collector(), StatsCollector::kSweepOnAllocation);
+    MutatorThreadSweepingScope sweeping_in_progress(*this);
+
+ SpaceState& space_state = space_states_[space->index()];
+
+ {
+ // First, process unfinalized pages as finalizing a page is faster than
+ // sweeping.
+ SweepFinalizer finalizer(platform_);
+ while (auto page = space_state.swept_unfinalized_pages.Pop()) {
+ finalizer.FinalizePage(&*page);
+ if (size <= finalizer.largest_new_free_list_entry()) return true;
+ }
+ }
+ {
+ // Then, if no matching slot is found in the unfinalized pages, search the
+      // unswept pages. This also helps out the concurrent sweeper.
+ MutatorThreadSweeper sweeper(&space_states_, platform_);
+ while (auto page = space_state.unswept_pages.Pop()) {
+ sweeper.SweepPage(*page);
+ if (size <= sweeper.largest_new_free_list_entry()) return true;
+ }
+ }
+
+ return false;
+ }
+
void FinishIfRunning() {
if (!is_in_progress_) return;
+ // Bail out for recursive sweeping calls. This can happen when finalizers
+ // allocate new memory.
+ if (is_sweeping_on_mutator_thread_) return;
+
{
StatsCollector::EnabledScope stats_scope(
- *heap_->heap(), StatsCollector::kIncrementalSweep);
- StatsCollector::EnabledScope inner_scope(*heap_->heap(),
+ heap_->heap()->stats_collector(), StatsCollector::kIncrementalSweep);
+ StatsCollector::EnabledScope inner_scope(heap_->heap()->stats_collector(),
StatsCollector::kSweepFinalize);
if (concurrent_sweeper_handle_ && concurrent_sweeper_handle_->IsValid() &&
concurrent_sweeper_handle_->UpdatePriorityEnabled()) {
@@ -543,6 +613,8 @@ class Sweeper::SweeperImpl final {
void Finish() {
DCHECK(is_in_progress_);
+    MutatorThreadSweepingScope sweeping_in_progress(*this);
+
// First, call finalizers on the mutator thread.
SweepFinalizer finalizer(platform_);
finalizer.FinalizeHeap(&space_states_);
@@ -566,8 +638,6 @@ class Sweeper::SweeperImpl final {
DCHECK(notify_done_pending_);
notify_done_pending_ = false;
stats_collector_->NotifySweepingCompleted();
- // Notify the heap that GC is finished.
- heap_->heap()->PostGarbageCollection();
}
void NotifyDoneIfNeeded() {
@@ -579,7 +649,32 @@ class Sweeper::SweeperImpl final {
if (concurrent_sweeper_handle_) concurrent_sweeper_handle_->Join();
}
+ bool IsSweepingOnMutatorThread() const {
+ return is_sweeping_on_mutator_thread_;
+ }
+
+ bool IsSweepingInProgress() const { return is_in_progress_; }
+
private:
+ class MutatorThreadSweepingScope final {
+ public:
+ explicit MutatorThreadSweepingScope(SweeperImpl& sweeper)
+ : sweeper_(sweeper) {
+ DCHECK(!sweeper_.is_sweeping_on_mutator_thread_);
+ sweeper_.is_sweeping_on_mutator_thread_ = true;
+ }
+ ~MutatorThreadSweepingScope() {
+ sweeper_.is_sweeping_on_mutator_thread_ = false;
+ }
+
+ MutatorThreadSweepingScope(const MutatorThreadSweepingScope&) = delete;
+ MutatorThreadSweepingScope& operator=(const MutatorThreadSweepingScope&) =
+ delete;
+
+ private:
+ SweeperImpl& sweeper_;
+ };
+
class IncrementalSweepTask : public cppgc::IdleTask {
public:
using Handle = SingleThreadedHandle;
@@ -598,17 +693,20 @@ class Sweeper::SweeperImpl final {
void Run(double deadline_in_seconds) override {
if (handle_.IsCanceled() || !sweeper_->is_in_progress_) return;
+      MutatorThreadSweepingScope sweeping_in_progress(*sweeper_);
+
bool sweep_complete;
{
StatsCollector::EnabledScope stats_scope(
- *sweeper_->heap_->heap(), StatsCollector::kIncrementalSweep);
+ sweeper_->heap_->heap()->stats_collector(),
+ StatsCollector::kIncrementalSweep);
MutatorThreadSweeper sweeper(&sweeper_->space_states_,
sweeper_->platform_);
{
StatsCollector::EnabledScope stats_scope(
- *sweeper_->heap_->heap(), StatsCollector::kSweepIdleStep,
- "idleDeltaInSeconds",
+ sweeper_->heap_->heap()->stats_collector(),
+ StatsCollector::kSweepIdleStep, "idleDeltaInSeconds",
(deadline_in_seconds -
sweeper_->platform_->MonotonicallyIncreasingTime()));
@@ -632,12 +730,11 @@ class Sweeper::SweeperImpl final {
void ScheduleIncrementalSweeping() {
DCHECK(platform_);
- if (!foreground_task_runner_ ||
- !foreground_task_runner_->IdleTasksEnabled())
- return;
+ auto runner = platform_->GetForegroundTaskRunner();
+ if (!runner || !runner->IdleTasksEnabled()) return;
incremental_sweeper_handle_ =
- IncrementalSweepTask::Post(this, foreground_task_runner_.get());
+ IncrementalSweepTask::Post(this, runner.get());
}
void ScheduleConcurrentSweeping() {
@@ -665,11 +762,14 @@ class Sweeper::SweeperImpl final {
StatsCollector* stats_collector_;
SpaceStates space_states_;
cppgc::Platform* platform_;
- std::shared_ptr<cppgc::TaskRunner> foreground_task_runner_;
IncrementalSweepTask::Handle incremental_sweeper_handle_;
std::unique_ptr<cppgc::JobHandle> concurrent_sweeper_handle_;
+ // Indicates whether the sweeping phase is in progress.
bool is_in_progress_ = false;
bool notify_done_pending_ = false;
+  // Indicates whether the sweeper (or its finalization) is currently
+ // running on the main thread.
+ bool is_sweeping_on_mutator_thread_ = false;
};
Sweeper::Sweeper(RawHeap* heap, cppgc::Platform* platform,
@@ -684,6 +784,16 @@ void Sweeper::WaitForConcurrentSweepingForTesting() {
impl_->WaitForConcurrentSweepingForTesting();
}
void Sweeper::NotifyDoneIfNeeded() { impl_->NotifyDoneIfNeeded(); }
+bool Sweeper::SweepForAllocationIfRunning(NormalPageSpace* space, size_t size) {
+ return impl_->SweepForAllocationIfRunning(space, size);
+}
+bool Sweeper::IsSweepingOnMutatorThread() const {
+ return impl_->IsSweepingOnMutatorThread();
+}
+
+bool Sweeper::IsSweepingInProgress() const {
+ return impl_->IsSweepingInProgress();
+}
} // namespace internal
} // namespace cppgc
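
The sweeper.cc changes above thread a largest_new_free_list_entry value through page sweeping and finalization so that the new SweepForAllocationIfRunning() can return as soon as sweeping has produced a free-list entry large enough for the pending allocation. The following standalone sketch shows the underlying gap computation on toy types; it is an illustration, not the V8 implementation.

// Illustrative sketch only: computing the largest free-list entry a swept
// page would produce, mirroring the largest_new_free_list_entry logic above.
// LiveObject is a toy stand-in, not a V8 type.
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

struct LiveObject {
  size_t offset;  // Offset of the live object within the page payload.
  size_t size;    // Object size in bytes.
};

// Returns the largest gap between live objects (including the tail gap up to
// |payload_size|), i.e. the largest free-list entry sweeping would add.
// Assumes |live| is sorted by offset and objects do not overlap.
size_t LargestNewFreeListEntry(const std::vector<LiveObject>& live,
                               size_t payload_size) {
  size_t largest = 0;
  size_t start_of_gap = 0;
  for (const LiveObject& object : live) {
    if (object.offset > start_of_gap) {
      largest = std::max(largest, object.offset - start_of_gap);
    }
    start_of_gap = object.offset + object.size;
  }
  if (payload_size > start_of_gap) {
    largest = std::max(largest, payload_size - start_of_gap);
  }
  return largest;
}

int main() {
  // Live objects at [0, 32) and [96, 128) on a 256-byte payload:
  // the gaps are 64 and 128 bytes, so the largest new entry is 128.
  std::cout << LargestNewFreeListEntry({{0, 32}, {96, 32}}, 256) << "\n";
  return 0;
}
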
diff --git a/deps/v8/src/heap/cppgc/sweeper.h b/deps/v8/src/heap/cppgc/sweeper.h
index 053b0035c3..7d6ffc2587 100644
--- a/deps/v8/src/heap/cppgc/sweeper.h
+++ b/deps/v8/src/heap/cppgc/sweeper.h
@@ -19,6 +19,7 @@ namespace internal {
class StatsCollector;
class RawHeap;
class ConcurrentSweeperTest;
+class NormalPageSpace;
class V8_EXPORT_PRIVATE Sweeper final {
public:
@@ -41,6 +42,13 @@ class V8_EXPORT_PRIVATE Sweeper final {
void Start(SweepingConfig);
void FinishIfRunning();
void NotifyDoneIfNeeded();
+ // SweepForAllocationIfRunning sweeps the given |space| until a slot that can
+ // fit an allocation of size |size| is found. Returns true if a slot was
+ // found.
+ bool SweepForAllocationIfRunning(NormalPageSpace* space, size_t size);
+
+ bool IsSweepingOnMutatorThread() const;
+ bool IsSweepingInProgress() const;
private:
void WaitForConcurrentSweepingForTesting();
diff --git a/deps/v8/src/heap/cppgc/testing.cc b/deps/v8/src/heap/cppgc/testing.cc
new file mode 100644
index 0000000000..bd72a3dfe1
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/testing.cc
@@ -0,0 +1,27 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/testing.h"
+
+#include "src/base/logging.h"
+#include "src/heap/cppgc/heap-base.h"
+
+namespace cppgc {
+namespace testing {
+
+OverrideEmbedderStackStateScope::OverrideEmbedderStackStateScope(
+ HeapHandle& heap_handle, EmbedderStackState state)
+ : heap_handle_(heap_handle) {
+ auto& heap = internal::HeapBase::From(heap_handle_);
+ CHECK_NULL(heap.override_stack_state_.get());
+ heap.override_stack_state_ = std::make_unique<EmbedderStackState>(state);
+}
+
+OverrideEmbedderStackStateScope::~OverrideEmbedderStackStateScope() {
+ auto& heap = internal::HeapBase::From(heap_handle_);
+ heap.override_stack_state_.reset();
+}
+
+} // namespace testing
+} // namespace cppgc
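
The new testing.cc above implements OverrideEmbedderStackStateScope as an RAII helper: the constructor installs a stack-state override on the heap and the destructor removes it again. The following standalone sketch shows the same pattern with toy stand-ins for HeapBase and EmbedderStackState; it is illustrative only.

// Illustrative sketch only: the RAII override pattern used by
// OverrideEmbedderStackStateScope above, on toy stand-in types.
#include <cassert>
#include <memory>

enum class StackState { kMayContainHeapPointers, kNoHeapPointers };

struct ToyHeap {
  std::unique_ptr<StackState> override_stack_state;
};

class OverrideStackStateScope final {
 public:
  OverrideStackStateScope(ToyHeap& heap, StackState state) : heap_(heap) {
    assert(!heap_.override_stack_state);  // Nesting is not supported.
    heap_.override_stack_state = std::make_unique<StackState>(state);
  }
  ~OverrideStackStateScope() { heap_.override_stack_state.reset(); }

  OverrideStackStateScope(const OverrideStackStateScope&) = delete;
  OverrideStackStateScope& operator=(const OverrideStackStateScope&) = delete;

 private:
  ToyHeap& heap_;
};

int main() {
  ToyHeap heap;
  {
    OverrideStackStateScope scope(heap, StackState::kNoHeapPointers);
    assert(heap.override_stack_state &&
           *heap.override_stack_state == StackState::kNoHeapPointers);
  }
  assert(!heap.override_stack_state);  // Override removed on scope exit.
  return 0;
}
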
diff --git a/deps/v8/src/heap/cppgc/trace-event.h b/deps/v8/src/heap/cppgc/trace-event.h
index 69b18888cf..6bde1448e3 100644
--- a/deps/v8/src/heap/cppgc/trace-event.h
+++ b/deps/v8/src/heap/cppgc/trace-event.h
@@ -116,7 +116,7 @@ enum CategoryGroupEnabledFlags {
#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
DCHECK_NOT_NULL(name); \
do { \
- cppgc::Platform* platform = heap_.platform(); \
+ cppgc::Platform* platform = stats_collector_->platform_; \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
cppgc::internal::AddTraceEvent( \
diff --git a/deps/v8/src/heap/cppgc/write-barrier.cc b/deps/v8/src/heap/cppgc/write-barrier.cc
index 336c0ba82c..75ff5ef626 100644
--- a/deps/v8/src/heap/cppgc/write-barrier.cc
+++ b/deps/v8/src/heap/cppgc/write-barrier.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "include/cppgc/internal/write-barrier.h"
+#include "src/heap/cppgc/write-barrier.h"
+#include "include/cppgc/heap-consistency.h"
#include "include/cppgc/internal/pointer-policies.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
@@ -19,6 +20,9 @@
namespace cppgc {
namespace internal {
+// static
+AtomicEntryFlag WriteBarrier::incremental_or_concurrent_marking_flag_;
+
namespace {
void ProcessMarkValue(HeapObjectHeader& header, MarkerBase* marker,
@@ -27,7 +31,7 @@ void ProcessMarkValue(HeapObjectHeader& header, MarkerBase* marker,
DCHECK(reinterpret_cast<CagedHeapLocalData*>(
reinterpret_cast<uintptr_t>(value) &
~(kCagedHeapReservationAlignment - 1))
- ->is_marking_in_progress);
+ ->is_incremental_marking_in_progress);
#endif
DCHECK(header.IsMarked<AccessMode::kAtomic>());
DCHECK(marker);
@@ -60,9 +64,10 @@ void WriteBarrier::DijkstraMarkingBarrierSlow(const void* value) {
const BasePage* page = BasePage::FromPayload(value);
const auto* heap = page->heap();
- // Marker being not set up means that no incremental/concurrent marking is in
- // progress.
- if (!heap->marker()) return;
+ // GetWriteBarrierType() checks marking state.
+ DCHECK(heap->marker());
+ // No write barriers should be executed from atomic pause marking.
+ DCHECK(!heap->in_atomic_pause());
auto& header =
const_cast<HeapObjectHeader&>(page->ObjectHeaderFromInnerAddress(value));
@@ -76,13 +81,13 @@ void WriteBarrier::DijkstraMarkingBarrierRangeSlow(
HeapHandle& heap_handle, const void* first_element, size_t element_size,
size_t number_of_elements, TraceCallback trace_callback) {
auto& heap_base = HeapBase::From(heap_handle);
- MarkerBase* marker = heap_base.marker();
- if (!marker) {
- return;
- }
- ObjectAllocator::NoAllocationScope no_allocation(
- heap_base.object_allocator());
+ // GetWriteBarrierType() checks marking state.
+ DCHECK(heap_base.marker());
+ // No write barriers should be executed from atomic pause marking.
+ DCHECK(!heap_base.in_atomic_pause());
+
+ cppgc::subtle::DisallowGarbageCollectionScope disallow_gc_scope(heap_base);
const char* array = static_cast<const char*>(first_element);
while (number_of_elements-- > 0) {
trace_callback(&heap_base.marker()->Visitor(), array);
@@ -103,9 +108,10 @@ void WriteBarrier::SteeleMarkingBarrierSlow(const void* value) {
const BasePage* page = BasePage::FromPayload(value);
const auto* heap = page->heap();
- // Marker being not set up means that no incremental/concurrent marking is in
- // progress.
- if (!heap->marker()) return;
+ // GetWriteBarrierType() checks marking state.
+ DCHECK(heap->marker());
+ // No write barriers should be executed from atomic pause marking.
+ DCHECK(!heap->in_atomic_pause());
auto& header =
const_cast<HeapObjectHeader&>(page->ObjectHeaderFromInnerAddress(value));
@@ -120,12 +126,17 @@ void WriteBarrier::GenerationalBarrierSlow(const CagedHeapLocalData& local_data,
const AgeTable& age_table,
const void* slot,
uintptr_t value_offset) {
+  // A write during the atomic pause (e.g. from a pre-finalizer) may trigger
+  // the slow path of the barrier. This is a consequence of the bailout
+  // order: when marking is not in progress, the generational barrier applies.
+ if (local_data.heap_base->in_atomic_pause()) return;
+
if (value_offset > 0 && age_table[value_offset] == AgeTable::Age::kOld)
return;
// Record slot.
local_data.heap_base->remembered_slots().insert(const_cast<void*>(slot));
}
-#endif
+#endif // CPPGC_YOUNG_GENERATION
#if V8_ENABLE_CHECKS
// static
@@ -134,5 +145,43 @@ void WriteBarrier::CheckParams(Type expected_type, const Params& params) {
}
#endif // V8_ENABLE_CHECKS
+// static
+bool WriteBarrierTypeForNonCagedHeapPolicy::IsMarking(const void* object,
+ HeapHandle** handle) {
+ // Large objects cannot have mixins, so we are guaranteed to always have
+ // a pointer on the same page.
+ const auto* page = BasePage::FromPayload(object);
+ *handle = page->heap();
+ const MarkerBase* marker = page->heap()->marker();
+ return marker && marker->IsMarking();
+}
+
+// static
+bool WriteBarrierTypeForNonCagedHeapPolicy::IsMarking(HeapHandle& heap_handle) {
+ const auto& heap_base = internal::HeapBase::From(heap_handle);
+ const MarkerBase* marker = heap_base.marker();
+ return marker && marker->IsMarking();
+}
+
+#if defined(CPPGC_CAGED_HEAP)
+
+// static
+bool WriteBarrierTypeForCagedHeapPolicy::IsMarking(
+ const HeapHandle& heap_handle, WriteBarrier::Params& params) {
+ const auto& heap_base = internal::HeapBase::From(heap_handle);
+ if (const MarkerBase* marker = heap_base.marker()) {
+ return marker->IsMarking();
+ }
+ // Also set caged heap start here to avoid another call immediately after
+ // checking IsMarking().
+#if defined(CPPGC_YOUNG_GENERATION)
+ params.start =
+ reinterpret_cast<uintptr_t>(&heap_base.caged_heap().local_data());
+#endif  // CPPGC_YOUNG_GENERATION
+ return false;
+}
+
+#endif // CPPGC_CAGED_HEAP
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/write-barrier.h b/deps/v8/src/heap/cppgc/write-barrier.h
new file mode 100644
index 0000000000..0758b01655
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/write-barrier.h
@@ -0,0 +1,22 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_WRITE_BARRIER_H_
+#define V8_HEAP_CPPGC_WRITE_BARRIER_H_
+
+#include "include/cppgc/internal/write-barrier.h"
+
+namespace cppgc {
+namespace internal {
+
+class WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater {
+ public:
+ static void Enter() { incremental_or_concurrent_marking_flag_.Enter(); }
+ static void Exit() { incremental_or_concurrent_marking_flag_.Exit(); }
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_WRITE_BARRIER_H_
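
write-barrier.cc and the new write-barrier.h above give WriteBarrier a static AtomicEntryFlag (incremental_or_concurrent_marking_flag_) together with an internal updater, so the marker can flip a process-global flag that the barrier fast path can check cheaply. The following standalone sketch shows an entry flag in that spirit; it is an approximation for illustration, not V8's AtomicEntryFlag.

// Illustrative sketch only: an "entry flag" in the spirit of the
// incremental_or_concurrent_marking_flag_ added above. Threads bracket
// marking with Enter()/Exit(); fast paths use MightBeEntered() as a cheap,
// conservative hint and must re-validate on the slow path.
#include <atomic>
#include <cassert>

class EntryFlag final {
 public:
  void Enter() { entries_.fetch_add(1, std::memory_order_relaxed); }
  void Exit() { entries_.fetch_sub(1, std::memory_order_relaxed); }

  // With relaxed ordering this is only a hint: readers may briefly observe a
  // stale value, so callers re-check under proper synchronization.
  bool MightBeEntered() const {
    return entries_.load(std::memory_order_relaxed) != 0;
  }

 private:
  std::atomic<int> entries_{0};
};

int main() {
  EntryFlag marking_flag;
  assert(!marking_flag.MightBeEntered());
  marking_flag.Enter();  // E.g. when incremental marking starts.
  assert(marking_flag.MightBeEntered());
  marking_flag.Exit();   // E.g. when marking finishes.
  assert(!marking_flag.MightBeEntered());
  return 0;
}
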
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
index 01c0402f7e..c83cbcb5a2 100644
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -4,6 +4,7 @@
#include "src/heap/embedder-tracing.h"
+#include "include/v8-cppgc.h"
#include "src/base/logging.h"
#include "src/heap/gc-tracer.h"
#include "src/objects/embedder-data-slot.h"
@@ -74,9 +75,33 @@ void LocalEmbedderHeapTracer::SetEmbedderStackStateForNextFinalization(
}
}
+namespace {
+
+bool ExtractWrappableInfo(Isolate* isolate, JSObject js_object,
+ const WrapperDescriptor& wrapper_descriptor,
+ LocalEmbedderHeapTracer::WrapperInfo* info) {
+ DCHECK(js_object.IsApiWrapper());
+ if (js_object.GetEmbedderFieldCount() < 2) return false;
+
+ if (EmbedderDataSlot(js_object, wrapper_descriptor.wrappable_type_index)
+ .ToAlignedPointerSafe(isolate, &info->first) &&
+ info->first &&
+ EmbedderDataSlot(js_object, wrapper_descriptor.wrappable_instance_index)
+ .ToAlignedPointerSafe(isolate, &info->second) &&
+ info->second) {
+ return (wrapper_descriptor.embedder_id_for_garbage_collected ==
+ WrapperDescriptor::kUnknownEmbedderId) ||
+ (*static_cast<uint16_t*>(info->first) ==
+ wrapper_descriptor.embedder_id_for_garbage_collected);
+ }
+ return false;
+}
+
+} // namespace
+
LocalEmbedderHeapTracer::ProcessingScope::ProcessingScope(
LocalEmbedderHeapTracer* tracer)
- : tracer_(tracer) {
+ : tracer_(tracer), wrapper_descriptor_(tracer->wrapper_descriptor_) {
wrapper_cache_.reserve(kWrapperCacheSize);
}
@@ -86,19 +111,11 @@ LocalEmbedderHeapTracer::ProcessingScope::~ProcessingScope() {
}
}
-// static
LocalEmbedderHeapTracer::WrapperInfo
LocalEmbedderHeapTracer::ExtractWrapperInfo(Isolate* isolate,
JSObject js_object) {
- DCHECK_GE(js_object.GetEmbedderFieldCount(), 2);
- DCHECK(js_object.IsApiWrapper());
-
WrapperInfo info;
- if (EmbedderDataSlot(js_object, 0)
- .ToAlignedPointerSafe(isolate, &info.first) &&
- info.first &&
- EmbedderDataSlot(js_object, 1)
- .ToAlignedPointerSafe(isolate, &info.second)) {
+ if (ExtractWrappableInfo(isolate, js_object, wrapper_descriptor_, &info)) {
return info;
}
return {nullptr, nullptr};
@@ -107,14 +124,12 @@ LocalEmbedderHeapTracer::ExtractWrapperInfo(Isolate* isolate,
void LocalEmbedderHeapTracer::ProcessingScope::TracePossibleWrapper(
JSObject js_object) {
DCHECK(js_object.IsApiWrapper());
- if (js_object.GetEmbedderFieldCount() < 2) return;
-
- WrapperInfo info =
- LocalEmbedderHeapTracer::ExtractWrapperInfo(tracer_->isolate_, js_object);
- if (VerboseWrapperInfo(info).is_valid()) {
+ WrapperInfo info;
+ if (ExtractWrappableInfo(tracer_->isolate_, js_object, wrapper_descriptor_,
+ &info)) {
wrapper_cache_.push_back(std::move(info));
+ FlushWrapperCacheIfFull();
}
- FlushWrapperCacheIfFull();
}
void LocalEmbedderHeapTracer::ProcessingScope::FlushWrapperCacheIfFull() {
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index 2237780434..80b98394b5 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_EMBEDDER_TRACING_H_
#define V8_HEAP_EMBEDDER_TRACING_H_
+#include "include/v8-cppgc.h"
#include "include/v8.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
@@ -24,15 +25,16 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
// internals in a named way. See ProcessingScope::TracePossibleJSWrapper()
// below on how a V8 object is parsed to gather the information.
struct VerboseWrapperInfo {
- explicit VerboseWrapperInfo(const WrapperInfo& raw_info)
+ constexpr explicit VerboseWrapperInfo(const WrapperInfo& raw_info)
: raw_info(raw_info) {}
// Information describing the type pointed to via instance().
void* type_info() const { return raw_info.first; }
// Direct pointer to an instance described by type_info().
void* instance() const { return raw_info.second; }
-
- bool is_valid() const { return type_info(); }
+ // Returns whether the info is empty and thus does not keep a C++ object
+ // alive.
+ bool is_empty() const { return !type_info() || !instance(); }
const WrapperInfo& raw_info;
};
@@ -52,11 +54,10 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
void FlushWrapperCacheIfFull();
LocalEmbedderHeapTracer* const tracer_;
+ const WrapperDescriptor wrapper_descriptor_;
WrapperCache wrapper_cache_;
};
- static WrapperInfo ExtractWrapperInfo(Isolate* isolate, JSObject js_object);
-
explicit LocalEmbedderHeapTracer(Isolate* isolate) : isolate_(isolate) {}
~LocalEmbedderHeapTracer() {
@@ -121,9 +122,27 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
size_t used_size() const { return remote_stats_.used_size; }
size_t allocated_size() const { return remote_stats_.allocated_size; }
+ WrapperInfo ExtractWrapperInfo(Isolate* isolate, JSObject js_object);
+
+ void SetWrapperDescriptor(const WrapperDescriptor& wrapper_descriptor) {
+ wrapper_descriptor_ = wrapper_descriptor;
+ }
+
private:
static constexpr size_t kEmbedderAllocatedThreshold = 128 * KB;
+ static constexpr WrapperDescriptor::InternalFieldIndex
+ kDefaultWrapperTypeEmbedderIndex = 0;
+ static constexpr WrapperDescriptor::InternalFieldIndex
+ kDefaultWrapperInstanceEmbedderIndex = 1;
+
+ static constexpr WrapperDescriptor GetDefaultWrapperDescriptor() {
+ // The default descriptor assumes the indices that known embedders use.
+ return WrapperDescriptor(kDefaultWrapperTypeEmbedderIndex,
+ kDefaultWrapperInstanceEmbedderIndex,
+ WrapperDescriptor::kUnknownEmbedderId);
+ }
+
Isolate* const isolate_;
EmbedderHeapTracer* remote_tracer_ = nullptr;
@@ -147,6 +166,11 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
size_t allocated_size_limit_for_check = 0;
} remote_stats_;
+ // Default descriptor only used when the embedder is using EmbedderHeapTracer.
+  // The value is overridden by CppHeap with values that the embedder provided
+ // upon initialization.
+ WrapperDescriptor wrapper_descriptor_ = GetDefaultWrapperDescriptor();
+
friend class EmbedderStackStateScope;
};
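
The embedder-tracing changes above replace the hard-coded embedder field indices 0 and 1 with a configurable WrapperDescriptor and centralize the check in ExtractWrappableInfo(): a wrappable needs both a non-null type-info pointer and a non-null instance pointer, and when an embedder id is configured, the leading uint16_t of the type info must match it. The following standalone sketch mirrors that check on toy types; it is illustrative only and not the V8 API.

// Illustrative sketch only: the two-embedder-field wrappable check performed
// by ExtractWrappableInfo above, on toy stand-in types.
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

constexpr uint16_t kUnknownEmbedderId = UINT16_MAX;

struct WrapperDescriptor {
  int wrappable_type_index;
  int wrappable_instance_index;
  uint16_t embedder_id_for_garbage_collected;
};

struct ToyJSObject {
  std::vector<void*> embedder_fields;
};

using WrapperInfo = std::pair<void*, void*>;  // {type info, instance}

bool ExtractWrappableInfo(const ToyJSObject& obj,
                          const WrapperDescriptor& desc, WrapperInfo* info) {
  if (obj.embedder_fields.size() < 2) return false;
  info->first = obj.embedder_fields[desc.wrappable_type_index];
  info->second = obj.embedder_fields[desc.wrappable_instance_index];
  if (!info->first || !info->second) return false;
  // If the embedder registered an id, the type info must start with it.
  return desc.embedder_id_for_garbage_collected == kUnknownEmbedderId ||
         *static_cast<uint16_t*>(info->first) ==
             desc.embedder_id_for_garbage_collected;
}

int main() {
  uint16_t type_tag = 42;  // First field of the embedder's type-info object.
  int instance = 0;
  ToyJSObject obj{{&type_tag, &instance}};
  WrapperDescriptor desc{0, 1, 42};
  WrapperInfo info;
  std::cout << ExtractWrappableInfo(obj, desc, &info) << "\n";  // 1
  return 0;
}
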
diff --git a/deps/v8/src/heap/factory-base.cc b/deps/v8/src/heap/factory-base.cc
index 30b568d8ef..bc6d60edac 100644
--- a/deps/v8/src/heap/factory-base.cc
+++ b/deps/v8/src/heap/factory-base.cc
@@ -22,6 +22,7 @@
#include "src/objects/source-text-module.h"
#include "src/objects/string-inl.h"
#include "src/objects/string.h"
+#include "src/objects/swiss-name-dictionary-inl.h"
#include "src/objects/template-objects-inl.h"
namespace v8 {
@@ -385,6 +386,20 @@ FactoryBase<Impl>::NewArrayBoilerplateDescription(
}
template <typename Impl>
+Handle<RegExpBoilerplateDescription>
+FactoryBase<Impl>::NewRegExpBoilerplateDescription(Handle<FixedArray> data,
+ Handle<String> source,
+ Smi flags) {
+ Handle<RegExpBoilerplateDescription> result =
+ Handle<RegExpBoilerplateDescription>::cast(NewStruct(
+ REG_EXP_BOILERPLATE_DESCRIPTION_TYPE, AllocationType::kOld));
+ result->set_data(*data);
+ result->set_source(*source);
+ result->set_flags(flags.value());
+ return result;
+}
+
+template <typename Impl>
Handle<TemplateObjectDescription>
FactoryBase<Impl>::NewTemplateObjectDescription(
Handle<FixedArray> raw_strings, Handle<FixedArray> cooked_strings) {
@@ -676,8 +691,11 @@ template <typename Impl>
Handle<ScopeInfo> FactoryBase<Impl>::NewScopeInfo(int length,
AllocationType type) {
DCHECK(type == AllocationType::kOld || type == AllocationType::kReadOnly);
- return Handle<ScopeInfo>::cast(NewFixedArrayWithMap(
- read_only_roots().scope_info_map_handle(), length, type));
+ Handle<HeapObject> result =
+ Handle<HeapObject>::cast(NewFixedArray(length, type));
+ result->set_map_after_allocation(*read_only_roots().scope_info_map_handle(),
+ SKIP_WRITE_BARRIER);
+ return Handle<ScopeInfo>::cast(result);
}
template <typename Impl>
@@ -831,6 +849,43 @@ HeapObject FactoryBase<Impl>::AllocateRaw(int size, AllocationType allocation,
return impl()->AllocateRaw(size, allocation, alignment);
}
+template <typename Impl>
+Handle<SwissNameDictionary>
+FactoryBase<Impl>::NewSwissNameDictionaryWithCapacity(
+ int capacity, AllocationType allocation) {
+ DCHECK(SwissNameDictionary::IsValidCapacity(capacity));
+
+ if (capacity == 0) {
+ DCHECK_NE(read_only_roots().at(RootIndex::kEmptySwissPropertyDictionary),
+ kNullAddress);
+
+ return read_only_roots().empty_swiss_property_dictionary_handle();
+ }
+
+ if (capacity > SwissNameDictionary::MaxCapacity()) {
+ isolate()->FatalProcessOutOfHeapMemory("invalid table size");
+ }
+
+ int meta_table_length = SwissNameDictionary::MetaTableSizeFor(capacity);
+ Handle<ByteArray> meta_table =
+ impl()->NewByteArray(meta_table_length, allocation);
+
+ Map map = read_only_roots().swiss_name_dictionary_map();
+ int size = SwissNameDictionary::SizeFor(capacity);
+ HeapObject result = AllocateRawWithImmortalMap(size, allocation, map);
+ Handle<SwissNameDictionary> table(SwissNameDictionary::cast(result),
+ isolate());
+ table->Initialize(isolate(), *meta_table, capacity);
+ return table;
+}
+
+template <typename Impl>
+Handle<SwissNameDictionary> FactoryBase<Impl>::NewSwissNameDictionary(
+ int at_least_space_for, AllocationType allocation) {
+ return NewSwissNameDictionaryWithCapacity(
+ SwissNameDictionary::CapacityFor(at_least_space_for), allocation);
+}
+
// Instantiate FactoryBase for the two variants we want.
template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) FactoryBase<Factory>;
template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
diff --git a/deps/v8/src/heap/factory-base.h b/deps/v8/src/heap/factory-base.h
index 39b18b39fc..e6cc12e996 100644
--- a/deps/v8/src/heap/factory-base.h
+++ b/deps/v8/src/heap/factory-base.h
@@ -23,6 +23,7 @@ class SeqTwoByteString;
class FreshlyAllocatedBigInt;
class ObjectBoilerplateDescription;
class ArrayBoilerplateDescription;
+class RegExpBoilerplateDescription;
class TemplateObjectDescription;
class SourceTextModuleInfo;
class PreparseData;
@@ -137,6 +138,9 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
Handle<ArrayBoilerplateDescription> NewArrayBoilerplateDescription(
ElementsKind elements_kind, Handle<FixedArrayBase> constant_values);
+ Handle<RegExpBoilerplateDescription> NewRegExpBoilerplateDescription(
+ Handle<FixedArray> data, Handle<String> source, Smi flags);
+
// Create a new TemplateObjectDescription struct.
Handle<TemplateObjectDescription> NewTemplateObjectDescription(
Handle<FixedArray> raw_strings, Handle<FixedArray> cooked_strings);
@@ -217,6 +221,13 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
Handle<ClassPositions> NewClassPositions(int start, int end);
+ Handle<SwissNameDictionary> NewSwissNameDictionary(
+ int at_least_space_for = kSwissNameDictionaryInitialCapacity,
+ AllocationType allocation = AllocationType::kYoung);
+
+ Handle<SwissNameDictionary> NewSwissNameDictionaryWithCapacity(
+ int capacity, AllocationType allocation);
+
protected:
// Allocate memory for an uninitialized array (e.g., a FixedArray or similar).
HeapObject AllocateRawArray(int size, AllocationType allocation);
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index 2fd2aaba4b..4d12c9e9da 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -43,7 +43,6 @@
#include "src/objects/feedback-cell-inl.h"
#include "src/objects/fixed-array-inl.h"
#include "src/objects/foreign-inl.h"
-#include "src/objects/frame-array-inl.h"
#include "src/objects/instance-type-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
@@ -331,6 +330,15 @@ Handle<Tuple2> Factory::NewTuple2(Handle<Object> value1, Handle<Object> value2,
return result;
}
+Handle<BaselineData> Factory::NewBaselineData(
+ Handle<Code> code, Handle<HeapObject> function_data) {
+ Handle<BaselineData> baseline_data = Handle<BaselineData>::cast(
+ NewStruct(BASELINE_DATA_TYPE, AllocationType::kOld));
+ baseline_data->set_baseline_code(*code);
+ baseline_data->set_data(*function_data);
+ return baseline_data;
+}
+
Handle<Oddball> Factory::NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number,
const char* type_of, byte kind) {
@@ -424,8 +432,8 @@ Handle<FeedbackVector> Factory::NewFeedbackVector(
*feedback_vector_map());
Handle<FeedbackVector> vector(FeedbackVector::cast(result), isolate());
vector->set_shared_function_info(*shared);
- vector->set_maybe_optimized_code(
- HeapObjectReference::ClearedValue(isolate()));
+ vector->set_maybe_optimized_code(HeapObjectReference::ClearedValue(isolate()),
+ kReleaseStore);
vector->set_length(length);
vector->set_invocation_count(0);
vector->set_profiler_ticks(0);
@@ -468,14 +476,6 @@ Handle<FixedArrayBase> Factory::NewFixedDoubleArrayWithHoles(int length) {
return array;
}
-Handle<FrameArray> Factory::NewFrameArray(int number_of_frames) {
- DCHECK_LE(0, number_of_frames);
- Handle<FixedArray> result =
- NewFixedArrayWithHoles(FrameArray::LengthFor(number_of_frames));
- result->set(FrameArray::kFrameCountIndex, Smi::zero());
- return Handle<FrameArray>::cast(result);
-}
-
template <typename T>
Handle<T> Factory::AllocateSmallOrderedHashTable(Handle<Map> map, int capacity,
AllocationType allocation) {
@@ -553,6 +553,27 @@ Handle<PropertyDescriptorObject> Factory::NewPropertyDescriptorObject() {
return object;
}
+Handle<SwissNameDictionary> Factory::CreateCanonicalEmptySwissNameDictionary() {
+ // This function is only supposed to be used to create the canonical empty
+ // version and should not be used afterwards.
+ DCHECK_EQ(kNullAddress, ReadOnlyRoots(isolate()).at(
+ RootIndex::kEmptySwissPropertyDictionary));
+
+ ReadOnlyRoots roots(isolate());
+
+ Handle<ByteArray> empty_meta_table =
+ NewByteArray(SwissNameDictionary::kMetaTableEnumerationTableStartOffset,
+ AllocationType::kReadOnly);
+
+ Map map = roots.swiss_name_dictionary_map();
+ int size = SwissNameDictionary::SizeFor(0);
+ HeapObject obj =
+ AllocateRawWithImmortalMap(size, AllocationType::kReadOnly, map);
+ SwissNameDictionary result = SwissNameDictionary::cast(obj);
+ result.Initialize(isolate(), *empty_meta_table, 0);
+ return handle(result, isolate());
+}
+
// Internalized strings are created in the old generation (data space).
Handle<String> Factory::InternalizeUtf8String(
const Vector<const char>& string) {
@@ -1308,16 +1329,15 @@ Handle<Foreign> Factory::NewForeign(Address addr) {
}
Handle<WasmTypeInfo> Factory::NewWasmTypeInfo(Address type_address,
- Handle<Map> parent) {
+ Handle<Map> opt_parent) {
Handle<ArrayList> subtypes = ArrayList::New(isolate(), 0);
Handle<FixedArray> supertypes;
- if (parent->IsWasmStructMap() || parent->IsWasmArrayMap()) {
- supertypes = CopyFixedArrayAndGrow(
- handle(parent->wasm_type_info().supertypes(), isolate()), 1);
- supertypes->set(supertypes->length() - 1, *parent);
+ if (opt_parent.is_null()) {
+ supertypes = NewUninitializedFixedArray(0);
} else {
- supertypes = NewUninitializedFixedArray(1);
- supertypes->set(0, *parent);
+ supertypes = CopyFixedArrayAndGrow(
+ handle(opt_parent->wasm_type_info().supertypes(), isolate()), 1);
+ supertypes->set(supertypes->length() - 1, *opt_parent);
}
Map map = *wasm_type_info_map();
HeapObject result = AllocateRawWithImmortalMap(map.instance_size(),
@@ -1325,7 +1345,6 @@ Handle<WasmTypeInfo> Factory::NewWasmTypeInfo(Address type_address,
Handle<WasmTypeInfo> info(WasmTypeInfo::cast(result), isolate());
info->AllocateExternalPointerEntries(isolate());
info->set_foreign_address(isolate(), type_address);
- info->set_parent(*parent);
info->set_supertypes(*supertypes);
info->set_subtypes(*subtypes);
return info;
@@ -1374,6 +1393,8 @@ Handle<FeedbackCell> Factory::NewManyClosuresCell(Handle<HeapObject> value) {
}
Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name,
+ PropertyDetails details,
+ Handle<Object> value,
AllocationType allocation) {
DCHECK(name->IsUniqueName());
STATIC_ASSERT(PropertyCell::kSize <= kMaxRegularHeapObjectSize);
@@ -1382,12 +1403,18 @@ Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name,
Handle<PropertyCell> cell(PropertyCell::cast(result), isolate());
cell->set_dependent_code(DependentCode::cast(*empty_weak_fixed_array()),
SKIP_WRITE_BARRIER);
- cell->set_property_details(PropertyDetails(Smi::zero()));
cell->set_name(*name);
- cell->set_value(*the_hole_value());
+ cell->set_value(*value);
+ cell->set_property_details_raw(details.AsSmi());
return cell;
}
+Handle<PropertyCell> Factory::NewProtector() {
+ return NewPropertyCell(
+ empty_string(), PropertyDetails::Empty(PropertyCellType::kConstantType),
+ handle(Smi::FromInt(Protectors::kProtectorValid), isolate()));
+}
+
Handle<TransitionArray> Factory::NewTransitionArray(int number_of_transitions,
int slack) {
int capacity = TransitionArray::LengthFor(number_of_transitions + slack);
@@ -1445,7 +1472,7 @@ Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
int inobject_properties) {
map.set_instance_type(type);
map.set_prototype(*null_value(), SKIP_WRITE_BARRIER);
- map.set_constructor_or_backpointer(*null_value(), SKIP_WRITE_BARRIER);
+ map.set_constructor_or_back_pointer(*null_value(), SKIP_WRITE_BARRIER);
map.set_instance_size(instance_size);
if (map.IsJSObjectMap()) {
DCHECK(!ReadOnlyHeap::Contains(map));
@@ -1463,12 +1490,7 @@ Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
map.set_raw_transitions(MaybeObject::FromSmi(Smi::zero()));
map.SetInObjectUnusedPropertyFields(inobject_properties);
map.SetInstanceDescriptors(isolate(), *empty_descriptor_array(), 0);
- if (FLAG_unbox_double_fields) {
- map.set_layout_descriptor(LayoutDescriptor::FastPointerLayout(),
- kReleaseStore);
- }
- // Must be called only after |instance_type|, |instance_size| and
- // |layout_descriptor| are set.
+ // Must be called only after |instance_type| and |instance_size| are set.
map.set_visitor_id(Map::GetVisitorId(map));
map.set_bit_field(0);
map.set_bit_field2(Map::Bits2::NewTargetIsBaseBit::encode(true));
@@ -1482,7 +1504,7 @@ Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
map.clear_padding();
map.set_elements_kind(elements_kind);
isolate()->counters()->maps_created()->Increment();
- if (FLAG_trace_maps) LOG(isolate(), MapCreate(map));
+ if (FLAG_log_maps) LOG(isolate(), MapCreate(map));
return map;
}
@@ -2052,8 +2074,8 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
PropertyDetails d(kAccessor, details.attributes(),
PropertyCellType::kMutable);
Handle<Name> name(descs->GetKey(i), isolate());
- Handle<PropertyCell> cell = NewPropertyCell(name);
- cell->set_value(descs->GetStrongValue(i));
+ Handle<Object> value(descs->GetStrongValue(i), isolate());
+ Handle<PropertyCell> cell = NewPropertyCell(name, d, value);
// |dictionary| already contains enough space for all properties.
USE(GlobalDictionary::Add(isolate(), dictionary, name, cell, d));
}
@@ -2125,7 +2147,7 @@ Handle<JSObject> Factory::NewJSObjectFromMap(
Handle<AllocationSite> allocation_site) {
// JSFunctions should be allocated using AllocateFunction to be
// properly initialized.
- DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
+ DCHECK(!InstanceTypeChecker::IsJSFunction((map->instance_type())));
// Both types of global objects should be allocated using
// AllocateGlobalObject to be properly initialized.
@@ -2337,10 +2359,10 @@ Handle<SourceTextModule> Factory::NewSourceTextModule(
module->set_requested_modules(*requested_modules);
module->set_status(Module::kUninstantiated);
module->set_exception(roots.the_hole_value());
+ module->set_top_level_capability(roots.undefined_value());
module->set_import_meta(roots.the_hole_value());
module->set_dfs_index(-1);
module->set_dfs_ancestor_index(-1);
- module->set_top_level_capability(roots.undefined_value());
module->set_flags(0);
module->set_async(IsAsyncModule(sfi->kind()));
module->set_async_evaluating(false);
@@ -2367,6 +2389,7 @@ Handle<SyntheticModule> Factory::NewSyntheticModule(
module->set_module_namespace(roots.undefined_value());
module->set_status(Module::kUninstantiated);
module->set_exception(roots.the_hole_value());
+ module->set_top_level_capability(roots.undefined_value());
module->set_name(*module_name);
module->set_export_names(*export_names);
module->set_exports(*exports);
@@ -2572,7 +2595,8 @@ MaybeHandle<JSBoundFunction> Factory::NewJSBoundFunction(
isolate(), prototype,
JSReceiver::GetPrototype(isolate(), target_function), JSBoundFunction);
- SaveAndSwitchContext save(isolate(), *target_function->GetCreationContext());
+ SaveAndSwitchContext save(
+ isolate(), *target_function->GetCreationContext().ToHandleChecked());
// Create the [[BoundArguments]] for the result.
Handle<FixedArray> bound_arguments;
@@ -2936,99 +2960,18 @@ Handle<BreakPoint> Factory::NewBreakPoint(int id, Handle<String> condition) {
return new_break_point;
}
-Handle<StackTraceFrame> Factory::NewStackTraceFrame(
- Handle<FrameArray> frame_array, int index) {
- Handle<StackTraceFrame> frame = Handle<StackTraceFrame>::cast(
- NewStruct(STACK_TRACE_FRAME_TYPE, AllocationType::kYoung));
- frame->set_frame_array(*frame_array);
- frame->set_frame_index(index);
- frame->set_frame_info(*undefined_value());
-
- return frame;
-}
-
Handle<StackFrameInfo> Factory::NewStackFrameInfo(
- Handle<FrameArray> frame_array, int index) {
- FrameArrayIterator it(isolate(), frame_array, index);
- DCHECK(it.HasFrame());
-
- const bool is_wasm = frame_array->IsAnyWasmFrame(index);
- StackFrameBase* frame = it.Frame();
-
- int line = frame->GetLineNumber();
- int column = frame->GetColumnNumber();
- int wasm_function_index = frame->GetWasmFunctionIndex();
-
- const int script_id = frame->GetScriptId();
-
- Handle<Object> script_name = frame->GetFileName();
- Handle<Object> script_or_url = frame->GetScriptNameOrSourceUrl();
-
- // TODO(szuend): Adjust this, once it is decided what name to use in both
- // "simple" and "detailed" stack traces. This code is for
- // backwards compatibility to fullfill test expectations.
- Handle<PrimitiveHeapObject> function_name = frame->GetFunctionName();
- bool is_user_java_script = false;
- if (!is_wasm) {
- Handle<Object> function = frame->GetFunction();
- if (function->IsJSFunction()) {
- Handle<JSFunction> fun = Handle<JSFunction>::cast(function);
-
- is_user_java_script = fun->shared().IsUserJavaScript();
- }
- }
-
- Handle<PrimitiveHeapObject> method_name = undefined_value();
- Handle<PrimitiveHeapObject> type_name = undefined_value();
- Handle<PrimitiveHeapObject> eval_origin = frame->GetEvalOrigin();
- Handle<PrimitiveHeapObject> wasm_module_name = frame->GetWasmModuleName();
- Handle<HeapObject> wasm_instance = frame->GetWasmInstance();
-
- // MethodName and TypeName are expensive to look up, so they are only
- // included when they are strictly needed by the stack trace
- // serialization code.
- // Note: The {is_method_call} predicate needs to be kept in sync with
- // the corresponding predicate in the stack trace serialization code
- // in stack-frame-info.cc.
- const bool is_toplevel = frame->IsToplevel();
- const bool is_constructor = frame->IsConstructor();
- const bool is_method_call = !(is_toplevel || is_constructor);
- if (is_method_call) {
- method_name = frame->GetMethodName();
- type_name = frame->GetTypeName();
- }
-
- Handle<StackFrameInfo> info = Handle<StackFrameInfo>::cast(
- NewStruct(STACK_FRAME_INFO_TYPE, AllocationType::kYoung));
-
- DisallowGarbageCollection no_gc;
-
- info->set_flag(0);
- info->set_is_wasm(is_wasm);
- info->set_is_asmjs_wasm(frame_array->IsAsmJsWasmFrame(index));
- info->set_is_user_java_script(is_user_java_script);
- info->set_line_number(line);
- info->set_column_number(column);
- info->set_wasm_function_index(wasm_function_index);
- info->set_script_id(script_id);
-
- info->set_script_name(*script_name);
- info->set_script_name_or_source_url(*script_or_url);
- info->set_function_name(*function_name);
- info->set_method_name(*method_name);
- info->set_type_name(*type_name);
- info->set_eval_origin(*eval_origin);
- info->set_wasm_module_name(*wasm_module_name);
- info->set_wasm_instance(*wasm_instance);
-
- info->set_is_eval(frame->IsEval());
- info->set_is_constructor(is_constructor);
- info->set_is_toplevel(is_toplevel);
- info->set_is_async(frame->IsAsync());
- info->set_is_promise_all(frame->IsPromiseAll());
- info->set_is_promise_any(frame->IsPromiseAny());
- info->set_promise_combinator_index(frame->GetPromiseIndex());
-
+ Handle<Object> receiver_or_instance, Handle<Object> function,
+ Handle<HeapObject> code_object, int code_offset_or_source_position,
+ int flags, Handle<FixedArray> parameters) {
+ Handle<StackFrameInfo> info =
+ Handle<StackFrameInfo>::cast(NewStruct(STACK_FRAME_INFO_TYPE));
+ info->set_receiver_or_instance(*receiver_or_instance);
+ info->set_function(*function);
+ info->set_code_object(*code_object);
+ info->set_code_offset_or_source_position(code_offset_or_source_position);
+ info->set_flags(flags);
+ info->set_parameters(*parameters);
return info;
}
@@ -3326,7 +3269,6 @@ Handle<Map> Factory::CreateStrictFunctionMap(
} else {
++descriptors_count; // name accessor.
}
- if (IsFunctionModeWithHomeObject(function_mode)) ++inobject_properties_count;
descriptors_count += inobject_properties_count;
Handle<Map> map = NewMap(
@@ -3372,15 +3314,6 @@ Handle<Map> Factory::CreateStrictFunctionMap(
map->AppendDescriptor(isolate(), &d);
}
- STATIC_ASSERT(JSFunction::kMaybeHomeObjectDescriptorIndex == 2);
- if (IsFunctionModeWithHomeObject(function_mode)) {
- // Add home object field.
- Handle<Name> name = isolate()->factory()->home_object_symbol();
- Descriptor d = Descriptor::DataField(isolate(), name, field_index++,
- DONT_ENUM, Representation::Tagged());
- map->AppendDescriptor(isolate(), &d);
- }
-
if (IsFunctionModeWithPrototype(function_mode)) {
// Add prototype accessor.
PropertyAttributes attribs =
@@ -3494,10 +3427,12 @@ Handle<JSFunction> Factory::JSFunctionBuilder::Build() {
Handle<JSFunction> result = BuildRaw(code);
- if (have_cached_code) {
+ if (have_cached_code || code->kind() == CodeKind::BASELINE) {
IsCompiledScope is_compiled_scope(sfi_->is_compiled_scope(isolate_));
JSFunction::EnsureFeedbackVector(result, &is_compiled_scope);
- if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceHit(sfi_, code);
+ if (FLAG_trace_turbo_nci && have_cached_code) {
+ CompilationCacheCode::TraceHit(sfi_, code);
+ }
}
Compiler::PostInstantiation(result);
@@ -3511,7 +3446,7 @@ Handle<JSFunction> Factory::JSFunctionBuilder::BuildRaw(Handle<Code> code) {
Handle<Map> map = maybe_map_.ToHandleChecked();
Handle<FeedbackCell> feedback_cell = maybe_feedback_cell_.ToHandleChecked();
- DCHECK_EQ(JS_FUNCTION_TYPE, map->instance_type());
+ DCHECK(InstanceTypeChecker::IsJSFunction(map->instance_type()));
// Allocation.
Handle<JSFunction> function(
@@ -3523,7 +3458,7 @@ Handle<JSFunction> Factory::JSFunctionBuilder::BuildRaw(Handle<Code> code) {
function->set_shared(*sfi_);
function->set_context(*context_);
function->set_raw_feedback_cell(*feedback_cell);
- function->set_code(*code);
+ function->set_code(*code, kReleaseStore);
if (map->has_prototype_slot()) {
function->set_prototype_or_initial_map(
ReadOnlyRoots(isolate).the_hole_value());
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index 77cbaf8221..b6fb9a2942 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -7,6 +7,7 @@
// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
+#include "src/baseline/baseline.h"
#include "src/builtins/builtins.h"
#include "src/common/globals.h"
#include "src/execution/messages.h"
@@ -60,7 +61,6 @@ class RegExpMatchInfo;
class ScriptContextTable;
class SourceTextModule;
class StackFrameInfo;
-class StackTraceFrame;
class StringSet;
class StoreHandler;
class SyntheticModule;
@@ -75,25 +75,18 @@ enum class InitializedFlag : uint8_t;
enum FunctionMode {
kWithNameBit = 1 << 0,
- kWithHomeObjectBit = 1 << 1,
- kWithWritablePrototypeBit = 1 << 2,
- kWithReadonlyPrototypeBit = 1 << 3,
+ kWithWritablePrototypeBit = 1 << 1,
+ kWithReadonlyPrototypeBit = 1 << 2,
kWithPrototypeBits = kWithWritablePrototypeBit | kWithReadonlyPrototypeBit,
// Without prototype.
FUNCTION_WITHOUT_PROTOTYPE = 0,
METHOD_WITH_NAME = kWithNameBit,
- METHOD_WITH_HOME_OBJECT = kWithHomeObjectBit,
- METHOD_WITH_NAME_AND_HOME_OBJECT = kWithNameBit | kWithHomeObjectBit,
// With writable prototype.
FUNCTION_WITH_WRITEABLE_PROTOTYPE = kWithWritablePrototypeBit,
FUNCTION_WITH_NAME_AND_WRITEABLE_PROTOTYPE =
kWithWritablePrototypeBit | kWithNameBit,
- FUNCTION_WITH_HOME_OBJECT_AND_WRITEABLE_PROTOTYPE =
- kWithWritablePrototypeBit | kWithHomeObjectBit,
- FUNCTION_WITH_NAME_AND_HOME_OBJECT_AND_WRITEABLE_PROTOTYPE =
- kWithWritablePrototypeBit | kWithNameBit | kWithHomeObjectBit,
// With readonly prototype.
FUNCTION_WITH_READONLY_PROTOTYPE = kWithReadonlyPrototypeBit,
@@ -113,6 +106,9 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
return handle(obj, isolate());
}
+ Handle<BaselineData> NewBaselineData(Handle<Code> code,
+ Handle<HeapObject> function_data);
+
Handle<Oddball> NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number, const char* type_of,
byte kind);
@@ -154,8 +150,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// Allocate a new fixed double array with hole values.
Handle<FixedArrayBase> NewFixedDoubleArrayWithHoles(int size);
- Handle<FrameArray> NewFrameArray(int number_of_frames);
-
// Allocates a NameDictionary with an internal capacity calculated such that
// |at_least_space_for| entries can be added without reallocating.
Handle<NameDictionary> NewNameDictionary(int at_least_space_for);
@@ -177,6 +171,8 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
int capacity = kSmallOrderedHashMapMinCapacity,
AllocationType allocation = AllocationType::kYoung);
+ Handle<SwissNameDictionary> CreateCanonicalEmptySwissNameDictionary();
+
// Create a new PrototypeInfo struct.
Handle<PrototypeInfo> NewPrototypeInfo();
@@ -373,10 +369,13 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<BreakPointInfo> NewBreakPointInfo(int source_position);
Handle<BreakPoint> NewBreakPoint(int id, Handle<String> condition);
- Handle<StackTraceFrame> NewStackTraceFrame(Handle<FrameArray> frame_array,
- int index);
- Handle<StackFrameInfo> NewStackFrameInfo(Handle<FrameArray> frame_array,
- int index);
+
+ Handle<StackFrameInfo> NewStackFrameInfo(Handle<Object> receiver_or_instance,
+ Handle<Object> function,
+ Handle<HeapObject> code_object,
+ int code_offset_or_source_position,
+ int flags,
+ Handle<FixedArray> parameters);
// Allocate various microtasks.
Handle<CallableTask> NewCallableTask(Handle<JSReceiver> callable,
@@ -393,7 +392,9 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<Cell> NewCell(Handle<Object> value);
Handle<PropertyCell> NewPropertyCell(
- Handle<Name> name, AllocationType allocation = AllocationType::kOld);
+ Handle<Name> name, PropertyDetails details, Handle<Object> value,
+ AllocationType allocation = AllocationType::kOld);
+ Handle<PropertyCell> NewProtector();
Handle<FeedbackCell> NewNoClosuresCell(Handle<HeapObject> value);
Handle<FeedbackCell> NewOneClosureCell(Handle<HeapObject> value);
@@ -555,7 +556,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<JSModuleNamespace> NewJSModuleNamespace();
Handle<WasmTypeInfo> NewWasmTypeInfo(Address type_address,
- Handle<Map> parent);
+ Handle<Map> opt_parent);
Handle<SourceTextModule> NewSourceTextModule(Handle<SharedFunctionInfo> code);
Handle<SyntheticModule> NewSyntheticModule(
@@ -705,10 +706,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
return (function_mode & kWithNameBit) != 0;
}
- static bool IsFunctionModeWithHomeObject(FunctionMode function_mode) {
- return (function_mode & kWithHomeObjectBit) != 0;
- }
-
Handle<Map> CreateSloppyFunctionMap(
FunctionMode function_mode, MaybeHandle<JSFunction> maybe_empty_function);
@@ -844,6 +841,14 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
return *this;
}
+ CodeBuilder& set_bytecode_offset_table(Handle<ByteArray> table) {
+ DCHECK(!table.is_null());
+ // TODO(v8:11429): Rename this and clean up calls to SourcePositionTable
+ // under Baseline.
+ source_position_table_ = table;
+ return *this;
+ }
+
CodeBuilder& set_deoptimization_data(
Handle<DeoptimizationData> deopt_data) {
DCHECK(!deopt_data.is_null());
@@ -891,6 +896,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
int32_t builtin_index_ = Builtins::kNoBuiltinId;
uint32_t inlined_bytecode_size_ = 0;
int32_t kind_specific_flags_ = 0;
+ // Contains bytecode offset table for baseline
Handle<ByteArray> source_position_table_;
Handle<DeoptimizationData> deoptimization_data_ =
DeoptimizationData::Empty(isolate_);
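A hypothetical call-site sketch for the reworked PropertyCell helpers declared above: a cell is now created fully initialized with its details and value, and protector cells come from a dedicated NewProtector() helper instead of hand-rolled setup (isolate, name and value are assumed to be in scope; PropertyDetails::Empty() is only a stand-in for real details):

// Not part of the patch; illustrates the new Factory signatures only.
PropertyDetails details = PropertyDetails::Empty();  // assumption: placeholder details
Handle<PropertyCell> cell =
    isolate->factory()->NewPropertyCell(name, details, value);

Handle<PropertyCell> protector = isolate->factory()->NewProtector();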
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index a14a7b0b38..be7d0ea0fe 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -573,7 +573,7 @@ void GCTracer::PrintNVP() const {
"mutator=%.1f "
"gc=%s "
"reduce_memory=%d "
- "stop_the_world=%.2f "
+ "time_to_safepoint=%.2f "
"heap.prologue=%.2f "
"heap.epilogue=%.2f "
"heap.epilogue.reduce_new_space=%.2f "
@@ -593,6 +593,7 @@ void GCTracer::PrintNVP() const {
"scavenge.sweep_array_buffers=%.2f "
"background.scavenge.parallel=%.2f "
"background.unmapper=%.2f "
+ "unmapper=%.2f "
"incremental.steps_count=%d "
"incremental.steps_took=%.1f "
"scavenge_throughput=%.f "
@@ -614,7 +615,7 @@ void GCTracer::PrintNVP() const {
"unmapper_chunks=%d "
"context_disposal_rate=%.1f\n",
duration, spent_in_mutator, current_.TypeName(true),
- current_.reduce_memory, current_.scopes[Scope::STOP_THE_WORLD],
+ current_.reduce_memory, current_.scopes[Scope::TIME_TO_SAFEPOINT],
current_.scopes[Scope::HEAP_PROLOGUE],
current_.scopes[Scope::HEAP_EPILOGUE],
current_.scopes[Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE],
@@ -636,6 +637,7 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::SCAVENGER_SWEEP_ARRAY_BUFFERS],
current_.scopes[Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL],
current_.scopes[Scope::BACKGROUND_UNMAPPER],
+ current_.scopes[Scope::UNMAPPER],
current_.incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL]
.steps,
current_.scopes[Scope::MC_INCREMENTAL],
@@ -660,7 +662,7 @@ void GCTracer::PrintNVP() const {
"reduce_memory=%d "
"minor_mc=%.2f "
"finish_sweeping=%.2f "
- "stop_the_world=%.2f "
+ "time_to_safepoint=%.2f "
"mark=%.2f "
"mark.seed=%.2f "
"mark.roots=%.2f "
@@ -678,12 +680,13 @@ void GCTracer::PrintNVP() const {
"background.evacuate.copy=%.2f "
"background.evacuate.update_pointers=%.2f "
"background.unmapper=%.2f "
+ "unmapper=%.2f "
"update_marking_deque=%.2f "
"reset_liveness=%.2f\n",
duration, spent_in_mutator, "mmc", current_.reduce_memory,
current_.scopes[Scope::MINOR_MC],
current_.scopes[Scope::MINOR_MC_SWEEPING],
- current_.scopes[Scope::STOP_THE_WORLD],
+ current_.scopes[Scope::TIME_TO_SAFEPOINT],
current_.scopes[Scope::MINOR_MC_MARK],
current_.scopes[Scope::MINOR_MC_MARK_SEED],
current_.scopes[Scope::MINOR_MC_MARK_ROOTS],
@@ -702,6 +705,7 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY],
current_.scopes[Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS],
current_.scopes[Scope::BACKGROUND_UNMAPPER],
+ current_.scopes[Scope::UNMAPPER],
current_.scopes[Scope::MINOR_MC_MARKING_DEQUE],
current_.scopes[Scope::MINOR_MC_RESET_LIVENESS]);
break;
@@ -712,7 +716,7 @@ void GCTracer::PrintNVP() const {
"mutator=%.1f "
"gc=%s "
"reduce_memory=%d "
- "stop_the_world=%.2f "
+ "time_to_safepoint=%.2f "
"heap.prologue=%.2f "
"heap.embedder_tracing_epilogue=%.2f "
"heap.epilogue=%.2f "
@@ -740,7 +744,6 @@ void GCTracer::PrintNVP() const {
"evacuate.update_pointers=%.1f "
"evacuate.update_pointers.to_new_roots=%.1f "
"evacuate.update_pointers.slots.main=%.1f "
- "evacuate.update_pointers.slots.map_space=%.1f "
"evacuate.update_pointers.weak=%.1f "
"finish=%.1f "
"finish.sweep_array_buffers=%.1f "
@@ -784,6 +787,7 @@ void GCTracer::PrintNVP() const {
"background.evacuate.copy=%.1f "
"background.evacuate.update_pointers=%.1f "
"background.unmapper=%.1f "
+ "unmapper=%.1f "
"total_size_before=%zu "
"total_size_after=%zu "
"holes_size_before=%zu "
@@ -803,7 +807,7 @@ void GCTracer::PrintNVP() const {
"context_disposal_rate=%.1f "
"compaction_speed=%.f\n",
duration, spent_in_mutator, current_.TypeName(true),
- current_.reduce_memory, current_.scopes[Scope::STOP_THE_WORLD],
+ current_.reduce_memory, current_.scopes[Scope::TIME_TO_SAFEPOINT],
current_.scopes[Scope::HEAP_PROLOGUE],
current_.scopes[Scope::HEAP_EMBEDDER_TRACING_EPILOGUE],
current_.scopes[Scope::HEAP_EPILOGUE],
@@ -831,7 +835,6 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN],
- current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAP_SPACE],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK],
current_.scopes[Scope::MC_FINISH],
current_.scopes[Scope::MC_FINISH_SWEEP_ARRAY_BUFFERS],
@@ -882,9 +885,10 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_BACKGROUND_EVACUATE_COPY],
current_.scopes[Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS],
current_.scopes[Scope::BACKGROUND_UNMAPPER],
- current_.start_object_size, current_.end_object_size,
- current_.start_holes_size, current_.end_holes_size,
- allocated_since_last_gc, heap_->promoted_objects_size(),
+ current_.scopes[Scope::UNMAPPER], current_.start_object_size,
+ current_.end_object_size, current_.start_holes_size,
+ current_.end_holes_size, allocated_since_last_gc,
+ heap_->promoted_objects_size(),
heap_->semi_space_copied_object_size(),
heap_->nodes_died_in_new_space_, heap_->nodes_copied_in_new_space_,
heap_->nodes_promoted_, heap_->promotion_ratio_,
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 2e6f099688..8372dd518d 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -72,6 +72,19 @@ Address AllocationResult::ToAddress() {
return HeapObject::cast(object_).address();
}
+// static
+BytecodeFlushMode Heap::GetBytecodeFlushMode(Isolate* isolate) {
+ if (isolate->disable_bytecode_flushing()) {
+ return BytecodeFlushMode::kDoNotFlushBytecode;
+ }
+ if (FLAG_stress_flush_bytecode) {
+ return BytecodeFlushMode::kStressFlushBytecode;
+ } else if (FLAG_flush_bytecode) {
+ return BytecodeFlushMode::kFlushBytecode;
+ }
+ return BytecodeFlushMode::kDoNotFlushBytecode;
+}
+
Isolate* Heap::isolate() {
return reinterpret_cast<Isolate*>(
reinterpret_cast<intptr_t>(this) -
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 235d17615d..23b84296be 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -145,11 +145,6 @@ void Heap_GenerationalEphemeronKeyBarrierSlow(Heap* heap,
heap->RecordEphemeronKeyWrite(table, slot);
}
-void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
- DCHECK_EQ(Smi::zero(), arguments_adaptor_deopt_pc_offset());
- set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
-}
-
void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
DCHECK_EQ(Smi::zero(), construct_stub_create_deopt_pc_offset());
set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset));
@@ -1126,6 +1121,10 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_SAFEPOINT);
+ safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
+ local_heap->InvokeGCEpilogueCallbacksInSafepoint();
+ });
+
#define UPDATE_COUNTERS_FOR_SPACE(space) \
isolate_->counters()->space##_bytes_available()->Set( \
static_cast<int>(space()->Available())); \
@@ -1798,12 +1797,8 @@ void Heap::StartIncrementalMarkingIfAllocationLimitIsReachedBackground() {
}
const size_t old_generation_space_available = OldGenerationSpaceAvailable();
- const base::Optional<size_t> global_memory_available =
- GlobalMemoryAvailable();
- if (old_generation_space_available < new_space_->Capacity() ||
- (global_memory_available &&
- *global_memory_available < new_space_->Capacity())) {
+ if (old_generation_space_available < new_space_->Capacity()) {
incremental_marking()->incremental_marking_job()->ScheduleTask(this);
}
}
@@ -1998,7 +1993,6 @@ GCTracer::Scope::ScopeId CollectorScopeId(GarbageCollector collector) {
size_t Heap::PerformGarbageCollection(
GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
DisallowJavascriptExecution no_js(isolate());
- base::Optional<SafepointScope> optional_safepoint_scope;
if (IsYoungGenerationCollector(collector)) {
CompleteSweepingYoung(collector);
@@ -2017,9 +2011,7 @@ size_t Heap::PerformGarbageCollection(
TRACE_GC_EPOCH(tracer(), CollectorScopeId(collector), ThreadKind::kMain);
- if (FLAG_local_heaps) {
- optional_safepoint_scope.emplace(this);
- }
+ SafepointScope safepoint_scope(this);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
@@ -3298,7 +3290,6 @@ void Heap::MakeHeapIterable() {
}
void Heap::MakeLocalHeapLabsIterable() {
- if (!FLAG_local_heaps) return;
safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->MakeLinearAllocationAreaIterable();
});
@@ -4507,10 +4498,8 @@ void Heap::IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
isolate_->handle_scope_implementer()->Iterate(v);
#endif
- if (FLAG_local_heaps) {
- safepoint_->Iterate(&left_trim_visitor);
- safepoint_->Iterate(v);
- }
+ safepoint_->Iterate(&left_trim_visitor);
+ safepoint_->Iterate(v);
isolate_->persistent_handles_list()->Iterate(&left_trim_visitor, isolate_);
isolate_->persistent_handles_list()->Iterate(v, isolate_);
@@ -4743,12 +4732,6 @@ void Heap::ConfigureHeap(const v8::ResourceConstraints& constraints) {
configured_ = true;
}
-void Heap::ConfigureCppHeap(std::shared_ptr<CppHeapCreateParams> params) {
- cpp_heap_ = std::make_unique<CppHeap>(
- reinterpret_cast<v8::Isolate*>(isolate()), params->custom_spaces);
- SetEmbedderHeapTracer(CppHeap::From(cpp_heap_.get()));
-}
-
void Heap::AddToRingBuffer(const char* string) {
size_t first_part =
std::min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
@@ -5390,6 +5373,8 @@ void Heap::NotifyOldGenerationExpansion(AllocationSpace space,
void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
DCHECK_EQ(gc_state(), HeapState::NOT_IN_GC);
+ // Setting a tracer is only supported when CppHeap is not used.
+ DCHECK_IMPLIES(tracer, !cpp_heap_);
local_embedder_heap_tracer()->SetRemoteTracer(tracer);
}
@@ -5397,6 +5382,16 @@ EmbedderHeapTracer* Heap::GetEmbedderHeapTracer() const {
return local_embedder_heap_tracer()->remote_tracer();
}
+void Heap::AttachCppHeap(v8::CppHeap* cpp_heap) {
+ CppHeap::From(cpp_heap)->AttachIsolate(isolate());
+ cpp_heap_ = cpp_heap;
+}
+
+void Heap::DetachCppHeap() {
+ CppHeap::From(cpp_heap_)->DetachIsolate();
+ cpp_heap_ = nullptr;
+}
+
EmbedderHeapTracer::TraceFlags Heap::flags_for_embedder_tracer() const {
if (is_current_gc_forced()) {
return EmbedderHeapTracer::TraceFlags::kForced;
@@ -5523,7 +5518,10 @@ void Heap::TearDown() {
dead_object_stats_.reset();
local_embedder_heap_tracer_.reset();
- cpp_heap_.reset();
+ if (cpp_heap_) {
+ CppHeap::From(cpp_heap_)->DetachIsolate();
+ cpp_heap_ = nullptr;
+ }
external_string_table_.TearDown();
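A hedged lifetime sketch for the attach/detach model introduced above: the embedder owns the CppHeap, the Heap only stores a raw pointer, and TearDown() detaches rather than destroys it. The creation call via v8::CppHeap::Create and the platform/params variables are assumptions based on the public v8-cppgc.h API, not shown in this hunk:

// Embedder side (illustrative).
std::unique_ptr<v8::CppHeap> cpp_heap =
    v8::CppHeap::Create(platform, params);  // assumption: public factory method
heap->AttachCppHeap(cpp_heap.get());        // heap keeps only the raw pointer
// ... run with the attached C++ heap ...
heap->DetachCppHeap();                      // or implicitly via Heap::TearDown()
// cpp_heap is destroyed by the embedder, never by the Heap.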
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index a636c5d8c4..b1ccc4391e 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -35,7 +35,7 @@
#include "src/objects/visitors.h"
#include "src/roots/roots.h"
#include "src/utils/allocation.h"
-#include "testing/gtest/include/gtest/gtest_prod.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
namespace v8 {
@@ -445,14 +445,7 @@ class Heap {
// Helper function to get the bytecode flushing mode based on the flags. This
// is required because it is not safe to access flags in the concurrent marker.
- static inline BytecodeFlushMode GetBytecodeFlushMode() {
- if (FLAG_stress_flush_bytecode) {
- return BytecodeFlushMode::kStressFlushBytecode;
- } else if (FLAG_flush_bytecode) {
- return BytecodeFlushMode::kFlushBytecode;
- }
- return BytecodeFlushMode::kDoNotFlushBytecode;
- }
+ static inline BytecodeFlushMode GetBytecodeFlushMode(Isolate* isolate);
static uintptr_t ZapValue() {
return FLAG_clear_free_memory ? kClearedFreeMemoryValue : kZapValue;
@@ -1111,7 +1104,6 @@ class Heap {
// ===========================================================================
// Setters for code offsets of well-known deoptimization targets.
- void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
void SetConstructStubCreateDeoptPCOffset(int pc_offset);
void SetConstructStubInvokeDeoptPCOffset(int pc_offset);
void SetInterpreterEntryReturnPCOffset(int pc_offset);
@@ -1145,10 +1137,10 @@ class Heap {
// Unified heap (C++) support. ===============================================
// ===========================================================================
- V8_EXPORT_PRIVATE void ConfigureCppHeap(
- std::shared_ptr<CppHeapCreateParams> params);
+ V8_EXPORT_PRIVATE void AttachCppHeap(v8::CppHeap* cpp_heap);
+ V8_EXPORT_PRIVATE void DetachCppHeap();
- v8::CppHeap* cpp_heap() const { return cpp_heap_.get(); }
+ v8::CppHeap* cpp_heap() const { return cpp_heap_; }
// ===========================================================================
// External string table API. ================================================
@@ -2244,7 +2236,9 @@ class Heap {
std::unique_ptr<AllocationObserver> stress_concurrent_allocation_observer_;
std::unique_ptr<LocalEmbedderHeapTracer> local_embedder_heap_tracer_;
std::unique_ptr<MarkingBarrier> marking_barrier_;
- std::unique_ptr<v8::CppHeap> cpp_heap_;
+
+ // The embedder owns the C++ heap.
+ v8::CppHeap* cpp_heap_ = nullptr;
StrongRootsEntry* strong_roots_head_ = nullptr;
base::Mutex strong_roots_mutex_;
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 7b6b01855d..a093835981 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -261,11 +261,9 @@ void IncrementalMarking::StartBlackAllocation() {
heap()->old_space()->MarkLinearAllocationAreaBlack();
heap()->map_space()->MarkLinearAllocationAreaBlack();
heap()->code_space()->MarkLinearAllocationAreaBlack();
- if (FLAG_local_heaps) {
- heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
- local_heap->MarkLinearAllocationAreaBlack();
- });
- }
+ heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
+ local_heap->MarkLinearAllocationAreaBlack();
+ });
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Black allocation started\n");
@@ -277,11 +275,8 @@ void IncrementalMarking::PauseBlackAllocation() {
heap()->old_space()->UnmarkLinearAllocationArea();
heap()->map_space()->UnmarkLinearAllocationArea();
heap()->code_space()->UnmarkLinearAllocationArea();
- if (FLAG_local_heaps) {
- heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
- local_heap->UnmarkLinearAllocationArea();
- });
- }
+ heap()->safepoint()->IterateLocalHeaps(
+ [](LocalHeap* local_heap) { local_heap->UnmarkLinearAllocationArea(); });
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Black allocation paused\n");
@@ -600,19 +595,17 @@ void IncrementalMarking::Stop() {
is_compacting_ = false;
FinishBlackAllocation();
- if (FLAG_local_heaps) {
- // Merge live bytes counters of background threads
- for (auto pair : background_live_bytes_) {
- MemoryChunk* memory_chunk = pair.first;
- intptr_t live_bytes = pair.second;
+ // Merge live bytes counters of background threads
+ for (auto pair : background_live_bytes_) {
+ MemoryChunk* memory_chunk = pair.first;
+ intptr_t live_bytes = pair.second;
- if (live_bytes) {
- marking_state()->IncrementLiveBytes(memory_chunk, live_bytes);
- }
+ if (live_bytes) {
+ marking_state()->IncrementLiveBytes(memory_chunk, live_bytes);
}
-
- background_live_bytes_.clear();
}
+
+ background_live_bytes_.clear();
}
@@ -876,7 +869,8 @@ void IncrementalMarking::AdvanceOnAllocation() {
HistogramTimerScope incremental_marking_scope(
heap_->isolate()->counters()->gc_incremental_marking());
TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
- TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
+ TRACE_GC_EPOCH(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL,
+ ThreadKind::kMain);
ScheduleBytesToMarkBasedOnAllocation();
Step(kMaxStepSizeInMs, GC_VIA_STACK_GUARD, StepOrigin::kV8);
}
diff --git a/deps/v8/src/heap/local-heap-inl.h b/deps/v8/src/heap/local-heap-inl.h
index c520e1e556..0956a7b0d6 100644
--- a/deps/v8/src/heap/local-heap-inl.h
+++ b/deps/v8/src/heap/local-heap-inl.h
@@ -26,6 +26,9 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
DCHECK(state == Heap::TEAR_DOWN || state == Heap::NOT_IN_GC);
#endif
+ // Each allocation is supposed to be a safepoint.
+ Safepoint();
+
bool large_object = size_in_bytes > Heap::MaxRegularHeapObjectSize(type);
CHECK_EQ(type, AllocationType::kOld);
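The Safepoint() call added above makes every background allocation a cooperative yield point, so an allocating worker cannot delay a stop-the-world request indefinitely. A short hypothetical sketch of the resulting behaviour (kSize and the loop are illustrative; per the CHECK above only AllocationType::kOld is supported here):

// Illustrative background-thread loop; 'local_heap' is a LocalHeap*.
for (int i = 0; i < 1000; i++) {
  // Each call first honours a pending safepoint, then allocates.
  AllocationResult result =
      local_heap->AllocateRaw(kSize, AllocationType::kOld);
  if (result.IsRetry()) break;  // caller would normally trigger a GC and retry
}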
diff --git a/deps/v8/src/heap/local-heap.cc b/deps/v8/src/heap/local-heap.cc
index 38f2cf17a8..85c36baaee 100644
--- a/deps/v8/src/heap/local-heap.cc
+++ b/deps/v8/src/heap/local-heap.cc
@@ -52,7 +52,7 @@ LocalHeap::LocalHeap(Heap* heap, ThreadKind kind,
marking_barrier_(new MarkingBarrier(this)),
old_space_allocator_(this, heap->old_space()) {
heap_->safepoint()->AddLocalHeap(this, [this] {
- if (FLAG_local_heaps && !is_main_thread()) {
+ if (!is_main_thread()) {
WriteBarrier::SetForThread(marking_barrier_.get());
if (heap_->incremental_marking()->IsMarking()) {
marking_barrier_->Activate(
@@ -75,7 +75,7 @@ LocalHeap::~LocalHeap() {
heap_->safepoint()->RemoveLocalHeap(this, [this] {
old_space_allocator_.FreeLinearAllocationArea();
- if (FLAG_local_heaps && !is_main_thread()) {
+ if (!is_main_thread()) {
marking_barrier_->Publish();
WriteBarrier::ClearForThread(marking_barrier_.get());
}
@@ -85,6 +85,8 @@ LocalHeap::~LocalHeap() {
DCHECK_EQ(current_local_heap, this);
current_local_heap = nullptr;
}
+
+ DCHECK(gc_epilogue_callbacks_.empty());
}
void LocalHeap::EnsurePersistentHandles() {
@@ -133,7 +135,7 @@ bool LocalHeap::IsParked() {
void LocalHeap::Park() {
base::MutexGuard guard(&state_mutex_);
- CHECK(state_ == ThreadState::Running);
+ CHECK_EQ(ThreadState::Running, state_);
state_ = ThreadState::Parked;
state_change_.NotifyAll();
}
@@ -204,5 +206,31 @@ Address LocalHeap::PerformCollectionAndAllocateAgain(
heap_->FatalProcessOutOfMemory("LocalHeap: allocation failed");
}
+void LocalHeap::AddGCEpilogueCallback(GCEpilogueCallback* callback,
+ void* data) {
+ DCHECK(!IsParked());
+ std::pair<GCEpilogueCallback*, void*> callback_and_data(callback, data);
+ DCHECK_EQ(std::find(gc_epilogue_callbacks_.begin(),
+ gc_epilogue_callbacks_.end(), callback_and_data),
+ gc_epilogue_callbacks_.end());
+ gc_epilogue_callbacks_.push_back(callback_and_data);
+}
+
+void LocalHeap::RemoveGCEpilogueCallback(GCEpilogueCallback* callback,
+ void* data) {
+ DCHECK(!IsParked());
+ std::pair<GCEpilogueCallback*, void*> callback_and_data(callback, data);
+ auto it = std::find(gc_epilogue_callbacks_.begin(),
+ gc_epilogue_callbacks_.end(), callback_and_data);
+ *it = gc_epilogue_callbacks_.back();
+ gc_epilogue_callbacks_.pop_back();
+}
+
+void LocalHeap::InvokeGCEpilogueCallbacksInSafepoint() {
+ for (auto callback_and_data : gc_epilogue_callbacks_) {
+ callback_and_data.first(callback_and_data.second);
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/local-heap.h b/deps/v8/src/heap/local-heap.h
index 16eb08d841..8b5a6545de 100644
--- a/deps/v8/src/heap/local-heap.h
+++ b/deps/v8/src/heap/local-heap.h
@@ -33,6 +33,8 @@ class LocalHandles;
// some time or for blocking operations like locking a mutex.
class V8_EXPORT_PRIVATE LocalHeap {
public:
+ using GCEpilogueCallback = void(void* data);
+
explicit LocalHeap(
Heap* heap, ThreadKind kind,
std::unique_ptr<PersistentHandles> persistent_handles = nullptr);
@@ -133,6 +135,13 @@ class V8_EXPORT_PRIVATE LocalHeap {
// Requests GC and blocks until the collection finishes.
void PerformCollection();
+ // Adds a callback that is invoked with the given |data| after each GC.
+ // The callback is invoked on the main thread before any background thread
+ // resumes. The callback must not allocate or make any other calls that
+ // can trigger GC.
+ void AddGCEpilogueCallback(GCEpilogueCallback* callback, void* data);
+ void RemoveGCEpilogueCallback(GCEpilogueCallback* callback, void* data);
+
private:
enum class ThreadState {
// Threads in this state need to be stopped in a safepoint.
@@ -164,6 +173,8 @@ class V8_EXPORT_PRIVATE LocalHeap {
void EnterSafepoint();
+ void InvokeGCEpilogueCallbacksInSafepoint();
+
Heap* heap_;
bool is_main_thread_;
@@ -182,6 +193,8 @@ class V8_EXPORT_PRIVATE LocalHeap {
std::unique_ptr<PersistentHandles> persistent_handles_;
std::unique_ptr<MarkingBarrier> marking_barrier_;
+ std::vector<std::pair<GCEpilogueCallback*, void*>> gc_epilogue_callbacks_;
+
ConcurrentAllocator old_space_allocator_;
friend class Heap;
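A minimal usage sketch for the GC epilogue callback API declared above; the MyData type, OnGCEpilogue, and BackgroundWork are illustrative names, not part of the patch:

struct MyData { int cached_entries = 0; };  // hypothetical per-thread state

void OnGCEpilogue(void* data) {
  // Runs on the main thread inside the safepoint, before background threads
  // resume; must not allocate or otherwise trigger GC.
  static_cast<MyData*>(data)->cached_entries = 0;
}

void BackgroundWork(LocalHeap* local_heap) {
  MyData my_data;
  local_heap->AddGCEpilogueCallback(OnGCEpilogue, &my_data);
  // ... allocate, park, do work ...
  local_heap->RemoveGCEpilogueCallback(OnGCEpilogue, &my_data);
}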
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 28f40d432b..f46ca120d4 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -494,12 +494,13 @@ void MarkCompactCollector::StartMarking() {
contexts.push_back(context->ptr());
}
}
+ bytecode_flush_mode_ = Heap::GetBytecodeFlushMode(isolate());
marking_worklists()->CreateContextWorklists(contexts);
local_marking_worklists_ =
std::make_unique<MarkingWorklists::Local>(marking_worklists());
marking_visitor_ = std::make_unique<MarkingVisitor>(
marking_state(), local_marking_worklists(), weak_objects(), heap_,
- epoch(), Heap::GetBytecodeFlushMode(),
+ epoch(), bytecode_flush_mode(),
heap_->local_embedder_heap_tracer()->InUse(),
heap_->is_current_gc_forced());
// Marking bits are cleared by the sweeper.
@@ -857,11 +858,9 @@ void MarkCompactCollector::Prepare() {
space->PrepareForMarkCompact();
}
- if (FLAG_local_heaps) {
- // Fill and reset all background thread LABs
- heap_->safepoint()->IterateLocalHeaps(
- [](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
- }
+ // Fill and reset all background thread LABs
+ heap_->safepoint()->IterateLocalHeaps(
+ [](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
// All objects are guaranteed to be initialized in atomic pause
heap()->new_lo_space()->ResetPendingObject();
@@ -1873,9 +1872,7 @@ void MarkCompactCollector::ProcessEphemeronMarking() {
void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
!it.done(); it.Advance()) {
- if (it.frame()->type() == StackFrame::INTERPRETED) {
- return;
- }
+ if (it.frame()->is_unoptimized()) return;
if (it.frame()->type() == StackFrame::OPTIMIZED) {
Code code = it.frame()->LookupCode();
if (!code.CanDeoptAt(it.frame()->pc())) {
@@ -2116,7 +2113,7 @@ void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map dead_target) {
DCHECK(non_atomic_marking_state()->IsWhite(dead_target));
- Object potential_parent = dead_target.constructor_or_backpointer();
+ Object potential_parent = dead_target.constructor_or_back_pointer();
if (potential_parent.IsMap()) {
Map parent = Map::cast(potential_parent);
DisallowGarbageCollection no_gc_obviously;
@@ -2208,6 +2205,8 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
// performing the unusual task of decompiling.
shared_info.set_function_data(uncompiled_data, kReleaseStore);
DCHECK(!shared_info.is_compiled());
+
+ PROFILE(heap()->isolate(), BytecodeFlushEvent(compiled_data_start));
}
void MarkCompactCollector::ClearOldBytecodeCandidates() {
@@ -2254,14 +2253,14 @@ void MarkCompactCollector::ClearFullMapTransitions() {
// filled. Allow it.
if (array.GetTargetIfExists(0, isolate(), &map)) {
DCHECK(!map.is_null()); // Weak pointers aren't cleared yet.
- Object constructor_or_backpointer = map.constructor_or_backpointer();
- if (constructor_or_backpointer.IsSmi()) {
+ Object constructor_or_back_pointer = map.constructor_or_back_pointer();
+ if (constructor_or_back_pointer.IsSmi()) {
DCHECK(isolate()->has_active_deserializer());
- DCHECK_EQ(constructor_or_backpointer,
+ DCHECK_EQ(constructor_or_back_pointer,
Deserializer::uninitialized_field_value());
continue;
}
- Map parent = Map::cast(map.constructor_or_backpointer());
+ Map parent = Map::cast(map.constructor_or_back_pointer());
bool parent_is_alive =
non_atomic_marking_state()->IsBlackOrGrey(parent);
DescriptorArray descriptors =
@@ -2323,7 +2322,7 @@ bool MarkCompactCollector::CompactTransitionArray(Map map,
// Compact all live transitions to the left.
for (int i = 0; i < num_transitions; ++i) {
Map target = transitions.GetTarget(i);
- DCHECK_EQ(target.constructor_or_backpointer(), map);
+ DCHECK_EQ(target.constructor_or_back_pointer(), map);
if (non_atomic_marking_state()->IsWhite(target)) {
if (!descriptors.is_null() &&
target.instance_descriptors(kRelaxedLoad) == descriptors) {
@@ -2398,13 +2397,6 @@ void MarkCompactCollector::TrimDescriptorArray(Map map,
TrimEnumCache(map, descriptors);
descriptors.Sort();
-
- if (FLAG_unbox_double_fields) {
- LayoutDescriptor layout_descriptor = map.layout_descriptor(kAcquireLoad);
- layout_descriptor = layout_descriptor.Trim(heap_, map, descriptors,
- number_of_own_descriptors);
- SLOW_DCHECK(layout_descriptor.IsConsistentWithMap(map, true));
- }
}
DCHECK(descriptors.number_of_descriptors() == number_of_own_descriptors);
map.set_owns_descriptors(true);
@@ -3502,12 +3494,10 @@ class PointersUpdatingJob : public v8::JobTask {
explicit PointersUpdatingJob(
Isolate* isolate,
std::vector<std::unique_ptr<UpdatingItem>> updating_items,
- base::Optional<size_t> slots, GCTracer::Scope::ScopeId scope,
- GCTracer::Scope::ScopeId background_scope)
+ GCTracer::Scope::ScopeId scope, GCTracer::Scope::ScopeId background_scope)
: updating_items_(std::move(updating_items)),
remaining_updating_items_(updating_items_.size()),
generator_(updating_items_.size()),
- slots_(slots),
tracer_(isolate->heap()->tracer()),
scope_(scope),
background_scope_(background_scope) {}
@@ -3542,17 +3532,7 @@ class PointersUpdatingJob : public v8::JobTask {
size_t items = remaining_updating_items_.load(std::memory_order_relaxed);
if (!FLAG_parallel_pointer_update) return items > 0;
const size_t kMaxPointerUpdateTasks = 8;
- const size_t kSlotsPerTask = 600;
- size_t wanted_tasks = items;
- // Limit the number of update tasks as task creation often dominates the
- // actual work that is being done.
- if (slots_ && *slots_ > 0) {
- // Round up to ensure enough workers for all items.
- wanted_tasks = std::min<size_t>(
- items, (*slots_ + kSlotsPerTask - 1) / kSlotsPerTask);
- }
- size_t max_concurrency =
- std::min<size_t>(kMaxPointerUpdateTasks, wanted_tasks);
+ size_t max_concurrency = std::min<size_t>(kMaxPointerUpdateTasks, items);
DCHECK_IMPLIES(items > 0, max_concurrency > 0);
return max_concurrency;
}
@@ -3561,7 +3541,6 @@ class PointersUpdatingJob : public v8::JobTask {
std::vector<std::unique_ptr<UpdatingItem>> updating_items_;
std::atomic<size_t> remaining_updating_items_{0};
IndexGenerator generator_;
- const base::Optional<size_t> slots_;
GCTracer* tracer_;
GCTracer::Scope::ScopeId scope_;
@@ -3951,6 +3930,8 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
RememberedSetUpdatingMode::ALL);
CollectRememberedSetUpdatingItems(&updating_items, heap()->code_lo_space(),
RememberedSetUpdatingMode::ALL);
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->map_space(),
+ RememberedSetUpdatingMode::ALL);
CollectToSpaceUpdatingItems(&updating_items);
updating_items.push_back(
@@ -3959,36 +3940,13 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
V8::GetCurrentPlatform()
->PostJob(v8::TaskPriority::kUserBlocking,
std::make_unique<PointersUpdatingJob>(
- isolate(), std::move(updating_items), old_to_new_slots_,
+ isolate(), std::move(updating_items),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
GCTracer::Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS))
->Join();
}
{
- // - Update pointers in map space in a separate phase to avoid data races
- // with Map->LayoutDescriptor edge.
- // - Update array buffer trackers in the second phase to have access to
- // byte length which is potentially a HeapNumber.
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAP_SPACE);
- std::vector<std::unique_ptr<UpdatingItem>> updating_items;
-
- CollectRememberedSetUpdatingItems(&updating_items, heap()->map_space(),
- RememberedSetUpdatingMode::ALL);
- if (!updating_items.empty()) {
- V8::GetCurrentPlatform()
- ->PostJob(
- v8::TaskPriority::kUserBlocking,
- std::make_unique<PointersUpdatingJob>(
- isolate(), std::move(updating_items), old_to_new_slots_,
- GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
- GCTracer::Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS))
- ->Join();
- }
- }
-
- {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
// Update pointers from external string table.
@@ -4480,7 +4438,7 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
->PostJob(
v8::TaskPriority::kUserBlocking,
std::make_unique<PointersUpdatingJob>(
- isolate(), std::move(updating_items), old_to_new_slots_,
+ isolate(), std::move(updating_items),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS))
->Join();
@@ -4763,10 +4721,6 @@ class YoungGenerationMarkingTask {
Page::kPageSize);
}
- int slots() const { return slots_; }
-
- void IncrementSlots() { ++slots_; }
-
void MarkObject(Object object) {
if (!Heap::InYoungGeneration(object)) return;
HeapObject heap_object = HeapObject::cast(object);
@@ -4799,7 +4753,6 @@ class YoungGenerationMarkingTask {
MinorMarkCompactCollector::MarkingState* marking_state_;
YoungGenerationMarkingVisitor visitor_;
std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
- int slots_ = 0;
};
class PageMarkingItem : public ParallelWorkItem {
@@ -4864,7 +4817,6 @@ class PageMarkingItem : public ParallelWorkItem {
USE(success);
DCHECK(success);
task->MarkObject(heap_object);
- task->IncrementSlots();
return KEEP_SLOT;
}
return REMOVE_SLOT;
@@ -4878,14 +4830,13 @@ class YoungGenerationMarkingJob : public v8::JobTask {
YoungGenerationMarkingJob(
Isolate* isolate, MinorMarkCompactCollector* collector,
MinorMarkCompactCollector::MarkingWorklist* global_worklist,
- std::vector<PageMarkingItem> marking_items, std::atomic<int>* slots)
+ std::vector<PageMarkingItem> marking_items)
: isolate_(isolate),
collector_(collector),
global_worklist_(global_worklist),
marking_items_(std::move(marking_items)),
remaining_marking_items_(marking_items_.size()),
- generator_(marking_items_.size()),
- slots_(slots) {}
+ generator_(marking_items_.size()) {}
void Run(JobDelegate* delegate) override {
if (delegate->IsJoiningThread()) {
@@ -4921,7 +4872,6 @@ class YoungGenerationMarkingJob : public v8::JobTask {
ProcessMarkingItems(&task);
task.EmptyMarkingWorklist();
task.FlushLiveBytes();
- *slots_ += task.slots();
}
if (FLAG_trace_minor_mc_parallel_marking) {
PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
@@ -4952,12 +4902,10 @@ class YoungGenerationMarkingJob : public v8::JobTask {
std::vector<PageMarkingItem> marking_items_;
std::atomic_size_t remaining_marking_items_{0};
IndexGenerator generator_;
- std::atomic<int>* slots_;
};
void MinorMarkCompactCollector::MarkRootSetInParallel(
RootMarkingVisitor* root_visitor) {
- std::atomic<int> slots;
{
std::vector<PageMarkingItem> marking_items;
@@ -4992,14 +4940,12 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
V8::GetCurrentPlatform()
->PostJob(v8::TaskPriority::kUserBlocking,
std::make_unique<YoungGenerationMarkingJob>(
- isolate(), this, worklist(), std::move(marking_items),
- &slots))
+ isolate(), this, worklist(), std::move(marking_items)))
->Join();
DCHECK(worklist()->IsEmpty());
}
}
- old_to_new_slots_ = slots;
}
void MinorMarkCompactCollector::MarkLiveObjects() {
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 2663eb9aec..733588ae80 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -238,8 +238,6 @@ class MarkCompactCollectorBase {
int NumberOfParallelCompactionTasks();
Heap* heap_;
- // Number of old to new slots. Should be computed during MarkLiveObjects.
- base::Optional<size_t> old_to_new_slots_;
};
class MinorMarkingState final
@@ -569,6 +567,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
unsigned epoch() const { return epoch_; }
+ BytecodeFlushMode bytecode_flush_mode() const { return bytecode_flush_mode_; }
+
explicit MarkCompactCollector(Heap* heap);
~MarkCompactCollector() override;
@@ -784,6 +784,12 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// around.
unsigned epoch_ = 0;
+ // Bytecode flushing is disabled when the code coverage mode is changed. Since
+ // that can happen while a GC is happening and we need the
+ // bytecode_flush_mode_ to remain the same throughout a GC, we record this at
+ // the start of each GC.
+ BytecodeFlushMode bytecode_flush_mode_;
+
friend class FullEvacuator;
friend class RecordMigratedSlotVisitor;
};
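The comment above motivates sampling the flush mode once per GC rather than re-reading flags from concurrent markers. A hedged sketch of how a marking-phase consumer would use the cached value (the helper function is illustrative; only bytecode_flush_mode() and the enum come from this diff):

// Illustrative only; not part of the patch.
bool ShouldConsiderFlushing(MarkCompactCollector* collector) {
  // Safe from concurrent marking threads: no flag access, the mode was fixed
  // in StartMarking() via Heap::GetBytecodeFlushMode(isolate).
  return collector->bytecode_flush_mode() !=
         BytecodeFlushMode::kDoNotFlushBytecode;
}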
diff --git a/deps/v8/src/heap/marking-barrier.cc b/deps/v8/src/heap/marking-barrier.cc
index 8e73518d97..130c707f41 100644
--- a/deps/v8/src/heap/marking-barrier.cc
+++ b/deps/v8/src/heap/marking-barrier.cc
@@ -94,36 +94,27 @@ void MarkingBarrier::RecordRelocSlot(Code host, RelocInfo* rinfo,
// static
void MarkingBarrier::ActivateAll(Heap* heap, bool is_compacting) {
heap->marking_barrier()->Activate(is_compacting);
- if (FLAG_local_heaps) {
- heap->safepoint()->IterateLocalHeaps(
- [is_compacting](LocalHeap* local_heap) {
- local_heap->marking_barrier()->Activate(is_compacting);
- });
- }
+ heap->safepoint()->IterateLocalHeaps([is_compacting](LocalHeap* local_heap) {
+ local_heap->marking_barrier()->Activate(is_compacting);
+ });
}
// static
void MarkingBarrier::DeactivateAll(Heap* heap) {
heap->marking_barrier()->Deactivate();
- if (FLAG_local_heaps) {
- heap->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
- local_heap->marking_barrier()->Deactivate();
- });
- }
+ heap->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
+ local_heap->marking_barrier()->Deactivate();
+ });
}
// static
void MarkingBarrier::PublishAll(Heap* heap) {
heap->marking_barrier()->Publish();
- if (FLAG_local_heaps) {
- heap->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
- local_heap->marking_barrier()->Publish();
- });
- }
+ heap->safepoint()->IterateLocalHeaps(
+ [](LocalHeap* local_heap) { local_heap->marking_barrier()->Publish(); });
}
void MarkingBarrier::Publish() {
- DCHECK_IMPLIES(!is_main_thread_barrier_, FLAG_local_heaps);
if (is_activated_) {
worklist_.Publish();
for (auto& it : typed_slots_map_) {
@@ -153,7 +144,6 @@ void MarkingBarrier::DeactivateSpace(NewSpace* space) {
void MarkingBarrier::Deactivate() {
is_activated_ = false;
is_compacting_ = false;
- DCHECK_IMPLIES(!is_main_thread_barrier_, FLAG_local_heaps);
if (is_main_thread_barrier_) {
DeactivateSpace(heap_->old_space());
DeactivateSpace(heap_->map_space());
@@ -191,7 +181,6 @@ void MarkingBarrier::ActivateSpace(NewSpace* space) {
void MarkingBarrier::Activate(bool is_compacting) {
DCHECK(!is_activated_);
DCHECK(worklist_.IsLocalEmpty());
- DCHECK_IMPLIES(!is_main_thread_barrier_, FLAG_local_heaps);
is_compacting_ = is_compacting;
is_activated_ = true;
if (is_main_thread_barrier_) {
diff --git a/deps/v8/src/heap/memory-allocator.cc b/deps/v8/src/heap/memory-allocator.cc
index ce82fb4d1f..fe9975659f 100644
--- a/deps/v8/src/heap/memory-allocator.cc
+++ b/deps/v8/src/heap/memory-allocator.cc
@@ -163,12 +163,14 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryJob : public JobTask {
UnmapFreeMemoryJob& operator=(const UnmapFreeMemoryJob&) = delete;
void Run(JobDelegate* delegate) override {
- TRACE_GC1(tracer_, GCTracer::Scope::BACKGROUND_UNMAPPER,
- ThreadKind::kBackground);
- unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>(
- delegate);
- if (FLAG_trace_unmapper) {
- PrintIsolate(unmapper_->heap_->isolate(), "UnmapFreeMemoryTask Done\n");
+ if (delegate->IsJoiningThread()) {
+ TRACE_GC(tracer_, GCTracer::Scope::UNMAPPER);
+ RunImpl(delegate);
+
+ } else {
+ TRACE_GC1(tracer_, GCTracer::Scope::BACKGROUND_UNMAPPER,
+ ThreadKind::kBackground);
+ RunImpl(delegate);
}
}
@@ -182,6 +184,13 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryJob : public JobTask {
}
private:
+ void RunImpl(JobDelegate* delegate) {
+ unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>(
+ delegate);
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(unmapper_->heap_->isolate(), "UnmapFreeMemoryTask Done\n");
+ }
+ }
Unmapper* const unmapper_;
GCTracer* const tracer_;
};
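The Run() change above follows a common v8::JobTask pattern: the joining (main) thread attributes its time to a foreground GC scope while worker threads use the background scope, with the actual work shared in a private helper. A stand-alone sketch of the pattern (SketchJob and DoWork are illustrative names):

class SketchJob : public v8::JobTask {
 public:
  void Run(v8::JobDelegate* delegate) override {
    if (delegate->IsJoiningThread()) {
      // Foreground: accounted against the joining thread's GC scope.
      DoWork(delegate);
    } else {
      // Background: accounted against a background scope on a worker.
      DoWork(delegate);
    }
  }
  size_t GetMaxConcurrency(size_t worker_count) const override { return 1; }

 private:
  void DoWork(v8::JobDelegate* delegate) {
    while (!delegate->ShouldYield()) {
      // ... pop queued chunks and free them ...
      break;  // illustrative
    }
  }
};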
diff --git a/deps/v8/src/heap/memory-chunk-layout.cc b/deps/v8/src/heap/memory-chunk-layout.cc
index e89a01fb0a..3b437928e6 100644
--- a/deps/v8/src/heap/memory-chunk-layout.cc
+++ b/deps/v8/src/heap/memory-chunk-layout.cc
@@ -41,7 +41,7 @@ size_t MemoryChunkLayout::AllocatableMemoryInCodePage() {
}
intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
- return RoundUp(MemoryChunk::kHeaderSize + Bitmap::kSize, kTaggedSize);
+ return RoundUp(MemoryChunk::kHeaderSize + Bitmap::kSize, kDoubleSize);
}
size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
diff --git a/deps/v8/src/heap/new-spaces.cc b/deps/v8/src/heap/new-spaces.cc
index d6cfe7249e..029b77beb4 100644
--- a/deps/v8/src/heap/new-spaces.cc
+++ b/deps/v8/src/heap/new-spaces.cc
@@ -429,7 +429,7 @@ void NewSpace::ResetParkedAllocationBuffers() {
void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
void NewSpace::Grow() {
- DCHECK_IMPLIES(FLAG_local_heaps, heap()->safepoint()->IsActive());
+ DCHECK(heap()->safepoint()->IsActive());
// Double the semispace size but only up to maximum capacity.
DCHECK(TotalCapacity() < MaximumCapacity());
size_t new_capacity = std::min(
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index c06e589c07..82fc1e73cd 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -36,13 +36,11 @@ class FieldStatsCollector : public ObjectVisitor {
FieldStatsCollector(size_t* tagged_fields_count,
size_t* embedder_fields_count,
size_t* inobject_smi_fields_count,
- size_t* unboxed_double_fields_count,
size_t* boxed_double_fields_count,
size_t* string_data_count, size_t* raw_fields_count)
: tagged_fields_count_(tagged_fields_count),
embedder_fields_count_(embedder_fields_count),
inobject_smi_fields_count_(inobject_smi_fields_count),
- unboxed_double_fields_count_(unboxed_double_fields_count),
boxed_double_fields_count_(boxed_double_fields_count),
string_data_count_(string_data_count),
raw_fields_count_(raw_fields_count) {}
@@ -68,20 +66,9 @@ class FieldStatsCollector : public ObjectVisitor {
*embedder_fields_count_ += field_stats.embedded_fields_count_;
// Smi fields are also included into pointer words.
- DCHECK_LE(
- field_stats.unboxed_double_fields_count_ * kDoubleSize / kTaggedSize,
- raw_fields_count_in_object);
tagged_fields_count_in_object -= field_stats.smi_fields_count_;
*tagged_fields_count_ -= field_stats.smi_fields_count_;
*inobject_smi_fields_count_ += field_stats.smi_fields_count_;
-
- // The rest are data words.
- DCHECK_LE(
- field_stats.unboxed_double_fields_count_ * kDoubleSize / kTaggedSize,
- raw_fields_count_in_object);
- raw_fields_count_in_object -=
- field_stats.unboxed_double_fields_count_ * kDoubleSize / kTaggedSize;
- *unboxed_double_fields_count_ += field_stats.unboxed_double_fields_count_;
} else if (host.IsHeapNumber()) {
DCHECK_LE(kDoubleSize / kTaggedSize, raw_fields_count_in_object);
raw_fields_count_in_object -= kDoubleSize / kTaggedSize;
@@ -117,14 +104,10 @@ class FieldStatsCollector : public ObjectVisitor {
private:
struct JSObjectFieldStats {
- JSObjectFieldStats()
- : embedded_fields_count_(0),
- smi_fields_count_(0),
- unboxed_double_fields_count_(0) {}
+ JSObjectFieldStats() : embedded_fields_count_(0), smi_fields_count_(0) {}
unsigned embedded_fields_count_ : kDescriptorIndexBitCount;
unsigned smi_fields_count_ : kDescriptorIndexBitCount;
- unsigned unboxed_double_fields_count_ : kDescriptorIndexBitCount;
};
std::unordered_map<Map, JSObjectFieldStats, Object::Hasher>
field_stats_cache_;
@@ -134,7 +117,6 @@ class FieldStatsCollector : public ObjectVisitor {
size_t* const tagged_fields_count_;
size_t* const embedder_fields_count_;
size_t* const inobject_smi_fields_count_;
- size_t* const unboxed_double_fields_count_;
size_t* const boxed_double_fields_count_;
size_t* const string_data_count_;
size_t* const raw_fields_count_;
@@ -157,10 +139,6 @@ FieldStatsCollector::GetInobjectFieldStats(Map map) {
FieldIndex index = FieldIndex::ForDescriptor(map, descriptor);
// Stop on first out-of-object field.
if (!index.is_inobject()) break;
- if (details.representation().IsDouble() &&
- map.IsUnboxedDoubleField(index)) {
- ++stats.unboxed_double_fields_count_;
- }
if (details.representation().IsSmi()) {
++stats.smi_fields_count_;
}
@@ -184,7 +162,6 @@ void ObjectStats::ClearObjectStats(bool clear_last_time_stats) {
tagged_fields_count_ = 0;
embedder_fields_count_ = 0;
inobject_smi_fields_count_ = 0;
- unboxed_double_fields_count_ = 0;
boxed_double_fields_count_ = 0;
string_data_count_ = 0;
raw_fields_count_ = 0;
@@ -247,8 +224,6 @@ void ObjectStats::PrintJSON(const char* key) {
embedder_fields_count_ * kEmbedderDataSlotSize);
PrintF(", \"inobject_smi_fields\": %zu",
inobject_smi_fields_count_ * kTaggedSize);
- PrintF(", \"unboxed_double_fields\": %zu",
- unboxed_double_fields_count_ * kDoubleSize);
PrintF(", \"boxed_double_fields\": %zu",
boxed_double_fields_count_ * kDoubleSize);
PrintF(", \"string_data\": %zu", string_data_count_ * kTaggedSize);
@@ -307,8 +282,6 @@ void ObjectStats::Dump(std::stringstream& stream) {
<< (embedder_fields_count_ * kEmbedderDataSlotSize);
stream << ",\"inobject_smi_fields\": "
<< (inobject_smi_fields_count_ * kTaggedSize);
- stream << ",\"unboxed_double_fields\": "
- << (unboxed_double_fields_count_ * kDoubleSize);
stream << ",\"boxed_double_fields\": "
<< (boxed_double_fields_count_ * kDoubleSize);
stream << ",\"string_data\": " << (string_data_count_ * kTaggedSize);
@@ -475,7 +448,6 @@ ObjectStatsCollectorImpl::ObjectStatsCollectorImpl(Heap* heap,
field_stats_collector_(
&stats->tagged_fields_count_, &stats->embedder_fields_count_,
&stats->inobject_smi_fields_count_,
- &stats->unboxed_double_fields_count_,
&stats->boxed_double_fields_count_, &stats->string_data_count_,
&stats->raw_fields_count_) {}
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index fe27095ee5..7255e0dabd 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -167,7 +167,6 @@ class ObjectStats {
size_t tagged_fields_count_;
size_t embedder_fields_count_;
size_t inobject_smi_fields_count_;
- size_t unboxed_double_fields_count_;
size_t boxed_double_fields_count_;
size_t string_data_count_;
size_t raw_fields_count_;
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 7dd7da570e..d7127c5c47 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -14,46 +14,47 @@
namespace v8 {
namespace internal {
-#define TYPED_VISITOR_ID_LIST(V) \
- V(AllocationSite) \
- V(BigInt) \
- V(ByteArray) \
- V(BytecodeArray) \
- V(Cell) \
- V(Code) \
- V(CodeDataContainer) \
- V(CoverageInfo) \
- V(DataHandler) \
- V(EmbedderDataArray) \
- V(EphemeronHashTable) \
- V(FeedbackCell) \
- V(FeedbackMetadata) \
- V(FixedDoubleArray) \
- V(JSArrayBuffer) \
- V(JSDataView) \
- V(JSFunction) \
- V(JSObject) \
- V(JSTypedArray) \
- V(WeakCell) \
- V(JSWeakCollection) \
- V(JSWeakRef) \
- V(Map) \
- V(NativeContext) \
- V(PreparseData) \
- V(PropertyArray) \
- V(PropertyCell) \
- V(PrototypeInfo) \
- V(SmallOrderedHashMap) \
- V(SmallOrderedHashSet) \
- V(SmallOrderedNameDictionary) \
- V(SourceTextModule) \
- V(Symbol) \
- V(SyntheticModule) \
- V(TransitionArray) \
- V(WasmArray) \
- V(WasmIndirectFunctionTable) \
- V(WasmInstanceObject) \
- V(WasmStruct) \
+#define TYPED_VISITOR_ID_LIST(V) \
+ V(AllocationSite) \
+ V(BigInt) \
+ V(ByteArray) \
+ V(BytecodeArray) \
+ V(Cell) \
+ V(Code) \
+ V(CodeDataContainer) \
+ V(CoverageInfo) \
+ V(DataHandler) \
+ V(EmbedderDataArray) \
+ V(EphemeronHashTable) \
+ V(FeedbackCell) \
+ V(FeedbackMetadata) \
+ V(FixedDoubleArray) \
+ V(JSArrayBuffer) \
+ V(JSDataView) \
+ V(JSFunction) \
+ V(JSObject) \
+ V(JSTypedArray) \
+ V(WeakCell) \
+ V(JSWeakCollection) \
+ V(JSWeakRef) \
+ V(Map) \
+ V(NativeContext) \
+ V(PreparseData) \
+ V(PropertyArray) \
+ V(PropertyCell) \
+ V(PrototypeInfo) \
+ V(SmallOrderedHashMap) \
+ V(SmallOrderedHashSet) \
+ V(SmallOrderedNameDictionary) \
+ V(SourceTextModule) \
+ V(SwissNameDictionary) \
+ V(Symbol) \
+ V(SyntheticModule) \
+ V(TransitionArray) \
+ V(WasmArray) \
+ V(WasmIndirectFunctionTable) \
+ V(WasmInstanceObject) \
+ V(WasmStruct) \
V(WasmTypeInfo)
#define FORWARD_DECLARE(TypeName) class TypeName;
diff --git a/deps/v8/src/heap/paged-spaces.h b/deps/v8/src/heap/paged-spaces.h
index 233bd60d35..5168f0f053 100644
--- a/deps/v8/src/heap/paged-spaces.h
+++ b/deps/v8/src/heap/paged-spaces.h
@@ -326,9 +326,7 @@ class V8_EXPORT_PRIVATE PagedSpace
base::Optional<base::MutexGuard> guard_;
};
- bool SupportsConcurrentAllocation() {
- return FLAG_concurrent_allocation && !is_local_space();
- }
+ bool SupportsConcurrentAllocation() { return !is_local_space(); }
// Set space linear allocation area.
void SetTopAndLimit(Address top, Address limit);
diff --git a/deps/v8/src/heap/safepoint.cc b/deps/v8/src/heap/safepoint.cc
index ad5c4cbe53..6ae7b9e680 100644
--- a/deps/v8/src/heap/safepoint.cc
+++ b/deps/v8/src/heap/safepoint.cc
@@ -18,12 +18,11 @@ GlobalSafepoint::GlobalSafepoint(Heap* heap)
: heap_(heap), local_heaps_head_(nullptr), active_safepoint_scopes_(0) {}
void GlobalSafepoint::EnterSafepointScope() {
- if (!FLAG_local_heaps) return;
-
if (++active_safepoint_scopes_ > 1) return;
- TimedHistogramScope timer(heap_->isolate()->counters()->stop_the_world());
- TRACE_GC(heap_->tracer(), GCTracer::Scope::STOP_THE_WORLD);
+ TimedHistogramScope timer(
+ heap_->isolate()->counters()->gc_time_to_safepoint());
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::TIME_TO_SAFEPOINT);
local_heaps_mutex_.Lock();
@@ -53,8 +52,6 @@ void GlobalSafepoint::EnterSafepointScope() {
}
void GlobalSafepoint::LeaveSafepointScope() {
- if (!FLAG_local_heaps) return;
-
DCHECK_GT(active_safepoint_scopes_, 0);
if (--active_safepoint_scopes_ > 0) return;
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 9eb1677180..a5c8a41ea5 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -410,7 +410,13 @@ void ScavengerCollector::CollectGarbage() {
MemoryChunk* chunk;
while (empty_chunks.Pop(kMainThreadId, &chunk)) {
- RememberedSet<OLD_TO_NEW>::CheckPossiblyEmptyBuckets(chunk);
+ // Since sweeping was already restarted, only check chunks that have
+ // already been swept.
+ if (chunk->SweepingDone()) {
+ RememberedSet<OLD_TO_NEW>::CheckPossiblyEmptyBuckets(chunk);
+ } else {
+ chunk->possibly_empty_buckets()->Release();
+ }
}
#ifdef DEBUG
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 030f38fd2c..878f47eb00 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -24,7 +24,6 @@
#include "src/objects/instance-type-inl.h"
#include "src/objects/js-generator.h"
#include "src/objects/js-weak-refs.h"
-#include "src/objects/layout-descriptor.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/lookup-cache.h"
#include "src/objects/map.h"
@@ -163,12 +162,6 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
SKIP_WRITE_BARRIER);
map.set_instance_type(instance_type);
map.set_instance_size(instance_size);
- // Initialize to only containing tagged fields.
- if (FLAG_unbox_double_fields) {
- map.set_layout_descriptor(LayoutDescriptor::FastPointerLayout(),
- kReleaseStore);
- }
- // GetVisitorId requires a properly initialized LayoutDescriptor.
map.set_visitor_id(Map::GetVisitorId(map));
map.set_inobject_properties_start_or_constructor_function_index(0);
DCHECK(!map.IsJSObjectMap());
@@ -192,12 +185,8 @@ void Heap::FinalizePartialMap(Map map) {
map.set_dependent_code(DependentCode::cast(roots.empty_weak_fixed_array()));
map.set_raw_transitions(MaybeObject::FromSmi(Smi::zero()));
map.SetInstanceDescriptors(isolate(), roots.empty_descriptor_array(), 0);
- if (FLAG_unbox_double_fields) {
- map.set_layout_descriptor(LayoutDescriptor::FastPointerLayout(),
- kReleaseStore);
- }
map.set_prototype(roots.null_value());
- map.set_constructor_or_backpointer(roots.null_value());
+ map.set_constructor_or_back_pointer(roots.null_value());
}
AllocationResult Heap::Allocate(Map map, AllocationType allocation_type) {
@@ -470,6 +459,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(ORDERED_HASH_SET_TYPE, ordered_hash_set)
ALLOCATE_VARSIZE_MAP(ORDERED_NAME_DICTIONARY_TYPE, ordered_name_dictionary)
ALLOCATE_VARSIZE_MAP(NAME_DICTIONARY_TYPE, name_dictionary)
+ ALLOCATE_VARSIZE_MAP(SWISS_NAME_DICTIONARY_TYPE, swiss_name_dictionary)
ALLOCATE_VARSIZE_MAP(GLOBAL_DICTIONARY_TYPE, global_dictionary)
ALLOCATE_VARSIZE_MAP(NUMBER_DICTIONARY_TYPE, number_dictionary)
ALLOCATE_VARSIZE_MAP(SIMPLE_NUMBER_DICTIONARY_TYPE,
@@ -504,14 +494,6 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(CODE_DATA_CONTAINER_TYPE, CodeDataContainer::kSize,
code_data_container)
- // The wasm_rttcanon_* maps are never used for real objects, only as
- // sentinels. They are maps so that they fit in with their subtype maps
- // (which are real maps).
- ALLOCATE_MAP(WASM_STRUCT_TYPE, 0, wasm_rttcanon_eqref)
- ALLOCATE_MAP(WASM_STRUCT_TYPE, 0, wasm_rttcanon_externref)
- ALLOCATE_MAP(WASM_STRUCT_TYPE, 0, wasm_rttcanon_funcref)
- ALLOCATE_MAP(WASM_STRUCT_TYPE, 0, wasm_rttcanon_i31ref)
- ALLOCATE_MAP(WASM_STRUCT_TYPE, 0, wasm_rttcanon_anyref)
ALLOCATE_MAP(WASM_TYPE_INFO_TYPE, WasmTypeInfo::kSize, wasm_type_info)
ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
@@ -532,16 +514,16 @@ bool Heap::CreateInitialMaps() {
AllocationType::kReadOnly);
if (!alloc.To(&obj)) return false;
obj.set_map_after_allocation(roots.scope_info_map(), SKIP_WRITE_BARRIER);
- FixedArray::cast(obj).set_length(ScopeInfo::kVariablePartIndex);
+ ScopeInfo::cast(obj).set_length(ScopeInfo::kVariablePartIndex);
int flags = ScopeInfo::IsEmptyBit::encode(true);
DCHECK_EQ(ScopeInfo::LanguageModeBit::decode(flags), LanguageMode::kSloppy);
DCHECK_EQ(ScopeInfo::ReceiverVariableBits::decode(flags),
VariableAllocationInfo::NONE);
DCHECK_EQ(ScopeInfo::FunctionVariableBits::decode(flags),
VariableAllocationInfo::NONE);
- ScopeInfo::cast(obj).SetFlags(flags);
- ScopeInfo::cast(obj).SetContextLocalCount(0);
- ScopeInfo::cast(obj).SetParameterCount(0);
+ ScopeInfo::cast(obj).set_flags(flags);
+ ScopeInfo::cast(obj).set_context_local_count(0);
+ ScopeInfo::cast(obj).set_parameter_count(0);
}
set_empty_scope_info(ScopeInfo::cast(obj));
@@ -621,74 +603,6 @@ bool Heap::CreateInitialMaps() {
set_empty_closure_feedback_cell_array(ClosureFeedbackCellArray::cast(obj));
}
- // Set up the WasmTypeInfo objects for built-in generic Wasm RTTs.
- // anyref:
- {
- /* Subtypes. We do not cache subtypes for (rtt.canon any). */
- int slot_count = ArrayList::kHeaderFields;
- if (!AllocateRaw(ArrayList::SizeFor(slot_count), AllocationType::kOld)
- .To(&obj)) {
- return false;
- }
- obj.set_map_after_allocation(roots.array_list_map());
- ArrayList subtypes = ArrayList::cast(obj);
- subtypes.set_length(slot_count);
- subtypes.SetLength(0);
- /* TypeInfo */
- if (!AllocateRaw(WasmTypeInfo::kSize, AllocationType::kOld).To(&obj)) {
- return false;
- }
- obj.set_map_after_allocation(roots.wasm_type_info_map(),
- SKIP_WRITE_BARRIER);
- WasmTypeInfo type_info = WasmTypeInfo::cast(obj);
- type_info.set_subtypes(subtypes);
- type_info.set_supertypes(roots.empty_fixed_array());
- type_info.set_parent(roots.null_map());
- type_info.clear_foreign_address(isolate());
- wasm_rttcanon_anyref_map().set_wasm_type_info(type_info);
- }
-
- // Rest of builtin types:
-#define ALLOCATE_TYPE_INFO(which) \
- { \
- /* Subtypes */ \
- int slot_count = ArrayList::kHeaderFields; \
- if (!AllocateRaw(ArrayList::SizeFor(slot_count), AllocationType::kOld) \
- .To(&obj)) { \
- return false; \
- } \
- obj.set_map_after_allocation(roots.array_list_map()); \
- ArrayList subtypes = ArrayList::cast(obj); \
- subtypes.set_length(slot_count); \
- subtypes.SetLength(0); \
- /* Supertypes */ \
- if (!AllocateRaw(FixedArray::SizeFor(1), AllocationType::kOld).To(&obj)) { \
- return false; \
- } \
- obj.set_map_after_allocation(roots.fixed_array_map(), SKIP_WRITE_BARRIER); \
- FixedArray supertypes = FixedArray::cast(obj); \
- supertypes.set_length(1); \
- supertypes.set(0, wasm_rttcanon_anyref_map()); \
- /* TypeInfo */ \
- if (!AllocateRaw(WasmTypeInfo::kSize, AllocationType::kOld).To(&obj)) { \
- return false; \
- } \
- obj.set_map_after_allocation(roots.wasm_type_info_map(), \
- SKIP_WRITE_BARRIER); \
- WasmTypeInfo type_info = WasmTypeInfo::cast(obj); \
- type_info.set_subtypes(subtypes); \
- type_info.set_supertypes(supertypes); \
- type_info.set_parent(wasm_rttcanon_anyref_map()); \
- type_info.clear_foreign_address(isolate()); \
- wasm_rttcanon_##which##_map().set_wasm_type_info(type_info); \
- }
-
- ALLOCATE_TYPE_INFO(eqref)
- ALLOCATE_TYPE_INFO(externref)
- ALLOCATE_TYPE_INFO(funcref)
- ALLOCATE_TYPE_INFO(i31ref)
-#undef ALLOCATE_TYPE_INFO
-
DCHECK(!InYoungGeneration(roots.empty_fixed_array()));
roots.bigint_map().SetConstructorFunctionIndex(
@@ -905,6 +819,11 @@ void Heap::CreateInitialObjects() {
.ToHandleChecked();
set_empty_ordered_property_dictionary(*empty_ordered_property_dictionary);
+ // Allocate the empty SwissNameDictionary
+ Handle<SwissNameDictionary> empty_swiss_property_dictionary =
+ factory->CreateCanonicalEmptySwissNameDictionary();
+ set_empty_swiss_property_dictionary(*empty_swiss_property_dictionary);
+
// Allocate the empty FeedbackMetadata.
Handle<FeedbackMetadata> empty_feedback_metadata =
factory->NewFeedbackMetadata(0, 0, AllocationType::kReadOnly);
@@ -931,125 +850,23 @@ void Heap::CreateInitialObjects() {
script->set_origin_options(ScriptOriginOptions(true, false));
set_empty_script(*script);
- {
- Handle<PropertyCell> cell = factory->NewPropertyCell(
- factory->empty_string(), AllocationType::kReadOnly);
- cell->set_value(roots.the_hole_value());
- set_empty_property_cell(*cell);
- }
-
// Protectors
- {
- Handle<PropertyCell> cell =
- factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
- set_array_constructor_protector(*cell);
- }
-
- {
- Handle<PropertyCell> cell =
- factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
- set_no_elements_protector(*cell);
- }
-
- {
- Handle<PropertyCell> cell =
- factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
- set_array_iterator_protector(*cell);
- }
-
- {
- Handle<PropertyCell> cell =
- factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
- set_map_iterator_protector(*cell);
- }
-
- {
- Handle<PropertyCell> cell =
- factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
- set_set_iterator_protector(*cell);
- }
-
- {
- Handle<PropertyCell> cell =
- factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
- set_is_concat_spreadable_protector(*cell);
- }
-
- {
- Handle<PropertyCell> cell =
- factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
- set_array_species_protector(*cell);
- }
-
- {
- Handle<PropertyCell> cell =
- factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
- set_typed_array_species_protector(*cell);
- }
-
- {
- Handle<PropertyCell> cell =
- factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
- set_promise_species_protector(*cell);
- }
-
- {
- Handle<PropertyCell> cell =
- factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
- set_regexp_species_protector(*cell);
- }
-
- {
- Handle<PropertyCell> cell =
- factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
- set_string_iterator_protector(*cell);
- }
-
- {
- Handle<PropertyCell> cell =
- factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
- set_string_length_protector(*cell);
- }
-
- {
- Handle<PropertyCell> cell =
- factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
- set_array_buffer_detaching_protector(*cell);
- }
-
- {
- Handle<PropertyCell> cell =
- factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
- set_promise_hook_protector(*cell);
- }
-
- {
- Handle<PropertyCell> cell =
- factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
- set_promise_resolve_protector(*cell);
- }
-
- {
- Handle<PropertyCell> cell =
- factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
- set_promise_then_protector(*cell);
- }
+ set_array_buffer_detaching_protector(*factory->NewProtector());
+ set_array_constructor_protector(*factory->NewProtector());
+ set_array_iterator_protector(*factory->NewProtector());
+ set_array_species_protector(*factory->NewProtector());
+ set_is_concat_spreadable_protector(*factory->NewProtector());
+ set_map_iterator_protector(*factory->NewProtector());
+ set_no_elements_protector(*factory->NewProtector());
+ set_promise_hook_protector(*factory->NewProtector());
+ set_promise_resolve_protector(*factory->NewProtector());
+ set_promise_species_protector(*factory->NewProtector());
+ set_promise_then_protector(*factory->NewProtector());
+ set_regexp_species_protector(*factory->NewProtector());
+ set_set_iterator_protector(*factory->NewProtector());
+ set_string_iterator_protector(*factory->NewProtector());
+ set_string_length_protector(*factory->NewProtector());
+ set_typed_array_species_protector(*factory->NewProtector());
set_serialized_objects(roots.empty_fixed_array());
set_serialized_global_proxy_sizes(roots.empty_fixed_array());
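
The hunk above collapses sixteen near-identical protector blocks, each of which allocated a PropertyCell keyed by the empty string and stored Protectors::kProtectorValid, into one factory->NewProtector() call per protector root; NewProtector() presumably wraps that same boilerplate. A minimal standalone C++ sketch of the consolidation pattern, using simplified stand-in types rather than V8's Factory/PropertyCell:

// Standalone sketch of the "hoist repeated initialization into a factory
// helper" pattern used in the hunk above. Protector and Factory are
// simplified stand-ins, not V8's actual classes.
#include <memory>

struct Protector {
  int value;  // Models PropertyCell::value holding the "valid" sentinel.
};

class Factory {
 public:
  static constexpr int kProtectorValid = 1;

  // One helper replaces the sixteen copy-pasted allocation blocks.
  std::unique_ptr<Protector> NewProtector() {
    auto cell = std::make_unique<Protector>();
    cell->value = kProtectorValid;
    return cell;
  }
};

int main() {
  Factory factory;
  // Each protector root becomes a single call instead of a five-line block.
  auto array_species_protector = factory.NewProtector();
  auto no_elements_protector = factory.NewProtector();
  return (array_species_protector->value == Factory::kProtectorValid &&
          no_elements_protector->value == Factory::kProtectorValid)
             ? 0
             : 1;
}

The change is purely mechanical: one initialization path to audit instead of sixteen copies that can drift apart.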
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index 8335bc5e88..1494102df9 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -255,17 +255,13 @@ void AccessorAssembler::HandleLoadField(TNode<JSObject> holder,
exit_point->Return(LoadObjectField(holder, offset));
BIND(&is_double);
- if (FLAG_unbox_double_fields) {
- *var_double_value = LoadObjectField<Float64T>(holder, offset);
- } else {
- TNode<Object> heap_number = LoadObjectField(holder, offset);
- // This is not an "old" Smi value from before a Smi->Double transition.
- // Rather, it's possible that since the last update of this IC, the Double
- // field transitioned to a Tagged field, and was then assigned a Smi.
- GotoIf(TaggedIsSmi(heap_number), miss);
- GotoIfNot(IsHeapNumber(CAST(heap_number)), miss);
- *var_double_value = LoadHeapNumberValue(CAST(heap_number));
- }
+ TNode<Object> heap_number = LoadObjectField(holder, offset);
+ // This is not an "old" Smi value from before a Smi->Double transition.
+ // Rather, it's possible that since the last update of this IC, the Double
+ // field transitioned to a Tagged field, and was then assigned a Smi.
+ GotoIf(TaggedIsSmi(heap_number), miss);
+ GotoIfNot(IsHeapNumber(CAST(heap_number)), miss);
+ *var_double_value = LoadHeapNumberValue(CAST(heap_number));
Goto(rebox_double);
}
@@ -278,13 +274,11 @@ void AccessorAssembler::HandleLoadField(TNode<JSObject> holder,
exit_point->Return(value);
BIND(&is_double);
- if (!FLAG_unbox_double_fields) {
- // This is not an "old" Smi value from before a Smi->Double transition.
- // Rather, it's possible that since the last update of this IC, the Double
- // field transitioned to a Tagged field, and was then assigned a Smi.
- GotoIf(TaggedIsSmi(value), miss);
- GotoIfNot(IsHeapNumber(CAST(value)), miss);
- }
+ // This is not an "old" Smi value from before a Smi->Double transition.
+ // Rather, it's possible that since the last update of this IC, the Double
+ // field transitioned to a Tagged field, and was then assigned a Smi.
+ GotoIf(TaggedIsSmi(value), miss);
+ GotoIfNot(IsHeapNumber(CAST(value)), miss);
*var_double_value = LoadHeapNumberValue(CAST(value));
Goto(rebox_double);
}
@@ -941,6 +935,8 @@ void AccessorAssembler::HandleLoadICProtoHandler(
BIND(&is_smi);
{
+ // If the "maybe_holder_or_constant" in the handler is a smi, then it's
+ // guaranteed that it's not a holder object, but a constant value.
CSA_ASSERT(
this,
WordEqual(
@@ -1066,6 +1062,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
properties, CAST(p->name()), &dictionary_found, &var_name_index, miss);
BIND(&dictionary_found);
{
+ Label if_constant(this), done(this);
TNode<Uint32T> details =
LoadDetailsByKeyIndex(properties, var_name_index.value());
// Check that the property is a writable data property (no accessor).
@@ -1074,9 +1071,26 @@ void AccessorAssembler::HandleStoreICHandlerCase(
STATIC_ASSERT(kData == 0);
GotoIf(IsSetWord32(details, kTypeAndReadOnlyMask), miss);
+ if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL) {
+ GotoIf(IsPropertyDetailsConst(details), &if_constant);
+ }
+
StoreValueByKeyIndex<NameDictionary>(properties, var_name_index.value(),
p->value());
Return(p->value());
+
+ if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL) {
+ BIND(&if_constant);
+ {
+ TNode<Object> prev_value =
+ LoadValueByKeyIndex(properties, var_name_index.value());
+ BranchIfSameValue(prev_value, p->value(), &done, miss,
+ SameValueMode::kNumbersOnly);
+ }
+
+ BIND(&done);
+ Return(p->value());
+ }
}
BIND(&if_fast_smi);
@@ -1297,8 +1311,9 @@ void AccessorAssembler::CheckFieldType(TNode<DescriptorArray> descriptors,
}
TNode<BoolT> AccessorAssembler::IsPropertyDetailsConst(TNode<Uint32T> details) {
- return Word32Equal(DecodeWord32<PropertyDetails::ConstnessField>(details),
- Int32Constant(static_cast<int32_t>(VariableMode::kConst)));
+ return Word32Equal(
+ DecodeWord32<PropertyDetails::ConstnessField>(details),
+ Int32Constant(static_cast<int32_t>(PropertyConstness::kConst)));
}
void AccessorAssembler::OverwriteExistingFastDataProperty(
@@ -1346,36 +1361,21 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
BIND(&double_rep);
{
TNode<Float64T> double_value = ChangeNumberToFloat64(CAST(value));
- if (FLAG_unbox_double_fields) {
- if (do_transitioning_store) {
- StoreMap(object, object_map);
- } else {
- Label store_value(this);
- GotoIfNot(IsPropertyDetailsConst(details), &store_value);
- TNode<Float64T> current_value =
- LoadObjectField<Float64T>(object, field_offset);
- BranchIfSameNumberValue(current_value, double_value, &store_value,
- slow);
- BIND(&store_value);
- }
- StoreObjectFieldNoWriteBarrier(object, field_offset, double_value);
+ if (do_transitioning_store) {
+ TNode<HeapNumber> heap_number =
+ AllocateHeapNumberWithValue(double_value);
+ StoreMap(object, object_map);
+ StoreObjectField(object, field_offset, heap_number);
} else {
- if (do_transitioning_store) {
- TNode<HeapNumber> heap_number =
- AllocateHeapNumberWithValue(double_value);
- StoreMap(object, object_map);
- StoreObjectField(object, field_offset, heap_number);
- } else {
- TNode<HeapNumber> heap_number =
- CAST(LoadObjectField(object, field_offset));
- Label store_value(this);
- GotoIfNot(IsPropertyDetailsConst(details), &store_value);
- TNode<Float64T> current_value = LoadHeapNumberValue(heap_number);
- BranchIfSameNumberValue(current_value, double_value, &store_value,
- slow);
- BIND(&store_value);
- StoreHeapNumberValue(heap_number, double_value);
- }
+ TNode<HeapNumber> heap_number =
+ CAST(LoadObjectField(object, field_offset));
+ Label store_value(this);
+ GotoIfNot(IsPropertyDetailsConst(details), &store_value);
+ TNode<Float64T> current_value = LoadHeapNumberValue(heap_number);
+ BranchIfSameNumberValue(current_value, double_value, &store_value,
+ slow);
+ BIND(&store_value);
+ StoreHeapNumberValue(heap_number, double_value);
}
Goto(&done);
}
@@ -1883,13 +1883,6 @@ void AccessorAssembler::HandleStoreFieldAndReturn(
Label property_and_offset_ready(this);
- // If we are unboxing double fields, and this is an in-object field, the
- // property_storage and offset are already pointing to the double-valued
- // field.
- if (FLAG_unbox_double_fields) {
- GotoIf(is_inobject, &property_and_offset_ready);
- }
-
// Store the double value directly into the mutable HeapNumber.
TNode<Object> field = LoadObjectField(property_storage, offset);
CSA_ASSERT(this, IsHeapNumber(CAST(field)));
@@ -2762,12 +2755,15 @@ void AccessorAssembler::LoadIC(const LoadICParameters* p) {
TVARIABLE(MaybeObject, var_handler);
Label if_handler(this, &var_handler), non_inlined(this, Label::kDeferred),
- try_polymorphic(this), miss(this, Label::kDeferred);
+ try_polymorphic(this), miss(this, Label::kDeferred),
+ no_feedback(this, Label::kDeferred);
TNode<Map> lookup_start_object_map =
LoadReceiverMap(p->receiver_and_lookup_start_object());
GotoIf(IsDeprecatedMap(lookup_start_object_map), &miss);
+ GotoIf(IsUndefined(p->vector()), &no_feedback);
+
// Check monomorphic case.
TNode<MaybeObject> feedback =
TryMonomorphicCase(p->slot(), CAST(p->vector()), lookup_start_object_map,
@@ -2795,6 +2791,16 @@ void AccessorAssembler::LoadIC(const LoadICParameters* p) {
&if_handler, &miss, &direct_exit);
}
+ BIND(&no_feedback);
+ {
+ Comment("LoadIC_nofeedback");
+ // Call into the stub that implements the non-inlined parts of LoadIC.
+ direct_exit.ReturnCallStub(
+ Builtins::CallableFor(isolate(), Builtins::kLoadIC_NoFeedback),
+ p->context(), p->receiver(), p->name(),
+ SmiConstant(FeedbackSlotKind::kLoadProperty));
+ }
+
BIND(&miss);
direct_exit.ReturnCallRuntime(Runtime::kLoadIC_Miss, p->context(),
p->receiver_and_lookup_start_object(),
@@ -2890,8 +2896,7 @@ void AccessorAssembler::LoadIC_NoFeedback(const LoadICParameters* p,
// Special case for Function.prototype load, because it's very common
// for ICs that are only executed once (MyFunc.prototype.foo = ...).
Label not_function_prototype(this, Label::kDeferred);
- GotoIfNot(InstanceTypeEqual(instance_type, JS_FUNCTION_TYPE),
- &not_function_prototype);
+ GotoIfNot(IsJSFunctionInstanceType(instance_type), &not_function_prototype);
GotoIfNot(IsPrototypeString(p->name()), &not_function_prototype);
GotoIfPrototypeRequiresRuntimeLookup(CAST(lookup_start_object),
@@ -3068,36 +3073,15 @@ void AccessorAssembler::ScriptContextTableLookup(
ScriptContextTable::kFirstContextSlotIndex * kTaggedSize));
TNode<ScopeInfo> scope_info =
CAST(LoadContextElement(script_context, Context::SCOPE_INFO_INDEX));
- TNode<IntPtrT> length = LoadAndUntagFixedArrayBaseLength(scope_info);
- GotoIf(IntPtrLessThanOrEqual(length, IntPtrConstant(0)), &loop);
-
- TVARIABLE(IntPtrT, scope_var_index,
- IntPtrConstant(ScopeInfo::kVariablePartIndex - 1));
- TNode<IntPtrT> num_scope_vars = SmiUntag(CAST(LoadFixedArrayElement(
- scope_info, IntPtrConstant(ScopeInfo::Fields::kContextLocalCount))));
- TNode<IntPtrT> end_index = IntPtrAdd(
- num_scope_vars, IntPtrConstant(ScopeInfo::kVariablePartIndex));
- Label loop_scope_info(this, &scope_var_index);
- Goto(&loop_scope_info);
-
- BIND(&loop_scope_info);
- {
- scope_var_index = IntPtrAdd(scope_var_index.value(), IntPtrConstant(1));
- GotoIf(IntPtrGreaterThanOrEqual(scope_var_index.value(), end_index),
- &loop);
-
- TNode<Object> var_name =
- LoadFixedArrayElement(scope_info, scope_var_index.value(), 0);
- GotoIf(TaggedNotEqual(var_name, name), &loop_scope_info);
-
- TNode<IntPtrT> var_index =
- IntPtrAdd(IntPtrConstant(Context::MIN_CONTEXT_SLOTS),
- IntPtrSub(scope_var_index.value(),
- IntPtrConstant(ScopeInfo::kVariablePartIndex)));
- TNode<Object> result = LoadContextElement(script_context, var_index);
- GotoIf(IsTheHole(result), found_hole);
- Return(result);
- }
+
+ TNode<IntPtrT> context_local_index =
+ IndexOfLocalName(scope_info, name, &loop);
+
+ TNode<IntPtrT> var_index = IntPtrAdd(
+ IntPtrConstant(Context::MIN_CONTEXT_SLOTS), context_local_index);
+ TNode<Object> result = LoadContextElement(script_context, var_index);
+ GotoIf(IsTheHole(result), found_hole);
+ Return(result);
}
}
@@ -3435,7 +3419,10 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
}
void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
- Label if_lexical_var(this), if_heapobject(this);
+ Label no_feedback(this, Label::kDeferred), if_lexical_var(this),
+ if_heapobject(this);
+ GotoIf(IsUndefined(pp->vector()), &no_feedback);
+
TNode<MaybeObject> maybe_weak_ref =
LoadFeedbackVectorSlot(CAST(pp->vector()), pp->slot());
Branch(TaggedIsSmi(maybe_weak_ref), &if_lexical_var, &if_heapobject);
@@ -3490,6 +3477,12 @@ void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
StoreContextElement(script_context, slot_index, pp->value());
Return(pp->value());
}
+
+ BIND(&no_feedback);
+ {
+ TailCallRuntime(Runtime::kStoreGlobalICNoFeedback_Miss, pp->context(),
+ pp->value(), pp->name());
+ }
}
void AccessorAssembler::StoreGlobalIC_PropertyCellCase(
@@ -3517,8 +3510,7 @@ void AccessorAssembler::StoreGlobalIC_PropertyCellCase(
GotoIf(Word32Equal(type, Int32Constant(
static_cast<int>(PropertyCellType::kConstant))),
&constant);
-
- GotoIf(IsTheHole(cell_contents), miss);
+ CSA_ASSERT(this, Word32BinaryNot(IsTheHole(cell_contents)));
GotoIf(Word32Equal(
type, Int32Constant(static_cast<int>(PropertyCellType::kMutable))),
@@ -3550,6 +3542,9 @@ void AccessorAssembler::StoreGlobalIC_PropertyCellCase(
BIND(&constant);
{
+ // Since |value| is never the hole, the equality check below also handles an
+ // invalidated property cell correctly.
+ CSA_ASSERT(this, Word32BinaryNot(IsTheHole(value)));
GotoIfNot(TaggedEqual(cell_contents, value), miss);
exit_point->Return(value);
}
@@ -3842,6 +3837,18 @@ void AccessorAssembler::GenerateLoadICTrampoline() {
TailCallBuiltin(Builtins::kLoadIC, context, receiver, name, slot, vector);
}
+void AccessorAssembler::GenerateLoadICBaseline() {
+ using Descriptor = LoadBaselineDescriptor;
+
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
+ TNode<Context> context = LoadContextFromBaseline();
+
+ TailCallBuiltin(Builtins::kLoadIC, context, receiver, name, slot, vector);
+}
+
void AccessorAssembler::GenerateLoadICTrampoline_Megamorphic() {
using Descriptor = LoadDescriptor;
@@ -3870,6 +3877,20 @@ void AccessorAssembler::GenerateLoadSuperIC() {
LoadSuperIC(&p);
}
+void AccessorAssembler::GenerateLoadSuperICBaseline() {
+ using Descriptor = LoadWithReceiverBaselineDescriptor;
+
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto lookup_start_object = Parameter<Object>(Descriptor::kLookupStartObject);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
+ TNode<Context> context = LoadContextFromBaseline();
+
+ TailCallBuiltin(Builtins::kLoadSuperIC, context, receiver,
+ lookup_start_object, name, slot, vector);
+}
+
void AccessorAssembler::GenerateLoadGlobalIC_NoFeedback() {
using Descriptor = LoadGlobalNoFeedbackDescriptor;
@@ -3912,6 +3933,79 @@ void AccessorAssembler::GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode) {
TailCallStub(callable, context, name, slot, vector);
}
+void AccessorAssembler::GenerateLoadGlobalICBaseline(TypeofMode typeof_mode) {
+ using Descriptor = LoadGlobalBaselineDescriptor;
+
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
+ TNode<Context> context = LoadContextFromBaseline();
+
+ Callable callable =
+ CodeFactory::LoadGlobalICInOptimizedCode(isolate(), typeof_mode);
+ TailCallStub(callable, context, name, slot, vector);
+}
+
+void AccessorAssembler::GenerateLookupContextBaseline(TypeofMode typeof_mode) {
+ using Descriptor = LookupBaselineDescriptor;
+ auto depth = Parameter<TaggedIndex>(Descriptor::kDepth);
+ TNode<Context> context = LoadContextFromBaseline();
+
+ Label slowpath(this, Label::kDeferred);
+
+ // Check for context extensions to allow the fast path.
+ TNode<Context> slot_context = GotoIfHasContextExtensionUpToDepth(
+ context, Unsigned(TruncateWordToInt32(TaggedIndexToIntPtr(depth))),
+ &slowpath);
+
+ // Fast path does a normal load context.
+ {
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ Return(LoadContextElement(slot_context, TaggedIndexToIntPtr(slot)));
+ }
+
+ // Slow path when we have to call out to the runtime.
+ BIND(&slowpath);
+ {
+ auto name = Parameter<Object>(Descriptor::kName);
+ Runtime::FunctionId function_id = typeof_mode == INSIDE_TYPEOF
+ ? Runtime::kLoadLookupSlotInsideTypeof
+ : Runtime::kLoadLookupSlot;
+ TailCallRuntime(function_id, context, name);
+ }
+}
+
+void AccessorAssembler::GenerateLookupGlobalICBaseline(TypeofMode typeof_mode) {
+ using Descriptor = LookupBaselineDescriptor;
+
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto depth = Parameter<TaggedIndex>(Descriptor::kDepth);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ TNode<Context> context = LoadContextFromBaseline();
+
+ Label slowpath(this, Label::kDeferred);
+
+ // Check for context extensions to allow the fast path
+ GotoIfHasContextExtensionUpToDepth(
+ context, Unsigned(TruncateWordToInt32(TaggedIndexToIntPtr(depth))),
+ &slowpath);
+
+ // Fast path does a normal load global
+ {
+ Callable callable =
+ CodeFactory::LoadGlobalICInOptimizedCode(isolate(), typeof_mode);
+ TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
+ TailCallStub(callable, context, name, slot, vector);
+ }
+
+ // Slow path when we have to call out to the runtime
+ BIND(&slowpath);
+ Runtime::FunctionId function_id = typeof_mode == INSIDE_TYPEOF
+ ? Runtime::kLoadLookupSlotInsideTypeof
+ : Runtime::kLoadLookupSlot;
+ TailCallRuntime(function_id, context, name);
+}
+
void AccessorAssembler::GenerateKeyedLoadIC() {
using Descriptor = LoadWithVectorDescriptor;
@@ -3951,6 +4045,19 @@ void AccessorAssembler::GenerateKeyedLoadICTrampoline() {
vector);
}
+void AccessorAssembler::GenerateKeyedLoadICBaseline() {
+ using Descriptor = LoadBaselineDescriptor;
+
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
+ TNode<Context> context = LoadContextFromBaseline();
+
+ TailCallBuiltin(Builtins::kKeyedLoadIC, context, receiver, name, slot,
+ vector);
+}
+
void AccessorAssembler::GenerateKeyedLoadICTrampoline_Megamorphic() {
using Descriptor = LoadDescriptor;
@@ -4002,6 +4109,18 @@ void AccessorAssembler::GenerateStoreGlobalICTrampoline() {
TailCallBuiltin(Builtins::kStoreGlobalIC, context, name, value, slot, vector);
}
+void AccessorAssembler::GenerateStoreGlobalICBaseline() {
+ using Descriptor = StoreGlobalBaselineDescriptor;
+
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
+ TNode<Context> context = LoadContextFromBaseline();
+
+ TailCallBuiltin(Builtins::kStoreGlobalIC, context, name, value, slot, vector);
+}
+
void AccessorAssembler::GenerateStoreIC() {
using Descriptor = StoreWithVectorDescriptor;
@@ -4030,6 +4149,20 @@ void AccessorAssembler::GenerateStoreICTrampoline() {
vector);
}
+void AccessorAssembler::GenerateStoreICBaseline() {
+ using Descriptor = StoreBaselineDescriptor;
+
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
+ TNode<Context> context = LoadContextFromBaseline();
+
+ TailCallBuiltin(Builtins::kStoreIC, context, receiver, name, value, slot,
+ vector);
+}
+
void AccessorAssembler::GenerateKeyedStoreIC() {
using Descriptor = StoreWithVectorDescriptor;
@@ -4058,6 +4191,20 @@ void AccessorAssembler::GenerateKeyedStoreICTrampoline() {
vector);
}
+void AccessorAssembler::GenerateKeyedStoreICBaseline() {
+ using Descriptor = StoreBaselineDescriptor;
+
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
+ TNode<Context> context = LoadContextFromBaseline();
+
+ TailCallBuiltin(Builtins::kKeyedStoreIC, context, receiver, name, value, slot,
+ vector);
+}
+
void AccessorAssembler::GenerateStoreInArrayLiteralIC() {
using Descriptor = StoreWithVectorDescriptor;
@@ -4072,6 +4219,21 @@ void AccessorAssembler::GenerateStoreInArrayLiteralIC() {
StoreInArrayLiteralIC(&p);
}
+void AccessorAssembler::GenerateStoreInArrayLiteralICBaseline() {
+ using Descriptor = StoreBaselineDescriptor;
+
+ auto array = Parameter<Object>(Descriptor::kReceiver);
+ auto index = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+
+ TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
+ TNode<Context> context = LoadContextFromBaseline();
+
+ TailCallBuiltin(Builtins::kStoreInArrayLiteralIC, context, array, index,
+ value, slot, vector);
+}
+
void AccessorAssembler::GenerateCloneObjectIC_Slow() {
using Descriptor = CloneObjectWithVectorDescriptor;
auto source = Parameter<Object>(Descriptor::kSource);
@@ -4125,6 +4287,20 @@ void AccessorAssembler::GenerateCloneObjectIC_Slow() {
Return(result);
}
+void AccessorAssembler::GenerateCloneObjectICBaseline() {
+ using Descriptor = CloneObjectBaselineDescriptor;
+
+ auto source = Parameter<Object>(Descriptor::kSource);
+ auto flags = Parameter<Smi>(Descriptor::kFlags);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+
+ TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
+ TNode<Context> context = LoadContextFromBaseline();
+
+ TailCallBuiltin(Builtins::kCloneObjectIC, context, source, flags, slot,
+ vector);
+}
+
void AccessorAssembler::GenerateCloneObjectIC() {
using Descriptor = CloneObjectWithVectorDescriptor;
auto source = Parameter<Object>(Descriptor::kSource);
@@ -4219,17 +4395,15 @@ void AccessorAssembler::GenerateCloneObjectIC() {
},
1, IndexAdvanceMode::kPost);
- // If mutable HeapNumbers can occur, we need to go through the {object}
- // again here and properly clone them. We use a second loop here to
- // ensure that the GC (and heap verifier) always sees properly initialized
- // objects, i.e. never hits undefined values in double fields.
- if (!FLAG_unbox_double_fields) {
- TNode<IntPtrT> start_offset = TimesTaggedSize(result_start);
- TNode<IntPtrT> end_offset =
- IntPtrAdd(TimesTaggedSize(source_size), field_offset_difference);
- ConstructorBuiltinsAssembler(state()).CopyMutableHeapNumbersInObject(
- object, start_offset, end_offset);
- }
+  // We need to go through the {object} again here and properly clone any
+  // mutable HeapNumbers it contains. We use a second loop here to ensure
+  // that the GC (and heap verifier) always sees properly initialized
+  // objects, i.e. never hits undefined values in double fields.
+ TNode<IntPtrT> start_offset = TimesTaggedSize(result_start);
+ TNode<IntPtrT> end_offset =
+ IntPtrAdd(TimesTaggedSize(source_size), field_offset_difference);
+ ConstructorBuiltinsAssembler(state()).CopyMutableHeapNumbersInObject(
+ object, start_offset, end_offset);
Return(object);
}
@@ -4286,6 +4460,18 @@ void AccessorAssembler::GenerateKeyedHasIC() {
KeyedLoadIC(&p, LoadAccessMode::kHas);
}
+void AccessorAssembler::GenerateKeyedHasICBaseline() {
+ using Descriptor = LoadBaselineDescriptor;
+
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ TNode<FeedbackVector> vector = LoadFeedbackVectorFromBaseline();
+ TNode<Context> context = LoadContextFromBaseline();
+
+ TailCallBuiltin(Builtins::kKeyedHasIC, context, receiver, name, slot, vector);
+}
+
void AccessorAssembler::GenerateKeyedHasIC_Megamorphic() {
using Descriptor = LoadWithVectorDescriptor;
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index bf506da478..64ad680882 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -31,30 +31,42 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
void GenerateLoadIC_NoFeedback();
void GenerateLoadGlobalIC_NoFeedback();
void GenerateLoadICTrampoline();
+ void GenerateLoadICBaseline();
void GenerateLoadICTrampoline_Megamorphic();
void GenerateLoadSuperIC();
+ void GenerateLoadSuperICBaseline();
void GenerateKeyedLoadIC();
void GenerateKeyedLoadIC_Megamorphic();
void GenerateKeyedLoadIC_PolymorphicName();
void GenerateKeyedLoadICTrampoline();
+ void GenerateKeyedLoadICBaseline();
void GenerateKeyedLoadICTrampoline_Megamorphic();
void GenerateStoreIC();
void GenerateStoreICTrampoline();
+ void GenerateStoreICBaseline();
void GenerateStoreGlobalIC();
void GenerateStoreGlobalICTrampoline();
+ void GenerateStoreGlobalICBaseline();
void GenerateCloneObjectIC();
+ void GenerateCloneObjectICBaseline();
void GenerateCloneObjectIC_Slow();
void GenerateKeyedHasIC();
+ void GenerateKeyedHasICBaseline();
void GenerateKeyedHasIC_Megamorphic();
void GenerateKeyedHasIC_PolymorphicName();
void GenerateLoadGlobalIC(TypeofMode typeof_mode);
void GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode);
+ void GenerateLoadGlobalICBaseline(TypeofMode typeof_mode);
+ void GenerateLookupGlobalICBaseline(TypeofMode typeof_mode);
+ void GenerateLookupContextBaseline(TypeofMode typeof_mode);
void GenerateKeyedStoreIC();
void GenerateKeyedStoreICTrampoline();
+ void GenerateKeyedStoreICBaseline();
void GenerateStoreInArrayLiteralIC();
+ void GenerateStoreInArrayLiteralICBaseline();
void TryProbeStubCache(StubCache* stub_cache,
TNode<Object> lookup_start_object, TNode<Name> name,
@@ -243,6 +255,8 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
TNode<Object> value, Label* slow,
bool do_transitioning_store);
+ TNode<BoolT> IsPropertyDetailsConst(TNode<Uint32T> details);
+
void CheckFieldType(TNode<DescriptorArray> descriptors,
TNode<IntPtrT> name_index, TNode<Word32T> representation,
TNode<Object> value, Label* bailout);
@@ -454,7 +468,6 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
Label* unimplemented_elements_kind, Label* out_of_bounds,
Label* miss, ExitPoint* exit_point,
LoadAccessMode access_mode = LoadAccessMode::kLoad);
- TNode<BoolT> IsPropertyDetailsConst(TNode<Uint32T> details);
// Stub cache access helpers.
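
All of the Generate*Baseline declarations added above follow one pattern visible in the accessor-assembler.cc hunks: unlike the existing variants, the Baseline builtins obtain the feedback vector and context from the current baseline frame via LoadFeedbackVectorFromBaseline() / LoadContextFromBaseline() and then tail-call the same shared IC builtin. A simplified standalone C++ sketch of that dispatch split; Frame, FeedbackVector and the handler signature are illustrative stand-ins, not V8 types:

// Standalone sketch of the trampoline-vs-baseline dispatch split seen above.
#include <iostream>
#include <string>

struct FeedbackVector { std::string name; };
struct Context { std::string name; };

struct Frame {
  // In V8, the *Baseline builtins read these from the baseline frame instead
  // of receiving them explicitly.
  FeedbackVector feedback_vector{"vector-from-frame"};
  Context context{"context-from-frame"};
};

// The shared IC body, analogous to Builtins::kLoadIC.
void LoadIC(const Context& ctx, const std::string& receiver,
            const std::string& name, const FeedbackVector& vector) {
  std::cout << "LoadIC " << receiver << "." << name << " via " << vector.name
            << " in " << ctx.name << "\n";
}

// Explicit flavour: vector and context arrive from the caller.
void LoadICExplicit(const Context& ctx, const FeedbackVector& vector,
                    const std::string& receiver, const std::string& name) {
  LoadIC(ctx, receiver, name, vector);
}

// Baseline flavour: vector and context are pulled from the frame.
void LoadICBaseline(const Frame& frame, const std::string& receiver,
                    const std::string& name) {
  LoadIC(frame.context, receiver, name, frame.feedback_vector);
}

int main() {
  Frame frame;
  LoadICExplicit(Context{"explicit-context"},
                 FeedbackVector{"explicit-vector"}, "obj", "x");
  LoadICBaseline(frame, "obj", "y");
  return 0;
}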
diff --git a/deps/v8/src/ic/binary-op-assembler.cc b/deps/v8/src/ic/binary-op-assembler.cc
index 8cba7172a2..3c0fc420ee 100644
--- a/deps/v8/src/ic/binary-op-assembler.cc
+++ b/deps/v8/src/ic/binary-op-assembler.cc
@@ -10,9 +10,9 @@ namespace v8 {
namespace internal {
TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
- TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs,
- TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
- bool rhs_known_smi) {
+ const LazyNode<Context>& context, TNode<Object> lhs, TNode<Object> rhs,
+ TNode<UintPtrT> slot_id, const LazyNode<HeapObject>& maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi) {
// Shared entry for floating point addition.
Label do_fadd(this), if_lhsisnotnumber(this, Label::kDeferred),
check_rhsisoddball(this, Label::kDeferred),
@@ -69,8 +69,8 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
// Not overflowed.
{
var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
- UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
- slot_id);
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector(),
+ slot_id, update_feedback_mode);
var_result = smi_result;
Goto(&end);
}
@@ -118,7 +118,8 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
BIND(&do_fadd);
{
var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
- UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector(), slot_id,
+ update_feedback_mode);
TNode<Float64T> value =
Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value());
TNode<HeapNumber> result = AllocateHeapNumberWithValue(value);
@@ -169,10 +170,10 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
&call_with_any_feedback);
var_type_feedback = SmiConstant(BinaryOperationFeedback::kString);
- UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
- slot_id);
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector(),
+ slot_id, update_feedback_mode);
var_result =
- CallBuiltin(Builtins::kStringAdd_CheckNone, context, lhs, rhs);
+ CallBuiltin(Builtins::kStringAdd_CheckNone, context(), lhs, rhs);
Goto(&end);
}
@@ -194,20 +195,21 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
{
// Both {lhs} and {rhs} are of BigInt type.
Label bigint_too_big(this);
- var_result = CallBuiltin(Builtins::kBigIntAddNoThrow, context, lhs, rhs);
+ var_result = CallBuiltin(Builtins::kBigIntAddNoThrow, context(), lhs, rhs);
// Check for sentinel that signals BigIntTooBig exception.
GotoIf(TaggedIsSmi(var_result.value()), &bigint_too_big);
var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
- UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector(), slot_id,
+ update_feedback_mode);
Goto(&end);
BIND(&bigint_too_big);
{
// Update feedback to prevent deopt loop.
UpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny),
- maybe_feedback_vector, slot_id);
- ThrowRangeError(context, MessageTemplate::kBigIntTooBig);
+ maybe_feedback_vector(), slot_id, update_feedback_mode);
+ ThrowRangeError(context(), MessageTemplate::kBigIntTooBig);
}
}
@@ -225,8 +227,9 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
BIND(&call_add_stub);
{
- UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
- var_result = CallBuiltin(Builtins::kAdd, context, lhs, rhs);
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector(), slot_id,
+ update_feedback_mode);
+ var_result = CallBuiltin(Builtins::kAdd, context(), lhs, rhs);
Goto(&end);
}
@@ -235,10 +238,10 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
}
TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
- TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs,
- TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
+ const LazyNode<Context>& context, TNode<Object> lhs, TNode<Object> rhs,
+ TNode<UintPtrT> slot_id, const LazyNode<HeapObject>& maybe_feedback_vector,
const SmiOperation& smiOperation, const FloatOperation& floatOperation,
- Operation op, bool rhs_known_smi) {
+ Operation op, UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi) {
Label do_float_operation(this), end(this), call_stub(this),
check_rhsisoddball(this, Label::kDeferred), call_with_any_feedback(this),
if_lhsisnotnumber(this, Label::kDeferred),
@@ -285,7 +288,8 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
{
Comment("perform smi operation");
var_result = smiOperation(lhs_smi, CAST(rhs), &var_type_feedback);
- UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector(),
+ slot_id, update_feedback_mode);
Goto(&end);
}
}
@@ -328,7 +332,8 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
BIND(&do_float_operation);
{
var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
- UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector(), slot_id,
+ update_feedback_mode);
TNode<Float64T> lhs_value = var_float_lhs.value();
TNode<Float64T> rhs_value = var_float_rhs.value();
TNode<Float64T> value = floatOperation(lhs_value, rhs_value);
@@ -392,11 +397,12 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
BIND(&if_both_bigint);
{
var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
- UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector(), slot_id,
+ update_feedback_mode);
if (op == Operation::kSubtract) {
Label bigint_too_big(this);
var_result =
- CallBuiltin(Builtins::kBigIntSubtractNoThrow, context, lhs, rhs);
+ CallBuiltin(Builtins::kBigIntSubtractNoThrow, context(), lhs, rhs);
// Check for sentinel that signals BigIntTooBig exception.
GotoIf(TaggedIsSmi(var_result.value()), &bigint_too_big);
@@ -406,11 +412,11 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
{
// Update feedback to prevent deopt loop.
UpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny),
- maybe_feedback_vector, slot_id);
- ThrowRangeError(context, MessageTemplate::kBigIntTooBig);
+ maybe_feedback_vector(), slot_id, update_feedback_mode);
+ ThrowRangeError(context(), MessageTemplate::kBigIntTooBig);
}
} else {
- var_result = CallRuntime(Runtime::kBigIntBinaryOp, context, lhs, rhs,
+ var_result = CallRuntime(Runtime::kBigIntBinaryOp, context(), lhs, rhs,
SmiConstant(op));
Goto(&end);
}
@@ -424,20 +430,21 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
BIND(&call_stub);
{
- UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector(), slot_id,
+ update_feedback_mode);
TNode<Object> result;
switch (op) {
case Operation::kSubtract:
- result = CallBuiltin(Builtins::kSubtract, context, lhs, rhs);
+ result = CallBuiltin(Builtins::kSubtract, context(), lhs, rhs);
break;
case Operation::kMultiply:
- result = CallBuiltin(Builtins::kMultiply, context, lhs, rhs);
+ result = CallBuiltin(Builtins::kMultiply, context(), lhs, rhs);
break;
case Operation::kDivide:
- result = CallBuiltin(Builtins::kDivide, context, lhs, rhs);
+ result = CallBuiltin(Builtins::kDivide, context(), lhs, rhs);
break;
case Operation::kModulus:
- result = CallBuiltin(Builtins::kModulus, context, lhs, rhs);
+ result = CallBuiltin(Builtins::kModulus, context(), lhs, rhs);
break;
default:
UNREACHABLE();
@@ -451,9 +458,9 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
}
TNode<Object> BinaryOpAssembler::Generate_SubtractWithFeedback(
- TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs,
- TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
- bool rhs_known_smi) {
+ const LazyNode<Context>& context, TNode<Object> lhs, TNode<Object> rhs,
+ TNode<UintPtrT> slot_id, const LazyNode<HeapObject>& maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi) {
auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs,
TVariable<Smi>* var_type_feedback) {
Label end(this);
@@ -483,13 +490,13 @@ TNode<Object> BinaryOpAssembler::Generate_SubtractWithFeedback(
};
return Generate_BinaryOperationWithFeedback(
context, lhs, rhs, slot_id, maybe_feedback_vector, smiFunction,
- floatFunction, Operation::kSubtract, rhs_known_smi);
+ floatFunction, Operation::kSubtract, update_feedback_mode, rhs_known_smi);
}
TNode<Object> BinaryOpAssembler::Generate_MultiplyWithFeedback(
- TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs,
- TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
- bool rhs_known_smi) {
+ const LazyNode<Context>& context, TNode<Object> lhs, TNode<Object> rhs,
+ TNode<UintPtrT> slot_id, const LazyNode<HeapObject>& maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi) {
auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs,
TVariable<Smi>* var_type_feedback) {
TNode<Number> result = SmiMul(lhs, rhs);
@@ -503,13 +510,14 @@ TNode<Object> BinaryOpAssembler::Generate_MultiplyWithFeedback(
};
return Generate_BinaryOperationWithFeedback(
context, lhs, rhs, slot_id, maybe_feedback_vector, smiFunction,
- floatFunction, Operation::kMultiply, rhs_known_smi);
+ floatFunction, Operation::kMultiply, update_feedback_mode, rhs_known_smi);
}
TNode<Object> BinaryOpAssembler::Generate_DivideWithFeedback(
- TNode<Context> context, TNode<Object> dividend, TNode<Object> divisor,
- TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
- bool rhs_known_smi) {
+ const LazyNode<Context>& context, TNode<Object> dividend,
+ TNode<Object> divisor, TNode<UintPtrT> slot_id,
+ const LazyNode<HeapObject>& maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi) {
auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs,
TVariable<Smi>* var_type_feedback) {
TVARIABLE(Object, var_result);
@@ -539,13 +547,14 @@ TNode<Object> BinaryOpAssembler::Generate_DivideWithFeedback(
};
return Generate_BinaryOperationWithFeedback(
context, dividend, divisor, slot_id, maybe_feedback_vector, smiFunction,
- floatFunction, Operation::kDivide, rhs_known_smi);
+ floatFunction, Operation::kDivide, update_feedback_mode, rhs_known_smi);
}
TNode<Object> BinaryOpAssembler::Generate_ModulusWithFeedback(
- TNode<Context> context, TNode<Object> dividend, TNode<Object> divisor,
- TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
- bool rhs_known_smi) {
+ const LazyNode<Context>& context, TNode<Object> dividend,
+ TNode<Object> divisor, TNode<UintPtrT> slot_id,
+ const LazyNode<HeapObject>& maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi) {
auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs,
TVariable<Smi>* var_type_feedback) {
TNode<Number> result = SmiMod(lhs, rhs);
@@ -559,22 +568,24 @@ TNode<Object> BinaryOpAssembler::Generate_ModulusWithFeedback(
};
return Generate_BinaryOperationWithFeedback(
context, dividend, divisor, slot_id, maybe_feedback_vector, smiFunction,
- floatFunction, Operation::kModulus, rhs_known_smi);
+ floatFunction, Operation::kModulus, update_feedback_mode, rhs_known_smi);
}
TNode<Object> BinaryOpAssembler::Generate_ExponentiateWithFeedback(
- TNode<Context> context, TNode<Object> base, TNode<Object> exponent,
- TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
- bool rhs_known_smi) {
+ const LazyNode<Context>& context, TNode<Object> base,
+ TNode<Object> exponent, TNode<UintPtrT> slot_id,
+ const LazyNode<HeapObject>& maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi) {
// We currently don't optimize exponentiation based on feedback.
TNode<Smi> dummy_feedback = SmiConstant(BinaryOperationFeedback::kAny);
- UpdateFeedback(dummy_feedback, maybe_feedback_vector, slot_id);
- return CallBuiltin(Builtins::kExponentiate, context, base, exponent);
+ UpdateFeedback(dummy_feedback, maybe_feedback_vector(), slot_id,
+ update_feedback_mode);
+ return CallBuiltin(Builtins::kExponentiate, context(), base, exponent);
}
TNode<Object> BinaryOpAssembler::Generate_BitwiseBinaryOpWithOptionalFeedback(
Operation bitwise_op, TNode<Object> left, TNode<Object> right,
- TNode<Context> context, TVariable<Smi>* feedback) {
+ const LazyNode<Context>& context, TVariable<Smi>* feedback) {
TVARIABLE(Object, result);
TVARIABLE(Smi, var_left_feedback);
TVARIABLE(Smi, var_right_feedback);
@@ -592,14 +603,14 @@ TNode<Object> BinaryOpAssembler::Generate_BitwiseBinaryOpWithOptionalFeedback(
Label if_left_bigint(this), do_bigint_op(this);
TaggedToWord32OrBigIntWithFeedback(
- context, left, &if_left_number, &var_left_word32, &if_left_bigint,
+ context(), left, &if_left_number, &var_left_word32, &if_left_bigint,
&var_left_bigint, feedback ? &var_left_feedback : nullptr);
Label right_is_bigint(this);
BIND(&if_left_number);
{
TaggedToWord32OrBigIntWithFeedback(
- context, right, &do_number_op, &var_right_word32, &right_is_bigint,
+ context(), right, &do_number_op, &var_right_word32, &right_is_bigint,
&var_right_bigint, feedback ? &var_right_feedback : nullptr);
}
@@ -631,7 +642,7 @@ TNode<Object> BinaryOpAssembler::Generate_BitwiseBinaryOpWithOptionalFeedback(
// BigInt cases.
BIND(&if_left_bigint);
{
- TaggedToNumericWithFeedback(context, right, &var_right_maybe_bigint,
+ TaggedToNumericWithFeedback(context(), right, &var_right_maybe_bigint,
&var_right_feedback);
var_left_maybe_bigint = var_left_bigint.value();
Goto(&do_bigint_op);
@@ -643,7 +654,7 @@ TNode<Object> BinaryOpAssembler::Generate_BitwiseBinaryOpWithOptionalFeedback(
*feedback = SmiOr(var_left_feedback.value(), var_right_feedback.value());
}
result = CallRuntime(
- Runtime::kBigIntBinaryOp, context, var_left_maybe_bigint.value(),
+ Runtime::kBigIntBinaryOp, context(), var_left_maybe_bigint.value(),
var_right_maybe_bigint.value(), SmiConstant(bitwise_op));
Goto(&done);
}
diff --git a/deps/v8/src/ic/binary-op-assembler.h b/deps/v8/src/ic/binary-op-assembler.h
index a3a1e40e2d..6dff319736 100644
--- a/deps/v8/src/ic/binary-op-assembler.h
+++ b/deps/v8/src/ic/binary-op-assembler.h
@@ -21,106 +21,113 @@ class BinaryOpAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
TNode<Object> Generate_AddWithFeedback(
- TNode<Context> context, TNode<Object> left, TNode<Object> right,
- TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
- bool rhs_known_smi);
+ const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi);
TNode<Object> Generate_SubtractWithFeedback(
- TNode<Context> context, TNode<Object> left, TNode<Object> right,
- TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
- bool rhs_known_smi);
+ const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi);
TNode<Object> Generate_MultiplyWithFeedback(
- TNode<Context> context, TNode<Object> left, TNode<Object> right,
- TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
- bool rhs_known_smi);
+ const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi);
TNode<Object> Generate_DivideWithFeedback(
- TNode<Context> context, TNode<Object> dividend, TNode<Object> divisor,
- TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
- bool rhs_known_smi);
+ const LazyNode<Context>& context, TNode<Object> dividend,
+ TNode<Object> divisor, TNode<UintPtrT> slot,
+ const LazyNode<HeapObject>& maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi);
TNode<Object> Generate_ModulusWithFeedback(
- TNode<Context> context, TNode<Object> dividend, TNode<Object> divisor,
- TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
- bool rhs_known_smi);
+ const LazyNode<Context>& context, TNode<Object> dividend,
+ TNode<Object> divisor, TNode<UintPtrT> slot,
+ const LazyNode<HeapObject>& maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi);
TNode<Object> Generate_ExponentiateWithFeedback(
- TNode<Context> context, TNode<Object> base, TNode<Object> exponent,
- TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
- bool rhs_known_smi);
+ const LazyNode<Context>& context, TNode<Object> base,
+ TNode<Object> exponent, TNode<UintPtrT> slot,
+ const LazyNode<HeapObject>& maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi);
TNode<Object> Generate_BitwiseOrWithFeedback(
- TNode<Context> context, TNode<Object> left, TNode<Object> right,
- TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
- bool /* unused */) {
+ const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode, bool /* unused */) {
TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
Operation::kBitwiseOr, left, right, context, &feedback);
- UpdateFeedback(feedback.value(), maybe_feedback_vector, slot);
+ UpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
+ update_feedback_mode);
return result;
}
TNode<Object> Generate_BitwiseXorWithFeedback(
- TNode<Context> context, TNode<Object> left, TNode<Object> right,
- TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
- bool /* unused */) {
+ const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode, bool /* unused */) {
TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
Operation::kBitwiseXor, left, right, context, &feedback);
- UpdateFeedback(feedback.value(), maybe_feedback_vector, slot);
+ UpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
+ update_feedback_mode);
return result;
}
TNode<Object> Generate_BitwiseAndWithFeedback(
- TNode<Context> context, TNode<Object> left, TNode<Object> right,
- TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
- bool /* unused */) {
+ const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode, bool /* unused */) {
TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
Operation::kBitwiseAnd, left, right, context, &feedback);
- UpdateFeedback(feedback.value(), maybe_feedback_vector, slot);
+ UpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
+ update_feedback_mode);
return result;
}
TNode<Object> Generate_ShiftLeftWithFeedback(
- TNode<Context> context, TNode<Object> left, TNode<Object> right,
- TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
- bool /* unused */) {
+ const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode, bool /* unused */) {
TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
Operation::kShiftLeft, left, right, context, &feedback);
- UpdateFeedback(feedback.value(), maybe_feedback_vector, slot);
+ UpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
+ update_feedback_mode);
return result;
}
TNode<Object> Generate_ShiftRightWithFeedback(
- TNode<Context> context, TNode<Object> left, TNode<Object> right,
- TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
- bool /* unused */) {
+ const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode, bool /* unused */) {
TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
Operation::kShiftRight, left, right, context, &feedback);
- UpdateFeedback(feedback.value(), maybe_feedback_vector, slot);
+ UpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
+ update_feedback_mode);
return result;
}
TNode<Object> Generate_ShiftRightLogicalWithFeedback(
- TNode<Context> context, TNode<Object> left, TNode<Object> right,
- TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
- bool /* unused */) {
+ const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode, bool /* unused */) {
TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
Operation::kShiftRightLogical, left, right, context, &feedback);
- UpdateFeedback(feedback.value(), maybe_feedback_vector, slot);
+ UpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
+ update_feedback_mode);
return result;
}
- TNode<Object> Generate_BitwiseBinaryOpWithFeedback(Operation bitwise_op,
- TNode<Object> left,
- TNode<Object> right,
- TNode<Context> context,
- TVariable<Smi>* feedback) {
+ TNode<Object> Generate_BitwiseBinaryOpWithFeedback(
+ Operation bitwise_op, TNode<Object> left, TNode<Object> right,
+ const LazyNode<Context>& context, TVariable<Smi>* feedback) {
return Generate_BitwiseBinaryOpWithOptionalFeedback(bitwise_op, left, right,
context, feedback);
}
@@ -129,8 +136,8 @@ class BinaryOpAssembler : public CodeStubAssembler {
TNode<Object> left,
TNode<Object> right,
TNode<Context> context) {
- return Generate_BitwiseBinaryOpWithOptionalFeedback(bitwise_op, left, right,
- context, nullptr);
+ return Generate_BitwiseBinaryOpWithOptionalFeedback(
+ bitwise_op, left, right, [&] { return context; }, nullptr);
}
private:
@@ -140,14 +147,15 @@ class BinaryOpAssembler : public CodeStubAssembler {
std::function<TNode<Float64T>(TNode<Float64T>, TNode<Float64T>)>;
TNode<Object> Generate_BinaryOperationWithFeedback(
- TNode<Context> context, TNode<Object> left, TNode<Object> right,
- TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
+ const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
const SmiOperation& smiOperation, const FloatOperation& floatOperation,
- Operation op, bool rhs_known_smi);
+ Operation op, UpdateFeedbackMode update_feedback_mode,
+ bool rhs_known_smi);
TNode<Object> Generate_BitwiseBinaryOpWithOptionalFeedback(
Operation bitwise_op, TNode<Object> left, TNode<Object> right,
- TNode<Context> context, TVariable<Smi>* feedback);
+ const LazyNode<Context>& context, TVariable<Smi>* feedback);
};
} // namespace internal
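
In the binary-op hunks above, the Context and feedback-vector parameters become const LazyNode<...>& and every use site changes from context / maybe_feedback_vector to context() / maybe_feedback_vector(), alongside the new UpdateFeedbackMode argument; presumably the point is that these values are now produced by a callable and only materialized on paths that actually record feedback or call out. A self-contained C++ sketch of that deferral idea using std::function; LazyValue and the locally defined UpdateFeedbackMode are illustrative stand-ins, not V8's types:

// Standalone sketch of the lazy-parameter pattern introduced above: pass a
// callable instead of a value, and only invoke it on paths that need it.
#include <functional>
#include <iostream>
#include <string>

template <typename T>
using LazyValue = std::function<T()>;

enum class UpdateFeedbackMode { kOptionalFeedback, kGuaranteedFeedback };

// Analogous to UpdateFeedback(feedback, maybe_feedback_vector(), slot, mode):
// the vector is only produced when feedback is actually recorded.
void UpdateFeedback(int feedback, const LazyValue<std::string>& lazy_vector,
                    UpdateFeedbackMode mode) {
  if (mode == UpdateFeedbackMode::kGuaranteedFeedback) {
    std::cout << "record feedback " << feedback << " in " << lazy_vector()
              << "\n";
  }
  // kOptionalFeedback: the callable is never invoked, so the (potentially
  // costly) materialization of the feedback vector is skipped entirely.
}

int main() {
  LazyValue<std::string> lazy_vector = [] {
    std::cout << "(materializing feedback vector)\n";
    return std::string("feedback-vector");
  };
  UpdateFeedback(7, lazy_vector, UpdateFeedbackMode::kGuaranteedFeedback);
  UpdateFeedback(7, lazy_vector, UpdateFeedbackMode::kOptionalFeedback);
  return 0;
}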
diff --git a/deps/v8/src/ic/call-optimization.cc b/deps/v8/src/ic/call-optimization.cc
index 72f43743d2..8a6374a431 100644
--- a/deps/v8/src/ic/call-optimization.cc
+++ b/deps/v8/src/ic/call-optimization.cc
@@ -60,19 +60,10 @@ Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
return Handle<JSObject>::null();
}
-bool CallOptimization::IsCompatibleReceiver(Handle<Object> receiver,
- Handle<JSObject> holder) const {
+bool CallOptimization::IsCompatibleReceiverMap(
+ Handle<JSObject> api_holder, Handle<JSObject> holder,
+ HolderLookup holder_lookup) const {
DCHECK(is_simple_api_call());
- if (!receiver->IsHeapObject()) return false;
- Handle<Map> map(HeapObject::cast(*receiver).map(), holder->GetIsolate());
- return IsCompatibleReceiverMap(map, holder);
-}
-
-
-bool CallOptimization::IsCompatibleReceiverMap(Handle<Map> map,
- Handle<JSObject> holder) const {
- HolderLookup holder_lookup;
- Handle<JSObject> api_holder = LookupHolderOfExpectedType(map, &holder_lookup);
switch (holder_lookup) {
case kHolderNotFound:
return false;
diff --git a/deps/v8/src/ic/call-optimization.h b/deps/v8/src/ic/call-optimization.h
index c8c7f25d5a..b6d49a1bf9 100644
--- a/deps/v8/src/ic/call-optimization.h
+++ b/deps/v8/src/ic/call-optimization.h
@@ -42,13 +42,8 @@ class CallOptimization {
Handle<JSObject> LookupHolderOfExpectedType(
Handle<Map> receiver_map, HolderLookup* holder_lookup) const;
- // Check if the api holder is between the receiver and the holder.
- bool IsCompatibleReceiver(Handle<Object> receiver,
- Handle<JSObject> holder) const;
-
- // Check if the api holder is between the receiver and the holder.
- bool IsCompatibleReceiverMap(Handle<Map> receiver_map,
- Handle<JSObject> holder) const;
+ bool IsCompatibleReceiverMap(Handle<JSObject> api_holder,
+ Handle<JSObject> holder, HolderLookup) const;
private:
void Initialize(Isolate* isolate, Handle<JSFunction> function);
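
The header change above drops IsCompatibleReceiver and the old two-argument IsCompatibleReceiverMap in favour of a variant that takes the api_holder and HolderLookup computed by the caller; the ic.cc hunks below show the LoadIC/StoreIC call sites performing LookupHolderOfExpectedType once and feeding its results straight into the check. A small standalone C++ sketch of that "hoist the lookup to the caller" refactoring, with simplified stand-in types rather than V8's CallOptimization API:

// Standalone sketch of the refactoring above: the caller performs the holder
// lookup once and passes its results to the compatibility check, instead of
// the check redoing the lookup internally.
#include <iostream>
#include <string>

enum class HolderLookup { kHolderNotFound, kHolderIsReceiver, kHolderFound };

struct Holder {
  std::string name;
};

// Analogous to CallOptimization::LookupHolderOfExpectedType().
Holder LookupHolderOfExpectedType(const std::string& receiver_map,
                                  HolderLookup* lookup) {
  *lookup = receiver_map.empty() ? HolderLookup::kHolderNotFound
                                 : HolderLookup::kHolderIsReceiver;
  return Holder{receiver_map + "-api-holder"};
}

// New-style check: consumes the caller's lookup result instead of redoing it.
bool IsCompatibleReceiverMap(const Holder& api_holder, const Holder& holder,
                             HolderLookup lookup) {
  switch (lookup) {
    case HolderLookup::kHolderNotFound:
      return false;
    case HolderLookup::kHolderIsReceiver:
      return true;
    case HolderLookup::kHolderFound:
      return api_holder.name == holder.name;
  }
  return false;
}

int main() {
  // The caller does the lookup once and reuses the result for both the
  // compatibility check and (in V8) the handler selection.
  HolderLookup lookup;
  Holder api_holder = LookupHolderOfExpectedType("receiver-map", &lookup);
  Holder holder{"receiver-map-api-holder"};
  std::cout << IsCompatibleReceiverMap(api_holder, holder, lookup) << "\n";
  return 0;
}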
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index d13d1bdab5..2614e27440 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -432,7 +432,7 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name,
LookupForRead(&it, IsAnyHas());
if (name->IsPrivate()) {
- if (name->IsPrivateName() && !it.IsFound()) {
+ if (!IsAnyHas() && name->IsPrivateName() && !it.IsFound()) {
Handle<String> name_string(
String::cast(Symbol::cast(*name).description()), isolate());
if (name->IsPrivateBrand()) {
@@ -862,13 +862,14 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
Handle<Object> accessors = lookup->GetAccessors();
if (accessors->IsAccessorPair()) {
- if (lookup->TryLookupCachedProperty()) {
+ Handle<AccessorPair> accessor_pair =
+ Handle<AccessorPair>::cast(accessors);
+ if (lookup->TryLookupCachedProperty(accessor_pair)) {
DCHECK_EQ(LookupIterator::DATA, lookup->state());
return ComputeHandler(lookup);
}
- Handle<Object> getter(AccessorPair::cast(*accessors).getter(),
- isolate());
+ Handle<Object> getter(accessor_pair->getter(), isolate());
if (!getter->IsJSFunction() && !getter->IsFunctionTemplateInfo()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
return LoadHandler::LoadSlow(isolate());
@@ -887,15 +888,17 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
CallOptimization call_optimization(isolate(), getter);
if (call_optimization.is_simple_api_call()) {
- if (!call_optimization.IsCompatibleReceiverMap(map, holder) ||
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder =
+ call_optimization.LookupHolderOfExpectedType(map, &holder_lookup);
+
+ if (!call_optimization.IsCompatibleReceiverMap(api_holder, holder,
+ holder_lookup) ||
!holder->HasFastProperties()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
return LoadHandler::LoadSlow(isolate());
}
- CallOptimization::HolderLookup holder_lookup;
- call_optimization.LookupHolderOfExpectedType(map, &holder_lookup);
-
smi_handler = LoadHandler::LoadApiGetter(
isolate(), holder_lookup == CallOptimization::kHolderIsReceiver);
@@ -993,7 +996,8 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
}
if (lookup->constness() == PropertyConstness::kConst &&
!holder_is_lookup_start_object) {
- DCHECK(!lookup->is_dictionary_holder());
+ DCHECK_IMPLIES(!V8_DICT_PROPERTY_CONST_TRACKING_BOOL,
+ !lookup->is_dictionary_holder());
Handle<Object> value = lookup->GetDataValue();
@@ -1745,11 +1749,12 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
CallOptimization call_optimization(isolate(), setter);
if (call_optimization.is_simple_api_call()) {
- if (call_optimization.IsCompatibleReceiver(receiver, holder)) {
- CallOptimization::HolderLookup holder_lookup;
- call_optimization.LookupHolderOfExpectedType(
- lookup_start_object_map(), &holder_lookup);
-
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder =
+ call_optimization.LookupHolderOfExpectedType(
+ lookup_start_object_map(), &holder_lookup);
+ if (call_optimization.IsCompatibleReceiverMap(api_holder, holder,
+ holder_lookup)) {
Handle<Smi> smi_handler = StoreHandler::StoreApiSetter(
isolate(),
holder_lookup == CallOptimization::kHolderIsReceiver);
@@ -1803,6 +1808,8 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
}
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreNormalDH);
DCHECK(holder.is_identical_to(receiver));
+ DCHECK_IMPLIES(!V8_DICT_PROPERTY_CONST_TRACKING_BOOL,
+ lookup->constness() == PropertyConstness::kMutable);
  // TODO(v8:11167) don't create slow handler once OrderedNameDictionary is
  // supported.
Handle<Smi> handler = V8_DICT_MODE_PROTOTYPES_BOOL
@@ -2766,9 +2773,7 @@ static Handle<Map> FastCloneObjectMap(Isolate* isolate, Handle<Map> source_map,
int slack = 0;
Handle<DescriptorArray> descriptors = DescriptorArray::CopyForFastObjectClone(
isolate, source_descriptors, size, slack);
- Handle<LayoutDescriptor> layout =
- LayoutDescriptor::New(isolate, map, descriptors, size);
- map->InitializeDescriptors(isolate, *descriptors, *layout);
+ map->InitializeDescriptors(isolate, *descriptors);
map->CopyUnusedPropertyFieldsAdjustedForInstanceSize(*source_map);
// Update bitfields
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index b2d33938b4..c741298d2c 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -842,10 +842,10 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
&var_name_index, &not_found);
BIND(&dictionary_found);
{
- Label overwrite(this);
+ Label check_const(this), overwrite(this), done(this);
TNode<Uint32T> details =
LoadDetailsByKeyIndex(properties, var_name_index.value());
- JumpIfDataProperty(details, &overwrite,
+ JumpIfDataProperty(details, &check_const,
ShouldReconfigureExisting() ? nullptr : &readonly);
if (ShouldCallSetter()) {
@@ -860,13 +860,30 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
Goto(slow);
}
+ BIND(&check_const);
+ {
+ if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL) {
+ GotoIfNot(IsPropertyDetailsConst(details), &overwrite);
+ TNode<Object> prev_value =
+ LoadValueByKeyIndex(properties, var_name_index.value());
+
+ BranchIfSameValue(prev_value, p->value(), &done, slow,
+ SameValueMode::kNumbersOnly);
+ } else {
+ Goto(&overwrite);
+ }
+ }
+
BIND(&overwrite);
{
CheckForAssociatedProtector(name, slow);
StoreValueByKeyIndex<NameDictionary>(properties, var_name_index.value(),
p->value());
- exit_point->Return(p->value());
+ Goto(&done);
}
+
+ BIND(&done);
+ exit_point->Return(p->value());
}
BIND(&not_found);
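The `check_const` branch added above only lets a store proceed when a const-tracked dictionary property is being overwritten with a SameValue-equal value; anything else falls through to the slow path. A hedged, non-CSA sketch of that decision, with a plain map standing in for the NameDictionary and doubles standing in for tagged values:

```cpp
#include <cmath>
#include <iostream>
#include <string>
#include <unordered_map>

struct Property {
  double value;
  bool is_const;  // plays the role of the const bit in the property details
};

// SameValue for doubles: NaN equals NaN, +0 and -0 are distinguished.
bool SameValue(double a, double b) {
  if (std::isnan(a) && std::isnan(b)) return true;
  if (a == 0.0 && b == 0.0) return std::signbit(a) == std::signbit(b);
  return a == b;
}

// Returns false when the store must be deferred to the slow path.
bool TryStore(std::unordered_map<std::string, Property>& dict,
              const std::string& name, double value) {
  auto it = dict.find(name);
  if (it == dict.end()) return false;  // not_found: handled elsewhere
  if (it->second.is_const && !SameValue(it->second.value, value)) {
    return false;  // const property, different value -> slow path
  }
  it->second.value = value;  // overwrite
  return true;
}

int main() {
  std::unordered_map<std::string, Property> dict{{"x", {1.0, true}}};
  std::cout << TryStore(dict, "x", 1.0) << TryStore(dict, "x", 2.0) << "\n";  // 10
}
```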
diff --git a/deps/v8/src/ic/unary-op-assembler.cc b/deps/v8/src/ic/unary-op-assembler.cc
index 6580601a1f..4308f561a7 100644
--- a/deps/v8/src/ic/unary-op-assembler.cc
+++ b/deps/v8/src/ic/unary-op-assembler.cc
@@ -18,7 +18,8 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {
TNode<Object> BitwiseNot(TNode<Context> context, TNode<Object> value,
TNode<UintPtrT> slot,
- TNode<HeapObject> maybe_feedback_vector) {
+ TNode<HeapObject> maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode) {
// TODO(jgruber): Make this implementation more consistent with other unary
// ops (i.e. have them all use UnaryOpWithFeedback or some other common
// mechanism).
@@ -38,13 +39,13 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {
TaggedIsSmi(var_result.value()), BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
UpdateFeedback(SmiOr(result_type, var_feedback.value()),
- maybe_feedback_vector, slot);
+ maybe_feedback_vector, slot, update_feedback_mode);
Goto(&out);
// BigInt case.
BIND(&if_bigint);
UpdateFeedback(SmiConstant(BinaryOperationFeedback::kBigInt),
- maybe_feedback_vector, slot);
+ maybe_feedback_vector, slot, update_feedback_mode);
var_result =
CallRuntime(Runtime::kBigIntUnaryOp, context, var_bigint.value(),
SmiConstant(Operation::kBitwiseNot));
@@ -56,21 +57,24 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {
TNode<Object> Decrement(TNode<Context> context, TNode<Object> value,
TNode<UintPtrT> slot,
- TNode<HeapObject> maybe_feedback_vector) {
- return IncrementOrDecrement<Operation::kDecrement>(context, value, slot,
- maybe_feedback_vector);
+ TNode<HeapObject> maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode) {
+ return IncrementOrDecrement<Operation::kDecrement>(
+ context, value, slot, maybe_feedback_vector, update_feedback_mode);
}
TNode<Object> Increment(TNode<Context> context, TNode<Object> value,
TNode<UintPtrT> slot,
- TNode<HeapObject> maybe_feedback_vector) {
- return IncrementOrDecrement<Operation::kIncrement>(context, value, slot,
- maybe_feedback_vector);
+ TNode<HeapObject> maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode) {
+ return IncrementOrDecrement<Operation::kIncrement>(
+ context, value, slot, maybe_feedback_vector, update_feedback_mode);
}
TNode<Object> Negate(TNode<Context> context, TNode<Object> value,
TNode<UintPtrT> slot,
- TNode<HeapObject> maybe_feedback_vector) {
+ TNode<HeapObject> maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode) {
SmiOperation smi_op = [=](TNode<Smi> smi_value,
TVariable<Smi>* var_feedback, Label* do_float_op,
TVariable<Float64T>* var_float) {
@@ -108,7 +112,8 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {
SmiConstant(Operation::kNegate)));
};
return UnaryOpWithFeedback(context, value, slot, maybe_feedback_vector,
- smi_op, float_op, bigint_op);
+ smi_op, float_op, bigint_op,
+ update_feedback_mode);
}
private:
@@ -125,7 +130,8 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {
TNode<HeapObject> maybe_feedback_vector,
const SmiOperation& smi_op,
const FloatOperation& float_op,
- const BigIntOperation& bigint_op) {
+ const BigIntOperation& bigint_op,
+ UpdateFeedbackMode update_feedback_mode) {
TVARIABLE(Object, var_value, value);
TVARIABLE(Object, var_result);
TVARIABLE(Float64T, var_float_value);
@@ -207,14 +213,16 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {
}
BIND(&end);
- UpdateFeedback(var_feedback.value(), maybe_feedback_vector, slot);
+ UpdateFeedback(var_feedback.value(), maybe_feedback_vector, slot,
+ update_feedback_mode);
return var_result.value();
}
template <Operation kOperation>
TNode<Object> IncrementOrDecrement(TNode<Context> context,
TNode<Object> value, TNode<UintPtrT> slot,
- TNode<HeapObject> maybe_feedback_vector) {
+ TNode<HeapObject> maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode) {
STATIC_ASSERT(kOperation == Operation::kIncrement ||
kOperation == Operation::kDecrement);
static constexpr int kAddValue =
@@ -245,7 +253,8 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {
SmiConstant(kOperation)));
};
return UnaryOpWithFeedback(context, value, slot, maybe_feedback_vector,
- smi_op, float_op, bigint_op);
+ smi_op, float_op, bigint_op,
+ update_feedback_mode);
}
};
@@ -253,30 +262,38 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {
TNode<Object> UnaryOpAssembler::Generate_BitwiseNotWithFeedback(
TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
- TNode<HeapObject> maybe_feedback_vector) {
+ TNode<HeapObject> maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode) {
UnaryOpAssemblerImpl a(state_);
- return a.BitwiseNot(context, value, slot, maybe_feedback_vector);
+ return a.BitwiseNot(context, value, slot, maybe_feedback_vector,
+ update_feedback_mode);
}
TNode<Object> UnaryOpAssembler::Generate_DecrementWithFeedback(
TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
- TNode<HeapObject> maybe_feedback_vector) {
+ TNode<HeapObject> maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode) {
UnaryOpAssemblerImpl a(state_);
- return a.Decrement(context, value, slot, maybe_feedback_vector);
+ return a.Decrement(context, value, slot, maybe_feedback_vector,
+ update_feedback_mode);
}
TNode<Object> UnaryOpAssembler::Generate_IncrementWithFeedback(
TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
- TNode<HeapObject> maybe_feedback_vector) {
+ TNode<HeapObject> maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode) {
UnaryOpAssemblerImpl a(state_);
- return a.Increment(context, value, slot, maybe_feedback_vector);
+ return a.Increment(context, value, slot, maybe_feedback_vector,
+ update_feedback_mode);
}
TNode<Object> UnaryOpAssembler::Generate_NegateWithFeedback(
TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
- TNode<HeapObject> maybe_feedback_vector) {
+ TNode<HeapObject> maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode) {
UnaryOpAssemblerImpl a(state_);
- return a.Negate(context, value, slot, maybe_feedback_vector);
+ return a.Negate(context, value, slot, maybe_feedback_vector,
+ update_feedback_mode);
}
} // namespace internal
diff --git a/deps/v8/src/ic/unary-op-assembler.h b/deps/v8/src/ic/unary-op-assembler.h
index 447806722d..7dc6079fc4 100644
--- a/deps/v8/src/ic/unary-op-assembler.h
+++ b/deps/v8/src/ic/unary-op-assembler.h
@@ -21,19 +21,23 @@ class UnaryOpAssembler final {
TNode<Object> Generate_BitwiseNotWithFeedback(
TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
- TNode<HeapObject> maybe_feedback_vector);
+ TNode<HeapObject> maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode);
TNode<Object> Generate_DecrementWithFeedback(
TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
- TNode<HeapObject> maybe_feedback_vector);
+ TNode<HeapObject> maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode);
TNode<Object> Generate_IncrementWithFeedback(
TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
- TNode<HeapObject> maybe_feedback_vector);
+ TNode<HeapObject> maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode);
TNode<Object> Generate_NegateWithFeedback(
TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
- TNode<HeapObject> maybe_feedback_vector);
+ TNode<HeapObject> maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode);
private:
compiler::CodeAssemblerState* const state_;
diff --git a/deps/v8/src/init/bootstrapper.cc b/deps/v8/src/init/bootstrapper.cc
index ce3886e87e..37833e8823 100644
--- a/deps/v8/src/init/bootstrapper.cc
+++ b/deps/v8/src/init/bootstrapper.cc
@@ -239,7 +239,8 @@ class Genesis {
bool InstallExtrasBindings();
Handle<JSFunction> InstallTypedArray(const char* name,
- ElementsKind elements_kind);
+ ElementsKind elements_kind,
+ InstanceType type);
void InitializeNormalizedMapCaches();
enum ExtensionTraversalState { UNVISITED, VISITED, INSTALLED };
@@ -296,10 +297,6 @@ class Genesis {
Handle<NativeContext> native_context_;
Handle<JSGlobalProxy> global_proxy_;
- // Temporary function maps needed only during bootstrapping.
- Handle<Map> strict_function_with_home_object_map_;
- Handle<Map> strict_function_with_name_and_home_object_map_;
-
// %ThrowTypeError%. See ES#sec-%throwtypeerror% for details.
Handle<JSFunction> restricted_properties_thrower_;
@@ -349,7 +346,7 @@ Handle<JSGlobalProxy> Bootstrapper::NewRemoteContext(
}
void Bootstrapper::LogAllMaps() {
- if (!FLAG_trace_maps || isolate_->initialized_from_snapshot()) return;
+ if (!FLAG_log_maps || isolate_->initialized_from_snapshot()) return;
// Log all created Map objects that are on the heap. For snapshots the Map
// logging happens during deserialization in order to avoid printing Maps
// multiple times during partial deserialization.
@@ -518,6 +515,24 @@ V8_NOINLINE Handle<JSFunction> InstallFunction(
instance_size, inobject_properties, prototype, call);
}
+// This installs an instance type (|constructor_type|) on the constructor map
+// which will be used for protector cell checks -- this is separate from |type|
+// which is used to set the instance type of the object created by this
+// constructor. If protector cell checks are not required, continue to use the
+// default JS_FUNCTION_TYPE by directly calling InstallFunction.
+V8_NOINLINE Handle<JSFunction> InstallConstructor(
+ Isolate* isolate, Handle<JSObject> target, const char* name,
+ InstanceType type, int instance_size, int inobject_properties,
+ Handle<HeapObject> prototype, Builtins::Name call,
+ InstanceType constructor_type) {
+ Handle<JSFunction> function = InstallFunction(
+ isolate, target, isolate->factory()->InternalizeUtf8String(name), type,
+ instance_size, inobject_properties, prototype, call);
+ DCHECK(InstanceTypeChecker::IsJSFunction(constructor_type));
+ function->map().set_instance_type(constructor_type);
+ return function;
+}
+
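As the comment on `InstallConstructor` above explains, the constructor function's map gets a dedicated instance type (separate from the instance type of the objects it creates) so protector-cell checks can identify built-in constructors cheaply. A heavily simplified, hypothetical sketch of why a type tag on the map makes that check a single comparison:

```cpp
#include <cstdint>
#include <iostream>

// Illustrative stand-ins only; these are not V8's real types or values.
enum InstanceType : uint16_t {
  JS_FUNCTION_TYPE,
  JS_ARRAY_CONSTRUCTOR_TYPE,
  JS_PROMISE_CONSTRUCTOR_TYPE,
};

struct Map { InstanceType instance_type; };
struct JSFunction { Map map; };

// With a dedicated constructor type, the check is one type comparison rather
// than a property or identity lookup.
bool IsArrayConstructor(const JSFunction& f) {
  return f.map.instance_type == JS_ARRAY_CONSTRUCTOR_TYPE;
}

int main() {
  JSFunction array_ctor{{JS_ARRAY_CONSTRUCTOR_TYPE}};
  JSFunction plain{{JS_FUNCTION_TYPE}};
  std::cout << IsArrayConstructor(array_ctor) << IsArrayConstructor(plain)
            << "\n";  // prints 10
}
```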
V8_NOINLINE Handle<JSFunction> SimpleCreateFunction(Isolate* isolate,
Handle<String> name,
Builtins::Name call,
@@ -774,13 +789,6 @@ void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
map = factory->CreateStrictFunctionMap(METHOD_WITH_NAME, empty);
native_context()->set_method_with_name_map(*map);
- map = factory->CreateStrictFunctionMap(METHOD_WITH_HOME_OBJECT, empty);
- native_context()->set_method_with_home_object_map(*map);
-
- map =
- factory->CreateStrictFunctionMap(METHOD_WITH_NAME_AND_HOME_OBJECT, empty);
- native_context()->set_method_with_name_and_home_object_map(*map);
-
//
// Allocate maps for strict functions with writable prototype.
//
@@ -792,12 +800,6 @@ void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
FUNCTION_WITH_NAME_AND_WRITEABLE_PROTOTYPE, empty);
native_context()->set_strict_function_with_name_map(*map);
- strict_function_with_home_object_map_ = factory->CreateStrictFunctionMap(
- FUNCTION_WITH_HOME_OBJECT_AND_WRITEABLE_PROTOTYPE, empty);
- strict_function_with_name_and_home_object_map_ =
- factory->CreateStrictFunctionMap(
- FUNCTION_WITH_NAME_AND_HOME_OBJECT_AND_WRITEABLE_PROTOTYPE, empty);
-
//
// Allocate maps for strict functions with readonly prototype.
//
@@ -960,12 +962,14 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
Handle<JSFunction> call_async_module_fulfilled =
SimpleCreateFunction(isolate(), factory()->empty_string(),
Builtins::kCallAsyncModuleFulfilled, 1, false);
+ call_async_module_fulfilled->shared().set_native(false);
native_context()->set_call_async_module_fulfilled(
*call_async_module_fulfilled);
Handle<JSFunction> call_async_module_rejected =
SimpleCreateFunction(isolate(), factory()->empty_string(),
Builtins::kCallAsyncModuleRejected, 1, false);
+ call_async_module_rejected->shared().set_native(false);
native_context()->set_call_async_module_rejected(
*call_async_module_rejected);
}
@@ -986,17 +990,6 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
generator_function_prototype, "GeneratorFunction with name");
native_context()->set_generator_function_with_name_map(*map);
- map = CreateNonConstructorMap(
- isolate(), strict_function_with_home_object_map_,
- generator_function_prototype, "GeneratorFunction with home object");
- native_context()->set_generator_function_with_home_object_map(*map);
-
- map = CreateNonConstructorMap(isolate(),
- strict_function_with_name_and_home_object_map_,
- generator_function_prototype,
- "GeneratorFunction with name and home object");
- native_context()->set_generator_function_with_name_and_home_object_map(*map);
-
Handle<JSFunction> object_function(native_context()->object_function(),
isolate());
Handle<Map> generator_object_prototype_map = Map::Create(isolate(), 0);
@@ -1100,19 +1093,6 @@ void Genesis::CreateAsyncIteratorMaps(Handle<JSFunction> empty) {
async_generator_function_prototype, "AsyncGeneratorFunction with name");
native_context()->set_async_generator_function_with_name_map(*map);
- map =
- CreateNonConstructorMap(isolate(), strict_function_with_home_object_map_,
- async_generator_function_prototype,
- "AsyncGeneratorFunction with home object");
- native_context()->set_async_generator_function_with_home_object_map(*map);
-
- map = CreateNonConstructorMap(
- isolate(), strict_function_with_name_and_home_object_map_,
- async_generator_function_prototype,
- "AsyncGeneratorFunction with name and home object");
- native_context()->set_async_generator_function_with_name_and_home_object_map(
- *map);
-
Handle<JSFunction> object_function(native_context()->object_function(),
isolate());
Handle<Map> async_generator_object_prototype_map = Map::Create(isolate(), 0);
@@ -1140,16 +1120,6 @@ void Genesis::CreateAsyncFunctionMaps(Handle<JSFunction> empty) {
"AsyncFunction with name");
Map::SetPrototype(isolate(), map, async_function_prototype);
native_context()->set_async_function_with_name_map(*map);
-
- map = Map::Copy(isolate(), isolate()->method_with_home_object_map(),
- "AsyncFunction with home object");
- Map::SetPrototype(isolate(), map, async_function_prototype);
- native_context()->set_async_function_with_home_object_map(*map);
-
- map = Map::Copy(isolate(), isolate()->method_with_name_and_home_object_map(),
- "AsyncFunction with name and home object");
- Map::SetPrototype(isolate(), map, async_function_prototype);
- native_context()->set_async_function_with_name_and_home_object_map(*map);
}
void Genesis::CreateJSProxyMaps() {
@@ -1690,9 +1660,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_->strict_function_map()->SetConstructor(*function_fun);
isolate_->strict_function_with_name_map()->SetConstructor(*function_fun);
- strict_function_with_home_object_map_->SetConstructor(*function_fun);
- strict_function_with_name_and_home_object_map_->SetConstructor(
- *function_fun);
isolate_->strict_function_with_readonly_prototype_map()->SetConstructor(
*function_fun);
@@ -1702,9 +1669,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> array_prototype_to_string_fun;
{ // --- A r r a y ---
- Handle<JSFunction> array_function = InstallFunction(
+ Handle<JSFunction> array_function = InstallConstructor(
isolate_, global, "Array", JS_ARRAY_TYPE, JSArray::kHeaderSize, 0,
- isolate_->initial_object_prototype(), Builtins::kArrayConstructor);
+ isolate_->initial_object_prototype(), Builtins::kArrayConstructor,
+ JS_ARRAY_CONSTRUCTOR_TYPE);
array_function->shared().DontAdaptArguments();
// This seems a bit hackish, but we need to make sure Array.length
@@ -2390,10 +2358,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // -- P r o m i s e
- Handle<JSFunction> promise_fun = InstallFunction(
+ Handle<JSFunction> promise_fun = InstallConstructor(
isolate_, global, "Promise", JS_PROMISE_TYPE,
JSPromise::kSizeWithEmbedderFields, 0, factory->the_hole_value(),
- Builtins::kPromiseConstructor);
+ Builtins::kPromiseConstructor, JS_PROMISE_CONSTRUCTOR_TYPE);
InstallWithIntrinsicDefaultProto(isolate_, promise_fun,
Context::PROMISE_FUNCTION_INDEX);
@@ -2453,14 +2421,13 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- R e g E x p
// Builtin functions for RegExp.prototype.
- Handle<JSFunction> regexp_fun = InstallFunction(
+ Handle<JSFunction> regexp_fun = InstallConstructor(
isolate_, global, "RegExp", JS_REG_EXP_TYPE,
JSRegExp::kHeaderSize + JSRegExp::kInObjectFieldCount * kTaggedSize,
JSRegExp::kInObjectFieldCount, factory->the_hole_value(),
- Builtins::kRegExpConstructor);
+ Builtins::kRegExpConstructor, JS_REG_EXP_CONSTRUCTOR_TYPE);
InstallWithIntrinsicDefaultProto(isolate_, regexp_fun,
Context::REGEXP_FUNCTION_INDEX);
-
Handle<SharedFunctionInfo> shared(regexp_fun->shared(), isolate_);
shared->set_internal_formal_parameter_count(2);
shared->set_length(2);
@@ -2638,12 +2605,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_regexp_last_match_info(*last_match_info);
// Install the species protector cell.
- {
- Handle<PropertyCell> cell =
- factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
- native_context()->set_regexp_species_protector(*cell);
- }
+ Handle<PropertyCell> cell = factory->NewProtector();
+ native_context()->set_regexp_species_protector(*cell);
DCHECK(regexp_fun->HasFastProperties());
}
@@ -3432,12 +3395,12 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{// -- T y p e d A r r a y s
-#define INSTALL_TYPED_ARRAY(Type, type, TYPE, ctype) \
- { \
- Handle<JSFunction> fun = \
- InstallTypedArray(#Type "Array", TYPE##_ELEMENTS); \
- InstallWithIntrinsicDefaultProto(isolate_, fun, \
- Context::TYPE##_ARRAY_FUN_INDEX); \
+#define INSTALL_TYPED_ARRAY(Type, type, TYPE, ctype) \
+ { \
+ Handle<JSFunction> fun = InstallTypedArray( \
+ #Type "Array", TYPE##_ELEMENTS, TYPE##_TYPED_ARRAY_CONSTRUCTOR_TYPE); \
+ InstallWithIntrinsicDefaultProto(isolate_, fun, \
+ Context::TYPE##_ARRAY_FUN_INDEX); \
}
TYPED_ARRAYS(INSTALL_TYPED_ARRAY)
#undef INSTALL_TYPED_ARRAY
@@ -4012,17 +3975,18 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
} // NOLINT(readability/fn_size)
Handle<JSFunction> Genesis::InstallTypedArray(const char* name,
- ElementsKind elements_kind) {
+ ElementsKind elements_kind,
+ InstanceType type) {
Handle<JSObject> global =
Handle<JSObject>(native_context()->global_object(), isolate());
Handle<JSObject> typed_array_prototype = isolate()->typed_array_prototype();
Handle<JSFunction> typed_array_function = isolate()->typed_array_function();
- Handle<JSFunction> result = InstallFunction(
+ Handle<JSFunction> result = InstallConstructor(
isolate(), global, name, JS_TYPED_ARRAY_TYPE,
JSTypedArray::kSizeWithEmbedderFields, 0, factory()->the_hole_value(),
- Builtins::kTypedArrayConstructor);
+ Builtins::kTypedArrayConstructor, type);
result->initial_map().set_elements_kind(elements_kind);
result->shared().DontAdaptArguments();
@@ -4352,6 +4316,8 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_sequence)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_top_level_await)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_logical_assignment)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_assertions)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_private_brand_checks)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_class_static_blocks)
#ifdef V8_INTL_SUPPORT
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_displaynames_date_types)
@@ -4470,13 +4436,31 @@ void Genesis::InitializeGlobal_harmony_weak_refs_with_cleanup_some() {
void Genesis::InitializeGlobal_harmony_regexp_match_indices() {
if (!FLAG_harmony_regexp_match_indices) return;
- // Add indices accessor to JSRegExpResult's initial map.
- Handle<Map> initial_map(native_context()->regexp_result_map(), isolate());
- Descriptor d = Descriptor::AccessorConstant(
- factory()->indices_string(), factory()->regexp_result_indices_accessor(),
- NONE);
- Map::EnsureDescriptorSlack(isolate(), initial_map, 1);
- initial_map->AppendDescriptor(isolate(), &d);
+ Handle<Map> source_map(native_context()->regexp_result_map(), isolate());
+ Handle<Map> initial_map =
+ Map::Copy(isolate(), source_map, "JSRegExpResult with indices");
+ initial_map->set_instance_size(JSRegExpResultWithIndices::kSize);
+ DCHECK_EQ(initial_map->GetInObjectProperties(),
+ JSRegExpResultWithIndices::kInObjectPropertyCount);
+
+ // indices descriptor
+ {
+ Descriptor d =
+ Descriptor::DataField(isolate(), factory()->indices_string(),
+ JSRegExpResultWithIndices::kIndicesIndex, NONE,
+ Representation::Tagged());
+ Map::EnsureDescriptorSlack(isolate(), initial_map, 1);
+ initial_map->AppendDescriptor(isolate(), &d);
+ }
+
+ native_context()->set_regexp_result_with_indices_map(*initial_map);
+
+ Handle<JSObject> prototype(native_context()->regexp_prototype(), isolate());
+ SimpleInstallGetter(isolate(), prototype, factory()->has_indices_string(),
+ Builtins::kRegExpPrototypeHasIndicesGetter, true);
+
+ // Store regexp prototype map again after change.
+ native_context()->set_regexp_prototype_map(prototype->map());
}
void Genesis::InitializeGlobal_harmony_string_replaceall() {
@@ -4610,6 +4594,9 @@ bool Genesis::InstallABunchOfRandomThings() {
native_context()->set_slow_template_instantiations_cache(
*slow_template_instantiations_cache);
+ auto wasm_debug_maps = isolate()->factory()->empty_fixed_array();
+ native_context()->set_wasm_debug_maps(*wasm_debug_maps);
+
// Store the map for the %ObjectPrototype% after the natives has been compiled
// and the Object function has been set up.
{
@@ -4819,16 +4806,6 @@ bool Genesis::InstallABunchOfRandomThings() {
{
PropertyAttributes attribs = DONT_ENUM;
- // cached_indices_or_regexp descriptor.
- {
- Descriptor d = Descriptor::DataField(
- isolate(),
- factory()->regexp_result_cached_indices_or_regexp_symbol(),
- JSRegExpResult::kCachedIndicesOrRegExpIndex, attribs,
- Representation::Tagged());
- initial_map->AppendDescriptor(isolate(), &d);
- }
-
// names descriptor.
{
Descriptor d = Descriptor::DataField(
@@ -5084,10 +5061,14 @@ bool Genesis::InstallExtension(Isolate* isolate,
return false;
}
}
- // We do not expect this to throw an exception. Change this if it does.
bool result = CompileExtension(isolate, extension);
- DCHECK(isolate->has_pending_exception() != result);
if (!result) {
+ // If this failed, it either threw an exception, or the isolate is
+ // terminating.
+ DCHECK(isolate->has_pending_exception() ||
+ (isolate->has_scheduled_exception() &&
+ isolate->scheduled_exception() ==
+ ReadOnlyRoots(isolate).termination_exception()));
    // We print out the name of the extension that failed to install.
// When an error is thrown during bootstrapping we automatically print
// the line number at which this happened to the console in the isolate
diff --git a/deps/v8/src/init/heap-symbols.h b/deps/v8/src/init/heap-symbols.h
index 5a2a3a8ac9..56b51314ab 100644
--- a/deps/v8/src/init/heap-symbols.h
+++ b/deps/v8/src/init/heap-symbols.h
@@ -168,15 +168,16 @@
V(_, defineProperty_string, "defineProperty") \
V(_, deleteProperty_string, "deleteProperty") \
V(_, disjunction_string, "disjunction") \
- V(_, display_name_string, "displayName") \
V(_, done_string, "done") \
V(_, dot_brand_string, ".brand") \
V(_, dot_catch_string, ".catch") \
V(_, dot_default_string, ".default") \
V(_, dot_for_string, ".for") \
V(_, dot_generator_object_string, ".generator_object") \
+ V(_, dot_home_object_string, ".home_object") \
V(_, dot_result_string, ".result") \
V(_, dot_repl_result_string, ".repl_result") \
+ V(_, dot_static_home_object_string, "._static_home_object") \
V(_, dot_string, ".") \
V(_, dot_switch_tag_string, ".switch_tag") \
V(_, dotAll_string, "dotAll") \
@@ -207,6 +208,7 @@
V(_, globalThis_string, "globalThis") \
V(_, groups_string, "groups") \
V(_, has_string, "has") \
+ V(_, has_indices_string, "hasIndices") \
V(_, ignoreCase_string, "ignoreCase") \
V(_, illegal_access_string, "illegal access") \
V(_, illegal_argument_string, "illegal argument") \
@@ -333,41 +335,40 @@
V(_, writable_string, "writable") \
V(_, zero_string, "0")
-#define PRIVATE_SYMBOL_LIST_GENERATOR(V, _) \
- V(_, call_site_frame_array_symbol) \
- V(_, call_site_frame_index_symbol) \
- V(_, console_context_id_symbol) \
- V(_, console_context_name_symbol) \
- V(_, class_fields_symbol) \
- V(_, class_positions_symbol) \
- V(_, detailed_stack_trace_symbol) \
- V(_, elements_transition_symbol) \
- V(_, error_end_pos_symbol) \
- V(_, error_script_symbol) \
- V(_, error_start_pos_symbol) \
- V(_, frozen_symbol) \
- V(_, home_object_symbol) \
- V(_, interpreter_trampoline_symbol) \
- V(_, megamorphic_symbol) \
- V(_, native_context_index_symbol) \
- V(_, nonextensible_symbol) \
- V(_, not_mapped_symbol) \
- V(_, promise_debug_marker_symbol) \
- V(_, promise_debug_message_symbol) \
- V(_, promise_forwarding_handler_symbol) \
- V(_, promise_handled_by_symbol) \
- V(_, regexp_result_cached_indices_or_regexp_symbol) \
- V(_, regexp_result_names_symbol) \
- V(_, regexp_result_regexp_input_symbol) \
- V(_, regexp_result_regexp_last_index_symbol) \
- V(_, sealed_symbol) \
- V(_, stack_trace_symbol) \
- V(_, strict_function_transition_symbol) \
- V(_, wasm_exception_tag_symbol) \
- V(_, wasm_exception_values_symbol) \
- V(_, wasm_uncatchable_symbol) \
- V(_, wasm_wrapped_object_symbol) \
- V(_, wasm_debug_proxy_cache_symbol) \
+#define PRIVATE_SYMBOL_LIST_GENERATOR(V, _) \
+ V(_, array_buffer_wasm_memory_symbol) \
+ V(_, call_site_frame_info_symbol) \
+ V(_, console_context_id_symbol) \
+ V(_, console_context_name_symbol) \
+ V(_, class_fields_symbol) \
+ V(_, class_positions_symbol) \
+ V(_, detailed_stack_trace_symbol) \
+ V(_, elements_transition_symbol) \
+ V(_, error_end_pos_symbol) \
+ V(_, error_script_symbol) \
+ V(_, error_start_pos_symbol) \
+ V(_, frozen_symbol) \
+ V(_, interpreter_trampoline_symbol) \
+ V(_, megamorphic_symbol) \
+ V(_, native_context_index_symbol) \
+ V(_, nonextensible_symbol) \
+ V(_, not_mapped_symbol) \
+ V(_, promise_debug_marker_symbol) \
+ V(_, promise_debug_message_symbol) \
+ V(_, promise_forwarding_handler_symbol) \
+ V(_, promise_handled_by_symbol) \
+ V(_, regexp_result_names_symbol) \
+ V(_, regexp_result_regexp_input_symbol) \
+ V(_, regexp_result_regexp_last_index_symbol) \
+ V(_, sealed_symbol) \
+ V(_, stack_trace_symbol) \
+ V(_, strict_function_transition_symbol) \
+ V(_, wasm_exception_tag_symbol) \
+ V(_, wasm_exception_values_symbol) \
+ V(_, wasm_uncatchable_symbol) \
+ V(_, wasm_wrapped_object_symbol) \
+ V(_, wasm_debug_proxy_cache_symbol) \
+ V(_, wasm_debug_proxy_names_symbol) \
V(_, uninitialized_symbol)
#define PUBLIC_SYMBOL_LIST_GENERATOR(V, _) \
@@ -448,7 +449,6 @@
F(MC_EVACUATE_UPDATE_POINTERS) \
F(MC_EVACUATE_UPDATE_POINTERS_PARALLEL) \
F(MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN) \
- F(MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAP_SPACE) \
F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
F(MC_FINISH_SWEEP_ARRAY_BUFFERS) \
@@ -509,7 +509,8 @@
F(SCAVENGER_SCAVENGE_WEAK) \
F(SCAVENGER_SCAVENGE_FINALIZE) \
F(SCAVENGER_SWEEP_ARRAY_BUFFERS) \
- F(STOP_THE_WORLD)
+ F(TIME_TO_SAFEPOINT) \
+ F(UNMAPPER)
#define TRACER_BACKGROUND_SCOPES(F) \
F(BACKGROUND_YOUNG_ARRAY_BUFFER_SWEEP) \
diff --git a/deps/v8/src/init/v8.cc b/deps/v8/src/init/v8.cc
index 2ca20ca262..921efe631b 100644
--- a/deps/v8/src/init/v8.cc
+++ b/deps/v8/src/init/v8.cc
@@ -57,6 +57,38 @@ void V8::TearDown() {
}
void V8::InitializeOncePerProcessImpl() {
+ // Update logging information before enforcing flag implications.
+ bool* log_all_flags[] = {&FLAG_turbo_profiling_log_builtins,
+ &FLAG_log_all,
+ &FLAG_log_api,
+ &FLAG_log_code,
+ &FLAG_log_code_disassemble,
+ &FLAG_log_handles,
+ &FLAG_log_suspect,
+ &FLAG_log_source_code,
+ &FLAG_log_function_events,
+ &FLAG_log_internal_timer_events,
+ &FLAG_log_deopt,
+ &FLAG_log_ic,
+ &FLAG_log_maps};
+ if (FLAG_log_all) {
+ // Enable all logging flags
+ for (auto* flag : log_all_flags) {
+ *flag = true;
+ }
+ FLAG_log = true;
+ } else if (!FLAG_log) {
+ // Enable --log if any log flag is set.
+ for (const auto* flag : log_all_flags) {
+ if (!*flag) continue;
+ FLAG_log = true;
+ break;
+ }
+ // Profiling flags depend on logging.
+ FLAG_log |= FLAG_perf_prof || FLAG_perf_basic_prof || FLAG_ll_prof ||
+ FLAG_prof || FLAG_prof_cpp;
+ }
+
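The block added above derives the aggregate logging flag before flag implications are enforced: `--log-all` switches every listed flag on, and otherwise `--log` is enabled as soon as any individual log flag (or a profiling flag) is set. A small standalone sketch of the same aggregation, using plain bools instead of V8's FLAG_* globals (the flag names here are illustrative):

```cpp
#include <iostream>

// Plain-bool stand-ins for a global flag set; one sub-flag is preset.
bool log_all = false, log_enabled = false;
bool log_api = false, log_code = true, log_maps = false;
bool prof = false, perf_prof = false;

void UpdateLogFlags() {
  bool* sub_flags[] = {&log_api, &log_code, &log_maps};
  if (log_all) {
    for (bool* flag : sub_flags) *flag = true;  // --log-all enables everything
    log_enabled = true;
  } else if (!log_enabled) {
    for (const bool* flag : sub_flags) {
      if (*flag) { log_enabled = true; break; }  // any sub-flag implies --log
    }
    log_enabled |= prof || perf_prof;  // profiling also requires logging
  }
}

int main() {
  UpdateLogFlags();
  std::cout << std::boolalpha << "log=" << log_enabled << "\n";  // log=true
}
```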
FlagList::EnforceFlagImplications();
if (FLAG_predictable && FLAG_random_seed == 0) {
@@ -87,7 +119,11 @@ void V8::InitializeOncePerProcessImpl() {
// TODO(jgruber): Remove this once / if wasm can run without executable
// memory.
if (FLAG_jitless && !FLAG_correctness_fuzzer_suppressions) {
+#if V8_ENABLE_WEBASSEMBLY
FLAG_expose_wasm = false;
+#else
+ STATIC_ASSERT(!FLAG_expose_wasm);
+#endif
}
if (FLAG_regexp_interpret_all && FLAG_regexp_tier_up) {
@@ -106,7 +142,7 @@ void V8::InitializeOncePerProcessImpl() {
if (FLAG_random_seed) SetRandomMmapSeed(FLAG_random_seed);
#if defined(V8_USE_PERFETTO)
- TrackEvent::Register();
+ if (perfetto::Tracing::IsInitialized()) TrackEvent::Register();
#endif
Isolate::InitializeOncePerProcess();
@@ -130,7 +166,6 @@ void V8::InitializePlatform(v8::Platform* platform) {
platform_ = platform;
v8::base::SetPrintStackTrace(platform_->GetStackTracePrinter());
v8::tracing::TracingCategoryObserver::SetUp();
- cppgc::InitializeProcess(platform->GetPageAllocator());
}
void V8::ShutdownPlatform() {
@@ -138,7 +173,6 @@ void V8::ShutdownPlatform() {
v8::tracing::TracingCategoryObserver::TearDown();
v8::base::SetPrintStackTrace(nullptr);
platform_ = nullptr;
- cppgc::ShutdownProcess();
}
v8::Platform* V8::GetCurrentPlatform() {
diff --git a/deps/v8/src/inspector/custom-preview.cc b/deps/v8/src/inspector/custom-preview.cc
index 393e0f15c5..d8e88861cb 100644
--- a/deps/v8/src/inspector/custom-preview.cc
+++ b/deps/v8/src/inspector/custom-preview.cc
@@ -249,7 +249,11 @@ void generateCustomPreview(int sessionId, const String16& groupName,
v8::Local<v8::Object> object,
v8::MaybeLocal<v8::Value> maybeConfig, int maxDepth,
std::unique_ptr<CustomPreview>* preview) {
- v8::Local<v8::Context> context = object->CreationContext();
+ v8::Local<v8::Context> context;
+ if (!object->GetCreationContext().ToLocal(&context)) {
+ return;
+ }
+
v8::Isolate* isolate = context->GetIsolate();
v8::MicrotasksScope microtasksScope(isolate,
v8::MicrotasksScope::kDoNotRunMicrotasks);
diff --git a/deps/v8/src/inspector/search-util.cc b/deps/v8/src/inspector/search-util.cc
index ec800007dd..72e97a3880 100644
--- a/deps/v8/src/inspector/search-util.cc
+++ b/deps/v8/src/inspector/search-util.cc
@@ -151,6 +151,7 @@ searchInTextByLinesImpl(V8InspectorSession* session, const String16& text,
scriptRegexpMatchesByLines(*regex.get(), text);
std::vector<std::unique_ptr<protocol::Debugger::SearchMatch>> result;
+ result.reserve(matches.size());
for (const auto& match : matches)
result.push_back(buildObjectForSearchMatch(match.first, match.second));
return result;
diff --git a/deps/v8/src/inspector/v8-console-message.cc b/deps/v8/src/inspector/v8-console-message.cc
index d878ca94a0..58a37073a8 100644
--- a/deps/v8/src/inspector/v8-console-message.cc
+++ b/deps/v8/src/inspector/v8-console-message.cc
@@ -208,7 +208,12 @@ void V8ConsoleMessage::setLocation(const String16& url, unsigned lineNumber,
unsigned columnNumber,
std::unique_ptr<V8StackTraceImpl> stackTrace,
int scriptId) {
- m_url = url;
+ const char* dataURIPrefix = "data:";
+ if (url.substring(0, strlen(dataURIPrefix)) == dataURIPrefix) {
+ m_url = String16();
+ } else {
+ m_url = url;
+ }
m_lineNumber = lineNumber;
m_columnNumber = columnNumber;
m_stackTrace = std::move(stackTrace);
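This change, like the StackFrame change further down, blanks out URLs that are data: URIs so large inline sources are not echoed back over the protocol. A minimal sketch of the prefix test with std::string (assuming the inspector's String16 behaves like an ordinary string here):

```cpp
#include <cstring>
#include <iostream>
#include <string>

// Returns the URL unchanged unless it is a data: URI, in which case an empty
// string is reported instead, mirroring the substring check above.
std::string ReportableUrl(const std::string& url) {
  static const char kDataURIPrefix[] = "data:";
  if (url.compare(0, std::strlen(kDataURIPrefix), kDataURIPrefix) == 0) {
    return std::string();
  }
  return url;
}

int main() {
  std::cout << ReportableUrl("https://example.com/app.js") << "\n";
  std::cout << "[" << ReportableUrl("data:text/javascript,42") << "]\n";  // []
}
```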
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index ee989e8931..cc464ebe04 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -74,6 +74,7 @@ class ConsoleHelper {
void reportCallWithDefaultArgument(ConsoleAPIType type,
const String16& message) {
std::vector<v8::Local<v8::Value>> arguments;
+ arguments.reserve(m_info.Length());
for (int i = 0; i < m_info.Length(); ++i) arguments.push_back(m_info[i]);
if (!m_info.Length()) arguments.push_back(toV8String(m_isolate, message));
reportCall(type, arguments);
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index f986938c2e..0a17f9e2f8 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -43,8 +43,10 @@ class MatchPrototypePredicate : public v8::debug::QueryObjectPredicate {
bool Filter(v8::Local<v8::Object> object) override {
if (object->IsModuleNamespaceObject()) return false;
- v8::Local<v8::Context> objectContext =
- v8::debug::GetCreationContext(object);
+ v8::Local<v8::Context> objectContext;
+ if (!v8::debug::GetCreationContext(object).ToLocal(&objectContext)) {
+ return false;
+ }
if (objectContext != m_context) return false;
if (!m_inspector->client()->isInspectableHeapObject(object)) return false;
// Get prototype chain for current object until first visited prototype.
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
index a845bcbae3..6472c5ca94 100644
--- a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
@@ -54,9 +54,13 @@ class GlobalObjectNameResolver final
: m_offset(0), m_strings(10000), m_session(session) {}
const char* GetName(v8::Local<v8::Object> object) override {
+ v8::Local<v8::Context> creationContext;
+ if (!object->GetCreationContext().ToLocal(&creationContext)) {
+ return "";
+ }
InspectedContext* context = m_session->inspector()->getContext(
m_session->contextGroupId(),
- InspectedContext::contextId(object->CreationContext()));
+ InspectedContext::contextId(creationContext));
if (!context) return "";
String16 name = context->origin();
size_t length = name.length();
@@ -286,7 +290,11 @@ Response V8HeapProfilerAgentImpl::getObjectByHeapObjectId(
if (!m_session->inspector()->client()->isInspectableHeapObject(heapObject))
return Response::ServerError("Object is not available");
- *result = m_session->wrapObject(heapObject->CreationContext(), heapObject,
+ v8::Local<v8::Context> creationContext;
+ if (!heapObject->GetCreationContext().ToLocal(&creationContext)) {
+ return Response::ServerError("Object is not available");
+ }
+ *result = m_session->wrapObject(creationContext, heapObject,
objectGroup.fromMaybe(""), false);
if (!*result) return Response::ServerError("Object is not available");
return Response::Success();
diff --git a/deps/v8/src/inspector/v8-inspector-impl.cc b/deps/v8/src/inspector/v8-inspector-impl.cc
index 3d51aa7f6a..7c10f19131 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-impl.cc
@@ -102,7 +102,8 @@ v8::MaybeLocal<v8::Value> V8InspectorImpl::compileAndRunInternalScript(
v8::MaybeLocal<v8::Script> V8InspectorImpl::compileScript(
v8::Local<v8::Context> context, const String16& code,
const String16& fileName) {
- v8::ScriptOrigin origin(toV8String(m_isolate, fileName), 0, 0, false);
+ v8::ScriptOrigin origin(m_isolate, toV8String(m_isolate, fileName), 0, 0,
+ false);
v8::ScriptCompiler::Source source(toV8String(m_isolate, code), origin);
return v8::ScriptCompiler::Compile(context, &source,
v8::ScriptCompiler::kNoCompileOptions);
@@ -365,9 +366,14 @@ std::shared_ptr<V8Inspector::Counters> V8InspectorImpl::enableCounters() {
return std::make_shared<Counters>(m_isolate);
}
-v8::Local<v8::Context> V8InspectorImpl::regexContext() {
- if (m_regexContext.IsEmpty())
+v8::MaybeLocal<v8::Context> V8InspectorImpl::regexContext() {
+ if (m_regexContext.IsEmpty()) {
m_regexContext.Reset(m_isolate, v8::Context::New(m_isolate));
+ if (m_regexContext.IsEmpty()) {
+ DCHECK(m_isolate->IsExecutionTerminating());
+ return {};
+ }
+ }
return m_regexContext.Get(m_isolate);
}
diff --git a/deps/v8/src/inspector/v8-inspector-impl.h b/deps/v8/src/inspector/v8-inspector-impl.h
index c5259b0c60..3884da844c 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-impl.h
@@ -75,7 +75,7 @@ class V8InspectorImpl : public V8Inspector {
v8::MaybeLocal<v8::Script> compileScript(v8::Local<v8::Context>,
const String16& code,
const String16& fileName);
- v8::Local<v8::Context> regexContext();
+ v8::MaybeLocal<v8::Context> regexContext();
// V8Inspector implementation.
std::unique_ptr<V8InspectorSession> connect(int contextGroupId,
diff --git a/deps/v8/src/inspector/v8-regex.cc b/deps/v8/src/inspector/v8-regex.cc
index 5f43d84e2c..55b00d50ae 100644
--- a/deps/v8/src/inspector/v8-regex.cc
+++ b/deps/v8/src/inspector/v8-regex.cc
@@ -18,7 +18,12 @@ V8Regex::V8Regex(V8InspectorImpl* inspector, const String16& pattern,
: m_inspector(inspector) {
v8::Isolate* isolate = m_inspector->isolate();
v8::HandleScope handleScope(isolate);
- v8::Local<v8::Context> context = m_inspector->regexContext();
+ v8::Local<v8::Context> context;
+ if (!m_inspector->regexContext().ToLocal(&context)) {
+ DCHECK(isolate->IsExecutionTerminating());
+ m_errorMessage = "terminated";
+ return;
+ }
v8::Context::Scope contextScope(context);
v8::TryCatch tryCatch(isolate);
@@ -48,7 +53,11 @@ int V8Regex::match(const String16& string, int startFrom,
v8::Isolate* isolate = m_inspector->isolate();
v8::HandleScope handleScope(isolate);
- v8::Local<v8::Context> context = m_inspector->regexContext();
+ v8::Local<v8::Context> context;
+ if (!m_inspector->regexContext().ToLocal(&context)) {
+ DCHECK(isolate->IsExecutionTerminating());
+ return -1;
+ }
v8::Context::Scope contextScope(context);
v8::MicrotasksScope microtasks(isolate,
v8::MicrotasksScope::kDoNotRunMicrotasks);
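`regexContext()` now returns a MaybeLocal because creating the lazily-allocated context can fail while the isolate is terminating, and each caller (the V8Regex constructor and `match()` above) bails out instead of dereferencing an empty handle. A hedged sketch of the same shape with std::optional standing in for MaybeLocal:

```cpp
#include <iostream>
#include <optional>
#include <string>

struct Context { std::string name; };

bool execution_terminating = false;  // stand-in for isolate termination state

// Lazily creates the shared context; creation can fail during termination.
std::optional<Context> GetRegexContext() {
  if (execution_terminating) return std::nullopt;
  return Context{"regex"};
}

int Match(const std::string& text) {
  std::optional<Context> context = GetRegexContext();
  if (!context) return -1;  // caller bails out, as V8Regex::match does
  return static_cast<int>(text.find('a'));
}

int main() {
  std::cout << Match("banana") << "\n";  // 1
  execution_terminating = true;
  std::cout << Match("banana") << "\n";  // -1
}
```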
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index db1d98a173..86bedc07ec 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -204,7 +204,12 @@ int StackFrame::columnNumber() const { return m_columnNumber; }
std::unique_ptr<protocol::Runtime::CallFrame> StackFrame::buildInspectorObject(
V8InspectorClient* client) const {
- String16 frameUrl = m_sourceURL;
+ String16 frameUrl;
+ const char* dataURIPrefix = "data:";
+ if (m_sourceURL.substring(0, strlen(dataURIPrefix)) != dataURIPrefix) {
+ frameUrl = m_sourceURL;
+ }
+
if (client && !m_hasSourceURLComment && frameUrl.length() > 0) {
std::unique_ptr<StringBuffer> url =
client->resourceNameToUrl(toStringView(m_sourceURL));
diff --git a/deps/v8/src/inspector/value-mirror.cc b/deps/v8/src/inspector/value-mirror.cc
index 0b76d9ee22..62744a8a9c 100644
--- a/deps/v8/src/inspector/value-mirror.cc
+++ b/deps/v8/src/inspector/value-mirror.cc
@@ -43,26 +43,6 @@ V8InternalValueType v8InternalValueTypeFrom(v8::Local<v8::Context> context,
return inspectedContext->getInternalType(value.As<v8::Object>());
}
-template <typename ResultType>
-ResultType unpackWasmValue(v8::Local<v8::Context> context,
- v8::Local<v8::Array> array) {
- ResultType result;
- constexpr int kSize = sizeof(result);
- uint8_t buffer[kSize];
- for (int i = 0; i < kSize; i++) {
- v8::Local<v8::Int32> i32 =
- array->Get(context, i).ToLocalChecked().As<v8::Int32>();
- buffer[i] = static_cast<uint8_t>(i32->Value());
- }
- memcpy(&result, buffer, kSize);
- return result;
-}
-
-// Partial list of Wasm's ValueType, copied here to avoid including internal
-// header. Using an unscoped enumeration here to allow implicit conversions from
-// int. Keep in sync with ValueType::Kind in wasm/value-type.h.
-enum WasmValueType { kStmt, kI32, kI64, kF32, kF64, kS128, kExternRef };
-
Response toProtocolValue(v8::Local<v8::Context> context,
v8::Local<v8::Value> value, int maxDepth,
std::unique_ptr<protocol::Value>* result) {
@@ -813,6 +793,8 @@ bool getPropertiesForPreview(v8::Local<v8::Context> context,
if (object->IsArray() || isArrayLike(context, object, &length) ||
object->IsStringObject()) {
blocklist.push_back("length");
+ } else if (v8::debug::WasmValueObject::IsWasmValueObject(object)) {
+ blocklist.push_back("type");
} else {
auto clientSubtype = clientFor(context)->valueSubtype(object);
if (clientSubtype && toString16(clientSubtype->string()) == "array") {
@@ -1227,14 +1209,24 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
bool formatAccessorsAsProperties =
clientFor(context)->formatAccessorsAsProperties(object);
- for (auto iterator = v8::debug::PropertyIterator::Create(object);
- !iterator->Done(); iterator->Advance()) {
+ auto iterator = v8::debug::PropertyIterator::Create(context, object);
+ if (!iterator) {
+ CHECK(tryCatch.HasCaught());
+ return false;
+ }
+ while (!iterator->Done()) {
bool isOwn = iterator->is_own();
if (!isOwn && ownProperties) break;
v8::Local<v8::Name> v8Name = iterator->name();
v8::Maybe<bool> result = set->Has(context, v8Name);
if (result.IsNothing()) return false;
- if (result.FromJust()) continue;
+ if (result.FromJust()) {
+ if (!iterator->Advance().FromMaybe(false)) {
+ CHECK(tryCatch.HasCaught());
+ return false;
+ }
+ continue;
+ }
if (!set->Add(context, v8Name).ToLocal(&set)) return false;
String16 name;
@@ -1330,6 +1322,11 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
std::move(symbolMirror),
std::move(exceptionMirror)};
if (!accumulator->Add(std::move(mirror))) return true;
+
+ if (!iterator->Advance().FromMaybe(false)) {
+ CHECK(tryCatch.HasCaught());
+ return false;
+ }
}
if (!shouldSkipProto && ownProperties && !object->IsProxy() &&
!accessorPropertiesOnly) {
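The property iteration above moves from a for-loop with an infallible `Advance()` to an explicit loop where `Advance()` returns a Maybe<bool> and any failed advance aborts with the caught exception propagated. A simplified sketch of that failure-aware iteration, with std::optional standing in for Maybe and a toy iterator in place of the debug PropertyIterator:

```cpp
#include <iostream>
#include <optional>
#include <vector>

// A toy iterator whose Advance() can fail (e.g. a getter threw).
class PropertyIterator {
 public:
  explicit PropertyIterator(std::vector<int> keys) : keys_(std::move(keys)) {}
  bool Done() const { return index_ >= keys_.size(); }
  int name() const { return keys_[index_]; }
  std::optional<bool> Advance() {
    if (keys_[index_] < 0) return std::nullopt;  // simulated failure
    ++index_;
    return true;
  }

 private:
  std::vector<int> keys_;
  size_t index_ = 0;
};

// Returns false if iteration failed part-way through.
bool CollectProperties(PropertyIterator it, std::vector<int>* out) {
  while (!it.Done()) {
    out->push_back(it.name());
    if (!it.Advance().value_or(false)) return false;  // bail on failure
  }
  return true;
}

int main() {
  std::vector<int> props;
  std::cout << CollectProperties(PropertyIterator({1, 2, 3}), &props) << "\n";
  std::cout << CollectProperties(PropertyIterator({1, -2, 3}), &props) << "\n";
}
```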
@@ -1696,6 +1693,13 @@ std::unique_ptr<ValueMirror> ValueMirror::create(v8::Local<v8::Context> context,
descriptionForCollection(
isolate, memory, memory->Buffer()->ByteLength() / kWasmPageSize));
}
+ if (v8::debug::WasmValueObject::IsWasmValueObject(value)) {
+ v8::Local<v8::debug::WasmValueObject> object =
+ value.As<v8::debug::WasmValueObject>();
+ return std::make_unique<ObjectMirror>(
+ value, RemoteObject::SubtypeEnum::Wasmvalue,
+ descriptionForObject(isolate, object));
+ }
V8InternalValueType internalType =
v8InternalValueTypeFrom(context, value.As<v8::Object>());
if (value->IsArray() && internalType == V8InternalValueType::kScopeList) {
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.cc b/deps/v8/src/interpreter/bytecode-array-accessor.cc
index 3dacc26070..7294255dbe 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.cc
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.cc
@@ -14,64 +14,30 @@ namespace v8 {
namespace internal {
namespace interpreter {
-namespace {
-
-class OnHeapBytecodeArray final : public AbstractBytecodeArray {
- public:
- explicit OnHeapBytecodeArray(Handle<BytecodeArray> bytecode_array)
- : array_(bytecode_array) {}
-
- int length() const override { return array_->length(); }
-
- int parameter_count() const override { return array_->parameter_count(); }
-
- uint8_t get(int index) const override { return array_->get(index); }
-
- void set(int index, uint8_t value) override {
- return array_->set(index, value);
- }
-
- Address GetFirstBytecodeAddress() const override {
- return array_->GetFirstBytecodeAddress();
- }
-
- Handle<Object> GetConstantAtIndex(int index,
- Isolate* isolate) const override {
- return handle(array_->constant_pool().get(index), isolate);
- }
-
- bool IsConstantAtIndexSmi(int index) const override {
- return array_->constant_pool().get(index).IsSmi();
- }
-
- Smi GetConstantAtIndexAsSmi(int index) const override {
- return Smi::cast(array_->constant_pool().get(index));
- }
-
- private:
- Handle<BytecodeArray> array_;
-};
-
-} // namespace
-
BytecodeArrayAccessor::BytecodeArrayAccessor(
- std::unique_ptr<AbstractBytecodeArray> bytecode_array, int initial_offset)
- : bytecode_array_(std::move(bytecode_array)),
- bytecode_length_(bytecode_array_->length()),
- bytecode_offset_(initial_offset),
+ Handle<BytecodeArray> bytecode_array, int initial_offset)
+ : bytecode_array_(bytecode_array),
+ start_(reinterpret_cast<uint8_t*>(
+ bytecode_array_->GetFirstBytecodeAddress())),
+ end_(start_ + bytecode_array_->length()),
+ cursor_(start_ + initial_offset),
operand_scale_(OperandScale::kSingle),
- prefix_offset_(0) {
+ prefix_size_(0),
+ local_heap_(LocalHeap::Current()
+ ? LocalHeap::Current()
+ : Isolate::Current()->main_thread_local_heap()) {
+ local_heap_->AddGCEpilogueCallback(UpdatePointersCallback, this);
UpdateOperandScale();
}
-BytecodeArrayAccessor::BytecodeArrayAccessor(
- Handle<BytecodeArray> bytecode_array, int initial_offset)
- : BytecodeArrayAccessor(
- std::make_unique<OnHeapBytecodeArray>(bytecode_array),
- initial_offset) {}
+BytecodeArrayAccessor::~BytecodeArrayAccessor() {
+ local_heap_->RemoveGCEpilogueCallback(UpdatePointersCallback, this);
+}
void BytecodeArrayAccessor::SetOffset(int offset) {
- bytecode_offset_ = offset;
+ if (offset < 0) return;
+ cursor_ = reinterpret_cast<uint8_t*>(
+ bytecode_array()->GetFirstBytecodeAddress() + offset);
UpdateOperandScale();
}
@@ -79,45 +45,16 @@ void BytecodeArrayAccessor::ApplyDebugBreak() {
// Get the raw bytecode from the bytecode array. This may give us a
// scaling prefix, which we can patch with the matching debug-break
// variant.
- interpreter::Bytecode bytecode =
- interpreter::Bytecodes::FromByte(bytecode_array()->get(bytecode_offset_));
+ uint8_t* cursor = cursor_ - prefix_size_;
+ interpreter::Bytecode bytecode = interpreter::Bytecodes::FromByte(*cursor);
if (interpreter::Bytecodes::IsDebugBreak(bytecode)) return;
interpreter::Bytecode debugbreak =
interpreter::Bytecodes::GetDebugBreak(bytecode);
- bytecode_array()->set(bytecode_offset_,
- interpreter::Bytecodes::ToByte(debugbreak));
-}
-
-void BytecodeArrayAccessor::UpdateOperandScale() {
- if (OffsetInBounds()) {
- uint8_t current_byte = bytecode_array()->get(bytecode_offset_);
- Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
- if (Bytecodes::IsPrefixScalingBytecode(current_bytecode)) {
- operand_scale_ =
- Bytecodes::PrefixBytecodeToOperandScale(current_bytecode);
- prefix_offset_ = 1;
- } else {
- operand_scale_ = OperandScale::kSingle;
- prefix_offset_ = 0;
- }
- }
-}
-
-bool BytecodeArrayAccessor::OffsetInBounds() const {
- return bytecode_offset_ >= 0 && bytecode_offset_ < bytecode_length_;
-}
-
-Bytecode BytecodeArrayAccessor::current_bytecode() const {
- DCHECK(OffsetInBounds());
- uint8_t current_byte =
- bytecode_array()->get(bytecode_offset_ + current_prefix_offset());
- Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
- DCHECK(!Bytecodes::IsPrefixScalingBytecode(current_bytecode));
- return current_bytecode;
+ *cursor = interpreter::Bytecodes::ToByte(debugbreak);
}
int BytecodeArrayAccessor::current_bytecode_size() const {
- return current_prefix_offset() +
+ return prefix_size_ +
Bytecodes::Size(current_bytecode(), current_operand_scale());
}
@@ -129,8 +66,7 @@ uint32_t BytecodeArrayAccessor::GetUnsignedOperand(
Bytecodes::GetOperandType(current_bytecode(), operand_index));
DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
Address operand_start =
- bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
- current_prefix_offset() +
+ reinterpret_cast<Address>(cursor_) +
Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
current_operand_scale());
return BytecodeDecoder::DecodeUnsignedOperand(operand_start, operand_type,
@@ -145,8 +81,7 @@ int32_t BytecodeArrayAccessor::GetSignedOperand(
Bytecodes::GetOperandType(current_bytecode(), operand_index));
DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
Address operand_start =
- bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
- current_prefix_offset() +
+ reinterpret_cast<Address>(cursor_) +
Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
current_operand_scale());
return BytecodeDecoder::DecodeSignedOperand(operand_start, operand_type,
@@ -207,14 +142,27 @@ Register BytecodeArrayAccessor::GetRegisterOperand(int operand_index) const {
OperandType operand_type =
Bytecodes::GetOperandType(current_bytecode(), operand_index);
Address operand_start =
- bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
- current_prefix_offset() +
+ reinterpret_cast<Address>(cursor_) +
Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
current_operand_scale());
return BytecodeDecoder::DecodeRegisterOperand(operand_start, operand_type,
current_operand_scale());
}
+std::pair<Register, Register> BytecodeArrayAccessor::GetRegisterPairOperand(
+ int operand_index) const {
+ Register first = GetRegisterOperand(operand_index);
+ Register second(first.index() + 1);
+ return std::make_pair(first, second);
+}
+
+RegisterList BytecodeArrayAccessor::GetRegisterListOperand(
+ int operand_index) const {
+ Register first = GetRegisterOperand(operand_index);
+ uint32_t count = GetRegisterCountOperand(operand_index + 1);
+ return RegisterList(first.index(), count);
+}
+
int BytecodeArrayAccessor::GetRegisterOperandRange(int operand_index) const {
DCHECK_LE(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
const OperandType* operand_types =
@@ -256,24 +204,32 @@ Runtime::FunctionId BytecodeArrayAccessor::GetIntrinsicIdOperand(
static_cast<IntrinsicsHelper::IntrinsicId>(raw_id));
}
+template <typename LocalIsolate>
Handle<Object> BytecodeArrayAccessor::GetConstantAtIndex(
- int index, Isolate* isolate) const {
- return bytecode_array()->GetConstantAtIndex(index, isolate);
+ int index, LocalIsolate* isolate) const {
+ return handle(bytecode_array()->constant_pool().get(index), isolate);
}
bool BytecodeArrayAccessor::IsConstantAtIndexSmi(int index) const {
- return bytecode_array()->IsConstantAtIndexSmi(index);
+ return bytecode_array()->constant_pool().get(index).IsSmi();
}
Smi BytecodeArrayAccessor::GetConstantAtIndexAsSmi(int index) const {
- return bytecode_array()->GetConstantAtIndexAsSmi(index);
+ return Smi::cast(bytecode_array()->constant_pool().get(index));
}
+template <typename LocalIsolate>
Handle<Object> BytecodeArrayAccessor::GetConstantForIndexOperand(
- int operand_index, Isolate* isolate) const {
+ int operand_index, LocalIsolate* isolate) const {
return GetConstantAtIndex(GetIndexOperand(operand_index), isolate);
}
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<Object> BytecodeArrayAccessor::GetConstantForIndexOperand(
+ int operand_index, Isolate* isolate) const;
+template Handle<Object> BytecodeArrayAccessor::GetConstantForIndexOperand(
+ int operand_index, LocalIsolate* isolate) const;
+
int BytecodeArrayAccessor::GetRelativeJumpTargetOffset() const {
Bytecode bytecode = current_bytecode();
if (interpreter::Bytecodes::IsJumpImmediate(bytecode)) {
@@ -312,21 +268,27 @@ JumpTableTargetOffsets BytecodeArrayAccessor::GetJumpTableTargetOffsets()
}
int BytecodeArrayAccessor::GetAbsoluteOffset(int relative_offset) const {
- return current_offset() + relative_offset + current_prefix_offset();
-}
-
-bool BytecodeArrayAccessor::OffsetWithinBytecode(int offset) const {
- return current_offset() <= offset &&
- offset < current_offset() + current_bytecode_size();
+ return current_offset() + relative_offset + prefix_size_;
}
std::ostream& BytecodeArrayAccessor::PrintTo(std::ostream& os) const {
- const uint8_t* bytecode_addr = reinterpret_cast<const uint8_t*>(
- bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_);
- return BytecodeDecoder::Decode(os, bytecode_addr,
+ return BytecodeDecoder::Decode(os, cursor_ - prefix_size_,
bytecode_array()->parameter_count());
}
+void BytecodeArrayAccessor::UpdatePointers() {
+ DisallowGarbageCollection no_gc;
+ uint8_t* start =
+ reinterpret_cast<uint8_t*>(bytecode_array_->GetFirstBytecodeAddress());
+ if (start != start_) {
+ start_ = start;
+ uint8_t* end = start + bytecode_array_->length();
+ size_t distance_to_end = end_ - cursor_;
+ cursor_ = end - distance_to_end;
+ end_ = end;
+ }
+}
+
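`UpdatePointers()` above is registered as a GC-epilogue callback: if the BytecodeArray moved, the raw cursor is rebased by preserving its distance from the end of the bytecode. A standalone sketch of that rebasing arithmetic over a relocatable byte buffer:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Raw iteration state over a buffer that may move, as a GC might move a
// BytecodeArray; start_/end_/cursor_ mirror the accessor's fields.
struct Cursor {
  uint8_t* start_;
  uint8_t* end_;
  uint8_t* cursor_;

  // Rebase the cursor after the underlying storage moved, keeping the same
  // distance from the end, as UpdatePointers() does.
  void UpdatePointers(uint8_t* new_start, size_t length) {
    if (new_start != start_) {
      uint8_t* new_end = new_start + length;
      size_t distance_to_end = end_ - cursor_;
      start_ = new_start;
      cursor_ = new_end - distance_to_end;
      end_ = new_end;
    }
  }
};

int main() {
  std::vector<uint8_t> storage(16, 0);
  Cursor c{storage.data(), storage.data() + storage.size(), storage.data() + 5};
  std::vector<uint8_t> moved = storage;  // simulate the buffer relocating
  c.UpdatePointers(moved.data(), moved.size());
  std::cout << (c.cursor_ - c.start_) << "\n";  // still offset 5
}
```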
JumpTableTargetOffsets::JumpTableTargetOffsets(
const BytecodeArrayAccessor* accessor, int table_start, int table_size,
int case_value_base)
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.h b/deps/v8/src/interpreter/bytecode-array-accessor.h
index 65bb2cdbd4..dc2a8c217a 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.h
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.h
@@ -67,45 +67,37 @@ class V8_EXPORT_PRIVATE JumpTableTargetOffsets final {
int case_value_base_;
};
-class V8_EXPORT_PRIVATE AbstractBytecodeArray {
- public:
- virtual int length() const = 0;
- virtual int parameter_count() const = 0;
- virtual uint8_t get(int index) const = 0;
- virtual void set(int index, uint8_t value) = 0;
- virtual Address GetFirstBytecodeAddress() const = 0;
-
- virtual Handle<Object> GetConstantAtIndex(int index,
- Isolate* isolate) const = 0;
- virtual bool IsConstantAtIndexSmi(int index) const = 0;
- virtual Smi GetConstantAtIndexAsSmi(int index) const = 0;
-
- virtual ~AbstractBytecodeArray() = default;
-};
-
class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
public:
- BytecodeArrayAccessor(std::unique_ptr<AbstractBytecodeArray> bytecode_array,
- int initial_offset);
-
BytecodeArrayAccessor(Handle<BytecodeArray> bytecode_array,
int initial_offset);
+ ~BytecodeArrayAccessor();
BytecodeArrayAccessor(const BytecodeArrayAccessor&) = delete;
BytecodeArrayAccessor& operator=(const BytecodeArrayAccessor&) = delete;
+ inline void Advance() {
+ cursor_ += Bytecodes::Size(current_bytecode(), current_operand_scale());
+ UpdateOperandScale();
+ }
void SetOffset(int offset);
+ void Reset() { SetOffset(0); }
void ApplyDebugBreak();
- Bytecode current_bytecode() const;
+ inline Bytecode current_bytecode() const {
+ DCHECK(!done());
+ uint8_t current_byte = *cursor_;
+ Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
+ DCHECK(!Bytecodes::IsPrefixScalingBytecode(current_bytecode));
+ return current_bytecode;
+ }
int current_bytecode_size() const;
- int current_offset() const { return bytecode_offset_; }
- OperandScale current_operand_scale() const { return operand_scale_; }
- int current_prefix_offset() const { return prefix_offset_; }
- AbstractBytecodeArray* bytecode_array() const {
- return bytecode_array_.get();
+ int current_offset() const {
+ return static_cast<int>(cursor_ - start_ - prefix_size_);
}
+ OperandScale current_operand_scale() const { return operand_scale_; }
+ Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
uint32_t GetFlagOperand(int operand_index) const;
uint32_t GetUnsignedImmediateOperand(int operand_index) const;
@@ -116,15 +108,19 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
Register GetParameter(int parameter_index) const;
uint32_t GetRegisterCountOperand(int operand_index) const;
Register GetRegisterOperand(int operand_index) const;
+ std::pair<Register, Register> GetRegisterPairOperand(int operand_index) const;
+ RegisterList GetRegisterListOperand(int operand_index) const;
int GetRegisterOperandRange(int operand_index) const;
Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const;
Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const;
uint32_t GetNativeContextIndexOperand(int operand_index) const;
- Handle<Object> GetConstantAtIndex(int offset, Isolate* isolate) const;
+ template <typename LocalIsolate>
+ Handle<Object> GetConstantAtIndex(int offset, LocalIsolate* isolate) const;
bool IsConstantAtIndexSmi(int offset) const;
Smi GetConstantAtIndexAsSmi(int offset) const;
+ template <typename LocalIsolate>
Handle<Object> GetConstantForIndexOperand(int operand_index,
- Isolate* isolate) const;
+ LocalIsolate* isolate) const;
// Returns the relative offset of the branch target at the current bytecode.
// It is an error to call this method if the bytecode is not for a jump or
@@ -143,26 +139,45 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
// from the current bytecode.
int GetAbsoluteOffset(int relative_offset) const;
- bool OffsetWithinBytecode(int offset) const;
-
std::ostream& PrintTo(std::ostream& os) const;
- int bytecode_length() const { return bytecode_length_; }
+ static void UpdatePointersCallback(void* accessor) {
+ reinterpret_cast<BytecodeArrayAccessor*>(accessor)->UpdatePointers();
+ }
- private:
- bool OffsetInBounds() const;
+ void UpdatePointers();
+ inline bool done() const { return cursor_ >= end_; }
+
+ private:
uint32_t GetUnsignedOperand(int operand_index,
OperandType operand_type) const;
int32_t GetSignedOperand(int operand_index, OperandType operand_type) const;
- void UpdateOperandScale();
+ inline void UpdateOperandScale() {
+ if (done()) return;
+ uint8_t current_byte = *cursor_;
+ Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
+ if (Bytecodes::IsPrefixScalingBytecode(current_bytecode)) {
+ operand_scale_ =
+ Bytecodes::PrefixBytecodeToOperandScale(current_bytecode);
+ ++cursor_;
+ prefix_size_ = 1;
+ } else {
+ operand_scale_ = OperandScale::kSingle;
+ prefix_size_ = 0;
+ }
+ }
- std::unique_ptr<AbstractBytecodeArray> bytecode_array_;
- const int bytecode_length_;
- int bytecode_offset_;
+ Handle<BytecodeArray> bytecode_array_;
+ uint8_t* start_;
+ uint8_t* end_;
+ // The cursor always points to the active bytecode. If there's a prefix, the
+ // prefix is at (cursor - 1).
+ uint8_t* cursor_;
OperandScale operand_scale_;
- int prefix_offset_;
+ int prefix_size_;
+ LocalHeap* const local_heap_;
};
} // namespace interpreter
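With the cursor-based accessor, Advance() is just a pointer bump plus a re-read of any operand-scale prefix, done() is a comparison against end_, and UpdatePointers() re-bases start_/end_/cursor_ if the BytecodeArray moves. A minimal sketch of the resulting iteration pattern over a bytecode array (V8-internal headers and a valid handle are assumed; this is an illustration, not code from the patch):

// Counts Return bytecodes using the cursor-based iterator; illustrative only.
#include "src/interpreter/bytecode-array-iterator.h"

namespace v8 {
namespace internal {
namespace interpreter {

int CountReturns(Handle<BytecodeArray> bytecodes) {
  int returns = 0;
  // done() compares cursor_ with end_; Advance() steps cursor_ by the size of
  // the current bytecode and re-reads the operand scale from any prefix.
  for (BytecodeArrayIterator it(bytecodes); !it.done(); it.Advance()) {
    if (it.current_bytecode() == Bytecode::kReturn) ++returns;
  }
  return returns;
}

}  // namespace interpreter
}  // namespace internal
}  // namespace v8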
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 2511c87e3c..63c07683e6 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -197,7 +197,10 @@ void BytecodeArrayBuilder::OutputLdarRaw(Register reg) {
void BytecodeArrayBuilder::OutputStarRaw(Register reg) {
uint32_t operand = static_cast<uint32_t>(reg.ToOperand());
- BytecodeNode node(BytecodeNode::Star(BytecodeSourceInfo(), operand));
+ base::Optional<Bytecode> short_code = reg.TryToShortStar();
+ BytecodeNode node = short_code
+ ? BytecodeNode(*short_code)
+ : BytecodeNode::Star(BytecodeSourceInfo(), operand);
Write(&node);
}
@@ -327,7 +330,7 @@ class OperandHelper<OperandType::kRegOutTriple> {
} // namespace
-template <Bytecode bytecode, AccumulatorUse accumulator_use,
+template <Bytecode bytecode, ImplicitRegisterUse implicit_register_use,
OperandType... operand_types>
class BytecodeNodeBuilder {
public:
@@ -336,7 +339,7 @@ class BytecodeNodeBuilder {
Operands... operands) {
static_assert(sizeof...(Operands) <= Bytecodes::kMaxOperands,
"too many operands for bytecode");
- builder->PrepareToOutputBytecode<bytecode, accumulator_use>();
+ builder->PrepareToOutputBytecode<bytecode, implicit_register_use>();
// The "OperandHelper<operand_types>::Convert(builder, operands)..." will
// expand both the OperandType... and Operands... parameter packs e.g. for:
// BytecodeNodeBuilder<OperandType::kReg, OperandType::kImm>::Make<
@@ -344,7 +347,8 @@ class BytecodeNodeBuilder {
// the code will expand into:
// OperandHelper<OperandType::kReg>::Convert(builder, reg),
// OperandHelper<OperandType::kImm>::Convert(builder, immediate),
- return BytecodeNode::Create<bytecode, accumulator_use, operand_types...>(
+ return BytecodeNode::Create<bytecode, implicit_register_use,
+ operand_types...>(
builder->CurrentSourcePosition(bytecode),
OperandHelper<operand_types>::Convert(builder, operands)...);
}
@@ -645,18 +649,6 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(AstBigInt bigint) {
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(AstSymbol symbol) {
- size_t entry;
- switch (symbol) {
- case AstSymbol::kHomeObjectSymbol:
- entry = HomeObjectSymbolConstantPoolEntry();
- break;
- // No default case so that we get a warning if AstSymbol changes
- }
- OutputLdaConstant(entry);
- return *this;
-}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadUndefined() {
OutputLdaUndefined();
return *this;
@@ -707,7 +699,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
SetDeferredSourceInfo(CurrentSourcePosition(Bytecode::kStar));
register_optimizer_->DoStar(reg);
} else {
- OutputStar(reg);
+ OutputStarRaw(reg);
}
return *this;
}
@@ -941,12 +933,6 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreInArrayLiteral(
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::StoreHomeObjectProperty(
- Register object, int feedback_slot, LanguageMode language_mode) {
- size_t name_index = HomeObjectSymbolConstantPoolEntry();
- return StoreNamedProperty(object, name_index, feedback_slot, language_mode);
-}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreClassFieldsInitializer(
Register constructor, int feedback_slot) {
size_t name_index = ClassFieldsSymbolConstantPoolEntry();
@@ -1616,10 +1602,10 @@ bool BytecodeArrayBuilder::RegisterListIsValid(RegisterList reg_list) const {
}
}
-template <Bytecode bytecode, AccumulatorUse accumulator_use>
+template <Bytecode bytecode, ImplicitRegisterUse implicit_register_use>
void BytecodeArrayBuilder::PrepareToOutputBytecode() {
if (register_optimizer_)
- register_optimizer_->PrepareForBytecode<bytecode, accumulator_use>();
+ register_optimizer_->PrepareForBytecode<bytecode, implicit_register_use>();
}
uint32_t BytecodeArrayBuilder::GetInputRegisterOperand(Register reg) {
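OutputStarRaw now emits one of the dedicated single-byte Star0..StarN bytecodes when the target register index is small enough, falling back to the generic Star with a register operand otherwise; the index-to-opcode mapping is the one TryToShortStar implements in the bytecode-register.h diff further below. A rough sketch of that mapping (illustrative only; kShortStarCount stands for however many short Star opcodes are defined):

// Illustrative mapping from a register index to a short Star bytecode.
// Register i (0 <= i < Bytecodes::kShortStarCount) maps to kStar0 - i, so
// Star0 stores the accumulator to r0, Star1 to r1, and so on.
base::Optional<Bytecode> ShortStarFor(int register_index) {
  if (register_index >= 0 && register_index < Bytecodes::kShortStarCount) {
    return static_cast<Bytecode>(static_cast<int>(Bytecode::kStar0) -
                                 register_index);
  }
  return {};  // No short form: emit the generic Star with an operand.
}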
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index b03cebdd60..cb1c92e3f8 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -90,7 +90,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& LoadLiteral(const AstRawString* raw_string);
BytecodeArrayBuilder& LoadLiteral(const Scope* scope);
BytecodeArrayBuilder& LoadLiteral(AstBigInt bigint);
- BytecodeArrayBuilder& LoadLiteral(AstSymbol symbol);
BytecodeArrayBuilder& LoadUndefined();
BytecodeArrayBuilder& LoadNull();
BytecodeArrayBuilder& LoadTheHole();
@@ -200,11 +199,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
// in the accumulator.
BytecodeArrayBuilder& StoreInArrayLiteral(Register array, Register index,
int feedback_slot);
- // Store the home object property. The value to be stored should be in the
- // accumulator.
- BytecodeArrayBuilder& StoreHomeObjectProperty(Register object,
- int feedback_slot,
- LanguageMode language_mode);
// Store the class fields property. The initializer to be stored should
// be in the accumulator.
@@ -573,7 +567,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
private:
friend class BytecodeRegisterAllocator;
- template <Bytecode bytecode, AccumulatorUse accumulator_use,
+ template <Bytecode bytecode, ImplicitRegisterUse implicit_register_use,
OperandType... operand_types>
friend class BytecodeNodeBuilder;
@@ -619,7 +613,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
// during bytecode generation.
BytecodeArrayBuilder& Illegal();
- template <Bytecode bytecode, AccumulatorUse accumulator_use>
+ template <Bytecode bytecode, ImplicitRegisterUse implicit_register_use>
void PrepareToOutputBytecode();
BytecodeArrayWriter* bytecode_array_writer() {
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.cc b/deps/v8/src/interpreter/bytecode-array-iterator.cc
index 1232777fd1..c90ed56f17 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.cc
@@ -11,21 +11,9 @@ namespace internal {
namespace interpreter {
BytecodeArrayIterator::BytecodeArrayIterator(
- std::unique_ptr<AbstractBytecodeArray> bytecode_array)
- : BytecodeArrayAccessor(std::move(bytecode_array), 0) {}
-
-BytecodeArrayIterator::BytecodeArrayIterator(
Handle<BytecodeArray> bytecode_array)
: BytecodeArrayAccessor(bytecode_array, 0) {}
-void BytecodeArrayIterator::Advance() {
- SetOffset(current_offset() + current_bytecode_size());
-}
-
-bool BytecodeArrayIterator::done() const {
- return current_offset() >= bytecode_length();
-}
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.h b/deps/v8/src/interpreter/bytecode-array-iterator.h
index 58b0b1a55a..37fa228236 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.h
@@ -16,15 +16,10 @@ namespace interpreter {
class V8_EXPORT_PRIVATE BytecodeArrayIterator final
: public BytecodeArrayAccessor {
public:
- explicit BytecodeArrayIterator(std::unique_ptr<AbstractBytecodeArray> array);
-
explicit BytecodeArrayIterator(Handle<BytecodeArray> array);
BytecodeArrayIterator(const BytecodeArrayIterator&) = delete;
BytecodeArrayIterator& operator=(const BytecodeArrayIterator&) = delete;
-
- void Advance();
- bool done() const;
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-array-random-iterator.cc b/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
index 9c94cc4c84..4dca16effe 100644
--- a/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
@@ -22,7 +22,7 @@ void BytecodeArrayRandomIterator::Initialize() {
// bytecode.
while (current_offset() < bytecode_array()->length()) {
offsets_.push_back(current_offset());
- SetOffset(current_offset() + current_bytecode_size());
+ Advance();
}
GoToStart();
}
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index 840e788b94..0172d3626b 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -253,7 +253,8 @@ void BytecodeArrayWriter::MaybeElideLastBytecode(Bytecode next_bytecode,
// and the next bytecode clobbers this load without reading the accumulator,
// then the previous bytecode can be elided as it has no effect.
if (Bytecodes::IsAccumulatorLoadWithoutEffects(last_bytecode_) &&
- Bytecodes::GetAccumulatorUse(next_bytecode) == AccumulatorUse::kWrite &&
+ Bytecodes::GetImplicitRegisterUse(next_bytecode) ==
+ ImplicitRegisterUse::kWriteAccumulator &&
(!last_bytecode_had_source_info_ || !has_source_info)) {
DCHECK_GT(bytecodes()->size(), last_bytecode_offset_);
bytecodes()->resize(last_bytecode_offset_);
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index f26d431057..233ad6d4e0 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -36,7 +36,8 @@ namespace interpreter {
// popping of the current {context_register} during visitation.
class V8_NODISCARD BytecodeGenerator::ContextScope {
public:
- ContextScope(BytecodeGenerator* generator, Scope* scope)
+ ContextScope(BytecodeGenerator* generator, Scope* scope,
+ Register outer_context_reg = Register())
: generator_(generator),
scope_(scope),
outer_(generator_->execution_context()),
@@ -47,8 +48,9 @@ class V8_NODISCARD BytecodeGenerator::ContextScope {
depth_ = outer_->depth_ + 1;
// Push the outer context into a new context register.
- Register outer_context_reg =
- generator_->register_allocator()->NewRegister();
+ if (!outer_context_reg.is_valid()) {
+ outer_context_reg = generator_->register_allocator()->NewRegister();
+ }
outer_->set_register(outer_context_reg);
generator_->builder()->PushContext(outer_context_reg);
}
@@ -64,6 +66,9 @@ class V8_NODISCARD BytecodeGenerator::ContextScope {
generator_->set_execution_context(outer_);
}
+ ContextScope(const ContextScope&) = delete;
+ ContextScope& operator=(const ContextScope&) = delete;
+
// Returns the depth of the given |scope| for the current execution context.
int ContextChainDepth(Scope* scope) {
return scope_->ContextChainLength(scope);
@@ -853,12 +858,75 @@ class V8_NODISCARD BytecodeGenerator::CurrentScope final {
generator_->set_current_scope(outer_scope_);
}
}
+ CurrentScope(const CurrentScope&) = delete;
+ CurrentScope& operator=(const CurrentScope&) = delete;
private:
BytecodeGenerator* generator_;
Scope* outer_scope_;
};
+class V8_NODISCARD BytecodeGenerator::MultipleEntryBlockContextScope {
+ public:
+ MultipleEntryBlockContextScope(BytecodeGenerator* generator, Scope* scope)
+ : generator_(generator), scope_(scope), is_in_scope_(false) {
+ if (scope) {
+ inner_context_ = generator->register_allocator()->NewRegister();
+ outer_context_ = generator->register_allocator()->NewRegister();
+ generator->BuildNewLocalBlockContext(scope_);
+ generator->builder()->StoreAccumulatorInRegister(inner_context_);
+ }
+ }
+
+ void SetEnteredIf(bool condition) {
+ RegisterAllocationScope register_scope(generator_);
+ if (condition && scope_ != nullptr && !is_in_scope_) {
+ EnterScope();
+ } else if (!condition && is_in_scope_) {
+ ExitScope();
+ }
+ }
+
+ MultipleEntryBlockContextScope(const MultipleEntryBlockContextScope&) =
+ delete;
+ MultipleEntryBlockContextScope& operator=(
+ const MultipleEntryBlockContextScope&) = delete;
+
+ private:
+ void EnterScope() {
+ DCHECK(inner_context_.is_valid());
+ DCHECK(outer_context_.is_valid());
+ DCHECK(!is_in_scope_);
+ Register temp = generator_->register_allocator()->NewRegister();
+ generator_->builder()->StoreAccumulatorInRegister(temp);
+ generator_->builder()->LoadAccumulatorWithRegister(inner_context_);
+ current_scope_.emplace(generator_, scope_);
+ context_scope_.emplace(generator_, scope_, outer_context_);
+ generator_->builder()->LoadAccumulatorWithRegister(temp);
+ is_in_scope_ = true;
+ }
+
+ void ExitScope() {
+ DCHECK(inner_context_.is_valid());
+ DCHECK(outer_context_.is_valid());
+ DCHECK(is_in_scope_);
+ Register temp = generator_->register_allocator()->NewRegister();
+ generator_->builder()->StoreAccumulatorInRegister(temp);
+ context_scope_ = base::nullopt;
+ current_scope_ = base::nullopt;
+ generator_->builder()->LoadAccumulatorWithRegister(temp);
+ is_in_scope_ = false;
+ }
+
+ BytecodeGenerator* generator_;
+ Scope* scope_;
+ Register inner_context_;
+ Register outer_context_;
+ bool is_in_scope_;
+ base::Optional<CurrentScope> current_scope_;
+ base::Optional<ContextScope> context_scope_;
+};
+
class BytecodeGenerator::FeedbackSlotCache : public ZoneObject {
public:
enum class SlotKind {
@@ -2145,7 +2213,7 @@ void BytecodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
}
void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
- DCHECK(expr->scope()->outer_scope() == current_scope());
+ DCHECK_EQ(expr->scope()->outer_scope(), current_scope());
uint8_t flags = CreateClosureFlags::Encode(
expr->pretenure(), closure_scope()->is_function_scope(),
info()->flags().might_always_opt());
@@ -2220,9 +2288,10 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
break;
}
case ClassLiteral::Property::METHOD: {
- // We can initialize the private methods and accessors later so that the
- // home objects can be assigned right after the creation of the
- // closures, and those are guarded by the brand checks.
+ RegisterAllocationScope register_scope(this);
+ VisitForAccumulatorValue(property->value());
+ BuildVariableAssignment(property->private_name_var(), Token::INIT,
+ HoleCheckMode::kElided);
break;
}
// Collect private accessors into a table to merge the creation of
@@ -2317,6 +2386,24 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
Register prototype = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(prototype);
+ // Assign to the home object variable. Accumulator already contains the
+ // prototype.
+ Variable* home_object_variable = expr->home_object();
+ if (home_object_variable != nullptr) {
+ DCHECK(home_object_variable->is_used());
+ DCHECK(home_object_variable->IsContextSlot());
+ BuildVariableAssignment(home_object_variable, Token::INIT,
+ HoleCheckMode::kElided);
+ }
+ Variable* static_home_object_variable = expr->static_home_object();
+ if (static_home_object_variable != nullptr) {
+ DCHECK(static_home_object_variable->is_used());
+ DCHECK(static_home_object_variable->IsContextSlot());
+ builder()->LoadAccumulatorWithRegister(class_constructor);
+ BuildVariableAssignment(static_home_object_variable, Token::INIT,
+ HoleCheckMode::kElided);
+ }
+
// Assign to class variable.
Variable* class_variable = expr->scope()->class_variable();
if (class_variable != nullptr && class_variable->is_used()) {
@@ -2326,42 +2413,16 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
HoleCheckMode::kElided);
}
- // Create the closures of private methods, and store the home object for
- // any private methods that need them.
- if (expr->has_private_methods()) {
- for (int i = 0; i < expr->private_members()->length(); i++) {
- ClassLiteral::Property* property = expr->private_members()->at(i);
- if (property->kind() != ClassLiteral::Property::METHOD) {
- continue;
- }
- RegisterAllocationScope register_scope(this);
- VisitForAccumulatorValue(property->value());
- BuildVariableAssignment(property->private_name_var(), Token::INIT,
- HoleCheckMode::kElided);
- Register home_object = property->private_name_var()->is_static()
- ? class_constructor
- : prototype;
- if (property->NeedsHomeObjectOnClassPrototype()) {
- Register func = register_allocator()->NewRegister();
- builder()->StoreAccumulatorInRegister(func);
- VisitSetHomeObject(func, home_object, property);
- }
- }
- }
-
// Define private accessors, using only a single call to the runtime for
// each pair of corresponding getters and setters, in the order the first
- // component is declared. Store the home objects if necessary.
+ // component is declared.
for (auto accessors : private_accessors.ordered_accessors()) {
RegisterAllocationScope inner_register_scope(this);
RegisterList accessors_reg = register_allocator()->NewRegisterList(2);
ClassLiteral::Property* getter = accessors.second->getter;
ClassLiteral::Property* setter = accessors.second->setter;
- bool is_static =
- getter != nullptr ? getter->is_static() : setter->is_static();
- Register home_object = is_static ? class_constructor : prototype;
- VisitLiteralAccessor(home_object, getter, accessors_reg[0]);
- VisitLiteralAccessor(home_object, setter, accessors_reg[1]);
+ VisitLiteralAccessor(getter, accessors_reg[0]);
+ VisitLiteralAccessor(setter, accessors_reg[1]);
builder()->CallRuntime(Runtime::kCreatePrivateAccessors, accessors_reg);
Variable* var = getter != nullptr ? getter->private_name_var()
: setter->private_name_var();
@@ -2373,13 +2434,6 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
Register initializer =
VisitForRegisterValue(expr->instance_members_initializer_function());
- if (FunctionLiteral::NeedsHomeObject(
- expr->instance_members_initializer_function())) {
- FeedbackSlot slot = feedback_spec()->AddStoreICSlot(language_mode());
- builder()->LoadAccumulatorWithRegister(prototype).StoreHomeObjectProperty(
- initializer, feedback_index(slot), language_mode());
- }
-
FeedbackSlot slot = feedback_spec()->AddStoreICSlot(language_mode());
builder()
->LoadAccumulatorWithRegister(initializer)
@@ -2387,7 +2441,7 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
.LoadAccumulatorWithRegister(class_constructor);
}
- if (expr->static_fields_initializer() != nullptr) {
+ if (expr->static_initializer() != nullptr) {
// TODO(gsathya): This can be optimized away to be a part of the
// class boilerplate in the future. The name argument can be
// passed to the DefineClass runtime function and have it set
@@ -2407,16 +2461,7 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
}
RegisterList args = register_allocator()->NewRegisterList(1);
- Register initializer =
- VisitForRegisterValue(expr->static_fields_initializer());
-
- if (FunctionLiteral::NeedsHomeObject(expr->static_fields_initializer())) {
- FeedbackSlot slot = feedback_spec()->AddStoreICSlot(language_mode());
- builder()
- ->LoadAccumulatorWithRegister(class_constructor)
- .StoreHomeObjectProperty(initializer, feedback_index(slot),
- language_mode());
- }
+ Register initializer = VisitForRegisterValue(expr->static_initializer());
builder()
->MoveRegister(class_constructor, args[0])
@@ -2442,47 +2487,64 @@ void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr, Register name) {
}
}
-void BytecodeGenerator::VisitInitializeClassMembersStatement(
- InitializeClassMembersStatement* stmt) {
+void BytecodeGenerator::BuildClassProperty(ClassLiteral::Property* property) {
+ RegisterAllocationScope register_scope(this);
RegisterList args = register_allocator()->NewRegisterList(3);
Register constructor = args[0], key = args[1], value = args[2];
builder()->MoveRegister(builder()->Receiver(), constructor);
- for (int i = 0; i < stmt->fields()->length(); i++) {
- ClassLiteral::Property* property = stmt->fields()->at(i);
- // Private methods are not initialized in the
- // InitializeClassMembersStatement.
- DCHECK_IMPLIES(property->is_private(),
- property->kind() == ClassLiteral::Property::FIELD);
-
- if (property->is_computed_name()) {
- DCHECK_EQ(property->kind(), ClassLiteral::Property::FIELD);
- DCHECK(!property->is_private());
- Variable* var = property->computed_name_var();
- DCHECK_NOT_NULL(var);
- // The computed name is already evaluated and stored in a
- // variable at class definition time.
- BuildVariableLoad(var, HoleCheckMode::kElided);
- builder()->StoreAccumulatorInRegister(key);
- } else if (property->is_private()) {
- Variable* private_name_var = property->private_name_var();
- DCHECK_NOT_NULL(private_name_var);
- BuildVariableLoad(private_name_var, HoleCheckMode::kElided);
- builder()->StoreAccumulatorInRegister(key);
- } else {
- BuildLoadPropertyKey(property, key);
- }
+ // Private methods are not initialized in BuildClassProperty.
+ DCHECK_IMPLIES(property->is_private(),
+ property->kind() == ClassLiteral::Property::FIELD);
- builder()->SetExpressionAsStatementPosition(property->value());
- VisitForRegisterValue(property->value(), value);
- VisitSetHomeObject(value, constructor, property);
+ if (property->is_computed_name()) {
+ DCHECK_EQ(property->kind(), ClassLiteral::Property::FIELD);
+ DCHECK(!property->is_private());
+ Variable* var = property->computed_name_var();
+ DCHECK_NOT_NULL(var);
+ // The computed name is already evaluated and stored in a variable at class
+ // definition time.
+ BuildVariableLoad(var, HoleCheckMode::kElided);
+ builder()->StoreAccumulatorInRegister(key);
+ } else if (property->is_private()) {
+ Variable* private_name_var = property->private_name_var();
+ DCHECK_NOT_NULL(private_name_var);
+ BuildVariableLoad(private_name_var, HoleCheckMode::kElided);
+ builder()->StoreAccumulatorInRegister(key);
+ } else {
+ BuildLoadPropertyKey(property, key);
+ }
- Runtime::FunctionId function_id =
- property->kind() == ClassLiteral::Property::FIELD &&
- !property->is_private()
- ? Runtime::kCreateDataProperty
- : Runtime::kAddPrivateField;
- builder()->CallRuntime(function_id, args);
+ builder()->SetExpressionAsStatementPosition(property->value());
+ VisitForRegisterValue(property->value(), value);
+
+ Runtime::FunctionId function_id =
+ property->kind() == ClassLiteral::Property::FIELD &&
+ !property->is_private()
+ ? Runtime::kCreateDataProperty
+ : Runtime::kAddPrivateField;
+ builder()->CallRuntime(function_id, args);
+}
+
+void BytecodeGenerator::VisitInitializeClassMembersStatement(
+ InitializeClassMembersStatement* stmt) {
+ for (int i = 0; i < stmt->fields()->length(); i++) {
+ BuildClassProperty(stmt->fields()->at(i));
+ }
+}
+
+void BytecodeGenerator::VisitInitializeClassStaticElementsStatement(
+ InitializeClassStaticElementsStatement* stmt) {
+ for (int i = 0; i < stmt->elements()->length(); i++) {
+ ClassLiteral::StaticElement* element = stmt->elements()->at(i);
+ switch (element->kind()) {
+ case ClassLiteral::StaticElement::PROPERTY:
+ BuildClassProperty(element->property());
+ break;
+ case ClassLiteral::StaticElement::STATIC_BLOCK:
+ VisitBlock(element->static_block());
+ break;
+ }
}
}
@@ -2595,9 +2657,6 @@ void BytecodeGenerator::VisitLiteral(Literal* expr) {
builder()->LoadLiteral(expr->AsRawString());
execution_result()->SetResultIsString();
break;
- case Literal::kSymbol:
- builder()->LoadLiteral(expr->AsSymbol());
- break;
case Literal::kBigInt:
builder()->LoadLiteral(expr->AsBigInt());
break;
@@ -2645,6 +2704,14 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
return;
}
+ Variable* home_object = expr->home_object();
+ if (home_object != nullptr) {
+ DCHECK(home_object->is_used());
+ DCHECK(home_object->IsContextSlot());
+ }
+ MultipleEntryBlockContextScope object_literal_context_scope(
+ this, home_object ? home_object->scope() : nullptr);
+
// Deep-copy the literal boilerplate.
uint8_t flags = CreateObjectLiteralFlags::Encode(
expr->ComputeFlags(), expr->IsFastCloningSupported());
@@ -2701,21 +2768,14 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// contains computed properties with an uninitialized value.
if (key->IsStringLiteral()) {
DCHECK(key->IsPropertyName());
+ object_literal_context_scope.SetEnteredIf(
+ property->value()->IsConciseMethodDefinition());
if (property->emit_store()) {
builder()->SetExpressionPosition(property->value());
VisitForAccumulatorValue(property->value());
FeedbackSlot slot = feedback_spec()->AddStoreOwnICSlot();
- if (FunctionLiteral::NeedsHomeObject(property->value())) {
- RegisterAllocationScope register_scope(this);
- Register value = register_allocator()->NewRegister();
- builder()->StoreAccumulatorInRegister(value);
- builder()->StoreNamedOwnProperty(
- literal, key->AsRawPropertyName(), feedback_index(slot));
- VisitSetHomeObject(value, literal, property);
- } else {
- builder()->StoreNamedOwnProperty(
- literal, key->AsRawPropertyName(), feedback_index(slot));
- }
+ builder()->StoreNamedOwnProperty(literal, key->AsRawPropertyName(),
+ feedback_index(slot));
} else {
builder()->SetExpressionPosition(property->value());
VisitForEffect(property->value());
@@ -2726,12 +2786,13 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
builder()->MoveRegister(literal, args[0]);
builder()->SetExpressionPosition(property->key());
VisitForRegisterValue(property->key(), args[1]);
+
+ object_literal_context_scope.SetEnteredIf(
+ property->value()->IsConciseMethodDefinition());
builder()->SetExpressionPosition(property->value());
VisitForRegisterValue(property->value(), args[2]);
if (property->emit_store()) {
builder()->CallRuntime(Runtime::kSetKeyedProperty, args);
- Register value = args[2];
- VisitSetHomeObject(value, literal, property);
}
}
break;
@@ -2743,6 +2804,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!property->NeedsSetFunctionName());
RegisterList args = register_allocator()->NewRegisterList(2);
builder()->MoveRegister(literal, args[0]);
+ object_literal_context_scope.SetEnteredIf(false);
builder()->SetExpressionPosition(property->value());
VisitForRegisterValue(property->value(), args[1]);
builder()->CallRuntime(Runtime::kInternalSetPrototype, args);
@@ -2763,13 +2825,14 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Define accessors, using only a single call to the runtime for each pair of
// corresponding getters and setters.
+ object_literal_context_scope.SetEnteredIf(true);
for (auto accessors : accessor_table.ordered_accessors()) {
RegisterAllocationScope inner_register_scope(this);
RegisterList args = register_allocator()->NewRegisterList(5);
builder()->MoveRegister(literal, args[0]);
VisitForRegisterValue(accessors.first, args[1]);
- VisitLiteralAccessor(literal, accessors.second->getter, args[2]);
- VisitLiteralAccessor(literal, accessors.second->setter, args[3]);
+ VisitLiteralAccessor(accessors.second->getter, args[2]);
+ VisitLiteralAccessor(accessors.second->setter, args[3]);
builder()
->LoadLiteral(Smi::FromInt(NONE))
.StoreAccumulatorInRegister(args[4])
@@ -2789,6 +2852,10 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
RegisterAllocationScope inner_register_scope(this);
+ bool should_be_in_object_literal_scope =
+ (property->value()->IsConciseMethodDefinition() ||
+ property->value()->IsAccessorFunctionDefinition());
+
if (property->IsPrototype()) {
// __proto__:null is handled by CreateObjectLiteral.
if (property->IsNullPrototype()) continue;
@@ -2796,6 +2863,9 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!property->NeedsSetFunctionName());
RegisterList args = register_allocator()->NewRegisterList(2);
builder()->MoveRegister(literal, args[0]);
+
+ DCHECK(!should_be_in_object_literal_scope);
+ object_literal_context_scope.SetEnteredIf(false);
builder()->SetExpressionPosition(property->value());
VisitForRegisterValue(property->value(), args[1]);
builder()->CallRuntime(Runtime::kInternalSetPrototype, args);
@@ -2806,8 +2876,16 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::CONSTANT:
case ObjectLiteral::Property::COMPUTED:
case ObjectLiteral::Property::MATERIALIZED_LITERAL: {
+ // Computed property keys don't belong to the object literal scope (even
+ // if they're syntactically inside it).
+ if (property->is_computed_name()) {
+ object_literal_context_scope.SetEnteredIf(false);
+ }
Register key = register_allocator()->NewRegister();
BuildLoadPropertyKey(property, key);
+
+ object_literal_context_scope.SetEnteredIf(
+ should_be_in_object_literal_scope);
builder()->SetExpressionPosition(property->value());
Register value;
@@ -2815,7 +2893,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// the class, meaning we can't wait until the
// StoreDataPropertyInLiteral call later to set the name.
if (property->value()->IsClassLiteral() &&
- property->value()->AsClassLiteral()->static_fields_initializer() !=
+ property->value()->AsClassLiteral()->static_initializer() !=
nullptr) {
value = register_allocator()->NewRegister();
VisitClassLiteral(property->value()->AsClassLiteral(), key);
@@ -2823,7 +2901,6 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
value = VisitForRegisterValue(property->value());
}
- VisitSetHomeObject(value, literal, property);
DataPropertyInLiteralFlags data_property_flags =
DataPropertyInLiteralFlag::kNoFlags;
@@ -2841,12 +2918,19 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
case ObjectLiteral::Property::GETTER:
case ObjectLiteral::Property::SETTER: {
+ // Computed property keys don't belong to the object literal scope (even
+ // if they're syntactically inside it).
+ if (property->is_computed_name()) {
+ object_literal_context_scope.SetEnteredIf(false);
+ }
RegisterList args = register_allocator()->NewRegisterList(4);
builder()->MoveRegister(literal, args[0]);
BuildLoadPropertyKey(property, args[1]);
+
+ DCHECK(should_be_in_object_literal_scope);
+ object_literal_context_scope.SetEnteredIf(true);
builder()->SetExpressionPosition(property->value());
VisitForRegisterValue(property->value(), args[2]);
- VisitSetHomeObject(args[2], literal, property);
builder()
->LoadLiteral(Smi::FromInt(NONE))
.StoreAccumulatorInRegister(args[3]);
@@ -2861,6 +2945,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
RegisterList args = register_allocator()->NewRegisterList(2);
builder()->MoveRegister(literal, args[0]);
builder()->SetExpressionPosition(property->value());
+ object_literal_context_scope.SetEnteredIf(false);
VisitForRegisterValue(property->value(), args[1]);
builder()->CallRuntime(Runtime::kInlineCopyDataProperties, args);
break;
@@ -2872,6 +2957,10 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
builder()->LoadAccumulatorWithRegister(literal);
+ if (home_object != nullptr) {
+ object_literal_context_scope.SetEnteredIf(true);
+ BuildVariableAssignment(home_object, Token::INIT, HoleCheckMode::kElided);
+ }
}
// Fill an array with values from an iterator, starting at a given index. It is
@@ -3520,12 +3609,12 @@ BytecodeGenerator::AssignmentLhsData BytecodeGenerator::PrepareAssignmentLhs(
AccumulatorPreservingScope scope(this, accumulator_preserving_mode);
RegisterList super_property_args =
register_allocator()->NewRegisterList(4);
- SuperPropertyReference* super_property =
- property->obj()->AsSuperPropertyReference();
BuildThisVariableLoad();
builder()->StoreAccumulatorInRegister(super_property_args[0]);
- VisitForRegisterValue(super_property->home_object(),
- super_property_args[1]);
+ BuildVariableLoad(
+ property->obj()->AsSuperPropertyReference()->home_object()->var(),
+ HoleCheckMode::kElided);
+ builder()->StoreAccumulatorInRegister(super_property_args[1]);
builder()
->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
.StoreAccumulatorInRegister(super_property_args[2]);
@@ -3535,12 +3624,12 @@ BytecodeGenerator::AssignmentLhsData BytecodeGenerator::PrepareAssignmentLhs(
AccumulatorPreservingScope scope(this, accumulator_preserving_mode);
RegisterList super_property_args =
register_allocator()->NewRegisterList(4);
- SuperPropertyReference* super_property =
- property->obj()->AsSuperPropertyReference();
BuildThisVariableLoad();
builder()->StoreAccumulatorInRegister(super_property_args[0]);
- VisitForRegisterValue(super_property->home_object(),
- super_property_args[1]);
+ BuildVariableLoad(
+ property->obj()->AsSuperPropertyReference()->home_object()->var(),
+ HoleCheckMode::kElided);
+ builder()->StoreAccumulatorInRegister(super_property_args[1]);
VisitForRegisterValue(property->key(), super_property_args[2]);
return AssignmentLhsData::KeyedSuperProperty(super_property_args);
}
@@ -4135,12 +4224,47 @@ void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) {
lhs_data.super_property_args().Truncate(3));
break;
}
- case PRIVATE_METHOD:
- case PRIVATE_GETTER_ONLY:
- case PRIVATE_SETTER_ONLY:
+ case PRIVATE_METHOD: {
+ // The property access is invalid, but if the brand check fails too, we
+ // need to return the error from the brand check.
+ Property* property = lhs_data.expr()->AsProperty();
+ Register object = VisitForRegisterValue(property->obj());
+ BuildPrivateBrandCheck(property, object,
+ MessageTemplate::kInvalidPrivateMemberRead);
+ BuildInvalidPropertyAccess(MessageTemplate::kInvalidPrivateMethodWrite,
+ lhs_data.expr()->AsProperty());
+ break;
+ }
+ case PRIVATE_GETTER_ONLY: {
+ // The property access is invalid, but if the brand check fails too, we
+ // need to return the error from the brand check.
+ Property* property = lhs_data.expr()->AsProperty();
+ Register object = VisitForRegisterValue(property->obj());
+ BuildPrivateBrandCheck(property, object,
+ MessageTemplate::kInvalidPrivateMemberRead);
+ BuildInvalidPropertyAccess(MessageTemplate::kInvalidPrivateSetterAccess,
+ lhs_data.expr()->AsProperty());
+
+ break;
+ }
+ case PRIVATE_SETTER_ONLY: {
+ // The property access is invalid, but if the brand check fails too, we
+ // need to return the error from the brand check.
+ Property* property = lhs_data.expr()->AsProperty();
+ Register object = VisitForRegisterValue(property->obj());
+ BuildPrivateBrandCheck(property, object,
+ MessageTemplate::kInvalidPrivateMemberRead);
+ BuildInvalidPropertyAccess(MessageTemplate::kInvalidPrivateGetterAccess,
+ lhs_data.expr()->AsProperty());
+ break;
+ }
case PRIVATE_GETTER_AND_SETTER: {
- // ({ #foo: name } = obj) is currently syntactically invalid.
- UNREACHABLE();
+ Property* property = lhs_data.expr()->AsProperty();
+ Register object = VisitForRegisterValue(property->obj());
+ Register key = VisitForRegisterValue(property->key());
+ BuildPrivateBrandCheck(property, object,
+ MessageTemplate::kInvalidPrivateMemberRead);
+ BuildPrivateGetterAccess(object, key);
break;
}
}
@@ -4397,7 +4521,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
.SwitchOnSmiNoFeedback(switch_jump_table);
// Fallthrough to default case.
- // TODO(tebbi): Add debug code to check that {resume_mode} really is
+ // TODO(ignition): Add debug code to check that {resume_mode} really is
// {JSGeneratorObject::kNext} in this case.
STATIC_ASSERT(JSGeneratorObject::kNext == 0);
{
@@ -4685,6 +4809,65 @@ void BytecodeGenerator::BuildPrivateSetterAccess(Register object,
feedback_index(feedback_spec()->AddCallICSlot()));
}
+void BytecodeGenerator::BuildPrivateMethodIn(Variable* private_name,
+ Expression* object_expression) {
+ DCHECK(IsPrivateMethodOrAccessorVariableMode(private_name->mode()));
+ ClassScope* scope = private_name->scope()->AsClassScope();
+ if (private_name->is_static()) {
+ // For static private methods, "#privatemethod in ..." only returns true for
+ // the class constructor.
+ if (scope->class_variable() == nullptr) {
+ // Can only happen via the debugger. See comment in
+ // BuildPrivateBrandCheck.
+ RegisterAllocationScope register_scope(this);
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->LoadLiteral(Smi::FromEnum(
+ MessageTemplate::
+ kInvalidUnusedPrivateStaticMethodAccessedByDebugger))
+ .StoreAccumulatorInRegister(args[0])
+ .LoadLiteral(private_name->raw_name())
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kNewError, args)
+ .Throw();
+ } else {
+ VisitForAccumulatorValue(object_expression);
+ Register object = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(object);
+
+ BytecodeLabel is_object;
+ builder()->JumpIfJSReceiver(&is_object);
+
+ RegisterList args = register_allocator()->NewRegisterList(3);
+ builder()
+ ->StoreAccumulatorInRegister(args[2])
+ .LoadLiteral(Smi::FromEnum(MessageTemplate::kInvalidInOperatorUse))
+ .StoreAccumulatorInRegister(args[0])
+ .LoadLiteral(private_name->raw_name())
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kNewTypeError, args)
+ .Throw();
+
+ builder()->Bind(&is_object);
+ BuildVariableLoadForAccumulatorValue(scope->class_variable(),
+ HoleCheckMode::kElided);
+ builder()->CompareReference(object);
+ }
+ } else {
+ BuildVariableLoadForAccumulatorValue(scope->brand(),
+ HoleCheckMode::kElided);
+ Register brand = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(brand);
+
+ VisitForAccumulatorValue(object_expression);
+ builder()->SetExpressionPosition(object_expression);
+
+ FeedbackSlot slot = feedback_spec()->AddKeyedHasICSlot();
+ builder()->CompareOperation(Token::IN, brand, feedback_index(slot));
+ execution_result()->SetResultIsBoolean();
+ }
+}
+
void BytecodeGenerator::BuildPrivateBrandCheck(Property* property,
Register object,
MessageTemplate tmpl) {
@@ -4743,13 +4926,13 @@ void BytecodeGenerator::VisitPropertyLoadForRegister(Register obj,
void BytecodeGenerator::VisitNamedSuperPropertyLoad(Property* property,
Register opt_receiver_out) {
RegisterAllocationScope register_scope(this);
- SuperPropertyReference* super_property =
- property->obj()->AsSuperPropertyReference();
if (FLAG_super_ic) {
Register receiver = register_allocator()->NewRegister();
BuildThisVariableLoad();
builder()->StoreAccumulatorInRegister(receiver);
- VisitForAccumulatorValue(super_property->home_object());
+ BuildVariableLoad(
+ property->obj()->AsSuperPropertyReference()->home_object()->var(),
+ HoleCheckMode::kElided);
builder()->SetExpressionPosition(property);
auto name = property->key()->AsLiteral()->AsRawPropertyName();
FeedbackSlot slot = GetCachedLoadSuperICSlot(name);
@@ -4761,8 +4944,10 @@ void BytecodeGenerator::VisitNamedSuperPropertyLoad(Property* property,
RegisterList args = register_allocator()->NewRegisterList(3);
BuildThisVariableLoad();
builder()->StoreAccumulatorInRegister(args[0]);
- VisitForRegisterValue(super_property->home_object(), args[1]);
-
+ BuildVariableLoad(
+ property->obj()->AsSuperPropertyReference()->home_object()->var(),
+ HoleCheckMode::kElided);
+ builder()->StoreAccumulatorInRegister(args[1]);
builder()->SetExpressionPosition(property);
builder()
->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
@@ -4778,12 +4963,13 @@ void BytecodeGenerator::VisitNamedSuperPropertyLoad(Property* property,
void BytecodeGenerator::VisitKeyedSuperPropertyLoad(Property* property,
Register opt_receiver_out) {
RegisterAllocationScope register_scope(this);
- SuperPropertyReference* super_property =
- property->obj()->AsSuperPropertyReference();
RegisterList args = register_allocator()->NewRegisterList(3);
BuildThisVariableLoad();
builder()->StoreAccumulatorInRegister(args[0]);
- VisitForRegisterValue(super_property->home_object(), args[1]);
+ BuildVariableLoad(
+ property->obj()->AsSuperPropertyReference()->home_object()->var(),
+ HoleCheckMode::kElided);
+ builder()->StoreAccumulatorInRegister(args[1]);
VisitForRegisterValue(property->key(), args[2]);
builder()->SetExpressionPosition(property);
@@ -5328,11 +5514,12 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_SUPER_PROPERTY: {
super_property_args = register_allocator()->NewRegisterList(4);
RegisterList load_super_args = super_property_args.Truncate(3);
- SuperPropertyReference* super_property =
- property->obj()->AsSuperPropertyReference();
BuildThisVariableLoad();
builder()->StoreAccumulatorInRegister(load_super_args[0]);
- VisitForRegisterValue(super_property->home_object(), load_super_args[1]);
+ BuildVariableLoad(
+ property->obj()->AsSuperPropertyReference()->home_object()->var(),
+ HoleCheckMode::kElided);
+ builder()->StoreAccumulatorInRegister(load_super_args[1]);
builder()
->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
.StoreAccumulatorInRegister(load_super_args[2])
@@ -5342,11 +5529,12 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_SUPER_PROPERTY: {
super_property_args = register_allocator()->NewRegisterList(4);
RegisterList load_super_args = super_property_args.Truncate(3);
- SuperPropertyReference* super_property =
- property->obj()->AsSuperPropertyReference();
BuildThisVariableLoad();
builder()->StoreAccumulatorInRegister(load_super_args[0]);
- VisitForRegisterValue(super_property->home_object(), load_super_args[1]);
+ BuildVariableLoad(
+ property->obj()->AsSuperPropertyReference()->home_object()->var(),
+ HoleCheckMode::kElided);
+ builder()->StoreAccumulatorInRegister(load_super_args[1]);
VisitForRegisterValue(property->key(), load_super_args[2]);
builder()->CallRuntime(Runtime::kLoadKeyedFromSuper, load_super_args);
break;
@@ -5548,6 +5736,16 @@ void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
builder()->SetExpressionPosition(expr);
BuildLiteralCompareNil(expr->op(), BytecodeArrayBuilder::kNullValue);
} else {
+ if (expr->op() == Token::IN && expr->left()->IsPrivateName()) {
+ DCHECK(FLAG_harmony_private_brand_checks);
+ Variable* var = expr->left()->AsVariableProxy()->var();
+ if (IsPrivateMethodOrAccessorVariableMode(var->mode())) {
+ BuildPrivateMethodIn(var, expr->right());
+ return;
+ }
+ // For private fields, the code below does the right thing.
+ }
+
Register lhs = VisitForRegisterValue(expr->left());
VisitForAccumulatorValue(expr->right());
builder()->SetExpressionPosition(expr);
@@ -5630,8 +5828,12 @@ void BytecodeGenerator::VisitEmptyParentheses(EmptyParentheses* expr) {
}
void BytecodeGenerator::VisitImportCallExpression(ImportCallExpression* expr) {
- RegisterList args = register_allocator()->NewRegisterList(2);
+ const int register_count = expr->import_assertions() ? 3 : 2;
+ RegisterList args = register_allocator()->NewRegisterList(register_count);
VisitForRegisterValue(expr->specifier(), args[1]);
+ if (expr->import_assertions()) {
+ VisitForRegisterValue(expr->import_assertions(), args[2]);
+ }
builder()
->MoveRegister(Register::function_closure(), args[0])
.CallRuntime(Runtime::kDynamicImportCall, args);
@@ -6259,25 +6461,12 @@ void BytecodeGenerator::BuildNewLocalCatchContext(Scope* scope) {
builder()->CreateCatchContext(exception, scope);
}
-void BytecodeGenerator::VisitLiteralAccessor(Register home_object,
- LiteralProperty* property,
+void BytecodeGenerator::VisitLiteralAccessor(LiteralProperty* property,
Register value_out) {
if (property == nullptr) {
builder()->LoadNull().StoreAccumulatorInRegister(value_out);
} else {
VisitForRegisterValue(property->value(), value_out);
- VisitSetHomeObject(value_out, home_object, property);
- }
-}
-
-void BytecodeGenerator::VisitSetHomeObject(Register value, Register home_object,
- LiteralProperty* property) {
- Expression* expr = property->value();
- if (FunctionLiteral::NeedsHomeObject(expr)) {
- FeedbackSlot slot = feedback_spec()->AddStoreICSlot(language_mode());
- builder()
- ->LoadAccumulatorWithRegister(home_object)
- .StoreHomeObjectProperty(value, feedback_index(slot), language_mode());
}
}
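MultipleEntryBlockContextScope above lets the generator hop in and out of the object literal's block context as it visits individual properties: the CurrentScope and ContextScope RAII helpers are held in base::Optional members, and SetEnteredIf() emplaces or clears them on demand. A self-contained sketch of that optional-RAII pattern in plain C++ (std::optional standing in for base::Optional, and a dummy Scope standing in for the V8 scope classes):

#include <iostream>
#include <optional>

// Stand-in for an RAII scope such as BytecodeGenerator::ContextScope.
struct Scope {
  Scope() { std::cout << "enter\n"; }
  ~Scope() { std::cout << "exit\n"; }
};

// Enters or leaves the scope on demand, mirroring SetEnteredIf() above.
class MultipleEntryScope {
 public:
  void SetEnteredIf(bool condition) {
    if (condition && !scope_) {
      scope_.emplace();  // EnterScope(): construct the RAII member in place.
    } else if (!condition && scope_) {
      scope_.reset();    // ExitScope(): destroy it, leaving the scope.
    }
  }

 private:
  std::optional<Scope> scope_;
};

int main() {
  MultipleEntryScope scope;
  scope.SetEnteredIf(true);   // prints "enter"
  scope.SetEnteredIf(true);   // already entered: no-op
  scope.SetEnteredIf(false);  // prints "exit"
  scope.SetEnteredIf(true);   // prints "enter" again
  return 0;
}                             // final "exit" when scope is destroyed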
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 93c422dcb7..3abda9e387 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -71,6 +71,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
class ExpressionResultScope;
class FeedbackSlotCache;
class IteratorRecord;
+ class MultipleEntryBlockContextScope;
class LoopScope;
class NaryCodeCoverageSlots;
class OptionalChainNullLabelScope;
@@ -311,11 +312,14 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildInvalidPropertyAccess(MessageTemplate tmpl, Property* property);
void BuildPrivateBrandCheck(Property* property, Register object,
MessageTemplate tmpl);
+ void BuildPrivateMethodIn(Variable* private_name,
+ Expression* object_expression);
void BuildPrivateGetterAccess(Register obj, Register access_pair);
void BuildPrivateSetterAccess(Register obj, Register access_pair,
Register value);
void BuildPrivateMethods(ClassLiteral* expr, bool is_static,
Register home_object);
+ void BuildClassProperty(ClassLiteral::Property* property);
void BuildClassLiteral(ClassLiteral* expr, Register name);
void VisitClassLiteral(ClassLiteral* expr, Register name);
void VisitNewTargetVariable(Variable* variable);
@@ -325,10 +329,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
Register instance);
void BuildGeneratorObjectVariableInitialization();
void VisitBlockDeclarationsAndStatements(Block* stmt);
- void VisitSetHomeObject(Register value, Register home_object,
- LiteralProperty* property);
- void VisitLiteralAccessor(Register home_object, LiteralProperty* property,
- Register value_out);
+ void VisitLiteralAccessor(LiteralProperty* property, Register value_out);
void VisitForInAssignment(Expression* expr);
void VisitModuleNamespaceImports();
diff --git a/deps/v8/src/interpreter/bytecode-node.h b/deps/v8/src/interpreter/bytecode-node.h
index ebf0e91f50..ac6f5922f2 100644
--- a/deps/v8/src/interpreter/bytecode-node.h
+++ b/deps/v8/src/interpreter/bytecode-node.h
@@ -127,7 +127,7 @@ class V8_EXPORT_PRIVATE BytecodeNode final {
bool operator!=(const BytecodeNode& other) const { return !(*this == other); }
private:
- template <Bytecode bytecode, AccumulatorUse accumulator_use,
+ template <Bytecode bytecode, ImplicitRegisterUse implicit_register_use,
OperandType... operand_types>
friend class BytecodeNodeBuilder;
@@ -148,12 +148,12 @@ class V8_EXPORT_PRIVATE BytecodeNode final {
operands_[4] = operand4;
}
- template <Bytecode bytecode, AccumulatorUse accum_use>
+ template <Bytecode bytecode, ImplicitRegisterUse accum_use>
V8_INLINE static BytecodeNode Create(BytecodeSourceInfo source_info) {
return BytecodeNode(bytecode, 0, OperandScale::kSingle, source_info);
}
- template <Bytecode bytecode, AccumulatorUse accum_use,
+ template <Bytecode bytecode, ImplicitRegisterUse accum_use,
OperandType operand0_type>
V8_INLINE static BytecodeNode Create(BytecodeSourceInfo source_info,
uint32_t operand0) {
@@ -163,7 +163,7 @@ class V8_EXPORT_PRIVATE BytecodeNode final {
return BytecodeNode(bytecode, 1, scale, source_info, operand0);
}
- template <Bytecode bytecode, AccumulatorUse accum_use,
+ template <Bytecode bytecode, ImplicitRegisterUse accum_use,
OperandType operand0_type, OperandType operand1_type>
V8_INLINE static BytecodeNode Create(BytecodeSourceInfo source_info,
uint32_t operand0, uint32_t operand1) {
@@ -175,7 +175,7 @@ class V8_EXPORT_PRIVATE BytecodeNode final {
return BytecodeNode(bytecode, 2, scale, source_info, operand0, operand1);
}
- template <Bytecode bytecode, AccumulatorUse accum_use,
+ template <Bytecode bytecode, ImplicitRegisterUse accum_use,
OperandType operand0_type, OperandType operand1_type,
OperandType operand2_type>
V8_INLINE static BytecodeNode Create(BytecodeSourceInfo source_info,
@@ -192,7 +192,7 @@ class V8_EXPORT_PRIVATE BytecodeNode final {
operand2);
}
- template <Bytecode bytecode, AccumulatorUse accum_use,
+ template <Bytecode bytecode, ImplicitRegisterUse accum_use,
OperandType operand0_type, OperandType operand1_type,
OperandType operand2_type, OperandType operand3_type>
V8_INLINE static BytecodeNode Create(BytecodeSourceInfo source_info,
@@ -211,7 +211,7 @@ class V8_EXPORT_PRIVATE BytecodeNode final {
operand2, operand3);
}
- template <Bytecode bytecode, AccumulatorUse accum_use,
+ template <Bytecode bytecode, ImplicitRegisterUse accum_use,
OperandType operand0_type, OperandType operand1_type,
OperandType operand2_type, OperandType operand3_type,
OperandType operand4_type>
diff --git a/deps/v8/src/interpreter/bytecode-operands.cc b/deps/v8/src/interpreter/bytecode-operands.cc
index 5ebf66be38..9e6a66769c 100644
--- a/deps/v8/src/interpreter/bytecode-operands.cc
+++ b/deps/v8/src/interpreter/bytecode-operands.cc
@@ -12,16 +12,21 @@ namespace interpreter {
namespace {
-const char* AccumulatorUseToString(AccumulatorUse accumulator_use) {
- switch (accumulator_use) {
- case AccumulatorUse::kNone:
+const char* ImplicitRegisterUseToString(
+ ImplicitRegisterUse implicit_register_use) {
+ switch (implicit_register_use) {
+ case ImplicitRegisterUse::kNone:
return "None";
- case AccumulatorUse::kRead:
- return "Read";
- case AccumulatorUse::kWrite:
- return "Write";
- case AccumulatorUse::kReadWrite:
- return "ReadWrite";
+ case ImplicitRegisterUse::kReadAccumulator:
+ return "ReadAccumulator";
+ case ImplicitRegisterUse::kWriteAccumulator:
+ return "WriteAccumulator";
+ case ImplicitRegisterUse::kWriteShortStar:
+ return "WriteShortStar";
+ case ImplicitRegisterUse::kReadWriteAccumulator:
+ return "ReadWriteAccumulator";
+ case ImplicitRegisterUse::kReadAccumulatorWriteShortStar:
+ return "ReadAccumulatorWriteShortStar";
}
UNREACHABLE();
}
@@ -64,8 +69,8 @@ const char* OperandSizeToString(OperandSize operand_size) {
} // namespace
-std::ostream& operator<<(std::ostream& os, const AccumulatorUse& use) {
- return os << AccumulatorUseToString(use);
+std::ostream& operator<<(std::ostream& os, const ImplicitRegisterUse& use) {
+ return os << ImplicitRegisterUseToString(use);
}
std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size) {
diff --git a/deps/v8/src/interpreter/bytecode-operands.h b/deps/v8/src/interpreter/bytecode-operands.h
index 4f953341d4..c9cca226ab 100644
--- a/deps/v8/src/interpreter/bytecode-operands.h
+++ b/deps/v8/src/interpreter/bytecode-operands.h
@@ -109,27 +109,29 @@ enum class OperandType : uint8_t {
#undef COUNT_OPERAND_TYPES
};
-enum class AccumulatorUse : uint8_t {
+enum class ImplicitRegisterUse : uint8_t {
kNone = 0,
- kRead = 1 << 0,
- kWrite = 1 << 1,
- kReadWrite = kRead | kWrite
+ kReadAccumulator = 1 << 0,
+ kWriteAccumulator = 1 << 1,
+ kWriteShortStar = 1 << 2,
+ kReadWriteAccumulator = kReadAccumulator | kWriteAccumulator,
+ kReadAccumulatorWriteShortStar = kReadAccumulator | kWriteShortStar
};
-constexpr inline AccumulatorUse operator&(AccumulatorUse lhs,
- AccumulatorUse rhs) {
- return static_cast<AccumulatorUse>(static_cast<int>(lhs) &
- static_cast<int>(rhs));
+constexpr inline ImplicitRegisterUse operator&(ImplicitRegisterUse lhs,
+ ImplicitRegisterUse rhs) {
+ return static_cast<ImplicitRegisterUse>(static_cast<int>(lhs) &
+ static_cast<int>(rhs));
}
-constexpr inline AccumulatorUse operator|(AccumulatorUse lhs,
- AccumulatorUse rhs) {
- return static_cast<AccumulatorUse>(static_cast<int>(lhs) |
- static_cast<int>(rhs));
+constexpr inline ImplicitRegisterUse operator|(ImplicitRegisterUse lhs,
+ ImplicitRegisterUse rhs) {
+ return static_cast<ImplicitRegisterUse>(static_cast<int>(lhs) |
+ static_cast<int>(rhs));
}
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
- const AccumulatorUse& use);
+ const ImplicitRegisterUse& use);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
const OperandScale& operand_scale);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
@@ -170,14 +172,28 @@ class BytecodeOperands : public AllStatic {
return static_cast<int>(operand_scale) >> 1;
}
- // Returns true if |accumulator_use| reads the accumulator.
- static constexpr bool ReadsAccumulator(AccumulatorUse accumulator_use) {
- return (accumulator_use & AccumulatorUse::kRead) == AccumulatorUse::kRead;
+ // Returns true if |implicit_register_use| reads the
+ // accumulator.
+ static constexpr bool ReadsAccumulator(
+ ImplicitRegisterUse implicit_register_use) {
+ return (implicit_register_use & ImplicitRegisterUse::kReadAccumulator) ==
+ ImplicitRegisterUse::kReadAccumulator;
}
- // Returns true if |accumulator_use| writes the accumulator.
- static constexpr bool WritesAccumulator(AccumulatorUse accumulator_use) {
- return (accumulator_use & AccumulatorUse::kWrite) == AccumulatorUse::kWrite;
+ // Returns true if |implicit_register_use| writes the
+ // accumulator.
+ static constexpr bool WritesAccumulator(
+ ImplicitRegisterUse implicit_register_use) {
+ return (implicit_register_use & ImplicitRegisterUse::kWriteAccumulator) ==
+ ImplicitRegisterUse::kWriteAccumulator;
+ }
+
+ // Returns true if |implicit_register_use| writes to a
+ // register not specified by an operand.
+ static constexpr bool WritesImplicitRegister(
+ ImplicitRegisterUse implicit_register_use) {
+ return (implicit_register_use & ImplicitRegisterUse::kWriteShortStar) ==
+ ImplicitRegisterUse::kWriteShortStar;
}
// Returns true if |operand_type| is a scalable signed byte.
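ImplicitRegisterUse is a plain bit set: the old accumulator read/write bits plus a new kWriteShortStar bit for the register implied by the Star0..StarN opcodes, and the predicates above are constexpr mask tests. A small sketch of what they evaluate to for the combined value used by short Star bytecodes (names taken from the enum above; illustrative only):

// Illustrative: the predicates are constexpr bit tests over the enum above.
constexpr ImplicitRegisterUse kShortStarUse =
    ImplicitRegisterUse::kReadAccumulatorWriteShortStar;

static_assert(BytecodeOperands::ReadsAccumulator(kShortStarUse),
              "a short Star reads the accumulator");
static_assert(!BytecodeOperands::WritesAccumulator(kShortStarUse),
              "a short Star leaves the accumulator untouched");
static_assert(BytecodeOperands::WritesImplicitRegister(kShortStarUse),
              "a short Star writes the register implied by the opcode");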
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.h b/deps/v8/src/interpreter/bytecode-register-optimizer.h
index 289b8983f3..4c6e8836d3 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.h
@@ -65,7 +65,7 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
bool EnsureAllRegistersAreFlushed() const;
// Prepares for |bytecode|.
- template <Bytecode bytecode, AccumulatorUse accumulator_use>
+ template <Bytecode bytecode, ImplicitRegisterUse implicit_register_use>
V8_INLINE void PrepareForBytecode() {
if (Bytecodes::IsJump(bytecode) || Bytecodes::IsSwitch(bytecode) ||
bytecode == Bytecode::kDebugger ||
@@ -85,13 +85,13 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
// Materialize the accumulator if it is read by the bytecode. The
// accumulator is special and no other register can be materialized
// in it's place.
- if (BytecodeOperands::ReadsAccumulator(accumulator_use)) {
+ if (BytecodeOperands::ReadsAccumulator(implicit_register_use)) {
Materialize(accumulator_info_);
}
// Materialize an equivalent to the accumulator if it will be
// clobbered when the bytecode is dispatched.
- if (BytecodeOperands::WritesAccumulator(accumulator_use)) {
+ if (BytecodeOperands::WritesAccumulator(implicit_register_use)) {
PrepareOutputRegister(accumulator_);
}
}
diff --git a/deps/v8/src/interpreter/bytecode-register.cc b/deps/v8/src/interpreter/bytecode-register.cc
index e8eb347f16..5266f693d2 100644
--- a/deps/v8/src/interpreter/bytecode-register.cc
+++ b/deps/v8/src/interpreter/bytecode-register.cc
@@ -32,6 +32,10 @@ static const int kCallerPCOffsetRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
InterpreterFrameConstants::kCallerPCOffset) /
kSystemPointerSize;
+static const int kArgumentCountRegisterIndex =
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ InterpreterFrameConstants::kArgCOffset) /
+ kSystemPointerSize;
Register Register::FromParameterIndex(int index, int parameter_count) {
DCHECK_GE(index, 0);
@@ -83,6 +87,11 @@ Register Register::virtual_accumulator() {
return Register(kCallerPCOffsetRegisterIndex);
}
+// static
+Register Register::argument_count() {
+ return Register(kArgumentCountRegisterIndex);
+}
+
OperandSize Register::SizeOfOperand() const {
int32_t operand = ToOperand();
if (operand >= kMinInt8 && operand <= kMaxInt8) {
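
The new argument-count register follows the same pattern as the other synthetic registers in this file: its index is derived from interpreter frame constants so that ordinary register-file addressing lands on the frame's argc slot. A standalone sketch with invented offsets (the real values live in InterpreterFrameConstants):

// Toy frame layout, invented numbers: register file starts 3 slots below
// fp, the argument count is stored 5 slots below fp.
constexpr int kSystemPointerSize = 8;
constexpr int kRegisterFileFromFp = -3 * kSystemPointerSize;
constexpr int kArgCOffset = -5 * kSystemPointerSize;

constexpr int kArgumentCountRegisterIndex =
    (kRegisterFileFromFp - kArgCOffset) / kSystemPointerSize;  // == 2 here

// Register i addresses fp + kRegisterFileFromFp - i * kSystemPointerSize,
// so Register(kArgumentCountRegisterIndex) aliases the argc slot exactly.
static_assert(kRegisterFileFromFp -
                      kArgumentCountRegisterIndex * kSystemPointerSize ==
                  kArgCOffset,
              "argument_count() register lands on the frame's argc slot");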
diff --git a/deps/v8/src/interpreter/bytecode-register.h b/deps/v8/src/interpreter/bytecode-register.h
index 034ac0bb76..604ebe56f5 100644
--- a/deps/v8/src/interpreter/bytecode-register.h
+++ b/deps/v8/src/interpreter/bytecode-register.h
@@ -20,7 +20,7 @@ namespace interpreter {
// in its stack-frame. Register hold parameters, this, and expression values.
class V8_EXPORT_PRIVATE Register final {
public:
- explicit Register(int index = kInvalidIndex) : index_(index) {}
+ constexpr explicit Register(int index = kInvalidIndex) : index_(index) {}
int index() const { return index_; }
bool is_parameter() const { return index() < 0; }
@@ -48,6 +48,9 @@ class V8_EXPORT_PRIVATE Register final {
static Register bytecode_offset();
bool is_bytecode_offset() const;
+ // Returns the register for the argument count.
+ static Register argument_count();
+
// Returns a register that can be used to represent the accumulator
// within code in the interpreter, but should never be emitted in
// bytecode.
@@ -55,11 +58,30 @@ class V8_EXPORT_PRIVATE Register final {
OperandSize SizeOfOperand() const;
- int32_t ToOperand() const { return kRegisterFileStartOffset - index_; }
+ constexpr int32_t ToOperand() const {
+ return kRegisterFileStartOffset - index_;
+ }
static Register FromOperand(int32_t operand) {
return Register(kRegisterFileStartOffset - operand);
}
+ static Register FromShortStar(Bytecode bytecode) {
+ DCHECK(Bytecodes::IsShortStar(bytecode));
+ return Register(static_cast<int>(Bytecode::kStar0) -
+ static_cast<int>(bytecode));
+ }
+
+ const base::Optional<Bytecode> TryToShortStar() const {
+ if (index() >= 0 && index() < Bytecodes::kShortStarCount) {
+ Bytecode bytecode =
+ static_cast<Bytecode>(static_cast<int>(Bytecode::kStar0) - index());
+ DCHECK_GE(bytecode, Bytecode::kFirstShortStar);
+ DCHECK_LE(bytecode, Bytecode::kLastShortStar);
+ return bytecode;
+ }
+ return {};
+ }
+
static bool AreContiguous(Register reg1, Register reg2,
Register reg3 = invalid_value(),
Register reg4 = invalid_value(),
@@ -110,6 +132,10 @@ class RegisterList {
DCHECK_LT(new_count, register_count_);
return RegisterList(first_reg_index_, new_count);
}
+ const RegisterList PopLeft() {
+ DCHECK_GE(register_count_, 0);
+ return RegisterList(first_reg_index_ + 1, register_count_ - 1);
+ }
const Register operator[](size_t i) const {
DCHECK_LT(static_cast<int>(i), register_count_);
@@ -131,6 +157,7 @@ class RegisterList {
friend class BytecodeDecoder;
friend class InterpreterTester;
friend class BytecodeUtils;
+ friend class BytecodeArrayAccessor;
RegisterList(int first_reg_index, int register_count)
: first_reg_index_(first_reg_index), register_count_(register_count) {}
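
FromShortStar and TryToShortStar rely on the enum ordering this patch sets up in bytecodes.h: Star15 is listed first and Star0 last, so kStar0 has the highest opcode and subtracting any short-star opcode from it recovers the register index. A standalone model with invented opcode values:

// Sketch only; 100/115 are invented stand-ins for the real opcodes.
enum class ToyBytecode : uint8_t { kStar15 = 100, kStar0 = 115 };

constexpr int RegisterIndexForShortStar(ToyBytecode bytecode) {
  // Mirrors Register::FromShortStar above; TryToShortStar is the inverse.
  return static_cast<int>(ToyBytecode::kStar0) - static_cast<int>(bytecode);
}

static_assert(RegisterIndexForShortStar(ToyBytecode::kStar0) == 0,
              "Star0 writes register r0");
static_assert(RegisterIndexForShortStar(ToyBytecode::kStar15) == 15,
              "Star15 writes register r15");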
diff --git a/deps/v8/src/interpreter/bytecode-traits.h b/deps/v8/src/interpreter/bytecode-traits.h
index 3f7b9afb4a..afe1520c09 100644
--- a/deps/v8/src/interpreter/bytecode-traits.h
+++ b/deps/v8/src/interpreter/bytecode-traits.h
@@ -76,7 +76,7 @@ struct SumHelper<value, values...> {
static const int kValue = value + SumHelper<values...>::kValue;
};
-template <AccumulatorUse accumulator_use, OperandType... operands>
+template <ImplicitRegisterUse implicit_register_use, OperandType... operands>
struct BytecodeTraits {
static const OperandType kOperandTypes[];
static const OperandTypeInfo kOperandTypeInfos[];
@@ -89,33 +89,33 @@ struct BytecodeTraits {
1, OperandScaler<operands, OperandScale::kDouble>::kSize...>::kValue;
static const int kQuadrupleScaleSize = SumHelper<
1, OperandScaler<operands, OperandScale::kQuadruple>::kSize...>::kValue;
- static const AccumulatorUse kAccumulatorUse = accumulator_use;
+ static const ImplicitRegisterUse kImplicitRegisterUse = implicit_register_use;
static const int kOperandCount = sizeof...(operands);
};
-template <AccumulatorUse accumulator_use, OperandType... operands>
+template <ImplicitRegisterUse implicit_register_use, OperandType... operands>
STATIC_CONST_MEMBER_DEFINITION const OperandType
- BytecodeTraits<accumulator_use, operands...>::kOperandTypes[] = {
+ BytecodeTraits<implicit_register_use, operands...>::kOperandTypes[] = {
operands...};
-template <AccumulatorUse accumulator_use, OperandType... operands>
+template <ImplicitRegisterUse implicit_register_use, OperandType... operands>
STATIC_CONST_MEMBER_DEFINITION const OperandTypeInfo
- BytecodeTraits<accumulator_use, operands...>::kOperandTypeInfos[] = {
+ BytecodeTraits<implicit_register_use, operands...>::kOperandTypeInfos[] = {
OperandTraits<operands>::kOperandTypeInfo...};
-template <AccumulatorUse accumulator_use, OperandType... operands>
-STATIC_CONST_MEMBER_DEFINITION const OperandSize
- BytecodeTraits<accumulator_use, operands...>::kSingleScaleOperandSizes[] = {
- OperandScaler<operands, OperandScale::kSingle>::kOperandSize...};
-template <AccumulatorUse accumulator_use, OperandType... operands>
-STATIC_CONST_MEMBER_DEFINITION const OperandSize
- BytecodeTraits<accumulator_use, operands...>::kDoubleScaleOperandSizes[] = {
- OperandScaler<operands, OperandScale::kDouble>::kOperandSize...};
-template <AccumulatorUse accumulator_use, OperandType... operands>
+template <ImplicitRegisterUse implicit_register_use, OperandType... operands>
+STATIC_CONST_MEMBER_DEFINITION const OperandSize BytecodeTraits<
+ implicit_register_use, operands...>::kSingleScaleOperandSizes[] = {
+ OperandScaler<operands, OperandScale::kSingle>::kOperandSize...};
+template <ImplicitRegisterUse implicit_register_use, OperandType... operands>
+STATIC_CONST_MEMBER_DEFINITION const OperandSize BytecodeTraits<
+ implicit_register_use, operands...>::kDoubleScaleOperandSizes[] = {
+ OperandScaler<operands, OperandScale::kDouble>::kOperandSize...};
+template <ImplicitRegisterUse implicit_register_use, OperandType... operands>
STATIC_CONST_MEMBER_DEFINITION const OperandSize BytecodeTraits<
- accumulator_use, operands...>::kQuadrupleScaleOperandSizes[] = {
+ implicit_register_use, operands...>::kQuadrupleScaleOperandSizes[] = {
OperandScaler<operands, OperandScale::kQuadruple>::kOperandSize...};
-template <AccumulatorUse accumulator_use>
-struct BytecodeTraits<accumulator_use> {
+template <ImplicitRegisterUse implicit_register_use>
+struct BytecodeTraits<implicit_register_use> {
static const OperandType kOperandTypes[];
static const OperandTypeInfo kOperandTypeInfos[];
static const OperandSize kSingleScaleOperandSizes[];
@@ -124,28 +124,29 @@ struct BytecodeTraits<accumulator_use> {
static const int kSingleScaleSize = 1;
static const int kDoubleScaleSize = 1;
static const int kQuadrupleScaleSize = 1;
- static const AccumulatorUse kAccumulatorUse = accumulator_use;
+ static const ImplicitRegisterUse kImplicitRegisterUse = implicit_register_use;
static const int kOperandCount = 0;
};
-template <AccumulatorUse accumulator_use>
+template <ImplicitRegisterUse implicit_register_use>
STATIC_CONST_MEMBER_DEFINITION const OperandType
- BytecodeTraits<accumulator_use>::kOperandTypes[] = {OperandType::kNone};
-template <AccumulatorUse accumulator_use>
+ BytecodeTraits<implicit_register_use>::kOperandTypes[] = {
+ OperandType::kNone};
+template <ImplicitRegisterUse implicit_register_use>
STATIC_CONST_MEMBER_DEFINITION const OperandTypeInfo
- BytecodeTraits<accumulator_use>::kOperandTypeInfos[] = {
+ BytecodeTraits<implicit_register_use>::kOperandTypeInfos[] = {
OperandTypeInfo::kNone};
-template <AccumulatorUse accumulator_use>
+template <ImplicitRegisterUse implicit_register_use>
STATIC_CONST_MEMBER_DEFINITION const OperandSize
- BytecodeTraits<accumulator_use>::kSingleScaleOperandSizes[] = {
+ BytecodeTraits<implicit_register_use>::kSingleScaleOperandSizes[] = {
OperandSize::kNone};
-template <AccumulatorUse accumulator_use>
+template <ImplicitRegisterUse implicit_register_use>
STATIC_CONST_MEMBER_DEFINITION const OperandSize
- BytecodeTraits<accumulator_use>::kDoubleScaleOperandSizes[] = {
+ BytecodeTraits<implicit_register_use>::kDoubleScaleOperandSizes[] = {
OperandSize::kNone};
-template <AccumulatorUse accumulator_use>
+template <ImplicitRegisterUse implicit_register_use>
STATIC_CONST_MEMBER_DEFINITION const OperandSize
- BytecodeTraits<accumulator_use>::kQuadrupleScaleOperandSizes[] = {
+ BytecodeTraits<implicit_register_use>::kQuadrupleScaleOperandSizes[] = {
OperandSize::kNone};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecodes.cc b/deps/v8/src/interpreter/bytecodes.cc
index 0eab890d1e..628408efc5 100644
--- a/deps/v8/src/interpreter/bytecodes.cc
+++ b/deps/v8/src/interpreter/bytecodes.cc
@@ -32,13 +32,13 @@ const int Bytecodes::kOperandCount[] = {
#undef ENTRY
};
-const AccumulatorUse Bytecodes::kAccumulatorUse[] = {
-#define ENTRY(Name, ...) BytecodeTraits<__VA_ARGS__>::kAccumulatorUse,
+const ImplicitRegisterUse Bytecodes::kImplicitRegisterUse[] = {
+#define ENTRY(Name, ...) BytecodeTraits<__VA_ARGS__>::kImplicitRegisterUse,
BYTECODE_LIST(ENTRY)
#undef ENTRY
};
-const int Bytecodes::kBytecodeSizes[3][kBytecodeCount] = {
+const uint8_t Bytecodes::kBytecodeSizes[3][kBytecodeCount] = {
{
#define ENTRY(Name, ...) BytecodeTraits<__VA_ARGS__>::kSingleScaleSize,
BYTECODE_LIST(ENTRY)
@@ -94,6 +94,13 @@ Bytecodes::kOperandKindSizes[3][BytecodeOperands::kOperandTypeCount] = {
};
// clang-format on
+// Make sure kFirstShortStar and kLastShortStar are set correctly.
+#define ASSERT_SHORT_STAR_RANGE(Name, ...) \
+ STATIC_ASSERT(Bytecode::k##Name >= Bytecode::kFirstShortStar && \
+ Bytecode::k##Name <= Bytecode::kLastShortStar);
+SHORT_STAR_BYTECODE_LIST(ASSERT_SHORT_STAR_RANGE)
+#undef ASSERT_SHORT_STAR_RANGE
+
// static
const char* Bytecodes::ToString(Bytecode bytecode) {
switch (bytecode) {
@@ -264,6 +271,11 @@ bool Bytecodes::IsRegisterOutputOperandType(OperandType operand_type) {
bool Bytecodes::IsStarLookahead(Bytecode bytecode, OperandScale operand_scale) {
if (operand_scale == OperandScale::kSingle) {
switch (bytecode) {
+ // Short-star lookahead is required for correctness on kDebugBreak0. The
+ // handler for all short-star codes re-reads the opcode from the bytecode
+ // array and would not work correctly if it instead read kDebugBreak0.
+ case Bytecode::kDebugBreak0:
+
case Bytecode::kLdaZero:
case Bytecode::kLdaSmi:
case Bytecode::kLdaNull:
@@ -332,7 +344,8 @@ bool Bytecodes::IsUnsignedOperandType(OperandType operand_type) {
// static
bool Bytecodes::BytecodeHasHandler(Bytecode bytecode,
OperandScale operand_scale) {
- return operand_scale == OperandScale::kSingle ||
+ return (operand_scale == OperandScale::kSingle &&
+ (!IsShortStar(bytecode) || bytecode == Bytecode::kStar0)) ||
Bytecodes::IsBytecodeWithScalableOperands(bytecode);
}
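
For readers unfamiliar with the ENTRY pattern above: BYTECODE_LIST is an X-macro, so the same list expands once into the Bytecode enum and once into each parallel table, which is why splicing SHORT_STAR_BYTECODE_LIST into the list is enough to populate kImplicitRegisterUse and the (now uint8_t) size tables for Star0..Star15. A toy sketch of the technique, not V8 code:

#include <cstdint>

#define TOY_BYTECODE_LIST(V) \
  V(LdaZero, 1)              \
  V(Star0, 1)                \
  V(Wide, 1)

enum class ToyBytecode : uint8_t {
#define ENTRY(Name, Size) k##Name,
  TOY_BYTECODE_LIST(ENTRY)
#undef ENTRY
};

constexpr uint8_t kToyBytecodeSizes[] = {
#define ENTRY(Name, Size) Size,
    TOY_BYTECODE_LIST(ENTRY)
#undef ENTRY
};

static_assert(sizeof(kToyBytecodeSizes) == 3, "one table entry per list entry");

The BytecodeHasHandler change above then leans on the fact that all sixteen short stars share a single handler: only kStar0 keeps its own single-scale handler, and because that shared handler re-reads the opcode from the bytecode array, kDebugBreak0 is added to the star-lookahead set as well.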
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 2cff678920..56d9d5af0d 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -21,355 +21,438 @@ namespace v8 {
namespace internal {
namespace interpreter {
-// The list of bytecodes which are interpreted by the interpreter.
-// Format is V(<bytecode>, <accumulator_use>, <operands>).
-#define BYTECODE_LIST(V) \
+// The list of single-byte Star variants, in the format of BYTECODE_LIST.
+#define SHORT_STAR_BYTECODE_LIST(V) \
+ V(Star15, ImplicitRegisterUse::kReadAccumulatorWriteShortStar) \
+ V(Star14, ImplicitRegisterUse::kReadAccumulatorWriteShortStar) \
+ V(Star13, ImplicitRegisterUse::kReadAccumulatorWriteShortStar) \
+ V(Star12, ImplicitRegisterUse::kReadAccumulatorWriteShortStar) \
+ V(Star11, ImplicitRegisterUse::kReadAccumulatorWriteShortStar) \
+ V(Star10, ImplicitRegisterUse::kReadAccumulatorWriteShortStar) \
+ V(Star9, ImplicitRegisterUse::kReadAccumulatorWriteShortStar) \
+ V(Star8, ImplicitRegisterUse::kReadAccumulatorWriteShortStar) \
+ V(Star7, ImplicitRegisterUse::kReadAccumulatorWriteShortStar) \
+ V(Star6, ImplicitRegisterUse::kReadAccumulatorWriteShortStar) \
+ V(Star5, ImplicitRegisterUse::kReadAccumulatorWriteShortStar) \
+ V(Star4, ImplicitRegisterUse::kReadAccumulatorWriteShortStar) \
+ V(Star3, ImplicitRegisterUse::kReadAccumulatorWriteShortStar) \
+ V(Star2, ImplicitRegisterUse::kReadAccumulatorWriteShortStar) \
+ V(Star1, ImplicitRegisterUse::kReadAccumulatorWriteShortStar) \
+ V(Star0, ImplicitRegisterUse::kReadAccumulatorWriteShortStar)
+
+// The list of bytecodes which have unique handlers (no other bytecode is
+// executed using identical code).
+// Format is V(<bytecode>, <implicit_register_use>, <operands>).
+#define BYTECODE_LIST_WITH_UNIQUE_HANDLERS(V) \
/* Extended width operands */ \
- V(Wide, AccumulatorUse::kNone) \
- V(ExtraWide, AccumulatorUse::kNone) \
+ V(Wide, ImplicitRegisterUse::kNone) \
+ V(ExtraWide, ImplicitRegisterUse::kNone) \
\
/* Debug Breakpoints - one for each possible size of unscaled bytecodes */ \
/* and one for each operand widening prefix bytecode */ \
- V(DebugBreakWide, AccumulatorUse::kReadWrite) \
- V(DebugBreakExtraWide, AccumulatorUse::kReadWrite) \
- V(DebugBreak0, AccumulatorUse::kReadWrite) \
- V(DebugBreak1, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(DebugBreak2, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ V(DebugBreakWide, ImplicitRegisterUse::kReadWriteAccumulator) \
+ V(DebugBreakExtraWide, ImplicitRegisterUse::kReadWriteAccumulator) \
+ V(DebugBreak0, ImplicitRegisterUse::kReadWriteAccumulator) \
+ V(DebugBreak1, ImplicitRegisterUse::kReadWriteAccumulator, \
OperandType::kReg) \
- V(DebugBreak3, AccumulatorUse::kReadWrite, OperandType::kReg, \
- OperandType::kReg, OperandType::kReg) \
- V(DebugBreak4, AccumulatorUse::kReadWrite, OperandType::kReg, \
- OperandType::kReg, OperandType::kReg, OperandType::kReg) \
- V(DebugBreak5, AccumulatorUse::kReadWrite, OperandType::kRuntimeId, \
+ V(DebugBreak2, ImplicitRegisterUse::kReadWriteAccumulator, \
OperandType::kReg, OperandType::kReg) \
- V(DebugBreak6, AccumulatorUse::kReadWrite, OperandType::kRuntimeId, \
+ V(DebugBreak3, ImplicitRegisterUse::kReadWriteAccumulator, \
OperandType::kReg, OperandType::kReg, OperandType::kReg) \
+ V(DebugBreak4, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kReg, OperandType::kReg, OperandType::kReg, \
+ OperandType::kReg) \
+ V(DebugBreak5, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kRuntimeId, OperandType::kReg, OperandType::kReg) \
+ V(DebugBreak6, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kRuntimeId, OperandType::kReg, OperandType::kReg, \
+ OperandType::kReg) \
\
/* Loading the accumulator */ \
- V(LdaZero, AccumulatorUse::kWrite) \
- V(LdaSmi, AccumulatorUse::kWrite, OperandType::kImm) \
- V(LdaUndefined, AccumulatorUse::kWrite) \
- V(LdaNull, AccumulatorUse::kWrite) \
- V(LdaTheHole, AccumulatorUse::kWrite) \
- V(LdaTrue, AccumulatorUse::kWrite) \
- V(LdaFalse, AccumulatorUse::kWrite) \
- V(LdaConstant, AccumulatorUse::kWrite, OperandType::kIdx) \
+ V(LdaZero, ImplicitRegisterUse::kWriteAccumulator) \
+ V(LdaSmi, ImplicitRegisterUse::kWriteAccumulator, OperandType::kImm) \
+ V(LdaUndefined, ImplicitRegisterUse::kWriteAccumulator) \
+ V(LdaNull, ImplicitRegisterUse::kWriteAccumulator) \
+ V(LdaTheHole, ImplicitRegisterUse::kWriteAccumulator) \
+ V(LdaTrue, ImplicitRegisterUse::kWriteAccumulator) \
+ V(LdaFalse, ImplicitRegisterUse::kWriteAccumulator) \
+ V(LdaConstant, ImplicitRegisterUse::kWriteAccumulator, OperandType::kIdx) \
\
/* Globals */ \
- V(LdaGlobal, AccumulatorUse::kWrite, OperandType::kIdx, OperandType::kIdx) \
- V(LdaGlobalInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx, \
+ V(LdaGlobal, ImplicitRegisterUse::kWriteAccumulator, OperandType::kIdx, \
+ OperandType::kIdx) \
+ V(LdaGlobalInsideTypeof, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kIdx, OperandType::kIdx) \
+ V(StaGlobal, ImplicitRegisterUse::kReadAccumulator, OperandType::kIdx, \
OperandType::kIdx) \
- V(StaGlobal, AccumulatorUse::kRead, OperandType::kIdx, OperandType::kIdx) \
\
/* Context operations */ \
- V(PushContext, AccumulatorUse::kRead, OperandType::kRegOut) \
- V(PopContext, AccumulatorUse::kNone, OperandType::kReg) \
- V(LdaContextSlot, AccumulatorUse::kWrite, OperandType::kReg, \
- OperandType::kIdx, OperandType::kUImm) \
- V(LdaImmutableContextSlot, AccumulatorUse::kWrite, OperandType::kReg, \
+ V(PushContext, ImplicitRegisterUse::kReadAccumulator, OperandType::kRegOut) \
+ V(PopContext, ImplicitRegisterUse::kNone, OperandType::kReg) \
+ V(LdaContextSlot, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg, \
OperandType::kIdx, OperandType::kUImm) \
- V(LdaCurrentContextSlot, AccumulatorUse::kWrite, OperandType::kIdx) \
- V(LdaImmutableCurrentContextSlot, AccumulatorUse::kWrite, OperandType::kIdx) \
- V(StaContextSlot, AccumulatorUse::kRead, OperandType::kReg, \
+ V(LdaImmutableContextSlot, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kReg, OperandType::kIdx, OperandType::kUImm) \
+ V(LdaCurrentContextSlot, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kIdx) \
+ V(LdaImmutableCurrentContextSlot, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kIdx) \
+ V(StaContextSlot, ImplicitRegisterUse::kReadAccumulator, OperandType::kReg, \
OperandType::kIdx, OperandType::kUImm) \
- V(StaCurrentContextSlot, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(StaCurrentContextSlot, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kIdx) \
\
/* Load-Store lookup slots */ \
- V(LdaLookupSlot, AccumulatorUse::kWrite, OperandType::kIdx) \
- V(LdaLookupContextSlot, AccumulatorUse::kWrite, OperandType::kIdx, \
- OperandType::kIdx, OperandType::kUImm) \
- V(LdaLookupGlobalSlot, AccumulatorUse::kWrite, OperandType::kIdx, \
- OperandType::kIdx, OperandType::kUImm) \
- V(LdaLookupSlotInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx) \
- V(LdaLookupContextSlotInsideTypeof, AccumulatorUse::kWrite, \
+ V(LdaLookupSlot, ImplicitRegisterUse::kWriteAccumulator, OperandType::kIdx) \
+ V(LdaLookupContextSlot, ImplicitRegisterUse::kWriteAccumulator, \
OperandType::kIdx, OperandType::kIdx, OperandType::kUImm) \
- V(LdaLookupGlobalSlotInsideTypeof, AccumulatorUse::kWrite, \
+ V(LdaLookupGlobalSlot, ImplicitRegisterUse::kWriteAccumulator, \
OperandType::kIdx, OperandType::kIdx, OperandType::kUImm) \
- V(StaLookupSlot, AccumulatorUse::kReadWrite, OperandType::kIdx, \
- OperandType::kFlag8) \
+ V(LdaLookupSlotInsideTypeof, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kIdx) \
+ V(LdaLookupContextSlotInsideTypeof, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kIdx, OperandType::kIdx, OperandType::kUImm) \
+ V(LdaLookupGlobalSlotInsideTypeof, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kIdx, OperandType::kIdx, OperandType::kUImm) \
+ V(StaLookupSlot, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kIdx, OperandType::kFlag8) \
\
/* Register-accumulator transfers */ \
- V(Ldar, AccumulatorUse::kWrite, OperandType::kReg) \
- V(Star, AccumulatorUse::kRead, OperandType::kRegOut) \
+ V(Ldar, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg) \
+ V(Star, ImplicitRegisterUse::kReadAccumulator, OperandType::kRegOut) \
\
/* Register-register transfers */ \
- V(Mov, AccumulatorUse::kNone, OperandType::kReg, OperandType::kRegOut) \
+ V(Mov, ImplicitRegisterUse::kNone, OperandType::kReg, OperandType::kRegOut) \
\
/* Property loads (LoadIC) operations */ \
- V(LdaNamedProperty, AccumulatorUse::kWrite, OperandType::kReg, \
- OperandType::kIdx, OperandType::kIdx) \
- V(LdaNamedPropertyNoFeedback, AccumulatorUse::kWrite, OperandType::kReg, \
- OperandType::kIdx) \
- V(LdaNamedPropertyFromSuper, AccumulatorUse::kReadWrite, OperandType::kReg, \
- OperandType::kIdx, OperandType::kIdx) \
- V(LdaKeyedProperty, AccumulatorUse::kReadWrite, OperandType::kReg, \
- OperandType::kIdx) \
+ V(LdaNamedProperty, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kReg, OperandType::kIdx, OperandType::kIdx) \
+ V(LdaNamedPropertyNoFeedback, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kReg, OperandType::kIdx) \
+ V(LdaNamedPropertyFromSuper, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kReg, OperandType::kIdx, OperandType::kIdx) \
+ V(LdaKeyedProperty, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kReg, OperandType::kIdx) \
\
/* Operations on module variables */ \
- V(LdaModuleVariable, AccumulatorUse::kWrite, OperandType::kImm, \
- OperandType::kUImm) \
- V(StaModuleVariable, AccumulatorUse::kRead, OperandType::kImm, \
- OperandType::kUImm) \
+ V(LdaModuleVariable, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kImm, OperandType::kUImm) \
+ V(StaModuleVariable, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kImm, OperandType::kUImm) \
\
/* Propery stores (StoreIC) operations */ \
- V(StaNamedProperty, AccumulatorUse::kReadWrite, OperandType::kReg, \
- OperandType::kIdx, OperandType::kIdx) \
- V(StaNamedPropertyNoFeedback, AccumulatorUse::kReadWrite, OperandType::kReg, \
- OperandType::kIdx, OperandType::kFlag8) \
- V(StaNamedOwnProperty, AccumulatorUse::kReadWrite, OperandType::kReg, \
- OperandType::kIdx, OperandType::kIdx) \
- V(StaKeyedProperty, AccumulatorUse::kReadWrite, OperandType::kReg, \
- OperandType::kReg, OperandType::kIdx) \
- V(StaInArrayLiteral, AccumulatorUse::kReadWrite, OperandType::kReg, \
- OperandType::kReg, OperandType::kIdx) \
- V(StaDataPropertyInLiteral, AccumulatorUse::kRead, OperandType::kReg, \
- OperandType::kReg, OperandType::kFlag8, OperandType::kIdx) \
- V(CollectTypeProfile, AccumulatorUse::kRead, OperandType::kImm) \
+ V(StaNamedProperty, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kReg, OperandType::kIdx, OperandType::kIdx) \
+ V(StaNamedPropertyNoFeedback, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kReg, OperandType::kIdx, OperandType::kFlag8) \
+ V(StaNamedOwnProperty, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kReg, OperandType::kIdx, OperandType::kIdx) \
+ V(StaKeyedProperty, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kReg, OperandType::kReg, OperandType::kIdx) \
+ V(StaInArrayLiteral, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kReg, OperandType::kReg, OperandType::kIdx) \
+ V(StaDataPropertyInLiteral, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kReg, OperandType::kReg, OperandType::kFlag8, \
+ OperandType::kIdx) \
+ V(CollectTypeProfile, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kImm) \
\
/* Binary Operators */ \
- V(Add, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
- V(Sub, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
- V(Mul, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
- V(Div, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
- V(Mod, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
- V(Exp, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
- V(BitwiseOr, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ V(Add, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kReg, \
OperandType::kIdx) \
- V(BitwiseXor, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ V(Sub, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kReg, \
OperandType::kIdx) \
- V(BitwiseAnd, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ V(Mul, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kReg, \
OperandType::kIdx) \
- V(ShiftLeft, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ V(Div, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kReg, \
OperandType::kIdx) \
- V(ShiftRight, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ V(Mod, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kReg, \
OperandType::kIdx) \
- V(ShiftRightLogical, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ V(Exp, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kReg, \
OperandType::kIdx) \
+ V(BitwiseOr, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(BitwiseXor, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(BitwiseAnd, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(ShiftLeft, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(ShiftRight, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(ShiftRightLogical, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kReg, OperandType::kIdx) \
\
/* Binary operators with immediate operands */ \
- V(AddSmi, AccumulatorUse::kReadWrite, OperandType::kImm, OperandType::kIdx) \
- V(SubSmi, AccumulatorUse::kReadWrite, OperandType::kImm, OperandType::kIdx) \
- V(MulSmi, AccumulatorUse::kReadWrite, OperandType::kImm, OperandType::kIdx) \
- V(DivSmi, AccumulatorUse::kReadWrite, OperandType::kImm, OperandType::kIdx) \
- V(ModSmi, AccumulatorUse::kReadWrite, OperandType::kImm, OperandType::kIdx) \
- V(ExpSmi, AccumulatorUse::kReadWrite, OperandType::kImm, OperandType::kIdx) \
- V(BitwiseOrSmi, AccumulatorUse::kReadWrite, OperandType::kImm, \
+ V(AddSmi, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kImm, \
OperandType::kIdx) \
- V(BitwiseXorSmi, AccumulatorUse::kReadWrite, OperandType::kImm, \
+ V(SubSmi, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kImm, \
OperandType::kIdx) \
- V(BitwiseAndSmi, AccumulatorUse::kReadWrite, OperandType::kImm, \
+ V(MulSmi, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kImm, \
OperandType::kIdx) \
- V(ShiftLeftSmi, AccumulatorUse::kReadWrite, OperandType::kImm, \
+ V(DivSmi, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kImm, \
OperandType::kIdx) \
- V(ShiftRightSmi, AccumulatorUse::kReadWrite, OperandType::kImm, \
+ V(ModSmi, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kImm, \
OperandType::kIdx) \
- V(ShiftRightLogicalSmi, AccumulatorUse::kReadWrite, OperandType::kImm, \
+ V(ExpSmi, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kImm, \
OperandType::kIdx) \
+ V(BitwiseOrSmi, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kImm, OperandType::kIdx) \
+ V(BitwiseXorSmi, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kImm, OperandType::kIdx) \
+ V(BitwiseAndSmi, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kImm, OperandType::kIdx) \
+ V(ShiftLeftSmi, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kImm, OperandType::kIdx) \
+ V(ShiftRightSmi, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kImm, OperandType::kIdx) \
+ V(ShiftRightLogicalSmi, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kImm, OperandType::kIdx) \
\
/* Unary Operators */ \
- V(Inc, AccumulatorUse::kReadWrite, OperandType::kIdx) \
- V(Dec, AccumulatorUse::kReadWrite, OperandType::kIdx) \
- V(Negate, AccumulatorUse::kReadWrite, OperandType::kIdx) \
- V(BitwiseNot, AccumulatorUse::kReadWrite, OperandType::kIdx) \
- V(ToBooleanLogicalNot, AccumulatorUse::kReadWrite) \
- V(LogicalNot, AccumulatorUse::kReadWrite) \
- V(TypeOf, AccumulatorUse::kReadWrite) \
- V(DeletePropertyStrict, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(DeletePropertySloppy, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(Inc, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kIdx) \
+ V(Dec, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kIdx) \
+ V(Negate, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kIdx) \
+ V(BitwiseNot, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kIdx) \
+ V(ToBooleanLogicalNot, ImplicitRegisterUse::kReadWriteAccumulator) \
+ V(LogicalNot, ImplicitRegisterUse::kReadWriteAccumulator) \
+ V(TypeOf, ImplicitRegisterUse::kReadWriteAccumulator) \
+ V(DeletePropertyStrict, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kReg) \
+ V(DeletePropertySloppy, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kReg) \
\
/* GetSuperConstructor operator */ \
- V(GetSuperConstructor, AccumulatorUse::kRead, OperandType::kRegOut) \
+ V(GetSuperConstructor, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kRegOut) \
\
/* Call operations */ \
- V(CallAnyReceiver, AccumulatorUse::kWrite, OperandType::kReg, \
- OperandType::kRegList, OperandType::kRegCount, OperandType::kIdx) \
- V(CallProperty, AccumulatorUse::kWrite, OperandType::kReg, \
+ V(CallAnyReceiver, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kReg, OperandType::kRegList, OperandType::kRegCount, \
+ OperandType::kIdx) \
+ V(CallProperty, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg, \
OperandType::kRegList, OperandType::kRegCount, OperandType::kIdx) \
- V(CallProperty0, AccumulatorUse::kWrite, OperandType::kReg, \
+ V(CallProperty0, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg, \
OperandType::kReg, OperandType::kIdx) \
- V(CallProperty1, AccumulatorUse::kWrite, OperandType::kReg, \
+ V(CallProperty1, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg, \
OperandType::kReg, OperandType::kReg, OperandType::kIdx) \
- V(CallProperty2, AccumulatorUse::kWrite, OperandType::kReg, \
+ V(CallProperty2, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg, \
OperandType::kReg, OperandType::kReg, OperandType::kReg, \
OperandType::kIdx) \
- V(CallUndefinedReceiver, AccumulatorUse::kWrite, OperandType::kReg, \
- OperandType::kRegList, OperandType::kRegCount, OperandType::kIdx) \
- V(CallUndefinedReceiver0, AccumulatorUse::kWrite, OperandType::kReg, \
+ V(CallUndefinedReceiver, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kReg, OperandType::kRegList, OperandType::kRegCount, \
OperandType::kIdx) \
- V(CallUndefinedReceiver1, AccumulatorUse::kWrite, OperandType::kReg, \
+ V(CallUndefinedReceiver0, ImplicitRegisterUse::kWriteAccumulator, \
OperandType::kReg, OperandType::kIdx) \
- V(CallUndefinedReceiver2, AccumulatorUse::kWrite, OperandType::kReg, \
+ V(CallUndefinedReceiver1, ImplicitRegisterUse::kWriteAccumulator, \
OperandType::kReg, OperandType::kReg, OperandType::kIdx) \
- V(CallNoFeedback, AccumulatorUse::kWrite, OperandType::kReg, \
+ V(CallUndefinedReceiver2, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kReg, OperandType::kReg, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(CallNoFeedback, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg, \
OperandType::kRegList, OperandType::kRegCount) \
- V(CallWithSpread, AccumulatorUse::kWrite, OperandType::kReg, \
+ V(CallWithSpread, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg, \
OperandType::kRegList, OperandType::kRegCount, OperandType::kIdx) \
- V(CallRuntime, AccumulatorUse::kWrite, OperandType::kRuntimeId, \
- OperandType::kRegList, OperandType::kRegCount) \
- V(CallRuntimeForPair, AccumulatorUse::kNone, OperandType::kRuntimeId, \
+ V(CallRuntime, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kRuntimeId, OperandType::kRegList, OperandType::kRegCount) \
+ V(CallRuntimeForPair, ImplicitRegisterUse::kNone, OperandType::kRuntimeId, \
OperandType::kRegList, OperandType::kRegCount, OperandType::kRegOutPair) \
- V(CallJSRuntime, AccumulatorUse::kWrite, OperandType::kNativeContextIndex, \
- OperandType::kRegList, OperandType::kRegCount) \
+ V(CallJSRuntime, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kNativeContextIndex, OperandType::kRegList, \
+ OperandType::kRegCount) \
\
/* Intrinsics */ \
- V(InvokeIntrinsic, AccumulatorUse::kWrite, OperandType::kIntrinsicId, \
- OperandType::kRegList, OperandType::kRegCount) \
+ V(InvokeIntrinsic, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kIntrinsicId, OperandType::kRegList, OperandType::kRegCount) \
\
/* Construct operators */ \
- V(Construct, AccumulatorUse::kReadWrite, OperandType::kReg, \
- OperandType::kRegList, OperandType::kRegCount, OperandType::kIdx) \
- V(ConstructWithSpread, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ V(Construct, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kReg, \
OperandType::kRegList, OperandType::kRegCount, OperandType::kIdx) \
+ V(ConstructWithSpread, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kReg, OperandType::kRegList, OperandType::kRegCount, \
+ OperandType::kIdx) \
\
/* Test Operators */ \
- V(TestEqual, AccumulatorUse::kReadWrite, OperandType::kReg, \
- OperandType::kIdx) \
- V(TestEqualStrict, AccumulatorUse::kReadWrite, OperandType::kReg, \
- OperandType::kIdx) \
- V(TestLessThan, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ V(TestEqual, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kReg, \
OperandType::kIdx) \
- V(TestGreaterThan, AccumulatorUse::kReadWrite, OperandType::kReg, \
- OperandType::kIdx) \
- V(TestLessThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg, \
- OperandType::kIdx) \
- V(TestGreaterThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg, \
- OperandType::kIdx) \
- V(TestReferenceEqual, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(TestInstanceOf, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ V(TestEqualStrict, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kReg, OperandType::kIdx) \
+ V(TestLessThan, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kReg, OperandType::kIdx) \
+ V(TestGreaterThan, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kReg, OperandType::kIdx) \
+ V(TestLessThanOrEqual, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kReg, OperandType::kIdx) \
+ V(TestGreaterThanOrEqual, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kReg, OperandType::kIdx) \
+ V(TestReferenceEqual, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kReg) \
+ V(TestInstanceOf, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kReg, OperandType::kIdx) \
+ V(TestIn, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kReg, \
OperandType::kIdx) \
- V(TestIn, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
- V(TestUndetectable, AccumulatorUse::kReadWrite) \
- V(TestNull, AccumulatorUse::kReadWrite) \
- V(TestUndefined, AccumulatorUse::kReadWrite) \
- V(TestTypeOf, AccumulatorUse::kReadWrite, OperandType::kFlag8) \
+ V(TestUndetectable, ImplicitRegisterUse::kReadWriteAccumulator) \
+ V(TestNull, ImplicitRegisterUse::kReadWriteAccumulator) \
+ V(TestUndefined, ImplicitRegisterUse::kReadWriteAccumulator) \
+ V(TestTypeOf, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kFlag8) \
\
/* Cast operators */ \
- V(ToName, AccumulatorUse::kRead, OperandType::kRegOut) \
- V(ToNumber, AccumulatorUse::kReadWrite, OperandType::kIdx) \
- V(ToNumeric, AccumulatorUse::kReadWrite, OperandType::kIdx) \
- V(ToObject, AccumulatorUse::kRead, OperandType::kRegOut) \
- V(ToString, AccumulatorUse::kReadWrite) \
+ V(ToName, ImplicitRegisterUse::kReadAccumulator, OperandType::kRegOut) \
+ V(ToNumber, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kIdx) \
+ V(ToNumeric, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kIdx) \
+ V(ToObject, ImplicitRegisterUse::kReadAccumulator, OperandType::kRegOut) \
+ V(ToString, ImplicitRegisterUse::kReadWriteAccumulator) \
\
/* Literals */ \
- V(CreateRegExpLiteral, AccumulatorUse::kWrite, OperandType::kIdx, \
- OperandType::kIdx, OperandType::kFlag8) \
- V(CreateArrayLiteral, AccumulatorUse::kWrite, OperandType::kIdx, \
- OperandType::kIdx, OperandType::kFlag8) \
- V(CreateArrayFromIterable, AccumulatorUse::kReadWrite) \
- V(CreateEmptyArrayLiteral, AccumulatorUse::kWrite, OperandType::kIdx) \
- V(CreateObjectLiteral, AccumulatorUse::kWrite, OperandType::kIdx, \
- OperandType::kIdx, OperandType::kFlag8) \
- V(CreateEmptyObjectLiteral, AccumulatorUse::kWrite) \
- V(CloneObject, AccumulatorUse::kWrite, OperandType::kReg, \
+ V(CreateRegExpLiteral, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kIdx, OperandType::kIdx, OperandType::kFlag8) \
+ V(CreateArrayLiteral, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kIdx, OperandType::kIdx, OperandType::kFlag8) \
+ V(CreateArrayFromIterable, ImplicitRegisterUse::kReadWriteAccumulator) \
+ V(CreateEmptyArrayLiteral, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kIdx) \
+ V(CreateObjectLiteral, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kIdx, OperandType::kIdx, OperandType::kFlag8) \
+ V(CreateEmptyObjectLiteral, ImplicitRegisterUse::kWriteAccumulator) \
+ V(CloneObject, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg, \
OperandType::kFlag8, OperandType::kIdx) \
\
/* Tagged templates */ \
- V(GetTemplateObject, AccumulatorUse::kWrite, OperandType::kIdx, \
- OperandType::kIdx) \
+ V(GetTemplateObject, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kIdx, OperandType::kIdx) \
\
/* Closure allocation */ \
- V(CreateClosure, AccumulatorUse::kWrite, OperandType::kIdx, \
+ V(CreateClosure, ImplicitRegisterUse::kWriteAccumulator, OperandType::kIdx, \
OperandType::kIdx, OperandType::kFlag8) \
\
/* Context allocation */ \
- V(CreateBlockContext, AccumulatorUse::kWrite, OperandType::kIdx) \
- V(CreateCatchContext, AccumulatorUse::kWrite, OperandType::kReg, \
- OperandType::kIdx) \
- V(CreateFunctionContext, AccumulatorUse::kWrite, OperandType::kIdx, \
- OperandType::kUImm) \
- V(CreateEvalContext, AccumulatorUse::kWrite, OperandType::kIdx, \
- OperandType::kUImm) \
- V(CreateWithContext, AccumulatorUse::kWrite, OperandType::kReg, \
+ V(CreateBlockContext, ImplicitRegisterUse::kWriteAccumulator, \
OperandType::kIdx) \
+ V(CreateCatchContext, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kReg, OperandType::kIdx) \
+ V(CreateFunctionContext, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kIdx, OperandType::kUImm) \
+ V(CreateEvalContext, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kIdx, OperandType::kUImm) \
+ V(CreateWithContext, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kReg, OperandType::kIdx) \
\
/* Arguments allocation */ \
- V(CreateMappedArguments, AccumulatorUse::kWrite) \
- V(CreateUnmappedArguments, AccumulatorUse::kWrite) \
- V(CreateRestParameter, AccumulatorUse::kWrite) \
+ V(CreateMappedArguments, ImplicitRegisterUse::kWriteAccumulator) \
+ V(CreateUnmappedArguments, ImplicitRegisterUse::kWriteAccumulator) \
+ V(CreateRestParameter, ImplicitRegisterUse::kWriteAccumulator) \
\
/* Control Flow -- carefully ordered for efficient checks */ \
/* - [Unconditional jumps] */ \
- V(JumpLoop, AccumulatorUse::kNone, OperandType::kUImm, OperandType::kImm) \
+ V(JumpLoop, ImplicitRegisterUse::kNone, OperandType::kUImm, \
+ OperandType::kImm) \
/* - [Forward jumps] */ \
- V(Jump, AccumulatorUse::kNone, OperandType::kUImm) \
+ V(Jump, ImplicitRegisterUse::kNone, OperandType::kUImm) \
/* - [Start constant jumps] */ \
- V(JumpConstant, AccumulatorUse::kNone, OperandType::kIdx) \
+ V(JumpConstant, ImplicitRegisterUse::kNone, OperandType::kIdx) \
/* - [Conditional jumps] */ \
/* - [Conditional constant jumps] */ \
- V(JumpIfNullConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpIfNotNullConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpIfUndefinedConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpIfNotUndefinedConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpIfUndefinedOrNullConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpIfTrueConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpIfFalseConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpIfJSReceiverConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(JumpIfNullConstant, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kIdx) \
+ V(JumpIfNotNullConstant, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kIdx) \
+ V(JumpIfUndefinedConstant, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kIdx) \
+ V(JumpIfNotUndefinedConstant, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kIdx) \
+ V(JumpIfUndefinedOrNullConstant, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kIdx) \
+ V(JumpIfTrueConstant, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kIdx) \
+ V(JumpIfFalseConstant, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kIdx) \
+ V(JumpIfJSReceiverConstant, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kIdx) \
/* - [Start ToBoolean jumps] */ \
- V(JumpIfToBooleanTrueConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpIfToBooleanFalseConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(JumpIfToBooleanTrueConstant, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kIdx) \
+ V(JumpIfToBooleanFalseConstant, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kIdx) \
/* - [End constant jumps] */ \
/* - [Conditional immediate jumps] */ \
- V(JumpIfToBooleanTrue, AccumulatorUse::kRead, OperandType::kUImm) \
- V(JumpIfToBooleanFalse, AccumulatorUse::kRead, OperandType::kUImm) \
+ V(JumpIfToBooleanTrue, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kUImm) \
+ V(JumpIfToBooleanFalse, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kUImm) \
/* - [End ToBoolean jumps] */ \
- V(JumpIfTrue, AccumulatorUse::kRead, OperandType::kUImm) \
- V(JumpIfFalse, AccumulatorUse::kRead, OperandType::kUImm) \
- V(JumpIfNull, AccumulatorUse::kRead, OperandType::kUImm) \
- V(JumpIfNotNull, AccumulatorUse::kRead, OperandType::kUImm) \
- V(JumpIfUndefined, AccumulatorUse::kRead, OperandType::kUImm) \
- V(JumpIfNotUndefined, AccumulatorUse::kRead, OperandType::kUImm) \
- V(JumpIfUndefinedOrNull, AccumulatorUse::kRead, OperandType::kUImm) \
- V(JumpIfJSReceiver, AccumulatorUse::kRead, OperandType::kUImm) \
+ V(JumpIfTrue, ImplicitRegisterUse::kReadAccumulator, OperandType::kUImm) \
+ V(JumpIfFalse, ImplicitRegisterUse::kReadAccumulator, OperandType::kUImm) \
+ V(JumpIfNull, ImplicitRegisterUse::kReadAccumulator, OperandType::kUImm) \
+ V(JumpIfNotNull, ImplicitRegisterUse::kReadAccumulator, OperandType::kUImm) \
+ V(JumpIfUndefined, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kUImm) \
+ V(JumpIfNotUndefined, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kUImm) \
+ V(JumpIfUndefinedOrNull, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kUImm) \
+ V(JumpIfJSReceiver, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kUImm) \
\
/* Smi-table lookup for switch statements */ \
- V(SwitchOnSmiNoFeedback, AccumulatorUse::kRead, OperandType::kIdx, \
- OperandType::kUImm, OperandType::kImm) \
+ V(SwitchOnSmiNoFeedback, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kIdx, OperandType::kUImm, OperandType::kImm) \
\
/* Complex flow control For..in */ \
- V(ForInEnumerate, AccumulatorUse::kWrite, OperandType::kReg) \
- V(ForInPrepare, AccumulatorUse::kRead, OperandType::kRegOutTriple, \
- OperandType::kIdx) \
- V(ForInContinue, AccumulatorUse::kWrite, OperandType::kReg, \
+ V(ForInEnumerate, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg) \
+ V(ForInPrepare, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kRegOutTriple, OperandType::kIdx) \
+ V(ForInContinue, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg, \
OperandType::kReg) \
- V(ForInNext, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg, \
- OperandType::kRegPair, OperandType::kIdx) \
- V(ForInStep, AccumulatorUse::kWrite, OperandType::kReg) \
+ V(ForInNext, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg, \
+ OperandType::kReg, OperandType::kRegPair, OperandType::kIdx) \
+ V(ForInStep, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg) \
\
/* Update the pending message */ \
- V(SetPendingMessage, AccumulatorUse::kReadWrite) \
+ V(SetPendingMessage, ImplicitRegisterUse::kReadWriteAccumulator) \
\
/* Non-local flow control */ \
- V(Throw, AccumulatorUse::kRead) \
- V(ReThrow, AccumulatorUse::kRead) \
- V(Return, AccumulatorUse::kRead) \
- V(ThrowReferenceErrorIfHole, AccumulatorUse::kRead, OperandType::kIdx) \
- V(ThrowSuperNotCalledIfHole, AccumulatorUse::kRead) \
- V(ThrowSuperAlreadyCalledIfNotHole, AccumulatorUse::kRead) \
- V(ThrowIfNotSuperConstructor, AccumulatorUse::kNone, OperandType::kReg) \
+ V(Throw, ImplicitRegisterUse::kReadAccumulator) \
+ V(ReThrow, ImplicitRegisterUse::kReadAccumulator) \
+ V(Return, ImplicitRegisterUse::kReadAccumulator) \
+ V(ThrowReferenceErrorIfHole, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kIdx) \
+ V(ThrowSuperNotCalledIfHole, ImplicitRegisterUse::kReadAccumulator) \
+ V(ThrowSuperAlreadyCalledIfNotHole, ImplicitRegisterUse::kReadAccumulator) \
+ V(ThrowIfNotSuperConstructor, ImplicitRegisterUse::kNone, OperandType::kReg) \
\
/* Generators */ \
- V(SwitchOnGeneratorState, AccumulatorUse::kNone, OperandType::kReg, \
+ V(SwitchOnGeneratorState, ImplicitRegisterUse::kNone, OperandType::kReg, \
OperandType::kIdx, OperandType::kUImm) \
- V(SuspendGenerator, AccumulatorUse::kRead, OperandType::kReg, \
- OperandType::kRegList, OperandType::kRegCount, OperandType::kUImm) \
- V(ResumeGenerator, AccumulatorUse::kWrite, OperandType::kReg, \
- OperandType::kRegOutList, OperandType::kRegCount) \
+ V(SuspendGenerator, ImplicitRegisterUse::kReadAccumulator, \
+ OperandType::kReg, OperandType::kRegList, OperandType::kRegCount, \
+ OperandType::kUImm) \
+ V(ResumeGenerator, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kReg, OperandType::kRegOutList, OperandType::kRegCount) \
\
/* Iterator protocol operations */ \
- V(GetIterator, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kIdx, \
- OperandType::kIdx) \
+ V(GetIterator, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg, \
+ OperandType::kIdx, OperandType::kIdx) \
\
/* Debugger */ \
- V(Debugger, AccumulatorUse::kNone) \
+ V(Debugger, ImplicitRegisterUse::kNone) \
\
/* Block Coverage */ \
- V(IncBlockCounter, AccumulatorUse::kNone, OperandType::kIdx) \
+ V(IncBlockCounter, ImplicitRegisterUse::kNone, OperandType::kIdx) \
\
/* Execution Abort (internal error) */ \
- V(Abort, AccumulatorUse::kNone, OperandType::kIdx) \
- \
- /* Illegal bytecode */ \
- V(Illegal, AccumulatorUse::kNone)
+ V(Abort, ImplicitRegisterUse::kNone, OperandType::kIdx)
+
+// The list of bytecodes which are interpreted by the interpreter.
+// Format is V(<bytecode>, <implicit_register_use>, <operands>).
+#define BYTECODE_LIST(V) \
+ BYTECODE_LIST_WITH_UNIQUE_HANDLERS(V) \
+ \
+ /* Special-case Star for common register numbers, to save space */ \
+ SHORT_STAR_BYTECODE_LIST(V) \
+ \
+ /* Illegal bytecode */ \
+ V(Illegal, ImplicitRegisterUse::kNone)
// List of debug break bytecodes.
#define DEBUG_BREAK_PLAIN_BYTECODE_LIST(V) \
@@ -468,7 +551,9 @@ enum class Bytecode : uint8_t {
#define COUNT_BYTECODE(x, ...) +1
// The COUNT_BYTECODE macro will turn this into kLast = -1 +1 +1... which will
// evaluate to the same value as the last real bytecode.
- kLast = -1 BYTECODE_LIST(COUNT_BYTECODE)
+ kLast = -1 BYTECODE_LIST(COUNT_BYTECODE),
+ kFirstShortStar = kStar15,
+ kLastShortStar = kStar0
#undef COUNT_BYTECODE
};
@@ -480,6 +565,10 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
// The total number of bytecodes used.
static const int kBytecodeCount = static_cast<int>(Bytecode::kLast) + 1;
+ static const int kShortStarCount =
+ static_cast<int>(Bytecode::kLastShortStar) -
+ static_cast<int>(Bytecode::kFirstShortStar) + 1;
+
// Returns string representation of |bytecode|.
static const char* ToString(Bytecode bytecode);
@@ -535,19 +624,27 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
}
// Returns how accumulator is used by |bytecode|.
- static AccumulatorUse GetAccumulatorUse(Bytecode bytecode) {
+ static ImplicitRegisterUse GetImplicitRegisterUse(Bytecode bytecode) {
DCHECK_LE(bytecode, Bytecode::kLast);
- return kAccumulatorUse[static_cast<size_t>(bytecode)];
+ return kImplicitRegisterUse[static_cast<size_t>(bytecode)];
}
// Returns true if |bytecode| reads the accumulator.
static bool ReadsAccumulator(Bytecode bytecode) {
- return BytecodeOperands::ReadsAccumulator(GetAccumulatorUse(bytecode));
+ return BytecodeOperands::ReadsAccumulator(GetImplicitRegisterUse(bytecode));
}
// Returns true if |bytecode| writes the accumulator.
static bool WritesAccumulator(Bytecode bytecode) {
- return BytecodeOperands::WritesAccumulator(GetAccumulatorUse(bytecode));
+ return BytecodeOperands::WritesAccumulator(
+ GetImplicitRegisterUse(bytecode));
+ }
+
+ // Returns true if |bytecode| writes to a register not specified by an
+ // operand.
+ static bool WritesImplicitRegister(Bytecode bytecode) {
+ return BytecodeOperands::WritesImplicitRegister(
+ GetImplicitRegisterUse(bytecode));
}
// Return true if |bytecode| is an accumulator load without effects,
@@ -574,11 +671,20 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
bytecode == Bytecode::kTestTypeOf;
}
+ static constexpr bool IsShortStar(Bytecode bytecode) {
+ return bytecode >= Bytecode::kFirstShortStar &&
+ bytecode <= Bytecode::kLastShortStar;
+ }
+
+ static constexpr bool IsAnyStar(Bytecode bytecode) {
+ return bytecode == Bytecode::kStar || IsShortStar(bytecode);
+ }
+
// Return true if |bytecode| is a register load without effects,
// e.g. Mov, Star.
static constexpr bool IsRegisterLoadWithoutEffects(Bytecode bytecode) {
return bytecode == Bytecode::kMov || bytecode == Bytecode::kPopContext ||
- bytecode == Bytecode::kPushContext || bytecode == Bytecode::kStar;
+ bytecode == Bytecode::kPushContext || IsAnyStar(bytecode);
}
// Returns true if the bytecode is a conditional jump taking
@@ -668,7 +774,7 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
// Returns true if the bytecode is Ldar or Star.
static constexpr bool IsLdarOrStar(Bytecode bytecode) {
- return bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar;
+ return bytecode == Bytecode::kLdar || IsAnyStar(bytecode);
}
// Returns true if the bytecode is a call or a constructor call.
@@ -934,7 +1040,8 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
}
static Address bytecode_size_table_address() {
- return reinterpret_cast<Address>(const_cast<int*>(&kBytecodeSizes[0][0]));
+ return reinterpret_cast<Address>(
+ const_cast<uint8_t*>(&kBytecodeSizes[0][0]));
}
private:
@@ -942,9 +1049,9 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
static const OperandTypeInfo* const kOperandTypeInfos[];
static const int kOperandCount[];
static const int kNumberOfRegisterOperands[];
- static const AccumulatorUse kAccumulatorUse[];
+ static const ImplicitRegisterUse kImplicitRegisterUse[];
static const bool kIsScalable[];
- static const int kBytecodeSizes[3][kBytecodeCount];
+ static const uint8_t kBytecodeSizes[3][kBytecodeCount];
static const OperandSize* const kOperandSizes[3][kBytecodeCount];
static OperandSize const
kOperandKindSizes[3][BytecodeOperands::kOperandTypeCount];
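
The two new enum aliases and the IsShortStar range check are easiest to see in a standalone model (invented opcode values): SHORT_STAR_BYTECODE_LIST names Star15 first and Star0 last, so the sixteen short stars occupy consecutive opcodes with Illegal immediately after them.

// Sketch only, not V8 code.
enum class ToyBytecode : int {
  kStar15 = 100,
  kStar0 = kStar15 + 15,  // the other fourteen short stars sit in between
  kIllegal,               // spliced in right after the short stars
  kFirstShortStar = kStar15,
  kLastShortStar = kStar0,
};

constexpr bool IsShortStar(ToyBytecode bytecode) {
  return bytecode >= ToyBytecode::kFirstShortStar &&
         bytecode <= ToyBytecode::kLastShortStar;
}

static_assert(IsShortStar(ToyBytecode::kStar0), "in range");
static_assert(!IsShortStar(ToyBytecode::kIllegal), "out of range");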
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index 87fe0559ae..b17995f0a1 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -29,7 +29,6 @@ namespace interpreter {
V(EmptyObjectBoilerplateDescription, empty_object_boilerplate_description) \
V(EmptyArrayBoilerplateDescription, empty_array_boilerplate_description) \
V(EmptyFixedArray, empty_fixed_array) \
- V(HomeObjectSymbol, home_object_symbol) \
V(IteratorSymbol, iterator_symbol) \
V(InterpreterTrampolineSymbol, interpreter_trampoline_symbol) \
V(NaN, nan_value)
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index 596783b64f..4ff5579597 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -42,12 +42,12 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
TVARIABLE_CONSTRUCTOR(
accumulator_,
Parameter<Object>(InterpreterDispatchDescriptor::kAccumulator)),
- accumulator_use_(AccumulatorUse::kNone),
+ implicit_register_use_(ImplicitRegisterUse::kNone),
made_call_(false),
reloaded_frame_ptr_(false),
bytecode_array_valid_(true) {
-#ifdef V8_TRACE_IGNITION
- TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
+#ifdef V8_TRACE_UNOPTIMIZED
+ TraceBytecode(Runtime::kTraceUnoptimizedBytecodeEntry);
#endif
RegisterCallGenerationCallbacks([this] { CallPrologue(); },
[this] { CallEpilogue(); });
@@ -64,7 +64,8 @@ InterpreterAssembler::~InterpreterAssembler() {
// If the following check fails the handler does not use the
// accumulator in the way described in the bytecode definitions in
// bytecodes.h.
- DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
+ DCHECK_EQ(implicit_register_use_,
+ Bytecodes::GetImplicitRegisterUse(bytecode_));
UnregisterCallGenerationCallbacks();
}
@@ -154,13 +155,15 @@ TNode<Object> InterpreterAssembler::GetAccumulatorUnchecked() {
TNode<Object> InterpreterAssembler::GetAccumulator() {
DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
- accumulator_use_ = accumulator_use_ | AccumulatorUse::kRead;
+ implicit_register_use_ =
+ implicit_register_use_ | ImplicitRegisterUse::kReadAccumulator;
return TaggedPoisonOnSpeculation(GetAccumulatorUnchecked());
}
void InterpreterAssembler::SetAccumulator(TNode<Object> value) {
DCHECK(Bytecodes::WritesAccumulator(bytecode_));
- accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
+ implicit_register_use_ =
+ implicit_register_use_ | ImplicitRegisterUse::kWriteAccumulator;
accumulator_ = value;
}
@@ -199,41 +202,6 @@ TNode<Context> InterpreterAssembler::GetContextAtDepth(TNode<Context> context,
return cur_context.value();
}
-void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(
- TNode<Context> context, TNode<Uint32T> depth, Label* target) {
- TVARIABLE(Context, cur_context, context);
- TVARIABLE(Uint32T, cur_depth, depth);
-
- Label context_search(this, {&cur_depth, &cur_context});
- Label no_extension(this);
-
- // Loop until the depth is 0.
- Goto(&context_search);
- BIND(&context_search);
- {
- // Check if context has an extension slot.
- TNode<BoolT> has_extension =
- LoadScopeInfoHasExtensionField(LoadScopeInfo(cur_context.value()));
- GotoIfNot(has_extension, &no_extension);
-
- // Jump to the target if the extension slot is not an undefined value.
- TNode<Object> extension_slot =
- LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX);
- Branch(TaggedNotEqual(extension_slot, UndefinedConstant()), target,
- &no_extension);
-
- BIND(&no_extension);
- {
- cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1)));
- cur_context = CAST(
- LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
-
- GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
- &context_search);
- }
- }
-}
-
TNode<IntPtrT> InterpreterAssembler::RegisterLocation(
TNode<IntPtrT> reg_index) {
return Signed(WordPoisonOnSpeculation(
@@ -330,6 +298,35 @@ void InterpreterAssembler::StoreRegister(TNode<Object> value,
RegisterFrameOffset(reg_index), value);
}
+void InterpreterAssembler::StoreRegisterForShortStar(TNode<Object> value,
+ TNode<WordT> opcode) {
+ DCHECK(Bytecodes::IsShortStar(bytecode_));
+ implicit_register_use_ =
+ implicit_register_use_ | ImplicitRegisterUse::kWriteShortStar;
+
+ CSA_ASSERT(
+ this, UintPtrGreaterThanOrEqual(opcode, UintPtrConstant(static_cast<int>(
+ Bytecode::kFirstShortStar))));
+ CSA_ASSERT(
+ this,
+ UintPtrLessThanOrEqual(
+ opcode, UintPtrConstant(static_cast<int>(Bytecode::kLastShortStar))));
+
+ // Compute the constant that we can add to a Bytecode value to map the range
+ // [Bytecode::kStar15, Bytecode::kStar0] to the range
+ // [Register(15).ToOperand(), Register(0).ToOperand()].
+ constexpr int short_star_to_operand =
+ Register(0).ToOperand() - static_cast<int>(Bytecode::kStar0);
+ // Make sure the values count in the right direction.
+ STATIC_ASSERT(short_star_to_operand ==
+ Register(1).ToOperand() - static_cast<int>(Bytecode::kStar1));
+
+ TNode<IntPtrT> offset =
+ IntPtrAdd(RegisterFrameOffset(Signed(opcode)),
+ IntPtrConstant(short_star_to_operand * kSystemPointerSize));
+ StoreFullTaggedNoWriteBarrier(GetInterpretedFramePointer(), offset, value);
+}
+
void InterpreterAssembler::StoreRegisterAtOperandIndex(TNode<Object> value,
int operand_index) {
StoreRegister(value,
@@ -726,8 +723,7 @@ void InterpreterAssembler::CallPrologue() {
made_call_ = true;
}
-void InterpreterAssembler::CallEpilogue() {
-}
+void InterpreterAssembler::CallEpilogue() {}
void InterpreterAssembler::CallJSAndDispatch(
TNode<Object> function, TNode<Context> context, const RegListNodePair& args,
@@ -755,7 +751,8 @@ void InterpreterAssembler::CallJSAndDispatch(
args_count, args.base_reg_location(),
function);
// TailCallStubThenDispatch updates accumulator with result.
- accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
+ implicit_register_use_ =
+ implicit_register_use_ | ImplicitRegisterUse::kWriteAccumulator;
}
template <class... TArgs>
@@ -781,7 +778,8 @@ void InterpreterAssembler::CallJSAndDispatch(TNode<Object> function,
context, function, arg_count, args...);
}
// TailCallStubThenDispatch updates accumulator with result.
- accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
+ implicit_register_use_ =
+ implicit_register_use_ | ImplicitRegisterUse::kWriteAccumulator;
}
// Instantiate CallJSAndDispatch() for argument counts used by interpreter
@@ -818,7 +816,8 @@ void InterpreterAssembler::CallJSWithSpreadAndDispatch(
args_count, args.base_reg_location(),
function);
// TailCallStubThenDispatch updates accumulator with result.
- accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
+ implicit_register_use_ =
+ implicit_register_use_ | ImplicitRegisterUse::kWriteAccumulator;
}
TNode<Object> InterpreterAssembler::Construct(
@@ -832,8 +831,8 @@ TNode<Object> InterpreterAssembler::Construct(
construct_array(this, &var_site);
CollectConstructFeedback(context, target, new_target, maybe_feedback_vector,
- slot_id, &construct_generic, &construct_array,
- &var_site);
+ slot_id, UpdateFeedbackMode::kOptionalFeedback,
+ &construct_generic, &construct_array, &var_site);
BIND(&construct_generic);
{
@@ -929,8 +928,8 @@ TNode<Object> InterpreterAssembler::ConstructWithSpread(
TNode<Uint16T> current_instance_type = LoadInstanceType(current);
GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
&if_boundfunction);
- Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
- &if_function, &mark_megamorphic);
+ Branch(IsJSFunctionInstanceType(current_instance_type), &if_function,
+ &mark_megamorphic);
BIND(&if_function);
{
@@ -1078,8 +1077,8 @@ TNode<IntPtrT> InterpreterAssembler::Advance(int delta) {
TNode<IntPtrT> InterpreterAssembler::Advance(TNode<IntPtrT> delta,
bool backward) {
-#ifdef V8_TRACE_IGNITION
- TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
+#ifdef V8_TRACE_UNOPTIMIZED
+ TraceBytecode(Runtime::kTraceUnoptimizedBytecodeExit);
#endif
TNode<IntPtrT> next_offset = backward ? IntPtrSub(BytecodeOffset(), delta)
: IntPtrAdd(BytecodeOffset(), delta);
@@ -1135,46 +1134,58 @@ TNode<WordT> InterpreterAssembler::LoadBytecode(
return ChangeUint32ToWord(bytecode);
}
-TNode<WordT> InterpreterAssembler::StarDispatchLookahead(
- TNode<WordT> target_bytecode) {
+void InterpreterAssembler::StarDispatchLookahead(TNode<WordT> target_bytecode) {
Label do_inline_star(this), done(this);
- TVARIABLE(WordT, var_bytecode, target_bytecode);
-
- TNode<Int32T> star_bytecode =
- Int32Constant(static_cast<int>(Bytecode::kStar));
- TNode<BoolT> is_star =
- Word32Equal(TruncateWordToInt32(target_bytecode), star_bytecode);
+ // Check whether the following opcode is one of the short Star codes. All
+ // opcodes higher than the short Star variants are invalid, and invalid
+ // opcodes are never deliberately written, so we can use a one-sided check.
+ // This is no less secure than the normal-length Star handler, which performs
+ // no validation on its operand.
+ STATIC_ASSERT(static_cast<int>(Bytecode::kLastShortStar) + 1 ==
+ static_cast<int>(Bytecode::kIllegal));
+ STATIC_ASSERT(Bytecode::kIllegal == Bytecode::kLast);
+ TNode<Int32T> first_short_star_bytecode =
+ Int32Constant(static_cast<int>(Bytecode::kFirstShortStar));
+ TNode<BoolT> is_star = Uint32GreaterThanOrEqual(
+ TruncateWordToInt32(target_bytecode), first_short_star_bytecode);
Branch(is_star, &do_inline_star, &done);
BIND(&do_inline_star);
{
- InlineStar();
- var_bytecode = LoadBytecode(BytecodeOffset());
- Goto(&done);
+ InlineShortStar(target_bytecode);
+
+ // Rather than merging control flow to a single indirect jump, we can get
+ // better branch prediction by duplicating it. This is because the
+ // instruction following a merged X + StarN is a bad predictor of the
+ // instruction following a non-merged X, and vice versa.
+ DispatchToBytecode(LoadBytecode(BytecodeOffset()), BytecodeOffset());
}
BIND(&done);
- return var_bytecode.value();
}
-void InterpreterAssembler::InlineStar() {
+void InterpreterAssembler::InlineShortStar(TNode<WordT> target_bytecode) {
Bytecode previous_bytecode = bytecode_;
- AccumulatorUse previous_acc_use = accumulator_use_;
+ ImplicitRegisterUse previous_acc_use = implicit_register_use_;
- bytecode_ = Bytecode::kStar;
- accumulator_use_ = AccumulatorUse::kNone;
+ // At this point we don't know statically what bytecode we're executing, but
+ // kStar0 has the right attributes (namely, no operands) for any of the short
+ // Star codes.
+ bytecode_ = Bytecode::kStar0;
+ implicit_register_use_ = ImplicitRegisterUse::kNone;
-#ifdef V8_TRACE_IGNITION
- TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
+#ifdef V8_TRACE_UNOPTIMIZED
+ TraceBytecode(Runtime::kTraceUnoptimizedBytecodeEntry);
#endif
- StoreRegister(GetAccumulator(),
- BytecodeOperandReg(0, LoadSensitivity::kSafe));
- DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
+ StoreRegisterForShortStar(GetAccumulator(), target_bytecode);
+
+ DCHECK_EQ(implicit_register_use_,
+ Bytecodes::GetImplicitRegisterUse(bytecode_));
Advance();
bytecode_ = previous_bytecode;
- accumulator_use_ = previous_acc_use;
+ implicit_register_use_ = previous_acc_use;
}
void InterpreterAssembler::Dispatch() {
@@ -1182,9 +1193,13 @@ void InterpreterAssembler::Dispatch() {
DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
TNode<IntPtrT> target_offset = Advance();
TNode<WordT> target_bytecode = LoadBytecode(target_offset);
+ DispatchToBytecodeWithOptionalStarLookahead(target_bytecode);
+}
+void InterpreterAssembler::DispatchToBytecodeWithOptionalStarLookahead(
+ TNode<WordT> target_bytecode) {
if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) {
- target_bytecode = StarDispatchLookahead(target_bytecode);
+ StarDispatchLookahead(target_bytecode);
}
DispatchToBytecode(target_bytecode, BytecodeOffset());
}
@@ -1349,7 +1364,7 @@ void InterpreterAssembler::TraceBytecodeDispatch(TNode<WordT> target_bytecode) {
// static
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
-#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64
return false;
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC || \
@@ -1552,7 +1567,8 @@ void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
- UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_index);
+ MaybeUpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
+ slot_index);
SetAccumulator(var_result.value());
Dispatch();
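
The StoreRegisterForShortStar change above hinges on a single compile-time constant: because the short Star opcodes and the register operands both step by one in the same direction, Register(N).ToOperand() - Bytecode::kStarN evaluates to the same value for every N, so the handler can turn the opcode it just loaded into a frame offset with one addition. A minimal standalone sketch of that arithmetic follows; the opcode and operand values are invented for illustration and are not V8's actual numbering.

    #include <cassert>

    // Assumed values for illustration only; V8's real encodings differ.
    constexpr int kStar0 = 0x80;           // hypothetical opcode of Star0
    constexpr int kSystemPointerSize = 8;  // 64-bit target assumed

    // Toy model: register N is addressed by operand -N relative to the frame,
    // and StarN is encoded as kStar0 - N (so kStar15 .. kStar0 are contiguous).
    constexpr int RegisterToOperand(int n) { return -n; }
    constexpr int StarOpcode(int n) { return kStar0 - n; }

    // The constant the handler adds to the opcode it just loaded.
    constexpr int kShortStarToOperand = RegisterToOperand(0) - StarOpcode(0);

    int main() {
      for (int n = 0; n <= 15; ++n) {
        // opcode + constant recovers the register operand for every StarN,
        // which is then scaled by kSystemPointerSize to get the frame offset.
        assert(StarOpcode(n) + kShortStarToOperand == RegisterToOperand(n));
      }
      return 0;
    }
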
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 2884aaed1a..e2fc572f18 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -82,11 +82,6 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
TNode<Context> GetContextAtDepth(TNode<Context> context,
TNode<Uint32T> depth);
- // Goto the given |target| if the context chain starting at |context| has any
- // extensions up to the given |depth|.
- void GotoIfHasContextExtensionUpToDepth(TNode<Context> context,
- TNode<Uint32T> depth, Label* target);
-
// A RegListNodePair provides an abstraction over lists of registers.
class RegListNodePair {
public:
@@ -231,6 +226,12 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void DispatchToBytecode(TNode<WordT> target_bytecode,
TNode<IntPtrT> new_bytecode_offset);
+ // Dispatches to |target_bytecode| at BytecodeOffset(). Includes short-star
+ // lookahead if the current bytecode_ is likely followed by a short-star
+ // instruction.
+ void DispatchToBytecodeWithOptionalStarLookahead(
+ TNode<WordT> target_bytecode);
+
// Abort with the given abort reason.
void Abort(AbortReason abort_reason);
void AbortIfWordNotEqual(TNode<WordT> lhs, TNode<WordT> rhs,
@@ -252,6 +253,11 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void ToNumberOrNumeric(Object::Conversion mode);
+ void StoreRegisterForShortStar(TNode<Object> value, TNode<WordT> opcode);
+
+ // Load the bytecode at |bytecode_offset|.
+ TNode<WordT> LoadBytecode(TNode<IntPtrT> bytecode_offset);
+
private:
// Returns a pointer to the current function's BytecodeArray object.
TNode<BytecodeArray> BytecodeArrayTaggedPointer();
@@ -372,16 +378,14 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
TNode<IntPtrT> Advance(int delta);
TNode<IntPtrT> Advance(TNode<IntPtrT> delta, bool backward = false);
- // Load the bytecode at |bytecode_offset|.
- TNode<WordT> LoadBytecode(TNode<IntPtrT> bytecode_offset);
-
- // Look ahead for Star and inline it in a branch. Returns a new target
- // bytecode node for dispatch.
- TNode<WordT> StarDispatchLookahead(TNode<WordT> target_bytecode);
+ // Look ahead for short Star and inline it in a branch, including subsequent
+ // dispatch. Anything after this point can assume that the following
+ // instruction was not a short Star.
+ void StarDispatchLookahead(TNode<WordT> target_bytecode);
- // Build code for Star at the current BytecodeOffset() and Advance() to the
- // next dispatch offset.
- void InlineStar();
+ // Build code for short Star at the current BytecodeOffset() and Advance() to
+ // the next dispatch offset.
+ void InlineShortStar(TNode<WordT> target_bytecode);
// Dispatch to the bytecode handler with code entry point |handler_entry|.
void DispatchToBytecodeHandlerEntry(TNode<RawPtrT> handler_entry,
@@ -398,7 +402,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
CodeStubAssembler::TVariable<IntPtrT> bytecode_offset_;
CodeStubAssembler::TVariable<ExternalReference> dispatch_table_;
CodeStubAssembler::TVariable<Object> accumulator_;
- AccumulatorUse accumulator_use_;
+ ImplicitRegisterUse implicit_register_use_;
bool made_call_;
bool reloaded_frame_ptr_;
bool bytecode_array_valid_;
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index 3b7172867e..c7993316ab 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -145,6 +145,20 @@ IGNITION_HANDLER(Star, InterpreterAssembler) {
Dispatch();
}
+// Star0 - StarN
+//
+// Store accumulator to one of a special batch of registers, without using a
+// second byte to specify the destination.
+//
+// Even though this handler is declared as Star0, multiple entries in
+// the jump table point to this handler.
+IGNITION_HANDLER(Star0, InterpreterAssembler) {
+ TNode<Object> accumulator = GetAccumulator();
+ TNode<WordT> opcode = LoadBytecode(BytecodeOffset());
+ StoreRegisterForShortStar(accumulator, opcode);
+ Dispatch();
+}
+
// Mov <src> <dst>
//
// Stores the value of register <src> to register <dst>.
@@ -222,18 +236,9 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(1);
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
- Label no_feedback(this, Label::kDeferred), end(this);
- GotoIf(IsUndefined(maybe_vector), &no_feedback);
-
CallBuiltin(Builtins::kStoreGlobalIC, context, name, value, slot,
maybe_vector);
- Goto(&end);
-
- Bind(&no_feedback);
- CallRuntime(Runtime::kStoreGlobalICNoFeedback_Miss, context, value, name);
- Goto(&end);
- Bind(&end);
Dispatch();
}
@@ -353,11 +358,11 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler {
Label slowpath(this, Label::kDeferred);
// Check for context extensions to allow the fast path.
- GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath);
+ TNode<Context> slot_context =
+ GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath);
// Fast path does a normal load context.
{
- TNode<Context> slot_context = GetContextAtDepth(context, depth);
TNode<Object> result = LoadContextElement(slot_context, slot_index);
SetAccumulator(result);
Dispatch();
@@ -853,9 +858,9 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
: InterpreterAssembler(state, bytecode, operand_scale) {}
using BinaryOpGenerator = TNode<Object> (BinaryOpAssembler::*)(
- TNode<Context> context, TNode<Object> left, TNode<Object> right,
- TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
- bool rhs_known_smi);
+ const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
+ UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi);
void BinaryOpWithFeedback(BinaryOpGenerator generator) {
TNode<Object> lhs = LoadRegisterAtOperandIndex(0);
@@ -865,8 +870,10 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
BinaryOpAssembler binop_asm(state());
- TNode<Object> result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
- maybe_feedback_vector, false);
+ TNode<Object> result =
+ (binop_asm.*generator)([=] { return context; }, lhs, rhs, slot_index,
+ [=] { return maybe_feedback_vector; },
+ UpdateFeedbackMode::kOptionalFeedback, false);
SetAccumulator(result);
Dispatch();
}
@@ -879,8 +886,10 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
BinaryOpAssembler binop_asm(state());
- TNode<Object> result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
- maybe_feedback_vector, true);
+ TNode<Object> result =
+ (binop_asm.*generator)([=] { return context; }, lhs, rhs, slot_index,
+ [=] { return maybe_feedback_vector; },
+ UpdateFeedbackMode::kOptionalFeedback, true);
SetAccumulator(result);
Dispatch();
}
@@ -989,9 +998,9 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
BinaryOpAssembler binop_asm(state());
TNode<Object> result = binop_asm.Generate_BitwiseBinaryOpWithFeedback(
- bitwise_op, left, right, context, &feedback);
+ bitwise_op, left, right, [=] { return context; }, &feedback);
- UpdateFeedback(feedback.value(), maybe_feedback_vector, slot_index);
+ MaybeUpdateFeedback(feedback.value(), maybe_feedback_vector, slot_index);
SetAccumulator(result);
Dispatch();
}
@@ -1017,14 +1026,14 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
TNode<Smi> result_type = SelectSmiConstant(
TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
- UpdateFeedback(SmiOr(result_type, var_left_feedback.value()),
- maybe_feedback_vector, slot_index);
+ MaybeUpdateFeedback(SmiOr(result_type, var_left_feedback.value()),
+ maybe_feedback_vector, slot_index);
SetAccumulator(result);
Dispatch();
BIND(&if_bigint_mix);
- UpdateFeedback(var_left_feedback.value(), maybe_feedback_vector,
- slot_index);
+ MaybeUpdateFeedback(var_left_feedback.value(), maybe_feedback_vector,
+ slot_index);
ThrowTypeError(context, MessageTemplate::kBigIntMixedTypes);
}
};
@@ -1112,7 +1121,8 @@ IGNITION_HANDLER(BitwiseNot, InterpreterAssembler) {
UnaryOpAssembler unary_op_asm(state());
TNode<Object> result = unary_op_asm.Generate_BitwiseNotWithFeedback(
- context, value, slot_index, maybe_feedback_vector);
+ context, value, slot_index, maybe_feedback_vector,
+ UpdateFeedbackMode::kOptionalFeedback);
SetAccumulator(result);
Dispatch();
@@ -1156,7 +1166,8 @@ IGNITION_HANDLER(Negate, InterpreterAssembler) {
UnaryOpAssembler unary_op_asm(state());
TNode<Object> result = unary_op_asm.Generate_NegateWithFeedback(
- context, value, slot_index, maybe_feedback_vector);
+ context, value, slot_index, maybe_feedback_vector,
+ UpdateFeedbackMode::kOptionalFeedback);
SetAccumulator(result);
Dispatch();
@@ -1217,7 +1228,8 @@ IGNITION_HANDLER(Inc, InterpreterAssembler) {
UnaryOpAssembler unary_op_asm(state());
TNode<Object> result = unary_op_asm.Generate_IncrementWithFeedback(
- context, value, slot_index, maybe_feedback_vector);
+ context, value, slot_index, maybe_feedback_vector,
+ UpdateFeedbackMode::kOptionalFeedback);
SetAccumulator(result);
Dispatch();
@@ -1234,7 +1246,8 @@ IGNITION_HANDLER(Dec, InterpreterAssembler) {
UnaryOpAssembler unary_op_asm(state());
TNode<Object> result = unary_op_asm.Generate_DecrementWithFeedback(
- context, value, slot_index, maybe_feedback_vector);
+ context, value, slot_index, maybe_feedback_vector,
+ UpdateFeedbackMode::kOptionalFeedback);
SetAccumulator(result);
Dispatch();
@@ -1623,8 +1636,8 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler {
TNode<UintPtrT> slot_index = BytecodeOperandIdx(1);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
- UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
- slot_index);
+ MaybeUpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
+ slot_index);
SetAccumulator(result);
Dispatch();
}
@@ -2233,7 +2246,7 @@ IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) {
// Creates a regular expression literal for literal index <literal_idx> with
// <flags> and the pattern in <pattern_idx>.
IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
- TNode<Object> pattern = LoadConstantPoolEntryAtOperandIndex(0);
+ TNode<String> pattern = CAST(LoadConstantPoolEntryAtOperandIndex(0));
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(1);
TNode<Smi> flags =
@@ -2784,7 +2797,7 @@ IGNITION_HANDLER(Debugger, InterpreterAssembler) {
TNode<IntPtrT> original_bytecode = SmiUntag(Projection<1>(result_pair)); \
MaybeDropFrames(context); \
SetAccumulator(return_value); \
- DispatchToBytecode(original_bytecode, BytecodeOffset()); \
+ DispatchToBytecodeWithOptionalStarLookahead(original_bytecode); \
}
DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK)
#undef DEBUG_BREAK
@@ -2852,7 +2865,7 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
TNode<FixedArray> cache_array;
TNode<Smi> cache_length;
ForInPrepare(enumerator, vector_index, maybe_feedback_vector, &cache_array,
- &cache_length);
+ &cache_length, UpdateFeedbackMode::kOptionalFeedback);
StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
Dispatch();
@@ -2885,9 +2898,9 @@ IGNITION_HANDLER(ForInNext, InterpreterAssembler) {
}
BIND(&if_slow);
{
- TNode<Object> result =
- ForInNextSlow(GetContext(), vector_index, receiver, key, cache_type,
- maybe_feedback_vector);
+ TNode<Object> result = ForInNextSlow(GetContext(), vector_index, receiver,
+ key, cache_type, maybe_feedback_vector,
+ UpdateFeedbackMode::kOptionalFeedback);
SetAccumulator(result);
Dispatch();
}
@@ -3104,8 +3117,19 @@ Handle<Code> GenerateBytecodeHandler(Isolate* isolate, const char* debug_name,
case Bytecode::k##Name: \
Name##Assembler::Generate(&state, operand_scale); \
break;
- BYTECODE_LIST(CALL_GENERATOR);
+ BYTECODE_LIST_WITH_UNIQUE_HANDLERS(CALL_GENERATOR);
#undef CALL_GENERATOR
+ case Bytecode::kIllegal:
+ IllegalAssembler::Generate(&state, operand_scale);
+ break;
+ case Bytecode::kStar0:
+ Star0Assembler::Generate(&state, operand_scale);
+ break;
+ default:
+ // Others (the rest of the short stars, and the rest of the illegal range)
+ // must not get their own handler generated. Rather, multiple entries in
+ // the jump table point to those handlers.
+ UNREACHABLE();
}
Handle<Code> code = compiler::CodeAssembler::GenerateCode(
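
The Star0 handler added above also shows why only one short-Star builtin needs to be generated: the handler reloads the bytecode it was dispatched on and derives the destination register from the opcode value itself, so the remaining kStar1..kStar15 jump-table slots can point at the same code. A toy sketch of that shared-handler pattern, using invented opcode values rather than V8's:

    #include <array>
    #include <cassert>
    #include <cstdint>

    using Handler = void (*)(uint8_t opcode, int64_t* regs, int64_t acc);

    // One handler serves a whole contiguous opcode range; it recovers which
    // register to write from the opcode it was dispatched on, so no operand
    // byte is needed. Opcodes 0x80..0x8F stand in for Star15..Star0 here.
    void SharedShortStarHandler(uint8_t opcode, int64_t* regs, int64_t acc) {
      int reg = 0x8F - opcode;
      regs[reg] = acc;
    }

    int main() {
      std::array<Handler, 256> table{};
      for (int op = 0x80; op <= 0x8F; ++op) table[op] = SharedShortStarHandler;

      int64_t regs[16] = {};
      table[0x8C](0x8C, regs, 42);  // "Star3" in this toy encoding
      assert(regs[3] == 42);
      return 0;
    }
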
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index b9975e66ea..b6ea44f6e7 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -261,43 +261,8 @@ TNode<Object> IntrinsicsGenerator::Call(
TNode<Object> IntrinsicsGenerator::CreateAsyncFromSyncIterator(
const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
int arg_count) {
- InterpreterAssembler::Label not_receiver(
- assembler_, InterpreterAssembler::Label::kDeferred);
- InterpreterAssembler::Label done(assembler_);
- InterpreterAssembler::TVariable<Object> return_value(assembler_);
-
TNode<Object> sync_iterator = __ LoadRegisterFromRegisterList(args, 0);
-
- __ GotoIf(__ TaggedIsSmi(sync_iterator), &not_receiver);
- __ GotoIfNot(__ IsJSReceiver(__ CAST(sync_iterator)), &not_receiver);
-
- const TNode<Object> next =
- __ GetProperty(context, sync_iterator, factory()->next_string());
-
- const TNode<NativeContext> native_context = __ LoadNativeContext(context);
- const TNode<Map> map = __ CAST(__ LoadContextElement(
- native_context, Context::ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX));
- const TNode<JSObject> iterator = __ AllocateJSObjectFromMap(map);
-
- __ StoreObjectFieldNoWriteBarrier(
- iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset, sync_iterator);
- __ StoreObjectFieldNoWriteBarrier(iterator,
- JSAsyncFromSyncIterator::kNextOffset, next);
-
- return_value = iterator;
- __ Goto(&done);
-
- __ BIND(&not_receiver);
- {
- return_value =
- __ CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context);
-
- // Unreachable due to the Throw in runtime call.
- __ Goto(&done);
- }
-
- __ BIND(&done);
- return return_value.value();
+ return __ CreateAsyncFromSyncIterator(context, sync_iterator);
}
TNode<Object> IntrinsicsGenerator::CreateJSGeneratorObject(
@@ -332,23 +297,7 @@ TNode<Object> IntrinsicsGenerator::GeneratorClose(
TNode<Object> IntrinsicsGenerator::GetImportMetaObject(
const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
int arg_count) {
- const TNode<Context> module_context = __ LoadModuleContext(context);
- const TNode<HeapObject> module =
- __ CAST(__ LoadContextElement(module_context, Context::EXTENSION_INDEX));
- const TNode<Object> import_meta =
- __ LoadObjectField(module, SourceTextModule::kImportMetaOffset);
-
- InterpreterAssembler::TVariable<Object> return_value(assembler_);
- return_value = import_meta;
-
- InterpreterAssembler::Label end(assembler_);
- __ GotoIfNot(__ IsTheHole(import_meta), &end);
-
- return_value = __ CallRuntime(Runtime::kGetImportMetaObject, context);
- __ Goto(&end);
-
- __ BIND(&end);
- return return_value.value();
+ return __ GetImportMetaObject(context);
}
TNode<Object> IntrinsicsGenerator::AsyncFunctionAwaitCaught(
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 3a6cdff8ad..a24bbca706 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -12,6 +12,8 @@
#include "src/ast/scopes.h"
#include "src/codegen/compiler.h"
#include "src/codegen/unoptimized-compilation-info.h"
+#include "src/execution/local-isolate.h"
+#include "src/heap/parked-scope.h"
#include "src/init/bootstrapper.h"
#include "src/init/setup-isolate.h"
#include "src/interpreter/bytecode-generator.h"
@@ -31,10 +33,10 @@ namespace interpreter {
class InterpreterCompilationJob final : public UnoptimizedCompilationJob {
public:
- InterpreterCompilationJob(
- ParseInfo* parse_info, FunctionLiteral* literal,
- AccountingAllocator* allocator,
- std::vector<FunctionLiteral*>* eager_inner_literals);
+ InterpreterCompilationJob(ParseInfo* parse_info, FunctionLiteral* literal,
+ AccountingAllocator* allocator,
+ std::vector<FunctionLiteral*>* eager_inner_literals,
+ LocalIsolate* local_isolate);
InterpreterCompilationJob(const InterpreterCompilationJob&) = delete;
InterpreterCompilationJob& operator=(const InterpreterCompilationJob&) =
delete;
@@ -59,6 +61,7 @@ class InterpreterCompilationJob final : public UnoptimizedCompilationJob {
Zone zone_;
UnoptimizedCompilationInfo compilation_info_;
+ LocalIsolate* local_isolate_;
BytecodeGenerator generator_;
};
@@ -80,7 +83,14 @@ namespace {
int BuiltinIndexFromBytecode(Bytecode bytecode, OperandScale operand_scale) {
int index = static_cast<int>(bytecode);
- if (operand_scale != OperandScale::kSingle) {
+ if (operand_scale == OperandScale::kSingle) {
+ if (Bytecodes::IsShortStar(bytecode)) {
+ index = static_cast<int>(Bytecode::kFirstShortStar);
+ } else if (bytecode > Bytecode::kLastShortStar) {
+ // Adjust the index due to repeated handlers.
+ index -= Bytecodes::kShortStarCount - 1;
+ }
+ } else {
// The table contains uint8_t offsets starting at 0 with
// kIllegalBytecodeHandlerEncoding for illegal bytecode/scale combinations.
uint8_t offset = kWideBytecodeToBuiltinsMapping[index];
@@ -156,11 +166,13 @@ bool ShouldPrintBytecode(Handle<SharedFunctionInfo> shared) {
InterpreterCompilationJob::InterpreterCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
- std::vector<FunctionLiteral*>* eager_inner_literals)
+ std::vector<FunctionLiteral*>* eager_inner_literals,
+ LocalIsolate* local_isolate)
: UnoptimizedCompilationJob(parse_info->stack_limit(), parse_info,
&compilation_info_),
zone_(allocator, ZONE_NAME),
compilation_info_(&zone_, parse_info, literal),
+ local_isolate_(local_isolate),
generator_(&zone_, &compilation_info_, parse_info->ast_string_constants(),
eager_inner_literals) {}
@@ -176,6 +188,9 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
// then ASTs from different functions may be interspersed when printed.
MaybePrintAst(parse_info(), compilation_info());
+ base::Optional<ParkedScope> parked_scope;
+ if (local_isolate_) parked_scope.emplace(local_isolate_);
+
generator()->GenerateBytecode(stack_limit());
if (generator()->HasStackOverflow()) {
@@ -269,6 +284,7 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::DoFinalizeJobImpl(
compilation_info()->literal()->GetDebugName();
os << "[generated bytecode for function: " << name.get() << " ("
<< shared_info << ")]" << std::endl;
+ os << "Bytecode length: " << bytecodes->length() << std::endl;
bytecodes->Disassemble(os);
os << std::flush;
}
@@ -284,19 +300,21 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::DoFinalizeJobImpl(
std::unique_ptr<UnoptimizedCompilationJob> Interpreter::NewCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
- std::vector<FunctionLiteral*>* eager_inner_literals) {
+ std::vector<FunctionLiteral*>* eager_inner_literals,
+ LocalIsolate* local_isolate) {
return std::make_unique<InterpreterCompilationJob>(
- parse_info, literal, allocator, eager_inner_literals);
+ parse_info, literal, allocator, eager_inner_literals, local_isolate);
}
std::unique_ptr<UnoptimizedCompilationJob>
Interpreter::NewSourcePositionCollectionJob(
ParseInfo* parse_info, FunctionLiteral* literal,
- Handle<BytecodeArray> existing_bytecode, AccountingAllocator* allocator) {
- auto job = std::make_unique<InterpreterCompilationJob>(parse_info, literal,
- allocator, nullptr);
+ Handle<BytecodeArray> existing_bytecode, AccountingAllocator* allocator,
+ LocalIsolate* local_isolate) {
+ auto job = std::make_unique<InterpreterCompilationJob>(
+ parse_info, literal, allocator, nullptr, local_isolate);
job->compilation_info()->SetBytecodeArray(existing_bytecode);
- return std::unique_ptr<UnoptimizedCompilationJob> { static_cast<UnoptimizedCompilationJob*>(job.release()) };
+ return job;
}
void Interpreter::ForEachBytecode(
@@ -326,23 +344,23 @@ void Interpreter::Initialize() {
interpreter_entry_trampoline_instruction_start_ = code->InstructionStart();
// Initialize the dispatch table.
- Code illegal = builtins->builtin(Builtins::kIllegalHandler);
- int builtin_id = Builtins::kFirstBytecodeHandler;
- ForEachBytecode([=, &builtin_id](Bytecode bytecode,
- OperandScale operand_scale) {
- Code handler = illegal;
+ ForEachBytecode([=](Bytecode bytecode, OperandScale operand_scale) {
+ int builtin_id = BuiltinIndexFromBytecode(bytecode, operand_scale);
+ Code handler = builtins->builtin(builtin_id);
if (Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
#ifdef DEBUG
std::string builtin_name(Builtins::name(builtin_id));
std::string expected_name =
- Bytecodes::ToString(bytecode, operand_scale, "") + "Handler";
+ (Bytecodes::IsShortStar(bytecode)
+ ? "ShortStar"
+ : Bytecodes::ToString(bytecode, operand_scale, "")) +
+ "Handler";
DCHECK_EQ(expected_name, builtin_name);
#endif
- handler = builtins->builtin(builtin_id++);
}
+
SetBytecodeHandler(bytecode, operand_scale, handler);
});
- DCHECK(builtin_id == Builtins::builtin_count);
DCHECK(IsDispatchTableInitialized());
}
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 3bbd93fffb..9f24a27595 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -23,6 +23,7 @@ class Callable;
class UnoptimizedCompilationJob;
class FunctionLiteral;
class Isolate;
+class LocalIsolate;
class ParseInfo;
class RootVisitor;
class SetupIsolateDelegate;
@@ -46,7 +47,8 @@ class Interpreter {
static std::unique_ptr<UnoptimizedCompilationJob> NewCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
- std::vector<FunctionLiteral*>* eager_inner_literals);
+ std::vector<FunctionLiteral*>* eager_inner_literals,
+ LocalIsolate* local_isolate);
// Creates a compilation job which will generate source positions for
// |literal| and when finalized, store the result into |existing_bytecode|.
@@ -54,7 +56,8 @@ class Interpreter {
NewSourcePositionCollectionJob(ParseInfo* parse_info,
FunctionLiteral* literal,
Handle<BytecodeArray> existing_bytecode,
- AccountingAllocator* allocator);
+ AccountingAllocator* allocator,
+ LocalIsolate* local_isolate);
// If the bytecode handler for |bytecode| and |operand_scale| has not yet
// been loaded, deserialize it. Then return the handler.
diff --git a/deps/v8/src/json/json-parser.cc b/deps/v8/src/json/json-parser.cc
index c0109bb77a..668cd79824 100644
--- a/deps/v8/src/json/json-parser.cc
+++ b/deps/v8/src/json/json-parser.cc
@@ -520,8 +520,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
Map::GeneralizeField(isolate(), target, descriptor_index,
details.constness(), expected_representation,
value_type);
- } else if (!FLAG_unbox_double_fields &&
- expected_representation.IsDouble() && value->IsSmi()) {
+ } else if (expected_representation.IsDouble() && value->IsSmi()) {
new_mutable_double++;
}
@@ -581,18 +580,6 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
descriptor++;
if (details.representation().IsDouble()) {
- if (object->IsUnboxedDoubleField(index)) {
- uint64_t bits;
- if (value.IsSmi()) {
- bits = bit_cast<uint64_t>(static_cast<double>(Smi::ToInt(value)));
- } else {
- DCHECK(value.IsHeapNumber());
- bits = HeapNumber::cast(value).value_as_bits();
- }
- object->RawFastDoublePropertyAsBitsAtPut(index, bits);
- continue;
- }
-
if (value.IsSmi()) {
if (kTaggedSize != kDoubleSize) {
// Write alignment filler.
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
index 1dac546262..e933c61864 100644
--- a/deps/v8/src/libsampler/sampler.cc
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -87,7 +87,7 @@ using mcontext_t = struct sigcontext;
struct ucontext_t {
uint64_t uc_flags;
- struct ucontext *uc_link;
+ struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
// Other fields are not used by V8, don't define them here.
@@ -153,7 +153,7 @@ struct mcontext_t {
struct ucontext_t {
uint64_t uc_flags;
- struct ucontext *uc_link;
+ struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
// Other fields are not used by V8, don't define them here.
@@ -163,7 +163,6 @@ enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 };
#endif // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
-
namespace v8 {
namespace sampler {
@@ -262,11 +261,9 @@ class Sampler::PlatformData {
// not work in this case. We're using OpenThread because DuplicateHandle
// for some reason doesn't work in Chrome's sandbox.
PlatformData()
- : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
- THREAD_SUSPEND_RESUME |
- THREAD_QUERY_INFORMATION,
- false,
- GetCurrentThreadId())) {}
+ : profiled_thread_(OpenThread(THREAD_GET_CONTEXT | THREAD_SUSPEND_RESUME |
+ THREAD_QUERY_INFORMATION,
+ false, GetCurrentThreadId())) {}
~PlatformData() {
if (profiled_thread_ != nullptr) {
@@ -304,7 +301,6 @@ class Sampler::PlatformData {
#endif // USE_SIGNALS
-
#if defined(USE_SIGNALS)
class SignalHandler {
public:
@@ -359,7 +355,6 @@ int SignalHandler::client_count_ = 0;
struct sigaction SignalHandler::old_signal_handler_;
bool SignalHandler::signal_handler_installed_ = false;
-
void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
void* context) {
USE(info);
@@ -418,10 +413,8 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
#if V8_LIBC_GLIBC
state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->nip);
- state->sp =
- reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
- state->fp =
- reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R31]);
+ state->sp = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
+ state->fp = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R31]);
state->lr = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->link);
#else
// Some C libraries, notably Musl, define the regs member as a void pointer
@@ -442,6 +435,12 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
state->sp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[15]);
state->fp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[11]);
state->lr = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[14]);
+#elif V8_HOST_ARCH_RISCV64
+ // See Ch. 25 of the RISC-V Assembly Programmer’s Handbook.
+ state->pc = reinterpret_cast<void*>(mcontext.__gregs[REG_PC]);
+ state->sp = reinterpret_cast<void*>(mcontext.__gregs[REG_SP]);
+ state->fp = reinterpret_cast<void*>(mcontext.__gregs[REG_S0]);
+ state->lr = reinterpret_cast<void*>(mcontext.__gregs[REG_RA]);
#endif // V8_HOST_ARCH_*
#elif V8_OS_IOS
@@ -537,9 +536,7 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
Sampler::Sampler(Isolate* isolate)
: isolate_(isolate), data_(std::make_unique<PlatformData>()) {}
-Sampler::~Sampler() {
- DCHECK(!IsActive());
-}
+Sampler::~Sampler() { DCHECK(!IsActive()); }
void Sampler::Start() {
DCHECK(!IsActive());
diff --git a/deps/v8/src/logging/code-events.h b/deps/v8/src/logging/code-events.h
index d2974e4964..c6ea66edb8 100644
--- a/deps/v8/src/logging/code-events.h
+++ b/deps/v8/src/logging/code-events.h
@@ -28,6 +28,7 @@ class WasmCode;
using WasmName = Vector<const char>;
} // namespace wasm
+// clang-format off
#define LOG_EVENTS_LIST(V) \
V(CODE_CREATION_EVENT, code-creation) \
V(CODE_DISABLE_OPT_EVENT, code-disable-optimization) \
@@ -36,7 +37,9 @@ using WasmName = Vector<const char>;
V(CODE_MOVING_GC, code-moving-gc) \
V(SHARED_FUNC_MOVE_EVENT, sfi-move) \
V(SNAPSHOT_CODE_NAME_EVENT, snapshot-code-name) \
- V(TICK_EVENT, tick)
+ V(TICK_EVENT, tick) \
+ V(BYTECODE_FLUSH_EVENT, bytecode-flush)
+// clang-format on
#define TAGS_LIST(V) \
V(BUILTIN_TAG, Builtin) \
@@ -106,6 +109,8 @@ class CodeEventListener {
virtual void CodeDependencyChangeEvent(Handle<Code> code,
Handle<SharedFunctionInfo> shared,
const char* reason) = 0;
+ // Invoked during GC. No allocation allowed.
+ virtual void BytecodeFlushEvent(Address compiled_data_start) = 0;
virtual bool is_listening_to_code_events() { return false; }
};
@@ -232,6 +237,11 @@ class CodeEventDispatcher : public CodeEventListener {
listener->CodeDependencyChangeEvent(code, sfi, reason);
});
}
+ void BytecodeFlushEvent(Address compiled_data_start) override {
+ DispatchEventToListeners([=](CodeEventListener* listener) {
+ listener->BytecodeFlushEvent(compiled_data_start);
+ });
+ }
private:
std::unordered_set<CodeEventListener*> listeners_;
diff --git a/deps/v8/src/logging/counters-definitions.h b/deps/v8/src/logging/counters-definitions.h
index 9cb58dd9fc..5a3298a772 100644
--- a/deps/v8/src/logging/counters-definitions.h
+++ b/deps/v8/src/logging/counters-definitions.h
@@ -85,6 +85,12 @@ namespace internal {
HR(wasm_modules_per_engine, V8.WasmModulesPerEngine, 1, 1024, 30) \
/* bailout reason if Liftoff failed, or {kSuccess} (per function) */ \
HR(liftoff_bailout_reasons, V8.LiftoffBailoutReasons, 0, 20, 21) \
+ /* number of thrown exceptions per isolate */ \
+ HR(wasm_throw_count, V8.WasmThrowCount, 0, 100000, 30) \
+ /* number of rethrown exceptions per isolate */ \
+ HR(wasm_rethrow_count, V8.WasmReThrowCount, 0, 100000, 30) \
+ /* number of caught exceptions per isolate */ \
+ HR(wasm_catch_count, V8.WasmCatchCount, 0, 100000, 30) \
/* Ticks observed in a single Turbofan compilation, in 1K */ \
HR(turbofan_ticks, V8.TurboFan1KTicks, 0, 100000, 200) \
/* Backtracks observed in a single regexp interpreter execution */ \
@@ -140,8 +146,9 @@ namespace internal {
HT(gc_scavenger_foreground, V8.GCScavengerForeground, 10000, MILLISECOND) \
HT(measure_memory_delay_ms, V8.MeasureMemoryDelayMilliseconds, 100000, \
MILLISECOND) \
- HT(stop_the_world, V8.StopTheWorld, 10000, MICROSECOND) \
- HT(time_to_collection, V8.TimeToCollection, 10000, MICROSECOND) \
+ HT(gc_time_to_safepoint, V8.GC.TimeToSafepoint, 10000000, MICROSECOND) \
+ HT(gc_time_to_collection_on_background, V8.GC.TimeToCollectionOnBackground, \
+ 10000000, MICROSECOND) \
/* TurboFan timers. */ \
HT(turbofan_optimize_prepare, V8.TurboFanOptimizePrepare, 1000000, \
MICROSECOND) \
@@ -188,6 +195,12 @@ namespace internal {
V8.WasmInstantiateModuleMicroSeconds.wasm, 10000000, MICROSECOND) \
HT(wasm_instantiate_asm_module_time, \
V8.WasmInstantiateModuleMicroSeconds.asm, 10000000, MICROSECOND) \
+ HT(wasm_time_between_throws, V8.WasmTimeBetweenThrowsMilliseconds, 1000, \
+ MILLISECOND) \
+ HT(wasm_time_between_rethrows, V8.WasmTimeBetweenRethrowsMilliseconds, 1000, \
+ MILLISECOND) \
+ HT(wasm_time_between_catch, V8.WasmTimeBetweenCatchMilliseconds, 1000, \
+ MILLISECOND) \
/* Total compilation time incl. caching/parsing for various cache states. */ \
HT(compile_script_with_produce_cache, \
V8.CompileScriptMicroSeconds.ProduceCache, 1000000, MICROSECOND) \
@@ -314,12 +327,10 @@ namespace internal {
/* Total count of functions compiled using the baseline compiler. */ \
SC(total_baseline_compile_count, V8.TotalBaselineCompileCount)
-#define STATS_COUNTER_TS_LIST(SC) \
- SC(wasm_generated_code_size, V8.WasmGeneratedCodeBytes) \
- SC(wasm_reloc_size, V8.WasmRelocBytes) \
- SC(wasm_lazily_compiled_functions, V8.WasmLazilyCompiledFunctions) \
- SC(liftoff_compiled_functions, V8.LiftoffCompiledFunctions) \
- SC(liftoff_unsupported_functions, V8.LiftoffUnsupportedFunctions)
+#define STATS_COUNTER_TS_LIST(SC) \
+ SC(wasm_generated_code_size, V8.WasmGeneratedCodeBytes) \
+ SC(wasm_reloc_size, V8.WasmRelocBytes) \
+ SC(wasm_lazily_compiled_functions, V8.WasmLazilyCompiledFunctions)
// List of counters that can be incremented from generated code. We need them in
// a separate list to be able to relocate them.
diff --git a/deps/v8/src/logging/counters.h b/deps/v8/src/logging/counters.h
index f1a7595a34..6f0076b180 100644
--- a/deps/v8/src/logging/counters.h
+++ b/deps/v8/src/logging/counters.h
@@ -923,6 +923,7 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, GenericLowering) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BytecodeGraphBuilder) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Inlining) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmInlining) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, JumpThreading) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LateGraphTrimming) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LateOptimization) \
@@ -968,6 +969,10 @@ class RuntimeCallTimer final {
V(BoundFunctionNameGetter) \
V(CodeGenerationFromStringsCallbacks) \
V(CompileBackgroundCompileTask) \
+ V(CompileBaseline) \
+ V(CompileBaselineVisit) \
+ V(CompileBaselinePrepareHandlerOffsets) \
+ V(CompileBaselinePreVisit) \
V(CompileCollectSourcePositions) \
V(CompileDeserialize) \
V(CompileEnqueueOnDispatcher) \
diff --git a/deps/v8/src/logging/log-utils.cc b/deps/v8/src/logging/log-utils.cc
index 3b65729069..ec23b1bdf0 100644
--- a/deps/v8/src/logging/log-utils.cc
+++ b/deps/v8/src/logging/log-utils.cc
@@ -25,7 +25,7 @@ const char* const Log::kLogToConsole = "-";
// static
FILE* Log::CreateOutputHandle(std::string file_name) {
// If we're logging anything, we need to open the log file.
- if (!Log::InitLogAtStart()) {
+ if (!FLAG_log) {
return nullptr;
} else if (Log::IsLoggingToConsole(file_name)) {
return stdout;
diff --git a/deps/v8/src/logging/log-utils.h b/deps/v8/src/logging/log-utils.h
index 47abf927f1..159ce9150e 100644
--- a/deps/v8/src/logging/log-utils.h
+++ b/deps/v8/src/logging/log-utils.h
@@ -32,15 +32,6 @@ class Log {
public:
explicit Log(Logger* logger, std::string log_file_name);
- static bool InitLogAtStart() {
- return FLAG_log || FLAG_log_all || FLAG_log_api || FLAG_log_code ||
- FLAG_log_handles || FLAG_log_suspect || FLAG_ll_prof ||
- FLAG_perf_basic_prof || FLAG_perf_prof || FLAG_log_source_code ||
- FLAG_gdbjit || FLAG_log_internal_timer_events || FLAG_prof_cpp ||
- FLAG_trace_ic || FLAG_log_function_events || FLAG_trace_zone_stats ||
- FLAG_turbo_profiling_log_builtins;
- }
-
V8_EXPORT_PRIVATE static bool IsLoggingToConsole(std::string file_name);
V8_EXPORT_PRIVATE static bool IsLoggingToTemporaryFile(std::string file_name);
diff --git a/deps/v8/src/logging/log.cc b/deps/v8/src/logging/log.cc
index e7ffba35a9..7738cab831 100644
--- a/deps/v8/src/logging/log.cc
+++ b/deps/v8/src/logging/log.cc
@@ -595,6 +595,8 @@ void LowLevelLogger::LogCodeInfo() {
const char arch[] = "arm64";
#elif V8_TARGET_ARCH_S390
const char arch[] = "s390";
+#elif V8_TARGET_ARCH_RISCV64
+ const char arch[] = "riscv64";
#else
const char arch[] = "unknown";
#endif
@@ -886,7 +888,7 @@ class Ticker : public sampler::Sampler {
: sampler::Sampler(reinterpret_cast<v8::Isolate*>(isolate)),
sampling_thread_(
std::make_unique<SamplingThread>(this, interval_microseconds)),
- threadId_(ThreadId::Current()) {}
+ perThreadData_(isolate->FindPerThreadDataForThisThread()) {}
~Ticker() override {
if (IsActive()) Stop();
@@ -908,8 +910,9 @@ class Ticker : public sampler::Sampler {
void SampleStack(const v8::RegisterState& state) override {
if (!profiler_) return;
Isolate* isolate = reinterpret_cast<Isolate*>(this->isolate());
- if (v8::Locker::IsActive() &&
- !isolate->thread_manager()->IsLockedByThread(threadId_))
+ if (v8::Locker::IsActive() && (!isolate->thread_manager()->IsLockedByThread(
+ perThreadData_->thread_id()) ||
+ perThreadData_->thread_state() != nullptr))
return;
TickSample sample;
sample.Init(isolate, state, TickSample::kIncludeCEntryFrame, true);
@@ -919,7 +922,7 @@ class Ticker : public sampler::Sampler {
private:
Profiler* profiler_ = nullptr;
std::unique_ptr<SamplingThread> sampling_thread_;
- ThreadId threadId_;
+ Isolate::PerIsolateThreadData* perThreadData_;
};
//
@@ -1029,10 +1032,7 @@ void Logger::UncheckedStringEvent(const char* name, const char* value) {
}
void Logger::IntPtrTEvent(const char* name, intptr_t value) {
- if (FLAG_log) UncheckedIntPtrTEvent(name, value);
-}
-
-void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
+ if (!FLAG_log) return;
MSG_BUILDER();
msg << name << kNext;
msg.AppendFormatString("%" V8PRIdPTR, value);
@@ -1088,7 +1088,7 @@ void Logger::TimerEvent(Logger::StartEnd se, const char* name) {
}
void Logger::BasicBlockCounterEvent(const char* name, int block_id,
- double count) {
+ uint32_t count) {
if (!FLAG_turbo_profiling_log_builtins) return;
MSG_BUILDER();
msg << ProfileDataFromFileConstants::kBlockCounterMarker << kNext << name
@@ -1221,15 +1221,18 @@ void Logger::LogSourceCodeInformation(Handle<AbstractCode> code,
<< reinterpret_cast<void*>(code->InstructionStart()) << Logger::kNext
<< script.id() << Logger::kNext << shared->StartPosition()
<< Logger::kNext << shared->EndPosition() << Logger::kNext;
-
- SourcePositionTableIterator iterator(code->source_position_table());
+ // TODO(v8:11429): Clean up baseline-related code in source position
+ // iteration.
bool hasInlined = false;
- for (; !iterator.done(); iterator.Advance()) {
- SourcePosition pos = iterator.source_position();
- msg << "C" << iterator.code_offset() << "O" << pos.ScriptOffset();
- if (pos.isInlined()) {
- msg << "I" << pos.InliningId();
- hasInlined = true;
+ if (code->kind() != CodeKind::BASELINE) {
+ SourcePositionTableIterator iterator(code->source_position_table());
+ for (; !iterator.done(); iterator.Advance()) {
+ SourcePosition pos = iterator.source_position();
+ msg << "C" << iterator.code_offset() << "O" << pos.ScriptOffset();
+ if (pos.isInlined()) {
+ msg << "I" << pos.InliningId();
+ hasInlined = true;
+ }
}
}
msg << Logger::kNext;
@@ -1469,7 +1472,7 @@ void Logger::ProcessDeoptEvent(Handle<Code> code, SourcePosition position,
void Logger::CodeDeoptEvent(Handle<Code> code, DeoptimizeKind kind, Address pc,
int fp_to_sp_delta, bool reuse_code) {
- if (!is_logging()) return;
+ if (!is_logging() || !FLAG_log_deopt) return;
Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(*code, pc);
ProcessDeoptEvent(code, info.position,
Deoptimizer::MessageFor(kind, reuse_code),
@@ -1479,7 +1482,7 @@ void Logger::CodeDeoptEvent(Handle<Code> code, DeoptimizeKind kind, Address pc,
void Logger::CodeDependencyChangeEvent(Handle<Code> code,
Handle<SharedFunctionInfo> sfi,
const char* reason) {
- if (!is_logging()) return;
+ if (!is_logging() || !FLAG_log_deopt) return;
SourcePosition position(sfi->StartPosition(), -1);
ProcessDeoptEvent(code, position, "dependency-change", reason);
}
@@ -1487,41 +1490,41 @@ void Logger::CodeDependencyChangeEvent(Handle<Code> code,
namespace {
void CodeLinePosEvent(
- JitLogger* jit_logger, Address code_start,
+ JitLogger& jit_logger, Address code_start,
SourcePositionTableIterator& iter) { // NOLINT(runtime/references)
- if (jit_logger) {
- void* jit_handler_data = jit_logger->StartCodePosInfoEvent();
- for (; !iter.done(); iter.Advance()) {
- if (iter.is_statement()) {
- jit_logger->AddCodeLinePosInfoEvent(
- jit_handler_data, iter.code_offset(),
- iter.source_position().ScriptOffset(),
- JitCodeEvent::STATEMENT_POSITION);
- }
- jit_logger->AddCodeLinePosInfoEvent(jit_handler_data, iter.code_offset(),
- iter.source_position().ScriptOffset(),
- JitCodeEvent::POSITION);
+ void* jit_handler_data = jit_logger.StartCodePosInfoEvent();
+ for (; !iter.done(); iter.Advance()) {
+ if (iter.is_statement()) {
+ jit_logger.AddCodeLinePosInfoEvent(jit_handler_data, iter.code_offset(),
+ iter.source_position().ScriptOffset(),
+ JitCodeEvent::STATEMENT_POSITION);
}
- jit_logger->EndCodePosInfoEvent(code_start, jit_handler_data);
+ jit_logger.AddCodeLinePosInfoEvent(jit_handler_data, iter.code_offset(),
+ iter.source_position().ScriptOffset(),
+ JitCodeEvent::POSITION);
}
+ jit_logger.EndCodePosInfoEvent(code_start, jit_handler_data);
}
} // namespace
void Logger::CodeLinePosInfoRecordEvent(Address code_start,
ByteArray source_position_table) {
+ if (!jit_logger_) return;
SourcePositionTableIterator iter(source_position_table);
- CodeLinePosEvent(jit_logger_.get(), code_start, iter);
+ CodeLinePosEvent(*jit_logger_, code_start, iter);
}
void Logger::CodeLinePosInfoRecordEvent(
Address code_start, Vector<const byte> source_position_table) {
+ if (!jit_logger_) return;
SourcePositionTableIterator iter(source_position_table);
- CodeLinePosEvent(jit_logger_.get(), code_start, iter);
+ CodeLinePosEvent(*jit_logger_, code_start, iter);
}
void Logger::CodeNameEvent(Address addr, int pos, const char* code_name) {
if (code_name == nullptr) return; // Not a code object.
+ if (!is_listening_to_code_events()) return;
MSG_BUILDER();
msg << kLogEventsNames[CodeEventListener::SNAPSHOT_CODE_NAME_EVENT] << kNext
<< pos << kNext << code_name;
@@ -1726,7 +1729,7 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
void Logger::ICEvent(const char* type, bool keyed, Handle<Map> map,
Handle<Object> key, char old_state, char new_state,
const char* modifier, const char* slow_stub_reason) {
- if (!FLAG_trace_ic) return;
+ if (!FLAG_log_ic) return;
MSG_BUILDER();
if (keyed) msg << "Keyed";
int line;
@@ -1752,7 +1755,7 @@ void Logger::ICEvent(const char* type, bool keyed, Handle<Map> map,
void Logger::MapEvent(const char* type, Handle<Map> from, Handle<Map> to,
const char* reason, Handle<HeapObject> name_or_sfi) {
- if (!FLAG_trace_maps) return;
+ if (!FLAG_log_maps) return;
if (!to.is_null()) MapDetails(*to);
int line = -1;
int column = -1;
@@ -1783,7 +1786,7 @@ void Logger::MapEvent(const char* type, Handle<Map> from, Handle<Map> to,
}
void Logger::MapCreate(Map map) {
- if (!FLAG_trace_maps) return;
+ if (!FLAG_log_maps) return;
DisallowGarbageCollection no_gc;
MSG_BUILDER();
msg << "map-create" << kNext << Time() << kNext << AsHex::Address(map.ptr());
@@ -1791,12 +1794,12 @@ void Logger::MapCreate(Map map) {
}
void Logger::MapDetails(Map map) {
- if (!FLAG_trace_maps) return;
+ if (!FLAG_log_maps) return;
DisallowGarbageCollection no_gc;
MSG_BUILDER();
msg << "map-details" << kNext << Time() << kNext << AsHex::Address(map.ptr())
<< kNext;
- if (FLAG_trace_maps_details) {
+ if (FLAG_log_maps_details) {
std::ostringstream buffer;
map.PrintMapDetails(buffer);
msg << buffer.str().c_str();
@@ -2007,21 +2010,16 @@ bool Logger::SetUp(Isolate* isolate) {
std::make_unique<LowLevelLogger>(isolate, log_file_name.str().c_str());
AddCodeEventListener(ll_logger_.get());
}
-
ticker_ = std::make_unique<Ticker>(isolate, FLAG_prof_sampling_interval);
-
- if (Log::InitLogAtStart()) UpdateIsLogging(true);
-
+ if (FLAG_log) UpdateIsLogging(true);
timer_.Start();
-
if (FLAG_prof_cpp) {
- UpdateIsLogging(true);
+ CHECK(FLAG_log);
+ CHECK(is_logging());
profiler_ = std::make_unique<Profiler>(isolate);
profiler_->Engage();
}
-
if (is_logging_) AddCodeEventListener(this);
-
return true;
}
@@ -2107,6 +2105,7 @@ void ExistingCodeLogger::LogCodeObject(Object object) {
switch (abstract_code->kind()) {
case CodeKind::INTERPRETED_FUNCTION:
case CodeKind::TURBOFAN:
+ case CodeKind::BASELINE:
case CodeKind::NATIVE_CONTEXT_INDEPENDENT:
case CodeKind::TURBOPROP:
return; // We log this later using LogCompiledFunctions.
@@ -2178,12 +2177,21 @@ void ExistingCodeLogger::LogCompiledFunctions() {
// During iteration, there can be heap allocation due to the
// GetScriptLineNumber call.
for (auto& pair : compiled_funcs) {
- SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate_, pair.first);
- if (pair.first->function_data(kAcquireLoad).IsInterpreterData()) {
+ Handle<SharedFunctionInfo> shared = pair.first;
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate_, shared);
+ if (shared->HasInterpreterData()) {
+ LogExistingFunction(
+ shared,
+ Handle<AbstractCode>(
+ AbstractCode::cast(shared->InterpreterTrampoline()), isolate_),
+ CodeEventListener::INTERPRETED_FUNCTION_TAG);
+ }
+ if (shared->HasBaselineData()) {
+ // TODO(v8:11429): Add a tag for baseline code. Or use CodeKind?
LogExistingFunction(
- pair.first,
+ shared,
Handle<AbstractCode>(
- AbstractCode::cast(pair.first->InterpreterTrampoline()),
+ AbstractCode::cast(shared->baseline_data().baseline_code()),
isolate_),
CodeEventListener::INTERPRETED_FUNCTION_TAG);
}
diff --git a/deps/v8/src/logging/log.h b/deps/v8/src/logging/log.h
index 74a601ba6d..6951a29054 100644
--- a/deps/v8/src/logging/log.h
+++ b/deps/v8/src/logging/log.h
@@ -35,7 +35,7 @@ struct TickSample;
//
// --log-all
// Log all events to the file, default is off. This is the same as combining
-// --log-api, --log-code, and --log-regexp.
+// --log-api and --log-code.
//
// --log-api
// Log API events to the logfile, default is off. --log-api implies --log.
@@ -44,10 +44,6 @@ struct TickSample;
// Log code (create, move, and delete) events to the logfile, default is off.
// --log-code implies --log.
//
-// --log-regexp
-// Log creation and use of regular expressions, Default is off.
-// --log-regexp implies --log.
-//
// --logfile <filename>
// Specify the name of the logfile, default is "v8.log".
//
@@ -72,10 +68,9 @@ class SourcePosition;
class Ticker;
#undef LOG
-#define LOG(isolate, Call) \
- do { \
- auto&& logger = (isolate)->logger(); \
- if (logger->is_logging()) logger->Call; \
+#define LOG(isolate, Call) \
+ do { \
+ if (v8::internal::FLAG_log) (isolate)->logger()->Call; \
} while (false)
#define LOG_CODE_EVENT(isolate, Call) \
@@ -219,6 +214,7 @@ class Logger : public CodeEventListener {
void CodeDependencyChangeEvent(Handle<Code> code,
Handle<SharedFunctionInfo> sfi,
const char* reason) override;
+ void BytecodeFlushEvent(Address compiled_data_start) override {}
void ProcessDeoptEvent(Handle<Code> code, SourcePosition position,
const char* kind, const char* reason);
@@ -248,7 +244,7 @@ class Logger : public CodeEventListener {
V8_EXPORT_PRIVATE void TimerEvent(StartEnd se, const char* name);
- void BasicBlockCounterEvent(const char* name, int block_id, double count);
+ void BasicBlockCounterEvent(const char* name, int block_id, uint32_t count);
void BuiltinHashEvent(const char* name, int hash);
@@ -305,9 +301,6 @@ class Logger : public CodeEventListener {
// Logs a StringEvent regardless of whether FLAG_log is true.
void UncheckedStringEvent(const char* name, const char* value);
- // Logs an IntPtrTEvent regardless of whether FLAG_log is true.
- void UncheckedIntPtrTEvent(const char* name, intptr_t value);
-
// Logs a scripts sources. Keeps track of all logged scripts to ensure that
// each script is logged only once.
bool EnsureLogScriptSource(Script script);
@@ -419,6 +412,7 @@ class V8_EXPORT_PRIVATE CodeEventLogger : public CodeEventListener {
void CodeDependencyChangeEvent(Handle<Code> code,
Handle<SharedFunctionInfo> sfi,
const char* reason) override {}
+ void BytecodeFlushEvent(Address compiled_data_start) override {}
protected:
Isolate* isolate_;
@@ -482,6 +476,7 @@ class ExternalCodeEventListener : public CodeEventListener {
void CodeDependencyChangeEvent(Handle<Code> code,
Handle<SharedFunctionInfo> sfi,
const char* reason) override {}
+ void BytecodeFlushEvent(Address compiled_data_start) override {}
void StartListening(v8::CodeEventHandler* code_event_handler);
void StopListening();
diff --git a/deps/v8/src/objects/all-objects-inl.h b/deps/v8/src/objects/all-objects-inl.h
index 6e7c7a59ce..78c239fd77 100644
--- a/deps/v8/src/objects/all-objects-inl.h
+++ b/deps/v8/src/objects/all-objects-inl.h
@@ -31,7 +31,6 @@
#include "src/objects/field-index-inl.h"
#include "src/objects/fixed-array-inl.h"
#include "src/objects/foreign-inl.h"
-#include "src/objects/frame-array-inl.h"
#include "src/objects/free-space-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-number-inl.h"
@@ -48,7 +47,6 @@
#include "src/objects/js-regexp-inl.h"
#include "src/objects/js-regexp-string-iterator-inl.h"
#include "src/objects/js-weak-refs-inl.h"
-#include "src/objects/layout-descriptor-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/lookup-cache-inl.h"
#include "src/objects/lookup-inl.h"
@@ -68,6 +66,7 @@
#include "src/objects/property-cell-inl.h"
#include "src/objects/property-descriptor-object-inl.h"
#include "src/objects/prototype-info-inl.h"
+#include "src/objects/scope-info-inl.h"
#include "src/objects/script-inl.h"
#include "src/objects/shared-function-info-inl.h"
#include "src/objects/slots-atomic-inl.h"
@@ -77,6 +76,7 @@
#include "src/objects/string-set-inl.h"
#include "src/objects/string-table-inl.h"
#include "src/objects/struct-inl.h"
+#include "src/objects/swiss-name-dictionary-inl.h"
#include "src/objects/synthetic-module-inl.h"
#include "src/objects/tagged-field-inl.h"
#include "src/objects/tagged-impl-inl.h"
diff --git a/deps/v8/src/objects/arguments.tq b/deps/v8/src/objects/arguments.tq
index 2fc9dfc53a..b65464688a 100644
--- a/deps/v8/src/objects/arguments.tq
+++ b/deps/v8/src/objects/arguments.tq
@@ -80,7 +80,7 @@ extern shape JSStrictArgumentsObject extends JSArgumentsObject {
class SloppyArgumentsElements extends FixedArrayBase {
context: Context;
arguments: FixedArray|NumberDictionary;
- mapped_entries[length]: Smi|TheHole;
+ @relaxedRead mapped_entries[length]: Smi|TheHole;
}
macro NewSloppyArgumentsElements<Iterator: type>(
@@ -317,6 +317,21 @@ builtin NewRestArgumentsElements(
frame, formalParameterCount, Convert<intptr>(argumentCount));
}
+builtin FastNewSloppyArguments(implicit context: Context)(f: JSFunction):
+ JSSloppyArgumentsObject {
+ return EmitFastNewSloppyArguments(f);
+}
+
+builtin FastNewStrictArguments(implicit context: Context)(f: JSFunction):
+ JSStrictArgumentsObject {
+ return EmitFastNewStrictArguments(f);
+}
+
+builtin FastNewRestArguments(implicit context: Context)(f: JSFunction):
+ JSArray {
+ return EmitFastNewRestArguments(f);
+}
+
macro
AccessSloppyArgumentsCommon(
receiver: JSObject, keyObject: Object): &Object labels Bailout {
diff --git a/deps/v8/src/objects/backing-store.cc b/deps/v8/src/objects/backing-store.cc
index fd044335da..689dedd103 100644
--- a/deps/v8/src/objects/backing-store.cc
+++ b/deps/v8/src/objects/backing-store.cc
@@ -29,6 +29,9 @@ namespace {
// MIPS64 has a user space of 2^40 bytes on most processors,
// address space limits needs to be smaller.
constexpr size_t kAddressSpaceLimit = 0x8000000000L; // 512 GiB
+#elif V8_TARGET_ARCH_RISCV64
+// RISC-V64 has a user space of 256GB on the Sv39 scheme.
+constexpr size_t kAddressSpaceLimit = 0x4000000000L; // 256 GiB
#elif V8_TARGET_ARCH_64_BIT
constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4 GiB
#else
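
A quick sanity check on the new constant (background assumption, not stated in the patch: RISC-V's Sv39 paging provides a 2^39-byte, i.e. 512 GiB, virtual address space, of which the lower half is conventionally reserved for user space): 0x4000000000 = 2^38 bytes = 256 * 2^30 bytes = 256 GiB, which matches the comment on the added line.
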
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index 26eedcac09..191abdb4b6 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -5,12 +5,12 @@
#ifndef V8_OBJECTS_CODE_INL_H_
#define V8_OBJECTS_CODE_INL_H_
-#include "src/objects/code.h"
-
#include "src/base/memory.h"
#include "src/codegen/code-desc.h"
+#include "src/common/assert-scope.h"
#include "src/execution/isolate.h"
#include "src/interpreter/bytecode-register.h"
+#include "src/objects/code.h"
#include "src/objects/dictionary.h"
#include "src/objects/instance-type-inl.h"
#include "src/objects/map-inl.h"
@@ -329,6 +329,67 @@ CodeKind Code::kind() const {
return KindField::decode(ReadField<uint32_t>(kFlagsOffset));
}
+namespace detail {
+
+// TODO(v8:11429): Extract out of header, to generic helper, and merge with
+// TranslationArray de/encoding.
+inline int ReadUint(ByteArray array, int* index) {
+ int byte = 0;
+ int value = 0;
+ int shift = 0;
+ do {
+ byte = array.get((*index)++);
+ value += (byte & ((1 << 7) - 1)) << shift;
+ shift += 7;
+ } while (byte & (1 << 7));
+ return value;
+}
+
+} // namespace detail
+
+int Code::GetBytecodeOffsetForBaselinePC(Address baseline_pc) {
+ DisallowGarbageCollection no_gc;
+ CHECK(!is_baseline_prologue_builtin());
+ if (is_baseline_leave_frame_builtin()) return kFunctionExitBytecodeOffset;
+ CHECK_EQ(kind(), CodeKind::BASELINE);
+ ByteArray data = ByteArray::cast(source_position_table());
+ Address lookup_pc = 0;
+ Address pc = baseline_pc - InstructionStart();
+ int index = 0;
+ int offset = 0;
+ while (pc > lookup_pc) {
+ lookup_pc += detail::ReadUint(data, &index);
+ offset += detail::ReadUint(data, &index);
+ }
+ CHECK_EQ(pc, lookup_pc);
+ return offset;
+}
+
+uintptr_t Code::GetBaselinePCForBytecodeOffset(int bytecode_offset,
+ bool precise) {
+ DisallowGarbageCollection no_gc;
+ CHECK_EQ(kind(), CodeKind::BASELINE);
+ ByteArray data = ByteArray::cast(source_position_table());
+ intptr_t pc = 0;
+ int index = 0;
+ int offset = 0;
+ // TODO(v8:11429,cbruni): clean up
+ // Return the offset for the last bytecode that matches
+ while (offset < bytecode_offset && index < data.length()) {
+ int delta_pc = detail::ReadUint(data, &index);
+ int delta_offset = detail::ReadUint(data, &index);
+ if (!precise && (bytecode_offset < offset + delta_offset)) break;
+ pc += delta_pc;
+ offset += delta_offset;
+ }
+ if (precise) {
+ CHECK_EQ(offset, bytecode_offset);
+ } else {
+ CHECK_LE(offset, bytecode_offset);
+ }
+ return pc;
+}
+
void Code::initialize_flags(CodeKind kind, bool is_turbofanned, int stack_slots,
bool is_off_heap_trampoline) {
CHECK(0 <= stack_slots && stack_slots < StackSlotsField::kMax);
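
The detail::ReadUint helper added above decodes a little-endian base-128 varint: each byte contributes its low 7 bits and the high bit marks a continuation. The baseline table consumed by GetBytecodeOffsetForBaselinePC and GetBaselinePCForBytecodeOffset is then just an alternating sequence of (pc delta, bytecode-offset delta) varints that both functions walk linearly, accumulating deltas until the queried value is reached. A self-contained sketch of that encoding, for illustration only (the names below are hypothetical; the real table is emitted by the baseline compiler):

#include <cstdint>
#include <vector>

// Append |value| as a base-128 varint: 7 payload bits per byte,
// high bit set on every byte except the last.
void WriteUint(std::vector<uint8_t>* out, uint32_t value) {
  do {
    uint8_t byte = value & 0x7F;
    value >>= 7;
    if (value != 0) byte |= 0x80;  // continuation bit
    out->push_back(byte);
  } while (value != 0);
}

// Mirror of detail::ReadUint above, operating on a plain byte vector.
uint32_t ReadUint(const std::vector<uint8_t>& data, int* index) {
  uint32_t value = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = data[(*index)++];
    value += static_cast<uint32_t>(byte & 0x7F) << shift;
    shift += 7;
  } while (byte & 0x80);
  return value;
}
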
@@ -352,6 +413,14 @@ inline bool Code::is_interpreter_trampoline_builtin() const {
index == Builtins::kInterpreterEnterBytecodeDispatch);
}
+inline bool Code::is_baseline_leave_frame_builtin() const {
+ return builtin_index() == Builtins::kBaselineLeaveFrame;
+}
+
+inline bool Code::is_baseline_prologue_builtin() const {
+ return builtin_index() == Builtins::kBaselineOutOfLinePrologue;
+}
+
inline bool Code::checks_optimization_marker() const {
bool checks_marker =
(builtin_index() == Builtins::kCompileLazy ||
@@ -361,7 +430,7 @@ inline bool Code::checks_optimization_marker() const {
(CodeKindCanDeoptimize(kind()) && marked_for_deoptimization());
}
-inline bool Code::has_tagged_params() const {
+inline bool Code::has_tagged_outgoing_params() const {
return kind() != CodeKind::JS_TO_WASM_FUNCTION &&
kind() != CodeKind::C_WASM_ENTRY && kind() != CodeKind::WASM_FUNCTION;
}
@@ -655,7 +724,7 @@ void BytecodeArray::set_parameter_count(int32_t number_of_parameters) {
// Parameter count is stored as the size on stack of the parameters to allow
// it to be used directly by generated code.
WriteField<int32_t>(kParameterSizeOffset,
- (number_of_parameters << kSystemPointerSizeLog2));
+ (number_of_parameters << kSystemPointerSizeLog2));
}
interpreter::Register BytecodeArray::incoming_new_target_or_generator_register()
@@ -678,7 +747,7 @@ void BytecodeArray::set_incoming_new_target_or_generator_register(
register_count());
DCHECK_NE(0, incoming_new_target_or_generator_register.ToOperand());
WriteField<int32_t>(kIncomingNewTargetOrGeneratorRegisterOffset,
- incoming_new_target_or_generator_register.ToOperand());
+ incoming_new_target_or_generator_register.ToOperand());
}
}
@@ -762,7 +831,7 @@ int BytecodeArray::SizeIncludingMetadata() {
return size;
}
-DEFINE_DEOPT_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(TranslationByteArray, TranslationArray)
DEFINE_DEOPT_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrBytecodeOffset, Smi)
@@ -777,11 +846,11 @@ DEFINE_DEOPT_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(TranslationIndex, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(Pc, Smi)
-BailoutId DeoptimizationData::BytecodeOffset(int i) {
- return BailoutId(BytecodeOffsetRaw(i).value());
+BytecodeOffset DeoptimizationData::GetBytecodeOffset(int i) {
+ return BytecodeOffset(BytecodeOffsetRaw(i).value());
}
-void DeoptimizationData::SetBytecodeOffset(int i, BailoutId value) {
+void DeoptimizationData::SetBytecodeOffset(int i, BytecodeOffset value) {
SetBytecodeOffsetRaw(i, Smi::FromInt(value.ToInt()));
}
diff --git a/deps/v8/src/objects/code-kind.cc b/deps/v8/src/objects/code-kind.cc
index 48b28ea11d..8d480c86db 100644
--- a/deps/v8/src/objects/code-kind.cc
+++ b/deps/v8/src/objects/code-kind.cc
@@ -22,6 +22,8 @@ const char* CodeKindToMarker(CodeKind kind) {
switch (kind) {
case CodeKind::INTERPRETED_FUNCTION:
return "~";
+ case CodeKind::BASELINE:
+ return "^";
case CodeKind::NATIVE_CONTEXT_INDEPENDENT:
return "-";
case CodeKind::TURBOPROP:
diff --git a/deps/v8/src/objects/code-kind.h b/deps/v8/src/objects/code-kind.h
index c5ac4c49c6..12f65ba18a 100644
--- a/deps/v8/src/objects/code-kind.h
+++ b/deps/v8/src/objects/code-kind.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_CODE_KIND_H_
#define V8_OBJECTS_CODE_KIND_H_
+#include "src/base/bounds.h"
#include "src/base/flags.h"
#include "src/flags/flags.h"
@@ -26,6 +27,7 @@ namespace internal {
V(JS_TO_JS_FUNCTION) \
V(C_WASM_ENTRY) \
V(INTERPRETED_FUNCTION) \
+ V(BASELINE) \
V(NATIVE_CONTEXT_INDEPENDENT) \
V(TURBOPROP) \
V(TURBOFAN)
@@ -37,8 +39,12 @@ enum class CodeKind {
};
STATIC_ASSERT(CodeKind::INTERPRETED_FUNCTION < CodeKind::TURBOPROP &&
CodeKind::INTERPRETED_FUNCTION <
- CodeKind::NATIVE_CONTEXT_INDEPENDENT);
-STATIC_ASSERT(CodeKind::TURBOPROP < CodeKind::TURBOFAN &&
+ CodeKind::NATIVE_CONTEXT_INDEPENDENT &&
+ CodeKind::INTERPRETED_FUNCTION < CodeKind::BASELINE);
+STATIC_ASSERT(CodeKind::BASELINE < CodeKind::TURBOPROP &&
+ CodeKind::BASELINE < CodeKind::NATIVE_CONTEXT_INDEPENDENT);
+STATIC_ASSERT(CodeKind::BASELINE < CodeKind::TURBOFAN &&
+ CodeKind::TURBOPROP < CodeKind::TURBOFAN &&
CodeKind::NATIVE_CONTEXT_INDEPENDENT < CodeKind::TURBOFAN);
#define V(...) +1
@@ -53,19 +59,33 @@ inline constexpr bool CodeKindIsInterpretedJSFunction(CodeKind kind) {
return kind == CodeKind::INTERPRETED_FUNCTION;
}
+inline constexpr bool CodeKindIsBaselinedJSFunction(CodeKind kind) {
+ return kind == CodeKind::BASELINE;
+}
+
+inline constexpr bool CodeKindIsUnoptimizedJSFunction(CodeKind kind) {
+ STATIC_ASSERT(static_cast<int>(CodeKind::INTERPRETED_FUNCTION) + 1 ==
+ static_cast<int>(CodeKind::BASELINE));
+ return base::IsInRange(kind, CodeKind::INTERPRETED_FUNCTION,
+ CodeKind::BASELINE);
+}
+
inline constexpr bool CodeKindIsNativeContextIndependentJSFunction(
CodeKind kind) {
return kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
}
inline constexpr bool CodeKindIsOptimizedJSFunction(CodeKind kind) {
- return kind == CodeKind::TURBOFAN ||
- kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT ||
- kind == CodeKind::TURBOPROP;
+ STATIC_ASSERT(static_cast<int>(CodeKind::NATIVE_CONTEXT_INDEPENDENT) + 1 ==
+ static_cast<int>(CodeKind::TURBOPROP));
+ STATIC_ASSERT(static_cast<int>(CodeKind::TURBOPROP) + 1 ==
+ static_cast<int>(CodeKind::TURBOFAN));
+ return base::IsInRange(kind, CodeKind::NATIVE_CONTEXT_INDEPENDENT,
+ CodeKind::TURBOFAN);
}
inline constexpr bool CodeKindIsJSFunction(CodeKind kind) {
- return kind == CodeKind::INTERPRETED_FUNCTION ||
+ return CodeKindIsUnoptimizedJSFunction(kind) ||
CodeKindIsOptimizedJSFunction(kind);
}
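
The extra STATIC_ASSERTs above exist so that "is unoptimized" and "is optimized" can each be answered with a single range check over consecutive enum values rather than a chain of equality comparisons; V8's own helper sits behind the newly included src/base/bounds.h. A minimal sketch of the idiom (hypothetical enum, not V8's CodeKind):

#include <type_traits>

enum class Kind { kInterpreted, kBaseline, kNci, kTurboprop, kTurbofan };

// Illustrative range check; valid only because the enum values form a
// contiguous, ordered run, which is what the STATIC_ASSERTs above establish.
constexpr bool IsInRangeSketch(Kind value, Kind lower, Kind upper) {
  using U = std::underlying_type_t<Kind>;
  return static_cast<U>(lower) <= static_cast<U>(value) &&
         static_cast<U>(value) <= static_cast<U>(upper);
}

constexpr bool IsUnoptimizedSketch(Kind kind) {
  return IsInRangeSketch(kind, Kind::kInterpreted, Kind::kBaseline);
}

static_assert(IsUnoptimizedSketch(Kind::kBaseline), "baseline is unoptimized");
static_assert(!IsUnoptimizedSketch(Kind::kTurbofan), "turbofan is not");
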
@@ -86,11 +106,11 @@ inline constexpr bool CodeKindCanOSR(CodeKind kind) {
inline constexpr bool CodeKindIsOptimizedAndCanTierUp(CodeKind kind) {
return kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT ||
- (FLAG_turboprop_as_midtier && kind == CodeKind::TURBOPROP);
+ (!FLAG_turboprop_as_toptier && kind == CodeKind::TURBOPROP);
}
inline constexpr bool CodeKindCanTierUp(CodeKind kind) {
- return kind == CodeKind::INTERPRETED_FUNCTION ||
+ return CodeKindIsUnoptimizedJSFunction(kind) ||
CodeKindIsOptimizedAndCanTierUp(kind);
}
@@ -105,23 +125,25 @@ inline constexpr bool CodeKindIsStoredInOptimizedCodeCache(CodeKind kind) {
inline OptimizationTier GetTierForCodeKind(CodeKind kind) {
if (kind == CodeKind::TURBOFAN) return OptimizationTier::kTopTier;
if (kind == CodeKind::TURBOPROP) {
- return FLAG_turboprop_as_midtier ? OptimizationTier::kMidTier
- : OptimizationTier::kTopTier;
+ return FLAG_turboprop_as_toptier ? OptimizationTier::kTopTier
+ : OptimizationTier::kMidTier;
}
if (kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT) {
- return FLAG_turbo_nci_as_midtier ? OptimizationTier::kMidTier
- : OptimizationTier::kTopTier;
+ return OptimizationTier::kTopTier;
}
return OptimizationTier::kNone;
}
inline CodeKind CodeKindForTopTier() {
- // TODO(turboprop, mythria): We should make FLAG_turboprop mean turboprop is
- // mid-tier compiler and replace FLAG_turboprop_as_midtier with
- // FLAG_turboprop_as_top_tier to tier up to only Turboprop once
- // FLAG_turboprop_as_midtier is stable and major regressions are addressed.
+ if (V8_UNLIKELY(FLAG_turboprop_as_toptier)) {
+ return CodeKind::TURBOPROP;
+ }
+ return CodeKind::TURBOFAN;
+}
+
+inline CodeKind CodeKindForOSR() {
if (V8_UNLIKELY(FLAG_turboprop)) {
- return FLAG_turboprop_as_midtier ? CodeKind::TURBOFAN : CodeKind::TURBOPROP;
+ return CodeKind::TURBOPROP;
}
return CodeKind::TURBOFAN;
}
@@ -147,7 +169,8 @@ DEFINE_OPERATORS_FOR_FLAGS(CodeKinds)
static constexpr CodeKinds kJSFunctionCodeKindsMask{
CodeKindFlag::INTERPRETED_FUNCTION | CodeKindFlag::TURBOFAN |
- CodeKindFlag::NATIVE_CONTEXT_INDEPENDENT | CodeKindFlag::TURBOPROP};
+ CodeKindFlag::NATIVE_CONTEXT_INDEPENDENT | CodeKindFlag::TURBOPROP |
+ CodeKindFlag::BASELINE};
static constexpr CodeKinds kOptimizedJSFunctionCodeKindsMask{
CodeKindFlag::TURBOFAN | CodeKindFlag::NATIVE_CONTEXT_INDEPENDENT |
CodeKindFlag::TURBOPROP};
diff --git a/deps/v8/src/objects/code.cc b/deps/v8/src/objects/code.cc
index c78246f3cb..73068856c9 100644
--- a/deps/v8/src/objects/code.cc
+++ b/deps/v8/src/objects/code.cc
@@ -232,7 +232,8 @@ bool Code::CanDeoptAt(Address pc) {
for (int i = 0; i < deopt_data.DeoptCount(); i++) {
if (deopt_data.Pc(i).value() == -1) continue;
Address address = code_start_address + deopt_data.Pc(i).value();
- if (address == pc && deopt_data.BytecodeOffset(i) != BailoutId::None()) {
+ if (address == pc &&
+ deopt_data.GetBytecodeOffset(i) != BytecodeOffset::None()) {
return true;
}
}
@@ -259,7 +260,7 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL)));
#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) || \
- defined(V8_TARGET_ARCH_MIPS64)
+ defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_RISCV64)
return RelocIterator(*this, kModeMask).done();
#elif defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
@@ -440,7 +441,6 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
return;
}
- disasm::NameConverter converter;
int const inlined_function_count = InlinedFunctionCount().value();
os << "Inlined functions (count = " << inlined_function_count << ")\n";
for (int id = 0; id < inlined_function_count; ++id) {
@@ -457,7 +457,7 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
}
for (int i = 0; i < deopt_count; i++) {
os << std::setw(6) << i << " " << std::setw(15)
- << BytecodeOffset(i).ToInt() << " " << std::setw(4);
+ << GetBytecodeOffset(i).ToInt() << " " << std::setw(4);
print_pc(os, Pc(i).value());
os << std::setw(2);
@@ -466,202 +466,9 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
continue;
}
- // Print details of the frame translation.
- int translation_index = TranslationIndex(i).value();
- TranslationIterator iterator(TranslationByteArray(), translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- DCHECK(Translation::BEGIN == opcode);
- int frame_count = iterator.Next();
- int jsframe_count = iterator.Next();
- int update_feedback_count = iterator.Next();
- os << " " << Translation::StringFor(opcode)
- << " {frame count=" << frame_count
- << ", js frame count=" << jsframe_count
- << ", update_feedback_count=" << update_feedback_count << "}\n";
-
- while (iterator.HasNext() &&
- Translation::BEGIN !=
- (opcode = static_cast<Translation::Opcode>(iterator.Next()))) {
- os << std::setw(31) << " " << Translation::StringFor(opcode) << " ";
-
- switch (opcode) {
- case Translation::BEGIN:
- UNREACHABLE();
- break;
-
- case Translation::INTERPRETED_FRAME: {
- int bytecode_offset = iterator.Next();
- int shared_info_id = iterator.Next();
- unsigned height = iterator.Next();
- int return_value_offset = iterator.Next();
- int return_value_count = iterator.Next();
- Object shared_info = LiteralArray().get(shared_info_id);
- os << "{bytecode_offset=" << bytecode_offset << ", function="
- << SharedFunctionInfo::cast(shared_info).DebugNameCStr().get()
- << ", height=" << height << ", retval=@" << return_value_offset
- << "(#" << return_value_count << ")}";
- break;
- }
-
- case Translation::CONSTRUCT_STUB_FRAME: {
- int bailout_id = iterator.Next();
- int shared_info_id = iterator.Next();
- Object shared_info = LiteralArray().get(shared_info_id);
- unsigned height = iterator.Next();
- os << "{bailout_id=" << bailout_id << ", function="
- << SharedFunctionInfo::cast(shared_info).DebugNameCStr().get()
- << ", height=" << height << "}";
- break;
- }
-
- case Translation::BUILTIN_CONTINUATION_FRAME:
- case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME:
- case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: {
- int bailout_id = iterator.Next();
- int shared_info_id = iterator.Next();
- Object shared_info = LiteralArray().get(shared_info_id);
- unsigned height = iterator.Next();
- os << "{bailout_id=" << bailout_id << ", function="
- << SharedFunctionInfo::cast(shared_info).DebugNameCStr().get()
- << ", height=" << height << "}";
- break;
- }
-
- case Translation::ARGUMENTS_ADAPTOR_FRAME: {
- int shared_info_id = iterator.Next();
- Object shared_info = LiteralArray().get(shared_info_id);
- unsigned height = iterator.Next();
- os << "{function="
- << SharedFunctionInfo::cast(shared_info).DebugNameCStr().get()
- << ", height=" << height << "}";
- break;
- }
-
- case Translation::REGISTER: {
- int reg_code = iterator.Next();
- os << "{input=" << converter.NameOfCPURegister(reg_code) << "}";
- break;
- }
-
- case Translation::INT32_REGISTER: {
- int reg_code = iterator.Next();
- os << "{input=" << converter.NameOfCPURegister(reg_code)
- << " (int32)}";
- break;
- }
-
- case Translation::INT64_REGISTER: {
- int reg_code = iterator.Next();
- os << "{input=" << converter.NameOfCPURegister(reg_code)
- << " (int64)}";
- break;
- }
-
- case Translation::UINT32_REGISTER: {
- int reg_code = iterator.Next();
- os << "{input=" << converter.NameOfCPURegister(reg_code)
- << " (uint32)}";
- break;
- }
-
- case Translation::BOOL_REGISTER: {
- int reg_code = iterator.Next();
- os << "{input=" << converter.NameOfCPURegister(reg_code)
- << " (bool)}";
- break;
- }
-
- case Translation::FLOAT_REGISTER: {
- int reg_code = iterator.Next();
- os << "{input=" << FloatRegister::from_code(reg_code) << "}";
- break;
- }
-
- case Translation::DOUBLE_REGISTER: {
- int reg_code = iterator.Next();
- os << "{input=" << DoubleRegister::from_code(reg_code) << "}";
- break;
- }
-
- case Translation::STACK_SLOT: {
- int input_slot_index = iterator.Next();
- os << "{input=" << input_slot_index << "}";
- break;
- }
-
- case Translation::INT32_STACK_SLOT: {
- int input_slot_index = iterator.Next();
- os << "{input=" << input_slot_index << " (int32)}";
- break;
- }
-
- case Translation::INT64_STACK_SLOT: {
- int input_slot_index = iterator.Next();
- os << "{input=" << input_slot_index << " (int64)}";
- break;
- }
-
- case Translation::UINT32_STACK_SLOT: {
- int input_slot_index = iterator.Next();
- os << "{input=" << input_slot_index << " (uint32)}";
- break;
- }
-
- case Translation::BOOL_STACK_SLOT: {
- int input_slot_index = iterator.Next();
- os << "{input=" << input_slot_index << " (bool)}";
- break;
- }
-
- case Translation::FLOAT_STACK_SLOT:
- case Translation::DOUBLE_STACK_SLOT: {
- int input_slot_index = iterator.Next();
- os << "{input=" << input_slot_index << "}";
- break;
- }
-
- case Translation::LITERAL: {
- int literal_index = iterator.Next();
- Object literal_value = LiteralArray().get(literal_index);
- os << "{literal_id=" << literal_index << " (" << Brief(literal_value)
- << ")}";
- break;
- }
-
- case Translation::DUPLICATED_OBJECT: {
- int object_index = iterator.Next();
- os << "{object_index=" << object_index << "}";
- break;
- }
-
- case Translation::ARGUMENTS_ELEMENTS: {
- CreateArgumentsType arguments_type =
- static_cast<CreateArgumentsType>(iterator.Next());
- os << "{arguments_type=" << arguments_type << "}";
- break;
- }
- case Translation::ARGUMENTS_LENGTH: {
- os << "{arguments_length}";
- break;
- }
-
- case Translation::CAPTURED_OBJECT: {
- int args_length = iterator.Next();
- os << "{length=" << args_length << "}";
- break;
- }
-
- case Translation::UPDATE_FEEDBACK: {
- int literal_index = iterator.Next();
- FeedbackSlot slot(iterator.Next());
- os << "{feedback={vector_index=" << literal_index << ", slot=" << slot
- << "}}";
- break;
- }
- }
- os << "\n";
- }
+ TranslationArrayPrintSingleFrame(os, TranslationByteArray(),
+ TranslationIndex(i).value(),
+ LiteralArray());
}
}
@@ -690,10 +497,14 @@ void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate,
if ((name != nullptr) && (name[0] != '\0')) {
os << "name = " << name << "\n";
}
- if (CodeKindIsOptimizedJSFunction(kind())) {
+ if (CodeKindIsOptimizedJSFunction(kind()) && kind() != CodeKind::BASELINE) {
os << "stack_slots = " << stack_slots() << "\n";
}
- os << "compiler = " << (is_turbofanned() ? "turbofan" : "unknown") << "\n";
+ os << "compiler = "
+ << (is_turbofanned()
+ ? "turbofan"
+ : kind() == CodeKind::BASELINE ? "baseline" : "unknown")
+ << "\n";
os << "address = " << reinterpret_cast<void*>(ptr()) << "\n\n";
if (is_off_heap_trampoline()) {
@@ -724,32 +535,34 @@ void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate,
}
os << "\n";
- {
- SourcePositionTableIterator it(
- SourcePositionTable(), SourcePositionTableIterator::kJavaScriptOnly);
- if (!it.done()) {
- os << "Source positions:\n pc offset position\n";
- for (; !it.done(); it.Advance()) {
- os << std::setw(10) << std::hex << it.code_offset() << std::dec
- << std::setw(10) << it.source_position().ScriptOffset()
- << (it.is_statement() ? " statement" : "") << "\n";
+ if (kind() != CodeKind::BASELINE) {
+ {
+ SourcePositionTableIterator it(
+ SourcePositionTable(), SourcePositionTableIterator::kJavaScriptOnly);
+ if (!it.done()) {
+ os << "Source positions:\n pc offset position\n";
+ for (; !it.done(); it.Advance()) {
+ os << std::setw(10) << std::hex << it.code_offset() << std::dec
+ << std::setw(10) << it.source_position().ScriptOffset()
+ << (it.is_statement() ? " statement" : "") << "\n";
+ }
+ os << "\n";
}
- os << "\n";
}
- }
- {
- SourcePositionTableIterator it(SourcePositionTable(),
- SourcePositionTableIterator::kExternalOnly);
- if (!it.done()) {
- os << "External Source positions:\n pc offset fileid line\n";
- for (; !it.done(); it.Advance()) {
- DCHECK(it.source_position().IsExternal());
- os << std::setw(10) << std::hex << it.code_offset() << std::dec
- << std::setw(10) << it.source_position().ExternalFileId()
- << std::setw(10) << it.source_position().ExternalLine() << "\n";
+ {
+ SourcePositionTableIterator it(
+ SourcePositionTable(), SourcePositionTableIterator::kExternalOnly);
+ if (!it.done()) {
+ os << "External Source positions:\n pc offset fileid line\n";
+ for (; !it.done(); it.Advance()) {
+ DCHECK(it.source_position().IsExternal());
+ os << std::setw(10) << std::hex << it.code_offset() << std::dec
+ << std::setw(10) << it.source_position().ExternalFileId()
+ << std::setw(10) << it.source_position().ExternalLine() << "\n";
+ }
+ os << "\n";
}
- os << "\n";
}
}
@@ -806,10 +619,6 @@ void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate,
eh_frame_disassembler.DisassembleToStream(os);
os << "\n";
}
-
- if (has_code_comments()) {
- PrintCodeCommentsSection(os, code_comments(), code_comments_size());
- }
}
#endif // ENABLE_DISASSEMBLER
@@ -819,6 +628,8 @@ void BytecodeArray::Disassemble(std::ostream& os) {
os << "Parameter count " << parameter_count() << "\n";
os << "Register count " << register_count() << "\n";
os << "Frame size " << frame_size() << "\n";
+ os << "OSR nesting level: " << osr_loop_nesting_level() << "\n";
+ os << "Bytecode Age: " << bytecode_age() << "\n";
Address base_address = GetFirstBytecodeAddress();
SourcePositionTableIterator source_positions(SourcePositionTable());
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index f73e63b9e2..da7d6c9243 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -7,6 +7,7 @@
#include "src/base/bit-field.h"
#include "src/codegen/handler-table.h"
+#include "src/deoptimizer/translation-array.h"
#include "src/objects/code-kind.h"
#include "src/objects/contexts.h"
#include "src/objects/fixed-array.h"
@@ -248,12 +249,16 @@ class Code : public HeapObject {
// Testers for interpreter builtins.
inline bool is_interpreter_trampoline_builtin() const;
+ // Testers for baseline builtins.
+ inline bool is_baseline_prologue_builtin() const;
+ inline bool is_baseline_leave_frame_builtin() const;
+
// Tells whether the code checks the optimization marker in the function's
// feedback vector.
inline bool checks_optimization_marker() const;
// Tells whether the outgoing parameters of this code are tagged pointers.
- inline bool has_tagged_params() const;
+ inline bool has_tagged_outgoing_params() const;
// [is_turbofanned]: Tells whether the code object was generated by the
// TurboFan optimizing compiler.
@@ -374,6 +379,10 @@ class Code : public HeapObject {
static inline void CopyRelocInfoToByteArray(ByteArray dest,
const CodeDesc& desc);
+ inline uintptr_t GetBaselinePCForBytecodeOffset(int bytecode_offset,
+ bool precise = true);
+ inline int GetBytecodeOffsetForBaselinePC(Address baseline_pc);
+
// Flushes the instruction cache for the executable instructions of this code
// object. Make sure to call this while the code is still writable.
void FlushICache() const;
@@ -465,6 +474,8 @@ class Code : public HeapObject {
: (COMPRESS_POINTERS_BOOL ? 12 : 24);
#elif V8_TARGET_ARCH_S390X
static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 12 : 24;
+#elif V8_TARGET_ARCH_RISCV64
+ static constexpr int kHeaderPaddingSize = 24;
#else
#error Unknown architecture.
#endif
@@ -865,7 +876,7 @@ class DeoptimizationData : public FixedArray {
inline type name() const; \
inline void Set##name(type value);
- DECL_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
+ DECL_ELEMENT_ACCESSORS(TranslationByteArray, TranslationArray)
DECL_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
DECL_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
DECL_ELEMENT_ACCESSORS(OsrBytecodeOffset, Smi)
@@ -890,9 +901,9 @@ class DeoptimizationData : public FixedArray {
#undef DECL_ENTRY_ACCESSORS
- inline BailoutId BytecodeOffset(int i);
+ inline BytecodeOffset GetBytecodeOffset(int i);
- inline void SetBytecodeOffset(int i, BailoutId value);
+ inline void SetBytecodeOffset(int i, BytecodeOffset value);
inline int DeoptCount();
diff --git a/deps/v8/src/objects/compressed-slots.h b/deps/v8/src/objects/compressed-slots.h
index 36a6cab596..6f74b723c8 100644
--- a/deps/v8/src/objects/compressed-slots.h
+++ b/deps/v8/src/objects/compressed-slots.h
@@ -5,13 +5,13 @@
#ifndef V8_OBJECTS_COMPRESSED_SLOTS_H_
#define V8_OBJECTS_COMPRESSED_SLOTS_H_
-#ifdef V8_COMPRESS_POINTERS
-
+#include "include/v8config.h"
#include "src/objects/slots.h"
namespace v8 {
namespace internal {
+#ifdef V8_COMPRESS_POINTERS
// A CompressedObjectSlot instance describes a kTaggedSize-sized field ("slot")
// holding a compressed tagged pointer (smi or heap object).
// Its address() is the address of the slot.
@@ -141,9 +141,9 @@ class OffHeapCompressedObjectSlot
inline void Release_CompareAndSwap(Object old, Object target) const;
};
+#endif // V8_COMPRESS_POINTERS
+
} // namespace internal
} // namespace v8
-#endif // V8_COMPRESS_POINTERS
-
#endif // V8_OBJECTS_COMPRESSED_SLOTS_H_
diff --git a/deps/v8/src/objects/contexts-inl.h b/deps/v8/src/objects/contexts-inl.h
index 87f13818e9..f2164d7d3b 100644
--- a/deps/v8/src/objects/contexts-inl.h
+++ b/deps/v8/src/objects/contexts-inl.h
@@ -31,11 +31,11 @@ OBJECT_CONSTRUCTORS_IMPL(ScriptContextTable, FixedArray)
CAST_ACCESSOR(ScriptContextTable)
int ScriptContextTable::synchronized_used() const {
- return Smi::ToInt(synchronized_get(kUsedSlotIndex));
+ return Smi::ToInt(get(kUsedSlotIndex, kAcquireLoad));
}
void ScriptContextTable::synchronized_set_used(int used) {
- synchronized_set(kUsedSlotIndex, Smi::FromInt(used));
+ set(kUsedSlotIndex, Smi::FromInt(used), kReleaseStore);
}
// static
@@ -182,7 +182,7 @@ NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSORS)
CHECK_FOLLOWS2(v3, v4)
int Context::FunctionMapIndex(LanguageMode language_mode, FunctionKind kind,
- bool has_shared_name, bool needs_home_object) {
+ bool has_shared_name) {
if (IsClassConstructor(kind)) {
// Like the strict function map, but with no 'name' accessor. 'name'
// needs to be the last property and it is added during instantiation,
@@ -192,37 +192,27 @@ int Context::FunctionMapIndex(LanguageMode language_mode, FunctionKind kind,
int base = 0;
if (IsGeneratorFunction(kind)) {
- CHECK_FOLLOWS4(GENERATOR_FUNCTION_MAP_INDEX,
- GENERATOR_FUNCTION_WITH_NAME_MAP_INDEX,
- GENERATOR_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX,
- GENERATOR_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX);
- CHECK_FOLLOWS4(
- ASYNC_GENERATOR_FUNCTION_MAP_INDEX,
- ASYNC_GENERATOR_FUNCTION_WITH_NAME_MAP_INDEX,
- ASYNC_GENERATOR_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX,
- ASYNC_GENERATOR_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX);
+ CHECK_FOLLOWS2(GENERATOR_FUNCTION_MAP_INDEX,
+ GENERATOR_FUNCTION_WITH_NAME_MAP_INDEX);
+ CHECK_FOLLOWS2(ASYNC_GENERATOR_FUNCTION_MAP_INDEX,
+ ASYNC_GENERATOR_FUNCTION_WITH_NAME_MAP_INDEX);
base = IsAsyncFunction(kind) ? ASYNC_GENERATOR_FUNCTION_MAP_INDEX
: GENERATOR_FUNCTION_MAP_INDEX;
} else if (IsAsyncFunction(kind) || IsAsyncModule(kind)) {
- CHECK_FOLLOWS4(ASYNC_FUNCTION_MAP_INDEX, ASYNC_FUNCTION_WITH_NAME_MAP_INDEX,
- ASYNC_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX,
- ASYNC_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX);
+ CHECK_FOLLOWS2(ASYNC_FUNCTION_MAP_INDEX,
+ ASYNC_FUNCTION_WITH_NAME_MAP_INDEX);
base = ASYNC_FUNCTION_MAP_INDEX;
} else if (IsStrictFunctionWithoutPrototype(kind)) {
- DCHECK_IMPLIES(IsArrowFunction(kind), !needs_home_object);
- CHECK_FOLLOWS4(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
- METHOD_WITH_NAME_MAP_INDEX,
- METHOD_WITH_HOME_OBJECT_MAP_INDEX,
- METHOD_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX);
+ CHECK_FOLLOWS2(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
+ METHOD_WITH_NAME_MAP_INDEX);
base = STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX;
} else {
- DCHECK(!needs_home_object);
CHECK_FOLLOWS2(SLOPPY_FUNCTION_MAP_INDEX,
SLOPPY_FUNCTION_WITH_NAME_MAP_INDEX);
CHECK_FOLLOWS2(STRICT_FUNCTION_MAP_INDEX,
@@ -231,9 +221,8 @@ int Context::FunctionMapIndex(LanguageMode language_mode, FunctionKind kind,
base = is_strict(language_mode) ? STRICT_FUNCTION_MAP_INDEX
: SLOPPY_FUNCTION_MAP_INDEX;
}
- int offset = static_cast<int>(!has_shared_name) |
- (static_cast<int>(needs_home_object) << 1);
- DCHECK_EQ(0, offset & ~3);
+ int offset = static_cast<int>(!has_shared_name);
+ DCHECK_EQ(0, offset & ~1);
return base + offset;
}
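
With the home-object map variants removed, each function-map family shrinks from four maps to two, and the remaining CHECK_FOLLOWS2 checks guarantee the *_WITH_NAME map sits immediately after its base map, so the final offset is a single bit. A toy illustration of the resulting computation (constants are hypothetical, chosen only to satisfy the adjacency requirement):

// Two maps per family: base, then base + 1 for "needs the 'name' property
// added at instantiation time" (i.e. the function has no shared name).
constexpr int kSloppyFunctionMapIndex = 10;          // hypothetical value
constexpr int kSloppyFunctionWithNameMapIndex = 11;  // must follow the base

constexpr int FunctionMapIndexSketch(bool has_shared_name) {
  return kSloppyFunctionMapIndex + static_cast<int>(!has_shared_name);
}

static_assert(FunctionMapIndexSketch(true) == kSloppyFunctionMapIndex, "");
static_assert(FunctionMapIndexSketch(false) ==
              kSloppyFunctionWithNameMapIndex, "");
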
diff --git a/deps/v8/src/objects/contexts.h b/deps/v8/src/objects/contexts.h
index 4b70e3eded..47784cf405 100644
--- a/deps/v8/src/objects/contexts.h
+++ b/deps/v8/src/objects/contexts.h
@@ -214,6 +214,7 @@ enum ContextLookupFlags {
V(REGEXP_PROTOTYPE_MAP_INDEX, Map, regexp_prototype_map) \
V(REGEXP_REPLACE_FUNCTION_INDEX, JSFunction, regexp_replace_function) \
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map) \
+ V(REGEXP_RESULT_WITH_INDICES_MAP_INDEX, Map, regexp_result_with_indices_map) \
V(REGEXP_RESULT_INDICES_MAP_INDEX, Map, regexp_result_indices_map) \
V(REGEXP_SEARCH_FUNCTION_INDEX, JSFunction, regexp_search_function) \
V(REGEXP_SPLIT_FUNCTION_INDEX, JSFunction, regexp_split_function) \
@@ -236,6 +237,7 @@ enum ContextLookupFlags {
V(SLOW_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, SimpleNumberDictionary, \
slow_template_instantiations_cache) \
V(ATOMICS_WAITASYNC_PROMISES, OrderedHashSet, atomics_waitasync_promises) \
+ V(WASM_DEBUG_MAPS, FixedArray, wasm_debug_maps) \
/* Fast Path Protectors */ \
V(REGEXP_SPECIES_PROTECTOR_INDEX, PropertyCell, regexp_species_protector) \
/* All *_FUNCTION_MAP_INDEX definitions used by Context::FunctionMapIndex */ \
@@ -253,29 +255,14 @@ enum ContextLookupFlags {
V(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
strict_function_without_prototype_map) \
V(METHOD_WITH_NAME_MAP_INDEX, Map, method_with_name_map) \
- V(METHOD_WITH_HOME_OBJECT_MAP_INDEX, Map, method_with_home_object_map) \
- V(METHOD_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX, Map, \
- method_with_name_and_home_object_map) \
V(ASYNC_FUNCTION_MAP_INDEX, Map, async_function_map) \
V(ASYNC_FUNCTION_WITH_NAME_MAP_INDEX, Map, async_function_with_name_map) \
- V(ASYNC_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX, Map, \
- async_function_with_home_object_map) \
- V(ASYNC_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX, Map, \
- async_function_with_name_and_home_object_map) \
V(GENERATOR_FUNCTION_MAP_INDEX, Map, generator_function_map) \
V(GENERATOR_FUNCTION_WITH_NAME_MAP_INDEX, Map, \
generator_function_with_name_map) \
- V(GENERATOR_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX, Map, \
- generator_function_with_home_object_map) \
- V(GENERATOR_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX, Map, \
- generator_function_with_name_and_home_object_map) \
V(ASYNC_GENERATOR_FUNCTION_MAP_INDEX, Map, async_generator_function_map) \
V(ASYNC_GENERATOR_FUNCTION_WITH_NAME_MAP_INDEX, Map, \
async_generator_function_with_name_map) \
- V(ASYNC_GENERATOR_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX, Map, \
- async_generator_function_with_home_object_map) \
- V(ASYNC_GENERATOR_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX, Map, \
- async_generator_function_with_name_and_home_object_map) \
V(CLASS_FUNCTION_MAP_INDEX, Map, class_function_map) \
V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
@@ -400,7 +387,7 @@ class ScriptContextTable : public FixedArray {
// [ previous ] A pointer to the previous context.
//
// [ extension ] Additional data. This slot is only available when
-// extension_bit is set. Check using has_extension.
+// ScopeInfo::HasContextExtensionSlot returns true.
//
// For native contexts, it contains the global object.
// For module contexts, it contains the module object.
@@ -486,7 +473,7 @@ class Context : public TorqueGeneratedContext<Context, HeapObject> {
SCOPE_INFO_INDEX,
PREVIOUS_INDEX,
- // This slot only exists if the extension_flag bit is set.
+ // This slot only exists if ScopeInfo::HasContextExtensionSlot returns true.
EXTENSION_INDEX,
// These slots are only in native contexts.
@@ -624,8 +611,7 @@ class Context : public TorqueGeneratedContext<Context, HeapObject> {
bool* is_sloppy_function_name = nullptr);
static inline int FunctionMapIndex(LanguageMode language_mode,
- FunctionKind kind, bool has_shared_name,
- bool needs_home_object);
+ FunctionKind kind, bool has_shared_name);
static int ArrayMapIndex(ElementsKind elements_kind) {
DCHECK(IsFastElementsKind(elements_kind));
diff --git a/deps/v8/src/objects/contexts.tq b/deps/v8/src/objects/contexts.tq
index 157504da5f..604852c24e 100644
--- a/deps/v8/src/objects/contexts.tq
+++ b/deps/v8/src/objects/contexts.tq
@@ -13,7 +13,7 @@ class Context extends HeapObject {
return *ContextSlot(this, ContextSlot::SCOPE_INFO_INDEX);
}
const length: Smi;
- @relaxedWrite elements[length]: Object;
+ @relaxedRead @relaxedWrite elements[length]: Object;
}
extern class AwaitContext extends Context generates 'TNode<Context>';
diff --git a/deps/v8/src/objects/debug-objects.cc b/deps/v8/src/objects/debug-objects.cc
index 1c96fa377a..ecb24632ef 100644
--- a/deps/v8/src/objects/debug-objects.cc
+++ b/deps/v8/src/objects/debug-objects.cc
@@ -356,38 +356,15 @@ int BreakPointInfo::GetBreakPointCount(Isolate* isolate) {
return FixedArray::cast(break_points()).length();
}
-int CoverageInfo::SlotFieldOffset(int slot_index, int field_offset) const {
- DCHECK_LT(field_offset, Slot::kSize);
- DCHECK_LT(slot_index, slot_count());
- return kSlotsOffset + slot_index * Slot::kSize + field_offset;
-}
-
-int CoverageInfo::StartSourcePosition(int slot_index) const {
- return ReadField<int32_t>(
- SlotFieldOffset(slot_index, Slot::kStartSourcePositionOffset));
-}
-
-int CoverageInfo::EndSourcePosition(int slot_index) const {
- return ReadField<int32_t>(
- SlotFieldOffset(slot_index, Slot::kEndSourcePositionOffset));
-}
-
-int CoverageInfo::BlockCount(int slot_index) const {
- return ReadField<int32_t>(
- SlotFieldOffset(slot_index, Slot::kBlockCountOffset));
-}
-
void CoverageInfo::InitializeSlot(int slot_index, int from_pos, int to_pos) {
- WriteField<int32_t>(
- SlotFieldOffset(slot_index, Slot::kStartSourcePositionOffset), from_pos);
- WriteField<int32_t>(
- SlotFieldOffset(slot_index, Slot::kEndSourcePositionOffset), to_pos);
+ set_slots_start_source_position(slot_index, from_pos);
+ set_slots_end_source_position(slot_index, to_pos);
ResetBlockCount(slot_index);
- WriteField<int32_t>(SlotFieldOffset(slot_index, Slot::kPaddingOffset), 0);
+ set_slots_padding(slot_index, 0);
}
void CoverageInfo::ResetBlockCount(int slot_index) {
- WriteField<int32_t>(SlotFieldOffset(slot_index, Slot::kBlockCountOffset), 0);
+ set_slots_block_count(slot_index, 0);
}
void CoverageInfo::CoverageInfoPrint(std::ostream& os,
@@ -406,8 +383,8 @@ void CoverageInfo::CoverageInfoPrint(std::ostream& os,
os << "):" << std::endl;
for (int i = 0; i < slot_count(); i++) {
- os << "{" << StartSourcePosition(i) << "," << EndSourcePosition(i) << "}"
- << std::endl;
+ os << "{" << slots_start_source_position(i) << ","
+ << slots_end_source_position(i) << "}" << std::endl;
}
}
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
index 4a22094a43..e92a3026f8 100644
--- a/deps/v8/src/objects/debug-objects.h
+++ b/deps/v8/src/objects/debug-objects.h
@@ -170,10 +170,6 @@ class BreakPointInfo
class CoverageInfo
: public TorqueGeneratedCoverageInfo<CoverageInfo, HeapObject> {
public:
- int StartSourcePosition(int slot_index) const;
- int EndSourcePosition(int slot_index) const;
- int BlockCount(int slot_index) const;
-
void InitializeSlot(int slot_index, int start_pos, int end_pos);
void ResetBlockCount(int slot_index);
@@ -191,9 +187,6 @@ class CoverageInfo
// Description of layout within each slot.
using Slot = TorqueGeneratedCoverageInfoSlotOffsets;
- private:
- int SlotFieldOffset(int slot_index, int field_offset) const;
-
TQ_OBJECT_CONSTRUCTORS(CoverageInfo)
};
diff --git a/deps/v8/src/objects/dictionary-inl.h b/deps/v8/src/objects/dictionary-inl.h
index c7e169abcb..981f5aac93 100644
--- a/deps/v8/src/objects/dictionary-inl.h
+++ b/deps/v8/src/objects/dictionary-inl.h
@@ -322,16 +322,7 @@ template <typename Dictionary>
void GlobalDictionaryShape::DetailsAtPut(Dictionary dict, InternalIndex entry,
PropertyDetails value) {
DCHECK(entry.is_found());
- PropertyCell cell = dict.CellAt(entry);
- // Deopt when when making a writable property read-only. The reverse direction
- // is uninteresting because Turbofan does not currently rely on read-only
- // unless the property is also configurable, in which case it will stay
- // read-only forever.
- if (!cell.property_details().IsReadOnly() && value.IsReadOnly()) {
- cell.dependent_code().DeoptimizeDependentCodeGroup(
- DependentCode::kPropertyCellChangedGroup);
- }
- cell.set_property_details(value);
+ dict.CellAt(entry).UpdatePropertyDetailsExceptCellType(value);
}
} // namespace internal
diff --git a/deps/v8/src/objects/feedback-cell-inl.h b/deps/v8/src/objects/feedback-cell-inl.h
index 494a951ce4..bd7c71d88b 100644
--- a/deps/v8/src/objects/feedback-cell-inl.h
+++ b/deps/v8/src/objects/feedback-cell-inl.h
@@ -21,6 +21,8 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(FeedbackCell)
+RELEASE_ACQUIRE_ACCESSORS(FeedbackCell, value, HeapObject, kValueOffset)
+
void FeedbackCell::clear_padding() {
if (FeedbackCell::kAlignedSize == FeedbackCell::kUnalignedSize) return;
DCHECK_GE(FeedbackCell::kAlignedSize, FeedbackCell::kUnalignedSize);
@@ -53,9 +55,6 @@ void FeedbackCell::SetInitialInterruptBudget() {
}
}
-void FeedbackCell::SetInterruptBudget() {
- set_interrupt_budget(FLAG_interrupt_budget);
-}
void FeedbackCell::IncrementClosureCount(Isolate* isolate) {
ReadOnlyRoots r(isolate);
diff --git a/deps/v8/src/objects/feedback-cell.h b/deps/v8/src/objects/feedback-cell.h
index 19f1075e62..8e9c82539f 100644
--- a/deps/v8/src/objects/feedback-cell.h
+++ b/deps/v8/src/objects/feedback-cell.h
@@ -28,13 +28,17 @@ class FeedbackCell : public TorqueGeneratedFeedbackCell<FeedbackCell, Struct> {
static const int kUnalignedSize = kSize;
static const int kAlignedSize = RoundUp<kObjectAlignment>(int{kSize});
+ using TorqueGeneratedFeedbackCell<FeedbackCell, Struct>::value;
+ using TorqueGeneratedFeedbackCell<FeedbackCell, Struct>::set_value;
+
+ DECL_RELEASE_ACQUIRE_ACCESSORS(value, HeapObject)
+
inline void clear_padding();
inline void reset_feedback_vector(
base::Optional<std::function<void(HeapObject object, ObjectSlot slot,
HeapObject target)>>
gc_notify_updated_slot = base::nullopt);
inline void SetInitialInterruptBudget();
- inline void SetInterruptBudget();
// The closure count is encoded in the cell's map, which distinguishes
// between zero, one, or many closures. This function records a new closure
diff --git a/deps/v8/src/objects/feedback-vector-inl.h b/deps/v8/src/objects/feedback-vector-inl.h
index 2e23c35b5f..a66ec312f6 100644
--- a/deps/v8/src/objects/feedback-vector-inl.h
+++ b/deps/v8/src/objects/feedback-vector-inl.h
@@ -38,6 +38,9 @@ INT32_ACCESSORS(FeedbackMetadata, slot_count, kSlotCountOffset)
INT32_ACCESSORS(FeedbackMetadata, create_closure_slot_count,
kCreateClosureSlotCountOffset)
+RELEASE_ACQUIRE_WEAK_ACCESSORS(FeedbackVector, maybe_optimized_code,
+ kMaybeOptimizedCodeOffset)
+
int32_t FeedbackMetadata::synchronized_slot_count() const {
return base::Acquire_Load(
reinterpret_cast<const base::Atomic32*>(field_address(kSlotCountOffset)));
@@ -100,6 +103,10 @@ Handle<FeedbackCell> ClosureFeedbackCellArray::GetFeedbackCell(int index) {
return handle(FeedbackCell::cast(get(index)), GetIsolate());
}
+FeedbackCell ClosureFeedbackCellArray::cell(int index) {
+ return FeedbackCell::cast(get(index));
+}
+
bool FeedbackVector::is_empty() const { return length() == 0; }
FeedbackMetadata FeedbackVector::metadata() const {
@@ -109,7 +116,7 @@ FeedbackMetadata FeedbackVector::metadata() const {
void FeedbackVector::clear_invocation_count() { set_invocation_count(0); }
Code FeedbackVector::optimized_code() const {
- MaybeObject slot = maybe_optimized_code();
+ MaybeObject slot = maybe_optimized_code(kAcquireLoad);
DCHECK(slot->IsWeakOrCleared());
HeapObject heap_object;
Code code =
@@ -126,12 +133,22 @@ OptimizationMarker FeedbackVector::optimization_marker() const {
return OptimizationMarkerBits::decode(flags());
}
+int FeedbackVector::global_ticks_at_last_runtime_profiler_interrupt() const {
+ return GlobalTicksAtLastRuntimeProfilerInterruptBits::decode(flags());
+}
+
+void FeedbackVector::set_global_ticks_at_last_runtime_profiler_interrupt(
+ int ticks) {
+ set_flags(
+ GlobalTicksAtLastRuntimeProfilerInterruptBits::update(flags(), ticks));
+}
+
OptimizationTier FeedbackVector::optimization_tier() const {
OptimizationTier tier = OptimizationTierBits::decode(flags());
// It is possible that the optimization tier bits aren't updated when the code
// was cleared due to a GC.
DCHECK_IMPLIES(tier == OptimizationTier::kNone,
- maybe_optimized_code()->IsCleared());
+ maybe_optimized_code(kAcquireLoad)->IsCleared());
return tier;
}
@@ -178,9 +195,12 @@ MaybeObject FeedbackVector::Get(IsolateRoot isolate, FeedbackSlot slot) const {
Handle<FeedbackCell> FeedbackVector::GetClosureFeedbackCell(int index) const {
DCHECK_GE(index, 0);
- ClosureFeedbackCellArray cell_array =
- ClosureFeedbackCellArray::cast(closure_feedback_cell_array());
- return cell_array.GetFeedbackCell(index);
+ return closure_feedback_cell_array().GetFeedbackCell(index);
+}
+
+FeedbackCell FeedbackVector::closure_feedback_cell(int index) const {
+ DCHECK_GE(index, 0);
+ return closure_feedback_cell_array().cell(index);
}
MaybeObject FeedbackVector::SynchronizedGet(FeedbackSlot slot) const {
diff --git a/deps/v8/src/objects/feedback-vector.cc b/deps/v8/src/objects/feedback-vector.cc
index f354c1689b..a77ea5d265 100644
--- a/deps/v8/src/objects/feedback-vector.cc
+++ b/deps/v8/src/objects/feedback-vector.cc
@@ -381,7 +381,8 @@ void FeedbackVector::SaturatingIncrementProfilerTicks() {
// static
void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector,
- Handle<Code> code) {
+ Handle<Code> code,
+ FeedbackCell feedback_cell) {
DCHECK(CodeKindIsOptimizedJSFunction(code->kind()));
// We should only set optimized code only when there is no valid optimized
// code or we are tiering up.
@@ -394,18 +395,46 @@ void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector,
// re-mark the function for non-concurrent optimization after an OSR. We
// should avoid these cases and also check that marker isn't
// kCompileOptimized or kCompileOptimizedConcurrent.
- vector->set_maybe_optimized_code(HeapObjectReference::Weak(*code));
+ vector->set_maybe_optimized_code(HeapObjectReference::Weak(*code),
+ kReleaseStore);
int32_t state = vector->flags();
state = OptimizationTierBits::update(state, GetTierForCodeKind(code->kind()));
state = OptimizationMarkerBits::update(state, OptimizationMarker::kNone);
vector->set_flags(state);
+ // With FLAG_turboprop, we would have an interrupt budget necessary for
+ // tiering up to Turboprop code. Once we install turboprop code, set it to a
+ // higher value as required for tiering up from Turboprop to TurboFan.
+ if (FLAG_turboprop) {
+ FeedbackVector::SetInterruptBudget(feedback_cell);
+ }
}
-void FeedbackVector::ClearOptimizedCode() {
+// static
+void FeedbackVector::SetInterruptBudget(FeedbackCell feedback_cell) {
+ DCHECK(feedback_cell.value().IsFeedbackVector());
+ FeedbackVector vector = FeedbackVector::cast(feedback_cell.value());
+ // Set the interrupt budget as required for tiering up to next level. Without
+ // Turboprop, this is used only to tier up to TurboFan and hence always set to
+ // FLAG_interrupt_budget. With Turboprop, we use this budget to both tier up
+ // to Turboprop and TurboFan. When there is no optimized code, set it to
+ // FLAG_interrupt_budget required for tiering up to Turboprop. When there is
+ // optimized code, set it to a higher value required for tiering up from
+ // Turboprop to TurboFan.
+ if (FLAG_turboprop && vector.has_optimized_code()) {
+ feedback_cell.set_interrupt_budget(
+ FLAG_interrupt_budget *
+ FLAG_interrupt_budget_scale_factor_for_top_tier);
+ } else {
+ feedback_cell.set_interrupt_budget(FLAG_interrupt_budget);
+ }
+}
+
+void FeedbackVector::ClearOptimizedCode(FeedbackCell feedback_cell) {
DCHECK(has_optimized_code());
DCHECK_NE(optimization_tier(), OptimizationTier::kNone);
- set_maybe_optimized_code(HeapObjectReference::ClearedValue(GetIsolate()));
- ClearOptimizationTier();
+ set_maybe_optimized_code(HeapObjectReference::ClearedValue(GetIsolate()),
+ kReleaseStore);
+ ClearOptimizationTier(feedback_cell);
}
void FeedbackVector::ClearOptimizationMarker() {
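
The new static SetInterruptBudget centralizes the choice described in the comment above: with --turboprop, a function that already holds Turboprop code gets a scaled-up budget so the next tier-up (to TurboFan) arrives later, while everything else keeps the base budget. A stripped-down sketch of that decision (the numeric values are hypothetical stand-ins for FLAG_interrupt_budget and FLAG_interrupt_budget_scale_factor_for_top_tier):

constexpr int kBaseInterruptBudget = 144 * 1024;  // hypothetical default
constexpr int kTopTierScaleFactor = 5;            // hypothetical default

int InterruptBudgetFor(bool turboprop_enabled, bool has_optimized_code) {
  if (turboprop_enabled && has_optimized_code) {
    // Already running Turboprop code: wait longer before tiering to TurboFan.
    return kBaseInterruptBudget * kTopTierScaleFactor;
  }
  // No optimized code yet (or Turboprop disabled): the usual budget.
  return kBaseInterruptBudget;
}
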
@@ -418,10 +447,15 @@ void FeedbackVector::SetOptimizationMarker(OptimizationMarker marker) {
set_flags(state);
}
-void FeedbackVector::ClearOptimizationTier() {
+void FeedbackVector::ClearOptimizationTier(FeedbackCell feedback_cell) {
int32_t state = flags();
state = OptimizationTierBits::update(state, OptimizationTier::kNone);
set_flags(state);
+ // We are discarding the optimized code, adjust the interrupt budget
+ // so we have the correct budget required for the tier up.
+ if (FLAG_turboprop) {
+ FeedbackVector::SetInterruptBudget(feedback_cell);
+ }
}
void FeedbackVector::InitializeOptimizationState() {
@@ -434,10 +468,10 @@ void FeedbackVector::InitializeOptimizationState() {
}
void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
- SharedFunctionInfo shared, const char* reason) {
- MaybeObject slot = maybe_optimized_code();
+ FeedbackCell feedback_cell, SharedFunctionInfo shared, const char* reason) {
+ MaybeObject slot = maybe_optimized_code(kAcquireLoad);
if (slot->IsCleared()) {
- ClearOptimizationTier();
+ ClearOptimizationTier(feedback_cell);
return;
}
@@ -447,7 +481,7 @@ void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
if (!code.deopt_already_counted()) {
code.set_deopt_already_counted(true);
}
- ClearOptimizedCode();
+ ClearOptimizedCode(feedback_cell);
}
}
diff --git a/deps/v8/src/objects/feedback-vector.h b/deps/v8/src/objects/feedback-vector.h
index 17c0dc8ae3..e6a850fe52 100644
--- a/deps/v8/src/objects/feedback-vector.h
+++ b/deps/v8/src/objects/feedback-vector.h
@@ -26,6 +26,8 @@ namespace internal {
class IsCompiledScope;
+enum class UpdateFeedbackMode { kOptionalFeedback, kGuaranteedFeedback };
+
enum class FeedbackSlotKind : uint8_t {
// This kind means that the slot points to the middle of other slot
// which occupies more than one feedback vector element.
@@ -166,7 +168,9 @@ class ClosureFeedbackCellArray : public FixedArray {
V8_EXPORT_PRIVATE static Handle<ClosureFeedbackCellArray> New(
Isolate* isolate, Handle<SharedFunctionInfo> shared);
+
inline Handle<FeedbackCell> GetFeedbackCell(int index);
+ inline FeedbackCell cell(int index);
DECL_VERIFIER(ClosureFeedbackCellArray)
DECL_PRINTER(ClosureFeedbackCellArray)
@@ -194,6 +198,11 @@ class FeedbackVector
STATIC_ASSERT(OptimizationTier::kLastOptimizationTier <
OptimizationTierBits::kMax);
+ static const bool kFeedbackVectorMaybeOptimizedCodeIsStoreRelease = true;
+ using TorqueGeneratedFeedbackVector<FeedbackVector,
+ HeapObject>::maybe_optimized_code;
+ DECL_RELEASE_ACQUIRE_WEAK_ACCESSORS(maybe_optimized_code)
+
static constexpr uint32_t kHasCompileOptimizedOrLogFirstExecutionMarker =
kNoneOrInOptimizationQueueMask << OptimizationMarkerBits::kShift;
static constexpr uint32_t kHasNoTopTierCodeOrCompileOptimizedMarkerMask =
@@ -217,18 +226,26 @@ class FeedbackVector
inline bool has_optimization_marker() const;
inline OptimizationMarker optimization_marker() const;
inline OptimizationTier optimization_tier() const;
- void ClearOptimizedCode();
- void EvictOptimizedCodeMarkedForDeoptimization(SharedFunctionInfo shared,
+ inline int global_ticks_at_last_runtime_profiler_interrupt() const;
+ inline void set_global_ticks_at_last_runtime_profiler_interrupt(int ticks);
+ void ClearOptimizedCode(FeedbackCell feedback_cell);
+ void EvictOptimizedCodeMarkedForDeoptimization(FeedbackCell feedback_cell,
+ SharedFunctionInfo shared,
const char* reason);
- static void SetOptimizedCode(Handle<FeedbackVector> vector,
- Handle<Code> code);
+ static void SetOptimizedCode(Handle<FeedbackVector> vector, Handle<Code> code,
+ FeedbackCell feedback_cell);
void SetOptimizationMarker(OptimizationMarker marker);
- void ClearOptimizationTier();
+ void ClearOptimizationTier(FeedbackCell feedback_cell);
void InitializeOptimizationState();
// Clears the optimization marker in the feedback vector.
void ClearOptimizationMarker();
+ // Sets the interrupt budget based on the optimized code available on the
+ // feedback vector. This function expects that the feedback cell contains a
+ // feedback vector.
+ static void SetInterruptBudget(FeedbackCell feedback_cell);
+
// Conversion from a slot to an integer index to the underlying array.
static int GetIndex(FeedbackSlot slot) { return slot.ToInt(); }
@@ -247,6 +264,7 @@ class FeedbackVector
// Returns the feedback cell at |index| that is used to create the
// closure.
inline Handle<FeedbackCell> GetClosureFeedbackCell(int index) const;
+ inline FeedbackCell closure_feedback_cell(int index) const;
// Gives access to raw memory which stores the array's data.
inline MaybeObjectSlot slots_start();
diff --git a/deps/v8/src/objects/feedback-vector.tq b/deps/v8/src/objects/feedback-vector.tq
index a90d4d363c..a84533db67 100644
--- a/deps/v8/src/objects/feedback-vector.tq
+++ b/deps/v8/src/objects/feedback-vector.tq
@@ -8,6 +8,7 @@ type OptimizationTier extends uint16 constexpr 'OptimizationTier';
bitfield struct FeedbackVectorFlags extends uint32 {
optimization_marker: OptimizationMarker: 3 bit;
optimization_tier: OptimizationTier: 2 bit;
+ global_ticks_at_last_runtime_profiler_interrupt: uint32: 24 bit;
}
@generateBodyDescriptor
@@ -23,7 +24,7 @@ extern class FeedbackVector extends HeapObject {
shared_function_info: SharedFunctionInfo;
maybe_optimized_code: Weak<Code>;
closure_feedback_cell_array: ClosureFeedbackCellArray;
- raw_feedback_slots[length]: MaybeObject;
+ @relaxedRead raw_feedback_slots[length]: MaybeObject;
}
extern class FeedbackMetadata extends HeapObject;
diff --git a/deps/v8/src/objects/field-index-inl.h b/deps/v8/src/objects/field-index-inl.h
index a3b4c23140..09056cfd99 100644
--- a/deps/v8/src/objects/field-index-inl.h
+++ b/deps/v8/src/objects/field-index-inl.h
@@ -39,7 +39,7 @@ FieldIndex FieldIndex::ForPropertyIndex(Map map, int property_index,
first_inobject_offset);
}
-// Returns the index format accepted by the HLoadFieldByIndex instruction.
+// Returns the index format accepted by the LoadFieldByIndex instruction.
// (In-object: zero-based from (object start + JSObject::kHeaderSize),
// out-of-object: zero-based from FixedArray::kHeaderSize.)
int FieldIndex::GetLoadByFieldIndex() const {
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index c227942112..a91f89784f 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -88,7 +88,6 @@ bool FixedArray::is_the_hole(Isolate* isolate, int index) {
return get(isolate, index).IsTheHole(isolate);
}
-#if !defined(_WIN32) || defined(_WIN64)
void FixedArray::set(int index, Smi value) {
DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
@@ -96,7 +95,6 @@ void FixedArray::set(int index, Smi value) {
int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(*this, offset, value);
}
-#endif
void FixedArray::set(int index, Object value) {
DCHECK_NE(GetReadOnlyRoots().fixed_cow_array_map(), map());
@@ -125,21 +123,50 @@ void FixedArray::NoWriteBarrierSet(FixedArray array, int index, Object value) {
RELAXED_WRITE_FIELD(array, offset, value);
}
-Object FixedArray::synchronized_get(int index) const {
+Object FixedArray::get(int index, RelaxedLoadTag) const {
IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return synchronized_get(isolate, index);
+ return get(isolate, index);
}
-Object FixedArray::synchronized_get(IsolateRoot isolate, int index) const {
+Object FixedArray::get(IsolateRoot isolate, int index, RelaxedLoadTag) const {
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
- return ACQUIRE_READ_FIELD(*this, OffsetOfElementAt(index));
+ return RELAXED_READ_FIELD(*this, OffsetOfElementAt(index));
}
-void FixedArray::synchronized_set(int index, Smi value) {
+void FixedArray::set(int index, Object value, RelaxedStoreTag,
+ WriteBarrierMode mode) {
DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
+ RELAXED_WRITE_FIELD(*this, OffsetOfElementAt(index), value);
+ CONDITIONAL_WRITE_BARRIER(*this, OffsetOfElementAt(index), value, mode);
+}
+
+void FixedArray::set(int index, Smi value, RelaxedStoreTag tag) {
DCHECK(Object(value).IsSmi());
+ set(index, value, tag, SKIP_WRITE_BARRIER);
+}
+
+Object FixedArray::get(int index, AcquireLoadTag) const {
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
+ return get(isolate, index);
+}
+
+Object FixedArray::get(IsolateRoot isolate, int index, AcquireLoadTag) const {
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
+ return ACQUIRE_READ_FIELD(*this, OffsetOfElementAt(index));
+}
+
+void FixedArray::set(int index, Object value, ReleaseStoreTag,
+ WriteBarrierMode mode) {
+ DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
RELEASE_WRITE_FIELD(*this, OffsetOfElementAt(index), value);
+ CONDITIONAL_WRITE_BARRIER(*this, OffsetOfElementAt(index), value, mode);
+}
+
+void FixedArray::set(int index, Smi value, ReleaseStoreTag tag) {
+ DCHECK(Object(value).IsSmi());
+ set(index, value, tag, SKIP_WRITE_BARRIER);
}
void FixedArray::set_undefined(int index) {
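
The FixedArray change above retires the bespoke synchronized_get/synchronized_set pair in favour of get/set overloads selected by empty tag types (RelaxedLoadTag, AcquireLoadTag, ReleaseStoreTag, ...), so the memory-ordering contract is spelled out at every call site. A self-contained sketch of that tag-dispatch pattern using std::atomic (illustrative only; V8's real accessors go through its RELAXED_/ACQUIRE_/RELEASE_ field macros rather than std::atomic members):

#include <atomic>

struct RelaxedLoadTag {};
struct AcquireLoadTag {};
struct RelaxedStoreTag {};
struct ReleaseStoreTag {};

class TaggedCell {
 public:
  int get(RelaxedLoadTag) const {
    return value_.load(std::memory_order_relaxed);
  }
  int get(AcquireLoadTag) const {
    return value_.load(std::memory_order_acquire);
  }
  void set(int v, RelaxedStoreTag) {
    value_.store(v, std::memory_order_relaxed);
  }
  void set(int v, ReleaseStoreTag) {
    value_.store(v, std::memory_order_release);
  }

 private:
  std::atomic<int> value_{0};
};

// Call sites name the ordering they rely on:
//   cell.set(42, ReleaseStoreTag{});
//   int x = cell.get(AcquireLoadTag{});
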
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index a05c7d3abe..53b4cbb22b 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -111,30 +111,26 @@ class FixedArray
Isolate* isolate, Handle<FixedArray> array, int index,
Handle<Object> value);
- // Synchronized setters and getters.
- inline Object synchronized_get(int index) const;
- inline Object synchronized_get(IsolateRoot isolate, int index) const;
- // Currently only Smis are written with release semantics, hence we can avoid
- // a write barrier.
- inline void synchronized_set(int index, Smi value);
+ // Relaxed accessors.
+ inline Object get(int index, RelaxedLoadTag) const;
+ inline Object get(IsolateRoot isolate, int index, RelaxedLoadTag) const;
+ inline void set(int index, Object value, RelaxedStoreTag,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void set(int index, Smi value, RelaxedStoreTag);
+
+ // Acquire/release accessors.
+ inline Object get(int index, AcquireLoadTag) const;
+ inline Object get(IsolateRoot isolate, int index, AcquireLoadTag) const;
+ inline void set(int index, Object value, ReleaseStoreTag,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void set(int index, Smi value, ReleaseStoreTag);
// Setter that uses write barrier.
inline void set(int index, Object value);
inline bool is_the_hole(Isolate* isolate, int index);
// Setter that doesn't need write barrier.
-#if defined(_WIN32) && !defined(_WIN64)
- inline void set(int index, Smi value) {
- DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
- DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
- DCHECK(Object(value).IsSmi());
- int offset = OffsetOfElementAt(index);
- RELAXED_WRITE_FIELD(*this, offset, value);
- }
-#else
inline void set(int index, Smi value);
-#endif
-
// Setter with explicit barrier mode.
inline void set(int index, Object value, WriteBarrierMode mode);
diff --git a/deps/v8/src/objects/fixed-array.tq b/deps/v8/src/objects/fixed-array.tq
index 86ebea5db8..6aed0c26b2 100644
--- a/deps/v8/src/objects/fixed-array.tq
+++ b/deps/v8/src/objects/fixed-array.tq
@@ -26,7 +26,7 @@ extern class FixedDoubleArray extends FixedArrayBase {
@generateCppClass
extern class WeakFixedArray extends HeapObject {
const length: Smi;
- objects[length]: MaybeObject;
+ @relaxedRead objects[length]: MaybeObject;
}
@generateCppClass
@@ -51,7 +51,7 @@ extern class TemplateList extends FixedArray {
extern class WeakArrayList extends HeapObject {
const capacity: Smi;
length: Smi;
- objects[capacity]: MaybeObject;
+ @relaxedRead objects[capacity]: MaybeObject;
}
extern operator '.length_intptr' macro LoadAndUntagFixedArrayBaseLength(
@@ -125,7 +125,7 @@ extern macro CopyFixedArrayElements(
macro ExtractFixedArray(
source: FixedArray, first: intptr, count: intptr,
capacity: intptr): FixedArray {
- // TODO(tebbi): This should be optimized to use memcpy for initialization.
+ // TODO(turbofan): This should be optimized to use memcpy for initialization.
return NewFixedArray(
capacity,
IteratorSequence<Object>(
@@ -135,7 +135,7 @@ macro ExtractFixedArray(
macro ExtractFixedDoubleArray(
source: FixedDoubleArray, first: intptr, count: intptr,
capacity: intptr): FixedDoubleArray|EmptyFixedArray {
- // TODO(tebbi): This should be optimized to use memcpy for initialization.
+ // TODO(turbofan): This should be optimized to use memcpy for initialization.
return NewFixedDoubleArray(
capacity,
IteratorSequence<float64_or_hole>(
diff --git a/deps/v8/src/objects/frame-array-inl.h b/deps/v8/src/objects/frame-array-inl.h
deleted file mode 100644
index 5627b72823..0000000000
--- a/deps/v8/src/objects/frame-array-inl.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_OBJECTS_FRAME_ARRAY_INL_H_
-#define V8_OBJECTS_FRAME_ARRAY_INL_H_
-
-#include "src/objects/frame-array.h"
-
-#include "src/objects/foreign-inl.h"
-#include "src/wasm/wasm-objects-inl.h"
-
-// Has to be the last include (doesn't have include guards):
-#include "src/objects/object-macros.h"
-
-namespace v8 {
-namespace internal {
-
-OBJECT_CONSTRUCTORS_IMPL(FrameArray, FixedArray)
-CAST_ACCESSOR(FrameArray)
-
-#define DEFINE_FRAME_ARRAY_ACCESSORS(name, type) \
- type FrameArray::name(int frame_ix) const { \
- Object obj = \
- get(kFirstIndex + frame_ix * kElementsPerFrame + k##name##Offset); \
- return type::cast(obj); \
- } \
- \
- void FrameArray::Set##name(int frame_ix, type value) { \
- set(kFirstIndex + frame_ix * kElementsPerFrame + k##name##Offset, value); \
- }
-FRAME_ARRAY_FIELD_LIST(DEFINE_FRAME_ARRAY_ACCESSORS)
-#undef DEFINE_FRAME_ARRAY_ACCESSORS
-
-bool FrameArray::IsWasmFrame(int frame_ix) const {
- const int flags = Flags(frame_ix).value();
- return (flags & kIsWasmFrame) != 0;
-}
-
-bool FrameArray::IsAsmJsWasmFrame(int frame_ix) const {
- const int flags = Flags(frame_ix).value();
- return (flags & kIsAsmJsWasmFrame) != 0;
-}
-
-bool FrameArray::IsAnyWasmFrame(int frame_ix) const {
- return IsWasmFrame(frame_ix) || IsAsmJsWasmFrame(frame_ix);
-}
-
-int FrameArray::FrameCount() const {
- const int frame_count = Smi::ToInt(get(kFrameCountIndex));
- DCHECK_LE(0, frame_count);
- return frame_count;
-}
-
-} // namespace internal
-} // namespace v8
-
-#include "src/objects/object-macros-undef.h"
-
-#endif // V8_OBJECTS_FRAME_ARRAY_INL_H_
diff --git a/deps/v8/src/objects/frame-array.h b/deps/v8/src/objects/frame-array.h
deleted file mode 100644
index bc4676fc7b..0000000000
--- a/deps/v8/src/objects/frame-array.h
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_OBJECTS_FRAME_ARRAY_H_
-#define V8_OBJECTS_FRAME_ARRAY_H_
-
-#include "src/objects/objects.h"
-#include "src/wasm/wasm-objects.h"
-
-// Has to be the last include (doesn't have include guards):
-#include "src/objects/object-macros.h"
-
-namespace v8 {
-namespace internal {
-
-template <typename T>
-class Handle;
-
-#define FRAME_ARRAY_FIELD_LIST(V) \
- V(WasmInstance, WasmInstanceObject) \
- V(WasmFunctionIndex, Smi) \
- V(WasmCodeObject, Object) \
- V(Receiver, Object) \
- V(Function, JSFunction) \
- V(Code, AbstractCode) \
- V(Offset, Smi) \
- V(Flags, Smi) \
- V(Parameters, FixedArray)
-
-// Container object for data collected during simple stack trace captures.
-class FrameArray : public FixedArray {
- public:
-#define DECL_FRAME_ARRAY_ACCESSORS(name, type) \
- inline type name(int frame_ix) const; \
- inline void Set##name(int frame_ix, type value);
- FRAME_ARRAY_FIELD_LIST(DECL_FRAME_ARRAY_ACCESSORS)
-#undef DECL_FRAME_ARRAY_ACCESSORS
-
- inline bool IsWasmFrame(int frame_ix) const;
- inline bool IsAsmJsWasmFrame(int frame_ix) const;
- inline bool IsAnyWasmFrame(int frame_ix) const;
- inline int FrameCount() const;
-
- void ShrinkToFit(Isolate* isolate);
-
- // Flags.
- enum Flag {
- kIsWasmFrame = 1 << 0,
- kIsAsmJsWasmFrame = 1 << 1,
- kIsStrict = 1 << 2,
- kIsConstructor = 1 << 3,
- kAsmJsAtNumberConversion = 1 << 4,
- kIsAsync = 1 << 5,
- kIsPromiseAll = 1 << 6,
- kIsPromiseAny = 1 << 7
- };
-
- static Handle<FrameArray> AppendJSFrame(Handle<FrameArray> in,
- Handle<Object> receiver,
- Handle<JSFunction> function,
- Handle<AbstractCode> code, int offset,
- int flags,
- Handle<FixedArray> parameters);
- static Handle<FrameArray> AppendWasmFrame(
- Handle<FrameArray> in, Handle<WasmInstanceObject> wasm_instance,
- int wasm_function_index, wasm::WasmCode* code, int offset, int flags);
-
- DECL_CAST(FrameArray)
-
- private:
- // The underlying fixed array embodies a captured stack trace. Frame i
- // occupies indices
- //
- // kFirstIndex + 1 + [i * kElementsPerFrame, (i + 1) * kElementsPerFrame[,
- //
- // with internal offsets as below:
-
- static const int kWasmInstanceOffset = 0;
- static const int kWasmFunctionIndexOffset = 1;
- static const int kWasmCodeObjectOffset = 2;
-
- static const int kReceiverOffset = 0;
- static const int kFunctionOffset = 1;
-
- static const int kCodeOffset = 2;
- static const int kOffsetOffset = 3;
-
- static const int kFlagsOffset = 4;
-
- static const int kParametersOffset = 5;
-
- static const int kElementsPerFrame = 6;
-
- // Array layout indices.
-
- static const int kFrameCountIndex = 0;
- static const int kFirstIndex = 1;
-
- static int LengthFor(int frame_count) {
- return kFirstIndex + frame_count * kElementsPerFrame;
- }
-
- static Handle<FrameArray> EnsureSpace(Isolate* isolate,
- Handle<FrameArray> array, int length);
-
- friend class Factory;
- OBJECT_CONSTRUCTORS(FrameArray, FixedArray);
-};
-
-} // namespace internal
-} // namespace v8
-
-#include "src/objects/object-macros-undef.h"
-
-#endif // V8_OBJECTS_FRAME_ARRAY_H_
diff --git a/deps/v8/src/objects/function-kind.h b/deps/v8/src/objects/function-kind.h
index 5e17ebf054..b863f9c72f 100644
--- a/deps/v8/src/objects/function-kind.h
+++ b/deps/v8/src/objects/function-kind.h
@@ -31,7 +31,9 @@ enum FunctionKind : uint8_t {
// END constructable functions.
// BEGIN accessors
kGetterFunction,
+ kStaticGetterFunction,
kSetterFunction,
+ kStaticSetterFunction,
// END accessors
// BEGIN arrow functions
kArrowFunction,
@@ -41,20 +43,25 @@ enum FunctionKind : uint8_t {
kAsyncFunction,
// BEGIN concise methods 1
kAsyncConciseMethod,
+ kStaticAsyncConciseMethod,
// BEGIN generators
kAsyncConciseGeneratorMethod,
+ kStaticAsyncConciseGeneratorMethod,
// END concise methods 1
kAsyncGeneratorFunction,
// END async functions
kGeneratorFunction,
// BEGIN concise methods 2
kConciseGeneratorMethod,
+ kStaticConciseGeneratorMethod,
// END generators
kConciseMethod,
+ kStaticConciseMethod,
kClassMembersInitializerFunction,
+ kClassStaticInitializerFunction,
// END concise methods 2
- kLastFunctionKind = kClassMembersInitializerFunction,
+ kLastFunctionKind = kClassStaticInitializerFunction,
};
constexpr int kFunctionKindBitSize = 5;
@@ -81,7 +88,7 @@ inline bool IsAsyncGeneratorFunction(FunctionKind kind) {
inline bool IsGeneratorFunction(FunctionKind kind) {
return base::IsInRange(kind, FunctionKind::kAsyncConciseGeneratorMethod,
- FunctionKind::kConciseGeneratorMethod);
+ FunctionKind::kStaticConciseGeneratorMethod);
}
inline bool IsAsyncFunction(FunctionKind kind) {
@@ -95,31 +102,33 @@ inline bool IsResumableFunction(FunctionKind kind) {
inline bool IsConciseMethod(FunctionKind kind) {
return base::IsInRange(kind, FunctionKind::kAsyncConciseMethod,
- FunctionKind::kAsyncConciseGeneratorMethod) ||
+ FunctionKind::kStaticAsyncConciseGeneratorMethod) ||
base::IsInRange(kind, FunctionKind::kConciseGeneratorMethod,
- FunctionKind::kClassMembersInitializerFunction);
+ FunctionKind::kClassStaticInitializerFunction);
}
inline bool IsStrictFunctionWithoutPrototype(FunctionKind kind) {
return base::IsInRange(kind, FunctionKind::kGetterFunction,
FunctionKind::kAsyncArrowFunction) ||
base::IsInRange(kind, FunctionKind::kAsyncConciseMethod,
- FunctionKind::kAsyncConciseGeneratorMethod) ||
+ FunctionKind::kStaticAsyncConciseGeneratorMethod) ||
base::IsInRange(kind, FunctionKind::kConciseGeneratorMethod,
- FunctionKind::kClassMembersInitializerFunction);
+ FunctionKind::kClassStaticInitializerFunction);
}
inline bool IsGetterFunction(FunctionKind kind) {
- return kind == FunctionKind::kGetterFunction;
+ return base::IsInRange(kind, FunctionKind::kGetterFunction,
+ FunctionKind::kStaticGetterFunction);
}
inline bool IsSetterFunction(FunctionKind kind) {
- return kind == FunctionKind::kSetterFunction;
+ return base::IsInRange(kind, FunctionKind::kSetterFunction,
+ FunctionKind::kStaticSetterFunction);
}
inline bool IsAccessorFunction(FunctionKind kind) {
return base::IsInRange(kind, FunctionKind::kGetterFunction,
- FunctionKind::kSetterFunction);
+ FunctionKind::kStaticSetterFunction);
}
inline bool IsDefaultConstructor(FunctionKind kind) {
@@ -143,7 +152,8 @@ inline bool IsClassConstructor(FunctionKind kind) {
}
inline bool IsClassMembersInitializerFunction(FunctionKind kind) {
- return kind == FunctionKind::kClassMembersInitializerFunction;
+ return base::IsInRange(kind, FunctionKind::kClassMembersInitializerFunction,
+ FunctionKind::kClassStaticInitializerFunction);
}
inline bool IsConstructable(FunctionKind kind) {
@@ -151,6 +161,26 @@ inline bool IsConstructable(FunctionKind kind) {
FunctionKind::kDerivedConstructor);
}
+inline bool IsStatic(FunctionKind kind) {
+ switch (kind) {
+ case FunctionKind::kStaticGetterFunction:
+ case FunctionKind::kStaticSetterFunction:
+ case FunctionKind::kStaticConciseMethod:
+ case FunctionKind::kStaticConciseGeneratorMethod:
+ case FunctionKind::kStaticAsyncConciseMethod:
+ case FunctionKind::kStaticAsyncConciseGeneratorMethod:
+ case FunctionKind::kClassStaticInitializerFunction:
+ return true;
+ default:
+ return false;
+ }
+}
+
+inline bool BindsSuper(FunctionKind kind) {
+ return IsConciseMethod(kind) || IsAccessorFunction(kind) ||
+ IsClassConstructor(kind);
+}
+
inline const char* FunctionKind2String(FunctionKind kind) {
switch (kind) {
case FunctionKind::kNormalFunction:
@@ -161,14 +191,20 @@ inline const char* FunctionKind2String(FunctionKind kind) {
return "GeneratorFunction";
case FunctionKind::kConciseMethod:
return "ConciseMethod";
+ case FunctionKind::kStaticConciseMethod:
+ return "StaticConciseMethod";
case FunctionKind::kDerivedConstructor:
return "DerivedConstructor";
case FunctionKind::kBaseConstructor:
return "BaseConstructor";
case FunctionKind::kGetterFunction:
return "GetterFunction";
+ case FunctionKind::kStaticGetterFunction:
+ return "StaticGetterFunction";
case FunctionKind::kSetterFunction:
return "SetterFunction";
+ case FunctionKind::kStaticSetterFunction:
+ return "StaticSetterFunction";
case FunctionKind::kAsyncFunction:
return "AsyncFunction";
case FunctionKind::kModule:
@@ -177,6 +213,8 @@ inline const char* FunctionKind2String(FunctionKind kind) {
return "AsyncModule";
case FunctionKind::kClassMembersInitializerFunction:
return "ClassMembersInitializerFunction";
+ case FunctionKind::kClassStaticInitializerFunction:
+ return "ClassStaticInitializerFunction";
case FunctionKind::kDefaultBaseConstructor:
return "DefaultBaseConstructor";
case FunctionKind::kDefaultDerivedConstructor:
@@ -185,10 +223,16 @@ inline const char* FunctionKind2String(FunctionKind kind) {
return "AsyncArrowFunction";
case FunctionKind::kAsyncConciseMethod:
return "AsyncConciseMethod";
+ case FunctionKind::kStaticAsyncConciseMethod:
+ return "StaticAsyncConciseMethod";
case FunctionKind::kConciseGeneratorMethod:
return "ConciseGeneratorMethod";
+ case FunctionKind::kStaticConciseGeneratorMethod:
+ return "StaticConciseGeneratorMethod";
case FunctionKind::kAsyncConciseGeneratorMethod:
return "AsyncConciseGeneratorMethod";
+ case FunctionKind::kStaticAsyncConciseGeneratorMethod:
+ return "StaticAsyncConciseGeneratorMethod";
case FunctionKind::kAsyncGeneratorFunction:
return "AsyncGeneratorFunction";
}
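
The static kinds are inserted directly next to their non-static counterparts so that the existing base::IsInRange predicates keep working as contiguous range checks; that is why IsGetterFunction, IsAccessorFunction and the others now name a Static* enumerator as the upper bound. A small sketch of the same idea with a trimmed-down enum (the names and the IsInRange helper are illustrative stand-ins):

    #include <cassert>
    #include <cstdint>

    // Trimmed-down enum; the ordering is load-bearing, as in function-kind.h.
    enum class Kind : uint8_t {
      kGetter,
      kStaticGetter,  // must stay adjacent to kGetter
      kSetter,
      kStaticSetter,  // must stay adjacent to kSetter
      kArrow,
    };

    // Stand-in for base::IsInRange: inclusive range check on the enum value.
    constexpr bool IsInRange(Kind k, Kind lo, Kind hi) {
      return static_cast<uint8_t>(k) >= static_cast<uint8_t>(lo) &&
             static_cast<uint8_t>(k) <= static_cast<uint8_t>(hi);
    }

    constexpr bool IsGetter(Kind k) {
      return IsInRange(k, Kind::kGetter, Kind::kStaticGetter);
    }
    constexpr bool IsAccessor(Kind k) {
      return IsInRange(k, Kind::kGetter, Kind::kStaticSetter);
    }

    int main() {
      assert(IsGetter(Kind::kStaticGetter));
      assert(IsAccessor(Kind::kSetter));
      assert(!IsAccessor(Kind::kArrow));
    }

Reordering the enum would silently change what these predicates accept, which is why the patch updates every range endpoint together with the enumerators.
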
diff --git a/deps/v8/src/objects/heap-object.h b/deps/v8/src/objects/heap-object.h
index b412d7c733..e62356218d 100644
--- a/deps/v8/src/objects/heap-object.h
+++ b/deps/v8/src/objects/heap-object.h
@@ -11,6 +11,7 @@
#include "src/objects/tagged-field.h"
#include "src/roots/roots.h"
#include "src/torque/runtime-macro-shims.h"
+#include "src/torque/runtime-support.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/instance-type.h b/deps/v8/src/objects/instance-type.h
index cc65797210..9c8c8d5d63 100644
--- a/deps/v8/src/objects/instance-type.h
+++ b/deps/v8/src/objects/instance-type.h
@@ -87,6 +87,12 @@ enum InstanceType : uint16_t {
kTwoByteStringTag | kExternalStringTag | kInternalizedTag,
EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE =
kOneByteStringTag | kExternalStringTag | kInternalizedTag,
+ UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE =
+ EXTERNAL_INTERNALIZED_STRING_TYPE | kUncachedExternalStringTag |
+ kInternalizedTag,
+ UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE =
+ EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kUncachedExternalStringTag |
+ kInternalizedTag,
STRING_TYPE = INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
ONE_BYTE_STRING_TYPE =
ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
@@ -101,12 +107,10 @@ enum InstanceType : uint16_t {
EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
EXTERNAL_ONE_BYTE_STRING_TYPE =
EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
- UNCACHED_EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag |
- kUncachedExternalStringTag |
- kNotInternalizedTag,
+ UNCACHED_EXTERNAL_STRING_TYPE =
+ UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE =
- kOneByteStringTag | kExternalStringTag | kUncachedExternalStringTag |
- kNotInternalizedTag,
+ UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
THIN_STRING_TYPE = kTwoByteStringTag | kThinStringTag | kNotInternalizedTag,
THIN_ONE_BYTE_STRING_TYPE =
kOneByteStringTag | kThinStringTag | kNotInternalizedTag,
@@ -234,40 +238,41 @@ TYPED_ARRAYS(TYPED_ARRAY_IS_TYPE_FUNCTION_DECL)
// This list must contain only maps that are shared by all objects of their
// instance type.
-#define UNIQUE_INSTANCE_TYPE_MAP_LIST_GENERATOR(V, _) \
- V(_, AccessorInfoMap, accessor_info_map, AccessorInfo) \
- V(_, AccessorPairMap, accessor_pair_map, AccessorPair) \
- V(_, AllocationMementoMap, allocation_memento_map, AllocationMemento) \
- V(_, ArrayBoilerplateDescriptionMap, array_boilerplate_description_map, \
- ArrayBoilerplateDescription) \
- V(_, BreakPointMap, break_point_map, BreakPoint) \
- V(_, BreakPointInfoMap, break_point_info_map, BreakPointInfo) \
- V(_, CachedTemplateObjectMap, cached_template_object_map, \
- CachedTemplateObject) \
- V(_, CellMap, cell_map, Cell) \
- V(_, WeakCellMap, weak_cell_map, WeakCell) \
- V(_, CodeMap, code_map, Code) \
- V(_, CoverageInfoMap, coverage_info_map, CoverageInfo) \
- V(_, DebugInfoMap, debug_info_map, DebugInfo) \
- V(_, FeedbackVectorMap, feedback_vector_map, FeedbackVector) \
- V(_, FixedDoubleArrayMap, fixed_double_array_map, FixedDoubleArray) \
- V(_, FunctionTemplateInfoMap, function_template_info_map, \
- FunctionTemplateInfo) \
- V(_, HeapNumberMap, heap_number_map, HeapNumber) \
- V(_, MetaMap, meta_map, Map) \
- V(_, PreparseDataMap, preparse_data_map, PreparseData) \
- V(_, PrototypeInfoMap, prototype_info_map, PrototypeInfo) \
- V(_, SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfo) \
- V(_, SmallOrderedHashSetMap, small_ordered_hash_set_map, \
- SmallOrderedHashSet) \
- V(_, SmallOrderedHashMapMap, small_ordered_hash_map_map, \
- SmallOrderedHashMap) \
- V(_, SmallOrderedNameDictionaryMap, small_ordered_name_dictionary_map, \
- SmallOrderedNameDictionary) \
- V(_, SymbolMap, symbol_map, Symbol) \
- V(_, TransitionArrayMap, transition_array_map, TransitionArray) \
- V(_, Tuple2Map, tuple2_map, Tuple2) \
- V(_, WeakFixedArrayMap, weak_fixed_array_map, WeakFixedArray) \
+#define UNIQUE_INSTANCE_TYPE_MAP_LIST_GENERATOR(V, _) \
+ V(_, AccessorInfoMap, accessor_info_map, AccessorInfo) \
+ V(_, AccessorPairMap, accessor_pair_map, AccessorPair) \
+ V(_, AllocationMementoMap, allocation_memento_map, AllocationMemento) \
+ V(_, ArrayBoilerplateDescriptionMap, array_boilerplate_description_map, \
+ ArrayBoilerplateDescription) \
+ V(_, BreakPointMap, break_point_map, BreakPoint) \
+ V(_, BreakPointInfoMap, break_point_info_map, BreakPointInfo) \
+ V(_, CachedTemplateObjectMap, cached_template_object_map, \
+ CachedTemplateObject) \
+ V(_, CellMap, cell_map, Cell) \
+ V(_, WeakCellMap, weak_cell_map, WeakCell) \
+ V(_, CodeMap, code_map, Code) \
+ V(_, CoverageInfoMap, coverage_info_map, CoverageInfo) \
+ V(_, DebugInfoMap, debug_info_map, DebugInfo) \
+ V(_, FeedbackVectorMap, feedback_vector_map, FeedbackVector) \
+ V(_, FixedDoubleArrayMap, fixed_double_array_map, FixedDoubleArray) \
+ V(_, FunctionTemplateInfoMap, function_template_info_map, \
+ FunctionTemplateInfo) \
+ V(_, HeapNumberMap, heap_number_map, HeapNumber) \
+ V(_, MetaMap, meta_map, Map) \
+ V(_, PreparseDataMap, preparse_data_map, PreparseData) \
+ V(_, PrototypeInfoMap, prototype_info_map, PrototypeInfo) \
+ V(_, SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfo) \
+ V(_, SmallOrderedHashSetMap, small_ordered_hash_set_map, \
+ SmallOrderedHashSet) \
+ V(_, SmallOrderedHashMapMap, small_ordered_hash_map_map, \
+ SmallOrderedHashMap) \
+ V(_, SmallOrderedNameDictionaryMap, small_ordered_name_dictionary_map, \
+ SmallOrderedNameDictionary) \
+ V(_, SwissNameDictionaryMap, swiss_name_dictionary_map, SwissNameDictionary) \
+ V(_, SymbolMap, symbol_map, Symbol) \
+ V(_, TransitionArrayMap, transition_array_map, TransitionArray) \
+ V(_, Tuple2Map, tuple2_map, Tuple2) \
+ V(_, WeakFixedArrayMap, weak_fixed_array_map, WeakFixedArray) \
TORQUE_DEFINED_MAP_CSA_LIST_GENERATOR(V, _)
} // namespace internal
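
The new UNCACHED_EXTERNAL_*_INTERNALIZED_STRING_TYPE constants let the non-internalized variants be derived by OR-ing in a single kNotInternalizedTag instead of re-spelling the whole bit combination. A rough sketch of that flag-composition style with made-up bit values (the constants below are illustrative, not V8's real encodings):

    #include <cstdint>
    #include <cstdio>

    // Illustrative tag bits; the real values live in instance-type.h.
    constexpr uint16_t kOneByteStringTag          = 1 << 0;
    constexpr uint16_t kExternalStringTag         = 1 << 1;
    constexpr uint16_t kUncachedExternalStringTag = 1 << 2;
    constexpr uint16_t kNotInternalizedTag        = 1 << 3;

    // Each derived constant states only what it adds to its base type.
    constexpr uint16_t EXTERNAL_ONE_BYTE_INTERNALIZED =
        kOneByteStringTag | kExternalStringTag;
    constexpr uint16_t UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED =
        EXTERNAL_ONE_BYTE_INTERNALIZED | kUncachedExternalStringTag;
    constexpr uint16_t UNCACHED_EXTERNAL_ONE_BYTE =
        UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED | kNotInternalizedTag;

    int main() {
      // All four tag bits end up set on the fully derived constant.
      std::printf("0x%x\n", static_cast<unsigned>(UNCACHED_EXTERNAL_ONE_BYTE));
    }
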
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index e60c2d51fe..fc7e182939 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -617,15 +617,15 @@ MaybeHandle<Object> Intl::LegacyUnwrapReceiver(Isolate* isolate,
Handle<JSReceiver> receiver,
Handle<JSFunction> constructor,
bool has_initialized_slot) {
- Handle<Object> obj_is_instance_of;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, obj_is_instance_of,
- Object::InstanceOf(isolate, receiver, constructor),
- Object);
- bool is_instance_of = obj_is_instance_of->BooleanValue(isolate);
+ Handle<Object> obj_ordinary_has_instance;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, obj_ordinary_has_instance,
+ Object::OrdinaryHasInstance(isolate, constructor, receiver), Object);
+ bool ordinary_has_instance = obj_ordinary_has_instance->BooleanValue(isolate);
// 2. If receiver does not have an [[Initialized...]] internal slot
- // and ? InstanceofOperator(receiver, constructor) is true, then
- if (!has_initialized_slot && is_instance_of) {
+ // and ? OrdinaryHasInstance(constructor, receiver) is true, then
+ if (!has_initialized_slot && ordinary_has_instance) {
// 2. a. Let new_receiver be ? Get(receiver, %Intl%.[[FallbackSymbol]]).
Handle<Object> new_receiver;
ASSIGN_RETURN_ON_EXCEPTION(
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
index 8a0ef13593..12f8ef7796 100644
--- a/deps/v8/src/objects/js-array-buffer-inl.h
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -27,6 +27,10 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(JSArrayBufferView)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSTypedArray)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSDataView)
+ACCESSORS(JSTypedArray, base_pointer, Object, kBasePointerOffset)
+RELEASE_ACQUIRE_ACCESSORS(JSTypedArray, base_pointer, Object,
+ kBasePointerOffset)
+
void JSArrayBuffer::AllocateExternalPointerEntries(Isolate* isolate) {
InitExternalPointerField(kBackingStoreOffset, isolate);
}
@@ -253,15 +257,22 @@ void* JSTypedArray::DataPtr() {
// so that the addition with |external_pointer| (which already contains
// compensated offset value) will decompress the tagged value.
// See JSTypedArray::ExternalPointerCompensationForOnHeapArray() for details.
+ STATIC_ASSERT(kOffHeapDataPtrEqualsExternalPointer);
return reinterpret_cast<void*>(external_pointer() +
static_cast<Tagged_t>(base_pointer().ptr()));
}
void JSTypedArray::SetOffHeapDataPtr(Isolate* isolate, void* base,
Address offset) {
- set_base_pointer(Smi::zero(), SKIP_WRITE_BARRIER);
Address address = reinterpret_cast<Address>(base) + offset;
set_external_pointer(isolate, address);
+ // This is the only spot in which the `base_pointer` field can be mutated
+ // after object initialization. Note this can happen at most once, when
+ // `JSTypedArray::GetBuffer` transitions from an on- to off-heap
+ // representation.
+ // To play well with Turbofan concurrency requirements, `base_pointer` is set
+ // with a release store, after external_pointer has been set.
+ set_base_pointer(Smi::zero(), kReleaseStore, SKIP_WRITE_BARRIER);
DCHECK_EQ(address, reinterpret_cast<Address>(DataPtr()));
}
@@ -274,10 +285,17 @@ void JSTypedArray::SetOnHeapDataPtr(Isolate* isolate, HeapObject base,
}
bool JSTypedArray::is_on_heap() const {
+ // Keep synced with `is_on_heap(AcquireLoadTag)`.
+ DisallowGarbageCollection no_gc;
+ return base_pointer() != Smi::zero();
+}
+
+bool JSTypedArray::is_on_heap(AcquireLoadTag tag) const {
+ // Keep synced with `is_on_heap()`.
+ // Note: For Turbofan concurrency requirements, it's important that this
+ // function reads only `base_pointer`.
DisallowGarbageCollection no_gc;
- // Checking that buffer()->backing_store() is not nullptr is not sufficient;
- // it will be nullptr when byte_length is 0 as well.
- return base_pointer() == elements();
+ return base_pointer(tag) != Smi::zero();
}
// static
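
The comment in SetOffHeapDataPtr describes a publication ordering: external_pointer is written first, then base_pointer is cleared with a release store, so a reader that acquire-loads base_pointer (is_on_heap(AcquireLoadTag)) and sees Smi::zero() is guaranteed to also see the new external_pointer. A minimal standalone sketch of that ordering with std::atomic (the field names mirror the hunk, everything else is illustrative):

    #include <atomic>
    #include <cassert>
    #include <cstdint>
    #include <thread>

    struct TypedArraySketch {
      // Plain field written before publication, like external_pointer.
      uintptr_t external_pointer = 0;
      // Publication flag, like base_pointer; nonzero means "on heap".
      std::atomic<uintptr_t> base_pointer{1};

      void SetOffHeapDataPtr(uintptr_t address) {
        external_pointer = address;
        // Release store: publishes external_pointer to acquire-loading readers.
        base_pointer.store(0, std::memory_order_release);
      }

      bool IsOnHeapAcquire() const {
        return base_pointer.load(std::memory_order_acquire) != 0;
      }
    };

    int main() {
      TypedArraySketch t;
      std::thread writer([&] { t.SetOffHeapDataPtr(0x1000); });
      // Once the acquire load observes the off-heap state, the address written
      // before the release store must be visible too.
      while (t.IsOnHeapAcquire()) {
      }
      assert(t.external_pointer == 0x1000);
      writer.join();
    }
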
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
index 6a61ce4385..0c259ddece 100644
--- a/deps/v8/src/objects/js-array-buffer.h
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -254,7 +254,10 @@ class JSTypedArray
static constexpr size_t kMaxLength = v8::TypedArray::kMaxLength;
// [length]: length of typed array in elements.
- DECL_PRIMITIVE_ACCESSORS(length, size_t)
+ DECL_PRIMITIVE_GETTER(length, size_t)
+
+ DECL_GETTER(base_pointer, Object)
+ DECL_ACQUIRE_GETTER(base_pointer, Object)
// ES6 9.4.5.3
V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnProperty(
@@ -272,6 +275,10 @@ class JSTypedArray
// When sandbox is not enabled, it's a no-op.
inline void AllocateExternalPointerEntries(Isolate* isolate);
+ // The `DataPtr` is `base_ptr + external_pointer`, and `base_ptr` is nullptr
+ // for off-heap typed arrays.
+ static constexpr bool kOffHeapDataPtrEqualsExternalPointer = true;
+
// Use with care: returns raw pointer into heap.
inline void* DataPtr();
@@ -281,6 +288,7 @@ class JSTypedArray
// Whether the buffer's backing store is on-heap or off-heap.
inline bool is_on_heap() const;
+ inline bool is_on_heap(AcquireLoadTag tag) const;
// Note: this is a pointer compression specific optimization.
// Normally, on-heap typed arrays contain HeapObject value in |base_pointer|
@@ -335,11 +343,16 @@ class JSTypedArray
private:
friend class Deserializer;
+ friend class Factory;
+
+ DECL_PRIMITIVE_SETTER(length, size_t)
- // [external_pointer]: TODO(v8:4153)
DECL_GETTER(external_pointer, Address)
DECL_GETTER(external_pointer_raw, ExternalPointer_t)
+ DECL_SETTER(base_pointer, Object)
+ DECL_RELEASE_SETTER(base_pointer, Object)
+
inline void set_external_pointer(Isolate* isolate, Address value);
TQ_OBJECT_CONSTRUCTORS(JSTypedArray)
diff --git a/deps/v8/src/objects/js-array-buffer.tq b/deps/v8/src/objects/js-array-buffer.tq
index 6dcf03bd05..72e74cc99b 100644
--- a/deps/v8/src/objects/js-array-buffer.tq
+++ b/deps/v8/src/objects/js-array-buffer.tq
@@ -45,16 +45,47 @@ extern class JSArrayBufferView extends JSObject {
extern class JSTypedArray extends JSArrayBufferView {
length: uintptr;
external_pointer: ExternalPointer;
- // [base_pointer]: TODO(v8:4153)
base_pointer: ByteArray|Smi;
}
-extern operator '.external_pointer_ptr' macro
-LoadJSTypedArrayExternalPointerPtr(JSTypedArray): RawPtr;
-extern operator '.external_pointer_ptr=' macro
-StoreJSTypedArrayExternalPointerPtr(JSTypedArray, RawPtr);
-
@generateCppClass
extern class JSDataView extends JSArrayBufferView {
data_pointer: ExternalPointer;
}
+
+@abstract
+@doNotGenerateCast extern class TypedArrayConstructor extends JSFunction
+ generates 'TNode<JSFunction>';
+@doNotGenerateCast
+extern class Uint8TypedArrayConstructor extends TypedArrayConstructor
+ generates 'TNode<JSFunction>';
+@doNotGenerateCast
+extern class Int8TypedArrayConstructor extends TypedArrayConstructor
+ generates 'TNode<JSFunction>';
+@doNotGenerateCast
+extern class Uint16TypedArrayConstructor extends TypedArrayConstructor
+ generates 'TNode<JSFunction>';
+@doNotGenerateCast
+extern class Int16TypedArrayConstructor extends TypedArrayConstructor
+ generates 'TNode<JSFunction>';
+@doNotGenerateCast
+extern class Uint32TypedArrayConstructor extends TypedArrayConstructor
+ generates 'TNode<JSFunction>';
+@doNotGenerateCast
+extern class Int32TypedArrayConstructor extends TypedArrayConstructor
+ generates 'TNode<JSFunction>';
+@doNotGenerateCast
+extern class Float32TypedArrayConstructor extends TypedArrayConstructor
+ generates 'TNode<JSFunction>';
+@doNotGenerateCast
+extern class Float64TypedArrayConstructor extends TypedArrayConstructor
+ generates 'TNode<JSFunction>';
+@doNotGenerateCast
+extern class Uint8ClampedTypedArrayConstructor extends TypedArrayConstructor
+ generates 'TNode<JSFunction>';
+@doNotGenerateCast
+extern class Biguint64TypedArrayConstructor extends TypedArrayConstructor
+ generates 'TNode<JSFunction>';
+@doNotGenerateCast
+extern class Bigint64TypedArrayConstructor extends TypedArrayConstructor
+ generates 'TNode<JSFunction>';
diff --git a/deps/v8/src/objects/js-array-inl.h b/deps/v8/src/objects/js-array-inl.h
index 1ff7dcb123..b53a8919a5 100644
--- a/deps/v8/src/objects/js-array-inl.h
+++ b/deps/v8/src/objects/js-array-inl.h
@@ -21,7 +21,19 @@ OBJECT_CONSTRUCTORS_IMPL(JSArrayIterator, JSObject)
CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSArrayIterator)
-ACCESSORS(JSArray, length, Object, kLengthOffset)
+DEF_GETTER(JSArray, length, Object) {
+ return TaggedField<Object, kLengthOffset>::load(isolate, *this);
+}
+
+void JSArray::set_length(Object value, WriteBarrierMode mode) {
+ // Note the relaxed atomic store.
+ TaggedField<Object, kLengthOffset>::Relaxed_Store(*this, value);
+ CONDITIONAL_WRITE_BARRIER(*this, kLengthOffset, value, mode);
+}
+
+Object JSArray::length(IsolateRoot isolate, RelaxedLoadTag tag) const {
+ return TaggedField<Object, kLengthOffset>::Relaxed_Load(isolate, *this);
+}
void JSArray::set_length(Smi length) {
// Don't need a write barrier for a Smi.
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
index cd1d2d800d..5a7da797cc 100644
--- a/deps/v8/src/objects/js-array.h
+++ b/deps/v8/src/objects/js-array.h
@@ -25,6 +25,16 @@ class JSArray : public JSObject {
public:
// [length]: The length property.
DECL_ACCESSORS(length, Object)
+ DECL_RELAXED_GETTER(length, Object)
+
+ // Acquire/release semantics on this field are explicitly forbidden to avoid
+ // confusion, since the default setter uses relaxed semantics. If
+ // acquire/release semantics ever become necessary, the default setter should
+ // be reverted to non-atomic behavior, and setters with explicit tags
+ // introduced and used when required.
+ Object length(IsolateRoot isolate, AcquireLoadTag tag) const = delete;
+ void set_length(Object value, ReleaseStoreTag tag,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER) = delete;
// Overload the length setter to skip write barrier when the length
// is set to a smi. This matches the set function on FixedArray.
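
Declaring the AcquireLoadTag getter and ReleaseStoreTag setter as deleted (rather than simply omitting them) turns an accidental acquire/release use of length into a compile error instead of letting overload resolution pick some other accessor. A tiny illustration of the same C++ idiom (the types here are illustrative):

    #include <cstdio>

    struct AcquireLoadTag {};
    struct ReleaseStoreTag {};

    class ArraySketch {
     public:
      int length() const { return length_; }   // default accessor
      void set_length(int v) { length_ = v; }

      // Explicitly forbidden, as in js-array.h: using a tag fails to compile.
      int length(AcquireLoadTag) const = delete;
      void set_length(int v, ReleaseStoreTag) = delete;

     private:
      int length_ = 0;
    };

    int main() {
      ArraySketch a;
      a.set_length(3);
      std::printf("%d\n", a.length());
      // a.length(AcquireLoadTag{});  // error: use of deleted function
    }
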
diff --git a/deps/v8/src/objects/js-array.tq b/deps/v8/src/objects/js-array.tq
index b18f5bafac..8e238b9f8b 100644
--- a/deps/v8/src/objects/js-array.tq
+++ b/deps/v8/src/objects/js-array.tq
@@ -31,6 +31,10 @@ extern class JSArray extends JSObject {
length: Number;
}
+@doNotGenerateCast
+extern class JSArrayConstructor extends JSFunction
+ generates 'TNode<JSFunction>';
+
macro NewJSArray(implicit context: Context)(
map: Map, elements: FixedArrayBase): JSArray {
return new JSArray{
diff --git a/deps/v8/src/objects/js-date-time-format.cc b/deps/v8/src/objects/js-date-time-format.cc
index c26b77e2f5..42c9c6f31c 100644
--- a/deps/v8/src/objects/js-date-time-format.cc
+++ b/deps/v8/src/objects/js-date-time-format.cc
@@ -437,7 +437,7 @@ std::string CanonicalizeTimeZoneID(const std::string& input) {
title[1] = 'S';
}
return title;
- } else if (memcmp(upper.c_str(), "SYSTEMV/", 8) == 0) {
+ } else if (strncmp(upper.c_str(), "SYSTEMV/", 8) == 0) {
upper.replace(0, 8, "SystemV/");
return upper;
}
@@ -1107,6 +1107,7 @@ icu::UnicodeString ReplaceHourCycleInPattern(icu::UnicodeString pattern,
}
bool replace = true;
icu::UnicodeString result;
+ char16_t last = u'\0';
for (int32_t i = 0; i < pattern.length(); i++) {
char16_t ch = pattern.charAt(i);
switch (ch) {
@@ -1121,12 +1122,17 @@ icu::UnicodeString ReplaceHourCycleInPattern(icu::UnicodeString pattern,
case 'K':
V8_FALLTHROUGH;
case 'k':
+ // If the previous field is a day, add a space before the hour.
+ if (replace && last == u'd') {
+ result.append(' ');
+ }
result.append(replace ? replacement : ch);
break;
default:
result.append(ch);
break;
}
+ last = ch;
}
return result;
}
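
The new `last` bookkeeping covers patterns in which a day field is immediately followed by an hour field: when the hour symbol is replaced, a space is inserted so the two fields do not run together. A standalone sketch of the loop on std::u16string with a fixed replacement character (a simplification of the function above, not the ICU-backed version; the real code also toggles `replace` inside quoted literal sections):

    #include <cassert>
    #include <string>

    // Replace hour pattern characters with `replacement`, inserting a space
    // when the previous character was a day field ('d').
    std::u16string ReplaceHourCycle(const std::u16string& pattern,
                                    char16_t replacement) {
      std::u16string result;
      char16_t last = u'\0';
      const bool replace = true;
      for (char16_t ch : pattern) {
        switch (ch) {
          case u'h':
          case u'H':
          case u'k':
          case u'K':
            if (replace && last == u'd') result.push_back(u' ');
            result.push_back(replace ? replacement : ch);
            break;
          default:
            result.push_back(ch);
            break;
        }
        last = ch;
      }
      return result;
    }

    int main() {
      assert(ReplaceHourCycle(u"dH:mm", u'h') == u"d h:mm");
      assert(ReplaceHourCycle(u"EEE, HH:mm", u'h') == u"EEE, hh:mm");
      return 0;
    }
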
diff --git a/deps/v8/src/objects/js-display-names.cc b/deps/v8/src/objects/js-display-names.cc
index 5c444b98ba..305aadb08e 100644
--- a/deps/v8/src/objects/js-display-names.cc
+++ b/deps/v8/src/objects/js-display-names.cc
@@ -124,8 +124,15 @@ class LanguageNames : public LocaleDisplayNamesCommon {
Maybe<icu::UnicodeString> of(Isolate* isolate,
const char* code) const override {
UErrorCode status = U_ZERO_ERROR;
+ // 1.a If code does not match the unicode_language_id production, throw a
+ // RangeError exception.
+
+ // 1.b If IsStructurallyValidLanguageTag(code) is false, throw a RangeError
+ // exception.
icu::Locale l =
icu::Locale(icu::Locale::forLanguageTag(code, status).getBaseName());
+ // 1.c Set code to CanonicalizeUnicodeLocaleId(code).
+ l.canonicalize(status);
std::string checked = l.toLanguageTag<std::string>(status);
if (U_FAILURE(status)) {
diff --git a/deps/v8/src/objects/js-function-inl.h b/deps/v8/src/objects/js-function-inl.h
index 8a4980a5f7..969e756156 100644
--- a/deps/v8/src/objects/js-function-inl.h
+++ b/deps/v8/src/objects/js-function-inl.h
@@ -29,6 +29,8 @@ OBJECT_CONSTRUCTORS_IMPL(JSFunction, JSFunctionOrBoundFunction)
CAST_ACCESSOR(JSFunction)
ACCESSORS(JSFunction, raw_feedback_cell, FeedbackCell, kFeedbackCellOffset)
+RELEASE_ACQUIRE_ACCESSORS(JSFunction, raw_feedback_cell, FeedbackCell,
+ kFeedbackCellOffset)
FeedbackVector JSFunction::feedback_vector() const {
DCHECK(has_feedback_vector());
@@ -50,7 +52,7 @@ void JSFunction::ClearOptimizationMarker() {
}
bool JSFunction::ChecksOptimizationMarker() {
- return code().checks_optimization_marker();
+ return code(kAcquireLoad).checks_optimization_marker();
}
bool JSFunction::IsMarkedForOptimization() {
@@ -64,6 +66,20 @@ bool JSFunction::IsMarkedForConcurrentOptimization() {
OptimizationMarker::kCompileOptimizedConcurrent;
}
+void JSFunction::SetInterruptBudget() {
+ if (!has_feedback_vector()) {
+ DCHECK(shared().is_compiled());
+ int budget = FLAG_budget_for_feedback_vector_allocation;
+ if (FLAG_feedback_allocation_on_bytecode_size) {
+ budget = shared().GetBytecodeArray(GetIsolate()).length() *
+ FLAG_scale_factor_for_feedback_allocation;
+ }
+ raw_feedback_cell().set_interrupt_budget(budget);
+ return;
+ }
+ FeedbackVector::SetInterruptBudget(raw_feedback_cell());
+}
+
void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
Isolate* isolate = GetIsolate();
if (!isolate->concurrent_recompilation_enabled() ||
@@ -72,7 +88,7 @@ void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
}
DCHECK(!is_compiled() || ActiveTierIsIgnition() || ActiveTierIsNCI() ||
- ActiveTierIsMidtierTurboprop());
+ ActiveTierIsMidtierTurboprop() || ActiveTierIsBaseline());
DCHECK(!ActiveTierIsTurbofan());
DCHECK(shared().IsInterpreted());
DCHECK(shared().allows_lazy_compilation() ||
@@ -116,7 +132,7 @@ AbstractCode JSFunction::abstract_code(LocalIsolate* isolate) {
if (ActiveTierIsIgnition()) {
return AbstractCode::cast(shared().GetBytecodeArray(isolate));
} else {
- return AbstractCode::cast(code());
+ return AbstractCode::cast(code(kAcquireLoad));
}
}
@@ -134,10 +150,7 @@ void JSFunction::set_code(Code value) {
#endif
}
-void JSFunction::set_code_no_write_barrier(Code value) {
- DCHECK(!ObjectInYoungGeneration(value));
- RELAXED_WRITE_FIELD(*this, kCodeOffset, value);
-}
+RELEASE_ACQUIRE_ACCESSORS(JSFunction, code, Code, kCodeOffset)
// TODO(ishell): Why relaxed read but release store?
DEF_GETTER(JSFunction, shared, SharedFunctionInfo) {
@@ -253,7 +266,7 @@ DEF_GETTER(JSFunction, prototype, Object) {
}
bool JSFunction::is_compiled() const {
- return code().builtin_index() != Builtins::kCompileLazy &&
+ return code(kAcquireLoad).builtin_index() != Builtins::kCompileLazy &&
shared().is_compiled();
}
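
SetInterruptBudget, added in the hunk above, chooses the pre-feedback-vector budget from one of two sources: the fixed FLAG_budget_for_feedback_vector_allocation, or, when FLAG_feedback_allocation_on_bytecode_size is set, the bytecode length scaled by FLAG_scale_factor_for_feedback_allocation. A minimal sketch of just that selection, with the flags as plain parameters (names mirror the hunk; this is not the V8 implementation):

    #include <cstdio>

    // Stand-ins for the flags referenced in the hunk.
    struct Flags {
      int budget_for_feedback_vector_allocation = 1000;
      bool feedback_allocation_on_bytecode_size = false;
      int scale_factor_for_feedback_allocation = 8;
    };

    // Budget used while the function has no feedback vector yet; once one
    // exists, FeedbackVector::SetInterruptBudget takes over in the real code.
    int InitialInterruptBudget(const Flags& flags, int bytecode_length) {
      if (flags.feedback_allocation_on_bytecode_size) {
        return bytecode_length * flags.scale_factor_for_feedback_allocation;
      }
      return flags.budget_for_feedback_vector_allocation;
    }

    int main() {
      Flags flags;
      std::printf("%d\n", InitialInterruptBudget(flags, 120));  // 1000
      flags.feedback_allocation_on_bytecode_size = true;
      std::printf("%d\n", InitialInterruptBudget(flags, 120));  // 960
    }
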
diff --git a/deps/v8/src/objects/js-function.cc b/deps/v8/src/objects/js-function.cc
index 2e657d2826..3fef9b665f 100644
--- a/deps/v8/src/objects/js-function.cc
+++ b/deps/v8/src/objects/js-function.cc
@@ -19,27 +19,22 @@ namespace v8 {
namespace internal {
CodeKinds JSFunction::GetAttachedCodeKinds() const {
- CodeKinds result;
-
// Note: There's a special case when bytecode has been aged away. After
// flushing the bytecode, the JSFunction will still have the interpreter
// entry trampoline attached, but the bytecode is no longer available.
- if (code().is_interpreter_trampoline_builtin()) {
- result |= CodeKindFlag::INTERPRETED_FUNCTION;
- }
-
- const CodeKind kind = code().kind();
- if (!CodeKindIsOptimizedJSFunction(kind) ||
- code().marked_for_deoptimization()) {
- DCHECK_EQ((result & ~kJSFunctionCodeKindsMask), 0);
- return result;
+ Code code = this->code(kAcquireLoad);
+ if (code.is_interpreter_trampoline_builtin()) {
+ return CodeKindFlag::INTERPRETED_FUNCTION;
}
- DCHECK(CodeKindIsOptimizedJSFunction(kind));
- result |= CodeKindToCodeKindFlag(kind);
+ const CodeKind kind = code.kind();
+ if (!CodeKindIsJSFunction(kind)) return {};
- DCHECK_EQ((result & ~kJSFunctionCodeKindsMask), 0);
- return result;
+ if (CodeKindIsOptimizedJSFunction(kind) && code.marked_for_deoptimization()) {
+ // Nothing is attached.
+ return {};
+ }
+ return CodeKindToCodeKindFlag(kind);
}
CodeKinds JSFunction::GetAvailableCodeKinds() const {
@@ -52,16 +47,21 @@ CodeKinds JSFunction::GetAvailableCodeKinds() const {
}
}
- if ((result & kOptimizedJSFunctionCodeKindsMask) == 0) {
- // Check the optimized code cache.
- if (has_feedback_vector() && feedback_vector().has_optimized_code() &&
- !feedback_vector().optimized_code().marked_for_deoptimization()) {
- Code code = feedback_vector().optimized_code();
- DCHECK(CodeKindIsOptimizedJSFunction(code.kind()));
- result |= CodeKindToCodeKindFlag(code.kind());
+ if ((result & CodeKindFlag::BASELINE) == 0) {
+ // The SharedFunctionInfo could have attached baseline code.
+ if (shared().HasBaselineData()) {
+ result |= CodeKindFlag::BASELINE;
}
}
+ // Check the optimized code cache.
+ if (has_feedback_vector() && feedback_vector().has_optimized_code() &&
+ !feedback_vector().optimized_code().marked_for_deoptimization()) {
+ Code code = feedback_vector().optimized_code();
+ DCHECK(CodeKindIsOptimizedJSFunction(code.kind()));
+ result |= CodeKindToCodeKindFlag(code.kind());
+ }
+
DCHECK_EQ((result & ~kJSFunctionCodeKindsMask), 0);
return result;
}
@@ -76,6 +76,11 @@ bool JSFunction::HasAvailableOptimizedCode() const {
return (result & kOptimizedJSFunctionCodeKindsMask) != 0;
}
+bool JSFunction::HasAttachedCodeKind(CodeKind kind) const {
+ CodeKinds result = GetAttachedCodeKinds();
+ return (result & CodeKindToCodeKindFlag(kind)) != 0;
+}
+
bool JSFunction::HasAvailableCodeKind(CodeKind kind) const {
CodeKinds result = GetAvailableCodeKinds();
return (result & CodeKindToCodeKindFlag(kind)) != 0;
@@ -93,6 +98,9 @@ bool HighestTierOf(CodeKinds kinds, CodeKind* highest_tier) {
} else if ((kinds & CodeKindFlag::TURBOPROP) != 0) {
*highest_tier = CodeKind::TURBOPROP;
return true;
+ } else if ((kinds & CodeKindFlag::BASELINE) != 0) {
+ *highest_tier = CodeKind::BASELINE;
+ return true;
} else if ((kinds & CodeKindFlag::NATIVE_CONTEXT_INDEPENDENT) != 0) {
*highest_tier = CodeKind::NATIVE_CONTEXT_INDEPENDENT;
return true;
@@ -107,51 +115,66 @@ bool HighestTierOf(CodeKinds kinds, CodeKind* highest_tier) {
} // namespace
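
HighestTierOf checks the code-kind flags in a fixed priority order, and this patch slots BASELINE in between TURBOPROP and NATIVE_CONTEXT_INDEPENDENT, so GetActiveTier always reports the most optimized kind available. A sketch of that priority scan over a bit set (the flag values below are illustrative):

    #include <cstdint>
    #include <cstdio>

    enum CodeKindFlag : uint32_t {
      INTERPRETED_FUNCTION       = 1 << 0,
      NATIVE_CONTEXT_INDEPENDENT = 1 << 1,
      BASELINE                   = 1 << 2,
      TURBOPROP                  = 1 << 3,
      TURBOFAN                   = 1 << 4,
    };

    // Highest tier wins: scan from most to least optimized.
    const char* HighestTier(uint32_t kinds) {
      if (kinds & TURBOFAN) return "TURBOFAN";
      if (kinds & TURBOPROP) return "TURBOPROP";
      if (kinds & BASELINE) return "BASELINE";
      if (kinds & NATIVE_CONTEXT_INDEPENDENT) return "NATIVE_CONTEXT_INDEPENDENT";
      if (kinds & INTERPRETED_FUNCTION) return "INTERPRETED_FUNCTION";
      return "none";
    }

    int main() {
      std::printf("%s\n", HighestTier(INTERPRETED_FUNCTION | BASELINE));  // BASELINE
      std::printf("%s\n", HighestTier(BASELINE | TURBOFAN));              // TURBOFAN
    }
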
bool JSFunction::ActiveTierIsIgnition() const {
- CodeKind highest_tier;
- if (!HighestTierOf(GetAvailableCodeKinds(), &highest_tier)) return false;
- bool result = (highest_tier == CodeKind::INTERPRETED_FUNCTION);
- DCHECK_IMPLIES(result,
- code().is_interpreter_trampoline_builtin() ||
- (CodeKindIsOptimizedJSFunction(code().kind()) &&
- code().marked_for_deoptimization()) ||
- (code().builtin_index() == Builtins::kCompileLazy &&
- shared().IsInterpreted()));
+ if (!shared().HasBytecodeArray()) return false;
+ bool result = (GetActiveTier() == CodeKind::INTERPRETED_FUNCTION);
+#ifdef DEBUG
+ Code code = this->code(kAcquireLoad);
+ DCHECK_IMPLIES(result, code.is_interpreter_trampoline_builtin() ||
+ (CodeKindIsOptimizedJSFunction(code.kind()) &&
+ code.marked_for_deoptimization()) ||
+ (code.builtin_index() == Builtins::kCompileLazy &&
+ shared().IsInterpreted()));
+#endif // DEBUG
return result;
}
-bool JSFunction::ActiveTierIsTurbofan() const {
+CodeKind JSFunction::GetActiveTier() const {
CodeKind highest_tier;
- if (!HighestTierOf(GetAvailableCodeKinds(), &highest_tier)) return false;
- return highest_tier == CodeKind::TURBOFAN;
+ DCHECK(shared().is_compiled());
+ HighestTierOf(GetAvailableCodeKinds(), &highest_tier);
+ DCHECK(highest_tier == CodeKind::TURBOFAN ||
+ highest_tier == CodeKind::BASELINE ||
+ highest_tier == CodeKind::TURBOPROP ||
+ highest_tier == CodeKind::NATIVE_CONTEXT_INDEPENDENT ||
+ highest_tier == CodeKind::INTERPRETED_FUNCTION);
+ return highest_tier;
+}
+
+bool JSFunction::ActiveTierIsTurbofan() const {
+ if (!shared().HasBytecodeArray()) return false;
+ return GetActiveTier() == CodeKind::TURBOFAN;
}
bool JSFunction::ActiveTierIsNCI() const {
- CodeKind highest_tier;
- if (!HighestTierOf(GetAvailableCodeKinds(), &highest_tier)) return false;
- return highest_tier == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
+ if (!shared().HasBytecodeArray()) return false;
+ return GetActiveTier() == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
+}
+
+bool JSFunction::ActiveTierIsBaseline() const {
+ return GetActiveTier() == CodeKind::BASELINE;
+}
+
+bool JSFunction::ActiveTierIsIgnitionOrBaseline() const {
+ return ActiveTierIsIgnition() || ActiveTierIsBaseline();
}
bool JSFunction::ActiveTierIsToptierTurboprop() const {
- CodeKind highest_tier;
- if (!FLAG_turboprop) return false;
- if (!HighestTierOf(GetAvailableCodeKinds(), &highest_tier)) return false;
- return highest_tier == CodeKind::TURBOPROP && !FLAG_turboprop_as_midtier;
+ if (!FLAG_turboprop_as_toptier) return false;
+ if (!shared().HasBytecodeArray()) return false;
+ return GetActiveTier() == CodeKind::TURBOPROP && FLAG_turboprop_as_toptier;
}
bool JSFunction::ActiveTierIsMidtierTurboprop() const {
- CodeKind highest_tier;
- if (!FLAG_turboprop_as_midtier) return false;
- if (!HighestTierOf(GetAvailableCodeKinds(), &highest_tier)) return false;
- return highest_tier == CodeKind::TURBOPROP && FLAG_turboprop_as_midtier;
+ if (!FLAG_turboprop) return false;
+ if (!shared().HasBytecodeArray()) return false;
+ return GetActiveTier() == CodeKind::TURBOPROP && !FLAG_turboprop_as_toptier;
}
CodeKind JSFunction::NextTier() const {
- if (V8_UNLIKELY(FLAG_turbo_nci_as_midtier && ActiveTierIsIgnition())) {
- return CodeKind::NATIVE_CONTEXT_INDEPENDENT;
- } else if (V8_UNLIKELY(FLAG_turboprop) && ActiveTierIsMidtierTurboprop()) {
+ if (V8_UNLIKELY(FLAG_turboprop) && ActiveTierIsMidtierTurboprop()) {
return CodeKind::TURBOFAN;
} else if (V8_UNLIKELY(FLAG_turboprop)) {
- DCHECK(ActiveTierIsIgnition());
+ DCHECK(ActiveTierIsIgnitionOrBaseline());
return CodeKind::TURBOPROP;
}
return CodeKind::TURBOFAN;
@@ -255,18 +278,35 @@ Handle<NativeContext> JSFunction::GetFunctionRealm(
}
// static
-void JSFunction::EnsureClosureFeedbackCellArray(Handle<JSFunction> function) {
+void JSFunction::EnsureClosureFeedbackCellArray(
+ Handle<JSFunction> function, bool reset_budget_for_feedback_allocation) {
Isolate* const isolate = function->GetIsolate();
DCHECK(function->shared().is_compiled());
DCHECK(function->shared().HasFeedbackMetadata());
- if (function->has_closure_feedback_cell_array() ||
- function->has_feedback_vector()) {
- return;
- }
if (function->shared().HasAsmWasmData()) return;
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
DCHECK(function->shared().HasBytecodeArray());
+
+ bool has_closure_feedback_cell_array =
+ (function->has_closure_feedback_cell_array() ||
+ function->has_feedback_vector());
+ // Initialize the interrupt budget to the feedback vector allocation budget
+ // when initializing the feedback cell for the first time or after a bytecode
+ // flush. We retain the closure feedback cell array on bytecode flush, so
+ // reset_budget_for_feedback_allocation is used to reset the budget in these
+ // cases. When using a fixed allocation budget, we reset it on a bytecode
+ // flush so no additional initialization is required here.
+ if (V8_UNLIKELY(FLAG_feedback_allocation_on_bytecode_size) &&
+ (reset_budget_for_feedback_allocation ||
+ !has_closure_feedback_cell_array)) {
+ function->SetInterruptBudget();
+ }
+
+ if (has_closure_feedback_cell_array) {
+ return;
+ }
+
Handle<HeapObject> feedback_cell_array =
ClosureFeedbackCellArray::New(isolate, shared);
// The many-closures cell is used as a way to specify that there is no
@@ -278,9 +318,11 @@ void JSFunction::EnsureClosureFeedbackCellArray(Handle<JSFunction> function) {
if (function->raw_feedback_cell() == isolate->heap()->many_closures_cell()) {
Handle<FeedbackCell> feedback_cell =
isolate->factory()->NewOneClosureCell(feedback_cell_array);
- function->set_raw_feedback_cell(*feedback_cell);
+ function->set_raw_feedback_cell(*feedback_cell, kReleaseStore);
+ function->SetInterruptBudget();
} else {
- function->raw_feedback_cell().set_value(*feedback_cell_array);
+ function->raw_feedback_cell().set_value(*feedback_cell_array,
+ kReleaseStore);
}
}
@@ -296,7 +338,7 @@ void JSFunction::EnsureFeedbackVector(Handle<JSFunction> function,
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
DCHECK(function->shared().HasBytecodeArray());
- EnsureClosureFeedbackCellArray(function);
+ EnsureClosureFeedbackCellArray(function, false);
Handle<ClosureFeedbackCellArray> closure_feedback_cell_array =
handle(function->closure_feedback_cell_array(), isolate);
Handle<HeapObject> feedback_vector = FeedbackVector::New(
@@ -306,13 +348,14 @@ void JSFunction::EnsureFeedbackVector(Handle<JSFunction> function,
// for more details.
DCHECK(function->raw_feedback_cell() !=
isolate->heap()->many_closures_cell());
- function->raw_feedback_cell().set_value(*feedback_vector);
- function->raw_feedback_cell().SetInterruptBudget();
+ function->raw_feedback_cell().set_value(*feedback_vector, kReleaseStore);
+ function->SetInterruptBudget();
}
// static
-void JSFunction::InitializeFeedbackCell(Handle<JSFunction> function,
- IsCompiledScope* is_compiled_scope) {
+void JSFunction::InitializeFeedbackCell(
+ Handle<JSFunction> function, IsCompiledScope* is_compiled_scope,
+ bool reset_budget_for_feedback_allocation) {
Isolate* const isolate = function->GetIsolate();
if (function->has_feedback_vector()) {
@@ -338,7 +381,8 @@ void JSFunction::InitializeFeedbackCell(Handle<JSFunction> function,
if (needs_feedback_vector) {
EnsureFeedbackVector(function, is_compiled_scope);
} else {
- EnsureClosureFeedbackCellArray(function);
+ EnsureClosureFeedbackCellArray(function,
+ reset_budget_for_feedback_allocation);
}
}
@@ -446,7 +490,7 @@ void JSFunction::SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
}
function->set_prototype_or_initial_map(*map);
map->SetConstructor(*function);
- if (FLAG_trace_maps) {
+ if (FLAG_log_maps) {
LOG(isolate, MapEvent("InitialMap", Handle<Map>(), map, "",
SharedFunctionInfo::DebugName(
handle(function->shared(), isolate))));
@@ -516,8 +560,15 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_DATA_VIEW_TYPE:
case JS_DATE_TYPE:
- case JS_FUNCTION_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
+ case JS_FUNCTION_TYPE:
+ case JS_PROMISE_CONSTRUCTOR_TYPE:
+ case JS_REG_EXP_CONSTRUCTOR_TYPE:
+ case JS_ARRAY_CONSTRUCTOR_TYPE:
+#define TYPED_ARRAY_CONSTRUCTORS_SWITCH(Type, type, TYPE, Ctype) \
+ case TYPE##_TYPED_ARRAY_CONSTRUCTOR_TYPE:
+ TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTORS_SWITCH)
+#undef TYPED_ARRAY_CONSTRUCTORS_SWITCH
case JS_ITERATOR_PROTOTYPE_TYPE:
case JS_MAP_ITERATOR_PROTOTYPE_TYPE:
case JS_OBJECT_PROTOTYPE_TYPE:
@@ -563,6 +614,7 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case WASM_MEMORY_OBJECT_TYPE:
case WASM_MODULE_OBJECT_TYPE:
case WASM_TABLE_OBJECT_TYPE:
+ case WASM_VALUE_OBJECT_TYPE:
return true;
case BIGINT_TYPE:
@@ -757,20 +809,49 @@ void JSFunction::PrintName(FILE* out) {
PrintF(out, "%s", shared().DebugNameCStr().get());
}
-Handle<String> JSFunction::GetName(Handle<JSFunction> function) {
- Isolate* isolate = function->GetIsolate();
- Handle<Object> name =
- JSReceiver::GetDataProperty(function, isolate->factory()->name_string());
- if (name->IsString()) return Handle<String>::cast(name);
- return SharedFunctionInfo::DebugName(handle(function->shared(), isolate));
+namespace {
+
+bool UseFastFunctionNameLookup(Isolate* isolate, Map map) {
+ DCHECK(map.IsJSFunctionMap());
+ if (map.NumberOfOwnDescriptors() < JSFunction::kMinDescriptorsForFastBind) {
+ return false;
+ }
+ DCHECK(!map.is_dictionary_map());
+ HeapObject value;
+ ReadOnlyRoots roots(isolate);
+ auto descriptors = map.instance_descriptors(kRelaxedLoad);
+ InternalIndex kNameIndex{JSFunction::kNameDescriptorIndex};
+ if (descriptors.GetKey(kNameIndex) != roots.name_string() ||
+ !descriptors.GetValue(kNameIndex)
+ .GetHeapObjectIfStrong(isolate, &value)) {
+ return false;
+ }
+ return value.IsAccessorInfo();
}
+} // namespace
+
Handle<String> JSFunction::GetDebugName(Handle<JSFunction> function) {
+ // Below we use the same fast-path that we already established for
+ // Function.prototype.bind(), where we avoid a slow "name" property
+ // lookup if the DescriptorArray for the |function| still has the
+ // "name" property at the original spot and that property is still
+ // implemented via an AccessorInfo (which effectively means that
+ // it must be the FunctionNameGetter).
Isolate* isolate = function->GetIsolate();
- Handle<Object> name = JSReceiver::GetDataProperty(
- function, isolate->factory()->display_name_string());
- if (name->IsString()) return Handle<String>::cast(name);
- return JSFunction::GetName(function);
+ if (!UseFastFunctionNameLookup(isolate, function->map())) {
+ // Normally there should be an else case for the fast-path check
+ // above, which should invoke JSFunction::GetName(), since that's
+ // what the FunctionNameGetter does, however GetDataProperty() has
+ // never invoked accessors and thus always returned undefined for
+ // JSFunction where the "name" property is untouched, so we retain
+ // that exact behavior and go with SharedFunctionInfo::DebugName()
+ // in case of the fast-path.
+ Handle<Object> name =
+ GetDataProperty(function, isolate->factory()->name_string());
+ if (name->IsString()) return Handle<String>::cast(name);
+ }
+ return SharedFunctionInfo::DebugName(handle(function->shared(), isolate));
}
bool JSFunction::SetName(Handle<JSFunction> function, Handle<Name> name,
@@ -883,7 +964,7 @@ int JSFunction::CalculateExpectedNofProperties(Isolate* isolate,
Handle<SharedFunctionInfo> shared(func->shared(), isolate);
IsCompiledScope is_compiled_scope(shared->is_compiled_scope(isolate));
if (is_compiled_scope.is_compiled() ||
- Compiler::Compile(func, Compiler::CLEAR_EXCEPTION,
+ Compiler::Compile(isolate, func, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope)) {
DCHECK(shared->is_compiled());
int count = shared->expected_nof_properties();
diff --git a/deps/v8/src/objects/js-function.h b/deps/v8/src/objects/js-function.h
index bf4cbc87fb..4583c3e868 100644
--- a/deps/v8/src/objects/js-function.h
+++ b/deps/v8/src/objects/js-function.h
@@ -63,8 +63,7 @@ class JSFunction : public JSFunctionOrBoundFunction {
static const int kLengthDescriptorIndex = 0;
static const int kNameDescriptorIndex = 1;
- // Home object descriptor index when function has a [[HomeObject]] slot.
- static const int kMaybeHomeObjectDescriptorIndex = 2;
+
// Fast binding requires length and name accessors.
static const int kMinDescriptorsForFastBind = 2;
@@ -83,9 +82,13 @@ class JSFunction : public JSFunctionOrBoundFunction {
// when the function is invoked, e.g. foo() or new foo(). See
// [[Call]] and [[Construct]] description in ECMA-262, section
// 8.6.2, page 27.
+ // Release/Acquire accessors are used when storing a newly-created
+ // optimized code object, or when reading from the background thread.
+ // Storing a builtin doesn't require release semantics because these objects
+ // are fully initialized.
inline Code code() const;
inline void set_code(Code code);
- inline void set_code_no_write_barrier(Code code);
+ DECL_RELEASE_ACQUIRE_ACCESSORS(code, Code)
// Get the abstract code associated with the function, which will either be
// a Code object or a BytecodeArray.
@@ -112,11 +115,15 @@ class JSFunction : public JSFunctionOrBoundFunction {
V8_EXPORT_PRIVATE bool HasAttachedOptimizedCode() const;
bool HasAvailableOptimizedCode() const;
+ bool HasAttachedCodeKind(CodeKind kind) const;
bool HasAvailableCodeKind(CodeKind kind) const;
+ CodeKind GetActiveTier() const;
V8_EXPORT_PRIVATE bool ActiveTierIsIgnition() const;
bool ActiveTierIsTurbofan() const;
bool ActiveTierIsNCI() const;
+ bool ActiveTierIsBaseline() const;
+ bool ActiveTierIsIgnitionOrBaseline() const;
bool ActiveTierIsMidtierTurboprop() const;
bool ActiveTierIsToptierTurboprop() const;
@@ -151,6 +158,10 @@ class JSFunction : public JSFunctionOrBoundFunction {
// Clears the optimization marker in the function's feedback vector.
inline void ClearOptimizationMarker();
+ // Sets the interrupt budget based on whether the function has a feedback
+ // vector and any optimized code.
+ inline void SetInterruptBudget();
+
// If slack tracking is active, it computes instance size of the initial map
// with minimum permissible object slack. If it is not active, it simply
// returns the initial map's instance size.
@@ -165,6 +176,11 @@ class JSFunction : public JSFunctionOrBoundFunction {
// the JSFunction's bytecode being flushed.
DECL_ACCESSORS(raw_feedback_cell, FeedbackCell)
+ // [raw_feedback_cell] (synchronized version) When this is initialized from a
+ // newly allocated object (instead of a root sentinel), it should
+ // be written with release store semantics.
+ DECL_RELEASE_ACQUIRE_ACCESSORS(raw_feedback_cell, FeedbackCell)
+
// Functions related to feedback vector. feedback_vector() can be used once
// the function has feedback vectors allocated. feedback vectors may not be
// available after compile when lazily allocating feedback vectors.
@@ -179,14 +195,16 @@ class JSFunction : public JSFunctionOrBoundFunction {
// lazily.
inline bool has_closure_feedback_cell_array() const;
inline ClosureFeedbackCellArray closure_feedback_cell_array() const;
- static void EnsureClosureFeedbackCellArray(Handle<JSFunction> function);
+ static void EnsureClosureFeedbackCellArray(
+ Handle<JSFunction> function, bool reset_budget_for_feedback_allocation);
// Initializes the feedback cell of |function|. In lite mode, this would be
// initialized to the closure feedback cell array that holds the feedback
// cells for create closure calls from this function. In the regular mode,
// this allocates feedback vector.
static void InitializeFeedbackCell(Handle<JSFunction> function,
- IsCompiledScope* compiled_scope);
+ IsCompiledScope* compiled_scope,
+ bool reset_budget_for_feedback_allocation);
// Unconditionally clear the type feedback vector.
void ClearTypeFeedbackInfo();
@@ -256,8 +274,6 @@ class JSFunction : public JSFunctionOrBoundFunction {
DECL_PRINTER(JSFunction)
DECL_VERIFIER(JSFunction)
- // The function's name if it is configured, otherwise shared function info
- // debug name.
static Handle<String> GetName(Handle<JSFunction> function);
// ES6 section 9.2.11 SetFunctionName
@@ -268,8 +284,7 @@ class JSFunction : public JSFunctionOrBoundFunction {
Handle<Name> name,
Handle<String> prefix);
- // The function's displayName if it is set, otherwise name if it is
- // configured, otherwise shared function info
+ // The function's name if it is configured, otherwise shared function info
// debug name.
static Handle<String> GetDebugName(Handle<JSFunction> function);
diff --git a/deps/v8/src/objects/js-locale.cc b/deps/v8/src/objects/js-locale.cc
index 82a6210375..a8da4a9612 100644
--- a/deps/v8/src/objects/js-locale.cc
+++ b/deps/v8/src/objects/js-locale.cc
@@ -392,22 +392,62 @@ MaybeHandle<JSLocale> Construct(Isolate* isolate,
MaybeHandle<JSLocale> JSLocale::Maximize(Isolate* isolate,
Handle<JSLocale> locale) {
- icu::Locale icu_locale(*(locale->icu_locale().raw()));
+ // ICU has a limitation on the length of the locale when addLikelySubtags
+ // is called. Work around the issue by only performing addLikelySubtags
+ // on the base locale and merging the extensions back in if needed.
+ icu::Locale source(*(locale->icu_locale().raw()));
+ icu::Locale result = icu::Locale::createFromName(source.getBaseName());
UErrorCode status = U_ZERO_ERROR;
- icu_locale.addLikelySubtags(status);
+ result.addLikelySubtags(status);
+ if (strlen(source.getBaseName()) != strlen(result.getBaseName())) {
+ // Base name is changed
+ if (strlen(source.getBaseName()) != strlen(source.getName())) {
+ // the source has extensions, get the extensions from the source.
+ result = icu::LocaleBuilder()
+ .setLocale(source)
+ .setLanguage(result.getLanguage())
+ .setRegion(result.getCountry())
+ .setScript(result.getScript())
+ .setVariant(result.getVariant())
+ .build(status);
+ }
+ } else {
+ // Base name is not changed
+ result = source;
+ }
DCHECK(U_SUCCESS(status));
- DCHECK(!icu_locale.isBogus());
- return Construct(isolate, icu_locale);
+ DCHECK(!result.isBogus());
+ return Construct(isolate, result);
}
MaybeHandle<JSLocale> JSLocale::Minimize(Isolate* isolate,
Handle<JSLocale> locale) {
- icu::Locale icu_locale(*(locale->icu_locale().raw()));
+  // ICU has a limitation on the length of the locale when minimizeSubtags
+  // is called. Work around the issue by only performing minimizeSubtags
+  // on the base locale and merging the extensions if needed.
+ icu::Locale source(*(locale->icu_locale().raw()));
+ icu::Locale result = icu::Locale::createFromName(source.getBaseName());
UErrorCode status = U_ZERO_ERROR;
- icu_locale.minimizeSubtags(status);
+ result.minimizeSubtags(status);
+ if (strlen(source.getBaseName()) != strlen(result.getBaseName())) {
+    // The base name has changed.
+ if (strlen(source.getBaseName()) != strlen(source.getName())) {
+      // The source has extensions; copy them over from the source.
+ result = icu::LocaleBuilder()
+ .setLocale(source)
+ .setLanguage(result.getLanguage())
+ .setRegion(result.getCountry())
+ .setScript(result.getScript())
+ .setVariant(result.getVariant())
+ .build(status);
+ }
+ } else {
+    // The base name is unchanged.
+ result = source;
+ }
DCHECK(U_SUCCESS(status));
- DCHECK(!icu_locale.isBogus());
- return Construct(isolate, icu_locale);
+ DCHECK(!result.isBogus());
+ return Construct(isolate, result);
}
Handle<Object> JSLocale::Language(Isolate* isolate, Handle<JSLocale> locale) {
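Maximize and Minimize above end up with identical merge logic around their respective ICU calls. Purely as an illustration of that shared pattern (V8 keeps the two copies inline; the helper below is hypothetical and assumes <cstring> plus the ICU headers already included by js-locale.cc):

    // Hypothetical helper: apply |transform| (addLikelySubtags or
    // minimizeSubtags) to the base locale only, then merge the source's
    // extensions back in if the base name changed.
    template <typename Transform>
    icu::Locale TransformBaseLocale(const icu::Locale& source,
                                    Transform transform, UErrorCode& status) {
      icu::Locale result = icu::Locale::createFromName(source.getBaseName());
      transform(result, status);
      if (strlen(source.getBaseName()) == strlen(result.getBaseName())) {
        return source;  // Base name unchanged; keep the original locale.
      }
      if (strlen(source.getBaseName()) == strlen(source.getName())) {
        return result;  // Source has no extensions; nothing to merge.
      }
      return icu::LocaleBuilder()
          .setLocale(source)
          .setLanguage(result.getLanguage())
          .setRegion(result.getCountry())
          .setScript(result.getScript())
          .setVariant(result.getVariant())
          .build(status);
    }

    // Usage sketch for Maximize:
    //   icu::Locale maximized = TransformBaseLocale(
    //       source,
    //       [](icu::Locale& l, UErrorCode& s) { l.addLikelySubtags(s); },
    //       status);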
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
index 065c5af005..e2f6becc5d 100644
--- a/deps/v8/src/objects/js-objects-inl.h
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -50,6 +50,21 @@ CAST_ACCESSOR(JSIteratorResult)
CAST_ACCESSOR(JSMessageObject)
CAST_ACCESSOR(JSReceiver)
+DEF_GETTER(JSObject, elements, FixedArrayBase) {
+ return TaggedField<FixedArrayBase, kElementsOffset>::load(isolate, *this);
+}
+
+FixedArrayBase JSObject::elements(IsolateRoot isolate, RelaxedLoadTag) const {
+ return TaggedField<FixedArrayBase, kElementsOffset>::Relaxed_Load(isolate,
+ *this);
+}
+
+void JSObject::set_elements(FixedArrayBase value, WriteBarrierMode mode) {
+ // Note the relaxed atomic store.
+ TaggedField<FixedArrayBase, kElementsOffset>::Relaxed_Store(*this, value);
+ CONDITIONAL_WRITE_BARRIER(*this, kElementsOffset, value, mode);
+}
+
MaybeHandle<Object> JSReceiver::GetProperty(Isolate* isolate,
Handle<JSReceiver> receiver,
Handle<Name> name) {
@@ -302,17 +317,6 @@ void JSObject::SetEmbedderField(int index, Smi value) {
EmbedderDataSlot(*this, index).store_smi(value);
}
-bool JSObject::IsUnboxedDoubleField(FieldIndex index) const {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return IsUnboxedDoubleField(isolate, index);
-}
-
-bool JSObject::IsUnboxedDoubleField(IsolateRoot isolate,
- FieldIndex index) const {
- if (!FLAG_unbox_double_fields) return false;
- return map(isolate).IsUnboxedDoubleField(isolate, index);
-}
-
// Access fast-case object properties at index. The use of these routines
// is needed to correctly distinguish between properties stored in-object and
// properties stored in the properties array.
@@ -323,7 +327,6 @@ Object JSObject::RawFastPropertyAt(FieldIndex index) const {
Object JSObject::RawFastPropertyAt(IsolateRoot isolate,
FieldIndex index) const {
- DCHECK(!IsUnboxedDoubleField(isolate, index));
if (index.is_inobject()) {
return TaggedField<Object>::load(isolate, *this, index.offset());
} else {
@@ -331,16 +334,6 @@ Object JSObject::RawFastPropertyAt(IsolateRoot isolate,
}
}
-double JSObject::RawFastDoublePropertyAt(FieldIndex index) const {
- DCHECK(IsUnboxedDoubleField(index));
- return ReadField<double>(index.offset());
-}
-
-uint64_t JSObject::RawFastDoublePropertyAsBitsAt(FieldIndex index) const {
- DCHECK(IsUnboxedDoubleField(index));
- return ReadField<uint64_t>(index.offset());
-}
-
void JSObject::RawFastInobjectPropertyAtPut(FieldIndex index, Object value,
WriteBarrierMode mode) {
DCHECK(index.is_inobject());
@@ -349,8 +342,8 @@ void JSObject::RawFastInobjectPropertyAtPut(FieldIndex index, Object value,
CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);
}
-void JSObject::RawFastPropertyAtPut(FieldIndex index, Object value,
- WriteBarrierMode mode) {
+void JSObject::FastPropertyAtPut(FieldIndex index, Object value,
+ WriteBarrierMode mode) {
if (index.is_inobject()) {
RawFastInobjectPropertyAtPut(index, value, mode);
} else {
@@ -359,27 +352,6 @@ void JSObject::RawFastPropertyAtPut(FieldIndex index, Object value,
}
}
-void JSObject::RawFastDoublePropertyAsBitsAtPut(FieldIndex index,
- uint64_t bits) {
- // Double unboxing is enabled only on 64-bit platforms without pointer
- // compression.
- DCHECK_EQ(kDoubleSize, kTaggedSize);
- Address field_addr = field_address(index.offset());
- base::Relaxed_Store(reinterpret_cast<base::AtomicWord*>(field_addr),
- static_cast<base::AtomicWord>(bits));
-}
-
-void JSObject::FastPropertyAtPut(FieldIndex index, Object value) {
- if (IsUnboxedDoubleField(index)) {
- DCHECK(value.IsHeapNumber());
- // Ensure that all bits of the double value are preserved.
- RawFastDoublePropertyAsBitsAtPut(index,
- HeapNumber::cast(value).value_as_bits());
- } else {
- RawFastPropertyAtPut(index, value);
- }
-}
-
void JSObject::WriteToField(InternalIndex descriptor, PropertyDetails details,
Object value) {
DCHECK_EQ(kField, details.location());
@@ -400,14 +372,10 @@ void JSObject::WriteToField(InternalIndex descriptor, PropertyDetails details,
DCHECK(value.IsHeapNumber());
bits = HeapNumber::cast(value).value_as_bits();
}
- if (IsUnboxedDoubleField(index)) {
- RawFastDoublePropertyAsBitsAtPut(index, bits);
- } else {
- auto box = HeapNumber::cast(RawFastPropertyAt(index));
- box.set_value_as_bits(bits);
- }
+ auto box = HeapNumber::cast(RawFastPropertyAt(index));
+ box.set_value_as_bits(bits);
} else {
- RawFastPropertyAtPut(index, value);
+ FastPropertyAtPut(index, value);
}
}
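The elements accessors added near the top of this file make the default setter a relaxed atomic store (followed by a conditional write barrier) and expose a relaxed-tagged getter; js-objects.h further below deletes the acquire/release variants outright. As a rough analogy only, not the actual TaggedField/CONDITIONAL_WRITE_BARRIER machinery, the memory-ordering contract corresponds to:

    #include <atomic>
    #include <cstdint>

    using Address = uintptr_t;

    struct ElementsSlot {
      std::atomic<Address> value;

      // Relaxed store: concurrent readers always observe a valid pointer, but
      // no ordering with respect to other fields is implied; the GC write
      // barrier is the caller's responsibility (mirrors the new set_elements()).
      void Store(Address v) { value.store(v, std::memory_order_relaxed); }

      // Relaxed load: what elements(isolate, kRelaxedLoad) corresponds to
      // conceptually.
      Address Load() const { return value.load(std::memory_order_relaxed); }
    };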
diff --git a/deps/v8/src/objects/js-objects.cc b/deps/v8/src/objects/js-objects.cc
index e150a1339c..91006bcf2d 100644
--- a/deps/v8/src/objects/js-objects.cc
+++ b/deps/v8/src/objects/js-objects.cc
@@ -7,6 +7,7 @@
#include "src/api/api-arguments-inl.h"
#include "src/common/globals.h"
#include "src/date/date.h"
+#include "src/debug/debug-wasm-objects.h"
#include "src/execution/arguments.h"
#include "src/execution/frames.h"
#include "src/execution/isolate.h"
@@ -28,7 +29,6 @@
#include "src/objects/heap-number.h"
#include "src/objects/js-array-buffer.h"
#include "src/objects/js-array-inl.h"
-#include "src/objects/layout-descriptor.h"
#include "src/objects/lookup.h"
#include "src/objects/objects-inl.h"
#ifdef V8_INTL_SUPPORT
@@ -534,7 +534,7 @@ Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
return GetConstructorHelper(receiver).second;
}
-Handle<NativeContext> JSReceiver::GetCreationContext() {
+MaybeHandle<NativeContext> JSReceiver::GetCreationContext() {
JSReceiver receiver = *this;
// Externals are JSObjects with null as a constructor.
DCHECK(!receiver.IsExternal(GetIsolate()));
@@ -544,20 +544,19 @@ Handle<NativeContext> JSReceiver::GetCreationContext() {
function = JSFunction::cast(constructor);
} else if (constructor.IsFunctionTemplateInfo()) {
// Remote objects don't have a creation context.
- return Handle<NativeContext>::null();
+ return MaybeHandle<NativeContext>();
} else if (receiver.IsJSGeneratorObject()) {
function = JSGeneratorObject::cast(receiver).function();
- } else {
- // Functions have null as a constructor,
- // but any JSFunction knows its context immediately.
- CHECK(receiver.IsJSFunction());
+ } else if (receiver.IsJSFunction()) {
function = JSFunction::cast(receiver);
+ } else {
+ return MaybeHandle<NativeContext>();
}
return function.has_context()
? Handle<NativeContext>(function.context().native_context(),
receiver.GetIsolate())
- : Handle<NativeContext>::null();
+ : MaybeHandle<NativeContext>();
}
// static
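With GetCreationContext now returning a MaybeHandle, callers have to unwrap the result explicitly. A hedged sketch of the two usual patterns (ToHandle for callers that must tolerate a missing context, ToHandleChecked where one is known to exist, as GetOwnPropertyDescriptor does further down):

    // Fallible caller: remote or context-less receivers simply bail out.
    Handle<NativeContext> creation_context;
    if (!receiver->GetCreationContext().ToHandle(&creation_context)) {
      return MaybeHandle<NativeContext>();
    }

    // Infallible caller: aborts if no creation context exists.
    Handle<NativeContext> context =
        receiver->GetCreationContext().ToHandleChecked();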
@@ -1281,7 +1280,7 @@ Maybe<bool> JSReceiver::IsCompatiblePropertyDescriptor(
isolate, nullptr, extensible, desc, current, should_throw, property_name);
}
-// ES6 9.1.6.3
+// https://tc39.es/ecma262/#sec-validateandapplypropertydescriptor
// static
Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
Isolate* isolate, LookupIterator* it, bool extensible,
@@ -1308,8 +1307,8 @@ Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
}
// 2c. If IsGenericDescriptor(Desc) or IsDataDescriptor(Desc) is true, then:
// (This is equivalent to !IsAccessorDescriptor(desc).)
- DCHECK((desc_is_generic_descriptor || desc_is_data_descriptor) ==
- !desc_is_accessor_descriptor);
+ DCHECK_EQ(desc_is_generic_descriptor || desc_is_data_descriptor,
+ !desc_is_accessor_descriptor);
if (!desc_is_accessor_descriptor) {
// 2c i. If O is not undefined, create an own data property named P of
// object O whose [[Value]], [[Writable]], [[Enumerable]] and
@@ -1356,10 +1355,8 @@ Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
// 2e. Return true.
return Just(true);
}
- // 3. Return true, if every field in Desc is absent.
- // 4. Return true, if every field in Desc also occurs in current and the
- // value of every field in Desc is the same value as the corresponding field
- // in current when compared using the SameValue algorithm.
+  // 3. If every field in Desc is absent, return true. (This also has a shortcut
+  // not in the spec: if every field value matches the current value, return true.)
if ((!desc->has_enumerable() ||
desc->enumerable() == current->enumerable()) &&
(!desc->has_configurable() ||
@@ -1374,18 +1371,19 @@ Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
(current->has_set() && current->set()->SameValue(*desc->set())))) {
return Just(true);
}
- // 5. If the [[Configurable]] field of current is false, then
+ // 4. If current.[[Configurable]] is false, then
if (!current->configurable()) {
- // 5a. Return false, if the [[Configurable]] field of Desc is true.
+ // 4a. If Desc.[[Configurable]] is present and its value is true, return
+ // false.
if (desc->has_configurable() && desc->configurable()) {
RETURN_FAILURE(
isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kRedefineDisallowed,
it != nullptr ? it->GetName() : property_name));
}
- // 5b. Return false, if the [[Enumerable]] field of Desc is present and the
- // [[Enumerable]] fields of current and Desc are the Boolean negation of
- // each other.
+ // 4b. If Desc.[[Enumerable]] is present and
+ // ! SameValue(Desc.[[Enumerable]], current.[[Enumerable]]) is false, return
+ // false.
if (desc->has_enumerable() && desc->enumerable() != current->enumerable()) {
RETURN_FAILURE(
isolate, GetShouldThrow(isolate, should_throw),
@@ -1396,79 +1394,79 @@ Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
bool current_is_data_descriptor =
PropertyDescriptor::IsDataDescriptor(current);
- // 6. If IsGenericDescriptor(Desc) is true, no further validation is required.
+ // 5. If ! IsGenericDescriptor(Desc) is true, no further validation is
+ // required.
if (desc_is_generic_descriptor) {
// Nothing to see here.
- // 7. Else if IsDataDescriptor(current) and IsDataDescriptor(Desc) have
- // different results, then:
+ // 6. Else if ! SameValue(!IsDataDescriptor(current),
+  // !IsDataDescriptor(Desc)) is false, then:
} else if (current_is_data_descriptor != desc_is_data_descriptor) {
- // 7a. Return false, if the [[Configurable]] field of current is false.
+ // 6a. If current.[[Configurable]] is false, return false.
if (!current->configurable()) {
RETURN_FAILURE(
isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kRedefineDisallowed,
it != nullptr ? it->GetName() : property_name));
}
- // 7b. If IsDataDescriptor(current) is true, then:
+ // 6b. If IsDataDescriptor(current) is true, then:
if (current_is_data_descriptor) {
- // 7b i. If O is not undefined, convert the property named P of object O
+ // 6b i. If O is not undefined, convert the property named P of object O
// from a data property to an accessor property. Preserve the existing
// values of the converted property's [[Configurable]] and [[Enumerable]]
// attributes and set the rest of the property's attributes to their
// default values.
- // --> Folded into step 10.
+      // --> Folded into step 9.
} else {
- // 7c i. If O is not undefined, convert the property named P of object O
+ // 6c i. If O is not undefined, convert the property named P of object O
// from an accessor property to a data property. Preserve the existing
// values of the converted property’s [[Configurable]] and [[Enumerable]]
// attributes and set the rest of the property’s attributes to their
// default values.
- // --> Folded into step 10.
+      // --> Folded into step 9.
}
- // 8. Else if IsDataDescriptor(current) and IsDataDescriptor(Desc) are both
+ // 7. Else if IsDataDescriptor(current) and IsDataDescriptor(Desc) are both
// true, then:
} else if (current_is_data_descriptor && desc_is_data_descriptor) {
- // 8a. If the [[Configurable]] field of current is false, then:
- if (!current->configurable()) {
- // 8a i. Return false, if the [[Writable]] field of current is false and
- // the [[Writable]] field of Desc is true.
- if (!current->writable() && desc->has_writable() && desc->writable()) {
+ // 7a. If current.[[Configurable]] is false and current.[[Writable]] is
+ // false, then
+ if (!current->configurable() && !current->writable()) {
+ // 7a i. If Desc.[[Writable]] is present and Desc.[[Writable]] is true,
+ // return false.
+ if (desc->has_writable() && desc->writable()) {
RETURN_FAILURE(
isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kRedefineDisallowed,
it != nullptr ? it->GetName() : property_name));
}
- // 8a ii. If the [[Writable]] field of current is false, then:
- if (!current->writable()) {
- // 8a ii 1. Return false, if the [[Value]] field of Desc is present and
- // SameValue(Desc.[[Value]], current.[[Value]]) is false.
- if (desc->has_value() && !desc->value()->SameValue(*current->value())) {
- RETURN_FAILURE(
- isolate, GetShouldThrow(isolate, should_throw),
- NewTypeError(MessageTemplate::kRedefineDisallowed,
- it != nullptr ? it->GetName() : property_name));
- }
+ // 7a ii. If Desc.[[Value]] is present and SameValue(Desc.[[Value]],
+ // current.[[Value]]) is false, return false.
+ if (desc->has_value() && !desc->value()->SameValue(*current->value())) {
+ RETURN_FAILURE(
+ isolate, GetShouldThrow(isolate, should_throw),
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != nullptr ? it->GetName() : property_name));
}
}
} else {
- // 9. Else IsAccessorDescriptor(current) and IsAccessorDescriptor(Desc)
- // are both true,
+ // 8. Else,
+ // 8a. Assert: ! IsAccessorDescriptor(current) and
+ // ! IsAccessorDescriptor(Desc) are both true.
DCHECK(PropertyDescriptor::IsAccessorDescriptor(current) &&
desc_is_accessor_descriptor);
- // 9a. If the [[Configurable]] field of current is false, then:
+ // 8b. If current.[[Configurable]] is false, then:
if (!current->configurable()) {
- // 9a i. Return false, if the [[Set]] field of Desc is present and
- // SameValue(Desc.[[Set]], current.[[Set]]) is false.
+      // 8b i. If Desc.[[Set]] is present and SameValue(Desc.[[Set]],
+ // current.[[Set]]) is false, return false.
if (desc->has_set() && !desc->set()->SameValue(*current->set())) {
RETURN_FAILURE(
isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kRedefineDisallowed,
it != nullptr ? it->GetName() : property_name));
}
- // 9a ii. Return false, if the [[Get]] field of Desc is present and
- // SameValue(Desc.[[Get]], current.[[Get]]) is false.
+      // 8b ii. If Desc.[[Get]] is present and SameValue(Desc.[[Get]],
+ // current.[[Get]]) is false, return false.
if (desc->has_get() && !desc->get()->SameValue(*current->get())) {
RETURN_FAILURE(
isolate, GetShouldThrow(isolate, should_throw),
@@ -1478,9 +1476,9 @@ Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
}
}
- // 10. If O is not undefined, then:
+ // 9. If O is not undefined, then:
if (it != nullptr) {
- // 10a. For each field of Desc that is present, set the corresponding
+ // 9a. For each field of Desc that is present, set the corresponding
// attribute of the property named P of object O to the value of the field.
PropertyAttributes attrs = NONE;
@@ -1537,7 +1535,7 @@ Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
}
}
- // 11. Return true.
+ // 10. Return true.
return Just(true);
}
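The renumbered step 3 above keeps V8's extra shortcut: when every field present in Desc already matches current, the call returns Just(true) without modifying the object. A condensed sketch of the attribute half of that comparison, using only the PropertyDescriptor accessors that appear in this hunk (the real check additionally compares [[Value]], [[Get]] and [[Set]] via SameValue; the helper name is hypothetical):

    // Sketch of the "nothing to change" attribute comparison.
    bool BooleanAttributesMatch(PropertyDescriptor* desc,
                                PropertyDescriptor* current) {
      return (!desc->has_enumerable() ||
              desc->enumerable() == current->enumerable()) &&
             (!desc->has_configurable() ||
              desc->configurable() == current->configurable()) &&
             (!desc->has_writable() ||
              desc->writable() == current->writable());
    }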
@@ -1697,7 +1695,7 @@ Maybe<bool> JSReceiver::GetOwnPropertyDescriptor(LookupIterator* it,
Handle<AccessorPair> accessors =
Handle<AccessorPair>::cast(it->GetAccessors());
Handle<NativeContext> native_context =
- it->GetHolder<JSReceiver>()->GetCreationContext();
+ it->GetHolder<JSReceiver>()->GetCreationContext().ToHandleChecked();
// 6a. Set D.[[Get]] to the value of X's [[Get]] attribute.
desc->set_get(AccessorPair::GetComponent(isolate, native_context, accessors,
ACCESSOR_GETTER));
@@ -2128,7 +2126,8 @@ MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
DCHECK(constructor->IsConstructor());
DCHECK(new_target->IsConstructor());
DCHECK(!constructor->has_initial_map() ||
- constructor->initial_map().instance_type() != JS_FUNCTION_TYPE);
+ !InstanceTypeChecker::IsJSFunction(
+ constructor->initial_map().instance_type()));
Handle<Map> initial_map;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -2205,6 +2204,13 @@ int JSObject::GetHeaderSize(InstanceType type,
case JS_BOUND_FUNCTION_TYPE:
return JSBoundFunction::kHeaderSize;
case JS_FUNCTION_TYPE:
+ case JS_PROMISE_CONSTRUCTOR_TYPE:
+ case JS_REG_EXP_CONSTRUCTOR_TYPE:
+ case JS_ARRAY_CONSTRUCTOR_TYPE:
+#define TYPED_ARRAY_CONSTRUCTORS_SWITCH(Type, type, TYPE, Ctype) \
+ case TYPE##_TYPED_ARRAY_CONSTRUCTOR_TYPE:
+ TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTORS_SWITCH)
+#undef TYPED_ARRAY_CONSTRUCTORS_SWITCH
return JSFunction::GetHeaderSize(function_has_prototype_slot);
case JS_PRIMITIVE_WRAPPER_TYPE:
return JSPrimitiveWrapper::kHeaderSize;
@@ -2293,6 +2299,8 @@ int JSObject::GetHeaderSize(InstanceType type,
return WasmModuleObject::kHeaderSize;
case WASM_TABLE_OBJECT_TYPE:
return WasmTableObject::kHeaderSize;
+ case WASM_VALUE_OBJECT_TYPE:
+ return WasmValueObject::kHeaderSize;
case WASM_EXCEPTION_OBJECT_TYPE:
return WasmExceptionObject::kHeaderSize;
default:
@@ -2442,19 +2450,17 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object, Handle<Name> name,
if (entry.is_not_found()) {
DCHECK_IMPLIES(global_obj->map().is_prototype_map(),
Map::IsPrototypeChainInvalidated(global_obj->map()));
- auto cell = isolate->factory()->NewPropertyCell(name);
- cell->set_value(*value);
auto cell_type = value->IsUndefined(roots) ? PropertyCellType::kUndefined
: PropertyCellType::kConstant;
details = details.set_cell_type(cell_type);
- value = cell;
+ auto cell = isolate->factory()->NewPropertyCell(name, details, value);
dictionary =
- GlobalDictionary::Add(isolate, dictionary, name, value, details);
+ GlobalDictionary::Add(isolate, dictionary, name, cell, details);
global_obj->set_global_dictionary(*dictionary, kReleaseStore);
} else {
- Handle<PropertyCell> cell = PropertyCell::PrepareForValue(
- isolate, dictionary, entry, value, details);
- cell->set_value(*value);
+ PropertyCell::PrepareForAndSetValue(isolate, dictionary, entry, value,
+ details);
+ DCHECK_EQ(dictionary->CellAt(entry).value(), *value);
}
} else {
if (V8_DICT_MODE_PROTOTYPES_BOOL) {
@@ -2527,6 +2533,13 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
break;
}
+ case JS_PROMISE_CONSTRUCTOR_TYPE:
+ case JS_REG_EXP_CONSTRUCTOR_TYPE:
+ case JS_ARRAY_CONSTRUCTOR_TYPE:
+#define TYPED_ARRAY_CONSTRUCTORS_SWITCH(Type, type, TYPE, Ctype) \
+ case TYPE##_TYPED_ARRAY_CONSTRUCTOR_TYPE:
+ TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTORS_SWITCH)
+#undef TYPED_ARRAY_CONSTRUCTORS_SWITCH
case JS_FUNCTION_TYPE: {
JSFunction function = JSFunction::cast(*this);
std::unique_ptr<char[]> fun_name = function.shared().DebugNameCStr();
@@ -2764,13 +2777,10 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
FieldIndex::ForDescriptor(isolate, *new_map, new_map->LastAdded());
if (index.is_inobject() || index.outobject_array_index() <
object->property_array(isolate).length()) {
- // We still need to allocate HeapNumbers for double fields
- // if either double field unboxing is disabled or the double field
- // is in the PropertyArray backing store (where we don't support
- // double field unboxing).
- if (index.is_double() && !new_map->IsUnboxedDoubleField(isolate, index)) {
+ // Allocate HeapNumbers for double fields.
+ if (index.is_double()) {
auto value = isolate->factory()->NewHeapNumberWithHoleNaN();
- object->RawFastPropertyAtPut(index, *value);
+ object->FastPropertyAtPut(index, *value);
}
object->synchronized_set_map(*new_map);
return;
@@ -2862,19 +2872,13 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
} else {
DCHECK_EQ(kField, old_details.location());
FieldIndex index = FieldIndex::ForDescriptor(isolate, *old_map, i);
- if (object->IsUnboxedDoubleField(isolate, index)) {
- uint64_t old_bits = object->RawFastDoublePropertyAsBitsAt(index);
- value = isolate->factory()->NewHeapNumberFromBits(old_bits);
- } else {
- value = handle(object->RawFastPropertyAt(isolate, index), isolate);
- if (!old_representation.IsDouble() && representation.IsDouble()) {
- DCHECK_IMPLIES(old_representation.IsNone(),
- value->IsUninitialized(isolate));
- value = Object::NewStorageFor(isolate, value, representation);
- } else if (old_representation.IsDouble() &&
- !representation.IsDouble()) {
- value = Object::WrapForRead(isolate, value, old_representation);
- }
+ value = handle(object->RawFastPropertyAt(isolate, index), isolate);
+ if (!old_representation.IsDouble() && representation.IsDouble()) {
+ DCHECK_IMPLIES(old_representation.IsNone(),
+ value->IsUninitialized(isolate));
+ value = Object::NewStorageFor(isolate, value, representation);
+ } else if (old_representation.IsDouble() && !representation.IsDouble()) {
+ value = Object::WrapForRead(isolate, value, old_representation);
}
}
DCHECK(!(representation.IsDouble() && value->IsSmi()));
@@ -2919,25 +2923,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
for (int i = 0; i < limit; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
Object value = inobject_props->get(isolate, i);
- // Can't use JSObject::FastPropertyAtPut() because proper map was not set
- // yet.
- if (new_map->IsUnboxedDoubleField(isolate, index)) {
- DCHECK(value.IsHeapNumber(isolate));
- // Ensure that all bits of the double value are preserved.
- object->RawFastDoublePropertyAsBitsAtPut(
- index, HeapNumber::cast(value).value_as_bits());
- if (i < old_number_of_fields && !old_map->IsUnboxedDoubleField(index)) {
- // Transition from tagged to untagged slot.
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(*object);
- chunk->InvalidateRecordedSlots(*object);
- } else {
-#ifdef DEBUG
- heap->VerifyClearedSlot(*object, object->RawField(index.offset()));
-#endif
- }
- } else {
- object->RawFastPropertyAtPut(index, value);
- }
+ object->FastPropertyAtPut(index, value);
}
object->SetProperties(*array);
@@ -3004,16 +2990,11 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
if (details.location() == kField) {
FieldIndex index = FieldIndex::ForDescriptor(isolate, *map, i);
if (details.kind() == kData) {
- if (object->IsUnboxedDoubleField(isolate, index)) {
- double old_value = object->RawFastDoublePropertyAt(index);
+ value = handle(object->RawFastPropertyAt(isolate, index), isolate);
+ if (details.representation().IsDouble()) {
+ DCHECK(value->IsHeapNumber(isolate));
+ double old_value = Handle<HeapNumber>::cast(value)->value();
value = isolate->factory()->NewHeapNumber(old_value);
- } else {
- value = handle(object->RawFastPropertyAt(isolate, index), isolate);
- if (details.representation().IsDouble()) {
- DCHECK(value->IsHeapNumber(isolate));
- double old_value = Handle<HeapNumber>::cast(value)->value();
- value = isolate->factory()->NewHeapNumber(old_value);
- }
}
} else {
DCHECK_EQ(kAccessor, details.kind());
@@ -3025,8 +3006,11 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
value = handle(descs->GetStrongValue(isolate, i), isolate);
}
DCHECK(!value.is_null());
- PropertyDetails d(details.kind(), details.attributes(),
- PropertyCellType::kNoCell);
+ PropertyConstness constness = V8_DICT_PROPERTY_CONST_TRACKING_BOOL
+ ? details.constness()
+ : PropertyConstness::kMutable;
+ PropertyDetails d(details.kind(), details.attributes(), constness);
+
if (V8_DICT_MODE_PROTOTYPES_BOOL) {
ord_dictionary =
OrderedNameDictionary::Add(isolate, ord_dictionary, key, value, d)
@@ -3081,7 +3065,7 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
for (int i = 0; i < inobject_properties; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
- object->RawFastPropertyAtPut(index, Smi::zero());
+ object->FastPropertyAtPut(index, Smi::zero());
}
}
@@ -3195,42 +3179,32 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
// Allocate mutable double boxes if necessary. It is always necessary if we
// have external properties, but is also necessary if we only have inobject
// properties but don't unbox double fields.
- if (!FLAG_unbox_double_fields || external > 0) {
- Isolate* isolate = object->GetIsolate();
+ Isolate* isolate = object->GetIsolate();
- Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
- isolate);
- Handle<FixedArray> storage;
- if (!FLAG_unbox_double_fields) {
- storage = isolate->factory()->NewFixedArray(inobject);
- }
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate);
+ Handle<FixedArray> storage = isolate->factory()->NewFixedArray(inobject);
- Handle<PropertyArray> array =
- isolate->factory()->NewPropertyArray(external);
+ Handle<PropertyArray> array = isolate->factory()->NewPropertyArray(external);
- for (InternalIndex i : map->IterateOwnDescriptors()) {
- PropertyDetails details = descriptors->GetDetails(i);
- Representation representation = details.representation();
- if (!representation.IsDouble()) continue;
- FieldIndex index = FieldIndex::ForDescriptor(*map, i);
- if (map->IsUnboxedDoubleField(index)) continue;
- auto box = isolate->factory()->NewHeapNumberWithHoleNaN();
- if (index.is_inobject()) {
- storage->set(index.property_index(), *box);
- } else {
- array->set(index.outobject_array_index(), *box);
- }
+ for (InternalIndex i : map->IterateOwnDescriptors()) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ Representation representation = details.representation();
+ if (!representation.IsDouble()) continue;
+ FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+ auto box = isolate->factory()->NewHeapNumberWithHoleNaN();
+ if (index.is_inobject()) {
+ storage->set(index.property_index(), *box);
+ } else {
+ array->set(index.outobject_array_index(), *box);
}
+ }
- object->SetProperties(*array);
-
- if (!FLAG_unbox_double_fields) {
- for (int i = 0; i < inobject; i++) {
- FieldIndex index = FieldIndex::ForPropertyIndex(*map, i);
- Object value = storage->get(i);
- object->RawFastPropertyAtPut(index, value);
- }
- }
+ object->SetProperties(*array);
+ for (int i = 0; i < inobject; i++) {
+ FieldIndex index = FieldIndex::ForPropertyIndex(*map, i);
+ Object value = storage->get(i);
+ object->FastPropertyAtPut(index, value);
}
object->synchronized_set_map(*map);
}
@@ -3526,7 +3500,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
object->SetProperties(ReadOnlyRoots(isolate).empty_fixed_array());
// Check that it really works.
DCHECK(object->HasFastProperties());
- if (FLAG_trace_maps) {
+ if (FLAG_log_maps) {
LOG(isolate, MapEvent("SlowToFast", old_map, new_map, reason));
}
return;
@@ -3588,7 +3562,8 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
}
DCHECK_EQ(kField, details.location());
- DCHECK_EQ(PropertyConstness::kMutable, details.constness());
+ DCHECK_IMPLIES(!V8_DICT_PROPERTY_CONST_TRACKING_BOOL,
+ details.constness() == PropertyConstness::kMutable);
Descriptor d;
if (details.kind() == kData) {
@@ -3597,6 +3572,10 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
PropertyConstness constness = is_transitionable_elements_kind
? PropertyConstness::kMutable
: PropertyConstness::kConst;
+ // TODO(v8:11248): Consider always setting constness to kMutable
+ // once all prototypes stay in dictionary mode and we are not interested
+ // in tracking constness for fast mode properties anymore.
+
d = Descriptor::DataField(
key, current_offset, details.attributes(), constness,
// TODO(verwaest): value->OptimalRepresentation();
@@ -3624,18 +3603,15 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
descriptors->Sort();
- Handle<LayoutDescriptor> layout_descriptor = LayoutDescriptor::New(
- isolate, new_map, descriptors, descriptors->number_of_descriptors());
-
DisallowGarbageCollection no_gc;
- new_map->InitializeDescriptors(isolate, *descriptors, *layout_descriptor);
+ new_map->InitializeDescriptors(isolate, *descriptors);
if (number_of_allocated_fields == 0) {
new_map->SetInObjectUnusedPropertyFields(unused_property_fields);
} else {
new_map->SetOutOfObjectUnusedPropertyFields(unused_property_fields);
}
- if (FLAG_trace_maps) {
+ if (FLAG_log_maps) {
LOG(isolate, MapEvent("SlowToFast", old_map, new_map, reason));
}
// Transform the object.
@@ -4189,11 +4165,6 @@ Handle<Object> JSObject::FastPropertyAt(Handle<JSObject> object,
Representation representation,
FieldIndex index) {
Isolate* isolate = object->GetIsolate();
- if (object->IsUnboxedDoubleField(index)) {
- DCHECK(representation.IsDouble());
- double value = object->RawFastDoublePropertyAt(index);
- return isolate->factory()->NewHeapNumber(value);
- }
Handle<Object> raw_value(object->RawFastPropertyAt(index), isolate);
return Object::WrapForRead(isolate, raw_value, representation);
}
@@ -4364,23 +4335,14 @@ Object JSObject::SlowReverseLookup(Object value) {
if (details.location() == kField) {
DCHECK_EQ(kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(map(), i);
- if (IsUnboxedDoubleField(field_index)) {
- if (value_is_number) {
- double property = RawFastDoublePropertyAt(field_index);
- if (property == value.Number()) {
- return descs.GetKey(i);
- }
- }
- } else {
- Object property = RawFastPropertyAt(field_index);
- if (field_index.is_double()) {
- DCHECK(property.IsHeapNumber());
- if (value_is_number && property.Number() == value.Number()) {
- return descs.GetKey(i);
- }
- } else if (property == value) {
+ Object property = RawFastPropertyAt(field_index);
+ if (field_index.is_double()) {
+ DCHECK(property.IsHeapNumber());
+ if (value_is_number && property.Number() == value.Number()) {
return descs.GetKey(i);
}
+ } else if (property == value) {
+ return descs.GetKey(i);
}
} else {
DCHECK_EQ(kDescriptor, details.location());
@@ -4441,7 +4403,9 @@ static bool PrototypeBenefitsFromNormalization(Handle<JSObject> object) {
DisallowGarbageCollection no_gc;
if (!object->HasFastProperties()) return false;
if (object->IsJSGlobalProxy()) return false;
+ // TODO(v8:11248) make bootstrapper create dict mode prototypes, too?
if (object->GetIsolate()->bootstrapper()->IsActive()) return false;
+ if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL) return true;
return !object->map().is_prototype_map() ||
!object->map().should_be_fast_prototype_map();
}
@@ -4457,14 +4421,31 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
"NormalizeAsPrototype");
}
if (object->map().is_prototype_map()) {
- if (object->map().should_be_fast_prototype_map() &&
+ if (!V8_DICT_PROPERTY_CONST_TRACKING_BOOL &&
+ object->map().should_be_fast_prototype_map() &&
!object->HasFastProperties()) {
JSObject::MigrateSlowToFast(object, 0, "OptimizeAsPrototype");
}
} else {
Handle<Map> new_map =
Map::Copy(isolate, handle(object->map(), isolate), "CopyAsPrototype");
+
JSObject::MigrateToMap(isolate, object, new_map);
+
+ if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL && !object->HasFastProperties()) {
+ Handle<NameDictionary> dict =
+ handle(object->property_dictionary(), isolate);
+ ReadOnlyRoots roots(isolate);
+ for (InternalIndex index : dict->IterateEntries()) {
+ Object k;
+ if (!dict->ToKey(roots, index, &k)) continue;
+
+ PropertyDetails details = dict->DetailsAt(index);
+ details = details.CopyWithConstness(PropertyConstness::kConst);
+ dict->DetailsAtPut(index, details);
+ }
+ }
+
object->map().set_is_prototype_map(true);
// Replace the pointer to the exact constructor with the Object function
@@ -4480,6 +4461,12 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
}
}
}
+#ifdef DEBUG
+ bool should_be_dictionary = V8_DICT_PROPERTY_CONST_TRACKING_BOOL &&
+ enable_setup_mode && !object->IsJSGlobalProxy() &&
+ !object->GetIsolate()->bootstrapper()->IsActive();
+ DCHECK_IMPLIES(should_be_dictionary, object->map().is_dictionary_map());
+#endif
}
// static
@@ -5066,17 +5053,21 @@ bool JSGlobalProxy::IsDetached() const {
void JSGlobalObject::InvalidatePropertyCell(Handle<JSGlobalObject> global,
Handle<Name> name) {
+ Isolate* isolate = global->GetIsolate();
// Regardless of whether the property is there or not invalidate
// Load/StoreGlobalICs that load/store through global object's prototype.
JSObject::InvalidatePrototypeValidityCell(*global);
-
DCHECK(!global->HasFastProperties());
- auto dictionary =
- handle(global->global_dictionary(kAcquireLoad), global->GetIsolate());
- InternalIndex entry = dictionary->FindEntry(global->GetIsolate(), name);
+ auto dictionary = handle(global->global_dictionary(kAcquireLoad), isolate);
+ InternalIndex entry = dictionary->FindEntry(isolate, name);
if (entry.is_not_found()) return;
- PropertyCell::InvalidateAndReplaceEntry(global->GetIsolate(), dictionary,
- entry);
+
+ Handle<PropertyCell> cell(dictionary->CellAt(entry), isolate);
+ Handle<Object> value(cell->value(), isolate);
+ PropertyDetails details = cell->property_details();
+ details = details.set_cell_type(PropertyCellType::kMutable);
+ PropertyCell::InvalidateAndReplaceEntry(isolate, dictionary, entry, details,
+ value);
}
// static
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
index e38cb83ac6..d78df5c431 100644
--- a/deps/v8/src/objects/js-objects.h
+++ b/deps/v8/src/objects/js-objects.h
@@ -227,7 +227,7 @@ class JSReceiver : public HeapObject {
// returned instead.
static Handle<String> GetConstructorName(Handle<JSReceiver> receiver);
- V8_EXPORT_PRIVATE Handle<NativeContext> GetCreationContext();
+ V8_EXPORT_PRIVATE MaybeHandle<NativeContext> GetCreationContext();
V8_WARN_UNUSED_RESULT static inline Maybe<PropertyAttributes>
GetPropertyAttributes(Handle<JSReceiver> object, Handle<Name> name);
@@ -311,6 +311,19 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
static V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> ObjectCreate(
Isolate* isolate, Handle<Object> prototype);
+ DECL_ACCESSORS(elements, FixedArrayBase)
+ DECL_RELAXED_GETTER(elements, FixedArrayBase)
+
+ // Acquire/release semantics on this field are explicitly forbidden to avoid
+ // confusion, since the default setter uses relaxed semantics. If
+ // acquire/release semantics ever become necessary, the default setter should
+ // be reverted to non-atomic behavior, and setters with explicit tags
+ // introduced and used when required.
+ FixedArrayBase elements(IsolateRoot isolate,
+ AcquireLoadTag tag) const = delete;
+ void set_elements(FixedArrayBase value, ReleaseStoreTag tag,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER) = delete;
+
inline void initialize_elements();
static inline void SetMapAndElements(Handle<JSObject> object, Handle<Map> map,
Handle<FixedArrayBase> elements);
@@ -630,26 +643,18 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
int unused_property_fields,
const char* reason);
- inline bool IsUnboxedDoubleField(FieldIndex index) const;
- inline bool IsUnboxedDoubleField(IsolateRoot isolate, FieldIndex index) const;
-
// Access fast-case object properties at index.
static Handle<Object> FastPropertyAt(Handle<JSObject> object,
Representation representation,
FieldIndex index);
inline Object RawFastPropertyAt(FieldIndex index) const;
inline Object RawFastPropertyAt(IsolateRoot isolate, FieldIndex index) const;
- inline double RawFastDoublePropertyAt(FieldIndex index) const;
- inline uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index) const;
- inline void FastPropertyAtPut(FieldIndex index, Object value);
- inline void RawFastPropertyAtPut(
- FieldIndex index, Object value,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void FastPropertyAtPut(FieldIndex index, Object value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline void RawFastInobjectPropertyAtPut(
FieldIndex index, Object value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline void RawFastDoublePropertyAsBitsAtPut(FieldIndex index, uint64_t bits);
inline void WriteToField(InternalIndex descriptor, PropertyDetails details,
Object value);
diff --git a/deps/v8/src/objects/js-promise.tq b/deps/v8/src/objects/js-promise.tq
index ae1c2bcc9d..e8e8c048cf 100644
--- a/deps/v8/src/objects/js-promise.tq
+++ b/deps/v8/src/objects/js-promise.tq
@@ -35,3 +35,7 @@ extern class JSPromise extends JSObject {
reactions_or_result: Zero|PromiseReaction|JSAny;
flags: SmiTagged<JSPromiseFlags>;
}
+
+@doNotGenerateCast
+extern class JSPromiseConstructor extends JSFunction
+ generates 'TNode<JSFunction>';
diff --git a/deps/v8/src/objects/js-regexp-inl.h b/deps/v8/src/objects/js-regexp-inl.h
index 8b99aa7c4c..9a75fff807 100644
--- a/deps/v8/src/objects/js-regexp-inl.h
+++ b/deps/v8/src/objects/js-regexp-inl.h
@@ -24,7 +24,13 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExp)
OBJECT_CONSTRUCTORS_IMPL_CHECK_SUPER(JSRegExpResult, JSArray)
OBJECT_CONSTRUCTORS_IMPL_CHECK_SUPER(JSRegExpResultIndices, JSArray)
+inline JSRegExpResultWithIndices::JSRegExpResultWithIndices(Address ptr)
+ : JSRegExpResult(ptr) {
+ SLOW_DCHECK(IsJSArray());
+}
+
CAST_ACCESSOR(JSRegExpResult)
+CAST_ACCESSOR(JSRegExpResultWithIndices)
CAST_ACCESSOR(JSRegExpResultIndices)
ACCESSORS(JSRegExp, last_index, Object, kLastIndexOffset)
diff --git a/deps/v8/src/objects/js-regexp.cc b/deps/v8/src/objects/js-regexp.cc
index 4149e99024..36d1480daf 100644
--- a/deps/v8/src/objects/js-regexp.cc
+++ b/deps/v8/src/objects/js-regexp.cc
@@ -12,78 +12,6 @@
namespace v8 {
namespace internal {
-MaybeHandle<JSArray> JSRegExpResult::GetAndCacheIndices(
- Isolate* isolate, Handle<JSRegExpResult> regexp_result) {
- // Check for cached indices. We do a slow lookup and set of
- // the cached_indices_or_match_info and names fields just in
- // case they have been migrated to dictionaries.
- Handle<Object> indices_or_regexp(
- GetProperty(
- isolate, regexp_result,
- isolate->factory()->regexp_result_cached_indices_or_regexp_symbol())
- .ToHandleChecked());
- if (indices_or_regexp->IsJSRegExp()) {
- // Build and cache indices for next lookup.
- // TODO(joshualitt): Instead of caching the indices, we could call
- // ReconfigureToDataProperty on 'indices' setting its value to this
- // newly created array. However, care would have to be taken to ensure
- // a new map is not created each time.
-
- // Grab regexp, its last_index, and the original subject string from the
- // result and the re-execute the regexp to generate a new MatchInfo.
- Handle<JSRegExp> regexp(JSRegExp::cast(*indices_or_regexp), isolate);
- Handle<Object> input_object(
- GetProperty(isolate, regexp_result,
- isolate->factory()->regexp_result_regexp_input_symbol())
- .ToHandleChecked());
- Handle<String> subject(String::cast(*input_object), isolate);
- Handle<Object> last_index_object(
- GetProperty(
- isolate, regexp_result,
- isolate->factory()->regexp_result_regexp_last_index_symbol())
- .ToHandleChecked());
-
- int capture_count = regexp->CaptureCount();
- Handle<RegExpMatchInfo> match_info =
- RegExpMatchInfo::New(isolate, capture_count);
-
- int last_index = Smi::ToInt(*last_index_object);
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- RegExp::Exec(isolate, regexp, subject, last_index, match_info),
- JSArray);
- DCHECK_EQ(*result, *match_info);
-
- Handle<Object> maybe_names(
- GetProperty(isolate, regexp_result,
- isolate->factory()->regexp_result_names_symbol())
- .ToHandleChecked());
- indices_or_regexp =
- JSRegExpResultIndices::BuildIndices(isolate, match_info, maybe_names);
-
- // Cache the result and clear the names array, last_index and subject.
- SetProperty(
- isolate, regexp_result,
- isolate->factory()->regexp_result_cached_indices_or_regexp_symbol(),
- indices_or_regexp)
- .ToHandleChecked();
- SetProperty(isolate, regexp_result,
- isolate->factory()->regexp_result_names_symbol(),
- isolate->factory()->undefined_value())
- .ToHandleChecked();
- SetProperty(isolate, regexp_result,
- isolate->factory()->regexp_result_regexp_last_index_symbol(),
- isolate->factory()->undefined_value())
- .ToHandleChecked();
- SetProperty(isolate, regexp_result,
- isolate->factory()->regexp_result_regexp_input_symbol(),
- isolate->factory()->undefined_value())
- .ToHandleChecked();
- }
- return Handle<JSArray>::cast(indices_or_regexp);
-}
-
Handle<JSRegExpResultIndices> JSRegExpResultIndices::BuildIndices(
Isolate* isolate, Handle<RegExpMatchInfo> match_info,
Handle<Object> maybe_names) {
@@ -127,8 +55,8 @@ Handle<JSRegExpResultIndices> JSRegExpResultIndices::BuildIndices(
FieldIndex groups_index = FieldIndex::ForDescriptor(
indices->map(), InternalIndex(kGroupsDescriptorIndex));
if (maybe_names->IsUndefined(isolate)) {
- indices->RawFastPropertyAtPut(groups_index,
- ReadOnlyRoots(isolate).undefined_value());
+ indices->FastPropertyAtPut(groups_index,
+ ReadOnlyRoots(isolate).undefined_value());
return indices;
}
@@ -174,7 +102,7 @@ Handle<JSRegExpResultIndices> JSRegExpResultIndices::BuildIndices(
Handle<JSObject> js_group_names =
isolate->factory()->NewSlowJSObjectWithPropertiesAndElements(
null, group_names, elements);
- indices->RawFastPropertyAtPut(groups_index, *js_group_names);
+ indices->FastPropertyAtPut(groups_index, *js_group_names);
return indices;
}
@@ -234,12 +162,6 @@ MaybeHandle<JSRegExp> JSRegExp::New(Isolate* isolate, Handle<String> pattern,
return JSRegExp::Initialize(regexp, pattern, flags, backtrack_limit);
}
-// static
-Handle<JSRegExp> JSRegExp::Copy(Handle<JSRegExp> regexp) {
- Isolate* const isolate = regexp->GetIsolate();
- return Handle<JSRegExp>::cast(isolate->factory()->CopyJSObject(regexp));
-}
-
Object JSRegExp::Code(bool is_latin1) const {
DCHECK_EQ(TypeTag(), JSRegExp::IRREGEXP);
return DataAt(code_index(is_latin1));
@@ -488,14 +410,16 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
if (constructor.IsJSFunction() &&
JSFunction::cast(constructor).initial_map() == map) {
// If we still have the original map, set in-object properties directly.
- regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex, Smi::zero(),
+ regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
+ Smi::FromInt(kInitialLastIndexValue),
SKIP_WRITE_BARRIER);
} else {
// Map has changed, so use generic, but slower, method.
RETURN_ON_EXCEPTION(
isolate,
- Object::SetProperty(isolate, regexp, factory->lastIndex_string(),
- Handle<Smi>(Smi::zero(), isolate)),
+ Object::SetProperty(
+ isolate, regexp, factory->lastIndex_string(),
+ Handle<Smi>(Smi::FromInt(kInitialLastIndexValue), isolate)),
JSRegExp);
}
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
index b1d1399eab..c23662251a 100644
--- a/deps/v8/src/objects/js-regexp.h
+++ b/deps/v8/src/objects/js-regexp.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_JS_REGEXP_H_
#define V8_OBJECTS_JS_REGEXP_H_
+#include "src/objects/contexts.h"
#include "src/objects/js-array.h"
#include "torque-generated/bit-fields.h"
@@ -43,7 +44,7 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
DEFINE_TORQUE_GENERATED_JS_REG_EXP_FLAGS()
static base::Optional<Flag> FlagFromChar(char c) {
- STATIC_ASSERT(kFlagCount == 7);
+ STATIC_ASSERT(kFlagCount == 8);
// clang-format off
return c == 'g' ? base::Optional<Flag>(kGlobal)
: c == 'i' ? base::Optional<Flag>(kIgnoreCase)
@@ -53,6 +54,8 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
: c == 's' ? base::Optional<Flag>(kDotAll)
: (FLAG_enable_experimental_regexp_engine && c == 'l')
? base::Optional<Flag>(kLinear)
+ : (FLAG_harmony_regexp_match_indices && c == 'd')
+ ? base::Optional<Flag>(kHasIndices)
: base::Optional<Flag>();
// clang-format on
}
@@ -65,6 +68,7 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
STATIC_ASSERT(static_cast<int>(kUnicode) == v8::RegExp::kUnicode);
STATIC_ASSERT(static_cast<int>(kDotAll) == v8::RegExp::kDotAll);
STATIC_ASSERT(static_cast<int>(kLinear) == v8::RegExp::kLinear);
+ STATIC_ASSERT(static_cast<int>(kHasIndices) == v8::RegExp::kHasIndices);
STATIC_ASSERT(kFlagCount == v8::RegExp::kFlagCount);
DECL_ACCESSORS(last_index, Object)
@@ -75,7 +79,6 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
V8_EXPORT_PRIVATE static MaybeHandle<JSRegExp> New(
Isolate* isolate, Handle<String> source, Flags flags,
uint32_t backtrack_limit = kNoBacktrackLimit);
- static Handle<JSRegExp> Copy(Handle<JSRegExp> regexp);
static MaybeHandle<JSRegExp> Initialize(
Handle<JSRegExp> regexp, Handle<String> source, Flags flags,
@@ -143,6 +146,9 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
// TODO(v8:8944): improve handling of in-object fields
static constexpr int kLastIndexOffset = kHeaderSize;
+ // The initial value of the last_index field on a new JSRegExp instance.
+ static constexpr int kInitialLastIndexValue = 0;
+
// Indices in the data array.
static const int kTagIndex = 0;
static const int kSourceIndex = kTagIndex + 1;
@@ -211,6 +217,9 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
static const int kLastIndexFieldIndex = 0;
static const int kInObjectFieldCount = 1;
+ // The actual object size including in-object fields.
+ static int Size() { return kHeaderSize + kInObjectFieldCount * kTaggedSize; }
+
// Descriptor array index to important methods in the prototype.
static const int kExecFunctionDescriptorIndex = 1;
static const int kSymbolMatchFunctionDescriptorIndex = 13;
@@ -249,24 +258,40 @@ class JSRegExpResult : public JSArray {
DEFINE_FIELD_OFFSET_CONSTANTS(JSArray::kHeaderSize,
TORQUE_GENERATED_JS_REG_EXP_RESULT_FIELDS)
- static MaybeHandle<JSArray> GetAndCacheIndices(
- Isolate* isolate, Handle<JSRegExpResult> regexp_result);
-
// Indices of in-object properties.
static const int kIndexIndex = 0;
static const int kInputIndex = 1;
static const int kGroupsIndex = 2;
// Private internal only fields.
- static const int kCachedIndicesOrRegExpIndex = 3;
- static const int kNamesIndex = 4;
- static const int kRegExpInputIndex = 5;
- static const int kRegExpLastIndex = 6;
- static const int kInObjectPropertyCount = 7;
+ static const int kNamesIndex = 3;
+ static const int kRegExpInputIndex = 4;
+ static const int kRegExpLastIndex = 5;
+ static const int kInObjectPropertyCount = 6;
+
+ static const int kMapIndexInContext = Context::REGEXP_RESULT_MAP_INDEX;
OBJECT_CONSTRUCTORS(JSRegExpResult, JSArray);
};
+class JSRegExpResultWithIndices : public JSRegExpResult {
+ public:
+ DECL_CAST(JSRegExpResultWithIndices)
+
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ JSRegExpResult::kSize,
+ TORQUE_GENERATED_JS_REG_EXP_RESULT_WITH_INDICES_FIELDS)
+
+ static_assert(
+ JSRegExpResult::kInObjectPropertyCount == 6,
+ "JSRegExpResultWithIndices must be a subclass of JSRegExpResult");
+ static const int kIndicesIndex = 6;
+ static const int kInObjectPropertyCount = 7;
+
+ OBJECT_CONSTRUCTORS(JSRegExpResultWithIndices, JSRegExpResult);
+};
+
// JSRegExpResultIndices is just a JSArray with a specific initial map.
// This initial map adds in-object properties for "group"
// properties, as assigned by RegExp.prototype.exec, which allows
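FlagFromChar above now recognizes 'd' (hasIndices) behind FLAG_harmony_regexp_match_indices, bringing kFlagCount to 8. A hedged sketch of how a caller could fold a flag string into a bit set with this helper; ParseFlagChars itself is hypothetical, only FlagFromChar and the Flag enum come from the header:

    // Hypothetical illustration only.
    int ParseFlagChars(const char* chars) {
      int flags = 0;
      for (const char* p = chars; *p != '\0'; ++p) {
        base::Optional<JSRegExp::Flag> f = JSRegExp::FlagFromChar(*p);
        if (!f.has_value()) return -1;  // Unknown flag character.
        int bit = static_cast<int>(*f);
        if (flags & bit) return -1;     // Duplicate flag.
        flags |= bit;
      }
      return flags;  // e.g. "gd" -> kGlobal | kHasIndices (when 'd' is enabled).
    }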
diff --git a/deps/v8/src/objects/js-regexp.tq b/deps/v8/src/objects/js-regexp.tq
index 6d3fc113cd..d8cff3fced 100644
--- a/deps/v8/src/objects/js-regexp.tq
+++ b/deps/v8/src/objects/js-regexp.tq
@@ -10,6 +10,7 @@ bitfield struct JSRegExpFlags extends uint31 {
unicode: bool: 1 bit;
dot_all: bool: 1 bit;
linear: bool: 1 bit;
+ has_indices: bool: 1 bit;
}
@generateCppClass
@@ -34,6 +35,10 @@ RegExpBuiltinsAssembler::FastLoadLastIndex(FastJSRegExp): Smi;
extern operator '.lastIndex=' macro
RegExpBuiltinsAssembler::FastStoreLastIndex(FastJSRegExp, Smi): void;
+@doNotGenerateCast
+extern class JSRegExpConstructor extends JSFunction
+ generates 'TNode<JSFunction>';
+
extern shape JSRegExpResult extends JSArray {
// In-object properties:
// The below fields are externally exposed.
@@ -42,12 +47,15 @@ extern shape JSRegExpResult extends JSArray {
groups: JSAny;
// The below fields are for internal use only.
- cached_indices_or_regexp: JSRegExpResultIndices|JSRegExp;
names: FixedArray|Undefined;
regexp_input: String;
regexp_last_index: Smi;
}
+extern shape JSRegExpResultWithIndices extends JSRegExpResult {
+ indices: JSAny;
+}
+
extern shape JSRegExpResultIndices extends JSArray {
// In-object properties:
// The groups field is externally exposed.
diff --git a/deps/v8/src/objects/layout-descriptor-inl.h b/deps/v8/src/objects/layout-descriptor-inl.h
deleted file mode 100644
index f87143bade..0000000000
--- a/deps/v8/src/objects/layout-descriptor-inl.h
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_OBJECTS_LAYOUT_DESCRIPTOR_INL_H_
-#define V8_OBJECTS_LAYOUT_DESCRIPTOR_INL_H_
-
-#include "src/objects/layout-descriptor.h"
-
-#include "src/handles/handles-inl.h"
-#include "src/objects/descriptor-array-inl.h"
-#include "src/objects/fixed-array-inl.h"
-#include "src/objects/objects-inl.h"
-#include "src/objects/smi.h"
-
-// Has to be the last include (doesn't have include guards):
-#include "src/objects/object-macros.h"
-
-namespace v8 {
-namespace internal {
-
-LayoutDescriptor::LayoutDescriptor(Address ptr)
- : ByteArray(ptr, AllowInlineSmiStorage::kAllowBeingASmi) {
- SLOW_DCHECK(IsLayoutDescriptor());
-}
-CAST_ACCESSOR(LayoutDescriptor)
-
-LayoutDescriptor LayoutDescriptor::FromSmi(Smi smi) {
- return LayoutDescriptor::cast(smi);
-}
-
-Handle<LayoutDescriptor> LayoutDescriptor::New(Isolate* isolate, int length) {
- if (length <= kBitsInSmiLayout) {
- // The whole bit vector fits into a smi.
- return handle(LayoutDescriptor::FromSmi(Smi::zero()), isolate);
- }
- int backing_store_length = GetSlowModeBackingStoreLength(length);
- Handle<LayoutDescriptor> result =
- Handle<LayoutDescriptor>::cast(isolate->factory()->NewByteArray(
- backing_store_length, AllocationType::kOld));
- memset(reinterpret_cast<void*>(result->GetDataStartAddress()), 0,
- result->DataSize());
- return result;
-}
-
-bool LayoutDescriptor::InobjectUnboxedField(int inobject_properties,
- PropertyDetails details) {
- if (details.location() != kField || !details.representation().IsDouble()) {
- return false;
- }
- // We care only about in-object properties.
- return details.field_index() < inobject_properties;
-}
-
-LayoutDescriptor LayoutDescriptor::FastPointerLayout() {
- return LayoutDescriptor::FromSmi(Smi::zero());
-}
-
-bool LayoutDescriptor::GetIndexes(int field_index, int* layout_word_index,
- int* layout_bit_index) {
- if (static_cast<unsigned>(field_index) >= static_cast<unsigned>(capacity())) {
- return false;
- }
-
- *layout_word_index = field_index / kBitsPerLayoutWord;
- CHECK((!IsSmi() && (*layout_word_index < length())) ||
- (IsSmi() && (*layout_word_index < 1)));
-
- *layout_bit_index = field_index % kBitsPerLayoutWord;
- return true;
-}
-
-LayoutDescriptor LayoutDescriptor::SetRawData(int field_index) {
- return SetTagged(field_index, false);
-}
-
-LayoutDescriptor LayoutDescriptor::SetTagged(int field_index, bool tagged) {
- int layout_word_index = 0;
- int layout_bit_index = 0;
-
- CHECK(GetIndexes(field_index, &layout_word_index, &layout_bit_index));
- uint32_t layout_mask = static_cast<uint32_t>(1) << layout_bit_index;
-
- if (IsSlowLayout()) {
- uint32_t value = get_layout_word(layout_word_index);
- if (tagged) {
- value &= ~layout_mask;
- } else {
- value |= layout_mask;
- }
- set_layout_word(layout_word_index, value);
- return *this;
- } else {
- uint32_t value = static_cast<uint32_t>(Smi::ToInt(*this));
- if (tagged) {
- value &= ~layout_mask;
- } else {
- value |= layout_mask;
- }
- return LayoutDescriptor::FromSmi(Smi::FromInt(static_cast<int>(value)));
- }
-}
-
-bool LayoutDescriptor::IsTagged(int field_index) {
- if (IsFastPointerLayout()) return true;
-
- int layout_word_index;
- int layout_bit_index;
-
- if (!GetIndexes(field_index, &layout_word_index, &layout_bit_index)) {
- // All bits after Out of bounds queries
- return true;
- }
- uint32_t layout_mask = static_cast<uint32_t>(1) << layout_bit_index;
-
- if (IsSlowLayout()) {
- uint32_t value = get_layout_word(layout_word_index);
- return (value & layout_mask) == 0;
- } else {
- uint32_t value = static_cast<uint32_t>(Smi::ToInt(*this));
- return (value & layout_mask) == 0;
- }
-}
-
-bool LayoutDescriptor::IsFastPointerLayout() {
- return *this == FastPointerLayout();
-}
-
-bool LayoutDescriptor::IsFastPointerLayout(Object layout_descriptor) {
- return layout_descriptor == FastPointerLayout();
-}
-
-bool LayoutDescriptor::IsSlowLayout() { return !IsSmi(); }
-
-int LayoutDescriptor::capacity() {
- return IsSlowLayout() ? (length() * kBitsPerByte) : kBitsInSmiLayout;
-}
-
-LayoutDescriptor LayoutDescriptor::cast_gc_safe(Object object) {
- // The map word of the object can be a forwarding pointer during
- // object evacuation phase of GC. Since the layout descriptor methods
- // for checking whether a field is tagged or not do not depend on the
- // object map, it should be safe.
- return LayoutDescriptor::unchecked_cast(object);
-}
-
-int LayoutDescriptor::GetSlowModeBackingStoreLength(int length) {
- DCHECK_LT(0, length);
- // We allocate kTaggedSize rounded blocks of memory anyway so we increase
- // the length of allocated array to utilize that "lost" space which could
- // also help to avoid layout descriptor reallocations.
- return RoundUp(length, kBitsPerByte * kTaggedSize) / kBitsPerByte;
-}
-
-int LayoutDescriptor::CalculateCapacity(Map map, DescriptorArray descriptors,
- int num_descriptors) {
- int inobject_properties = map.GetInObjectProperties();
- if (inobject_properties == 0) return 0;
-
- DCHECK_LE(num_descriptors, descriptors.number_of_descriptors());
-
- int layout_descriptor_length;
- const int kMaxWordsPerField = kDoubleSize / kTaggedSize;
-
- if (num_descriptors <= kBitsInSmiLayout / kMaxWordsPerField) {
- // Even in the "worst" case (all fields are doubles) it would fit into
- // a Smi, so no need to calculate length.
- layout_descriptor_length = kBitsInSmiLayout;
-
- } else {
- layout_descriptor_length = 0;
-
- for (InternalIndex i : InternalIndex::Range(num_descriptors)) {
- PropertyDetails details = descriptors.GetDetails(i);
- if (!InobjectUnboxedField(inobject_properties, details)) continue;
- int field_index = details.field_index();
- int field_width_in_words = details.field_width_in_words();
- layout_descriptor_length = std::max(layout_descriptor_length,
- field_index + field_width_in_words);
- }
- }
- layout_descriptor_length =
- std::min(layout_descriptor_length, inobject_properties);
- return layout_descriptor_length;
-}
-
-LayoutDescriptor LayoutDescriptor::Initialize(
- LayoutDescriptor layout_descriptor, Map map, DescriptorArray descriptors,
- int num_descriptors) {
- DisallowGarbageCollection no_gc;
- int inobject_properties = map.GetInObjectProperties();
-
- for (InternalIndex i : InternalIndex::Range(num_descriptors)) {
- PropertyDetails details = descriptors.GetDetails(i);
- if (!InobjectUnboxedField(inobject_properties, details)) {
- DCHECK(details.location() != kField ||
- layout_descriptor.IsTagged(details.field_index()));
- continue;
- }
- int field_index = details.field_index();
- layout_descriptor = layout_descriptor.SetRawData(field_index);
- if (details.field_width_in_words() > 1) {
- layout_descriptor = layout_descriptor.SetRawData(field_index + 1);
- }
- }
- return layout_descriptor;
-}
-
-int LayoutDescriptor::number_of_layout_words() {
- return length() / kUInt32Size;
-}
-
-uint32_t LayoutDescriptor::get_layout_word(int index) const {
- return get_uint32_relaxed(index);
-}
-
-void LayoutDescriptor::set_layout_word(int index, uint32_t value) {
- set_uint32_relaxed(index, value);
-}
-
-// LayoutDescriptorHelper is a helper class for querying whether inobject
-// property at offset is Double or not.
-LayoutDescriptorHelper::LayoutDescriptorHelper(Map map)
- : all_fields_tagged_(true),
- header_size_(0),
- layout_descriptor_(LayoutDescriptor::FastPointerLayout()) {
- if (!FLAG_unbox_double_fields) return;
-
- layout_descriptor_ = map.layout_descriptor_gc_safe();
- if (layout_descriptor_.IsFastPointerLayout()) {
- return;
- }
-
- header_size_ = map.GetInObjectPropertiesStartInWords() * kTaggedSize;
- DCHECK_GE(header_size_, 0);
-
- all_fields_tagged_ = false;
-}
-
-bool LayoutDescriptorHelper::IsTagged(int offset_in_bytes) {
- DCHECK(IsAligned(offset_in_bytes, kTaggedSize));
- if (all_fields_tagged_) return true;
- // Object headers do not contain non-tagged fields.
- if (offset_in_bytes < header_size_) return true;
- int field_index = (offset_in_bytes - header_size_) / kTaggedSize;
-
- return layout_descriptor_.IsTagged(field_index);
-}
-
-} // namespace internal
-} // namespace v8
-
-#include "src/objects/object-macros-undef.h"
-
-#endif // V8_OBJECTS_LAYOUT_DESCRIPTOR_INL_H_
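For context on the removed helpers above: a layout descriptor was a bit vector over in-object fields, and slow-mode descriptors rounded their byte-array backing store up to whole tagged words so the unavoidable allocation slack became usable capacity. A minimal sketch of that arithmetic, with kBitsPerByte, kTaggedSize and kBitsPerLayoutWord assumed here (8, 8 and 32) rather than taken from V8 headers:

#include <cstdint>
#include <utility>

constexpr int kBitsPerByte = 8;         // assumed
constexpr int kTaggedSize = 8;          // assumed: 64-bit tagged pointer size
constexpr int kBitsPerLayoutWord = 32;  // assumed

// Mirrors GetSlowModeBackingStoreLength: round the bit count up to whole
// kTaggedSize blocks, then convert bits to bytes.
constexpr int SlowModeBackingStoreLength(int length_in_bits) {
  int block = kBitsPerByte * kTaggedSize;  // 64 bits per tagged word
  return (length_in_bits + block - 1) / block * kTaggedSize;
}

// A field index selects a 32-bit layout word and a bit within that word.
constexpr std::pair<int, int> WordAndBit(int field_index) {
  return {field_index / kBitsPerLayoutWord, field_index % kBitsPerLayoutWord};
}

static_assert(SlowModeBackingStoreLength(65) == 16, "65 bits need 2 words");
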
diff --git a/deps/v8/src/objects/layout-descriptor.cc b/deps/v8/src/objects/layout-descriptor.cc
deleted file mode 100644
index abc3d5c2f8..0000000000
--- a/deps/v8/src/objects/layout-descriptor.cc
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/objects/layout-descriptor.h"
-
-#include <sstream>
-
-#include "src/base/bits.h"
-#include "src/base/platform/wrappers.h"
-#include "src/handles/handles-inl.h"
-#include "src/objects/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-Handle<LayoutDescriptor> LayoutDescriptor::New(
- Isolate* isolate, Handle<Map> map, Handle<DescriptorArray> descriptors,
- int num_descriptors) {
- if (!FLAG_unbox_double_fields) return handle(FastPointerLayout(), isolate);
-
- int layout_descriptor_length =
- CalculateCapacity(*map, *descriptors, num_descriptors);
-
- if (layout_descriptor_length == 0) {
- // No double fields were found, use fast pointer layout.
- return handle(FastPointerLayout(), isolate);
- }
-
- // Initially, layout descriptor corresponds to an object with all fields
- // tagged.
- Handle<LayoutDescriptor> layout_descriptor_handle =
- LayoutDescriptor::New(isolate, layout_descriptor_length);
-
- LayoutDescriptor layout_descriptor = Initialize(
- *layout_descriptor_handle, *map, *descriptors, num_descriptors);
-
- return handle(layout_descriptor, isolate);
-}
-
-Handle<LayoutDescriptor> LayoutDescriptor::ShareAppend(
- Isolate* isolate, Handle<Map> map, PropertyDetails details) {
- DCHECK(map->owns_descriptors());
- Handle<LayoutDescriptor> layout_descriptor(map->GetLayoutDescriptor(),
- isolate);
-
- if (!InobjectUnboxedField(map->GetInObjectProperties(), details)) {
- DCHECK(details.location() != kField ||
- layout_descriptor->IsTagged(details.field_index()));
- return layout_descriptor;
- }
- int field_index = details.field_index();
- layout_descriptor = LayoutDescriptor::EnsureCapacity(
- isolate, layout_descriptor, field_index + details.field_width_in_words());
-
- DisallowGarbageCollection no_gc;
- LayoutDescriptor layout_desc = *layout_descriptor;
- layout_desc = layout_desc.SetRawData(field_index);
- if (details.field_width_in_words() > 1) {
- layout_desc = layout_desc.SetRawData(field_index + 1);
- }
- return handle(layout_desc, isolate);
-}
-
-Handle<LayoutDescriptor> LayoutDescriptor::AppendIfFastOrUseFull(
- Isolate* isolate, Handle<Map> map, PropertyDetails details,
- Handle<LayoutDescriptor> full_layout_descriptor) {
- DisallowGarbageCollection no_gc;
- LayoutDescriptor layout_descriptor = map->layout_descriptor(kAcquireLoad);
- if (layout_descriptor.IsSlowLayout()) {
- return full_layout_descriptor;
- }
- if (!InobjectUnboxedField(map->GetInObjectProperties(), details)) {
- DCHECK(details.location() != kField ||
- layout_descriptor.IsTagged(details.field_index()));
- return handle(layout_descriptor, isolate);
- }
- int field_index = details.field_index();
- int new_capacity = field_index + details.field_width_in_words();
- if (new_capacity > layout_descriptor.capacity()) {
- // Current map's layout descriptor runs out of space, so use the full
- // layout descriptor.
- return full_layout_descriptor;
- }
-
- layout_descriptor = layout_descriptor.SetRawData(field_index);
- if (details.field_width_in_words() > 1) {
- layout_descriptor = layout_descriptor.SetRawData(field_index + 1);
- }
- return handle(layout_descriptor, isolate);
-}
-
-Handle<LayoutDescriptor> LayoutDescriptor::EnsureCapacity(
- Isolate* isolate, Handle<LayoutDescriptor> layout_descriptor,
- int new_capacity) {
- int old_capacity = layout_descriptor->capacity();
- if (new_capacity <= old_capacity) {
- return layout_descriptor;
- }
- Handle<LayoutDescriptor> new_layout_descriptor =
- LayoutDescriptor::New(isolate, new_capacity);
- DCHECK(new_layout_descriptor->IsSlowLayout());
-
- if (layout_descriptor->IsSlowLayout()) {
- base::Memcpy(new_layout_descriptor->GetDataStartAddress(),
- layout_descriptor->GetDataStartAddress(),
- layout_descriptor->DataSize());
- return new_layout_descriptor;
- } else {
- // Fast layout.
- uint32_t value = static_cast<uint32_t>(Smi::ToInt(*layout_descriptor));
- new_layout_descriptor->set_layout_word(0, value);
- return new_layout_descriptor;
- }
-}
-
-bool LayoutDescriptor::IsTagged(int field_index, int max_sequence_length,
- int* out_sequence_length) {
- DCHECK_GT(max_sequence_length, 0);
- if (IsFastPointerLayout()) {
- *out_sequence_length = max_sequence_length;
- return true;
- }
-
- int layout_word_index;
- int layout_bit_index;
-
- if (!GetIndexes(field_index, &layout_word_index, &layout_bit_index)) {
- // Out of bounds queries are considered tagged.
- *out_sequence_length = max_sequence_length;
- return true;
- }
- uint32_t layout_mask = static_cast<uint32_t>(1) << layout_bit_index;
-
- uint32_t value = IsSlowLayout() ? get_layout_word(layout_word_index)
- : static_cast<uint32_t>(Smi::ToInt(*this));
-
- bool is_tagged = (value & layout_mask) == 0;
- if (!is_tagged) value = ~value; // Count set bits instead of cleared bits.
- value = value & ~(layout_mask - 1); // Clear bits we are not interested in.
- int sequence_length;
- if (IsSlowLayout()) {
- sequence_length = base::bits::CountTrailingZeros(value) - layout_bit_index;
-
- if (layout_bit_index + sequence_length == kBitsPerLayoutWord) {
- // This is a contiguous sequence till the end of current word, proceed
- // counting in the subsequent words.
- ++layout_word_index;
- int num_words = number_of_layout_words();
- for (; layout_word_index < num_words; layout_word_index++) {
- value = get_layout_word(layout_word_index);
- bool cur_is_tagged = (value & 1) == 0;
- if (cur_is_tagged != is_tagged) break;
- if (!is_tagged) value = ~value; // Count set bits instead.
- int cur_sequence_length = base::bits::CountTrailingZeros(value);
- sequence_length += cur_sequence_length;
- if (sequence_length >= max_sequence_length) break;
- if (cur_sequence_length != kBitsPerLayoutWord) break;
- }
- if (is_tagged && (field_index + sequence_length == capacity())) {
- // The contiguous sequence of tagged fields lasts till the end of the
- // layout descriptor which means that all the fields starting from
- // field_index are tagged.
- sequence_length = std::numeric_limits<int>::max();
- }
- }
- } else { // Fast layout.
- sequence_length = std::min(base::bits::CountTrailingZeros(value),
- static_cast<unsigned>(kBitsInSmiLayout)) -
- layout_bit_index;
- if (is_tagged && (field_index + sequence_length == capacity())) {
- // The contiguous sequence of tagged fields lasts till the end of the
- // layout descriptor which means that all the fields starting from
- // field_index are tagged.
- sequence_length = std::numeric_limits<int>::max();
- }
- }
- *out_sequence_length = std::min(sequence_length, max_sequence_length);
- return is_tagged;
-}
-
-Handle<LayoutDescriptor> LayoutDescriptor::NewForTesting(Isolate* isolate,
- int length) {
- return New(isolate, length);
-}
-
-LayoutDescriptor LayoutDescriptor::SetTaggedForTesting(int field_index,
- bool tagged) {
- return SetTagged(field_index, tagged);
-}
-
-bool LayoutDescriptorHelper::IsTagged(
- int offset_in_bytes, int end_offset,
- int* out_end_of_contiguous_region_offset) {
- DCHECK(IsAligned(offset_in_bytes, kTaggedSize));
- DCHECK(IsAligned(end_offset, kTaggedSize));
- DCHECK(offset_in_bytes < end_offset);
- if (all_fields_tagged_) {
- *out_end_of_contiguous_region_offset = end_offset;
- DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset);
- return true;
- }
- int max_sequence_length = (end_offset - offset_in_bytes) / kTaggedSize;
- int field_index = std::max(0, (offset_in_bytes - header_size_) / kTaggedSize);
- int sequence_length;
- bool tagged = layout_descriptor_.IsTagged(field_index, max_sequence_length,
- &sequence_length);
- DCHECK_GT(sequence_length, 0);
- if (offset_in_bytes < header_size_) {
- // Object headers do not contain non-tagged fields. Check if the contiguous
- // region continues after the header.
- if (tagged) {
- // First field is tagged, calculate end offset from there.
- *out_end_of_contiguous_region_offset =
- header_size_ + sequence_length * kTaggedSize;
-
- } else {
- *out_end_of_contiguous_region_offset = header_size_;
- }
- DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset);
- return true;
- }
- *out_end_of_contiguous_region_offset =
- offset_in_bytes + sequence_length * kTaggedSize;
- DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset);
- return tagged;
-}
-
-LayoutDescriptor LayoutDescriptor::Trim(Heap* heap, Map map,
- DescriptorArray descriptors,
- int num_descriptors) {
- DisallowGarbageCollection no_gc;
- // Fast mode descriptors are never shared and therefore always fully
- // correspond to their map.
- if (!IsSlowLayout()) return *this;
-
- int layout_descriptor_length =
- CalculateCapacity(map, descriptors, num_descriptors);
- // It must not become a fast-mode descriptor here, because otherwise it
- // would have to be the fast pointer layout descriptor already, but it is
- // in slow mode now.
- DCHECK_LT(kBitsInSmiLayout, layout_descriptor_length);
-
- // Trim, clean and reinitialize this slow-mode layout descriptor.
- int new_backing_store_length =
- GetSlowModeBackingStoreLength(layout_descriptor_length);
- int backing_store_length = length();
- if (new_backing_store_length != backing_store_length) {
- DCHECK_LT(new_backing_store_length, backing_store_length);
- int delta = backing_store_length - new_backing_store_length;
- heap->RightTrimFixedArray(*this, delta);
- }
- memset(GetDataStartAddress(), 0, DataSize());
- LayoutDescriptor layout_descriptor =
- Initialize(*this, map, descriptors, num_descriptors);
- DCHECK_EQ(*this, layout_descriptor);
- return layout_descriptor;
-}
-
-bool LayoutDescriptor::IsConsistentWithMap(Map map, bool check_tail) {
- if (FLAG_unbox_double_fields) {
- DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
- int last_field_index = 0;
- for (InternalIndex i : map.IterateOwnDescriptors()) {
- PropertyDetails details = descriptors.GetDetails(i);
- if (details.location() != kField) continue;
- FieldIndex field_index = FieldIndex::ForDescriptor(map, i);
- bool tagged_expected =
- !field_index.is_inobject() || !details.representation().IsDouble();
- for (int bit = 0; bit < details.field_width_in_words(); bit++) {
- bool tagged_actual = IsTagged(details.field_index() + bit);
- DCHECK_EQ(tagged_expected, tagged_actual);
- if (tagged_actual != tagged_expected) return false;
- }
- last_field_index =
- std::max(last_field_index,
- details.field_index() + details.field_width_in_words());
- }
- if (check_tail) {
- int n = capacity();
- for (int i = last_field_index; i < n; i++) {
- DCHECK(IsTagged(i));
- }
- }
- }
- return true;
-}
-} // namespace internal
-} // namespace v8
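The IsTagged(field_index, max_sequence_length, out) query above derives the length of a run of equally-tagged fields by inverting the word when needed, masking off lower bits and counting trailing zeros. A single-word sketch of that idea, assuming a GCC/Clang __builtin_ctz and leaving out the multi-word continuation and the end-of-capacity special case handled by the real code:

#include <algorithm>
#include <cstdint>

// Returns whether |field_index| (assumed < 32) is tagged and how many
// consecutive fields share that taggedness, capped at |max_sequence_length|.
inline bool IsTaggedRun(uint32_t layout_word, int field_index,
                        int max_sequence_length, int* out_sequence_length) {
  const uint32_t mask = uint32_t{1} << field_index;
  const bool is_tagged = (layout_word & mask) == 0;
  // Count set bits for an untagged run, cleared bits for a tagged run.
  uint32_t value = is_tagged ? layout_word : ~layout_word;
  value &= ~(mask - 1);  // Ignore bits below field_index.
  int run = value == 0 ? 32 - field_index
                       : __builtin_ctz(value) - field_index;
  *out_sequence_length = std::min(run, max_sequence_length);
  return is_tagged;
}
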
diff --git a/deps/v8/src/objects/layout-descriptor.h b/deps/v8/src/objects/layout-descriptor.h
deleted file mode 100644
index 2311594ff6..0000000000
--- a/deps/v8/src/objects/layout-descriptor.h
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_OBJECTS_LAYOUT_DESCRIPTOR_H_
-#define V8_OBJECTS_LAYOUT_DESCRIPTOR_H_
-
-#include <iosfwd>
-
-#include "src/objects/fixed-array.h"
-
-// Has to be the last include (doesn't have include guards):
-#include "src/objects/object-macros.h"
-
-namespace v8 {
-namespace internal {
-
-// LayoutDescriptor is a bit vector defining which fields contain non-tagged
-// values. It could either be a fixed typed array (slow form) or a Smi
-// if the length fits (fast form).
-// Each bit in the layout represents a FIELD. The bits are referenced by
-// field_index which is a field number. If the bit is set then the corresponding
-// field contains a non-tagged value and therefore must be skipped by GC.
-// Otherwise the field is considered tagged. If the queried bit lies "outside"
-// of the descriptor then the field is also considered tagged.
-// Once a layout descriptor is created it is allowed only to append properties
-// to it. GC uses layout descriptors to iterate objects. Avoid heap pointers
-// in a layout descriptor because they can lead to data races in GC when
-// GC moves objects in parallel.
-class V8_EXPORT_PRIVATE LayoutDescriptor : public ByteArray {
- public:
- V8_INLINE bool IsTagged(int field_index);
-
- // Queries the contiguous region of fields that are either tagged or not.
- // Returns true if the given field is tagged or false otherwise and writes
- // the length of the contiguous region to |out_sequence_length|.
- // If the sequence is longer than |max_sequence_length| then
- // |out_sequence_length| is set to |max_sequence_length|.
- bool IsTagged(int field_index, int max_sequence_length,
- int* out_sequence_length);
-
- // Returns true if this is a layout of the object having only tagged fields.
- V8_INLINE bool IsFastPointerLayout();
- V8_INLINE static bool IsFastPointerLayout(Object layout_descriptor);
-
- // Returns true if the layout descriptor is in non-Smi form.
- V8_INLINE bool IsSlowLayout();
-
- DECL_CAST(LayoutDescriptor)
-
- V8_INLINE static LayoutDescriptor cast_gc_safe(Object object);
-
- // Builds layout descriptor optimized for given |map| by |num_descriptors|
- // elements of given descriptors array. The |map|'s descriptors could be
- // different.
- static Handle<LayoutDescriptor> New(Isolate* isolate, Handle<Map> map,
- Handle<DescriptorArray> descriptors,
- int num_descriptors);
-
- // Modifies |map|'s layout descriptor or creates a new one if necessary by
- // appending property with |details| to it.
- static Handle<LayoutDescriptor> ShareAppend(Isolate* isolate, Handle<Map> map,
- PropertyDetails details);
-
- // Creates new layout descriptor by appending property with |details| to
- // |map|'s layout descriptor and if it is still fast then returns it.
- // Otherwise the |full_layout_descriptor| is returned.
- static Handle<LayoutDescriptor> AppendIfFastOrUseFull(
- Isolate* isolate, Handle<Map> map, PropertyDetails details,
- Handle<LayoutDescriptor> full_layout_descriptor);
-
- // Layout descriptor that corresponds to an object all fields of which are
- // tagged (FastPointerLayout).
- V8_INLINE static LayoutDescriptor FastPointerLayout();
-
- // Check that this layout descriptor corresponds to given map.
- bool IsConsistentWithMap(Map map, bool check_tail = false);
-
- // Trims this layout descriptor to given number of descriptors. This happens
- // only when corresponding descriptors array is trimmed.
- // The layout descriptor could be trimmed if it was slow or it could
- // become fast.
- LayoutDescriptor Trim(Heap* heap, Map map, DescriptorArray descriptors,
- int num_descriptors);
-
-#ifdef OBJECT_PRINT
- // For our gdb macros, we should perhaps change these in the future.
- void Print();
-
- void ShortPrint(std::ostream& os);
- void Print(std::ostream& os); // NOLINT
-#endif
-
- // Capacity of layout descriptors in bits.
- V8_INLINE int capacity();
-
- static Handle<LayoutDescriptor> NewForTesting(Isolate* isolate, int length);
- LayoutDescriptor SetTaggedForTesting(int field_index, bool tagged);
-
- private:
- // Exclude sign-bit to simplify encoding.
- static constexpr int kBitsInSmiLayout =
- SmiValuesAre32Bits() ? 32 : kSmiValueSize - 1;
-
- static const int kBitsPerLayoutWord = 32;
-
- V8_INLINE int number_of_layout_words();
- V8_INLINE uint32_t get_layout_word(int index) const;
- V8_INLINE void set_layout_word(int index, uint32_t value);
-
- V8_INLINE static Handle<LayoutDescriptor> New(Isolate* isolate, int length);
- V8_INLINE static LayoutDescriptor FromSmi(Smi smi);
-
- V8_INLINE static bool InobjectUnboxedField(int inobject_properties,
- PropertyDetails details);
-
- // Calculates minimal layout descriptor capacity required for given
- // |map|, |descriptors| and |num_descriptors|.
- V8_INLINE static int CalculateCapacity(Map map, DescriptorArray descriptors,
- int num_descriptors);
-
- // Calculates the length of the slow-mode backing store array by given layout
- // descriptor length.
- V8_INLINE static int GetSlowModeBackingStoreLength(int length);
-
- // Fills in clean |layout_descriptor| according to given |map|, |descriptors|
- // and |num_descriptors|.
- V8_INLINE static LayoutDescriptor Initialize(
- LayoutDescriptor layout_descriptor, Map map, DescriptorArray descriptors,
- int num_descriptors);
-
- static Handle<LayoutDescriptor> EnsureCapacity(
- Isolate* isolate, Handle<LayoutDescriptor> layout_descriptor,
- int new_capacity);
-
- // Returns false if requested field_index is out of bounds.
- V8_INLINE bool GetIndexes(int field_index, int* layout_word_index,
- int* layout_bit_index);
-
- V8_INLINE V8_WARN_UNUSED_RESULT LayoutDescriptor SetRawData(int field_index);
-
- V8_INLINE V8_WARN_UNUSED_RESULT LayoutDescriptor SetTagged(int field_index,
- bool tagged);
-
- OBJECT_CONSTRUCTORS(LayoutDescriptor, ByteArray);
-};
-
-// LayoutDescriptorHelper is a helper class for querying layout descriptor
-// about whether the field at given offset is tagged or not.
-class LayoutDescriptorHelper {
- public:
- inline explicit LayoutDescriptorHelper(Map map);
-
- bool all_fields_tagged() { return all_fields_tagged_; }
- inline bool IsTagged(int offset_in_bytes);
-
- // Queries the contiguous region of fields that are either tagged or not.
- // Returns true if fields starting at |offset_in_bytes| are tagged or false
- // otherwise and writes the offset of the end of the contiguous region to
- // |out_end_of_contiguous_region_offset|. The |end_offset| value is the
- // upper bound for |out_end_of_contiguous_region_offset|.
- V8_EXPORT_PRIVATE bool IsTagged(int offset_in_bytes, int end_offset,
- int* out_end_of_contiguous_region_offset);
-
- private:
- bool all_fields_tagged_;
- int header_size_;
- LayoutDescriptor layout_descriptor_;
-};
-} // namespace internal
-} // namespace v8
-
-#include "src/objects/object-macros-undef.h"
-
-#endif // V8_OBJECTS_LAYOUT_DESCRIPTOR_H_
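The class comment above describes the two storage forms: a Smi-encoded bit vector when the layout fits, and a byte array otherwise, with a set bit marking a field that holds a raw (non-tagged) value. A rough sketch of the fast form, using a plain uint32_t in place of a Smi and an assumed 31 usable bits:

#include <cstdint>

constexpr int kBitsInSmiLayout = 31;  // assumed: Smi payload minus sign bit

// Mark |field_index| as holding a raw (unboxed double) value.
constexpr uint32_t SetRawData(uint32_t fast_layout, int field_index) {
  return fast_layout | (uint32_t{1} << field_index);
}

// A clear bit (or an index past the encoded capacity) means "tagged", i.e.
// the GC must visit the field as a pointer.
constexpr bool IsTagged(uint32_t fast_layout, int field_index) {
  return field_index >= kBitsInSmiLayout ||
         (fast_layout & (uint32_t{1} << field_index)) == 0;
}

static_assert(IsTagged(SetRawData(0, 3), 2), "untouched fields stay tagged");
static_assert(!IsTagged(SetRawData(0, 3), 3), "bit 3 marks an unboxed field");
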
diff --git a/deps/v8/src/objects/literal-objects-inl.h b/deps/v8/src/objects/literal-objects-inl.h
index 8b08dedb72..4a2329ee55 100644
--- a/deps/v8/src/objects/literal-objects-inl.h
+++ b/deps/v8/src/objects/literal-objects-inl.h
@@ -136,6 +136,12 @@ bool ArrayBoilerplateDescription::is_empty() const {
return constant_elements().length() == 0;
}
+//
+// RegExpBoilerplateDescription
+//
+
+TQ_OBJECT_CONSTRUCTORS_IMPL(RegExpBoilerplateDescription)
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/literal-objects.cc b/deps/v8/src/objects/literal-objects.cc
index 16a678f7d8..e6c7402f73 100644
--- a/deps/v8/src/objects/literal-objects.cc
+++ b/deps/v8/src/objects/literal-objects.cc
@@ -13,6 +13,7 @@
#include "src/heap/local-factory-inl.h"
#include "src/objects/dictionary.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-regexp.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
@@ -172,7 +173,7 @@ void AddToDictionaryTemplate(LocalIsolate* isolate,
Handle<Object> value_handle;
PropertyDetails details(
value_kind != ClassBoilerplate::kData ? kAccessor : kData, DONT_ENUM,
- PropertyCellType::kNoCell, enum_order);
+ PropertyDetails::kConstIfDictConstnessTracking, enum_order);
if (value_kind == ClassBoilerplate::kData) {
value_handle = handle(value, isolate);
} else {
@@ -223,8 +224,9 @@ void AddToDictionaryTemplate(LocalIsolate* isolate,
// Either both getter and setter were defined before the computed
// method or just one of them was defined before while the other one
// was not defined yet, so overwrite property to kData.
- PropertyDetails details(kData, DONT_ENUM, PropertyCellType::kNoCell,
- enum_order_existing);
+ PropertyDetails details(
+ kData, DONT_ENUM, PropertyDetails::kConstIfDictConstnessTracking,
+ enum_order_existing);
dictionary->DetailsAtPut(entry, details);
dictionary->ValueAtPut(entry, value);
@@ -280,8 +282,9 @@ void AddToDictionaryTemplate(LocalIsolate* isolate,
if (!existing_value.IsSmi() || Smi::ToInt(existing_value) < key_index) {
// Overwrite existing value because it was defined before the computed
// one (AccessorInfo "length" property is always defined before).
- PropertyDetails details(kData, DONT_ENUM, PropertyCellType::kNoCell,
- enum_order_existing);
+ PropertyDetails details(
+ kData, DONT_ENUM, PropertyDetails::kConstIfDictConstnessTracking,
+ enum_order_existing);
dictionary->DetailsAtPut(entry, details);
dictionary->ValueAtPut(entry, value);
} else {
@@ -291,9 +294,11 @@ void AddToDictionaryTemplate(LocalIsolate* isolate,
// The enum index is unused by elements dictionaries,
// which is why we don't need to update the property details if
// |is_elements_dictionary| holds.
+ PropertyDetails details(
+ kData, DONT_ENUM,
+ PropertyDetails::kConstIfDictConstnessTracking,
+ enum_order_computed);
- PropertyDetails details(kData, DONT_ENUM, PropertyCellType::kNoCell,
- enum_order_computed);
dictionary->DetailsAtPut(entry, details);
}
}
@@ -319,9 +324,10 @@ void AddToDictionaryTemplate(LocalIsolate* isolate,
// which is why we don't need to update the property details if
// |is_elements_dictionary| holds.
- PropertyDetails details(kAccessor, DONT_ENUM,
- PropertyCellType::kNoCell,
- enum_order_computed);
+ PropertyDetails details(
+ kAccessor, DONT_ENUM,
+ PropertyDetails::kConstIfDictConstnessTracking,
+ enum_order_computed);
dictionary->DetailsAtPut(entry, details);
}
}
@@ -335,9 +341,10 @@ void AddToDictionaryTemplate(LocalIsolate* isolate,
// the computed accessor property.
Handle<AccessorPair> pair(isolate->factory()->NewAccessorPair());
pair->set(component, value);
- PropertyDetails details(kAccessor, DONT_ENUM,
- PropertyCellType::kNoCell,
- enum_order_existing);
+ PropertyDetails details(
+ kAccessor, DONT_ENUM,
+ PropertyDetails::kConstIfDictConstnessTracking,
+ enum_order_existing);
dictionary->DetailsAtPut(entry, details);
dictionary->ValueAtPut(entry, *pair);
} else {
@@ -349,9 +356,11 @@ void AddToDictionaryTemplate(LocalIsolate* isolate,
// The enum index is unused by elements dictionaries,
// which is why we don't need to update the property details if
// |is_elements_dictionary| holds.
+ PropertyDetails details(
+ kData, DONT_ENUM,
+ PropertyDetails::kConstIfDictConstnessTracking,
+ enum_order_computed);
- PropertyDetails details(kData, DONT_ENUM, PropertyCellType::kNoCell,
- enum_order_computed);
dictionary->DetailsAtPut(entry, details);
}
}
@@ -626,14 +635,6 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
static_desc.AddConstant(isolate, factory->prototype_string(),
factory->function_prototype_accessor(), attribs);
}
- if (FunctionLiteral::NeedsHomeObject(expr->constructor())) {
- PropertyAttributes attribs =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- Handle<Object> value(
- Smi::FromInt(ClassBoilerplate::kPrototypeArgumentIndex), isolate);
- static_desc.AddConstant(isolate, factory->home_object_symbol(), value,
- attribs);
- }
{
Handle<ClassPositions> class_positions = factory->NewClassPositions(
expr->start_position(), expr->end_position());
@@ -742,5 +743,20 @@ template Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
template Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
LocalIsolate* isolate, ClassLiteral* expr);
+void ArrayBoilerplateDescription::BriefPrintDetails(std::ostream& os) {
+ os << " " << ElementsKindToString(elements_kind()) << ", "
+ << Brief(constant_elements());
+}
+
+void RegExpBoilerplateDescription::BriefPrintDetails(std::ostream& os) {
+ // Note: keep boilerplate layout synced with JSRegExp layout.
+ STATIC_ASSERT(JSRegExp::kDataOffset == JSObject::kHeaderSize);
+ STATIC_ASSERT(JSRegExp::kSourceOffset == JSRegExp::kDataOffset + kTaggedSize);
+ STATIC_ASSERT(JSRegExp::kFlagsOffset ==
+ JSRegExp::kSourceOffset + kTaggedSize);
+ STATIC_ASSERT(JSRegExp::kHeaderSize == JSRegExp::kFlagsOffset + kTaggedSize);
+ os << " " << Brief(data()) << ", " << Brief(source()) << ", " << flags();
+}
+
} // namespace internal
} // namespace v8
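The new RegExpBoilerplateDescription printer relies on its field order staying in sync with JSRegExp, which the STATIC_ASSERTs above enforce at compile time. A minimal illustration of that offset-syncing pattern; the two structs below are invented stand-ins, not V8 classes:

#include <cstddef>

struct BoilerplateLayout { void* data; void* source; int flags; };
struct ObjectLayout      { void* data; void* source; int flags; };

// If either layout is reordered, the mismatch fails to compile instead of
// silently printing or copying the wrong field.
static_assert(offsetof(BoilerplateLayout, data) == offsetof(ObjectLayout, data),
              "data fields must line up");
static_assert(offsetof(BoilerplateLayout, source) ==
                  offsetof(ObjectLayout, source),
              "source fields must line up");
static_assert(offsetof(BoilerplateLayout, flags) ==
                  offsetof(ObjectLayout, flags),
              "flags fields must line up");
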
diff --git a/deps/v8/src/objects/literal-objects.h b/deps/v8/src/objects/literal-objects.h
index 00a2651a66..78fa53011b 100644
--- a/deps/v8/src/objects/literal-objects.h
+++ b/deps/v8/src/objects/literal-objects.h
@@ -75,6 +75,18 @@ class ArrayBoilerplateDescription
TQ_OBJECT_CONSTRUCTORS(ArrayBoilerplateDescription)
};
+class RegExpBoilerplateDescription
+ : public TorqueGeneratedRegExpBoilerplateDescription<
+ RegExpBoilerplateDescription, Struct> {
+ public:
+ // Dispatched behavior.
+ DECL_PRINTER(RegExpBoilerplateDescription)
+ void BriefPrintDetails(std::ostream& os);
+
+ private:
+ TQ_OBJECT_CONSTRUCTORS(RegExpBoilerplateDescription)
+};
+
class ClassBoilerplate : public FixedArray {
public:
enum ValueKind { kData, kGetter, kSetter };
diff --git a/deps/v8/src/objects/literal-objects.tq b/deps/v8/src/objects/literal-objects.tq
index 1e2b80d2dd..bb087f7a5a 100644
--- a/deps/v8/src/objects/literal-objects.tq
+++ b/deps/v8/src/objects/literal-objects.tq
@@ -7,3 +7,10 @@ extern class ArrayBoilerplateDescription extends Struct {
flags: Smi;
constant_elements: FixedArrayBase;
}
+
+@generateCppClass
+extern class RegExpBoilerplateDescription extends Struct {
+ data: FixedArray;
+ source: String;
+ flags: SmiTagged<JSRegExpFlags>;
+}
diff --git a/deps/v8/src/objects/lookup.cc b/deps/v8/src/objects/lookup.cc
index 75fcb32848..a8f2da66f8 100644
--- a/deps/v8/src/objects/lookup.cc
+++ b/deps/v8/src/objects/lookup.cc
@@ -174,26 +174,6 @@ void LookupIterator::ReloadPropertyInformation() {
DCHECK(IsFound() || !holder_->HasFastProperties(isolate_));
}
-namespace {
-
-bool IsTypedArrayFunctionInAnyContext(Isolate* isolate, HeapObject object) {
- static uint32_t context_slots[] = {
-#define TYPED_ARRAY_CONTEXT_SLOTS(Type, type, TYPE, ctype) \
- Context::TYPE##_ARRAY_FUN_INDEX,
-
- TYPED_ARRAYS(TYPED_ARRAY_CONTEXT_SLOTS)
-#undef TYPED_ARRAY_CONTEXT_SLOTS
- };
-
- if (!object.IsJSFunction(isolate)) return false;
-
- return std::any_of(
- std::begin(context_slots), std::end(context_slots),
- [=](uint32_t slot) { return isolate->IsInAnyContext(object, slot); });
-}
-
-} // namespace
-
// static
void LookupIterator::InternalUpdateProtector(Isolate* isolate,
Handle<Object> receiver_generic,
@@ -204,12 +184,6 @@ void LookupIterator::InternalUpdateProtector(Isolate* isolate,
ReadOnlyRoots roots(isolate);
if (*name == roots.constructor_string()) {
- if (!Protectors::IsArraySpeciesLookupChainIntact(isolate) &&
- !Protectors::IsPromiseSpeciesLookupChainIntact(isolate) &&
- !Protectors::IsRegExpSpeciesLookupChainIntact(isolate) &&
- !Protectors::IsTypedArraySpeciesLookupChainIntact(isolate)) {
- return;
- }
// Setting the constructor property could change an instance's @@species
if (receiver->IsJSArray(isolate)) {
if (!Protectors::IsArraySpeciesLookupChainIntact(isolate)) return;
@@ -274,28 +248,20 @@ void LookupIterator::InternalUpdateProtector(Isolate* isolate,
Protectors::InvalidateStringIteratorLookupChain(isolate);
}
} else if (*name == roots.species_symbol()) {
- if (!Protectors::IsArraySpeciesLookupChainIntact(isolate) &&
- !Protectors::IsPromiseSpeciesLookupChainIntact(isolate) &&
- !Protectors::IsRegExpSpeciesLookupChainIntact(isolate) &&
- !Protectors::IsTypedArraySpeciesLookupChainIntact(isolate)) {
- return;
- }
// Setting the Symbol.species property of any Array, Promise or TypedArray
// constructor invalidates the @@species protector
- if (isolate->IsInAnyContext(*receiver, Context::ARRAY_FUNCTION_INDEX)) {
+ if (receiver->IsJSArrayConstructor()) {
if (!Protectors::IsArraySpeciesLookupChainIntact(isolate)) return;
isolate->CountUsage(
v8::Isolate::UseCounterFeature::kArraySpeciesModified);
Protectors::InvalidateArraySpeciesLookupChain(isolate);
- } else if (isolate->IsInAnyContext(*receiver,
- Context::PROMISE_FUNCTION_INDEX)) {
+ } else if (receiver->IsJSPromiseConstructor()) {
if (!Protectors::IsPromiseSpeciesLookupChainIntact(isolate)) return;
Protectors::InvalidatePromiseSpeciesLookupChain(isolate);
- } else if (isolate->IsInAnyContext(*receiver,
- Context::REGEXP_FUNCTION_INDEX)) {
+ } else if (receiver->IsJSRegExpConstructor()) {
if (!Protectors::IsRegExpSpeciesLookupChainIntact(isolate)) return;
Protectors::InvalidateRegExpSpeciesLookupChain(isolate);
- } else if (IsTypedArrayFunctionInAnyContext(isolate, *receiver)) {
+ } else if (receiver->IsTypedArrayConstructor()) {
if (!Protectors::IsTypedArraySpeciesLookupChainIntact(isolate)) return;
Protectors::InvalidateTypedArraySpeciesLookupChain(isolate);
}
@@ -337,7 +303,7 @@ void LookupIterator::InternalUpdateProtector(Isolate* isolate,
if (!Protectors::IsPromiseResolveLookupChainIntact(isolate)) return;
// Setting the "resolve" property on any %Promise% intrinsic object
// invalidates the Promise.resolve protector.
- if (isolate->IsInAnyContext(*receiver, Context::PROMISE_FUNCTION_INDEX)) {
+ if (receiver->IsJSPromiseConstructor()) {
Protectors::InvalidatePromiseResolveLookupChain(isolate);
}
} else if (*name == roots.then_string()) {
@@ -361,15 +327,13 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
DCHECK(HolderIsReceiverOrHiddenPrototype());
Handle<JSReceiver> holder = GetHolder<JSReceiver>();
- // JSProxy does not have fast properties so we do an early return.
- DCHECK_IMPLIES(holder->IsJSProxy(isolate_),
- !holder->HasFastProperties(isolate_));
+ // We are not interested in tracking constness of a JSProxy's direct
+ // properties.
DCHECK_IMPLIES(holder->IsJSProxy(isolate_), name()->IsPrivate(isolate_));
if (holder->IsJSProxy(isolate_)) return;
- Handle<JSObject> holder_obj = Handle<JSObject>::cast(holder);
-
if (IsElement(*holder)) {
+ Handle<JSObject> holder_obj = Handle<JSObject>::cast(holder);
ElementsKind kind = holder_obj->GetElementsKind(isolate_);
ElementsKind to = value->OptimalElementsKind(isolate_);
if (IsHoleyElementsKind(kind)) to = GetHoleyElementsKind(to);
@@ -387,34 +351,55 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
return;
}
- if (holder_obj->IsJSGlobalObject(isolate_)) {
+ if (holder->IsJSGlobalObject(isolate_)) {
Handle<GlobalDictionary> dictionary(
- JSGlobalObject::cast(*holder_obj)
- .global_dictionary(isolate_, kAcquireLoad),
+ JSGlobalObject::cast(*holder).global_dictionary(isolate_, kAcquireLoad),
isolate());
Handle<PropertyCell> cell(dictionary->CellAt(isolate_, dictionary_entry()),
isolate());
property_details_ = cell->property_details();
- PropertyCell::PrepareForValue(isolate(), dictionary, dictionary_entry(),
- value, property_details_);
+ PropertyCell::PrepareForAndSetValue(
+ isolate(), dictionary, dictionary_entry(), value, property_details_);
return;
}
- if (!holder_obj->HasFastProperties(isolate_)) return;
PropertyConstness new_constness = PropertyConstness::kConst;
if (constness() == PropertyConstness::kConst) {
DCHECK_EQ(kData, property_details_.kind());
// Check that current value matches new value otherwise we should make
// the property mutable.
- if (!IsConstFieldValueEqualTo(*value))
- new_constness = PropertyConstness::kMutable;
+ if (holder->HasFastProperties(isolate_)) {
+ if (!IsConstFieldValueEqualTo(*value)) {
+ new_constness = PropertyConstness::kMutable;
+ }
+ } else if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL) {
+ if (!IsConstDictValueEqualTo(*value)) {
+ property_details_ =
+ property_details_.CopyWithConstness(PropertyConstness::kMutable);
+
+ // We won't reach the map updating code after Map::Update below, because
+ // that's only for the case that the existing map is a fast mode map.
+ // Therefore, we need to perform the necessary updates to the property
+ // details and the prototype validity cell directly.
+ NameDictionary dict = holder->property_dictionary();
+ dict.DetailsAtPut(dictionary_entry(), property_details_);
+
+ Map old_map = holder->map(isolate_);
+ if (old_map.is_prototype_map()) {
+ JSObject::InvalidatePrototypeChains(old_map);
+ }
+ }
+ return;
+ }
}
- Handle<Map> old_map(holder_obj->map(isolate_), isolate_);
- DCHECK(!old_map->is_dictionary_map());
+ if (!holder->HasFastProperties(isolate_)) return;
+
+ Handle<JSObject> holder_obj = Handle<JSObject>::cast(holder);
+ Handle<Map> old_map(holder->map(isolate_), isolate_);
Handle<Map> new_map = Map::Update(isolate_, old_map);
- if (!new_map->is_dictionary_map()) {
+ if (!new_map->is_dictionary_map()) { // fast -> fast
new_map = Map::PrepareForDataProperty(
isolate(), new_map, descriptor_number(), new_constness, value);
@@ -434,6 +419,22 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
JSObject::MigrateToMap(isolate_, holder_obj, new_map);
ReloadPropertyInformation<false>();
+
+ // If we transitioned from fast to slow and the property changed from kConst
+ // to kMutable, then this change in the constness is indicated by neither the
+ // old or the new map. We need to update the constness ourselves.
+ DCHECK(!old_map->is_dictionary_map());
+ if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL && new_map->is_dictionary_map() &&
+ new_constness == PropertyConstness::kMutable) { // fast -> slow
+ property_details_ =
+ property_details_.CopyWithConstness(PropertyConstness::kMutable);
+
+ NameDictionary dict = holder_obj->property_dictionary();
+ dict.DetailsAtPut(dictionary_entry(), property_details_);
+
+ DCHECK_IMPLIES(new_map->is_prototype_map(),
+ !new_map->IsPrototypeValidityCellValid());
+ }
}
void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
@@ -476,7 +477,6 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
}
if (!IsElement(*holder) && !holder_obj->HasFastProperties(isolate_)) {
- PropertyDetails details(kData, attributes, PropertyCellType::kMutable);
if (holder_obj->map(isolate_).is_prototype_map() &&
(property_details_.attributes() & READ_ONLY) == 0 &&
(attributes & READ_ONLY) != 0) {
@@ -486,16 +486,18 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
JSObject::InvalidatePrototypeChains(holder->map(isolate_));
}
if (holder_obj->IsJSGlobalObject(isolate_)) {
+ PropertyDetails details(kData, attributes, PropertyCellType::kMutable);
Handle<GlobalDictionary> dictionary(
JSGlobalObject::cast(*holder_obj)
.global_dictionary(isolate_, kAcquireLoad),
isolate());
- Handle<PropertyCell> cell = PropertyCell::PrepareForValue(
+ Handle<PropertyCell> cell = PropertyCell::PrepareForAndSetValue(
isolate(), dictionary, dictionary_entry(), value, details);
- cell->set_value(*value);
property_details_ = cell->property_details();
+ DCHECK_EQ(cell->value(), *value);
} else {
+ PropertyDetails details(kData, attributes, PropertyConstness::kMutable);
if (V8_DICT_MODE_PROTOTYPES_BOOL) {
Handle<OrderedNameDictionary> dictionary(
holder_obj->property_dictionary_ordered(isolate_), isolate());
@@ -552,18 +554,17 @@ void LookupIterator::PrepareTransitionToDataProperty(
if (map->is_dictionary_map()) {
state_ = TRANSITION;
if (map->IsJSGlobalObjectMap()) {
- Handle<PropertyCell> cell = isolate_->factory()->NewPropertyCell(name());
- DCHECK(cell->value(isolate_).IsTheHole(isolate_));
DCHECK(!value->IsTheHole(isolate_));
// Don't set enumeration index (it will be set during value store).
property_details_ = PropertyDetails(
kData, attributes, PropertyCell::InitialType(isolate_, value));
- transition_ = cell;
+ transition_ = isolate_->factory()->NewPropertyCell(
+ name(), property_details_, value);
has_property_ = true;
} else {
// Don't set enumeration index (it will be set during value store).
- property_details_ =
- PropertyDetails(kData, attributes, PropertyCellType::kNoCell);
+ property_details_ = PropertyDetails(
+ kData, attributes, PropertyDetails::kConstIfDictConstnessTracking);
transition_ = map;
}
return;
@@ -576,9 +577,10 @@ void LookupIterator::PrepareTransitionToDataProperty(
transition_ = transition;
if (transition->is_dictionary_map()) {
+ DCHECK(!transition->IsJSGlobalObjectMap());
// Don't set enumeration index (it will be set during value store).
- property_details_ =
- PropertyDetails(kData, attributes, PropertyCellType::kNoCell);
+ property_details_ = PropertyDetails(
+ kData, attributes, PropertyDetails::kConstIfDictConstnessTracking);
} else {
property_details_ = transition->GetLastDescriptorDetails(isolate_);
has_property_ = true;
@@ -890,13 +892,9 @@ bool LookupIterator::IsConstFieldValueEqualTo(Object value) const {
if (property_details_.representation().IsDouble()) {
if (!value.IsNumber(isolate_)) return false;
uint64_t bits;
- if (holder->IsUnboxedDoubleField(isolate_, field_index)) {
- bits = holder->RawFastDoublePropertyAsBitsAt(field_index);
- } else {
- Object current_value = holder->RawFastPropertyAt(isolate_, field_index);
- DCHECK(current_value.IsHeapNumber(isolate_));
- bits = HeapNumber::cast(current_value).value_as_bits();
- }
+ Object current_value = holder->RawFastPropertyAt(isolate_, field_index);
+ DCHECK(current_value.IsHeapNumber(isolate_));
+ bits = HeapNumber::cast(current_value).value_as_bits();
// Use bit representation of double to check for hole double, since
// manipulating the signaling NaN used for the hole in C++, e.g. with
// bit_cast or value(), will change its value on ia32 (the x87 stack is
@@ -917,6 +915,33 @@ bool LookupIterator::IsConstFieldValueEqualTo(Object value) const {
}
}
+bool LookupIterator::IsConstDictValueEqualTo(Object value) const {
+ DCHECK(!IsElement(*holder_));
+ DCHECK(!holder_->HasFastProperties(isolate_));
+ DCHECK(!holder_->IsJSGlobalObject());
+ DCHECK(!holder_->IsJSProxy());
+ DCHECK_EQ(PropertyConstness::kConst, property_details_.constness());
+
+ DisallowHeapAllocation no_gc;
+
+ if (value.IsUninitialized(isolate())) {
+ // Storing uninitialized value means that we are preparing for a computed
+ // property value in an object literal. The initializing store will follow
+ // and it will properly update constness based on the actual value.
+ return true;
+ }
+ Handle<JSReceiver> holder = GetHolder<JSReceiver>();
+ NameDictionary dict = holder->property_dictionary();
+
+ Object current_value = dict.ValueAt(dictionary_entry());
+
+ if (current_value.IsUninitialized(isolate()) || current_value == value) {
+ return true;
+ }
+ return current_value.IsNumber(isolate_) && value.IsNumber(isolate_) &&
+ Object::SameNumberValue(current_value.Number(), value.Number());
+}
+
int LookupIterator::GetFieldDescriptorIndex() const {
DCHECK(has_property_);
DCHECK(holder_->HasFastProperties());
@@ -1004,11 +1029,22 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
DCHECK_EQ(PropertyConstness::kConst, property_details_.constness());
}
} else if (holder->IsJSGlobalObject(isolate_)) {
+ // PropertyCell::PrepareForAndSetValue already wrote the value into the
+ // cell.
+#ifdef DEBUG
GlobalDictionary dictionary =
JSGlobalObject::cast(*holder).global_dictionary(isolate_, kAcquireLoad);
- dictionary.CellAt(isolate_, dictionary_entry()).set_value(*value);
+ PropertyCell cell = dictionary.CellAt(isolate_, dictionary_entry());
+ DCHECK_EQ(cell.value(), *value);
+#endif // DEBUG
} else {
DCHECK_IMPLIES(holder->IsJSProxy(isolate_), name()->IsPrivate(isolate_));
+ // Check similar to fast mode case above.
+ DCHECK_IMPLIES(
+ V8_DICT_PROPERTY_CONST_TRACKING_BOOL && !initializing_store &&
+ property_details_.constness() == PropertyConstness::kConst,
+ holder->IsJSProxy(isolate_) || IsConstDictValueEqualTo(*value));
+
if (V8_DICT_MODE_PROTOTYPES_BOOL) {
OrderedNameDictionary dictionary =
holder->property_dictionary_ordered(isolate_);
@@ -1105,7 +1141,9 @@ LookupIterator::State LookupIterator::LookupInSpecialHolder(
number_ = dict.FindEntry(isolate(), name_);
if (number_.is_not_found()) return NOT_FOUND;
PropertyCell cell = dict.CellAt(isolate_, number_);
- if (cell.value(isolate_).IsTheHole(isolate_)) return NOT_FOUND;
+ if (cell.value(isolate_).IsTheHole(isolate_)) {
+ return NOT_FOUND;
+ }
property_details_ = cell.property_details();
has_property_ = true;
switch (property_details_.kind()) {
@@ -1202,17 +1240,24 @@ Handle<InterceptorInfo> LookupIterator::GetInterceptorForFailedAccessCheck()
return Handle<InterceptorInfo>();
}
+bool LookupIterator::TryLookupCachedProperty(Handle<AccessorPair> accessor) {
+ DCHECK_EQ(state(), LookupIterator::ACCESSOR);
+ return LookupCachedProperty(accessor);
+}
+
bool LookupIterator::TryLookupCachedProperty() {
- return state() == LookupIterator::ACCESSOR &&
- GetAccessors()->IsAccessorPair(isolate_) && LookupCachedProperty();
+ if (state() != LookupIterator::ACCESSOR) return false;
+
+ Handle<Object> accessor_pair = GetAccessors();
+ return accessor_pair->IsAccessorPair(isolate_) &&
+ LookupCachedProperty(Handle<AccessorPair>::cast(accessor_pair));
}
-bool LookupIterator::LookupCachedProperty() {
+bool LookupIterator::LookupCachedProperty(Handle<AccessorPair> accessor_pair) {
DCHECK_EQ(state(), LookupIterator::ACCESSOR);
DCHECK(GetAccessors()->IsAccessorPair(isolate_));
- AccessorPair accessor_pair = AccessorPair::cast(*GetAccessors());
- Handle<Object> getter(accessor_pair.getter(isolate_), isolate());
+ Handle<Object> getter(accessor_pair->getter(isolate_), isolate());
MaybeHandle<Name> maybe_name =
FunctionTemplateInfo::TryGetCachedPropertyName(isolate(), getter);
if (maybe_name.is_null()) return false;
@@ -1224,5 +1269,49 @@ bool LookupIterator::LookupCachedProperty() {
return true;
}
+// static
+base::Optional<Object> ConcurrentLookupIterator::TryGetOwnCowElement(
+ Isolate* isolate, FixedArray array_elements, ElementsKind elements_kind,
+ int array_length, size_t index) {
+ DisallowGarbageCollection no_gc;
+
+ CHECK_EQ(array_elements.map(), ReadOnlyRoots(isolate).fixed_cow_array_map());
+ DCHECK(IsFastElementsKind(elements_kind) &&
+ IsSmiOrObjectElementsKind(elements_kind));
+ USE(elements_kind);
+ DCHECK_GE(array_length, 0);
+
+ // ________________________________________
+ // ( Check against both JSArray::length and )
+ // ( FixedArray::length. )
+ // ----------------------------------------
+ // o ^__^
+ // o (oo)\_______
+ // (__)\ )\/\
+ // ||----w |
+ // || ||
+ // The former is the source of truth, but due to concurrent reads it may not
+ // match the given `array_elements`.
+ if (index >= static_cast<size_t>(array_length)) return {};
+ if (index >= static_cast<size_t>(array_elements.length())) return {};
+
+ Object result = array_elements.get(isolate, static_cast<int>(index));
+
+ // ______________________________________
+ // ( Filter out holes irrespective of the )
+ // ( elements kind. )
+ // --------------------------------------
+ // o ^__^
+ // o (..)\_______
+ // (__)\ )\/\
+ // ||----w |
+ // || ||
+ // The elements kind may not be consistent with the given elements backing
+ // store.
+ if (result == ReadOnlyRoots(isolate).the_hole_value()) return {};
+
+ return result;
+}
+
} // namespace internal
} // namespace v8
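The new IsConstDictValueEqualTo decides whether a store to a kConst dictionary property may keep its constness: the uninitialized sentinel always passes (the real value arrives with a later initializing store), identical values pass, and numbers are compared by numeric value. A simplified sketch of that decision with plain C++ stand-ins for tagged values; SameNumberValue below mirrors the SameValue-style semantics of the real predicate but is not the V8 implementation:

#include <cmath>
#include <optional>

// Stand-in for a tagged value: either "uninitialized", a number, or an
// opaque object identity. All names here are illustrative, not V8 API.
struct Value {
  bool uninitialized = false;
  std::optional<double> number;    // set when the value is a number
  const void* identity = nullptr;  // object identity otherwise
};

inline bool SameNumberValue(double a, double b) {
  if (std::isnan(a) && std::isnan(b)) return true;       // NaN equals itself
  if (a == 0 && b == 0) return std::signbit(a) == std::signbit(b);  // +0 != -0
  return a == b;
}

// Can a kConst property holding |stored| stay kConst after writing |incoming|?
inline bool ConstStoreKeepsConstness(const Value& stored,
                                     const Value& incoming) {
  if (incoming.uninitialized || stored.uninitialized) return true;
  if (stored.number && incoming.number)
    return SameNumberValue(*stored.number, *incoming.number);
  return stored.identity == incoming.identity;
}
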
diff --git a/deps/v8/src/objects/lookup.h b/deps/v8/src/objects/lookup.h
index 28a6bd8336..06c317beb7 100644
--- a/deps/v8/src/objects/lookup.h
+++ b/deps/v8/src/objects/lookup.h
@@ -190,12 +190,13 @@ class V8_EXPORT_PRIVATE LookupIterator final {
// Lookup a 'cached' private property for an accessor.
// If not found returns false and leaves the LookupIterator unmodified.
+ bool TryLookupCachedProperty(Handle<AccessorPair> accessor);
bool TryLookupCachedProperty();
- bool LookupCachedProperty();
private:
static const size_t kInvalidIndex = std::numeric_limits<size_t>::max();
+ bool LookupCachedProperty(Handle<AccessorPair> accessor);
inline LookupIterator(Isolate* isolate, Handle<Object> receiver,
Handle<Name> name, size_t index,
Handle<Object> lookup_start_object,
@@ -245,6 +246,8 @@ class V8_EXPORT_PRIVATE LookupIterator final {
Handle<Object> FetchValue(AllocationPolicy allocation_policy =
AllocationPolicy::kAllocationAllowed) const;
bool IsConstFieldValueEqualTo(Object value) const;
+ bool IsConstDictValueEqualTo(Object value) const;
+
template <bool is_element>
void ReloadPropertyInformation();
@@ -289,6 +292,32 @@ class V8_EXPORT_PRIVATE LookupIterator final {
InternalIndex number_ = InternalIndex::NotFound();
};
+// Similar to the LookupIterator, but for concurrent accesses from a background
+// thread.
+//
+// Note: This is a work in progress, intended to bundle code related to
+// concurrent lookups here. In its current state, the class is obviously not an
+// 'iterator'. Still, keeping the name for now, with the intent to clarify
+// names and implementation once we've gotten some experience with more
+// involved logic.
+// TODO(jgruber, v8:7790): Consider using a LookupIterator-style interface.
+// TODO(jgruber, v8:7790): Consider merging back into the LookupIterator once
+// functionality and constraints are better known.
+class ConcurrentLookupIterator final : public AllStatic {
+ public:
+ // Implements the own data property lookup for the specialized case of
+ // fixed_cow_array backing stores (these are only in use for array literal
+ // boilerplates). The contract is that the elements, elements kind, and array
+ // length passed to this function should all be read from the same JSArray
+ // instance; but due to concurrency it's possible that they may not be
+ // consistent among themselves (e.g. the elements kind may not match the
+ // given elements backing store). We are thus extra-careful to handle
+ // exceptional situations.
+ V8_EXPORT_PRIVATE static base::Optional<Object> TryGetOwnCowElement(
+ Isolate* isolate, FixedArray array_elements, ElementsKind elements_kind,
+ int array_length, size_t index);
+};
+
} // namespace internal
} // namespace v8
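The contract spelled out for TryGetOwnCowElement is that every input may be stale relative to the others under concurrent access, so the lookup re-validates the index against both lengths and filters holes instead of trusting any single field. A shape-only sketch of that defensive pattern, with std::optional and an integer sentinel standing in for Object and the hole:

#include <cstddef>
#include <optional>
#include <vector>

// Stand-ins: |kHole| marks an unused slot, the vector plays the role of the
// copy-on-write elements backing store. None of this is V8 API.
constexpr int kHole = -1;

inline std::optional<int> TryGetOwnCowElement(const std::vector<int>& elements,
                                              int array_length, size_t index) {
  // The published array length is the source of truth, but a concurrent
  // reader may hold an older (shorter or longer) backing store, so check
  // against both lengths before touching memory.
  if (array_length < 0) return std::nullopt;
  if (index >= static_cast<size_t>(array_length)) return std::nullopt;
  if (index >= elements.size()) return std::nullopt;
  int result = elements[index];
  // Filter holes regardless of what the (possibly stale) elements kind says.
  if (result == kHole) return std::nullopt;
  return result;
}
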
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index df9c93a497..ecb270e7ce 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -12,7 +12,6 @@
#include "src/objects/field-type.h"
#include "src/objects/instance-type-inl.h"
#include "src/objects/js-function-inl.h"
-#include "src/objects/layout-descriptor-inl.h"
#include "src/objects/map.h"
#include "src/objects/objects-inl.h"
#include "src/objects/property.h"
@@ -43,11 +42,9 @@ RELEASE_ACQUIRE_ACCESSORS(Map, instance_descriptors, DescriptorArray,
// We need to use release-store and acquire-load accessor pairs to ensure
// that the concurrent marking thread observes initializing stores of the
// layout descriptor.
-RELEASE_ACQUIRE_ACCESSORS_CHECKED(Map, layout_descriptor, LayoutDescriptor,
- kLayoutDescriptorOffset,
- FLAG_unbox_double_fields)
-SYNCHRONIZED_WEAK_ACCESSORS(Map, raw_transitions,
- kTransitionsOrPrototypeInfoOffset)
+WEAK_ACCESSORS(Map, raw_transitions, kTransitionsOrPrototypeInfoOffset)
+RELEASE_ACQUIRE_WEAK_ACCESSORS(Map, raw_transitions,
+ kTransitionsOrPrototypeInfoOffset)
ACCESSORS_CHECKED2(Map, prototype, HeapObject, kPrototypeOffset, true,
value.IsNull() || value.IsJSReceiver())
@@ -156,18 +153,6 @@ bool Map::EquivalentToForNormalization(const Map other,
return EquivalentToForNormalization(other, elements_kind(), mode);
}
-bool Map::IsUnboxedDoubleField(FieldIndex index) const {
- IsolateRoot isolate = GetIsolateForPtrCompr(*this);
- return IsUnboxedDoubleField(isolate, index);
-}
-
-bool Map::IsUnboxedDoubleField(IsolateRoot isolate, FieldIndex index) const {
- if (!FLAG_unbox_double_fields) return false;
- if (!index.is_inobject()) return false;
- return !layout_descriptor(isolate, kAcquireLoad)
- .IsTagged(index.property_index());
-}
-
bool Map::TooManyFastProperties(StoreOrigin store_origin) const {
if (UnusedPropertyFields() != 0) return false;
if (is_prototype_map()) return false;
@@ -315,18 +300,17 @@ int Map::GetInObjectPropertyOffset(int index) const {
Handle<Map> Map::AddMissingTransitionsForTesting(
Isolate* isolate, Handle<Map> split_map,
- Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> full_layout_descriptor) {
- return AddMissingTransitions(isolate, split_map, descriptors,
- full_layout_descriptor);
+ Handle<DescriptorArray> descriptors) {
+ return AddMissingTransitions(isolate, split_map, descriptors);
}
InstanceType Map::instance_type() const {
- return static_cast<InstanceType>(ReadField<uint16_t>(kInstanceTypeOffset));
+ return static_cast<InstanceType>(
+ RELAXED_READ_UINT16_FIELD(*this, kInstanceTypeOffset));
}
void Map::set_instance_type(InstanceType value) {
- WriteField<uint16_t>(kInstanceTypeOffset, value);
+ RELAXED_WRITE_UINT16_FIELD(*this, kInstanceTypeOffset, value);
}
int Map::UnusedPropertyFields() const {
@@ -602,63 +586,14 @@ bool Map::IsPrimitiveMap() const {
return instance_type() <= LAST_PRIMITIVE_HEAP_OBJECT_TYPE;
}
-LayoutDescriptor Map::layout_descriptor_gc_safe() const {
- DCHECK(FLAG_unbox_double_fields);
- // The loaded value can be dereferenced on background thread to load the
- // bitmap. We need acquire load in order to ensure that the bitmap
- // initializing stores are also visible to the background thread.
- Object layout_desc =
- TaggedField<Object, kLayoutDescriptorOffset>::Acquire_Load(*this);
- return LayoutDescriptor::cast_gc_safe(layout_desc);
-}
-
-bool Map::HasFastPointerLayout() const {
- DCHECK(FLAG_unbox_double_fields);
- // The loaded value is used for SMI check only and is not dereferenced,
- // so relaxed load is safe.
- Object layout_desc =
- TaggedField<Object, kLayoutDescriptorOffset>::Relaxed_Load(*this);
- return LayoutDescriptor::IsFastPointerLayout(layout_desc);
-}
-
void Map::UpdateDescriptors(Isolate* isolate, DescriptorArray descriptors,
- LayoutDescriptor layout_desc,
int number_of_own_descriptors) {
SetInstanceDescriptors(isolate, descriptors, number_of_own_descriptors);
- if (FLAG_unbox_double_fields) {
- if (layout_descriptor(kAcquireLoad).IsSlowLayout()) {
- set_layout_descriptor(layout_desc, kReleaseStore);
- }
-#ifdef VERIFY_HEAP
- // TODO(ishell): remove these checks from VERIFY_HEAP mode.
- if (FLAG_verify_heap) {
- CHECK(layout_descriptor(kAcquireLoad).IsConsistentWithMap(*this));
- CHECK_EQ(Map::GetVisitorId(*this), visitor_id());
- }
-#else
- SLOW_DCHECK(layout_descriptor(kAcquireLoad).IsConsistentWithMap(*this));
- DCHECK(visitor_id() == Map::GetVisitorId(*this));
-#endif
- }
}
-void Map::InitializeDescriptors(Isolate* isolate, DescriptorArray descriptors,
- LayoutDescriptor layout_desc) {
+void Map::InitializeDescriptors(Isolate* isolate, DescriptorArray descriptors) {
SetInstanceDescriptors(isolate, descriptors,
descriptors.number_of_descriptors());
-
- if (FLAG_unbox_double_fields) {
- set_layout_descriptor(layout_desc, kReleaseStore);
-#ifdef VERIFY_HEAP
- // TODO(ishell): remove these checks from VERIFY_HEAP mode.
- if (FLAG_verify_heap) {
- CHECK(layout_descriptor(kAcquireLoad).IsConsistentWithMap(*this));
- }
-#else
- SLOW_DCHECK(layout_descriptor(kAcquireLoad).IsConsistentWithMap(*this));
-#endif
- set_visitor_id(Map::GetVisitorId(*this));
- }
}
void Map::set_bit_field3(uint32_t bits) {
@@ -676,11 +611,6 @@ void Map::clear_padding() {
FIELD_SIZE(kOptionalPaddingOffset));
}
-LayoutDescriptor Map::GetLayoutDescriptor() const {
- return FLAG_unbox_double_fields ? layout_descriptor(kAcquireLoad)
- : LayoutDescriptor::FastPointerLayout();
-}
-
void Map::AppendDescriptor(Isolate* isolate, Descriptor* desc) {
DescriptorArray descriptors = instance_descriptors(kRelaxedLoad);
int number_of_own_descriptors = NumberOfOwnDescriptors();
@@ -712,7 +642,7 @@ void Map::AppendDescriptor(Isolate* isolate, Descriptor* desc) {
}
DEF_GETTER(Map, GetBackPointer, HeapObject) {
- Object object = constructor_or_backpointer(isolate);
+ Object object = constructor_or_back_pointer(isolate);
// This is the equivalent of IsMap() but avoids reading the instance type so
// it can be used concurrently without acquire load.
if (object.IsHeapObject() && HeapObject::cast(object).map(isolate) ==
@@ -729,8 +659,8 @@ void Map::SetBackPointer(HeapObject value, WriteBarrierMode mode) {
CHECK(value.IsMap());
CHECK(GetBackPointer().IsUndefined());
CHECK_IMPLIES(value.IsMap(), Map::cast(value).GetConstructor() ==
- constructor_or_backpointer());
- set_constructor_or_backpointer(value, mode);
+ constructor_or_back_pointer());
+ set_constructor_or_back_pointer(value, mode);
}
// static
@@ -742,7 +672,7 @@ Map Map::ElementsTransitionMap(Isolate* isolate) {
ACCESSORS(Map, dependent_code, DependentCode, kDependentCodeOffset)
ACCESSORS(Map, prototype_validity_cell, Object, kPrototypeValidityCellOffset)
-ACCESSORS_CHECKED2(Map, constructor_or_backpointer, Object,
+ACCESSORS_CHECKED2(Map, constructor_or_back_pointer, Object,
kConstructorOrBackPointerOrNativeContextOffset,
!IsContextMap(), value.IsNull() || !IsContextMap())
ACCESSORS_CHECKED(Map, native_context, NativeContext,
@@ -760,22 +690,22 @@ bool Map::IsPrototypeValidityCellValid() const {
}
DEF_GETTER(Map, GetConstructor, Object) {
- Object maybe_constructor = constructor_or_backpointer(isolate);
+ Object maybe_constructor = constructor_or_back_pointer(isolate);
// Follow any back pointers.
while (maybe_constructor.IsMap(isolate)) {
maybe_constructor =
- Map::cast(maybe_constructor).constructor_or_backpointer(isolate);
+ Map::cast(maybe_constructor).constructor_or_back_pointer(isolate);
}
return maybe_constructor;
}
Object Map::TryGetConstructor(Isolate* isolate, int max_steps) {
- Object maybe_constructor = constructor_or_backpointer(isolate);
+ Object maybe_constructor = constructor_or_back_pointer(isolate);
// Follow any back pointers.
while (maybe_constructor.IsMap(isolate)) {
if (max_steps-- == 0) return Smi::FromInt(0);
maybe_constructor =
- Map::cast(maybe_constructor).constructor_or_backpointer(isolate);
+ Map::cast(maybe_constructor).constructor_or_back_pointer(isolate);
}
return maybe_constructor;
}
@@ -793,8 +723,8 @@ DEF_GETTER(Map, GetFunctionTemplateInfo, FunctionTemplateInfo) {
void Map::SetConstructor(Object constructor, WriteBarrierMode mode) {
// Never overwrite a back pointer with a constructor.
- CHECK(!constructor_or_backpointer().IsMap());
- set_constructor_or_backpointer(constructor, mode);
+ CHECK(!constructor_or_back_pointer().IsMap());
+ set_constructor_or_back_pointer(constructor, mode);
}
Handle<Map> Map::CopyInitialMap(Isolate* isolate, Handle<Map> map) {
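The renamed constructor_or_back_pointer slot holds either the map's constructor or a back pointer to its parent in the transition tree, so GetConstructor simply follows maps until the slot holds something other than a map. A small sketch of that chain walk over invented stand-in types (not V8 classes):

#include <variant>

struct MapNode;  // forward declaration for the variant alternative below

// One slot, two roles: either a back pointer to the parent map or the
// constructor (modelled here as an int payload).
using ConstructorOrBackPointer = std::variant<MapNode*, int>;

struct MapNode {
  ConstructorOrBackPointer constructor_or_back_pointer;
};

// Follow back pointers until the slot no longer holds a map.
inline int GetConstructor(const MapNode& map) {
  const MapNode* current = &map;
  while (std::holds_alternative<MapNode*>(current->constructor_or_back_pointer)) {
    current = std::get<MapNode*>(current->constructor_or_back_pointer);
  }
  return std::get<int>(current->constructor_or_back_pointer);
}
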
diff --git a/deps/v8/src/objects/map-updater.cc b/deps/v8/src/objects/map-updater.cc
index 0e0c3372cd..95b435085e 100644
--- a/deps/v8/src/objects/map-updater.cc
+++ b/deps/v8/src/objects/map-updater.cc
@@ -46,11 +46,15 @@ PropertyDetails MapUpdater::GetDetails(InternalIndex descriptor) const {
DCHECK(descriptor.is_found());
if (descriptor == modified_descriptor_) {
PropertyAttributes attributes = new_attributes_;
- // If the original map was sealed or frozen, let us used the old
+ // If the original map was sealed or frozen, let's use the old
// attributes so that we follow the same transition path as before.
// Note that the user could not have changed the attributes because
- // both seal and freeze make the properties non-configurable.
- if (integrity_level_ == SEALED || integrity_level_ == FROZEN) {
+ // both seal and freeze make the properties non-configurable. An exception
+ // is transitioning from [[Writable]] = true to [[Writable]] = false (this
+ // is allowed for frozen and sealed objects). To support it, we use the new
+ // attributes if they have [[Writable]] == false.
+ if ((integrity_level_ == SEALED || integrity_level_ == FROZEN) &&
+ !(new_attributes_ & READ_ONLY)) {
attributes = old_descriptors_->GetDetails(descriptor).attributes();
}
return PropertyDetails(new_kind_, attributes, new_location_, new_constness_,
@@ -121,6 +125,41 @@ Handle<Map> MapUpdater::ReconfigureToDataField(InternalIndex descriptor,
PropertyDetails old_details =
old_descriptors_->GetDetails(modified_descriptor_);
+ // If the {descriptor} was "const" data field so far, we need to update the
+ // {old_map_} here, otherwise we could get the constants wrong, i.e.
+ //
+ // o.x = 1;
+ // change o.x's attributes to something else
+ // delete o.x;
+ // o.x = 2;
+ //
+ // could trick V8 into thinking that `o.x` is still 1 even after the second
+ // assignment.
+ // This situation is similar to what might happen with property deletion.
+ if (old_details.constness() == PropertyConstness::kConst &&
+ old_details.location() == kField &&
+ old_details.attributes() != new_attributes_) {
+ Handle<FieldType> field_type(
+ old_descriptors_->GetFieldType(modified_descriptor_), isolate_);
+ Map::GeneralizeField(isolate_, old_map_, descriptor,
+ PropertyConstness::kMutable,
+ old_details.representation(), field_type);
+ // The old_map_'s property must become mutable.
+ // Note, that the {old_map_} and {old_descriptors_} are not expected to be
+ // updated by the generalization if the map is already deprecated.
+ DCHECK_IMPLIES(
+ !old_map_->is_deprecated(),
+ PropertyConstness::kMutable ==
+ old_descriptors_->GetDetails(modified_descriptor_).constness());
+ // Although the property in the old map is marked as mutable we still
+ // treat it as constant when merging with the new path in transition tree.
+ // This is fine because up until this reconfiguration the field was
+ // known to be constant, so it's fair to proceed treating it as such
+ // during this reconfiguration session. The issue is that after the
+ // reconfiguration the original field might become mutable (see the delete
+ // example above).
+ }
+
// If property kind is not reconfigured merge the result with
// representation/field type from the old descriptor.
if (old_details.kind() == new_kind_) {
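The pseudo-steps in the comment of this hunk correspond to an observable sequence; a rough TypeScript repro of the hazard being guarded against (names are illustrative, not part of this patch):

  const o: { x?: number } = { x: 1 };                    // o.x starts out tracked as a "const" field
  Object.defineProperty(o, "x", { enumerable: false });  // attribute change reconfigures the descriptor
  delete o.x;
  o.x = 2;                                               // must not be constant-folded to the stale value 1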
@@ -775,17 +814,13 @@ MapUpdater::State MapUpdater::ConstructNewMap() {
old_value, new_field_type, new_value);
}
- Handle<LayoutDescriptor> new_layout_descriptor =
- LayoutDescriptor::New(isolate_, split_map, new_descriptors, old_nof_);
-
- Handle<Map> new_map = Map::AddMissingTransitions(
- isolate_, split_map, new_descriptors, new_layout_descriptor);
+ Handle<Map> new_map =
+ Map::AddMissingTransitions(isolate_, split_map, new_descriptors);
// Deprecated part of the transition tree is no longer reachable, so replace
// current instance descriptors in the "survived" part of the tree with
// the new descriptors to maintain descriptors sharing invariant.
- split_map->ReplaceDescriptors(isolate_, *new_descriptors,
- *new_layout_descriptor);
+ split_map->ReplaceDescriptors(isolate_, *new_descriptors);
if (has_integrity_level_transition_) {
target_map_ = new_map;
diff --git a/deps/v8/src/objects/map.cc b/deps/v8/src/objects/map.cc
index 93a9a92294..8bfbe5812b 100644
--- a/deps/v8/src/objects/map.cc
+++ b/deps/v8/src/objects/map.cc
@@ -17,7 +17,6 @@
#include "src/objects/elements-kind.h"
#include "src/objects/field-type.h"
#include "src/objects/js-objects.h"
-#include "src/objects/layout-descriptor.h"
#include "src/objects/map-updater.h"
#include "src/objects/maybe-object.h"
#include "src/objects/oddball.h"
@@ -154,7 +153,6 @@ VisitorId Map::GetVisitorId(Map map) {
case GLOBAL_DICTIONARY_TYPE:
case NUMBER_DICTIONARY_TYPE:
case SIMPLE_NUMBER_DICTIONARY_TYPE:
- case SCOPE_INFO_TYPE:
case SCRIPT_CONTEXT_TABLE_TYPE:
return kVisitFixedArray;
@@ -222,6 +220,13 @@ VisitorId Map::GetVisitorId(Map map) {
return kVisitJSDataView;
case JS_FUNCTION_TYPE:
+ case JS_PROMISE_CONSTRUCTOR_TYPE:
+ case JS_REG_EXP_CONSTRUCTOR_TYPE:
+ case JS_ARRAY_CONSTRUCTOR_TYPE:
+#define TYPED_ARRAY_CONSTRUCTORS_SWITCH(Type, type, TYPE, Ctype) \
+ case TYPE##_TYPED_ARRAY_CONSTRUCTOR_TYPE:
+ TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTORS_SWITCH)
+#undef TYPED_ARRAY_CONSTRUCTORS_SWITCH
return kVisitJSFunction;
case JS_TYPED_ARRAY_TYPE:
@@ -236,6 +241,9 @@ VisitorId Map::GetVisitorId(Map map) {
case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
return kVisitSmallOrderedNameDictionary;
+ case SWISS_NAME_DICTIONARY_TYPE:
+ return kVisitSwissNameDictionary;
+
case CODE_DATA_CONTAINER_TYPE:
return kVisitCodeDataContainer;
@@ -303,10 +311,10 @@ VisitorId Map::GetVisitorId(Map map) {
case WASM_MEMORY_OBJECT_TYPE:
case WASM_MODULE_OBJECT_TYPE:
case WASM_TABLE_OBJECT_TYPE:
+ case WASM_VALUE_OBJECT_TYPE:
case JS_BOUND_FUNCTION_TYPE: {
const bool has_raw_data_fields =
- (FLAG_unbox_double_fields && !map.HasFastPointerLayout()) ||
- (COMPRESS_POINTERS_BOOL && JSObject::GetEmbedderFieldCount(map) > 0);
+ COMPRESS_POINTERS_BOOL && JSObject::GetEmbedderFieldCount(map) > 0;
return has_raw_data_fields ? kVisitJSObject : kVisitJSObjectFast;
}
case JS_API_OBJECT_TYPE:
@@ -490,32 +498,10 @@ MaybeHandle<Map> Map::CopyWithConstant(Isolate* isolate, Handle<Map> map,
PropertyConstness::kConst, representation, flag);
}
-bool Map::TransitionRemovesTaggedField(Map target) const {
- int inobject = NumberOfFields();
- int target_inobject = target.NumberOfFields();
- for (int i = target_inobject; i < inobject; i++) {
- FieldIndex index = FieldIndex::ForPropertyIndex(*this, i);
- if (!IsUnboxedDoubleField(index)) return true;
- }
- return false;
-}
-
-bool Map::TransitionChangesTaggedFieldToUntaggedField(Map target) const {
+bool Map::TransitionRequiresSynchronizationWithGC(Map target) const {
int inobject = NumberOfFields();
int target_inobject = target.NumberOfFields();
- int limit = std::min(inobject, target_inobject);
- for (int i = 0; i < limit; i++) {
- FieldIndex index = FieldIndex::ForPropertyIndex(target, i);
- if (!IsUnboxedDoubleField(index) && target.IsUnboxedDoubleField(index)) {
- return true;
- }
- }
- return false;
-}
-
-bool Map::TransitionRequiresSynchronizationWithGC(Map target) const {
- return TransitionRemovesTaggedField(target) ||
- TransitionChangesTaggedFieldToUntaggedField(target);
+ return target_inobject < inobject;
}
bool Map::InstancesNeedRewriting(Map target) const {
@@ -603,10 +589,10 @@ void Map::DeprecateTransitionTree(Isolate* isolate) {
for (int i = 0; i < num_transitions; ++i) {
transitions.GetTarget(i).DeprecateTransitionTree(isolate);
}
- DCHECK(!constructor_or_backpointer().IsFunctionTemplateInfo());
+ DCHECK(!constructor_or_back_pointer().IsFunctionTemplateInfo());
DCHECK(CanBeDeprecated());
set_is_deprecated(true);
- if (FLAG_trace_maps) {
+ if (FLAG_log_maps) {
LOG(isolate, MapEvent("Deprecate", handle(*this, isolate), Handle<Map>()));
}
dependent_code().DeoptimizeDependentCodeGroup(
@@ -616,8 +602,8 @@ void Map::DeprecateTransitionTree(Isolate* isolate) {
// Installs |new_descriptors| over the current instance_descriptors to ensure
// proper sharing of descriptor arrays.
-void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
- LayoutDescriptor new_layout_descriptor) {
+void Map::ReplaceDescriptors(Isolate* isolate,
+ DescriptorArray new_descriptors) {
// Don't overwrite the empty descriptor array or initial map's descriptors.
if (NumberOfOwnDescriptors() == 0 ||
GetBackPointer(isolate).IsUndefined(isolate)) {
@@ -636,7 +622,7 @@ void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
Object next = current.GetBackPointer(isolate);
if (next.IsUndefined(isolate)) break; // Stop overwriting at initial map.
current.SetEnumLength(kInvalidEnumCacheSentinel);
- current.UpdateDescriptors(isolate, new_descriptors, new_layout_descriptor,
+ current.UpdateDescriptors(isolate, new_descriptors,
current.NumberOfOwnDescriptors());
current = Map::cast(next);
}
@@ -1111,11 +1097,8 @@ void Map::EnsureDescriptorSlack(Isolate* isolate, Handle<Map> map, int slack) {
DescriptorArray::CopyUpTo(isolate, descriptors, old_size, slack);
DisallowGarbageCollection no_gc;
- // The descriptors are still the same, so keep the layout descriptor.
- LayoutDescriptor layout_descriptor = map->GetLayoutDescriptor();
-
if (old_size == 0) {
- map->UpdateDescriptors(isolate, *new_descriptors, layout_descriptor,
+ map->UpdateDescriptors(isolate, *new_descriptors,
map->NumberOfOwnDescriptors());
return;
}
@@ -1138,11 +1121,11 @@ void Map::EnsureDescriptorSlack(Isolate* isolate, Handle<Map> map, int slack) {
while (current.instance_descriptors(kRelaxedLoad) == *descriptors) {
Object next = current.GetBackPointer();
if (next.IsUndefined(isolate)) break; // Stop overwriting at initial map.
- current.UpdateDescriptors(isolate, *new_descriptors, layout_descriptor,
+ current.UpdateDescriptors(isolate, *new_descriptors,
current.NumberOfOwnDescriptors());
current = Map::cast(next);
}
- map->UpdateDescriptors(isolate, *new_descriptors, layout_descriptor,
+ map->UpdateDescriptors(isolate, *new_descriptors,
map->NumberOfOwnDescriptors());
}
@@ -1453,7 +1436,7 @@ Handle<Map> Map::RawCopy(Isolate* isolate, Handle<Map> map, int instance_size,
inobject_properties);
Handle<HeapObject> prototype(map->prototype(), isolate);
Map::SetPrototype(isolate, result, prototype);
- result->set_constructor_or_backpointer(map->GetConstructor());
+ result->set_constructor_or_back_pointer(map->GetConstructor());
result->set_bit_field(map->bit_field());
result->set_bit_field2(map->bit_field2());
int new_bit_field3 = map->bit_field3();
@@ -1530,7 +1513,7 @@ Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
Map::kSize - offset));
}
#endif
- if (FLAG_trace_maps) {
+ if (FLAG_log_maps) {
LOG(isolate, MapEvent("NormalizeCached", fast_map, new_map, reason));
}
} else {
@@ -1540,7 +1523,7 @@ Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
cache->Set(fast_map, new_map);
isolate->counters()->maps_normalized()->Increment();
}
- if (FLAG_trace_maps) {
+ if (FLAG_log_maps) {
LOG(isolate, MapEvent("Normalize", fast_map, new_map, reason));
}
}
@@ -1599,13 +1582,9 @@ void EnsureInitialMap(Isolate* isolate, Handle<Map> map) {
// Same holds for GeneratorFunction and its initial map.
*map == *isolate->generator_function_map() ||
*map == *isolate->generator_function_with_name_map() ||
- *map == *isolate->generator_function_with_home_object_map() ||
- *map == *isolate->generator_function_with_name_and_home_object_map() ||
// AsyncFunction has Null as a constructor.
*map == *isolate->async_function_map() ||
- *map == *isolate->async_function_with_name_map() ||
- *map == *isolate->async_function_with_home_object_map() ||
- *map == *isolate->async_function_with_name_and_home_object_map());
+ *map == *isolate->async_function_with_name_map());
#endif
// Initial maps must not contain descriptors in the descriptors array
// that do not belong to the map.
@@ -1638,8 +1617,7 @@ Handle<Map> Map::CopyInitialMap(Isolate* isolate, Handle<Map> map,
// The copy will use the same descriptors array without ownership.
DescriptorArray descriptors = map->instance_descriptors(kRelaxedLoad);
result->set_owns_descriptors(false);
- result->UpdateDescriptors(isolate, descriptors, map->GetLayoutDescriptor(),
- number_of_own_descriptors);
+ result->UpdateDescriptors(isolate, descriptors, number_of_own_descriptors);
DCHECK_EQ(result->NumberOfFields(),
result->GetInObjectProperties() - result->UnusedPropertyFields());
@@ -1690,16 +1668,10 @@ Handle<Map> Map::ShareDescriptor(Isolate* isolate, Handle<Map> map,
}
}
- Handle<LayoutDescriptor> layout_descriptor =
- FLAG_unbox_double_fields
- ? LayoutDescriptor::ShareAppend(isolate, map,
- descriptor->GetDetails())
- : handle(LayoutDescriptor::FastPointerLayout(), isolate);
-
{
DisallowGarbageCollection no_gc;
descriptors->Append(descriptor);
- result->InitializeDescriptors(isolate, *descriptors, *layout_descriptor);
+ result->InitializeDescriptors(isolate, *descriptors);
}
DCHECK(result->NumberOfOwnDescriptors() == map->NumberOfOwnDescriptors() + 1);
@@ -1726,22 +1698,23 @@ void Map::ConnectTransition(Isolate* isolate, Handle<Map> parent,
}
if (parent->IsDetached(isolate)) {
DCHECK(child->IsDetached(isolate));
- if (FLAG_trace_maps) {
+ if (FLAG_log_maps) {
LOG(isolate, MapEvent("Transition", parent, child, "prototype", name));
}
} else {
TransitionsAccessor(isolate, parent).Insert(name, child, flag);
- if (FLAG_trace_maps) {
+ if (FLAG_log_maps) {
LOG(isolate, MapEvent("Transition", parent, child, "", name));
}
}
}
-Handle<Map> Map::CopyReplaceDescriptors(
- Isolate* isolate, Handle<Map> map, Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> layout_descriptor, TransitionFlag flag,
- MaybeHandle<Name> maybe_name, const char* reason,
- SimpleTransitionFlag simple_flag) {
+Handle<Map> Map::CopyReplaceDescriptors(Isolate* isolate, Handle<Map> map,
+ Handle<DescriptorArray> descriptors,
+ TransitionFlag flag,
+ MaybeHandle<Name> maybe_name,
+ const char* reason,
+ SimpleTransitionFlag simple_flag) {
DCHECK(descriptors->IsSortedNoDuplicates());
Handle<Map> result = CopyDropDescriptors(isolate, map);
@@ -1754,22 +1727,21 @@ Handle<Map> Map::CopyReplaceDescriptors(
}
if (map->is_prototype_map()) {
- result->InitializeDescriptors(isolate, *descriptors, *layout_descriptor);
+ result->InitializeDescriptors(isolate, *descriptors);
} else {
if (flag == INSERT_TRANSITION &&
TransitionsAccessor(isolate, map).CanHaveMoreTransitions()) {
- result->InitializeDescriptors(isolate, *descriptors, *layout_descriptor);
+ result->InitializeDescriptors(isolate, *descriptors);
DCHECK(!maybe_name.is_null());
ConnectTransition(isolate, map, result, name, simple_flag);
is_connected = true;
} else {
descriptors->GeneralizeAllFields();
- result->InitializeDescriptors(isolate, *descriptors,
- LayoutDescriptor::FastPointerLayout());
+ result->InitializeDescriptors(isolate, *descriptors);
}
}
- if (FLAG_trace_maps && !is_connected) {
+ if (FLAG_log_maps && !is_connected) {
LOG(isolate, MapEvent("ReplaceDescriptors", map, result, reason,
maybe_name.is_null() ? Handle<HeapObject>() : name));
}
@@ -1780,10 +1752,8 @@ Handle<Map> Map::CopyReplaceDescriptors(
// starting from descriptor with index |split_map|.NumberOfOwnDescriptors().
// The way how it is done is tricky because of GC and special descriptors
// marking logic.
-Handle<Map> Map::AddMissingTransitions(
- Isolate* isolate, Handle<Map> split_map,
- Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> full_layout_descriptor) {
+Handle<Map> Map::AddMissingTransitions(Isolate* isolate, Handle<Map> split_map,
+ Handle<DescriptorArray> descriptors) {
DCHECK(descriptors->IsSortedNoDuplicates());
int split_nof = split_map->NumberOfOwnDescriptors();
int nof_descriptors = descriptors->number_of_descriptors();
@@ -1799,8 +1769,7 @@ Handle<Map> Map::AddMissingTransitions(
// the flag and clear it right before the descriptors are installed. This
// makes heap verification happy and ensures the flag ends up accurate.
Handle<Map> last_map = CopyDropDescriptors(isolate, split_map);
- last_map->InitializeDescriptors(isolate, *descriptors,
- *full_layout_descriptor);
+ last_map->InitializeDescriptors(isolate, *descriptors);
last_map->SetInObjectUnusedPropertyFields(0);
last_map->set_may_have_interesting_symbols(true);
@@ -1812,15 +1781,14 @@ Handle<Map> Map::AddMissingTransitions(
Handle<Map> map = split_map;
for (InternalIndex i : InternalIndex::Range(split_nof, nof_descriptors - 1)) {
Handle<Map> new_map = CopyDropDescriptors(isolate, map);
- InstallDescriptors(isolate, map, new_map, i, descriptors,
- full_layout_descriptor);
+ InstallDescriptors(isolate, map, new_map, i, descriptors);
map = new_map;
}
map->NotifyLeafMapLayoutChange(isolate);
last_map->set_may_have_interesting_symbols(false);
InstallDescriptors(isolate, map, last_map, InternalIndex(nof_descriptors - 1),
- descriptors, full_layout_descriptor);
+ descriptors);
return last_map;
}
@@ -1828,8 +1796,7 @@ Handle<Map> Map::AddMissingTransitions(
// always insert transitions without checking.
void Map::InstallDescriptors(Isolate* isolate, Handle<Map> parent,
Handle<Map> child, InternalIndex new_descriptor,
- Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> full_layout_descriptor) {
+ Handle<DescriptorArray> descriptors) {
DCHECK(descriptors->IsSortedNoDuplicates());
child->SetInstanceDescriptors(isolate, *descriptors,
@@ -1840,23 +1807,6 @@ void Map::InstallDescriptors(Isolate* isolate, Handle<Map> parent,
child->AccountAddedPropertyField();
}
- if (FLAG_unbox_double_fields) {
- Handle<LayoutDescriptor> layout_descriptor =
- LayoutDescriptor::AppendIfFastOrUseFull(isolate, parent, details,
- full_layout_descriptor);
- child->set_layout_descriptor(*layout_descriptor, kReleaseStore);
-#ifdef VERIFY_HEAP
- // TODO(ishell): remove these checks from VERIFY_HEAP mode.
- if (FLAG_verify_heap) {
- CHECK(child->layout_descriptor(kAcquireLoad).IsConsistentWithMap(*child));
- }
-#else
- SLOW_DCHECK(
- child->layout_descriptor(kAcquireLoad).IsConsistentWithMap(*child));
-#endif
- child->set_visitor_id(Map::GetVisitorId(*child));
- }
-
Handle<Name> name = handle(descriptors->GetKey(new_descriptor), isolate);
if (parent->may_have_interesting_symbols() || name->IsInterestingSymbol()) {
child->set_may_have_interesting_symbols(true);
@@ -1911,7 +1861,7 @@ Handle<Map> Map::CopyAsElementsKind(Isolate* isolate, Handle<Map> map,
Handle<Map> Map::AsLanguageMode(Isolate* isolate, Handle<Map> initial_map,
Handle<SharedFunctionInfo> shared_info) {
- DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
+ DCHECK(InstanceTypeChecker::IsJSFunction(initial_map->instance_type()));
// Initial map for sloppy mode function is stored in the function
// constructor. Initial maps for strict mode are cached as special transitions
// using |strict_function_transition_symbol| as a key.
@@ -1959,8 +1909,7 @@ Handle<Map> Map::CopyForElementsTransition(Isolate* isolate, Handle<Map> map) {
// The properties did not change, so reuse descriptors.
map->set_owns_descriptors(false);
new_map->InitializeDescriptors(isolate,
- map->instance_descriptors(kRelaxedLoad),
- map->GetLayoutDescriptor());
+ map->instance_descriptors(kRelaxedLoad));
} else {
// In case the map did not own its own descriptors, a split is forced by
// copying the map; creating a new descriptor array cell.
@@ -1969,10 +1918,7 @@ Handle<Map> Map::CopyForElementsTransition(Isolate* isolate, Handle<Map> map) {
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
isolate, descriptors, number_of_own_descriptors);
- Handle<LayoutDescriptor> new_layout_descriptor(map->GetLayoutDescriptor(),
- isolate);
- new_map->InitializeDescriptors(isolate, *new_descriptors,
- *new_layout_descriptor);
+ new_map->InitializeDescriptors(isolate, *new_descriptors);
}
return new_map;
}
@@ -1983,11 +1929,9 @@ Handle<Map> Map::Copy(Isolate* isolate, Handle<Map> map, const char* reason) {
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
isolate, descriptors, number_of_own_descriptors);
- Handle<LayoutDescriptor> new_layout_descriptor(map->GetLayoutDescriptor(),
- isolate);
- return CopyReplaceDescriptors(
- isolate, map, new_descriptors, new_layout_descriptor, OMIT_TRANSITION,
- MaybeHandle<Name>(), reason, SPECIAL_TRANSITION);
+ return CopyReplaceDescriptors(isolate, map, new_descriptors, OMIT_TRANSITION,
+ MaybeHandle<Name>(), reason,
+ SPECIAL_TRANSITION);
}
Handle<Map> Map::Create(Isolate* isolate, int inobject_properties) {
@@ -2022,14 +1966,12 @@ Handle<Map> Map::CopyForPreventExtensions(
Handle<DescriptorArray> new_desc = DescriptorArray::CopyUpToAddAttributes(
isolate, handle(map->instance_descriptors(kRelaxedLoad), isolate),
num_descriptors, attrs_to_add);
- Handle<LayoutDescriptor> new_layout_descriptor(map->GetLayoutDescriptor(),
- isolate);
// Do not track transitions during bootstrapping.
TransitionFlag flag =
isolate->bootstrapper()->IsActive() ? OMIT_TRANSITION : INSERT_TRANSITION;
- Handle<Map> new_map = CopyReplaceDescriptors(
- isolate, map, new_desc, new_layout_descriptor, flag, transition_marker,
- reason, SPECIAL_TRANSITION);
+ Handle<Map> new_map =
+ CopyReplaceDescriptors(isolate, map, new_desc, flag, transition_marker,
+ reason, SPECIAL_TRANSITION);
new_map->set_is_extensible(false);
if (!IsTypedArrayElementsKind(map->elements_kind())) {
ElementsKind new_kind = IsStringWrapperElementsKind(map->elements_kind())
@@ -2195,7 +2137,7 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
const char* reason = "TooManyFastProperties";
#if V8_TRACE_MAPS
std::unique_ptr<ScopedVector<char>> buffer;
- if (FLAG_trace_maps) {
+ if (FLAG_log_maps) {
ScopedVector<char> name_buffer(100);
name->NameShortPrint(name_buffer);
buffer.reset(new ScopedVector<char>(128));
@@ -2391,14 +2333,9 @@ Handle<Map> Map::CopyAddDescriptor(Isolate* isolate, Handle<Map> map,
DescriptorArray::CopyUpTo(isolate, descriptors, nof, 1);
new_descriptors->Append(descriptor);
- Handle<LayoutDescriptor> new_layout_descriptor =
- FLAG_unbox_double_fields
- ? LayoutDescriptor::New(isolate, map, new_descriptors, nof + 1)
- : handle(LayoutDescriptor::FastPointerLayout(), isolate);
-
- return CopyReplaceDescriptors(
- isolate, map, new_descriptors, new_layout_descriptor, flag,
- descriptor->GetKey(), "CopyAddDescriptor", SIMPLE_PROPERTY_TRANSITION);
+ return CopyReplaceDescriptors(isolate, map, new_descriptors, flag,
+ descriptor->GetKey(), "CopyAddDescriptor",
+ SIMPLE_PROPERTY_TRANSITION);
}
Handle<Map> Map::CopyInsertDescriptor(Isolate* isolate, Handle<Map> map,
@@ -2433,15 +2370,12 @@ Handle<Map> Map::CopyReplaceDescriptor(Isolate* isolate, Handle<Map> map,
isolate, descriptors, map->NumberOfOwnDescriptors());
new_descriptors->Replace(insertion_index, descriptor);
- Handle<LayoutDescriptor> new_layout_descriptor = LayoutDescriptor::New(
- isolate, map, new_descriptors, new_descriptors->number_of_descriptors());
SimpleTransitionFlag simple_flag =
(insertion_index.as_int() == descriptors->number_of_descriptors() - 1)
? SIMPLE_PROPERTY_TRANSITION
: PROPERTY_TRANSITION;
- return CopyReplaceDescriptors(isolate, map, new_descriptors,
- new_layout_descriptor, flag, key,
+ return CopyReplaceDescriptors(isolate, map, new_descriptors, flag, key,
"CopyReplaceDescriptor", simple_flag);
}
@@ -2482,7 +2416,7 @@ bool Map::EquivalentToForTransition(const Map other) const {
if (bit_field() != other.bit_field()) return false;
if (new_target_is_base() != other.new_target_is_base()) return false;
if (prototype() != other.prototype()) return false;
- if (instance_type() == JS_FUNCTION_TYPE) {
+ if (InstanceTypeChecker::IsJSFunction(instance_type())) {
// JSFunctions require more checks to ensure that sloppy function is
// not equivalent to strict function.
int nof =
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 561dc8b6bf..01b1bf3a65 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -32,46 +32,47 @@ enum InstanceType : uint16_t;
V(FeedbackMetadata) \
V(FixedDoubleArray)
-#define POINTER_VISITOR_ID_LIST(V) \
- V(AllocationSite) \
- V(BytecodeArray) \
- V(Cell) \
- V(Code) \
- V(CodeDataContainer) \
- V(DataHandler) \
- V(EmbedderDataArray) \
- V(EphemeronHashTable) \
- V(FeedbackCell) \
- V(FreeSpace) \
- V(JSApiObject) \
- V(JSArrayBuffer) \
- V(JSDataView) \
- V(JSFunction) \
- V(JSObject) \
- V(JSObjectFast) \
- V(JSTypedArray) \
- V(JSWeakRef) \
- V(JSWeakCollection) \
- V(Map) \
- V(NativeContext) \
- V(PreparseData) \
- V(PropertyArray) \
- V(PropertyCell) \
- V(PrototypeInfo) \
- V(ShortcutCandidate) \
- V(SmallOrderedHashMap) \
- V(SmallOrderedHashSet) \
- V(SmallOrderedNameDictionary) \
- V(SourceTextModule) \
- V(Struct) \
- V(Symbol) \
- V(SyntheticModule) \
- V(TransitionArray) \
- V(WasmIndirectFunctionTable) \
- V(WasmInstanceObject) \
- V(WasmArray) \
- V(WasmStruct) \
- V(WasmTypeInfo) \
+#define POINTER_VISITOR_ID_LIST(V) \
+ V(AllocationSite) \
+ V(BytecodeArray) \
+ V(Cell) \
+ V(Code) \
+ V(CodeDataContainer) \
+ V(DataHandler) \
+ V(EmbedderDataArray) \
+ V(EphemeronHashTable) \
+ V(FeedbackCell) \
+ V(FreeSpace) \
+ V(JSApiObject) \
+ V(JSArrayBuffer) \
+ V(JSDataView) \
+ V(JSFunction) \
+ V(JSObject) \
+ V(JSObjectFast) \
+ V(JSTypedArray) \
+ V(JSWeakRef) \
+ V(JSWeakCollection) \
+ V(Map) \
+ V(NativeContext) \
+ V(PreparseData) \
+ V(PropertyArray) \
+ V(PropertyCell) \
+ V(PrototypeInfo) \
+ V(ShortcutCandidate) \
+ V(SmallOrderedHashMap) \
+ V(SmallOrderedHashSet) \
+ V(SmallOrderedNameDictionary) \
+ V(SourceTextModule) \
+ V(Struct) \
+ V(SwissNameDictionary) \
+ V(Symbol) \
+ V(SyntheticModule) \
+ V(TransitionArray) \
+ V(WasmIndirectFunctionTable) \
+ V(WasmInstanceObject) \
+ V(WasmArray) \
+ V(WasmStruct) \
+ V(WasmTypeInfo) \
V(WeakCell)
#define TORQUE_VISITOR_ID_LIST(V) \
@@ -108,85 +109,81 @@ using MapHandles = std::vector<Handle<Map>>;
// - How to iterate over an object (for garbage collection)
//
// Map layout:
-// +---------------+------------------------------------------------+
-// | _ Type _ | _ Description _ |
-// +---------------+------------------------------------------------+
-// | TaggedPointer | map - Always a pointer to the MetaMap root |
-// +---------------+------------------------------------------------+
-// | Int | The first int field |
-// `---+----------+------------------------------------------------+
-// | Byte | [instance_size] |
-// +----------+------------------------------------------------+
-// | Byte | If Map for a primitive type: |
-// | | native context index for constructor fn |
-// | | If Map for an Object type: |
-// | | inobject properties start offset in words |
-// +----------+------------------------------------------------+
-// | Byte | [used_or_unused_instance_size_in_words] |
-// | | For JSObject in fast mode this byte encodes |
-// | | the size of the object that includes only |
-// | | the used property fields or the slack size |
-// | | in properties backing store. |
-// +----------+------------------------------------------------+
-// | Byte | [visitor_id] |
-// +----+----------+------------------------------------------------+
-// | Int | The second int field |
-// `---+----------+------------------------------------------------+
-// | Short | [instance_type] |
-// +----------+------------------------------------------------+
-// | Byte | [bit_field] |
-// | | - has_non_instance_prototype (bit 0) |
-// | | - is_callable (bit 1) |
-// | | - has_named_interceptor (bit 2) |
-// | | - has_indexed_interceptor (bit 3) |
-// | | - is_undetectable (bit 4) |
-// | | - is_access_check_needed (bit 5) |
-// | | - is_constructor (bit 6) |
-// | | - has_prototype_slot (bit 7) |
-// +----------+------------------------------------------------+
-// | Byte | [bit_field2] |
-// | | - new_target_is_base (bit 0) |
-// | | - is_immutable_proto (bit 1) |
-// | | - unused bit (bit 2) |
-// | | - elements_kind (bits 3..7) |
-// +----+----------+------------------------------------------------+
-// | Int | [bit_field3] |
-// | | - enum_length (bit 0..9) |
-// | | - number_of_own_descriptors (bit 10..19) |
-// | | - is_prototype_map (bit 20) |
-// | | - is_dictionary_map (bit 21) |
-// | | - owns_descriptors (bit 22) |
-// | | - is_in_retained_map_list (bit 23) |
-// | | - is_deprecated (bit 24) |
-// | | - is_unstable (bit 25) |
-// | | - is_migration_target (bit 26) |
-// | | - is_extensible (bit 28) |
-// | | - may_have_interesting_symbols (bit 28) |
-// | | - construction_counter (bit 29..31) |
-// | | |
-// +****************************************************************+
-// | Int | On systems with 64bit pointer types, there |
-// | | is an unused 32bits after bit_field3 |
-// +****************************************************************+
-// | TaggedPointer | [prototype] |
-// +---------------+------------------------------------------------+
-// | TaggedPointer | [constructor_or_backpointer_or_native_context] |
-// +---------------+------------------------------------------------+
-// | TaggedPointer | [instance_descriptors] |
-// +****************************************************************+
-// ! TaggedPointer ! [layout_descriptor] !
-// ! ! Field is only present if compile-time flag !
-// ! ! FLAG_unbox_double_fields is enabled !
-// +****************************************************************+
-// | TaggedPointer | [dependent_code] |
-// +---------------+------------------------------------------------+
-// | TaggedPointer | [prototype_validity_cell] |
-// +---------------+------------------------------------------------+
-// | TaggedPointer | If Map is a prototype map: |
-// | | [prototype_info] |
-// | | Else: |
-// | | [raw_transitions] |
-// +---------------+------------------------------------------------+
+// +---------------+-------------------------------------------------+
+// | _ Type _ | _ Description _ |
+// +---------------+-------------------------------------------------+
+// | TaggedPointer | map - Always a pointer to the MetaMap root |
+// +---------------+-------------------------------------------------+
+// | Int | The first int field |
+// `---+----------+-------------------------------------------------+
+// | Byte | [instance_size] |
+// +----------+-------------------------------------------------+
+// | Byte | If Map for a primitive type: |
+// | | native context index for constructor fn |
+// | | If Map for an Object type: |
+// | | inobject properties start offset in words |
+// +----------+-------------------------------------------------+
+// | Byte | [used_or_unused_instance_size_in_words] |
+// | | For JSObject in fast mode this byte encodes |
+// | | the size of the object that includes only |
+// | | the used property fields or the slack size |
+// | | in properties backing store. |
+// +----------+-------------------------------------------------+
+// | Byte | [visitor_id] |
+// +----+----------+-------------------------------------------------+
+// | Int | The second int field |
+// `---+----------+-------------------------------------------------+
+// | Short | [instance_type] |
+// +----------+-------------------------------------------------+
+// | Byte | [bit_field] |
+// | | - has_non_instance_prototype (bit 0) |
+// | | - is_callable (bit 1) |
+// | | - has_named_interceptor (bit 2) |
+// | | - has_indexed_interceptor (bit 3) |
+// | | - is_undetectable (bit 4) |
+// | | - is_access_check_needed (bit 5) |
+// | | - is_constructor (bit 6) |
+// | | - has_prototype_slot (bit 7) |
+// +----------+-------------------------------------------------+
+// | Byte | [bit_field2] |
+// | | - new_target_is_base (bit 0) |
+// | | - is_immutable_proto (bit 1) |
+// | | - unused bit (bit 2) |
+// | | - elements_kind (bits 3..7) |
+// +----+----------+-------------------------------------------------+
+// | Int | [bit_field3] |
+// | | - enum_length (bit 0..9) |
+// | | - number_of_own_descriptors (bit 10..19) |
+// | | - is_prototype_map (bit 20) |
+// | | - is_dictionary_map (bit 21) |
+// | | - owns_descriptors (bit 22) |
+// | | - is_in_retained_map_list (bit 23) |
+// | | - is_deprecated (bit 24) |
+// | | - is_unstable (bit 25) |
+// | | - is_migration_target (bit 26) |
+// | | - is_extensible (bit 28) |
+// | | - may_have_interesting_symbols (bit 28) |
+// | | - construction_counter (bit 29..31) |
+// | | |
+// +*****************************************************************+
+// | Int | On systems with 64bit pointer types, there |
+// | | is an unused 32bits after bit_field3 |
+// +*****************************************************************+
+// | TaggedPointer | [prototype] |
+// +---------------+-------------------------------------------------+
+// | TaggedPointer | [constructor_or_back_pointer_or_native_context] |
+// +---------------+-------------------------------------------------+
+// | TaggedPointer | [instance_descriptors] |
+// +*****************************************************************+
+// | TaggedPointer | [dependent_code] |
+// +---------------+-------------------------------------------------+
+// | TaggedPointer | [prototype_validity_cell] |
+// +---------------+-------------------------------------------------+
+// | TaggedPointer | If Map is a prototype map: |
+// | | [prototype_info] |
+// | | Else: |
+// | | [raw_transitions] |
+// +---------------+-------------------------------------------------+
class Map : public HeapObject {
public:
@@ -428,6 +425,7 @@ class Map : public HeapObject {
// Don't call set_raw_transitions() directly to overwrite transitions, use
// the TransitionArray::ReplaceTransitions() wrapper instead!
DECL_ACCESSORS(raw_transitions, MaybeObject)
+ DECL_RELEASE_ACQUIRE_WEAK_ACCESSORS(raw_transitions)
// [prototype_info]: Per-prototype metadata. Aliased with transitions
// (which prototype maps don't have).
DECL_ACCESSORS(prototype_info, Object)
@@ -486,12 +484,6 @@ class Map : public HeapObject {
// Returns true if transition to the given map requires special
// synchronization with the concurrent marker.
bool TransitionRequiresSynchronizationWithGC(Map target) const;
- // Returns true if transition to the given map removes a tagged in-object
- // field.
- bool TransitionRemovesTaggedField(Map target) const;
- // Returns true if transition to the given map replaces a tagged in-object
- // field with an untagged in-object field.
- bool TransitionChangesTaggedFieldToUntaggedField(Map target) const;
// TODO(ishell): candidate with JSObject::MigrateToMap().
bool InstancesNeedRewriting(Map target) const;
@@ -570,7 +562,7 @@ class Map : public HeapObject {
// FunctionTemplateInfo available.
// The field also overlaps with the native context pointer for context maps,
// and with the Wasm type info for WebAssembly object maps.
- DECL_ACCESSORS(constructor_or_backpointer, Object)
+ DECL_ACCESSORS(constructor_or_back_pointer, Object)
DECL_ACCESSORS(native_context, NativeContext)
DECL_ACCESSORS(wasm_type_info, WasmTypeInfo)
DECL_GETTER(GetConstructor, Object)
@@ -594,23 +586,10 @@ class Map : public HeapObject {
DescriptorArray descriptors,
int number_of_own_descriptors);
- // [layout descriptor]: describes the object layout.
- DECL_RELEASE_ACQUIRE_ACCESSORS(layout_descriptor, LayoutDescriptor)
- // |layout descriptor| accessor which can be used from GC.
- inline LayoutDescriptor layout_descriptor_gc_safe() const;
- inline bool HasFastPointerLayout() const;
-
- // |layout descriptor| accessor that is safe to call even when
- // FLAG_unbox_double_fields is disabled (in this case Map does not contain
- // |layout_descriptor| field at all).
- inline LayoutDescriptor GetLayoutDescriptor() const;
-
inline void UpdateDescriptors(Isolate* isolate, DescriptorArray descriptors,
- LayoutDescriptor layout_descriptor,
int number_of_own_descriptors);
inline void InitializeDescriptors(Isolate* isolate,
- DescriptorArray descriptors,
- LayoutDescriptor layout_descriptor);
+ DescriptorArray descriptors);
// [dependent code]: list of optimized codes that weakly embed this map.
DECL_ACCESSORS(dependent_code, DependentCode)
@@ -854,16 +833,11 @@ class Map : public HeapObject {
inline bool EquivalentToForNormalization(
const Map other, PropertyNormalizationMode mode) const;
- // Returns true if given field is unboxed double.
- inline bool IsUnboxedDoubleField(FieldIndex index) const;
- inline bool IsUnboxedDoubleField(IsolateRoot isolate, FieldIndex index) const;
-
void PrintMapDetails(std::ostream& os);
static inline Handle<Map> AddMissingTransitionsForTesting(
Isolate* isolate, Handle<Map> split_map,
- Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> full_layout_descriptor);
+ Handle<DescriptorArray> descriptors);
// Fires when the layout of an object with a leaf map changes.
// This includes adding transitions to the leaf map or changing
@@ -919,20 +893,20 @@ class Map : public HeapObject {
Handle<DescriptorArray> descriptors,
Descriptor* descriptor);
V8_EXPORT_PRIVATE static Handle<Map> AddMissingTransitions(
- Isolate* isolate, Handle<Map> map, Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> full_layout_descriptor);
- static void InstallDescriptors(
- Isolate* isolate, Handle<Map> parent_map, Handle<Map> child_map,
- InternalIndex new_descriptor, Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> full_layout_descriptor);
+ Isolate* isolate, Handle<Map> map, Handle<DescriptorArray> descriptors);
+ static void InstallDescriptors(Isolate* isolate, Handle<Map> parent_map,
+ Handle<Map> child_map,
+ InternalIndex new_descriptor,
+ Handle<DescriptorArray> descriptors);
static Handle<Map> CopyAddDescriptor(Isolate* isolate, Handle<Map> map,
Descriptor* descriptor,
TransitionFlag flag);
- static Handle<Map> CopyReplaceDescriptors(
- Isolate* isolate, Handle<Map> map, Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> layout_descriptor, TransitionFlag flag,
- MaybeHandle<Name> maybe_name, const char* reason,
- SimpleTransitionFlag simple_flag);
+ static Handle<Map> CopyReplaceDescriptors(Isolate* isolate, Handle<Map> map,
+ Handle<DescriptorArray> descriptors,
+ TransitionFlag flag,
+ MaybeHandle<Name> maybe_name,
+ const char* reason,
+ SimpleTransitionFlag simple_flag);
static Handle<Map> CopyReplaceDescriptor(Isolate* isolate, Handle<Map> map,
Handle<DescriptorArray> descriptors,
@@ -944,8 +918,7 @@ class Map : public HeapObject {
void DeprecateTransitionTree(Isolate* isolate);
- void ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
- LayoutDescriptor new_layout_descriptor);
+ void ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors);
// Update field type of the given descriptor to new representation and new
// type. The type must be prepared for storing in descriptor array:
diff --git a/deps/v8/src/objects/map.tq b/deps/v8/src/objects/map.tq
index e9f82f78cb..4cd3f2d67f 100644
--- a/deps/v8/src/objects/map.tq
+++ b/deps/v8/src/objects/map.tq
@@ -68,8 +68,6 @@ extern class Map extends HeapObject {
prototype: JSReceiver|Null;
constructor_or_back_pointer_or_native_context: Object;
instance_descriptors: DescriptorArray;
- @if(V8_DOUBLE_FIELDS_UNBOXING) layout_descriptor: LayoutDescriptor;
- @ifnot(V8_DOUBLE_FIELDS_UNBOXING) layout_descriptor: void;
dependent_code: DependentCode;
prototype_validity_cell: Smi|Cell;
weak transitions_or_prototype_info: Map|Weak<Map>|TransitionArray|
diff --git a/deps/v8/src/objects/module-inl.h b/deps/v8/src/objects/module-inl.h
index 97e4691912..b65c1ec4cd 100644
--- a/deps/v8/src/objects/module-inl.h
+++ b/deps/v8/src/objects/module-inl.h
@@ -33,6 +33,7 @@ CAST_ACCESSOR(Module)
ACCESSORS(Module, exports, ObjectHashTable, kExportsOffset)
ACCESSORS(Module, module_namespace, HeapObject, kModuleNamespaceOffset)
ACCESSORS(Module, exception, Object, kExceptionOffset)
+ACCESSORS(Module, top_level_capability, HeapObject, kTopLevelCapabilityOffset)
SMI_ACCESSORS(Module, status, kStatusOffset)
SMI_ACCESSORS(Module, hash, kHashOffset)
@@ -41,8 +42,6 @@ BOOL_ACCESSORS(SourceTextModule, flags, async_evaluating,
AsyncEvaluatingBit::kShift)
ACCESSORS(SourceTextModule, async_parent_modules, ArrayList,
kAsyncParentModulesOffset)
-ACCESSORS(SourceTextModule, top_level_capability, HeapObject,
- kTopLevelCapabilityOffset)
struct Module::Hash {
V8_INLINE size_t operator()(Module const& module) const {
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index 1bef1a1d45..eb7887f139 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -245,11 +245,56 @@ MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module) {
PrintStatusMessage(*module, "Evaluating module ");
#endif // DEBUG
STACK_CHECK(isolate, MaybeHandle<Object>());
- if (FLAG_harmony_top_level_await && module->IsSourceTextModule()) {
+ if (FLAG_harmony_top_level_await) {
+ return Module::EvaluateMaybeAsync(isolate, module);
+ } else {
+ return Module::InnerEvaluate(isolate, module);
+ }
+}
+
+MaybeHandle<Object> Module::EvaluateMaybeAsync(Isolate* isolate,
+ Handle<Module> module) {
+ // In the event of errored evaluation, return a rejected promise.
+ if (module->status() == kErrored) {
+ // If we have a top level capability we assume it has already been
+ // rejected, and return it here. Otherwise create a new promise and
+ // reject it with the module's exception.
+ if (module->top_level_capability().IsJSPromise()) {
+ Handle<JSPromise> top_level_capability(
+ JSPromise::cast(module->top_level_capability()), isolate);
+ DCHECK(top_level_capability->status() == Promise::kRejected &&
+ top_level_capability->result() == module->exception());
+ return top_level_capability;
+ }
+ Handle<JSPromise> capability = isolate->factory()->NewJSPromise();
+ JSPromise::Reject(capability, handle(module->exception(), isolate));
+ return capability;
+ }
+
+ // Start of Evaluate () Concrete Method
+ // 2. Assert: module.[[Status]] is "linked" or "evaluated".
+ CHECK(module->status() == kInstantiated || module->status() == kEvaluated);
+
+ // 3. If module.[[Status]] is "evaluated", set module to
+ // module.[[CycleRoot]].
+ // A Synthetic Module has no children so it is its own cycle root.
+ if (module->status() == kEvaluated && module->IsSourceTextModule()) {
+ module = Handle<SourceTextModule>::cast(module)->GetCycleRoot(isolate);
+ }
+
+ // 4. If module.[[TopLevelCapability]] is not undefined, then
+ // a. Return module.[[TopLevelCapability]].[[Promise]].
+ if (module->top_level_capability().IsJSPromise()) {
+ return handle(JSPromise::cast(module->top_level_capability()), isolate);
+ }
+ DCHECK(module->top_level_capability().IsUndefined());
+
+ if (module->IsSourceTextModule()) {
return SourceTextModule::EvaluateMaybeAsync(
isolate, Handle<SourceTextModule>::cast(module));
} else {
- return Module::InnerEvaluate(isolate, module);
+ return SyntheticModule::Evaluate(isolate,
+ Handle<SyntheticModule>::cast(module));
}
}
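Observable consequence of the EvaluateMaybeAsync path added above: with top-level await enabled, module evaluation surfaces as a promise, and a module whose earlier evaluation threw yields a rejected promise carrying the recorded exception. A rough TypeScript illustration (the specifier is hypothetical):

  // Repeated imports of an errored module observe the recorded module exception.
  const first = import("./throws-at-top-level.mjs").catch((e) => e);
  const second = import("./throws-at-top-level.mjs").catch((e) => e);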
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index d254f18420..a114a34a97 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -65,6 +65,10 @@ class Module : public HeapObject {
Object GetException();
DECL_ACCESSORS(exception, Object)
+ // The top level promise capability of this module. Will only be defined
+ // for cycle roots.
+ DECL_ACCESSORS(top_level_capability, HeapObject)
+
// Returns if this module or any transitively requested module is [[Async]],
// i.e. has a top-level await.
V8_WARN_UNUSED_RESULT bool IsGraphAsync(Isolate* isolate) const;
@@ -132,6 +136,9 @@ class Module : public HeapObject {
ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index,
Zone* zone);
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Object> EvaluateMaybeAsync(
+ Isolate* isolate, Handle<Module> module);
+
static V8_WARN_UNUSED_RESULT MaybeHandle<Object> InnerEvaluate(
Isolate* isolate, Handle<Module> module);
diff --git a/deps/v8/src/objects/module.tq b/deps/v8/src/objects/module.tq
index c581e8d052..2d8e8b6327 100644
--- a/deps/v8/src/objects/module.tq
+++ b/deps/v8/src/objects/module.tq
@@ -9,6 +9,7 @@ extern class Module extends HeapObject {
status: Smi;
module_namespace: JSModuleNamespace|Undefined;
exception: Object;
+ top_level_capability: JSPromise|Undefined;
}
@generateCppClass
diff --git a/deps/v8/src/objects/object-list-macros.h b/deps/v8/src/objects/object-list-macros.h
index 78f9a7f32e..40ef44c785 100644
--- a/deps/v8/src/objects/object-list-macros.h
+++ b/deps/v8/src/objects/object-list-macros.h
@@ -34,7 +34,6 @@ class JSPromise;
class JSProxy;
class JSProxyRevocableResult;
class KeyAccumulator;
-class LayoutDescriptor;
class LookupIterator;
class FieldType;
class Module;
@@ -71,7 +70,6 @@ template <typename T>
class ZoneForwardList;
#define OBJECT_TYPE_LIST(V) \
- V(LayoutDescriptor) \
V(Primitive) \
V(Number) \
V(Numeric)
@@ -117,7 +115,6 @@ class ZoneForwardList;
V(FixedArrayExact) \
V(FixedDoubleArray) \
V(Foreign) \
- V(FrameArray) \
V(FreeSpace) \
V(Function) \
V(GlobalDictionary) \
@@ -207,6 +204,7 @@ class ZoneForwardList;
V(StringSet) \
V(StringWrapper) \
V(Struct) \
+ V(SwissNameDictionary) \
V(Symbol) \
V(SymbolWrapper) \
V(SyntheticModule) \
@@ -226,6 +224,7 @@ class ZoneForwardList;
V(WasmStruct) \
V(WasmTypeInfo) \
V(WasmTableObject) \
+ V(WasmValueObject) \
V(WeakFixedArray) \
V(WeakArrayList) \
V(WeakCell) \
@@ -265,6 +264,9 @@ class ZoneForwardList;
V(FreeSpaceOrFiller) \
V(FunctionContext) \
V(JSApiObject) \
+ V(JSPromiseConstructor) \
+ V(JSArrayConstructor) \
+ V(JSRegExpConstructor) \
V(JSMapKeyIterator) \
V(JSMapKeyValueIterator) \
V(JSMapValueIterator) \
@@ -285,7 +287,19 @@ class ZoneForwardList;
V(JSMapIteratorPrototype) \
V(JSTypedArrayPrototype) \
V(JSSetIteratorPrototype) \
- V(JSStringIteratorPrototype)
+ V(JSStringIteratorPrototype) \
+ V(TypedArrayConstructor) \
+ V(Uint8TypedArrayConstructor) \
+ V(Int8TypedArrayConstructor) \
+ V(Uint16TypedArrayConstructor) \
+ V(Int16TypedArrayConstructor) \
+ V(Uint32TypedArrayConstructor) \
+ V(Int32TypedArrayConstructor) \
+ V(Float32TypedArrayConstructor) \
+ V(Float64TypedArrayConstructor) \
+ V(Uint8ClampedTypedArrayConstructor) \
+ V(Biguint64TypedArrayConstructor) \
+ V(Bigint64TypedArrayConstructor)
#define HEAP_OBJECT_TYPE_LIST(V) \
HEAP_OBJECT_ORDINARY_TYPE_LIST(V) \
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index 7928eff5fb..f36751e1f9 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -48,9 +48,13 @@
return GetIsolateFromWritableObject(*this); \
}
+#define DECL_PRIMITIVE_GETTER(name, type) inline type name() const;
+
+#define DECL_PRIMITIVE_SETTER(name, type) inline void set_##name(type value);
+
#define DECL_PRIMITIVE_ACCESSORS(name, type) \
- inline type name() const; \
- inline void set_##name(type value);
+ DECL_PRIMITIVE_GETTER(name, type) \
+ DECL_PRIMITIVE_SETTER(name, type)
#define DECL_SYNCHRONIZED_PRIMITIVE_ACCESSORS(name, type) \
inline type synchronized_##name() const; \
@@ -127,6 +131,10 @@
DECL_ACQUIRE_GETTER(name, type) \
DECL_RELEASE_SETTER(name, type)
+#define DECL_RELEASE_ACQUIRE_WEAK_ACCESSORS(name) \
+ DECL_ACQUIRE_GETTER(name, MaybeObject) \
+ DECL_RELEASE_SETTER(name, MaybeObject)
+
#define DECL_CAST(Type) \
V8_INLINE static Type cast(Object object); \
V8_INLINE static Type unchecked_cast(Object object) { \
@@ -274,26 +282,32 @@
#define WEAK_ACCESSORS(holder, name, offset) \
WEAK_ACCESSORS_CHECKED(holder, name, offset, true)
-#define SYNCHRONIZED_WEAK_ACCESSORS_CHECKED2(holder, name, offset, \
- get_condition, set_condition) \
- DEF_GETTER(holder, name, MaybeObject) { \
- MaybeObject value = \
- TaggedField<MaybeObject, offset>::Acquire_Load(isolate, *this); \
- DCHECK(get_condition); \
- return value; \
- } \
- void holder::set_##name(MaybeObject value, WriteBarrierMode mode) { \
- DCHECK(set_condition); \
- TaggedField<MaybeObject, offset>::Release_Store(*this, value); \
- CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode); \
+#define RELEASE_ACQUIRE_WEAK_ACCESSORS_CHECKED2(holder, name, offset, \
+ get_condition, set_condition) \
+ MaybeObject holder::name(AcquireLoadTag tag) const { \
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this); \
+ return holder::name(isolate, tag); \
+ } \
+ MaybeObject holder::name(IsolateRoot isolate, AcquireLoadTag) const { \
+ MaybeObject value = \
+ TaggedField<MaybeObject, offset>::Acquire_Load(isolate, *this); \
+ DCHECK(get_condition); \
+ return value; \
+ } \
+ void holder::set_##name(MaybeObject value, ReleaseStoreTag, \
+ WriteBarrierMode mode) { \
+ DCHECK(set_condition); \
+ TaggedField<MaybeObject, offset>::Release_Store(*this, value); \
+ CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode); \
}
-#define SYNCHRONIZED_WEAK_ACCESSORS_CHECKED(holder, name, offset, condition) \
- SYNCHRONIZED_WEAK_ACCESSORS_CHECKED2(holder, name, offset, condition, \
- condition)
+#define RELEASE_ACQUIRE_WEAK_ACCESSORS_CHECKED(holder, name, offset, \
+ condition) \
+ RELEASE_ACQUIRE_WEAK_ACCESSORS_CHECKED2(holder, name, offset, condition, \
+ condition)
-#define SYNCHRONIZED_WEAK_ACCESSORS(holder, name, offset) \
- SYNCHRONIZED_WEAK_ACCESSORS_CHECKED(holder, name, offset, true)
+#define RELEASE_ACQUIRE_WEAK_ACCESSORS(holder, name, offset) \
+ RELEASE_ACQUIRE_WEAK_ACCESSORS_CHECKED(holder, name, offset, true)
// Getter that returns a Smi as an int and writes an int as a Smi.
#define SMI_ACCESSORS_CHECKED(holder, name, offset, condition) \
@@ -491,6 +505,15 @@
static_cast<int8_t>(base::Relaxed_Load( \
reinterpret_cast<const base::Atomic8*>(FIELD_ADDR(p, offset))))
+#define RELAXED_READ_UINT16_FIELD(p, offset) \
+ static_cast<uint16_t>(base::Relaxed_Load( \
+ reinterpret_cast<const base::Atomic16*>(FIELD_ADDR(p, offset))))
+
+#define RELAXED_WRITE_UINT16_FIELD(p, offset, value) \
+ base::Relaxed_Store( \
+ reinterpret_cast<base::Atomic16*>(FIELD_ADDR(p, offset)), \
+ static_cast<base::Atomic16>(value));
+
#define RELAXED_READ_INT16_FIELD(p, offset) \
static_cast<int16_t>(base::Relaxed_Load( \
reinterpret_cast<const base::Atomic16*>(FIELD_ADDR(p, offset))))
diff --git a/deps/v8/src/objects/objects-body-descriptors-inl.h b/deps/v8/src/objects/objects-body-descriptors-inl.h
index 874330e5eb..e4167229ca 100644
--- a/deps/v8/src/objects/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects/objects-body-descriptors-inl.h
@@ -19,6 +19,7 @@
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/source-text-module.h"
+#include "src/objects/swiss-name-dictionary-inl.h"
#include "src/objects/synthetic-module.h"
#include "src/objects/torque-defined-classes-inl.h"
#include "src/objects/transitions.h"
@@ -59,16 +60,7 @@ bool BodyDescriptorBase::IsValidJSObjectSlotImpl(Map map, HeapObject obj,
// embedder field area as tagged slots.
STATIC_ASSERT(kEmbedderDataSlotSize == kTaggedSize);
#endif
- if (!FLAG_unbox_double_fields || map.HasFastPointerLayout()) {
- return true;
- } else {
- DCHECK(FLAG_unbox_double_fields);
- DCHECK(IsAligned(offset, kSystemPointerSize));
-
- LayoutDescriptorHelper helper(map);
- DCHECK(!helper.all_fields_tagged());
- return helper.IsTagged(offset);
- }
+ return true;
}
template <typename ObjectVisitor>
@@ -100,23 +92,7 @@ void BodyDescriptorBase::IterateJSObjectBodyImpl(Map map, HeapObject obj,
// embedder field area as tagged slots.
STATIC_ASSERT(kEmbedderDataSlotSize == kTaggedSize);
#endif
- if (!FLAG_unbox_double_fields || map.HasFastPointerLayout()) {
- IteratePointers(obj, start_offset, end_offset, v);
- } else {
- DCHECK(FLAG_unbox_double_fields);
- DCHECK(IsAligned(start_offset, kSystemPointerSize) &&
- IsAligned(end_offset, kSystemPointerSize));
-
- LayoutDescriptorHelper helper(map);
- DCHECK(!helper.all_fields_tagged());
- for (int offset = start_offset; offset < end_offset;) {
- int end_of_region_offset;
- if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
- IteratePointers(obj, offset, end_of_region_offset, v);
- }
- offset = end_of_region_offset;
- }
- }
+ IteratePointers(obj, start_offset, end_offset, v);
}
template <typename ObjectVisitor>
@@ -403,6 +379,38 @@ class V8_EXPORT_PRIVATE SmallOrderedHashTable<Derived>::BodyDescriptor final
}
};
+class V8_EXPORT_PRIVATE SwissNameDictionary::BodyDescriptor final
+ : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ // Using |unchecked_cast| here and elsewhere in this class because the
+ // Scavenger may be calling us while the map word contains the forwarding
+ // address (a Smi) rather than a map.
+
+ SwissNameDictionary table = SwissNameDictionary::unchecked_cast(obj);
+ STATIC_ASSERT(MetaTablePointerOffset() + kTaggedSize ==
+ DataTableStartOffset());
+ return offset >= MetaTablePointerOffset() &&
+ (offset < table.DataTableEndOffset(table.Capacity()));
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ SwissNameDictionary table = SwissNameDictionary::unchecked_cast(obj);
+ STATIC_ASSERT(MetaTablePointerOffset() + kTaggedSize ==
+ DataTableStartOffset());
+ int start_offset = MetaTablePointerOffset();
+ int end_offset = table.DataTableEndOffset(table.Capacity());
+ IteratePointers(obj, start_offset, end_offset, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject obj) {
+ SwissNameDictionary table = SwissNameDictionary::unchecked_cast(obj);
+ return SwissNameDictionary::SizeFor(table.Capacity());
+ }
+};
+
class ByteArray::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
@@ -563,7 +571,6 @@ class WasmTypeInfo::BodyDescriptor final : public BodyDescriptorBase {
ObjectVisitor* v) {
Foreign::BodyDescriptor::IterateBody<ObjectVisitor>(map, obj, object_size,
v);
- IteratePointer(obj, kParentOffset, v);
IteratePointer(obj, kSupertypesOffset, v);
IteratePointer(obj, kSubtypesOffset, v);
}
@@ -898,7 +905,6 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case GLOBAL_DICTIONARY_TYPE:
case NUMBER_DICTIONARY_TYPE:
case SIMPLE_NUMBER_DICTIONARY_TYPE:
- case SCOPE_INFO_TYPE:
case SCRIPT_CONTEXT_TABLE_TYPE:
return Op::template apply<FixedArray::BodyDescriptor>(p1, p2, p3, p4);
case EPHEMERON_HASH_TABLE_TYPE:
@@ -996,6 +1002,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case WASM_MEMORY_OBJECT_TYPE:
case WASM_MODULE_OBJECT_TYPE:
case WASM_TABLE_OBJECT_TYPE:
+ case WASM_VALUE_OBJECT_TYPE:
return Op::template apply<JSObject::BodyDescriptor>(p1, p2, p3, p4);
case WASM_INSTANCE_OBJECT_TYPE:
return Op::template apply<WasmInstanceObject::BodyDescriptor>(p1, p2, p3,
@@ -1011,6 +1018,13 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case JS_TYPED_ARRAY_TYPE:
return Op::template apply<JSTypedArray::BodyDescriptor>(p1, p2, p3, p4);
case JS_FUNCTION_TYPE:
+ case JS_PROMISE_CONSTRUCTOR_TYPE:
+ case JS_REG_EXP_CONSTRUCTOR_TYPE:
+ case JS_ARRAY_CONSTRUCTOR_TYPE:
+#define TYPED_ARRAY_CONSTRUCTORS_SWITCH(Type, type, TYPE, Ctype) \
+ case TYPE##_TYPED_ARRAY_CONSTRUCTOR_TYPE:
+ TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTORS_SWITCH)
+#undef TYPED_ARRAY_CONSTRUCTORS_SWITCH
return Op::template apply<JSFunction::BodyDescriptor>(p1, p2, p3, p4);
case WEAK_CELL_TYPE:
return Op::template apply<WeakCell::BodyDescriptor>(p1, p2, p3, p4);
@@ -1044,6 +1058,11 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
return Op::template apply<
SmallOrderedHashTable<SmallOrderedNameDictionary>::BodyDescriptor>(
p1, p2, p3, p4);
+
+ case SWISS_NAME_DICTIONARY_TYPE:
+ return Op::template apply<SwissNameDictionary::BodyDescriptor>(p1, p2, p3,
+ p4);
+
case CODE_DATA_CONTAINER_TYPE:
return Op::template apply<CodeDataContainer::BodyDescriptor>(p1, p2, p3,
p4);
@@ -1084,7 +1103,8 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case SYNTHETIC_MODULE_TYPE:
return Op::template apply<SyntheticModule::BodyDescriptor>(p1, p2, p3,
p4);
-// TODO(tebbi): Avoid duplicated cases when the body descriptors are identical.
+// TODO(turbofan): Avoid duplicated cases when the body descriptors are
+// identical.
#define MAKE_TORQUE_BODY_DESCRIPTOR_APPLY(TYPE, TypeName) \
case TYPE: \
return Op::template apply<TypeName::BodyDescriptor>(p1, p2, p3, p4);
diff --git a/deps/v8/src/objects/objects-definitions.h b/deps/v8/src/objects/objects-definitions.h
index 3606f6c605..68b82b33d3 100644
--- a/deps/v8/src/objects/objects-definitions.h
+++ b/deps/v8/src/objects/objects-definitions.h
@@ -36,6 +36,8 @@ namespace internal {
V(EXTERNAL_INTERNALIZED_STRING_TYPE) \
V(ONE_BYTE_INTERNALIZED_STRING_TYPE) \
V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
+ V(UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE) \
+ V(UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
V(STRING_TYPE) \
V(CONS_STRING_TYPE) \
V(EXTERNAL_STRING_TYPE) \
@@ -83,6 +85,13 @@ namespace internal {
external_internalized_string, ExternalInternalizedString) \
V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE, ExternalOneByteString::kSize, \
external_one_byte_internalized_string, ExternalOneByteInternalizedString) \
+ V(UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE, \
+ ExternalTwoByteString::kUncachedSize, \
+ uncached_external_internalized_string, UncachedExternalInternalizedString) \
+ V(UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE, \
+ ExternalOneByteString::kUncachedSize, \
+ uncached_external_one_byte_internalized_string, \
+ UncachedExternalOneByteInternalizedString) \
V(THIN_STRING_TYPE, ThinString::kSize, thin_string, ThinString) \
V(THIN_ONE_BYTE_STRING_TYPE, ThinString::kSize, thin_one_byte_string, \
ThinOneByteString)
@@ -115,6 +124,7 @@ namespace internal {
V(_, ASM_WASM_DATA_TYPE, AsmWasmData, asm_wasm_data) \
V(_, ASYNC_GENERATOR_REQUEST_TYPE, AsyncGeneratorRequest, \
async_generator_request) \
+ V(_, BASELINE_DATA_TYPE, BaselineData, baseline_data) \
V(_, BREAK_POINT_TYPE, BreakPoint, break_point) \
V(_, BREAK_POINT_INFO_TYPE, BreakPointInfo, break_point_info) \
V(_, CACHED_TEMPLATE_OBJECT_TYPE, CachedTemplateObject, \
@@ -132,11 +142,12 @@ namespace internal {
V(_, PROPERTY_DESCRIPTOR_OBJECT_TYPE, PropertyDescriptorObject, \
property_descriptor_object) \
V(_, PROTOTYPE_INFO_TYPE, PrototypeInfo, prototype_info) \
+ V(_, REG_EXP_BOILERPLATE_DESCRIPTION_TYPE, RegExpBoilerplateDescription, \
+ regexp_boilerplate_description) \
V(_, SCRIPT_TYPE, Script, script) \
V(_, SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE, SourceTextModuleInfoEntry, \
module_info_entry) \
V(_, STACK_FRAME_INFO_TYPE, StackFrameInfo, stack_frame_info) \
- V(_, STACK_TRACE_FRAME_TYPE, StackTraceFrame, stack_trace_frame) \
V(_, TEMPLATE_OBJECT_DESCRIPTION_TYPE, TemplateObjectDescription, \
template_object_description) \
V(_, TUPLE2_TYPE, Tuple2, tuple2) \
diff --git a/deps/v8/src/objects/objects-inl.h b/deps/v8/src/objects/objects-inl.h
index e047999c00..39fa7b3381 100644
--- a/deps/v8/src/objects/objects-inl.h
+++ b/deps/v8/src/objects/objects-inl.h
@@ -34,7 +34,7 @@
#include "src/objects/property-details.h"
#include "src/objects/property.h"
#include "src/objects/regexp-match-info.h"
-#include "src/objects/scope-info.h"
+#include "src/objects/scope-info-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/slots-inl.h"
#include "src/objects/smi-inl.h"
@@ -61,9 +61,7 @@ Smi PropertyDetails::AsSmi() const {
int PropertyDetails::field_width_in_words() const {
DCHECK_EQ(location(), kField);
- if (!FLAG_unbox_double_fields) return 1;
- if (kDoubleSize == kTaggedSize) return 1;
- return representation().IsDouble() ? kDoubleSize / kTaggedSize : 1;
+ return 1;
}
DEF_GETTER(HeapObject, IsClassBoilerplate, bool) {
@@ -256,10 +254,6 @@ DEF_GETTER(HeapObject, IsFreeSpaceOrFiller, bool) {
return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
}
-DEF_GETTER(HeapObject, IsFrameArray, bool) {
- return IsFixedArrayExact(isolate);
-}
-
DEF_GETTER(HeapObject, IsArrayList, bool) {
// Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
// i::GetIsolateForPtrCompr(HeapObject).
@@ -272,17 +266,6 @@ DEF_GETTER(HeapObject, IsRegExpMatchInfo, bool) {
return IsFixedArrayExact(isolate);
}
-bool Object::IsLayoutDescriptor() const {
- if (IsSmi()) return true;
- HeapObject this_heap_object = HeapObject::cast(*this);
- IsolateRoot isolate = GetIsolateForPtrCompr(this_heap_object);
- return this_heap_object.IsByteArray(isolate);
-}
-
-bool Object::IsLayoutDescriptor(IsolateRoot isolate) const {
- return IsSmi() || IsByteArray(isolate);
-}
-
DEF_GETTER(HeapObject, IsDeoptimizationData, bool) {
// Must be a fixed array.
if (!IsFixedArrayExact(isolate)) return false;
@@ -450,7 +433,6 @@ bool Object::IsMinusZero() const {
}
OBJECT_CONSTRUCTORS_IMPL(RegExpMatchInfo, FixedArray)
-OBJECT_CONSTRUCTORS_IMPL(ScopeInfo, FixedArray)
OBJECT_CONSTRUCTORS_IMPL(BigIntBase, PrimitiveHeapObject)
OBJECT_CONSTRUCTORS_IMPL(BigInt, BigIntBase)
OBJECT_CONSTRUCTORS_IMPL(FreshlyAllocatedBigInt, BigIntBase)
@@ -461,7 +443,6 @@ OBJECT_CONSTRUCTORS_IMPL(FreshlyAllocatedBigInt, BigIntBase)
CAST_ACCESSOR(BigIntBase)
CAST_ACCESSOR(BigInt)
CAST_ACCESSOR(RegExpMatchInfo)
-CAST_ACCESSOR(ScopeInfo)
bool Object::HasValidElements() {
// Dictionary is covered under FixedArray. ByteArray is used
@@ -1119,21 +1100,6 @@ static inline Handle<Object> MakeEntryPair(Isolate* isolate, Handle<Object> key,
PACKED_ELEMENTS, 2);
}
-bool ScopeInfo::IsAsmModule() const { return IsAsmModuleBit::decode(Flags()); }
-
-bool ScopeInfo::HasSimpleParameters() const {
- return HasSimpleParametersBit::decode(Flags());
-}
-
-#define FIELD_ACCESSORS(name) \
- void ScopeInfo::Set##name(int value) { set(k##name, Smi::FromInt(value)); } \
- int ScopeInfo::name() const { \
- DCHECK_GE(length(), kVariablePartIndex); \
- return Smi::ToInt(get(k##name)); \
- }
-FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(FIELD_ACCESSORS)
-#undef FIELD_ACCESSORS
-
FreshlyAllocatedBigInt FreshlyAllocatedBigInt::cast(Object object) {
SLOW_DCHECK(object.IsBigInt());
return FreshlyAllocatedBigInt(object.ptr());
diff --git a/deps/v8/src/objects/objects.cc b/deps/v8/src/objects/objects.cc
index 5e17fa85fc..d9cb7486be 100644
--- a/deps/v8/src/objects/objects.cc
+++ b/deps/v8/src/objects/objects.cc
@@ -59,7 +59,6 @@
#include "src/objects/field-index.h"
#include "src/objects/field-type.h"
#include "src/objects/foreign.h"
-#include "src/objects/frame-array-inl.h"
#include "src/objects/free-space-inl.h"
#include "src/objects/function-kind.h"
#include "src/objects/hash-table-inl.h"
@@ -138,7 +137,7 @@ ShouldThrow GetShouldThrow(Isolate* isolate, Maybe<ShouldThrow> should_throw) {
if (mode == LanguageMode::kStrict) return kThrowOnError;
for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
- if (!(it.frame()->is_optimized() || it.frame()->is_interpreted())) {
+ if (!(it.frame()->is_optimized() || it.frame()->is_unoptimized())) {
continue;
}
// Get the language mode from closure.
@@ -1482,15 +1481,17 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
return reboxed_result;
}
+ Handle<AccessorPair> accessor_pair = Handle<AccessorPair>::cast(structure);
// AccessorPair with 'cached' private property.
- if (it->TryLookupCachedProperty()) {
+ if (it->TryLookupCachedProperty(accessor_pair)) {
return Object::GetProperty(it);
}
// Regular accessor.
- Handle<Object> getter(AccessorPair::cast(*structure).getter(), isolate);
+ Handle<Object> getter(accessor_pair->getter(), isolate);
if (getter->IsFunctionTemplateInfo()) {
- SaveAndSwitchContext save(isolate, *holder->GetCreationContext());
+ SaveAndSwitchContext save(isolate,
+ *holder->GetCreationContext().ToHandleChecked());
return Builtins::InvokeApiFunction(
isolate, false, Handle<FunctionTemplateInfo>::cast(getter), receiver, 0,
nullptr, isolate->factory()->undefined_value());
@@ -1595,7 +1596,8 @@ Maybe<bool> Object::SetPropertyWithAccessor(
// Regular accessor.
Handle<Object> setter(AccessorPair::cast(*structure).setter(), isolate);
if (setter->IsFunctionTemplateInfo()) {
- SaveAndSwitchContext save(isolate, *holder->GetCreationContext());
+ SaveAndSwitchContext save(isolate,
+ *holder->GetCreationContext().ToHandleChecked());
Handle<Object> argv[] = {value};
RETURN_ON_EXCEPTION_VALUE(
isolate,
@@ -2150,7 +2152,7 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
os << " value=";
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
- cell.value().ShortPrint(&accumulator);
+ cell.value(kAcquireLoad).ShortPrint(&accumulator);
os << accumulator.ToCString().get();
os << '>';
break;
@@ -2184,11 +2186,6 @@ void ClassPositions::BriefPrintDetails(std::ostream& os) {
os << " " << start() << ", " << end();
}
-void ArrayBoilerplateDescription::BriefPrintDetails(std::ostream& os) {
- os << " " << ElementsKindToString(elements_kind()) << ", "
- << Brief(constant_elements());
-}
-
void CallableTask::BriefPrintDetails(std::ostream& os) {
os << " callable=" << Brief(callable());
}
@@ -2290,6 +2287,10 @@ int HeapObject::SizeFromMap(Map map) const {
return SmallOrderedNameDictionary::SizeFor(
SmallOrderedNameDictionary::unchecked_cast(*this).Capacity());
}
+ if (instance_type == SWISS_NAME_DICTIONARY_TYPE) {
+ return SwissNameDictionary::SizeFor(
+ SwissNameDictionary::unchecked_cast(*this).Capacity());
+ }
if (instance_type == PROPERTY_ARRAY_TYPE) {
return PropertyArray::SizeFor(
PropertyArray::cast(*this).synchronized_length());
@@ -2602,6 +2603,13 @@ Maybe<bool> Object::SetProperty(LookupIterator* it, Handle<Object> value,
if (it->GetReceiver()->IsJSGlobalObject() &&
(GetShouldThrow(it->isolate(), should_throw) ==
ShouldThrow::kThrowOnError)) {
+ if (it->state() == LookupIterator::TRANSITION) {
+ // The property cell that we have created is garbage because we are going
+ // to throw now instead of putting it into the global dictionary. However,
+ // the cell might already have been stored into the feedback vector, so
+ // we must invalidate it nevertheless.
+ it->transition_cell()->ClearAndInvalidate(ReadOnlyRoots(it->isolate()));
+ }
it->isolate()->Throw(*it->isolate()->factory()->NewReferenceError(
MessageTemplate::kNotDefined, it->GetName()));
return Nothing<bool>();
@@ -3535,11 +3543,14 @@ Maybe<bool> JSProxy::SetPrivateSymbol(Isolate* isolate, Handle<JSProxy> proxy,
if (it.IsFound()) {
DCHECK_EQ(LookupIterator::DATA, it.state());
DCHECK_EQ(DONT_ENUM, it.property_attributes());
+ // We are not tracking constness for private symbols added to JSProxy
+ // objects.
+ DCHECK_EQ(PropertyConstness::kMutable, it.property_details().constness());
it.WriteDataValue(value, false);
return Just(true);
}
- PropertyDetails details(kData, DONT_ENUM, PropertyCellType::kNoCell);
+ PropertyDetails details(kData, DONT_ENUM, PropertyConstness::kMutable);
if (V8_DICT_MODE_PROTOTYPES_BOOL) {
Handle<OrderedNameDictionary> dict(proxy->property_dictionary_ordered(),
isolate);
@@ -4266,67 +4277,6 @@ Handle<RegExpMatchInfo> RegExpMatchInfo::ReserveCaptures(
return result;
}
-// static
-Handle<FrameArray> FrameArray::AppendJSFrame(Handle<FrameArray> in,
- Handle<Object> receiver,
- Handle<JSFunction> function,
- Handle<AbstractCode> code,
- int offset, int flags,
- Handle<FixedArray> parameters) {
- const int frame_count = in->FrameCount();
- const int new_length = LengthFor(frame_count + 1);
- Handle<FrameArray> array =
- EnsureSpace(function->GetIsolate(), in, new_length);
- array->SetReceiver(frame_count, *receiver);
- array->SetFunction(frame_count, *function);
- array->SetCode(frame_count, *code);
- array->SetOffset(frame_count, Smi::FromInt(offset));
- array->SetFlags(frame_count, Smi::FromInt(flags));
- array->SetParameters(frame_count, *parameters);
- array->set(kFrameCountIndex, Smi::FromInt(frame_count + 1));
- return array;
-}
-
-// static
-Handle<FrameArray> FrameArray::AppendWasmFrame(
- Handle<FrameArray> in, Handle<WasmInstanceObject> wasm_instance,
- int wasm_function_index, wasm::WasmCode* code, int offset, int flags) {
- // This must be either a compiled or interpreted wasm frame, or an asm.js
- // frame (which is always compiled).
- DCHECK_EQ(1,
- ((flags & kIsWasmFrame) != 0) + ((flags & kIsAsmJsWasmFrame) != 0));
- Isolate* isolate = wasm_instance->GetIsolate();
- const int frame_count = in->FrameCount();
- const int new_length = LengthFor(frame_count + 1);
- Handle<FrameArray> array = EnsureSpace(isolate, in, new_length);
- // The {code} will be {nullptr} for interpreted wasm frames.
- Handle<Object> code_ref = isolate->factory()->undefined_value();
- if (code) {
- auto native_module = wasm_instance->module_object().shared_native_module();
- code_ref = Managed<wasm::GlobalWasmCodeRef>::Allocate(
- isolate, 0, code, std::move(native_module));
- }
- array->SetWasmInstance(frame_count, *wasm_instance);
- array->SetWasmFunctionIndex(frame_count, Smi::FromInt(wasm_function_index));
- array->SetWasmCodeObject(frame_count, *code_ref);
- array->SetOffset(frame_count, Smi::FromInt(offset));
- array->SetFlags(frame_count, Smi::FromInt(flags));
- array->set(kFrameCountIndex, Smi::FromInt(frame_count + 1));
- return array;
-}
-
-void FrameArray::ShrinkToFit(Isolate* isolate) {
- Shrink(isolate, LengthFor(FrameCount()));
-}
-
-// static
-Handle<FrameArray> FrameArray::EnsureSpace(Isolate* isolate,
- Handle<FrameArray> array,
- int length) {
- return Handle<FrameArray>::cast(
- EnsureSpaceInFixedArray(isolate, array, length));
-}
-
template <typename LocalIsolate>
Handle<DescriptorArray> DescriptorArray::Allocate(LocalIsolate* isolate,
int nof_descriptors,
@@ -6353,28 +6303,23 @@ void PropertyCell::ClearAndInvalidate(ReadOnlyRoots roots) {
DCHECK(!value().IsTheHole(roots));
PropertyDetails details = property_details();
details = details.set_cell_type(PropertyCellType::kConstant);
- set_value(roots.the_hole_value());
- set_property_details(details);
+ Transition(details, roots.the_hole_value_handle());
dependent_code().DeoptimizeDependentCodeGroup(
DependentCode::kPropertyCellChangedGroup);
}
// static
Handle<PropertyCell> PropertyCell::InvalidateAndReplaceEntry(
- Isolate* isolate, Handle<GlobalDictionary> dictionary,
- InternalIndex entry) {
+ Isolate* isolate, Handle<GlobalDictionary> dictionary, InternalIndex entry,
+ PropertyDetails new_details, Handle<Object> new_value) {
Handle<PropertyCell> cell(dictionary->CellAt(entry), isolate);
Handle<Name> name(cell->name(), isolate);
- PropertyDetails details = cell->property_details();
- DCHECK(details.IsConfigurable());
+ DCHECK(cell->property_details().IsConfigurable());
DCHECK(!cell->value().IsTheHole(isolate));
- // Swap with a copy.
- Handle<PropertyCell> new_cell = isolate->factory()->NewPropertyCell(name);
- new_cell->set_value(cell->value());
- // Cell is officially mutable henceforth.
- details = details.set_cell_type(PropertyCellType::kMutable);
- new_cell->set_property_details(details);
+ // Swap with a new property cell.
+ Handle<PropertyCell> new_cell =
+ isolate->factory()->NewPropertyCell(name, new_details, new_value);
dictionary->ValueAtPut(entry, *new_cell);
cell->ClearAndInvalidate(ReadOnlyRoots(isolate));
@@ -6422,10 +6367,9 @@ PropertyCellType PropertyCell::UpdatedType(Isolate* isolate,
case PropertyCellType::kMutable:
return PropertyCellType::kMutable;
}
- UNREACHABLE();
}
-Handle<PropertyCell> PropertyCell::PrepareForValue(
+Handle<PropertyCell> PropertyCell::PrepareForAndSetValue(
Isolate* isolate, Handle<GlobalDictionary> dictionary, InternalIndex entry,
Handle<Object> value, PropertyDetails details) {
DCHECK(!value->IsTheHole(isolate));
@@ -6441,47 +6385,79 @@ Handle<PropertyCell> PropertyCell::PrepareForValue(
PropertyCellType new_type =
UpdatedType(isolate, cell, value, original_details);
+ details = details.set_cell_type(new_type);
+
if (invalidate) {
- cell = PropertyCell::InvalidateAndReplaceEntry(isolate, dictionary, entry);
+ cell = PropertyCell::InvalidateAndReplaceEntry(isolate, dictionary, entry,
+ details, value);
+ } else {
+ cell->Transition(details, value);
+ // Deopt when transitioning from a constant type or when making a writable
+ // property read-only. Making a read-only property writable again is not
+ // interesting because Turbofan does not currently rely on read-only unless
+ // the property is also configurable, in which case it will stay read-only
+ // forever.
+ if (original_details.cell_type() != new_type ||
+ (!original_details.IsReadOnly() && details.IsReadOnly())) {
+ cell->dependent_code().DeoptimizeDependentCodeGroup(
+ DependentCode::kPropertyCellChangedGroup);
+ }
}
+ return cell;
+}
- // Install new property details.
- details = details.set_cell_type(new_type);
- cell->set_property_details(details);
-
- if (new_type == PropertyCellType::kConstant ||
- new_type == PropertyCellType::kConstantType) {
- // Store the value now to ensure that the cell contains the constant or
- // type information. Otherwise subsequent store operation will turn
- // the cell to mutable.
- cell->set_value(*value);
- }
-
- // Deopt when transitioning from a constant type or when making a writable
- // property read-only. Making a read-only property writable again is not
- // interesting because Turbofan does not currently rely on read-only unless
- // the property is also configurable, in which case it will stay read-only
- // forever.
- if (!invalidate &&
- (original_details.cell_type() != new_type ||
- (!original_details.IsReadOnly() && details.IsReadOnly()))) {
- cell->dependent_code().DeoptimizeDependentCodeGroup(
+// static
+void PropertyCell::InvalidateProtector() {
+ if (value() != Smi::FromInt(Protectors::kProtectorInvalid)) {
+ DCHECK_EQ(value(), Smi::FromInt(Protectors::kProtectorValid));
+ set_value(Smi::FromInt(Protectors::kProtectorInvalid), kReleaseStore);
+ dependent_code().DeoptimizeDependentCodeGroup(
DependentCode::kPropertyCellChangedGroup);
}
- return cell;
}
// static
-void PropertyCell::SetValueWithInvalidation(Isolate* isolate,
- const char* cell_name,
- Handle<PropertyCell> cell,
- Handle<Object> new_value) {
- if (cell->value() != *new_value) {
- cell->set_value(*new_value);
- cell->dependent_code().DeoptimizeDependentCodeGroup(
- DependentCode::kPropertyCellChangedGroup);
+bool PropertyCell::CheckDataIsCompatible(PropertyDetails details,
+ Object value) {
+ DisallowGarbageCollection no_gc;
+ PropertyCellType cell_type = details.cell_type();
+ if (value.IsTheHole()) {
+ CHECK_EQ(cell_type, PropertyCellType::kConstant);
+ } else {
+ CHECK_EQ(value.IsAccessorInfo() || value.IsAccessorPair(),
+ details.kind() == kAccessor);
+ DCHECK_IMPLIES(cell_type == PropertyCellType::kUndefined,
+ value.IsUndefined());
}
+ return true;
+}
+
+#ifdef DEBUG
+bool PropertyCell::CanTransitionTo(PropertyDetails new_details,
+ Object new_value) const {
+ // Extending the implementation of PropertyCells with additional states
+ // and/or transitions likely requires changes to PropertyCellData::Serialize.
+ DisallowGarbageCollection no_gc;
+ DCHECK(CheckDataIsCompatible(new_details, new_value));
+ switch (property_details().cell_type()) {
+ case PropertyCellType::kUndefined:
+ return new_details.cell_type() != PropertyCellType::kUndefined;
+ case PropertyCellType::kConstant:
+ return !value().IsTheHole() &&
+ new_details.cell_type() != PropertyCellType::kUndefined;
+ case PropertyCellType::kConstantType:
+ return new_details.cell_type() == PropertyCellType::kConstantType ||
+ new_details.cell_type() == PropertyCellType::kMutable ||
+ (new_details.cell_type() == PropertyCellType::kConstant &&
+ new_value.IsTheHole());
+ case PropertyCellType::kMutable:
+ return new_details.cell_type() == PropertyCellType::kMutable ||
+ (new_details.cell_type() == PropertyCellType::kConstant &&
+ new_value.IsTheHole());
+ }
+ UNREACHABLE();
}
+#endif // DEBUG
int JSGeneratorObject::source_position() const {
CHECK(is_suspended());
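The SizeFromMap hunk above adds a SWISS_NAME_DICTIONARY_TYPE branch whose object size is derived from the dictionary's capacity rather than from the map. A minimal sketch of capacity-based sizing for a variable-length heap object, with made-up header and entry sizes that are not SwissNameDictionary's real layout:

#include <cstddef>
#include <cstdio>

// Hypothetical layout: a fixed header followed by |capacity| fixed-size
// entries, rounded up to pointer alignment. SwissNameDictionary::SizeFor has
// the same shape but with its own metadata and control-byte regions.
constexpr size_t kHeaderSize = 16;
constexpr size_t kEntrySize = 24;
constexpr size_t kPointerSize = 8;

constexpr size_t RoundUp(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

constexpr size_t SizeFor(size_t capacity) {
  return RoundUp(kHeaderSize + capacity * kEntrySize, kPointerSize);
}

int main() { std::printf("%zu\n", SizeFor(8)); }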
diff --git a/deps/v8/src/objects/objects.h b/deps/v8/src/objects/objects.h
index a6b6cd2d24..c68445597f 100644
--- a/deps/v8/src/objects/objects.h
+++ b/deps/v8/src/objects/objects.h
@@ -92,7 +92,6 @@
// - ByteArray
// - BytecodeArray
// - FixedArray
-// - FrameArray
// - HashTable
// - Dictionary
// - StringTable
@@ -167,7 +166,6 @@
// - BreakPointInfo
// - CachedTemplateObject
// - StackFrameInfo
-// - StackTraceFrame
// - CodeCache
// - PropertyDescriptorObject
// - PrototypeInfo
@@ -188,6 +186,7 @@
// - UncompiledData
// - UncompiledDataWithoutPreparseData
// - UncompiledDataWithPreparseData
+// - SwissNameDictionary
//
// Formats of Object::ptr_:
// Smi: [31 bit signed int] 0
diff --git a/deps/v8/src/objects/osr-optimized-code-cache.cc b/deps/v8/src/objects/osr-optimized-code-cache.cc
index 67d907a85c..05f031cf2f 100644
--- a/deps/v8/src/objects/osr-optimized-code-cache.cc
+++ b/deps/v8/src/objects/osr-optimized-code-cache.cc
@@ -17,7 +17,7 @@ const int OSROptimizedCodeCache::kMaxLength;
void OSROptimizedCodeCache::AddOptimizedCode(
Handle<NativeContext> native_context, Handle<SharedFunctionInfo> shared,
- Handle<Code> code, BailoutId osr_offset) {
+ Handle<Code> code, BytecodeOffset osr_offset) {
DCHECK(!osr_offset.IsNone());
DCHECK(CodeKindIsOptimizedJSFunction(code->kind()));
STATIC_ASSERT(kEntryLength == 3);
@@ -91,7 +91,7 @@ void OSROptimizedCodeCache::Compact(Handle<NativeContext> native_context) {
}
Code OSROptimizedCodeCache::GetOptimizedCode(Handle<SharedFunctionInfo> shared,
- BailoutId osr_offset,
+ BytecodeOffset osr_offset,
Isolate* isolate) {
DisallowGarbageCollection no_gc;
int index = FindEntry(shared, osr_offset);
@@ -157,21 +157,21 @@ SharedFunctionInfo OSROptimizedCodeCache::GetSFIFromEntry(int index) {
: SharedFunctionInfo::cast(sfi_entry);
}
-BailoutId OSROptimizedCodeCache::GetBailoutIdFromEntry(int index) {
+BytecodeOffset OSROptimizedCodeCache::GetBytecodeOffsetFromEntry(int index) {
DCHECK_LE(index + OSRCodeCacheConstants::kEntryLength, length());
DCHECK_EQ(index % kEntryLength, 0);
Smi osr_offset_entry;
Get(index + kOsrIdOffset)->ToSmi(&osr_offset_entry);
- return BailoutId(osr_offset_entry.value());
+ return BytecodeOffset(osr_offset_entry.value());
}
int OSROptimizedCodeCache::FindEntry(Handle<SharedFunctionInfo> shared,
- BailoutId osr_offset) {
+ BytecodeOffset osr_offset) {
DisallowGarbageCollection no_gc;
DCHECK(!osr_offset.IsNone());
for (int index = 0; index < length(); index += kEntryLength) {
if (GetSFIFromEntry(index) != *shared) continue;
- if (GetBailoutIdFromEntry(index) != osr_offset) continue;
+ if (GetBytecodeOffsetFromEntry(index) != osr_offset) continue;
return index;
}
return -1;
@@ -188,7 +188,8 @@ void OSROptimizedCodeCache::ClearEntry(int index, Isolate* isolate) {
void OSROptimizedCodeCache::InitializeEntry(int entry,
SharedFunctionInfo shared,
- Code code, BailoutId osr_offset) {
+ Code code,
+ BytecodeOffset osr_offset) {
Set(entry + OSRCodeCacheConstants::kSharedOffset,
HeapObjectReference::Weak(shared));
Set(entry + OSRCodeCacheConstants::kCachedCodeOffset,
diff --git a/deps/v8/src/objects/osr-optimized-code-cache.h b/deps/v8/src/objects/osr-optimized-code-cache.h
index 99c148a7e1..62e135b02e 100644
--- a/deps/v8/src/objects/osr-optimized-code-cache.h
+++ b/deps/v8/src/objects/osr-optimized-code-cache.h
@@ -32,7 +32,7 @@ class V8_EXPORT OSROptimizedCodeCache : public WeakFixedArray {
// kOSRCodeCacheInitialLength entries.
static void AddOptimizedCode(Handle<NativeContext> context,
Handle<SharedFunctionInfo> shared,
- Handle<Code> code, BailoutId osr_offset);
+ Handle<Code> code, BytecodeOffset osr_offset);
// Reduces the size of the OSR code cache if the number of valid entries are
// less than the current capacity of the cache.
static void Compact(Handle<NativeContext> context);
@@ -40,10 +40,10 @@ class V8_EXPORT OSROptimizedCodeCache : public WeakFixedArray {
static void Clear(NativeContext context);
// Returns the code corresponding to the shared function |shared| and
- // BailoutId |offset| if an entry exists in the cache. Returns an empty
+ // BytecodeOffset |offset| if an entry exists in the cache. Returns an empty
// object otherwise.
- Code GetOptimizedCode(Handle<SharedFunctionInfo> shared, BailoutId osr_offset,
- Isolate* isolate);
+ Code GetOptimizedCode(Handle<SharedFunctionInfo> shared,
+ BytecodeOffset osr_offset, Isolate* isolate);
// Remove all code objects marked for deoptimization from OSR code cache.
void EvictMarkedCode(Isolate* isolate);
@@ -58,12 +58,13 @@ class V8_EXPORT OSROptimizedCodeCache : public WeakFixedArray {
// Helper functions to get individual items from an entry in the cache.
Code GetCodeFromEntry(int index);
SharedFunctionInfo GetSFIFromEntry(int index);
- BailoutId GetBailoutIdFromEntry(int index);
+ BytecodeOffset GetBytecodeOffsetFromEntry(int index);
- inline int FindEntry(Handle<SharedFunctionInfo> shared, BailoutId osr_offset);
+ inline int FindEntry(Handle<SharedFunctionInfo> shared,
+ BytecodeOffset osr_offset);
inline void ClearEntry(int src, Isolate* isolate);
inline void InitializeEntry(int entry, SharedFunctionInfo shared, Code code,
- BailoutId osr_offset);
+ BytecodeOffset osr_offset);
inline void MoveEntry(int src, int dst, Isolate* isolate);
OBJECT_CONSTRUCTORS(OSROptimizedCodeCache, WeakFixedArray);
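The OSR cache keeps each entry as a fixed-length run of three slots (shared function, code, bytecode offset), so FindEntry strides through the backing array in kEntryLength steps. A minimal sketch of that flat-entry lookup over a plain vector, with invented slot names rather than V8's weak-reference storage:

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical flat cache: each entry occupies three consecutive slots.
constexpr int kEntryLength = 3;
constexpr int kSharedOffset = 0;
constexpr int kCodeOffset = 1;
constexpr int kOsrOffsetOffset = 2;

int FindEntry(const std::vector<intptr_t>& cache, intptr_t shared,
              intptr_t osr_offset) {
  for (int index = 0; index + kEntryLength <= static_cast<int>(cache.size());
       index += kEntryLength) {
    if (cache[index + kSharedOffset] != shared) continue;
    if (cache[index + kOsrOffsetOffset] != osr_offset) continue;
    return index;  // The caller reads cache[index + kCodeOffset].
  }
  return -1;
}

int main() {
  std::vector<intptr_t> cache = {/*shared*/ 7, /*code*/ 100, /*offset*/ 42};
  std::printf("%d\n", FindEntry(cache, 7, 42));  // Prints 0.
}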
diff --git a/deps/v8/src/objects/property-cell-inl.h b/deps/v8/src/objects/property-cell-inl.h
index 7f14109cba..154dcab41f 100644
--- a/deps/v8/src/objects/property-cell-inl.h
+++ b/deps/v8/src/objects/property-cell-inl.h
@@ -22,15 +22,43 @@ CAST_ACCESSOR(PropertyCell)
ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset)
ACCESSORS(PropertyCell, name, Name, kNameOffset)
-ACCESSORS(PropertyCell, value, Object, kValueOffset)
ACCESSORS(PropertyCell, property_details_raw, Smi, kPropertyDetailsRawOffset)
+RELEASE_ACQUIRE_ACCESSORS(PropertyCell, property_details_raw, Smi,
+ kPropertyDetailsRawOffset)
+ACCESSORS(PropertyCell, value, Object, kValueOffset)
+RELEASE_ACQUIRE_ACCESSORS(PropertyCell, value, Object, kValueOffset)
PropertyDetails PropertyCell::property_details() const {
return PropertyDetails(Smi::cast(property_details_raw()));
}
-void PropertyCell::set_property_details(PropertyDetails details) {
- set_property_details_raw(details.AsSmi());
+PropertyDetails PropertyCell::property_details(AcquireLoadTag tag) const {
+ return PropertyDetails(Smi::cast(property_details_raw(tag)));
+}
+
+void PropertyCell::UpdatePropertyDetailsExceptCellType(
+ PropertyDetails details) {
+ DCHECK(CheckDataIsCompatible(details, value()));
+ PropertyDetails old_details = property_details();
+ CHECK_EQ(old_details.cell_type(), details.cell_type());
+ set_property_details_raw(details.AsSmi(), kReleaseStore);
+ // Deopt when making a writable property read-only. The reverse direction
+ // is uninteresting because Turbofan does not currently rely on read-only
+ // unless the property is also configurable, in which case it will stay
+ // read-only forever.
+ if (!old_details.IsReadOnly() && details.IsReadOnly()) {
+ dependent_code().DeoptimizeDependentCodeGroup(
+ DependentCode::kPropertyCellChangedGroup);
+ }
+}
+
+void PropertyCell::Transition(PropertyDetails new_details,
+ Handle<Object> new_value) {
+ DCHECK(CanTransitionTo(new_details, *new_value));
+ // This code must be in sync with its counterpart in
+ // PropertyCellData::Serialize.
+ set_value(*new_value, kReleaseStore);
+ set_property_details_raw(new_details.AsSmi(), kReleaseStore);
}
} // namespace internal
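The new PropertyCell::Transition above stores the value and then the details with release semantics, and readers use the acquire-tagged getters, so a thread that observes the new details also observes the value written before them. A minimal standalone sketch of that release/acquire pairing using std::atomic (V8 uses its own tagged-field macros instead of std::atomic):

#include <atomic>
#include <cstdio>

struct Cell {
  std::atomic<int> value{0};
  std::atomic<int> details{0};
};

void Transition(Cell& cell, int new_value, int new_details) {
  // Publish the value first, then the details, both with release ordering.
  cell.value.store(new_value, std::memory_order_release);
  cell.details.store(new_details, std::memory_order_release);
}

int ReadValueIfUpdated(const Cell& cell, int expected_details) {
  // An acquire load that observes the new details also observes the value
  // stored before them by the writer.
  if (cell.details.load(std::memory_order_acquire) == expected_details) {
    return cell.value.load(std::memory_order_acquire);
  }
  return -1;
}

int main() {
  Cell cell;
  Transition(cell, 42, 1);
  std::printf("%d\n", ReadValueIfUpdated(cell, 1));  // Prints 42.
}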
diff --git a/deps/v8/src/objects/property-cell.h b/deps/v8/src/objects/property-cell.h
index d86d92ca4d..f4fb2391c3 100644
--- a/deps/v8/src/objects/property-cell.h
+++ b/deps/v8/src/objects/property-cell.h
@@ -20,16 +20,24 @@ class PropertyCell : public HeapObject {
DECL_GETTER(name, Name)
// [property_details]: details of the global property.
- DECL_ACCESSORS(property_details_raw, Smi)
+ DECL_GETTER(property_details_raw, Smi)
+ DECL_ACQUIRE_GETTER(property_details_raw, Smi)
+ inline PropertyDetails property_details() const;
+ inline PropertyDetails property_details(AcquireLoadTag tag) const;
+ inline void UpdatePropertyDetailsExceptCellType(PropertyDetails details);
// [value]: value of the global property.
- DECL_ACCESSORS(value, Object)
+ DECL_GETTER(value, Object)
+ DECL_ACQUIRE_GETTER(value, Object)
// [dependent_code]: code that depends on the type of the global property.
DECL_ACCESSORS(dependent_code, DependentCode)
- inline PropertyDetails property_details() const;
- inline void set_property_details(PropertyDetails details);
+ // Changes the value and/or property details.
+ // For global properties:
+ inline void Transition(PropertyDetails new_details, Handle<Object> new_value);
+ // For protectors:
+ void InvalidateProtector();
static PropertyCellType InitialType(Isolate* isolate, Handle<Object> value);
@@ -40,21 +48,22 @@ class PropertyCell : public HeapObject {
Handle<Object> value,
PropertyDetails details);
- // Prepares property cell at given entry for receiving given value.
- // As a result the old cell could be invalidated and/or dependent code could
- // be deoptimized. Returns the prepared property cell.
- static Handle<PropertyCell> PrepareForValue(
+ // Prepares property cell at given entry for receiving given value and sets
+ // that value. As a result the old cell could be invalidated and/or dependent
+ // code could be deoptimized. Returns the (possibly new) property cell.
+ static Handle<PropertyCell> PrepareForAndSetValue(
Isolate* isolate, Handle<GlobalDictionary> dictionary,
InternalIndex entry, Handle<Object> value, PropertyDetails details);
void ClearAndInvalidate(ReadOnlyRoots roots);
static Handle<PropertyCell> InvalidateAndReplaceEntry(
Isolate* isolate, Handle<GlobalDictionary> dictionary,
- InternalIndex entry);
+ InternalIndex entry, PropertyDetails new_details,
+ Handle<Object> new_value);
- static void SetValueWithInvalidation(Isolate* isolate, const char* cell_name,
- Handle<PropertyCell> cell,
- Handle<Object> new_value);
+ // Whether or not the {details} and {value} fit together. This is an
+ // approximation with false positives.
+ static bool CheckDataIsCompatible(PropertyDetails details, Object value);
DECL_CAST(PropertyCell)
DECL_PRINTER(PropertyCell)
@@ -71,6 +80,16 @@ class PropertyCell : public HeapObject {
friend class Factory;
DECL_SETTER(name, Name)
+ DECL_SETTER(value, Object)
+ DECL_RELEASE_SETTER(value, Object)
+ DECL_SETTER(property_details_raw, Smi)
+ DECL_RELEASE_SETTER(property_details_raw, Smi)
+
+#ifdef DEBUG
+ // Whether the property cell can transition to the given state. This is an
+ // approximation with false positives.
+ bool CanTransitionTo(PropertyDetails new_details, Object new_value) const;
+#endif // DEBUG
};
} // namespace internal
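The debug-only CanTransitionTo declared above (and defined in objects.cc) checks a one-way progression between cell states. A simplified sketch of such a transition check over a plain enum; it omits the hole-value special cases and uses its own enum ordering, so it is an illustration of the shape of the check, not the real predicate:

enum class CellType { kUndefined, kConstant, kConstantType, kMutable };

// Simplified one-way transition check: cells may only move toward less
// specific states, and kUndefined is never a valid target.
bool CanTransitionTo(CellType from, CellType to) {
  switch (from) {
    case CellType::kUndefined:
      return to != CellType::kUndefined;
    case CellType::kConstant:
      return to != CellType::kUndefined;
    case CellType::kConstantType:
      return to == CellType::kConstantType || to == CellType::kMutable;
    case CellType::kMutable:
      return to == CellType::kMutable;
  }
  return false;
}

// Example: CanTransitionTo(CellType::kConstant, CellType::kMutable) is true,
// while CanTransitionTo(CellType::kMutable, CellType::kConstantType) is false.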
diff --git a/deps/v8/src/objects/property-details.h b/deps/v8/src/objects/property-details.h
index af6f0cdaeb..bab6e297e4 100644
--- a/deps/v8/src/objects/property-details.h
+++ b/deps/v8/src/objects/property-details.h
@@ -6,9 +6,10 @@
#define V8_OBJECTS_PROPERTY_DETAILS_H_
#include "include/v8.h"
-#include "src/utils/allocation.h"
#include "src/base/bit-field.h"
+#include "src/common/globals.h"
#include "src/flags/flags.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
@@ -107,10 +108,8 @@ class Representation {
// might cause a map deprecation.
bool MightCauseMapDeprecation() const {
// HeapObject to tagged representation change can be done in-place.
- if (IsTagged() || IsHeapObject()) return false;
- // When double fields unboxing is enabled, there must be a map deprecation.
// Boxed double to tagged transition is always done in-place.
- if (IsDouble()) return FLAG_unbox_double_fields;
+ if (IsTagged() || IsHeapObject() || IsDouble()) return false;
// None to double and smi to double representation changes require
// deprecation, because doubles might require box allocation, see
// CanBeInPlaceChangedTo().
@@ -127,10 +126,7 @@ class Representation {
// smi and tagged values. Doubles, however, would require a box allocation.
if (IsNone()) return !other.IsDouble();
if (!other.IsTagged()) return false;
- // Everything but unboxed doubles can be in-place changed to Tagged.
- if (FLAG_unbox_double_fields && IsDouble()) return false;
- DCHECK(IsSmi() || (!FLAG_unbox_double_fields && IsDouble()) ||
- IsHeapObject());
+ DCHECK(IsSmi() || IsDouble() || IsHeapObject());
return true;
}
@@ -138,8 +134,6 @@ class Representation {
// changed to in-place. If an in-place representation change is not allowed,
// then this will return the current representation.
Representation MostGenericInPlaceChange() const {
- // Everything but unboxed doubles can be in-place changed to Tagged.
- if (FLAG_unbox_double_fields && IsDouble()) return Representation::Double();
return Representation::Tagged();
}
@@ -210,29 +204,41 @@ static const int kInvalidEnumCacheSentinel =
// A PropertyCell's property details contains a cell type that is meaningful if
// the cell is still valid (does not hold the hole).
enum class PropertyCellType {
+ kMutable, // Cell will no longer be tracked as constant.
kUndefined, // The PREMONOMORPHIC of property cells.
kConstant, // Cell has been assigned only once.
kConstantType, // Cell has been assigned only one type.
- kMutable, // Cell will no longer be tracked as constant.
- // Arbitrary choice for dictionaries not holding cells.
+ // Value for dictionaries not holding cells, must be 0:
kNoCell = kMutable,
};
-std::ostream& operator<<(std::ostream& os, PropertyCellType type);
-
// PropertyDetails captures type and attributes for a property.
// They are used both in property dictionaries and instance descriptors.
class PropertyDetails {
public:
- // Property details for dictionary mode properties/elements.
+ // Property details for global dictionary properties.
PropertyDetails(PropertyKind kind, PropertyAttributes attributes,
PropertyCellType cell_type, int dictionary_index = 0) {
value_ = KindField::encode(kind) | LocationField::encode(kField) |
AttributesField::encode(attributes) |
+ // We track PropertyCell constness via PropertyCellTypeField,
+ // so we set ConstnessField to kMutable to simplify DCHECKs related
+ // to non-global property constness tracking.
+ ConstnessField::encode(PropertyConstness::kMutable) |
DictionaryStorageField::encode(dictionary_index) |
PropertyCellTypeField::encode(cell_type);
}
+ // Property details for dictionary mode properties/elements.
+ PropertyDetails(PropertyKind kind, PropertyAttributes attributes,
+ PropertyConstness constness, int dictionary_index = 0) {
+ value_ = KindField::encode(kind) | LocationField::encode(kField) |
+ AttributesField::encode(attributes) |
+ ConstnessField::encode(constness) |
+ DictionaryStorageField::encode(dictionary_index) |
+ PropertyCellTypeField::encode(PropertyCellType::kNoCell);
+ }
+
// Property details for fast mode properties.
PropertyDetails(PropertyKind kind, PropertyAttributes attributes,
PropertyLocation location, PropertyConstness constness,
@@ -249,6 +255,14 @@ class PropertyDetails {
return PropertyDetails(kData, NONE, cell_type);
}
+ bool operator==(PropertyDetails const& other) {
+ return value_ == other.value_;
+ }
+
+ bool operator!=(PropertyDetails const& other) {
+ return value_ != other.value_;
+ }
+
int pointer() const { return DescriptorPointer::decode(value_); }
PropertyDetails set_pointer(int i) const {
@@ -328,6 +342,8 @@ class PropertyDetails {
return PropertyCellTypeField::decode(value_);
}
+ bool operator==(const PropertyDetails& b) const { return value_ == b.value_; }
+
// Bit fields in value_ (type, shift, size). Must be public so the
// constants can be embedded in generated code.
using KindField = base::BitField<PropertyKind, 0, 1>;
@@ -341,7 +357,7 @@ class PropertyDetails {
static const int kAttributesDontEnumMask =
(DONT_ENUM << AttributesField::kShift);
- // Bit fields for normalized objects.
+ // Bit fields for normalized/dictionary mode objects.
using PropertyCellTypeField = AttributesField::Next<PropertyCellType, 2>;
using DictionaryStorageField = PropertyCellTypeField::Next<uint32_t, 23>;
@@ -356,8 +372,23 @@ class PropertyDetails {
STATIC_ASSERT(DictionaryStorageField::kLastUsedBit < 31);
STATIC_ASSERT(FieldIndexField::kLastUsedBit < 31);
+ // DictionaryStorageField must be the last field, so that overflowing it
+ // doesn't overwrite other fields.
+ STATIC_ASSERT(DictionaryStorageField::kLastUsedBit == 30);
+
+ // All bits for non-global dictionary mode objects except enumeration index
+ // must fit in a byte.
+ STATIC_ASSERT(KindField::kLastUsedBit < 8);
+ STATIC_ASSERT(ConstnessField::kLastUsedBit < 8);
+ STATIC_ASSERT(AttributesField::kLastUsedBit < 8);
+ STATIC_ASSERT(LocationField::kLastUsedBit < 8);
+
static const int kInitialIndex = 1;
+ static constexpr PropertyConstness kConstIfDictConstnessTracking =
+ V8_DICT_PROPERTY_CONST_TRACKING_BOOL ? PropertyConstness::kConst
+ : PropertyConstness::kMutable;
+
#ifdef OBJECT_PRINT
// For our gdb macros, we should perhaps change these in the future.
void Print(bool dictionary_mode);
@@ -376,6 +407,42 @@ class PropertyDetails {
void PrintAsSlowTo(std::ostream& out, bool print_dict_index);
void PrintAsFastTo(std::ostream& out, PrintMode mode = kPrintFull);
+ // Encodes those property details for non-global dictionary properties
+ // with an enumeration index of 0 as a single byte.
+ uint8_t ToByte() {
+ // We only care about the value of KindField, ConstnessField, and
+ // AttributesField. LocationField is also stored, but it will always be
+ // kField. We've statically asserted earlier that all those fields fit into
+ // a byte together.
+
+ // PropertyCellTypeField comes next, its value must be kNoCell == 0 for
+ // dictionary mode PropertyDetails anyway.
+ DCHECK_EQ(PropertyCellType::kNoCell, cell_type());
+ STATIC_ASSERT(static_cast<int>(PropertyCellType::kNoCell) == 0);
+
+ // Only to be used when the enum index isn't actually maintained
+ // by the PropertyDetails:
+ DCHECK_EQ(0, dictionary_index());
+
+ return value_;
+ }
+
+ // Only to be used for bytes obtained by ToByte. In particular, only used for
+ // non-global dictionary properties.
+ static PropertyDetails FromByte(uint8_t encoded_details) {
+ // The 0-extension to 32bit sets PropertyCellType to kNoCell and
+ // enumeration index to 0, as intended. Everything else is obtained from
+ // |encoded_details|.
+
+ PropertyDetails details(encoded_details);
+
+ DCHECK_EQ(0, details.dictionary_index());
+ DCHECK_EQ(PropertyLocation::kField, details.location());
+ DCHECK_EQ(PropertyCellType::kNoCell, details.cell_type());
+
+ return details;
+ }
+
private:
PropertyDetails(int value, int pointer) {
value_ = DescriptorPointer::update(value, pointer);
@@ -391,6 +458,8 @@ class PropertyDetails {
value_ = AttributesField::update(value, attributes);
}
+ explicit PropertyDetails(uint32_t value) : value_{value} {}
+
uint32_t value_;
};
@@ -415,6 +484,8 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(
std::ostream& os, const PropertyAttributes& attributes);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
PropertyConstness constness);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ PropertyCellType type);
} // namespace internal
} // namespace v8
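The ToByte/FromByte pair above works because kind, location, constness, and attributes are statically asserted to live in the low byte, while the cell type and enumeration index are known to be zero for non-global dictionary properties, so zero-extension restores the full value. A minimal sketch of that pack/unpack idea with invented field widths rather than the real bit layout:

#include <cassert>
#include <cstdint>

// Hypothetical layout: a handful of small flags in the low byte of a 32-bit
// value; the high bits hold an index that is zero whenever the byte form is
// used.
struct Details {
  uint32_t value;

  uint8_t ToByte() const {
    assert((value >> 8) == 0);  // The index bits must not be in use.
    return static_cast<uint8_t>(value);
  }

  static Details FromByte(uint8_t b) {
    // Zero-extension restores the full value because the dropped bits were 0.
    return Details{static_cast<uint32_t>(b)};
  }
};

int main() {
  Details d{0b10101};
  assert(Details::FromByte(d.ToByte()).value == d.value);
}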
diff --git a/deps/v8/src/objects/scope-info-inl.h b/deps/v8/src/objects/scope-info-inl.h
new file mode 100644
index 0000000000..6ba93dd80f
--- /dev/null
+++ b/deps/v8/src/objects/scope-info-inl.h
@@ -0,0 +1,82 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_SCOPE_INFO_INL_H_
+#define V8_OBJECTS_SCOPE_INFO_INL_H_
+
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/fixed-array-inl.h"
+#include "src/objects/scope-info.h"
+#include "src/objects/string.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/objects/scope-info-tq-inl.inc"
+
+TQ_OBJECT_CONSTRUCTORS_IMPL(ScopeInfo)
+
+bool ScopeInfo::IsAsmModule() const { return IsAsmModuleBit::decode(Flags()); }
+
+bool ScopeInfo::HasSimpleParameters() const {
+ return HasSimpleParametersBit::decode(Flags());
+}
+
+int ScopeInfo::Flags() const { return flags(); }
+int ScopeInfo::ParameterCount() const { return parameter_count(); }
+int ScopeInfo::ContextLocalCount() const { return context_local_count(); }
+
+Object ScopeInfo::get(int index) const {
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
+ return get(isolate, index);
+}
+
+Object ScopeInfo::get(IsolateRoot isolate, int index) const {
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
+ return TaggedField<Object>::Relaxed_Load(
+ isolate, *this, FixedArray::OffsetOfElementAt(index));
+}
+
+void ScopeInfo::set(int index, Smi value) {
+ DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
+ DCHECK(Object(value).IsSmi());
+ int offset = FixedArray::OffsetOfElementAt(index);
+ RELAXED_WRITE_FIELD(*this, offset, value);
+}
+
+void ScopeInfo::set(int index, Object value, WriteBarrierMode mode) {
+ DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
+ DCHECK(IsScopeInfo());
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
+ int offset = FixedArray::OffsetOfElementAt(index);
+ RELAXED_WRITE_FIELD(*this, offset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);
+}
+
+void ScopeInfo::CopyElements(Isolate* isolate, int dst_index, ScopeInfo src,
+ int src_index, int len, WriteBarrierMode mode) {
+ if (len == 0) return;
+ DCHECK_LE(dst_index + len, length());
+ DCHECK_LE(src_index + len, src.length());
+ DisallowGarbageCollection no_gc;
+
+ ObjectSlot dst_slot(RawFieldOfElementAt(dst_index));
+ ObjectSlot src_slot(src.RawFieldOfElementAt(src_index));
+ isolate->heap()->CopyRange(*this, dst_slot, src_slot, len, mode);
+}
+
+ObjectSlot ScopeInfo::RawFieldOfElementAt(int index) {
+ return RawField(FixedArray::OffsetOfElementAt(index));
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_SCOPE_INFO_INL_H_
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index bd6fd3cb0b..642770a852 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -11,6 +11,7 @@
#include "src/init/bootstrapper.h"
#include "src/objects/module-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/scope-info-inl.h"
#include "src/objects/string-set-inl.h"
#include "src/roots/roots.h"
@@ -157,6 +158,12 @@ Handle<ScopeInfo> ScopeInfo::Create(LocalIsolate* isolate, Zone* zone,
: 0;
const bool has_outer_scope_info = !outer_scope.is_null();
+ Handle<SourceTextModuleInfo> module_info;
+ if (scope->is_module_scope()) {
+ module_info = SourceTextModuleInfo::New(isolate, zone,
+ scope->AsModuleScope()->module());
+ }
+
const int length = kVariablePartIndex + 2 * context_local_count +
(should_save_class_variable_index ? 1 : 0) +
(has_receiver ? 1 : 0) +
@@ -216,16 +223,16 @@ Handle<ScopeInfo> ScopeInfo::Create(LocalIsolate* isolate, Zone* zone,
HasContextExtensionSlotBit::encode(scope->HasContextExtensionSlot()) |
IsReplModeScopeBit::encode(scope->is_repl_mode_scope()) |
HasLocalsBlockListBit::encode(false);
- scope_info.SetFlags(flags);
+ scope_info.set_flags(flags);
- scope_info.SetParameterCount(parameter_count);
- scope_info.SetContextLocalCount(context_local_count);
+ scope_info.set_parameter_count(parameter_count);
+ scope_info.set_context_local_count(context_local_count);
// Add context locals' names and info, module variables' names and info.
// Context locals are added using their index.
int context_local_base = index;
int context_local_info_base = context_local_base + context_local_count;
- int module_var_entry = scope_info.ModuleVariablesIndex();
+ int module_var_entry = scope_info.ModuleVariableCountIndex() + 1;
for (Variable* var : *scope->locals()) {
switch (var->location()) {
@@ -237,11 +244,11 @@ Handle<ScopeInfo> ScopeInfo::Create(LocalIsolate* isolate, Zone* zone,
DCHECK_LE(0, local_index);
DCHECK_LT(local_index, context_local_count);
uint32_t info =
- VariableModeField::encode(var->mode()) |
- InitFlagField::encode(var->initialization_flag()) |
- MaybeAssignedFlagField::encode(var->maybe_assigned()) |
- ParameterNumberField::encode(ParameterNumberField::kMax) |
- IsStaticFlagField::encode(var->is_static_flag());
+ VariableModeBits::encode(var->mode()) |
+ InitFlagBit::encode(var->initialization_flag()) |
+ MaybeAssignedFlagBit::encode(var->maybe_assigned()) |
+ ParameterNumberBits::encode(ParameterNumberBits::kMax) |
+ IsStaticFlagBit::encode(var->is_static_flag());
scope_info.set(context_local_base + local_index, *var->name(), mode);
scope_info.set(context_local_info_base + local_index,
Smi::FromInt(info));
@@ -253,11 +260,11 @@ Handle<ScopeInfo> ScopeInfo::Create(LocalIsolate* isolate, Zone* zone,
scope_info.set(module_var_entry + kModuleVariableIndexOffset,
Smi::FromInt(var->index()));
uint32_t properties =
- VariableModeField::encode(var->mode()) |
- InitFlagField::encode(var->initialization_flag()) |
- MaybeAssignedFlagField::encode(var->maybe_assigned()) |
- ParameterNumberField::encode(ParameterNumberField::kMax) |
- IsStaticFlagField::encode(var->is_static_flag());
+ VariableModeBits::encode(var->mode()) |
+ InitFlagBit::encode(var->initialization_flag()) |
+ MaybeAssignedFlagBit::encode(var->maybe_assigned()) |
+ ParameterNumberBits::encode(ParameterNumberBits::kMax) |
+ IsStaticFlagBit::encode(var->is_static_flag());
scope_info.set(module_var_entry + kModuleVariablePropertiesOffset,
Smi::FromInt(properties));
module_var_entry += kModuleVariableEntryLength;
@@ -282,7 +289,7 @@ Handle<ScopeInfo> ScopeInfo::Create(LocalIsolate* isolate, Zone* zone,
int index = parameter->index() - scope->ContextHeaderLength();
int info_index = context_local_info_base + index;
int info = Smi::ToInt(scope_info.get(info_index));
- info = ParameterNumberField::update(info, i);
+ info = ParameterNumberBits::update(info, i);
scope_info.set(info_index, Smi::FromInt(info));
}
@@ -292,11 +299,11 @@ Handle<ScopeInfo> ScopeInfo::Create(LocalIsolate* isolate, Zone* zone,
if (var->location() == VariableLocation::CONTEXT) {
int local_index = var->index() - scope->ContextHeaderLength();
uint32_t info =
- VariableModeField::encode(var->mode()) |
- InitFlagField::encode(var->initialization_flag()) |
- MaybeAssignedFlagField::encode(var->maybe_assigned()) |
- ParameterNumberField::encode(ParameterNumberField::kMax) |
- IsStaticFlagField::encode(var->is_static_flag());
+ VariableModeBits::encode(var->mode()) |
+ InitFlagBit::encode(var->initialization_flag()) |
+ MaybeAssignedFlagBit::encode(var->maybe_assigned()) |
+ ParameterNumberBits::encode(ParameterNumberBits::kMax) |
+ IsStaticFlagBit::encode(var->is_static_flag());
scope_info.set(context_local_base + local_index, *var->name(), mode);
scope_info.set(context_local_info_base + local_index,
Smi::FromInt(info));
@@ -326,7 +333,7 @@ Handle<ScopeInfo> ScopeInfo::Create(LocalIsolate* isolate, Zone* zone,
}
// If present, add the function variable name and its index.
- DCHECK_EQ(index, scope_info.FunctionNameInfoIndex());
+ DCHECK_EQ(index, scope_info.FunctionVariableInfoIndex());
if (has_function_name) {
Variable* var = scope->AsDeclarationScope()->function_var();
int var_index = -1;
@@ -358,19 +365,17 @@ Handle<ScopeInfo> ScopeInfo::Create(LocalIsolate* isolate, Zone* zone,
if (has_outer_scope_info) {
scope_info.set(index++, *outer_scope.ToHandleChecked(), mode);
}
- }
- // Module-specific information (only for module scopes).
- if (scope->is_module_scope()) {
- Handle<SourceTextModuleInfo> module_info = SourceTextModuleInfo::New(
- isolate, zone, scope->AsModuleScope()->module());
- DCHECK_EQ(index, scope_info_handle->ModuleInfoIndex());
- scope_info_handle->set(index++, *module_info);
- DCHECK_EQ(index, scope_info_handle->ModuleVariableCountIndex());
- scope_info_handle->set(index++, Smi::FromInt(module_vars_count));
- DCHECK_EQ(index, scope_info_handle->ModuleVariablesIndex());
- // The variable entries themselves have already been written above.
- index += kModuleVariableEntryLength * module_vars_count;
+ // Module-specific information (only for module scopes).
+ if (scope->is_module_scope()) {
+ DCHECK_EQ(index, scope_info.ModuleInfoIndex());
+ scope_info.set(index++, *module_info);
+ DCHECK_EQ(index, scope_info.ModuleVariableCountIndex());
+ scope_info.set(index++, Smi::FromInt(module_vars_count));
+ DCHECK_EQ(index, scope_info.ModuleVariablesIndex());
+ // The variable entries themselves have already been written above.
+ index += kModuleVariableEntryLength * module_vars_count;
+ }
}
DCHECK_EQ(index, scope_info_handle->length());
@@ -416,14 +421,14 @@ Handle<ScopeInfo> ScopeInfo::CreateForWithScope(
PrivateNameLookupSkipsOuterClassBit::encode(false) |
HasContextExtensionSlotBit::encode(true) |
IsReplModeScopeBit::encode(false) | HasLocalsBlockListBit::encode(false);
- scope_info->SetFlags(flags);
+ scope_info->set_flags(flags);
- scope_info->SetParameterCount(0);
- scope_info->SetContextLocalCount(0);
+ scope_info->set_parameter_count(0);
+ scope_info->set_context_local_count(0);
int index = kVariablePartIndex;
DCHECK_EQ(index, scope_info->ReceiverInfoIndex());
- DCHECK_EQ(index, scope_info->FunctionNameInfoIndex());
+ DCHECK_EQ(index, scope_info->FunctionVariableInfoIndex());
DCHECK_EQ(index, scope_info->InferredFunctionNameIndex());
DCHECK_EQ(index, scope_info->PositionInfoIndex());
DCHECK(index == scope_info->OuterScopeInfoIndex());
@@ -496,9 +501,9 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
PrivateNameLookupSkipsOuterClassBit::encode(false) |
HasContextExtensionSlotBit::encode(is_native_context) |
IsReplModeScopeBit::encode(false) | HasLocalsBlockListBit::encode(false);
- scope_info->SetFlags(flags);
- scope_info->SetParameterCount(parameter_count);
- scope_info->SetContextLocalCount(context_local_count);
+ scope_info->set_flags(flags);
+ scope_info->set_parameter_count(parameter_count);
+ scope_info->set_context_local_count(context_local_count);
int index = kVariablePartIndex;
@@ -510,11 +515,11 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
DCHECK_EQ(index, scope_info->ContextLocalInfosIndex());
if (context_local_count > 0) {
const uint32_t value =
- VariableModeField::encode(VariableMode::kConst) |
- InitFlagField::encode(kCreatedInitialized) |
- MaybeAssignedFlagField::encode(kNotAssigned) |
- ParameterNumberField::encode(ParameterNumberField::kMax) |
- IsStaticFlagField::encode(IsStaticFlag::kNotStatic);
+ VariableModeBits::encode(VariableMode::kConst) |
+ InitFlagBit::encode(kCreatedInitialized) |
+ MaybeAssignedFlagBit::encode(kNotAssigned) |
+ ParameterNumberBits::encode(ParameterNumberBits::kMax) |
+ IsStaticFlagBit::encode(IsStaticFlag::kNotStatic);
scope_info->set(index++, Smi::FromInt(value));
}
@@ -525,7 +530,7 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
scope_info->set(index++, Smi::FromInt(receiver_index));
}
- DCHECK_EQ(index, scope_info->FunctionNameInfoIndex());
+ DCHECK_EQ(index, scope_info->FunctionVariableInfoIndex());
if (is_empty_function) {
scope_info->set(index++, *isolate->factory()->empty_string());
scope_info->set(index++, Smi::zero());
@@ -564,7 +569,7 @@ Handle<ScopeInfo> ScopeInfo::RecreateWithBlockList(
// blocklist field, so {LocalsBlockListIndex} returns the correct value.
scope_info->CopyElements(isolate, 0, *original, 0, kVariablePartIndex,
WriteBarrierMode::UPDATE_WRITE_BARRIER);
- scope_info->SetFlags(
+ scope_info->set_flags(
HasLocalsBlockListBit::update(scope_info->Flags(), true));
// Copy the dynamic part including the provided blocklist:
@@ -575,7 +580,7 @@ Handle<ScopeInfo> ScopeInfo::RecreateWithBlockList(
isolate, kVariablePartIndex, *original, kVariablePartIndex,
scope_info->LocalsBlockListIndex() - kVariablePartIndex,
WriteBarrierMode::UPDATE_WRITE_BARRIER);
- scope_info->set(scope_info->LocalsBlockListIndex(), *blocklist);
+ scope_info->set_locals_block_list(0, *blocklist);
scope_info->CopyElements(
isolate, scope_info->LocalsBlockListIndex() + 1, *original,
scope_info->LocalsBlockListIndex(),
@@ -695,12 +700,12 @@ bool ScopeInfo::HasSharedFunctionName() const {
void ScopeInfo::SetFunctionName(Object name) {
DCHECK(HasFunctionName());
DCHECK(name.IsString() || name == SharedFunctionInfo::kNoSharedNameSentinel);
- set(FunctionNameInfoIndex(), name);
+ set_function_variable_info_name(0, name);
}
void ScopeInfo::SetInferredFunctionName(String name) {
DCHECK(HasInferredFunctionName());
- set(InferredFunctionNameIndex(), name);
+ set_inferred_function_name(0, name);
}
bool ScopeInfo::HasOuterScopeInfo() const {
@@ -714,7 +719,7 @@ bool ScopeInfo::IsDebugEvaluateScope() const {
void ScopeInfo::SetIsDebugEvaluateScope() {
CHECK(!IsEmpty());
DCHECK_EQ(scope_type(), WITH_SCOPE);
- SetFlags(Flags() | IsDebugEvaluateScopeBit::encode(true));
+ set_flags(Flags() | IsDebugEvaluateScopeBit::encode(true));
}
bool ScopeInfo::PrivateNameLookupSkipsOuterClass() const {
@@ -731,19 +736,19 @@ bool ScopeInfo::HasLocalsBlockList() const {
StringSet ScopeInfo::LocalsBlockList() const {
DCHECK(HasLocalsBlockList());
- return StringSet::cast(get(LocalsBlockListIndex()));
+ return StringSet::cast(locals_block_list(0));
}
bool ScopeInfo::HasContext() const { return ContextLength() > 0; }
Object ScopeInfo::FunctionName() const {
DCHECK(HasFunctionName());
- return get(FunctionNameInfoIndex());
+ return function_variable_info_name(0);
}
Object ScopeInfo::InferredFunctionName() const {
DCHECK(HasInferredFunctionName());
- return get(InferredFunctionNameIndex());
+ return inferred_function_name(0);
}
String ScopeInfo::FunctionDebugName() const {
@@ -761,83 +766,64 @@ String ScopeInfo::FunctionDebugName() const {
int ScopeInfo::StartPosition() const {
DCHECK(HasPositionInfo());
- return Smi::ToInt(get(PositionInfoIndex()));
+ return position_info_start(0);
}
int ScopeInfo::EndPosition() const {
DCHECK(HasPositionInfo());
- return Smi::ToInt(get(PositionInfoIndex() + 1));
+ return position_info_end(0);
}
void ScopeInfo::SetPositionInfo(int start, int end) {
DCHECK(HasPositionInfo());
DCHECK_LE(start, end);
- set(PositionInfoIndex(), Smi::FromInt(start));
- set(PositionInfoIndex() + 1, Smi::FromInt(end));
+ set_position_info_start(0, start);
+ set_position_info_end(0, end);
}
ScopeInfo ScopeInfo::OuterScopeInfo() const {
DCHECK(HasOuterScopeInfo());
- return ScopeInfo::cast(get(OuterScopeInfoIndex()));
+ return ScopeInfo::cast(outer_scope_info(0));
}
SourceTextModuleInfo ScopeInfo::ModuleDescriptorInfo() const {
DCHECK(scope_type() == MODULE_SCOPE);
- return SourceTextModuleInfo::cast(get(ModuleInfoIndex()));
+ return SourceTextModuleInfo::cast(module_info(0));
}
String ScopeInfo::ContextLocalName(int var) const {
- DCHECK_LE(0, var);
- DCHECK_LT(var, ContextLocalCount());
- int info_index = ContextLocalNamesIndex() + var;
- return String::cast(get(info_index));
+ return context_local_names(var);
}
VariableMode ScopeInfo::ContextLocalMode(int var) const {
- DCHECK_LE(0, var);
- DCHECK_LT(var, ContextLocalCount());
- int info_index = ContextLocalInfosIndex() + var;
- int value = Smi::ToInt(get(info_index));
- return VariableModeField::decode(value);
+ int value = context_local_infos(var);
+ return VariableModeBits::decode(value);
}
IsStaticFlag ScopeInfo::ContextLocalIsStaticFlag(int var) const {
- DCHECK_LE(0, var);
- DCHECK_LT(var, ContextLocalCount());
- int info_index = ContextLocalInfosIndex() + var;
- int value = Smi::ToInt(get(info_index));
- return IsStaticFlagField::decode(value);
+ int value = context_local_infos(var);
+ return IsStaticFlagBit::decode(value);
}
InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) const {
- DCHECK_LE(0, var);
- DCHECK_LT(var, ContextLocalCount());
- int info_index = ContextLocalInfosIndex() + var;
- int value = Smi::ToInt(get(info_index));
- return InitFlagField::decode(value);
+ int value = context_local_infos(var);
+ return InitFlagBit::decode(value);
}
bool ScopeInfo::ContextLocalIsParameter(int var) const {
- DCHECK_LE(0, var);
- DCHECK_LT(var, ContextLocalCount());
- int info_index = ContextLocalInfosIndex() + var;
- int value = Smi::ToInt(get(info_index));
- return ParameterNumberField::decode(value) != ParameterNumberField::kMax;
+ int value = context_local_infos(var);
+ return ParameterNumberBits::decode(value) != ParameterNumberBits::kMax;
}
uint32_t ScopeInfo::ContextLocalParameterNumber(int var) const {
DCHECK(ContextLocalIsParameter(var));
- int info_index = ContextLocalInfosIndex() + var;
- int value = Smi::ToInt(get(info_index));
- return ParameterNumberField::decode(value);
+ int value = context_local_infos(var);
+ return ParameterNumberBits::decode(value);
}
MaybeAssignedFlag ScopeInfo::ContextLocalMaybeAssignedFlag(int var) const {
- DCHECK_LE(0, var);
- DCHECK_LT(var, ContextLocalCount());
- int info_index = ContextLocalInfosIndex() + var;
- int value = Smi::ToInt(get(info_index));
- return MaybeAssignedFlagField::decode(value);
+ int value = context_local_infos(var);
+ return MaybeAssignedFlagBit::decode(value);
}
// static
@@ -860,7 +846,7 @@ int ScopeInfo::ModuleIndex(String name, VariableMode* mode,
DCHECK_NOT_NULL(init_flag);
DCHECK_NOT_NULL(maybe_assigned_flag);
- int module_vars_count = Smi::ToInt(get(ModuleVariableCountIndex()));
+ int module_vars_count = module_variable_count(0);
int entry = ModuleVariablesIndex();
for (int i = 0; i < module_vars_count; ++i) {
String var_name = String::cast(get(entry + kModuleVariableNameOffset));
@@ -890,7 +876,7 @@ int ScopeInfo::ContextSlotIndex(ScopeInfo scope_info, String name,
if (scope_info.IsEmpty()) return -1;
int start = scope_info.ContextLocalNamesIndex();
- int end = start + scope_info.ContextLocalCount();
+ int end = start + scope_info.context_local_count();
for (int i = start; i < end; ++i) {
if (name != scope_info.get(i)) continue;
int var = i - start;
@@ -909,7 +895,7 @@ int ScopeInfo::ContextSlotIndex(ScopeInfo scope_info, String name,
int ScopeInfo::SavedClassVariableContextLocalIndex() const {
if (HasSavedClassVariableIndexBit::decode(Flags())) {
- int index = Smi::ToInt(get(SavedClassVariableInfoIndex()));
+ int index = saved_class_variable_info(0);
return index - Context::MIN_CONTEXT_SLOTS;
}
return -1;
@@ -918,7 +904,7 @@ int ScopeInfo::SavedClassVariableContextLocalIndex() const {
int ScopeInfo::ReceiverContextSlotIndex() const {
if (ReceiverVariableBits::decode(Flags()) ==
VariableAllocationInfo::CONTEXT) {
- return Smi::ToInt(get(ReceiverInfoIndex()));
+ return receiver_info(0);
}
return -1;
}
@@ -928,7 +914,7 @@ int ScopeInfo::FunctionContextSlotIndex(String name) const {
if (FunctionVariableBits::decode(Flags()) ==
VariableAllocationInfo::CONTEXT &&
FunctionName() == name) {
- return Smi::ToInt(get(FunctionNameInfoIndex() + 1));
+ return function_variable_info_context_or_stack_slot_index(0);
}
return -1;
}
@@ -938,80 +924,74 @@ FunctionKind ScopeInfo::function_kind() const {
}
int ScopeInfo::ContextLocalNamesIndex() const {
- DCHECK_LE(kVariablePartIndex, length());
- return kVariablePartIndex;
+ return ConvertOffsetToIndex(kContextLocalNamesOffset);
}
int ScopeInfo::ContextLocalInfosIndex() const {
- return ContextLocalNamesIndex() + ContextLocalCount();
+ return ConvertOffsetToIndex(ContextLocalInfosOffset());
}
int ScopeInfo::SavedClassVariableInfoIndex() const {
- return ContextLocalInfosIndex() + ContextLocalCount();
+ return ConvertOffsetToIndex(SavedClassVariableInfoOffset());
}
int ScopeInfo::ReceiverInfoIndex() const {
- return SavedClassVariableInfoIndex() + (HasSavedClassVariableIndex() ? 1 : 0);
+ return ConvertOffsetToIndex(ReceiverInfoOffset());
}
-int ScopeInfo::FunctionNameInfoIndex() const {
- return ReceiverInfoIndex() + (HasAllocatedReceiver() ? 1 : 0);
+int ScopeInfo::FunctionVariableInfoIndex() const {
+ return ConvertOffsetToIndex(FunctionVariableInfoOffset());
}
int ScopeInfo::InferredFunctionNameIndex() const {
- return FunctionNameInfoIndex() +
- (HasFunctionName() ? kFunctionNameEntries : 0);
+ return ConvertOffsetToIndex(InferredFunctionNameOffset());
}
int ScopeInfo::PositionInfoIndex() const {
- return InferredFunctionNameIndex() + (HasInferredFunctionName() ? 1 : 0);
+ return ConvertOffsetToIndex(PositionInfoOffset());
}
int ScopeInfo::OuterScopeInfoIndex() const {
- return PositionInfoIndex() + (HasPositionInfo() ? kPositionInfoEntries : 0);
+ return ConvertOffsetToIndex(OuterScopeInfoOffset());
}
int ScopeInfo::LocalsBlockListIndex() const {
- return OuterScopeInfoIndex() + (HasOuterScopeInfo() ? 1 : 0);
+ return ConvertOffsetToIndex(LocalsBlockListOffset());
}
int ScopeInfo::ModuleInfoIndex() const {
- return LocalsBlockListIndex() + (HasLocalsBlockList() ? 1 : 0);
+ return ConvertOffsetToIndex(ModuleInfoOffset());
}
int ScopeInfo::ModuleVariableCountIndex() const {
- return ModuleInfoIndex() + 1;
+ return ConvertOffsetToIndex(ModuleVariableCountOffset());
}
int ScopeInfo::ModuleVariablesIndex() const {
- return ModuleVariableCountIndex() + 1;
+ return ConvertOffsetToIndex(ModuleVariablesOffset());
}
void ScopeInfo::ModuleVariable(int i, String* name, int* index,
VariableMode* mode,
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag) {
- DCHECK_LE(0, i);
- DCHECK_LT(i, Smi::ToInt(get(ModuleVariableCountIndex())));
-
- int entry = ModuleVariablesIndex() + i * kModuleVariableEntryLength;
- int properties = Smi::ToInt(get(entry + kModuleVariablePropertiesOffset));
+ int properties = module_variables_properties(i);
if (name != nullptr) {
- *name = String::cast(get(entry + kModuleVariableNameOffset));
+ *name = module_variables_name(i);
}
if (index != nullptr) {
- *index = Smi::ToInt(get(entry + kModuleVariableIndexOffset));
+ *index = module_variables_index(i);
DCHECK_NE(*index, 0);
}
if (mode != nullptr) {
- *mode = VariableModeField::decode(properties);
+ *mode = VariableModeBits::decode(properties);
}
if (init_flag != nullptr) {
- *init_flag = InitFlagField::decode(properties);
+ *init_flag = InitFlagBit::decode(properties);
}
if (maybe_assigned_flag != nullptr) {
- *maybe_assigned_flag = MaybeAssignedFlagField::decode(properties);
+ *maybe_assigned_flag = MaybeAssignedFlagBit::decode(properties);
}
}
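The rewritten accessors above read the packed per-local properties word through generated bit-field helpers (VariableModeBits, InitFlagBit, MaybeAssignedFlagBit, ParameterNumberBits) instead of hand-decoding a Smi. Per the VariableProperties layout declared in scope-info.tq further down, that word carries 4 mode bits, 1 initialization bit, 1 maybe-assigned bit, 16 parameter-number bits (the all-ones value meaning "not a parameter") and 1 is-static bit. A minimal standalone sketch of the decode pattern, using plain shifts and masks rather than V8's base::BitField and purely illustrative enum values:

#include <cstdint>
#include <cstdio>

// Extract `bits` bits of `value` starting at `shift` (not V8 API).
constexpr unsigned Field(uint32_t value, int shift, int bits) {
  return (value >> shift) & ((1u << bits) - 1u);
}

int main() {
  // Illustrative packed value: mode in bits 0-3, init flag in bit 4,
  // maybe-assigned flag in bit 5, parameter number in bits 6-21.
  uint32_t properties = (2u << 0)   // variable_mode (e.g. kVar)
                      | (1u << 4)   // init_flag = kCreatedInitialized
                      | (0u << 5)   // maybe_assigned_flag = kNotAssigned
                      | (3u << 6);  // parameter_number = 3
  std::printf("mode=%u init=%u assigned=%u param=%u\n",
              Field(properties, 0, 4), Field(properties, 4, 1),
              Field(properties, 5, 1), Field(properties, 6, 16));
  return 0;
}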
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index 719f1d9204..5ee404e15b 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -19,6 +19,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/scope-info-tq.inc"
+
template <typename T>
class Handle;
class Isolate;
@@ -36,12 +38,27 @@ class Zone;
// This object provides quick access to scope info details for runtime
// routines.
-class ScopeInfo : public FixedArray {
+class ScopeInfo : public TorqueGeneratedScopeInfo<ScopeInfo, FixedArrayBase> {
public:
DEFINE_TORQUE_GENERATED_SCOPE_FLAGS()
- DECL_CAST(ScopeInfo)
DECL_PRINTER(ScopeInfo)
+ DECL_VERIFIER(ScopeInfo)
+
+ // For refactoring, clone some FixedArray member functions. Eventually this
+ // class will stop pretending to be a FixedArray, but we're not quite there.
+ inline Object get(int index) const;
+ inline Object get(IsolateRoot isolate, int index) const;
+ // Setter that doesn't need write barrier.
+ inline void set(int index, Smi value);
+ // Setter with explicit barrier mode.
+ inline void set(int index, Object value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void CopyElements(Isolate* isolate, int dst_index, ScopeInfo src,
+ int src_index, int len, WriteBarrierMode mode);
+ inline ObjectSlot RawFieldOfElementAt(int index);
+
+ class BodyDescriptor;
// Return the type of this scope.
ScopeType scope_type() const;
@@ -52,9 +69,6 @@ class ScopeInfo : public FixedArray {
// True if this scope is a (var) declaration scope.
bool is_declaration_scope() const;
- // True if this scope is a class scope.
- bool is_class_scope() const;
-
// Does this scope make a sloppy eval call?
bool SloppyEvalCanExtendVars() const;
@@ -80,7 +94,7 @@ class ScopeInfo : public FixedArray {
  // Does this scope have a class brand (for private methods)?
bool HasClassBrand() const;
- // Does this scope contains a saved class variable context local slot index
+ // Does this scope contain a saved class variable context local slot index
// for checking receivers of static private methods?
bool HasSavedClassVariableIndex() const;
@@ -241,19 +255,12 @@ class ScopeInfo : public FixedArray {
// Serializes empty scope info.
V8_EXPORT_PRIVATE static ScopeInfo Empty(Isolate* isolate);
-// The layout of the static part of a ScopeInfo is as follows. Each entry is
-// numeric and occupies one array slot.
-// 1. A set of properties of the scope.
-// 2. The number of parameters. For non-function scopes this is 0.
-// 3. The number of non-parameter and parameter variables allocated in the
-// context.
#define FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(V) \
V(Flags) \
V(ParameterCount) \
V(ContextLocalCount)
#define FIELD_ACCESSORS(name) \
- inline void Set##name(int value); \
inline int name() const;
FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(FIELD_ACCESSORS)
#undef FIELD_ACCESSORS
@@ -265,7 +272,11 @@ class ScopeInfo : public FixedArray {
kVariablePartIndex
};
- static const int kFlagsOffset = OffsetOfElementAt(Fields::kFlags);
+// Make sure the Fields enum agrees with Torque-generated offsets.
+#define ASSERT_MATCHED_FIELD(name) \
+ STATIC_ASSERT(FixedArray::OffsetOfElementAt(k##name) == k##name##Offset);
+ FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(ASSERT_MATCHED_FIELD)
+#undef ASSERT_MATCHED_FIELD
STATIC_ASSERT(LanguageModeSize == 1 << LanguageModeBit::kSize);
STATIC_ASSERT(kLastFunctionKind <= FunctionKindBits::kMax);
@@ -273,49 +284,11 @@ class ScopeInfo : public FixedArray {
bool IsEmpty() const;
private:
- // The layout of the variable part of a ScopeInfo is as follows:
- // 1. ContextLocalNames:
- // Contains the names of local variables and parameters that are allocated
- // in the context. They are stored in increasing order of the context slot
- // index starting with Context::MIN_CONTEXT_SLOTS. One slot is used per
- // context local, so in total this part occupies ContextLocalCount() slots
- // in the array.
- // 2. ContextLocalInfos:
- // Contains the variable modes and initialization flags corresponding to
- // the context locals in ContextLocalNames. One slot is used per
- // context local, so in total this part occupies ContextLocalCount()
- // slots in the array.
- // 3. SavedClassVariableInfo:
- // If the scope is a class scope and it has static private methods that
- // may be accessed directly or through eval, one slot is reserved to hold
- // the context slot index for the class variable.
- // 4. ReceiverInfo:
- // If the scope binds a "this" value, one slot is reserved to hold the
- // context or stack slot index for the variable.
- // 5. FunctionNameInfo:
- // If the scope belongs to a named function expression this part contains
- // information about the function variable. It always occupies two array
- // slots: a. The name of the function variable.
- // b. The context or stack slot index for the variable.
- // 6. InferredFunctionName:
- // Contains the function's inferred name.
- // 7. SourcePosition:
- // Contains two slots with a) the startPosition and b) the endPosition if
- // the scope belongs to a function or script.
- // 8. OuterScopeInfoIndex:
- // The outer scope's ScopeInfo or the hole if there's none.
- // 9. LocalsBlockList: List of stack allocated local variables. Used by
- // debug evaluate to properly abort variable lookup when a name clashes
- // with a stack allocated local that can't be materialized.
- // 10. SourceTextModuleInfo, ModuleVariableCount, and ModuleVariables:
- // For a module scope, this part contains the SourceTextModuleInfo, the
- // number of MODULE-allocated variables, and the metadata of those
- // variables. For non-module scopes it is empty.
int ContextLocalNamesIndex() const;
int ContextLocalInfosIndex() const;
int SavedClassVariableInfoIndex() const;
int ReceiverInfoIndex() const;
- int FunctionNameInfoIndex() const;
+ int FunctionVariableInfoIndex() const;
int InferredFunctionNameIndex() const;
int PositionInfoIndex() const;
int OuterScopeInfoIndex() const;
@@ -326,6 +299,13 @@ class ScopeInfo : public FixedArray {
static bool NeedsPositionInfo(ScopeType type);
+ // Converts byte offsets within the object to FixedArray-style indices.
+ static constexpr int ConvertOffsetToIndex(int offset) {
+ int index = (offset - FixedArray::kHeaderSize) / kTaggedSize;
+ CONSTEXPR_DCHECK(FixedArray::OffsetOfElementAt(index) == offset);
+ return index;
+ }
+
enum class BootstrappingType { kScript, kFunction, kNative };
static Handle<ScopeInfo> CreateForBootstrapping(Isolate* isolate,
BootstrappingType type);
@@ -345,21 +325,13 @@ class ScopeInfo : public FixedArray {
static const int kFunctionNameEntries = 2;
static const int kPositionInfoEntries = 2;
- // Hide an inherited member function to ensure that callers have been updated
- // to use IsEmpty instead.
- using FixedArray::length;
-
// Properties of variables.
- using VariableModeField = base::BitField<VariableMode, 0, 4>;
- using InitFlagField = VariableModeField::Next<InitializationFlag, 1>;
- using MaybeAssignedFlagField = InitFlagField::Next<MaybeAssignedFlag, 1>;
- using ParameterNumberField = MaybeAssignedFlagField::Next<uint32_t, 16>;
- using IsStaticFlagField = ParameterNumberField::Next<IsStaticFlag, 1>;
+ DEFINE_TORQUE_GENERATED_VARIABLE_PROPERTIES()
friend class ScopeIterator;
friend std::ostream& operator<<(std::ostream& os, VariableAllocationInfo var);
- OBJECT_CONSTRUCTORS(ScopeInfo, FixedArray);
+ TQ_OBJECT_CONSTRUCTORS(ScopeInfo)
FRIEND_TEST(TestWithNativeContext, RecreateScopeInfoWithLocalsBlocklistWorks);
};
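The new ConvertOffsetToIndex helper shown above maps Torque-generated byte offsets back onto FixedArray-style slot indices via (offset - kHeaderSize) / kTaggedSize, and the ASSERT_MATCHED_FIELD block checks that the legacy Fields enum still agrees with those offsets. A small sketch of the same arithmetic under assumed constants (two tagged header words, 8-byte tagged size; the real values come from FixedArray::kHeaderSize and kTaggedSize and depend on pointer compression):

#include <cassert>

// Assumed, illustrative constants -- not the values of any particular build.
constexpr int kTaggedSize = 8;
constexpr int kHeaderSize = 2 * kTaggedSize;  // map word + length word

constexpr int OffsetOfElementAt(int index) {
  return kHeaderSize + index * kTaggedSize;
}

constexpr int ConvertOffsetToIndex(int offset) {
  return (offset - kHeaderSize) / kTaggedSize;
}

int main() {
  // Slot 3 lives at byte offset 16 + 3 * 8 = 40, and the mapping round-trips.
  static_assert(OffsetOfElementAt(3) == 40, "layout assumption");
  static_assert(ConvertOffsetToIndex(40) == 3, "inverse mapping");
  assert(ConvertOffsetToIndex(OffsetOfElementAt(7)) == 7);
  return 0;
}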
diff --git a/deps/v8/src/objects/scope-info.tq b/deps/v8/src/objects/scope-info.tq
index 3bd717171b..c238d5309d 100644
--- a/deps/v8/src/objects/scope-info.tq
+++ b/deps/v8/src/objects/scope-info.tq
@@ -2,23 +2,49 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-extern class ScopeInfo extends FixedArray;
-
extern macro EmptyScopeInfoConstant(): ScopeInfo;
const kEmptyScopeInfo: ScopeInfo = EmptyScopeInfoConstant();
-const kScopeInfoFlagsIndex:
- constexpr int32 generates 'ScopeInfo::Fields::kFlags';
+extern enum ScopeType extends uint32 {
+ CLASS_SCOPE, // Also used for the empty scope (for NativeContext & builtins).
+ EVAL_SCOPE,
+ FUNCTION_SCOPE,
+ MODULE_SCOPE,
+ SCRIPT_SCOPE,
+ CATCH_SCOPE,
+ BLOCK_SCOPE,
+ WITH_SCOPE
+}
+
+extern enum VariableAllocationInfo extends uint32 {
+ NONE,
+ STACK,
+ CONTEXT,
+ UNUSED
+}
+
+extern enum VariableMode extends uint32 {
+ kLet,
+ kConst,
+ kVar,
+ kTemporary,
+ kDynamic,
+ kDynamicGlobal,
+ kDynamicLocal,
+ kPrivateMethod,
+ kPrivateSetterOnly,
+ kPrivateGetterOnly,
+ kPrivateGetterAndSetter
+}
-operator '.flags' macro LoadScopeInfoFlags(implicit context: Context)(
- scopeInfo: ScopeInfo): ScopeFlags {
- return Convert<ScopeFlags>(
- UnsafeCast<Smi>(scopeInfo.objects[kScopeInfoFlagsIndex]));
+extern enum InitializationFlag extends uint32 {
+ kNeedsInitialization,
+ kCreatedInitialized
}
-type ScopeType extends uint32 constexpr 'ScopeType';
-type VariableAllocationInfo extends uint32
-constexpr 'VariableAllocationInfo';
+extern enum IsStaticFlag extends uint32 { kNotStatic, kStatic }
+
+extern enum MaybeAssignedFlag extends uint32 { kNotAssigned, kMaybeAssigned }
// Properties of scopes.
bitfield struct ScopeFlags extends uint31 {
@@ -46,3 +72,112 @@ bitfield struct ScopeFlags extends uint31 {
has_locals_block_list: bool: 1 bit;
is_empty: bool: 1 bit;
}
+
+struct PositionInfo {
+ start: Smi;
+ end: Smi;
+}
+
+struct FunctionVariableInfo {
+ name: String|Zero;
+ context_or_stack_slot_index: Smi;
+}
+
+bitfield struct VariableProperties extends uint31 {
+ variable_mode: VariableMode: 4 bit;
+ init_flag: InitializationFlag: 1 bit;
+ maybe_assigned_flag: MaybeAssignedFlag: 1 bit;
+ parameter_number: uint32: 16 bit;
+ is_static_flag: IsStaticFlag: 1 bit;
+}
+
+struct ModuleVariable {
+ name: String;
+ index: Smi;
+ properties: SmiTagged<VariableProperties>;
+}
+
+@generateCppClass
+@generateBodyDescriptor
+extern class ScopeInfo extends FixedArrayBase {
+ const flags: SmiTagged<ScopeFlags>;
+
+ // The number of parameters. For non-function scopes this is 0.
+ parameter_count: Smi;
+
+ // The number of non-parameter and parameter variables allocated in the
+ // context.
+ const context_local_count: Smi;
+
+ // Contains the names of local variables and parameters that are allocated
+ // in the context. They are stored in increasing order of the context slot
+ // index starting with Context::MIN_CONTEXT_SLOTS.
+ context_local_names[context_local_count]: String;
+
+ // Contains the variable modes and initialization flags corresponding to
+ // the context locals in ContextLocalNames.
+ context_local_infos[context_local_count]: SmiTagged<VariableProperties>;
+
+ // If the scope is a class scope and it has static private methods that
+ // may be accessed directly or through eval, one slot is reserved to hold
+ // the context slot index for the class variable.
+ saved_class_variable_info[flags.has_saved_class_variable_index ? 1 : 0]: Smi;
+
+ // If the scope binds a "this" value, one slot is reserved to hold the
+ // context or stack slot index for the variable.
+ receiver_info[
+ flags.receiver_variable ==
+ FromConstexpr<VariableAllocationInfo>(VariableAllocationInfo::STACK)
+ || flags.receiver_variable ==
+ FromConstexpr<VariableAllocationInfo>(VariableAllocationInfo::CONTEXT)
+ ? 1 : 0]: Smi;
+
+ // If the scope belongs to a named function expression this part contains
+ // information about the function variable. It always occupies two array
+ // slots: a. The name of the function variable.
+ // b. The context or stack slot index for the variable.
+ function_variable_info[flags.function_variable != FromConstexpr<VariableAllocationInfo>(VariableAllocationInfo::NONE) ? 1 : 0]:
+ FunctionVariableInfo;
+
+ inferred_function_name[flags.has_inferred_function_name ? 1 : 0]: String|
+ Undefined;
+
+ // Contains two slots with a) the startPosition and b) the endPosition if
+ // the scope belongs to a function or script.
+ position_info[flags.scope_type == ScopeType::FUNCTION_SCOPE ||
+ flags.scope_type == ScopeType::SCRIPT_SCOPE ||
+ flags.scope_type == ScopeType::EVAL_SCOPE ||
+ flags.scope_type == ScopeType::MODULE_SCOPE
+ ? 1 : 0]: PositionInfo;
+
+ outer_scope_info[flags.has_outer_scope_info ? 1 : 0]: ScopeInfo|TheHole;
+
+ // List of stack allocated local variables. Used by debug evaluate to properly
+ // abort variable lookup when a name clashes with a stack allocated local that
+ // can't be materialized.
+ locals_block_list[flags.has_locals_block_list ? 1 : 0]: HashTable;
+
+ // For a module scope, this part contains the SourceTextModuleInfo, the
+ // number of MODULE-allocated variables, and the metadata of those
+ // variables. For non-module scopes it is empty.
+ module_info[flags.scope_type == ScopeType::MODULE_SCOPE ? 1 : 0]:
+ SourceTextModuleInfo;
+ const module_variable_count[flags.scope_type == ScopeType::MODULE_SCOPE ? 1 : 0]:
+ Smi;
+ module_variables[flags.scope_type == ScopeType::MODULE_SCOPE ? module_variable_count[0] : 0]:
+ ModuleVariable;
+}
+
+// Returns the index of the named local in a ScopeInfo.
+// Assumes that the given name is internalized; uses pointer comparisons.
+@export
+macro IndexOfLocalName(scopeInfo: ScopeInfo, name: Name):
+ intptr labels NotFound {
+ const count: intptr = Convert<intptr>(scopeInfo.context_local_count);
+ for (let i: intptr = 0; i < count; ++i) {
+ if (TaggedEqual(name, scopeInfo.context_local_names[i])) {
+ return i;
+ }
+ }
+ goto NotFound;
+}
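The IndexOfLocalName macro above can use TaggedEqual, i.e. a plain pointer comparison, because context-local names are internalized strings: each distinct name is represented by exactly one heap object. A rough C++ analogue of that lookup over an interning table (hypothetical stand-in types, not V8's String machinery):

#include <cstddef>
#include <string>
#include <unordered_set>
#include <vector>

// Interning table: returns one canonical object per distinct string.
const std::string* Intern(std::unordered_set<std::string>& table,
                          const std::string& s) {
  return &*table.insert(s).first;
}

// Linear scan with pointer equality, mirroring the Torque macro.
std::ptrdiff_t IndexOfLocalName(const std::vector<const std::string*>& names,
                                const std::string* name) {
  for (std::size_t i = 0; i < names.size(); ++i) {
    if (names[i] == name) return static_cast<std::ptrdiff_t>(i);
  }
  return -1;  // corresponds to the NotFound label
}

int main() {
  std::unordered_set<std::string> table;
  std::vector<const std::string*> locals = {Intern(table, "x"),
                                            Intern(table, "y")};
  return IndexOfLocalName(locals, Intern(table, "y")) == 1 ? 0 : 1;
}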
diff --git a/deps/v8/src/objects/script-inl.h b/deps/v8/src/objects/script-inl.h
index 00c8bb0e2e..3865a7ccda 100644
--- a/deps/v8/src/objects/script-inl.h
+++ b/deps/v8/src/objects/script-inl.h
@@ -107,6 +107,12 @@ void Script::set_is_repl_mode(bool value) {
set_flags(IsReplModeBit::update(flags(), value));
}
+bool Script::break_on_entry() const { return BreakOnEntryBit::decode(flags()); }
+
+void Script::set_break_on_entry(bool value) {
+ set_flags(BreakOnEntryBit::update(flags(), value));
+}
+
ScriptOriginOptions Script::origin_options() {
return ScriptOriginOptions(OriginOptionsBits::decode(flags()));
}
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index 6e3e633f53..b6da24372c 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -107,6 +107,13 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
inline bool is_repl_mode() const;
inline void set_is_repl_mode(bool value);
+ // [break_on_entry] (wasm only): whether an instrumentation breakpoint is set
+ // for this script; this information will be transferred to existing and
+ // future instances to make sure that we stop before executing any code in
+ // this wasm module.
+ inline bool break_on_entry() const;
+ inline void set_break_on_entry(bool value);
+
// [origin_options]: optional attributes set by the embedder via ScriptOrigin,
// and used by the embedder to make decisions about the script. V8 just passes
// this through. Encoded in the 'flags' field.
diff --git a/deps/v8/src/objects/script.tq b/deps/v8/src/objects/script.tq
index cac5ceb3ba..dac25360cb 100644
--- a/deps/v8/src/objects/script.tq
+++ b/deps/v8/src/objects/script.tq
@@ -10,6 +10,8 @@ bitfield struct ScriptFlags extends uint31 {
compilation_state: CompilationState: 1 bit;
is_repl_mode: bool: 1 bit;
origin_options: int32: 4 bit;
+ // Whether an instrumentation breakpoint is set for this script (wasm only).
+ break_on_entry: bool: 1 bit;
}
@generateCppClass
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index 22b75c724c..b3884f4487 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -15,6 +15,7 @@
#include "src/objects/scope-info.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/templates.h"
+#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -88,6 +89,8 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledData)
TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithoutPreparseData)
TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithPreparseData)
+TQ_OBJECT_CONSTRUCTORS_IMPL(BaselineData)
+
OBJECT_CONSTRUCTORS_IMPL(InterpreterData, Struct)
CAST_ACCESSOR(InterpreterData)
@@ -162,6 +165,8 @@ bool SharedFunctionInfo::needs_script_context() const {
template <typename LocalIsolate>
AbstractCode SharedFunctionInfo::abstract_code(LocalIsolate* isolate) {
+  // TODO(v8:11429): Decide if this returns bytecode or baseline code, when the
+ // latter is present.
if (HasBytecodeArray()) {
return AbstractCode::cast(GetBytecodeArray(isolate));
} else {
@@ -227,9 +232,6 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags2,
has_static_private_methods_or_accessors,
SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags2, has_optimized_at_least_once,
- SharedFunctionInfo::HasOptimizedAtLeastOnceBit)
-
BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags2, may_have_cached_code,
SharedFunctionInfo::MayHaveCachedCodeBit)
@@ -304,17 +306,6 @@ bool SharedFunctionInfo::is_wrapped() const {
return syntax_kind() == FunctionSyntaxKind::kWrapped;
}
-bool SharedFunctionInfo::needs_home_object() const {
- return NeedsHomeObjectBit::decode(flags());
-}
-
-void SharedFunctionInfo::set_needs_home_object(bool value) {
- int hints = flags();
- hints = NeedsHomeObjectBit::update(hints, value);
- set_flags(hints);
- UpdateFunctionMapIndex();
-}
-
bool SharedFunctionInfo::construct_as_builtin() const {
return ConstructAsBuiltinBit::decode(flags());
}
@@ -358,8 +349,8 @@ void SharedFunctionInfo::clear_padding() {
}
void SharedFunctionInfo::UpdateFunctionMapIndex() {
- int map_index = Context::FunctionMapIndex(
- language_mode(), kind(), HasSharedName(), needs_home_object());
+ int map_index =
+ Context::FunctionMapIndex(language_mode(), kind(), HasSharedName());
set_function_map_index(map_index);
}
@@ -495,7 +486,8 @@ FunctionTemplateInfo SharedFunctionInfo::get_api_func_data() const {
bool SharedFunctionInfo::HasBytecodeArray() const {
Object data = function_data(kAcquireLoad);
- return data.IsBytecodeArray() || data.IsInterpreterData();
+ return data.IsBytecodeArray() || data.IsInterpreterData() ||
+ data.IsBaselineData();
}
template <typename LocalIsolate>
@@ -509,7 +501,11 @@ BytecodeArray SharedFunctionInfo::GetBytecodeArray(
return GetDebugInfo().OriginalBytecodeArray();
}
- Object data = function_data(kAcquireLoad);
+ return GetActiveBytecodeArray();
+}
+
+BytecodeArray BaselineData::GetActiveBytecodeArray() const {
+ Object data = this->data();
if (data.IsBytecodeArray()) {
return BytecodeArray::cast(data);
} else {
@@ -518,10 +514,22 @@ BytecodeArray SharedFunctionInfo::GetBytecodeArray(
}
}
+void BaselineData::SetActiveBytecodeArray(BytecodeArray bytecode) {
+ Object data = this->data();
+ if (data.IsBytecodeArray()) {
+ set_data(bytecode);
+ } else {
+ DCHECK(data.IsInterpreterData());
+ InterpreterData::cast(data).set_bytecode_array(bytecode);
+ }
+}
+
BytecodeArray SharedFunctionInfo::GetActiveBytecodeArray() const {
Object data = function_data(kAcquireLoad);
if (data.IsBytecodeArray()) {
return BytecodeArray::cast(data);
+ } else if (data.IsBaselineData()) {
+ return baseline_data().GetActiveBytecodeArray();
} else {
DCHECK(data.IsInterpreterData());
return InterpreterData::cast(data).bytecode_array();
@@ -532,6 +540,8 @@ void SharedFunctionInfo::SetActiveBytecodeArray(BytecodeArray bytecode) {
Object data = function_data(kAcquireLoad);
if (data.IsBytecodeArray()) {
set_function_data(bytecode, kReleaseStore);
+ } else if (data.IsBaselineData()) {
+ baseline_data().SetActiveBytecodeArray(bytecode);
} else {
DCHECK(data.IsInterpreterData());
interpreter_data().set_bytecode_array(bytecode);
@@ -571,20 +581,43 @@ Code SharedFunctionInfo::InterpreterTrampoline() const {
}
bool SharedFunctionInfo::HasInterpreterData() const {
- return function_data(kAcquireLoad).IsInterpreterData();
+ Object data = function_data(kAcquireLoad);
+ if (data.IsBaselineData()) data = BaselineData::cast(data).data();
+ return data.IsInterpreterData();
}
InterpreterData SharedFunctionInfo::interpreter_data() const {
DCHECK(HasInterpreterData());
- return InterpreterData::cast(function_data(kAcquireLoad));
+ Object data = function_data(kAcquireLoad);
+ if (data.IsBaselineData()) data = BaselineData::cast(data).data();
+ return InterpreterData::cast(data);
}
void SharedFunctionInfo::set_interpreter_data(
InterpreterData interpreter_data) {
DCHECK(FLAG_interpreted_frames_native_stack);
+ DCHECK(!HasBaselineData());
set_function_data(interpreter_data, kReleaseStore);
}
+bool SharedFunctionInfo::HasBaselineData() const {
+ return function_data(kAcquireLoad).IsBaselineData();
+}
+
+BaselineData SharedFunctionInfo::baseline_data() const {
+ DCHECK(HasBaselineData());
+ return BaselineData::cast(function_data(kAcquireLoad));
+}
+
+void SharedFunctionInfo::set_baseline_data(BaselineData baseline_data) {
+ set_function_data(baseline_data, kReleaseStore);
+}
+
+void SharedFunctionInfo::flush_baseline_data() {
+ DCHECK(HasBaselineData());
+ set_function_data(baseline_data().data(), kReleaseStore);
+}
+
bool SharedFunctionInfo::HasAsmWasmData() const {
return function_data(kAcquireLoad).IsAsmWasmData();
}
@@ -702,6 +735,22 @@ bool SharedFunctionInfo::HasWasmJSFunctionData() const {
return function_data(kAcquireLoad).IsWasmJSFunctionData();
}
+const wasm::WasmModule* SharedFunctionInfo::wasm_module() const {
+ if (!HasWasmExportedFunctionData()) return nullptr;
+ const WasmExportedFunctionData& function_data = wasm_exported_function_data();
+ const WasmInstanceObject& wasm_instance = function_data.instance();
+ const WasmModuleObject& wasm_module_object = wasm_instance.module_object();
+ return wasm_module_object.module();
+}
+
+const wasm::FunctionSig* SharedFunctionInfo::wasm_function_signature() const {
+ const wasm::WasmModule* module = wasm_module();
+ if (!module) return nullptr;
+ const WasmExportedFunctionData& function_data = wasm_exported_function_data();
+ DCHECK_LT(function_data.function_index(), module->functions.size());
+ return module->functions[function_data.function_index()].sig;
+}
+
bool SharedFunctionInfo::HasWasmCapiFunctionData() const {
return function_data(kAcquireLoad).IsWasmCapiFunctionData();
}
@@ -777,8 +826,9 @@ bool SharedFunctionInfo::IsSubjectToDebugging() const {
}
bool SharedFunctionInfo::CanDiscardCompiled() const {
- bool can_decompile = (HasBytecodeArray() || HasAsmWasmData() ||
- HasUncompiledDataWithPreparseData());
+ bool can_decompile =
+ (HasBytecodeArray() || HasAsmWasmData() ||
+ HasUncompiledDataWithPreparseData() || HasBaselineData());
return can_decompile;
}
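With the BaselineData changes above, function_data can now hold a BaselineData wrapper whose data field is itself either a BytecodeArray or an InterpreterData, so accessors such as HasInterpreterData and GetActiveBytecodeArray first unwrap that wrapper and then dispatch as before. A toy sketch of the two-level dispatch using std::variant (hypothetical types; V8 stores tagged heap objects, not variants):

#include <variant>

struct BytecodeArray {};
struct InterpreterData { BytecodeArray bytecode_array; };
struct BaselineData {
  std::variant<BytecodeArray, InterpreterData> data;  // like BaselineData::data
};
using FunctionData =
    std::variant<BytecodeArray, InterpreterData, BaselineData>;

// Mirrors GetActiveBytecodeArray: peel off BaselineData, then pick the
// bytecode out of whichever representation remains.
BytecodeArray& ActiveBytecode(FunctionData& fd) {
  if (auto* baseline = std::get_if<BaselineData>(&fd)) {
    if (auto* bc = std::get_if<BytecodeArray>(&baseline->data)) return *bc;
    return std::get<InterpreterData>(baseline->data).bytecode_array;
  }
  if (auto* bc = std::get_if<BytecodeArray>(&fd)) return *bc;
  return std::get<InterpreterData>(fd).bytecode_array;
}

int main() {
  FunctionData fd = BaselineData{InterpreterData{}};
  ActiveBytecode(fd);  // resolves through the extra BaselineData indirection
  return 0;
}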
diff --git a/deps/v8/src/objects/shared-function-info.cc b/deps/v8/src/objects/shared-function-info.cc
index 503d678925..433c69de33 100644
--- a/deps/v8/src/objects/shared-function-info.cc
+++ b/deps/v8/src/objects/shared-function-info.cc
@@ -82,6 +82,10 @@ Code SharedFunctionInfo::GetCode() const {
// Having a bytecode array means we are a compiled, interpreted function.
DCHECK(HasBytecodeArray());
return isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
+ } else if (data.IsBaselineData()) {
+ // Having BaselineData means we are a compiled, baseline function.
+ DCHECK(HasBaselineData());
+ return baseline_data().baseline_code();
} else if (data.IsAsmWasmData()) {
// Having AsmWasmData means we are an asm.js/wasm function.
DCHECK(HasAsmWasmData());
@@ -452,7 +456,6 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
shared_info->set_function_literal_id(lit->function_literal_id());
// FunctionKind must have already been set.
DCHECK(lit->kind() == shared_info->kind());
- shared_info->set_needs_home_object(lit->scope()->NeedsHomeObject());
DCHECK_IMPLIES(lit->requires_instance_members_initializer(),
IsClassConstructor(lit->kind()));
shared_info->set_requires_instance_members_initializer(
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index eb130c23b7..4318b23d32 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -18,7 +18,8 @@
#include "src/objects/smi.h"
#include "src/objects/struct.h"
#include "src/roots/roots.h"
-#include "testing/gtest/include/gtest/gtest_prod.h"
+#include "src/wasm/value-type.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
#include "torque-generated/bit-fields.h"
#include "torque-generated/field-offsets.h"
@@ -34,10 +35,16 @@ class BytecodeArray;
class CoverageInfo;
class DebugInfo;
class IsCompiledScope;
+template <typename>
+class Signature;
class WasmCapiFunctionData;
class WasmExportedFunctionData;
class WasmJSFunctionData;
+namespace wasm {
+struct WasmModule;
+} // namespace wasm
+
#include "torque-generated/src/objects/shared-function-info-tq.inc"
// Data collected by the pre-parser storing information about scopes and inner
@@ -147,6 +154,14 @@ class InterpreterData : public Struct {
OBJECT_CONSTRUCTORS(InterpreterData, Struct);
};
+class BaselineData : public TorqueGeneratedBaselineData<BaselineData, Struct> {
+ public:
+ inline BytecodeArray GetActiveBytecodeArray() const;
+ inline void SetActiveBytecodeArray(BytecodeArray bytecode);
+
+ TQ_OBJECT_CONSTRUCTORS(BaselineData)
+};
+
// SharedFunctionInfo describes the JSFunction information that can be
// shared by multiple instances of the function.
class SharedFunctionInfo
@@ -293,6 +308,10 @@ class SharedFunctionInfo
inline bool HasInterpreterData() const;
inline InterpreterData interpreter_data() const;
inline void set_interpreter_data(InterpreterData interpreter_data);
+ inline bool HasBaselineData() const;
+ inline BaselineData baseline_data() const;
+ inline void set_baseline_data(BaselineData Baseline_data);
+ inline void flush_baseline_data();
inline BytecodeArray GetActiveBytecodeArray() const;
inline void SetActiveBytecodeArray(BytecodeArray bytecode);
inline bool HasAsmWasmData() const;
@@ -320,6 +339,9 @@ class SharedFunctionInfo
inline bool HasWasmCapiFunctionData() const;
WasmCapiFunctionData wasm_capi_function_data() const;
+ inline const wasm::WasmModule* wasm_module() const;
+ inline const wasm::FunctionSig* wasm_function_signature() const;
+
// Clear out pre-parsed scope data from UncompiledDataWithPreparseData,
// turning it into UncompiledDataWithoutPreparseData.
inline void ClearPreparseData();
@@ -390,10 +412,6 @@ class SharedFunctionInfo
DECL_BOOLEAN_ACCESSORS(class_scope_has_private_brand)
DECL_BOOLEAN_ACCESSORS(has_static_private_methods_or_accessors)
- // True if this SFI has been (non-OSR) optimized in the past. This is used to
- // guide native-context-independent codegen.
- DECL_BOOLEAN_ACCESSORS(has_optimized_at_least_once)
-
// True if a Code object associated with this SFI has been inserted into the
// compilation cache. Note that the cache entry may be removed by aging,
// hence the 'may'.
@@ -622,11 +640,6 @@ class SharedFunctionInfo
STATIC_ASSERT(FunctionSyntaxKind::kLastFunctionSyntaxKind <=
FunctionSyntaxKindBits::kMax);
- // Indicates that this function uses a super property (or an eval that may
- // use a super property).
- // This is needed to set up the [[HomeObject]] on the function instance.
- inline bool needs_home_object() const;
-
// Sets the bytecode in {shared}'s DebugInfo as the bytecode to
// be returned by following calls to GetActiveBytecodeArray. Stores a
// reference to the original bytecode in the DebugInfo.
@@ -660,8 +673,6 @@ class SharedFunctionInfo
inline void set_kind(FunctionKind kind);
- inline void set_needs_home_object(bool value);
-
inline uint16_t get_property_estimate_from_literal(FunctionLiteral* literal);
template <typename Impl>
diff --git a/deps/v8/src/objects/shared-function-info.tq b/deps/v8/src/objects/shared-function-info.tq
index fab396a2cc..b38598efbb 100644
--- a/deps/v8/src/objects/shared-function-info.tq
+++ b/deps/v8/src/objects/shared-function-info.tq
@@ -14,6 +14,13 @@ extern class InterpreterData extends Struct {
interpreter_trampoline: Code;
}
+@generateCppClass
+@generatePrint
+extern class BaselineData extends Struct {
+ baseline_code: Code;
+ data: BytecodeArray|InterpreterData;
+}
+
type FunctionKind extends uint8 constexpr 'FunctionKind';
type FunctionSyntaxKind extends uint8 constexpr 'FunctionSyntaxKind';
type BailoutReason extends uint8 constexpr 'BailoutReason';
@@ -27,7 +34,6 @@ bitfield struct SharedFunctionInfoFlags extends uint32 {
is_class_constructor: bool: 1 bit;
has_duplicate_parameters: bool: 1 bit;
allow_lazy_compilation: bool: 1 bit;
- needs_home_object: bool: 1 bit;
is_asm_wasm_broken: bool: 1 bit;
function_map_index: uint32: 5 bit;
disabled_optimization_reason: BailoutReason: 4 bit;
@@ -43,7 +49,6 @@ bitfield struct SharedFunctionInfoFlags extends uint32 {
bitfield struct SharedFunctionInfoFlags2 extends uint8 {
class_scope_has_private_brand: bool: 1 bit;
has_static_private_methods_or_accessors: bool: 1 bit;
- has_optimized_at_least_once: bool: 1 bit;
may_have_cached_code: bool: 1 bit;
}
@@ -99,7 +104,7 @@ class UncompiledDataWithPreparseData extends UncompiledData {
@export
class OnHeapBasicBlockProfilerData extends HeapObject {
block_ids: ByteArray; // Stored as 4-byte ints
- counts: ByteArray; // Stored as 8-byte floats
+ counts: ByteArray; // Stored as 4-byte unsigned ints
name: String;
schedule: String;
code: String;
diff --git a/deps/v8/src/objects/source-text-module.cc b/deps/v8/src/objects/source-text-module.cc
index 19808bbb57..4747283a15 100644
--- a/deps/v8/src/objects/source-text-module.cc
+++ b/deps/v8/src/objects/source-text-module.cc
@@ -624,40 +624,6 @@ MaybeHandle<JSObject> SourceTextModule::GetImportMeta(
MaybeHandle<Object> SourceTextModule::EvaluateMaybeAsync(
Isolate* isolate, Handle<SourceTextModule> module) {
- // In the event of errored evaluation, return a rejected promise.
- if (module->status() == kErrored) {
- // If we have a top level capability we assume it has already been
- // rejected, and return it here. Otherwise create a new promise and
- // reject it with the module's exception.
- if (module->top_level_capability().IsJSPromise()) {
- Handle<JSPromise> top_level_capability(
- JSPromise::cast(module->top_level_capability()), isolate);
- DCHECK(top_level_capability->status() == Promise::kRejected &&
- top_level_capability->result() == module->exception());
- return top_level_capability;
- }
- Handle<JSPromise> capability = isolate->factory()->NewJSPromise();
- JSPromise::Reject(capability, handle(module->exception(), isolate));
- return capability;
- }
-
- // Start of Evaluate () Concrete Method
- // 2. Assert: module.[[Status]] is "linked" or "evaluated".
- CHECK(module->status() == kInstantiated || module->status() == kEvaluated);
-
- // 3. If module.[[Status]] is "evaluated", set module to
- // module.[[CycleRoot]].
- if (module->status() == kEvaluated) {
- module = module->GetCycleRoot(isolate);
- }
-
- // 4. If module.[[TopLevelCapability]] is not undefined, then
- // a. Return module.[[TopLevelCapability]].[[Promise]].
- if (module->top_level_capability().IsJSPromise()) {
- return handle(JSPromise::cast(module->top_level_capability()), isolate);
- }
- DCHECK(module->top_level_capability().IsUndefined());
-
// 6. Let capability be ! NewPromiseCapability(%Promise%).
Handle<JSPromise> capability = isolate->factory()->NewJSPromise();
diff --git a/deps/v8/src/objects/source-text-module.h b/deps/v8/src/objects/source-text-module.h
index 1346f1db20..325fb7a2e3 100644
--- a/deps/v8/src/objects/source-text-module.h
+++ b/deps/v8/src/objects/source-text-module.h
@@ -109,10 +109,6 @@ class SourceTextModule
// an async child.
DECL_BOOLEAN_ACCESSORS(async_evaluating)
- // The top level promise capability of this module. Will only be defined
- // for cycle roots.
- DECL_ACCESSORS(top_level_capability, HeapObject)
-
// The parent modules of a given async dependency, use async_parent_modules()
// to retrieve the ArrayList representation.
DECL_ACCESSORS(async_parent_modules, ArrayList)
diff --git a/deps/v8/src/objects/source-text-module.tq b/deps/v8/src/objects/source-text-module.tq
index 3ecc40e292..d49aa79b15 100644
--- a/deps/v8/src/objects/source-text-module.tq
+++ b/deps/v8/src/objects/source-text-module.tq
@@ -37,7 +37,6 @@ extern class SourceTextModule extends Module {
cycle_root: SourceTextModule|TheHole;
async_parent_modules: ArrayList;
- top_level_capability: JSPromise|Undefined;
// TODO(neis): Don't store those in the module object?
dfs_index: Smi;
diff --git a/deps/v8/src/objects/stack-frame-info-inl.h b/deps/v8/src/objects/stack-frame-info-inl.h
index 376eda3a65..2df1e97ada 100644
--- a/deps/v8/src/objects/stack-frame-info-inl.h
+++ b/deps/v8/src/objects/stack-frame-info-inl.h
@@ -8,7 +8,6 @@
#include "src/objects/stack-frame-info.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects/frame-array-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/struct-inl.h"
@@ -21,24 +20,15 @@ namespace internal {
#include "torque-generated/src/objects/stack-frame-info-tq-inl.inc"
TQ_OBJECT_CONSTRUCTORS_IMPL(StackFrameInfo)
-
NEVER_READ_ONLY_SPACE_IMPL(StackFrameInfo)
-SMI_ACCESSORS_CHECKED(StackFrameInfo, function_offset,
- kPromiseCombinatorIndexOffset, is_wasm())
-BOOL_ACCESSORS(StackFrameInfo, flag, is_eval, IsEvalBit::kShift)
-BOOL_ACCESSORS(StackFrameInfo, flag, is_constructor, IsConstructorBit::kShift)
-BOOL_ACCESSORS(StackFrameInfo, flag, is_wasm, IsWasmBit::kShift)
-BOOL_ACCESSORS(StackFrameInfo, flag, is_asmjs_wasm, IsAsmJsWasmBit::kShift)
-BOOL_ACCESSORS(StackFrameInfo, flag, is_user_java_script,
- IsUserJavaScriptBit::kShift)
-BOOL_ACCESSORS(StackFrameInfo, flag, is_toplevel, IsToplevelBit::kShift)
-BOOL_ACCESSORS(StackFrameInfo, flag, is_async, IsAsyncBit::kShift)
-BOOL_ACCESSORS(StackFrameInfo, flag, is_promise_all, IsPromiseAllBit::kShift)
-BOOL_ACCESSORS(StackFrameInfo, flag, is_promise_any, IsPromiseAnyBit::kShift)
-
-TQ_OBJECT_CONSTRUCTORS_IMPL(StackTraceFrame)
-NEVER_READ_ONLY_SPACE_IMPL(StackTraceFrame)
+BOOL_GETTER(StackFrameInfo, flags, IsWasm, IsWasmBit::kShift)
+BOOL_GETTER(StackFrameInfo, flags, IsAsmJsWasm, IsAsmJsWasmBit::kShift)
+BOOL_GETTER(StackFrameInfo, flags, IsStrict, IsStrictBit::kShift)
+BOOL_GETTER(StackFrameInfo, flags, IsConstructor, IsConstructorBit::kShift)
+BOOL_GETTER(StackFrameInfo, flags, IsAsmJsAtNumberConversion,
+ IsAsmJsAtNumberConversionBit::kShift)
+BOOL_GETTER(StackFrameInfo, flags, IsAsync, IsAsyncBit::kShift)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/stack-frame-info.cc b/deps/v8/src/objects/stack-frame-info.cc
index b03329ea02..dff2e8e7ec 100644
--- a/deps/v8/src/objects/stack-frame-info.cc
+++ b/deps/v8/src/objects/stack-frame-info.cc
@@ -4,238 +4,510 @@
#include "src/objects/stack-frame-info.h"
+#include "src/objects/shared-function-info.h"
#include "src/objects/stack-frame-info-inl.h"
#include "src/strings/string-builder-inl.h"
namespace v8 {
namespace internal {
-// static
-int StackTraceFrame::GetLineNumber(Handle<StackTraceFrame> frame) {
- int line = GetFrameInfo(frame)->line_number();
- return line != StackFrameBase::kNone ? line : Message::kNoLineNumberInfo;
-}
-
-// static
-int StackTraceFrame::GetOneBasedLineNumber(Handle<StackTraceFrame> frame) {
- // JavaScript line numbers are already 1-based. Wasm line numbers need
- // to be adjusted.
- int line = StackTraceFrame::GetLineNumber(frame);
- if (StackTraceFrame::IsWasm(frame) && line >= 0) line++;
- return line;
+bool StackFrameInfo::IsPromiseAll() const {
+ if (!IsAsync()) return false;
+ JSFunction fun = JSFunction::cast(function());
+ return fun == fun.native_context().promise_all();
}
-// static
-int StackTraceFrame::GetColumnNumber(Handle<StackTraceFrame> frame) {
- int column = GetFrameInfo(frame)->column_number();
- return column != StackFrameBase::kNone ? column : Message::kNoColumnInfo;
+bool StackFrameInfo::IsPromiseAny() const {
+ if (!IsAsync()) return false;
+ JSFunction fun = JSFunction::cast(function());
+ return fun == fun.native_context().promise_any();
}
-// static
-int StackTraceFrame::GetOneBasedColumnNumber(Handle<StackTraceFrame> frame) {
-  // JavaScript column numbers are already 1-based. Wasm column numbers need
- // to be adjusted.
- int column = StackTraceFrame::GetColumnNumber(frame);
- if (StackTraceFrame::IsWasm(frame) && column >= 0) column++;
- return column;
+bool StackFrameInfo::IsNative() const {
+ if (auto script = GetScript()) {
+ return script->type() == Script::TYPE_NATIVE;
+ }
+ return false;
}
-// static
-int StackTraceFrame::GetScriptId(Handle<StackTraceFrame> frame) {
- Isolate* isolate = frame->GetIsolate();
-
- // Use FrameInfo if it's already there, but avoid initializing it for just
- // the script id, as it is much more expensive than just getting this
- // directly. See GetScriptNameOrSourceUrl() for more detail.
- int id;
- if (!frame->frame_info().IsUndefined()) {
- id = GetFrameInfo(frame)->script_id();
- } else {
- FrameArrayIterator it(
- isolate, handle(FrameArray::cast(frame->frame_array()), isolate),
- frame->frame_index());
- DCHECK(it.HasFrame());
- id = it.Frame()->GetScriptId();
+bool StackFrameInfo::IsEval() const {
+ if (auto script = GetScript()) {
+ return script->compilation_type() == Script::COMPILATION_TYPE_EVAL;
}
- return id != StackFrameBase::kNone ? id : Message::kNoScriptIdInfo;
+ return false;
}
-// static
-int StackTraceFrame::GetPromiseCombinatorIndex(Handle<StackTraceFrame> frame) {
- return GetFrameInfo(frame)->promise_combinator_index();
+bool StackFrameInfo::IsUserJavaScript() const {
+ return !IsWasm() && GetSharedFunctionInfo().IsUserJavaScript();
}
-// static
-int StackTraceFrame::GetFunctionOffset(Handle<StackTraceFrame> frame) {
- DCHECK(IsWasm(frame));
- return GetFrameInfo(frame)->function_offset();
+bool StackFrameInfo::IsMethodCall() const {
+ return !IsWasm() && !IsToplevel() && !IsConstructor();
}
-// static
-int StackTraceFrame::GetWasmFunctionIndex(Handle<StackTraceFrame> frame) {
- return GetFrameInfo(frame)->wasm_function_index();
+bool StackFrameInfo::IsToplevel() const {
+ return receiver_or_instance().IsJSGlobalProxy() ||
+ receiver_or_instance().IsNullOrUndefined();
}
// static
-Handle<Object> StackTraceFrame::GetFileName(Handle<StackTraceFrame> frame) {
- Isolate* isolate = frame->GetIsolate();
-
- // Use FrameInfo if it's already there, but avoid initializing it for just
- // the file name, as it is much more expensive than just getting this
- // directly. See GetScriptNameOrSourceUrl() for more detail.
- if (!frame->frame_info().IsUndefined()) {
- auto name = GetFrameInfo(frame)->script_name();
- return handle(name, isolate);
+int StackFrameInfo::GetLineNumber(Handle<StackFrameInfo> info) {
+ Isolate* isolate = info->GetIsolate();
+ if (info->IsWasm() && !info->IsAsmJsWasm()) {
+ return 1;
+ }
+ Handle<Script> script;
+ if (GetScript(isolate, info).ToHandle(&script)) {
+ int position = GetSourcePosition(info);
+ return Script::GetLineNumber(script, position) + 1;
}
- FrameArrayIterator it(isolate,
- handle(FrameArray::cast(frame->frame_array()), isolate),
- frame->frame_index());
- DCHECK(it.HasFrame());
- return it.Frame()->GetFileName();
+ return Message::kNoLineNumberInfo;
}
// static
-Handle<Object> StackTraceFrame::GetScriptNameOrSourceUrl(
- Handle<StackTraceFrame> frame) {
- Isolate* isolate = frame->GetIsolate();
- // TODO(caseq, szuend): the logic below is a workaround for crbug.com/1057211.
- // We should probably have a dedicated API for the scenario described in the
- // bug above and make getters of this class behave consistently.
- // See https://bit.ly/2wkbuIy for further discussion.
- // Use FrameInfo if it's already there, but avoid initializing it for just
- // the script name, as it is much more expensive than just getting this
- // directly.
- if (!frame->frame_info().IsUndefined()) {
- auto name = GetFrameInfo(frame)->script_name_or_source_url();
- return handle(name, isolate);
- }
- FrameArrayIterator it(isolate,
- handle(FrameArray::cast(frame->frame_array()), isolate),
- frame->frame_index());
- DCHECK(it.HasFrame());
- return it.Frame()->GetScriptNameOrSourceUrl();
+int StackFrameInfo::GetColumnNumber(Handle<StackFrameInfo> info) {
+ Isolate* isolate = info->GetIsolate();
+ int position = GetSourcePosition(info);
+ if (info->IsWasm() && !info->IsAsmJsWasm()) {
+ return position + 1;
+ }
+ Handle<Script> script;
+ if (GetScript(isolate, info).ToHandle(&script)) {
+ return Script::GetColumnNumber(script, position) + 1;
+ }
+ return Message::kNoColumnInfo;
}
// static
-Handle<Object> StackTraceFrame::GetFunctionName(Handle<StackTraceFrame> frame) {
- auto name = GetFrameInfo(frame)->function_name();
- return handle(name, frame->GetIsolate());
+int StackFrameInfo::GetEnclosingLineNumber(Handle<StackFrameInfo> info) {
+ Isolate* isolate = info->GetIsolate();
+ if (info->IsWasm() && !info->IsAsmJsWasm()) {
+ return 1;
+ }
+ Handle<Script> script;
+ if (GetScript(isolate, info).ToHandle(&script)) {
+ int position;
+ if (info->IsAsmJsWasm()) {
+ auto module = info->GetWasmInstance().module();
+ auto func_index = info->GetWasmFunctionIndex();
+ position = wasm::GetSourcePosition(module, func_index, 0,
+ info->IsAsmJsAtNumberConversion());
+ } else {
+ position = info->GetSharedFunctionInfo().function_token_position();
+ }
+ return Script::GetLineNumber(script, position) + 1;
+ }
+ return Message::kNoLineNumberInfo;
}
// static
-Handle<Object> StackTraceFrame::GetMethodName(Handle<StackTraceFrame> frame) {
- auto name = GetFrameInfo(frame)->method_name();
- return handle(name, frame->GetIsolate());
+int StackFrameInfo::GetEnclosingColumnNumber(Handle<StackFrameInfo> info) {
+ Isolate* isolate = info->GetIsolate();
+ if (info->IsWasm() && !info->IsAsmJsWasm()) {
+ auto module = info->GetWasmInstance().module();
+ auto func_index = info->GetWasmFunctionIndex();
+ return GetWasmFunctionOffset(module, func_index);
+ }
+ Handle<Script> script;
+ if (GetScript(isolate, info).ToHandle(&script)) {
+ int position;
+ if (info->IsAsmJsWasm()) {
+ auto module = info->GetWasmInstance().module();
+ auto func_index = info->GetWasmFunctionIndex();
+ position = wasm::GetSourcePosition(module, func_index, 0,
+ info->IsAsmJsAtNumberConversion());
+ } else {
+ position = info->GetSharedFunctionInfo().function_token_position();
+ }
+ return Script::GetColumnNumber(script, position) + 1;
+ }
+ return Message::kNoColumnInfo;
}
-// static
-Handle<Object> StackTraceFrame::GetTypeName(Handle<StackTraceFrame> frame) {
- auto name = GetFrameInfo(frame)->type_name();
- return handle(name, frame->GetIsolate());
+int StackFrameInfo::GetScriptId() const {
+ if (auto script = GetScript()) {
+ return script->id();
+ }
+ return Message::kNoScriptIdInfo;
}
-// static
-Handle<Object> StackTraceFrame::GetEvalOrigin(Handle<StackTraceFrame> frame) {
- auto origin = GetFrameInfo(frame)->eval_origin();
- return handle(origin, frame->GetIsolate());
+Object StackFrameInfo::GetScriptName() const {
+ if (auto script = GetScript()) {
+ return script->name();
+ }
+ return ReadOnlyRoots(GetIsolate()).null_value();
}
-// static
-Handle<Object> StackTraceFrame::GetWasmModuleName(
- Handle<StackTraceFrame> frame) {
- auto module = GetFrameInfo(frame)->wasm_module_name();
- return handle(module, frame->GetIsolate());
+Object StackFrameInfo::GetScriptNameOrSourceURL() const {
+ if (auto script = GetScript()) {
+ return script->GetNameOrSourceURL();
+ }
+ return ReadOnlyRoots(GetIsolate()).null_value();
}
-// static
-Handle<WasmInstanceObject> StackTraceFrame::GetWasmInstance(
- Handle<StackTraceFrame> frame) {
- Object instance = GetFrameInfo(frame)->wasm_instance();
- return handle(WasmInstanceObject::cast(instance), frame->GetIsolate());
+namespace {
+
+MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
+ Handle<Object> sourceURL(script->GetNameOrSourceURL(), isolate);
+ if (sourceURL->IsString()) return Handle<String>::cast(sourceURL);
+
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCString("eval at ");
+ if (script->has_eval_from_shared()) {
+ Handle<SharedFunctionInfo> eval_shared(script->eval_from_shared(), isolate);
+ auto eval_name = SharedFunctionInfo::DebugName(eval_shared);
+ if (eval_name->length() != 0) {
+ builder.AppendString(eval_name);
+ } else {
+ builder.AppendCString("<anonymous>");
+ }
+ if (eval_shared->script().IsScript()) {
+ Handle<Script> eval_script(Script::cast(eval_shared->script()), isolate);
+ builder.AppendCString(" (");
+ if (eval_script->compilation_type() == Script::COMPILATION_TYPE_EVAL) {
+ // Eval script originated from another eval.
+ Handle<String> str;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, str, FormatEvalOrigin(isolate, eval_script), String);
+ builder.AppendString(str);
+ } else {
+ // eval script originated from "real" source.
+ Handle<Object> eval_script_name(eval_script->name(), isolate);
+ if (eval_script_name->IsString()) {
+ builder.AppendString(Handle<String>::cast(eval_script_name));
+ Script::PositionInfo info;
+ if (Script::GetPositionInfo(eval_script,
+ Script::GetEvalPosition(isolate, script),
+ &info, Script::NO_OFFSET)) {
+ builder.AppendCString(":");
+ builder.AppendInt(info.line + 1);
+ builder.AppendCString(":");
+ builder.AppendInt(info.column + 1);
+ }
+ } else {
+ builder.AppendCString("unknown source");
+ }
+ }
+ builder.AppendCString(")");
+ }
+ } else {
+ builder.AppendCString("<anonymous>");
+ }
+ return builder.Finish().ToHandleChecked();
}
+} // namespace
+
// static
-bool StackTraceFrame::IsEval(Handle<StackTraceFrame> frame) {
- return GetFrameInfo(frame)->is_eval();
+Handle<PrimitiveHeapObject> StackFrameInfo::GetEvalOrigin(
+ Handle<StackFrameInfo> info) {
+ auto isolate = info->GetIsolate();
+ Handle<Script> script;
+ if (!GetScript(isolate, info).ToHandle(&script) ||
+ script->compilation_type() != Script::COMPILATION_TYPE_EVAL) {
+ return isolate->factory()->undefined_value();
+ }
+ return FormatEvalOrigin(isolate, script).ToHandleChecked();
}
// static
-bool StackTraceFrame::IsConstructor(Handle<StackTraceFrame> frame) {
- return GetFrameInfo(frame)->is_constructor();
+Handle<Object> StackFrameInfo::GetFunctionName(Handle<StackFrameInfo> info) {
+ Isolate* isolate = info->GetIsolate();
+ if (info->IsWasm()) {
+ Handle<WasmModuleObject> module_object(
+ info->GetWasmInstance().module_object(), isolate);
+ uint32_t func_index = info->GetWasmFunctionIndex();
+ Handle<String> name;
+ if (WasmModuleObject::GetFunctionNameOrNull(isolate, module_object,
+ func_index)
+ .ToHandle(&name)) {
+ return name;
+ }
+ } else {
+ Handle<JSFunction> function(JSFunction::cast(info->function()), isolate);
+ Handle<String> name = JSFunction::GetDebugName(function);
+ if (name->length() != 0) return name;
+ if (info->IsEval()) return isolate->factory()->eval_string();
+ }
+ return isolate->factory()->null_value();
}
-// static
-bool StackTraceFrame::IsWasm(Handle<StackTraceFrame> frame) {
- return GetFrameInfo(frame)->is_wasm();
+namespace {
+
+PrimitiveHeapObject InferMethodNameFromFastObject(Isolate* isolate,
+ JSObject receiver,
+ JSFunction fun,
+ PrimitiveHeapObject name) {
+ ReadOnlyRoots roots(isolate);
+ Map map = receiver.map();
+ DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
+ for (auto i : map.IterateOwnDescriptors()) {
+ PrimitiveHeapObject key = descriptors.GetKey(i);
+ if (key.IsSymbol()) continue;
+ auto details = descriptors.GetDetails(i);
+ if (details.IsDontEnum()) continue;
+ Object value;
+ if (details.location() == kField) {
+ auto field_index = FieldIndex::ForPropertyIndex(
+ map, details.field_index(), details.representation());
+ if (field_index.is_double()) continue;
+ value = receiver.RawFastPropertyAt(isolate, field_index);
+ } else {
+ value = descriptors.GetStrongValue(i);
+ }
+ if (value != fun) {
+ if (!value.IsAccessorPair()) continue;
+ auto pair = AccessorPair::cast(value);
+ if (pair.getter() != fun && pair.setter() != fun) continue;
+ }
+ if (name != key) {
+ name = name.IsUndefined(isolate) ? key : roots.null_value();
+ }
+ }
+ return name;
+}
+
+template <typename Dictionary>
+PrimitiveHeapObject InferMethodNameFromDictionary(Isolate* isolate,
+ Dictionary dictionary,
+ JSFunction fun,
+ PrimitiveHeapObject name) {
+ ReadOnlyRoots roots(isolate);
+ for (auto i : dictionary.IterateEntries()) {
+ Object key;
+ if (!dictionary.ToKey(roots, i, &key)) continue;
+ if (key.IsSymbol()) continue;
+ auto details = dictionary.DetailsAt(i);
+ if (details.IsDontEnum()) continue;
+ auto value = dictionary.ValueAt(i);
+ if (value != fun) {
+ if (!value.IsAccessorPair()) continue;
+ auto pair = AccessorPair::cast(value);
+ if (pair.getter() != fun && pair.setter() != fun) continue;
+ }
+ if (name != key) {
+ name = name.IsUndefined(isolate) ? PrimitiveHeapObject::cast(key)
+ : roots.null_value();
+ }
+ }
+ return name;
+}
+
+PrimitiveHeapObject InferMethodName(Isolate* isolate, JSReceiver receiver,
+ JSFunction fun) {
+ DisallowGarbageCollection no_gc;
+ ReadOnlyRoots roots(isolate);
+ PrimitiveHeapObject name = roots.undefined_value();
+ for (PrototypeIterator it(isolate, receiver, kStartAtReceiver); !it.IsAtEnd();
+ it.Advance()) {
+ auto current = it.GetCurrent();
+ if (!current.IsJSObject()) break;
+ auto object = JSObject::cast(current);
+ if (object.IsAccessCheckNeeded()) break;
+ if (object.HasFastProperties()) {
+ name = InferMethodNameFromFastObject(isolate, object, fun, name);
+ } else if (object.IsJSGlobalObject()) {
+ name = InferMethodNameFromDictionary(
+ isolate, JSGlobalObject::cast(object).global_dictionary(kAcquireLoad),
+ fun, name);
+ } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ name = InferMethodNameFromDictionary(
+ isolate, object.property_dictionary_ordered(), fun, name);
+ } else {
+ name = InferMethodNameFromDictionary(
+ isolate, object.property_dictionary(), fun, name);
+ }
+ }
+ if (name.IsUndefined(isolate)) return roots.null_value();
+ return name;
}
+} // namespace
+
// static
-bool StackTraceFrame::IsAsmJsWasm(Handle<StackTraceFrame> frame) {
- return GetFrameInfo(frame)->is_asmjs_wasm();
+Handle<Object> StackFrameInfo::GetMethodName(Handle<StackFrameInfo> info) {
+ Isolate* isolate = info->GetIsolate();
+ Handle<Object> receiver_or_instance(info->receiver_or_instance(), isolate);
+ if (info->IsWasm() || receiver_or_instance->IsNullOrUndefined(isolate)) {
+ return isolate->factory()->null_value();
+ }
+
+ Handle<JSReceiver> receiver =
+ JSReceiver::ToObject(isolate, receiver_or_instance).ToHandleChecked();
+ Handle<JSFunction> function =
+ handle(JSFunction::cast(info->function()), isolate);
+ Handle<String> name(function->shared().Name(), isolate);
+ name = String::Flatten(isolate, name);
+
+ // The static initializer function is not a method, so don't add a
+ // class name, just return the function name.
+ if (name->HasOneBytePrefix(CStrVector("<static_fields_initializer>"))) {
+ return name;
+ }
+
+ // ES2015 gives getters and setters name prefixes which must
+ // be stripped to find the property name.
+ if (name->HasOneBytePrefix(CStrVector("get ")) ||
+ name->HasOneBytePrefix(CStrVector("set "))) {
+ name = isolate->factory()->NewProperSubString(name, 4, name->length());
+ } else if (name->length() == 0) {
+ // The function doesn't have a meaningful "name" property, however
+ // the parser does store an inferred name "o.foo" for the common
+ // case of `o.foo = function() {...}`, so see if we can derive a
+ // property name to guess from that.
+ name = handle(function->shared().inferred_name(), isolate);
+ for (int index = name->length(); --index >= 0;) {
+ if (name->Get(index, isolate) == '.') {
+ name = isolate->factory()->NewProperSubString(name, index + 1,
+ name->length());
+ break;
+ }
+ }
+ }
+
+ if (name->length() != 0) {
+ LookupIterator::Key key(isolate, Handle<Name>::cast(name));
+ LookupIterator it(isolate, receiver, key,
+ LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ if (it.state() == LookupIterator::DATA) {
+ if (it.GetDataValue().is_identical_to(function)) {
+ return name;
+ }
+ } else if (it.state() == LookupIterator::ACCESSOR) {
+ Handle<Object> accessors = it.GetAccessors();
+ if (accessors->IsAccessorPair()) {
+ Handle<AccessorPair> pair = Handle<AccessorPair>::cast(accessors);
+ if (pair->getter() == *function || pair->setter() == *function) {
+ return name;
+ }
+ }
+ }
+ }
+
+ return handle(InferMethodName(isolate, *receiver, *function), isolate);
}
// static
-bool StackTraceFrame::IsUserJavaScript(Handle<StackTraceFrame> frame) {
- return GetFrameInfo(frame)->is_user_java_script();
+Handle<Object> StackFrameInfo::GetTypeName(Handle<StackFrameInfo> info) {
+ Isolate* isolate = info->GetIsolate();
+ if (!info->IsMethodCall()) {
+ return isolate->factory()->null_value();
+ }
+ Handle<JSReceiver> receiver =
+ JSReceiver::ToObject(isolate,
+ handle(info->receiver_or_instance(), isolate))
+ .ToHandleChecked();
+ if (receiver->IsJSProxy()) {
+ return isolate->factory()->Proxy_string();
+ }
+ return JSReceiver::GetConstructorName(receiver);
}
-// static
-bool StackTraceFrame::IsToplevel(Handle<StackTraceFrame> frame) {
- return GetFrameInfo(frame)->is_toplevel();
+uint32_t StackFrameInfo::GetWasmFunctionIndex() const {
+ DCHECK(IsWasm());
+ return Smi::ToInt(Smi::cast(function()));
}
-// static
-bool StackTraceFrame::IsAsync(Handle<StackTraceFrame> frame) {
- return GetFrameInfo(frame)->is_async();
+WasmInstanceObject StackFrameInfo::GetWasmInstance() const {
+ DCHECK(IsWasm());
+ return WasmInstanceObject::cast(receiver_or_instance());
}
// static
-bool StackTraceFrame::IsPromiseAll(Handle<StackTraceFrame> frame) {
- return GetFrameInfo(frame)->is_promise_all();
+int StackFrameInfo::GetSourcePosition(Handle<StackFrameInfo> info) {
+ if (info->flags() & kIsSourcePositionComputed) {
+ return info->code_offset_or_source_position();
+ }
+ DCHECK(!info->IsPromiseAll());
+ DCHECK(!info->IsPromiseAny());
+ int source_position =
+ ComputeSourcePosition(info, info->code_offset_or_source_position());
+ info->set_code_offset_or_source_position(source_position);
+ info->set_flags(info->flags() | kIsSourcePositionComputed);
+ return source_position;
}
// static
-bool StackTraceFrame::IsPromiseAny(Handle<StackTraceFrame> frame) {
- return GetFrameInfo(frame)->is_promise_any();
+bool StackFrameInfo::ComputeLocation(Handle<StackFrameInfo> info,
+ MessageLocation* location) {
+ Isolate* isolate = info->GetIsolate();
+ if (info->IsWasm()) {
+ int pos = GetSourcePosition(info);
+ Handle<Script> script(info->GetWasmInstance().module_object().script(),
+ isolate);
+ *location = MessageLocation(script, pos, pos + 1);
+ return true;
+ }
+
+ Handle<SharedFunctionInfo> shared(info->GetSharedFunctionInfo(), isolate);
+ if (!shared->IsSubjectToDebugging()) return false;
+ Handle<Script> script(Script::cast(shared->script()), isolate);
+ if (script->source().IsUndefined()) return false;
+ if (info->flags() & kIsSourcePositionComputed ||
+ (shared->HasBytecodeArray() &&
+ shared->GetBytecodeArray(isolate).HasSourcePositionTable())) {
+ int pos = GetSourcePosition(info);
+ *location = MessageLocation(script, pos, pos + 1, shared);
+ } else {
+ int code_offset = info->code_offset_or_source_position();
+ *location = MessageLocation(script, shared, code_offset);
+ }
+ return true;
}
// static
-Handle<StackFrameInfo> StackTraceFrame::GetFrameInfo(
- Handle<StackTraceFrame> frame) {
- if (frame->frame_info().IsUndefined()) InitializeFrameInfo(frame);
- return handle(StackFrameInfo::cast(frame->frame_info()), frame->GetIsolate());
+int StackFrameInfo::ComputeSourcePosition(Handle<StackFrameInfo> info,
+ int offset) {
+ Isolate* isolate = info->GetIsolate();
+ if (info->IsWasm()) {
+ auto code_ref = Managed<wasm::GlobalWasmCodeRef>::cast(info->code_object());
+ int byte_offset = code_ref.get()->code()->GetSourcePositionBefore(offset);
+ auto module = info->GetWasmInstance().module();
+ uint32_t func_index = info->GetWasmFunctionIndex();
+ return wasm::GetSourcePosition(module, func_index, byte_offset,
+ info->IsAsmJsAtNumberConversion());
+ }
+ Handle<SharedFunctionInfo> shared(info->GetSharedFunctionInfo(), isolate);
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared);
+ return AbstractCode::cast(info->code_object()).SourcePosition(offset);
}
// static
-void StackTraceFrame::InitializeFrameInfo(Handle<StackTraceFrame> frame) {
- TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
- "SymbolizeStackFrame", "frameIndex", frame->frame_index());
+Handle<Object> StackFrameInfo::GetWasmModuleName(Handle<StackFrameInfo> info) {
+ Isolate* isolate = info->GetIsolate();
+ if (info->IsWasm()) {
+ Handle<String> name;
+ auto module_object =
+ handle(info->GetWasmInstance().module_object(), isolate);
+ if (WasmModuleObject::GetModuleNameOrNull(isolate, module_object)
+ .ToHandle(&name)) {
+ return name;
+ }
+ }
+ return isolate->factory()->null_value();
+}
- Isolate* isolate = frame->GetIsolate();
- Handle<StackFrameInfo> frame_info = isolate->factory()->NewStackFrameInfo(
- handle(FrameArray::cast(frame->frame_array()), isolate),
- frame->frame_index());
- frame->set_frame_info(*frame_info);
+base::Optional<Script> StackFrameInfo::GetScript() const {
+ if (IsWasm()) {
+ return GetWasmInstance().module_object().script();
+ }
+ Object script = GetSharedFunctionInfo().script();
+ if (script.IsScript()) return Script::cast(script);
+ return base::nullopt;
+}
- // After initializing, we no longer need to keep a reference
- // to the frame_array.
- frame->set_frame_array(ReadOnlyRoots(isolate).undefined_value());
- frame->set_frame_index(-1);
+SharedFunctionInfo StackFrameInfo::GetSharedFunctionInfo() const {
+ DCHECK(!IsWasm());
+ return JSFunction::cast(function()).shared();
}
-Handle<FrameArray> GetFrameArrayFromStackTrace(Isolate* isolate,
- Handle<FixedArray> stack_trace) {
- // For the empty case, a empty FrameArray needs to be allocated so the rest
- // of the code doesn't has to be special cased everywhere.
- if (stack_trace->length() == 0) {
- return isolate->factory()->NewFrameArray(0);
+// static
+MaybeHandle<Script> StackFrameInfo::GetScript(Isolate* isolate,
+ Handle<StackFrameInfo> info) {
+ if (auto script = info->GetScript()) {
+ return handle(*script, isolate);
}
-
- // Retrieve the FrameArray from the first StackTraceFrame.
- DCHECK_GT(stack_trace->length(), 0);
- Handle<StackTraceFrame> frame(StackTraceFrame::cast(stack_trace->get(0)),
- isolate);
- return handle(FrameArray::cast(frame->frame_array()), isolate);
+ return kNullMaybeHandle;
}
namespace {
@@ -244,18 +516,18 @@ bool IsNonEmptyString(Handle<Object> object) {
return (object->IsString() && String::cast(*object).length() > 0);
}
-void AppendFileLocation(Isolate* isolate, Handle<StackTraceFrame> frame,
+void AppendFileLocation(Isolate* isolate, Handle<StackFrameInfo> frame,
IncrementalStringBuilder* builder) {
- Handle<Object> file_name = StackTraceFrame::GetScriptNameOrSourceUrl(frame);
- if (!file_name->IsString() && StackTraceFrame::IsEval(frame)) {
- Handle<Object> eval_origin = StackTraceFrame::GetEvalOrigin(frame);
- DCHECK(eval_origin->IsString());
- builder->AppendString(Handle<String>::cast(eval_origin));
+ Handle<Object> script_name_or_source_url(frame->GetScriptNameOrSourceURL(),
+ isolate);
+ if (!script_name_or_source_url->IsString() && frame->IsEval()) {
+ builder->AppendString(
+ Handle<String>::cast(StackFrameInfo::GetEvalOrigin(frame)));
builder->AppendCString(", "); // Expecting source position to follow.
}
- if (IsNonEmptyString(file_name)) {
- builder->AppendString(Handle<String>::cast(file_name));
+ if (IsNonEmptyString(script_name_or_source_url)) {
+ builder->AppendString(Handle<String>::cast(script_name_or_source_url));
} else {
// Source code does not originate from a file and is not native, but we
// can still get the source position inside the source string, e.g. in
@@ -263,12 +535,12 @@ void AppendFileLocation(Isolate* isolate, Handle<StackTraceFrame> frame,
builder->AppendCString("<anonymous>");
}
- int line_number = StackTraceFrame::GetLineNumber(frame);
+ int line_number = StackFrameInfo::GetLineNumber(frame);
if (line_number != Message::kNoLineNumberInfo) {
builder->AppendCharacter(':');
builder->AppendInt(line_number);
- int column_number = StackTraceFrame::GetColumnNumber(frame);
+ int column_number = StackFrameInfo::GetColumnNumber(frame);
if (column_number != Message::kNoColumnInfo) {
builder->AppendCharacter(':');
builder->AppendInt(column_number);
@@ -313,11 +585,11 @@ bool StringEndsWithMethodName(Isolate* isolate, Handle<String> subject,
return true;
}
-void AppendMethodCall(Isolate* isolate, Handle<StackTraceFrame> frame,
+void AppendMethodCall(Isolate* isolate, Handle<StackFrameInfo> frame,
IncrementalStringBuilder* builder) {
- Handle<Object> type_name = StackTraceFrame::GetTypeName(frame);
- Handle<Object> method_name = StackTraceFrame::GetMethodName(frame);
- Handle<Object> function_name = StackTraceFrame::GetFunctionName(frame);
+ Handle<Object> type_name = StackFrameInfo::GetTypeName(frame);
+ Handle<Object> method_name = StackFrameInfo::GetMethodName(frame);
+ Handle<Object> function_name = StackFrameInfo::GetFunctionName(frame);
if (IsNonEmptyString(function_name)) {
Handle<String> function_string = Handle<String>::cast(function_name);
@@ -353,39 +625,23 @@ void AppendMethodCall(Isolate* isolate, Handle<StackTraceFrame> frame,
}
}
-void SerializeJSStackFrame(Isolate* isolate, Handle<StackTraceFrame> frame,
+void SerializeJSStackFrame(Isolate* isolate, Handle<StackFrameInfo> frame,
IncrementalStringBuilder* builder) {
- Handle<Object> function_name = StackTraceFrame::GetFunctionName(frame);
-
- const bool is_toplevel = StackTraceFrame::IsToplevel(frame);
- const bool is_async = StackTraceFrame::IsAsync(frame);
- const bool is_promise_all = StackTraceFrame::IsPromiseAll(frame);
- const bool is_promise_any = StackTraceFrame::IsPromiseAny(frame);
- const bool is_constructor = StackTraceFrame::IsConstructor(frame);
- // Note: Keep the {is_method_call} predicate in sync with the corresponding
- // predicate in factory.cc where the StackFrameInfo is created.
- // Otherwise necessary fields for serialzing this frame might be
- // missing.
- const bool is_method_call = !(is_toplevel || is_constructor);
-
- if (is_async) {
+ Handle<Object> function_name = StackFrameInfo::GetFunctionName(frame);
+ if (frame->IsAsync()) {
builder->AppendCString("async ");
+ if (frame->IsPromiseAll() || frame->IsPromiseAny()) {
+ builder->AppendCString("Promise.");
+ builder->AppendString(Handle<String>::cast(function_name));
+ builder->AppendCString(" (index ");
+ builder->AppendInt(StackFrameInfo::GetSourcePosition(frame));
+ builder->AppendCString(")");
+ return;
+ }
}
- if (is_promise_all) {
- builder->AppendCString("Promise.all (index ");
- builder->AppendInt(StackTraceFrame::GetPromiseCombinatorIndex(frame));
- builder->AppendCString(")");
- return;
- }
- if (is_promise_any) {
- builder->AppendCString("Promise.any (index ");
- builder->AppendInt(StackTraceFrame::GetPromiseCombinatorIndex(frame));
- builder->AppendCString(")");
- return;
- }
- if (is_method_call) {
+ if (frame->IsMethodCall()) {
AppendMethodCall(isolate, frame, builder);
- } else if (is_constructor) {
+ } else if (frame->IsConstructor()) {
builder->AppendCString("new ");
if (IsNonEmptyString(function_name)) {
builder->AppendString(Handle<String>::cast(function_name));
@@ -398,46 +654,21 @@ void SerializeJSStackFrame(Isolate* isolate, Handle<StackTraceFrame> frame,
AppendFileLocation(isolate, frame, builder);
return;
}
-
builder->AppendCString(" (");
AppendFileLocation(isolate, frame, builder);
builder->AppendCString(")");
}
-void SerializeAsmJsWasmStackFrame(Isolate* isolate,
- Handle<StackTraceFrame> frame,
- IncrementalStringBuilder* builder) {
- // The string should look exactly as the respective javascript frame string.
- // Keep this method in line to
- // JSStackFrame::ToString(IncrementalStringBuilder&).
- Handle<Object> function_name = StackTraceFrame::GetFunctionName(frame);
-
- if (IsNonEmptyString(function_name)) {
- builder->AppendString(Handle<String>::cast(function_name));
- builder->AppendCString(" (");
- }
-
- AppendFileLocation(isolate, frame, builder);
-
- if (IsNonEmptyString(function_name)) builder->AppendCString(")");
-
- return;
-}
-
-bool IsAnonymousWasmScript(Isolate* isolate, Handle<StackTraceFrame> frame,
- Handle<Object> url) {
- DCHECK(url->IsString());
- Handle<String> anonymous_prefix =
- isolate->factory()->InternalizeString(StaticCharVector("wasm://wasm/"));
- return (StackTraceFrame::IsWasm(frame) &&
- StringIndexOf(isolate, Handle<String>::cast(url), anonymous_prefix) >=
- 0);
+bool IsAnonymousWasmScript(Isolate* isolate, Handle<Object> url) {
+ Handle<String> prefix =
+ isolate->factory()->NewStringFromStaticChars("wasm://wasm/");
+ return StringIndexOf(isolate, Handle<String>::cast(url), prefix) == 0;
}
-void SerializeWasmStackFrame(Isolate* isolate, Handle<StackTraceFrame> frame,
+void SerializeWasmStackFrame(Isolate* isolate, Handle<StackFrameInfo> frame,
IncrementalStringBuilder* builder) {
- Handle<Object> module_name = StackTraceFrame::GetWasmModuleName(frame);
- Handle<Object> function_name = StackTraceFrame::GetFunctionName(frame);
+ Handle<Object> module_name = StackFrameInfo::GetWasmModuleName(frame);
+ Handle<Object> function_name = StackFrameInfo::GetFunctionName(frame);
const bool has_name = !module_name->IsNull() || !function_name->IsNull();
if (has_name) {
if (module_name->IsNull()) {
@@ -452,22 +683,22 @@ void SerializeWasmStackFrame(Isolate* isolate, Handle<StackTraceFrame> frame,
builder->AppendCString(" (");
}
- Handle<Object> url = StackTraceFrame::GetScriptNameOrSourceUrl(frame);
- if (IsNonEmptyString(url) && !IsAnonymousWasmScript(isolate, frame, url)) {
+ Handle<Object> url(frame->GetScriptNameOrSourceURL(), isolate);
+ if (IsNonEmptyString(url) && !IsAnonymousWasmScript(isolate, url)) {
builder->AppendString(Handle<String>::cast(url));
} else {
builder->AppendCString("<anonymous>");
}
builder->AppendCString(":");
- const int wasm_func_index = StackTraceFrame::GetWasmFunctionIndex(frame);
+ const int wasm_func_index = frame->GetWasmFunctionIndex();
builder->AppendCString("wasm-function[");
builder->AppendInt(wasm_func_index);
builder->AppendCString("]:");
char buffer[16];
SNPrintF(ArrayVector(buffer), "0x%x",
- StackTraceFrame::GetColumnNumber(frame));
+ StackFrameInfo::GetColumnNumber(frame) - 1);
builder->AppendCString(buffer);
if (has_name) builder->AppendCString(")");
@@ -475,22 +706,22 @@ void SerializeWasmStackFrame(Isolate* isolate, Handle<StackTraceFrame> frame,
} // namespace
-void SerializeStackTraceFrame(Isolate* isolate, Handle<StackTraceFrame> frame,
- IncrementalStringBuilder* builder) {
+void SerializeStackFrameInfo(Isolate* isolate, Handle<StackFrameInfo> frame,
+ IncrementalStringBuilder* builder) {
// Ordering here is important, as AsmJs frames are also marked as Wasm.
- if (StackTraceFrame::IsAsmJsWasm(frame)) {
- SerializeAsmJsWasmStackFrame(isolate, frame, builder);
- } else if (StackTraceFrame::IsWasm(frame)) {
+ if (frame->IsAsmJsWasm()) {
+ SerializeJSStackFrame(isolate, frame, builder);
+ } else if (frame->IsWasm()) {
SerializeWasmStackFrame(isolate, frame, builder);
} else {
SerializeJSStackFrame(isolate, frame, builder);
}
}
-MaybeHandle<String> SerializeStackTraceFrame(Isolate* isolate,
- Handle<StackTraceFrame> frame) {
+MaybeHandle<String> SerializeStackFrameInfo(Isolate* isolate,
+ Handle<StackFrameInfo> frame) {
IncrementalStringBuilder builder(isolate);
- SerializeStackTraceFrame(isolate, frame, &builder);
+ SerializeStackFrameInfo(isolate, frame, &builder);
return builder.Finish();
}
diff --git a/deps/v8/src/objects/stack-frame-info.h b/deps/v8/src/objects/stack-frame-info.h
index a2802792fd..941b774f45 100644
--- a/deps/v8/src/objects/stack-frame-info.h
+++ b/deps/v8/src/objects/stack-frame-info.h
@@ -14,7 +14,7 @@
namespace v8 {
namespace internal {
-class FrameArray;
+class MessageLocation;
class WasmInstanceObject;
#include "torque-generated/src/objects/stack-frame-info-tq.inc"
@@ -23,91 +23,83 @@ class StackFrameInfo
: public TorqueGeneratedStackFrameInfo<StackFrameInfo, Struct> {
public:
NEVER_READ_ONLY_SPACE
- // Wasm frames only: function_offset instead of promise_combinator_index.
- DECL_INT_ACCESSORS(function_offset)
- DECL_BOOLEAN_ACCESSORS(is_eval)
- DECL_BOOLEAN_ACCESSORS(is_constructor)
- DECL_BOOLEAN_ACCESSORS(is_wasm)
- DECL_BOOLEAN_ACCESSORS(is_asmjs_wasm)
- DECL_BOOLEAN_ACCESSORS(is_user_java_script)
- DECL_BOOLEAN_ACCESSORS(is_toplevel)
- DECL_BOOLEAN_ACCESSORS(is_async)
- DECL_BOOLEAN_ACCESSORS(is_promise_all)
- DECL_BOOLEAN_ACCESSORS(is_promise_any)
+
+ inline bool IsWasm() const;
+ inline bool IsAsmJsWasm() const;
+ inline bool IsStrict() const;
+ inline bool IsConstructor() const;
+ inline bool IsAsmJsAtNumberConversion() const;
+ inline bool IsAsync() const;
+ bool IsEval() const;
+ bool IsUserJavaScript() const;
+ bool IsMethodCall() const;
+ bool IsToplevel() const;
+ bool IsPromiseAll() const;
+ bool IsPromiseAny() const;
+ bool IsNative() const;
// Dispatched behavior.
DECL_PRINTER(StackFrameInfo)
+ DECL_VERIFIER(StackFrameInfo)
+
+ // Used to signal that the requested field is unknown.
+ static constexpr int kUnknown = kNoSourcePosition;
+
+ static int GetLineNumber(Handle<StackFrameInfo> info);
+ static int GetColumnNumber(Handle<StackFrameInfo> info);
+
+ static int GetEnclosingLineNumber(Handle<StackFrameInfo> info);
+ static int GetEnclosingColumnNumber(Handle<StackFrameInfo> info);
+
+ // Returns the script ID if one is attached,
+ // Message::kNoScriptIdInfo otherwise.
+ int GetScriptId() const;
+ Object GetScriptName() const;
+ Object GetScriptNameOrSourceURL() const;
+
+ static Handle<PrimitiveHeapObject> GetEvalOrigin(Handle<StackFrameInfo> info);
+ static Handle<Object> GetFunctionName(Handle<StackFrameInfo> info);
+ static Handle<Object> GetMethodName(Handle<StackFrameInfo> info);
+ static Handle<Object> GetTypeName(Handle<StackFrameInfo> info);
+
+ // These methods are only valid for Wasm and asm.js Wasm frames.
+ uint32_t GetWasmFunctionIndex() const;
+ WasmInstanceObject GetWasmInstance() const;
+ static Handle<Object> GetWasmModuleName(Handle<StackFrameInfo> info);
+
+ // Returns the 0-based source position, which is the offset into the
+ // Script in case of JavaScript and Asm.js, and the bytecode offset
+ // in the module in case of actual Wasm. In case of async promise
+ // combinator frames, this returns the index of the promise.
+ static int GetSourcePosition(Handle<StackFrameInfo> info);
+
+ // Attempts to fill the |location| based on the |info|, and avoids
+ // triggering source position table building for JavaScript frames.
+ static bool ComputeLocation(Handle<StackFrameInfo> info,
+ MessageLocation* location);
private:
// Bit position in the flag, from least significant bit position.
DEFINE_TORQUE_GENERATED_STACK_FRAME_INFO_FLAGS()
+ friend class StackTraceBuilder;
- TQ_OBJECT_CONSTRUCTORS(StackFrameInfo)
-};
+ static int ComputeSourcePosition(Handle<StackFrameInfo> info, int offset);
-// This class is used to lazily initialize a StackFrameInfo object from
-// a FrameArray plus an index.
-// The first time any of the Get* or Is* methods is called, a
-// StackFrameInfo object is allocated and all necessary information
-// retrieved.
-class StackTraceFrame
- : public TorqueGeneratedStackTraceFrame<StackTraceFrame, Struct> {
- public:
- NEVER_READ_ONLY_SPACE
+ base::Optional<Script> GetScript() const;
+ SharedFunctionInfo GetSharedFunctionInfo() const;
- // Dispatched behavior.
- DECL_PRINTER(StackTraceFrame)
-
- static int GetLineNumber(Handle<StackTraceFrame> frame);
- static int GetOneBasedLineNumber(Handle<StackTraceFrame> frame);
- static int GetColumnNumber(Handle<StackTraceFrame> frame);
- static int GetOneBasedColumnNumber(Handle<StackTraceFrame> frame);
- static int GetScriptId(Handle<StackTraceFrame> frame);
- static int GetPromiseCombinatorIndex(Handle<StackTraceFrame> frame);
- static int GetFunctionOffset(Handle<StackTraceFrame> frame);
- static int GetWasmFunctionIndex(Handle<StackTraceFrame> frame);
-
- static Handle<Object> GetFileName(Handle<StackTraceFrame> frame);
- static Handle<Object> GetScriptNameOrSourceUrl(Handle<StackTraceFrame> frame);
- static Handle<Object> GetFunctionName(Handle<StackTraceFrame> frame);
- static Handle<Object> GetMethodName(Handle<StackTraceFrame> frame);
- static Handle<Object> GetTypeName(Handle<StackTraceFrame> frame);
- static Handle<Object> GetEvalOrigin(Handle<StackTraceFrame> frame);
- static Handle<Object> GetWasmModuleName(Handle<StackTraceFrame> frame);
- static Handle<WasmInstanceObject> GetWasmInstance(
- Handle<StackTraceFrame> frame);
-
- static bool IsEval(Handle<StackTraceFrame> frame);
- static bool IsConstructor(Handle<StackTraceFrame> frame);
- static bool IsWasm(Handle<StackTraceFrame> frame);
- static bool IsAsmJsWasm(Handle<StackTraceFrame> frame);
- static bool IsUserJavaScript(Handle<StackTraceFrame> frame);
- static bool IsToplevel(Handle<StackTraceFrame> frame);
- static bool IsAsync(Handle<StackTraceFrame> frame);
- static bool IsPromiseAll(Handle<StackTraceFrame> frame);
- static bool IsPromiseAny(Handle<StackTraceFrame> frame);
+ static MaybeHandle<Script> GetScript(Isolate* isolate,
+ Handle<StackFrameInfo> info);
- private:
- static Handle<StackFrameInfo> GetFrameInfo(Handle<StackTraceFrame> frame);
- static void InitializeFrameInfo(Handle<StackTraceFrame> frame);
-
- TQ_OBJECT_CONSTRUCTORS(StackTraceFrame)
+ TQ_OBJECT_CONSTRUCTORS(StackFrameInfo)
};
-// Small helper that retrieves the FrameArray from a stack-trace
-// consisting of a FixedArray of StackTraceFrame objects.
-// This helper is only temporary until all FrameArray use-sites have
-// been converted to use StackTraceFrame and StackFrameInfo objects.
-V8_EXPORT_PRIVATE
-Handle<FrameArray> GetFrameArrayFromStackTrace(Isolate* isolate,
- Handle<FixedArray> stack_trace);
-
class IncrementalStringBuilder;
-void SerializeStackTraceFrame(Isolate* isolate, Handle<StackTraceFrame> frame,
- IncrementalStringBuilder* builder);
+void SerializeStackFrameInfo(Isolate* isolate, Handle<StackFrameInfo> frame,
+ IncrementalStringBuilder* builder);
V8_EXPORT_PRIVATE
-MaybeHandle<String> SerializeStackTraceFrame(Isolate* isolate,
- Handle<StackTraceFrame> frame);
+MaybeHandle<String> SerializeStackFrameInfo(Isolate* isolate,
+ Handle<StackFrameInfo> frame);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/stack-frame-info.tq b/deps/v8/src/objects/stack-frame-info.tq
index b977128b3e..0ecc0dc4ff 100644
--- a/deps/v8/src/objects/stack-frame-info.tq
+++ b/deps/v8/src/objects/stack-frame-info.tq
@@ -3,38 +3,23 @@
// found in the LICENSE file.
bitfield struct StackFrameInfoFlags extends uint31 {
- is_eval: bool: 1 bit;
- is_constructor: bool: 1 bit;
is_wasm: bool: 1 bit;
- is_asm_js_wasm: bool: 1 bit;
- is_user_java_script: bool: 1 bit;
- is_toplevel: bool: 1 bit;
+ is_asm_js_wasm: bool: 1 bit; // Implies that is_wasm bit is set.
+ is_strict: bool: 1 bit;
+ is_constructor: bool: 1 bit;
+ is_asm_js_at_number_conversion: bool: 1 bit;
is_async: bool: 1 bit;
- is_promise_all: bool: 1 bit;
- is_promise_any: bool: 1 bit;
-}
-@generateCppClass
-extern class StackFrameInfo extends Struct {
- line_number: Smi;
- column_number: Smi;
- promise_combinator_index: Smi;
- script_id: Smi;
- wasm_function_index: Smi;
- script_name: Object;
- script_name_or_source_url: Object;
- function_name: String|Null|Undefined;
- method_name: String|Null|Undefined;
- type_name: String|Null|Undefined;
- eval_origin: String|Null|Undefined;
- wasm_module_name: String|Null|Undefined;
- wasm_instance: WasmInstanceObject|Null|Undefined;
- flag: SmiTagged<StackFrameInfoFlags>;
+  // Whether code_offset_or_source_position contains the source position.
+ is_source_position_computed: bool: 1 bit;
}
@generateCppClass
-extern class StackTraceFrame extends Struct {
- frame_array: FrameArray|Undefined;
- frame_index: Smi;
- frame_info: StackFrameInfo|Undefined;
+extern class StackFrameInfo extends Struct {
+ receiver_or_instance: JSAny;
+ function: JSFunction|Smi;
+ code_object: HeapObject;
+ code_offset_or_source_position: Smi;
+ flags: SmiTagged<StackFrameInfoFlags>;
+ parameters: FixedArray;
}
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index 3fb4121588..5be0141ab8 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -42,28 +42,22 @@ class V8_NODISCARD SharedStringAccessGuardIfNeeded {
}
}
+ // Slow version which gets the isolate from the String.
+ explicit SharedStringAccessGuardIfNeeded(String str) {
+ Isolate* isolate = GetIsolateIfNeeded(str);
+ if (isolate != nullptr) mutex_guard.emplace(isolate->string_access());
+ }
+
static SharedStringAccessGuardIfNeeded NotNeeded() {
return SharedStringAccessGuardIfNeeded();
}
#ifdef DEBUG
static bool IsNeeded(String str) {
- LocalHeap* local_heap = LocalHeap::Current();
- // Don't acquire the lock for the main thread.
- if (!local_heap || local_heap->is_main_thread()) return false;
-
- Isolate* isolate;
- if (!GetIsolateFromHeapObject(str, &isolate)) {
- // If we can't get the isolate from the String, it must be read-only.
- DCHECK(ReadOnlyHeap::Contains(str));
- return false;
- }
- return true;
+ return GetIsolateIfNeeded(str) != nullptr;
}
#endif
- static bool IsNeeded(Isolate* isolate) { return false; }
-
static bool IsNeeded(LocalIsolate* local_isolate) {
// TODO(leszeks): Remove the nullptr check for local_isolate.
return local_isolate && !local_isolate->heap()->is_main_thread();
@@ -78,6 +72,21 @@ class V8_NODISCARD SharedStringAccessGuardIfNeeded {
DCHECK(!mutex_guard.has_value());
}
+ // Returns the Isolate from the String if we need it for the lock.
+ static Isolate* GetIsolateIfNeeded(String str) {
+ LocalHeap* local_heap = LocalHeap::Current();
+ // Don't acquire the lock for the main thread.
+ if (!local_heap || local_heap->is_main_thread()) return nullptr;
+
+ Isolate* isolate;
+ if (!GetIsolateFromHeapObject(str, &isolate)) {
+ // If we can't get the isolate from the String, it must be read-only.
+ DCHECK(ReadOnlyHeap::Contains(str));
+ return nullptr;
+ }
+ return isolate;
+ }
+
base::Optional<base::SharedMutexGuard<base::kShared>> mutex_guard;
};
@@ -630,6 +639,15 @@ String String::GetUnderlying() {
template <class Visitor>
ConsString String::VisitFlat(Visitor* visitor, String string,
const int offset) {
+ DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(string));
+ return VisitFlat(visitor, string, offset,
+ SharedStringAccessGuardIfNeeded::NotNeeded());
+}
+
+template <class Visitor>
+ConsString String::VisitFlat(
+ Visitor* visitor, String string, const int offset,
+ const SharedStringAccessGuardIfNeeded& access_guard) {
DisallowGarbageCollection no_gc;
int slice_offset = offset;
const int length = string.length();
@@ -639,13 +657,15 @@ ConsString String::VisitFlat(Visitor* visitor, String string,
switch (type & (kStringRepresentationMask | kStringEncodingMask)) {
case kSeqStringTag | kOneByteStringTag:
visitor->VisitOneByteString(
- SeqOneByteString::cast(string).GetChars(no_gc) + slice_offset,
+ SeqOneByteString::cast(string).GetChars(no_gc, access_guard) +
+ slice_offset,
length - offset);
return ConsString();
case kSeqStringTag | kTwoByteStringTag:
visitor->VisitTwoByteString(
- SeqTwoByteString::cast(string).GetChars(no_gc) + slice_offset,
+ SeqTwoByteString::cast(string).GetChars(no_gc, access_guard) +
+ slice_offset,
length - offset);
return ConsString();
@@ -844,15 +864,23 @@ void ExternalString::DisposeResource(Isolate* isolate) {
DEF_GETTER(ExternalOneByteString, resource,
const ExternalOneByteString::Resource*) {
+ return mutable_resource();
+}
+
+DEF_GETTER(ExternalOneByteString, mutable_resource,
+ ExternalOneByteString::Resource*) {
return reinterpret_cast<Resource*>(resource_as_address(isolate));
}
void ExternalOneByteString::update_data_cache(Isolate* isolate) {
- if (is_uncached()) return;
DisallowGarbageCollection no_gc;
- WriteExternalPointerField(kResourceDataOffset, isolate,
- reinterpret_cast<Address>(resource()->data()),
- kExternalStringResourceDataTag);
+ if (is_uncached()) {
+ if (resource()->IsCacheable()) mutable_resource()->UpdateDataCache();
+ } else {
+ WriteExternalPointerField(kResourceDataOffset, isolate,
+ reinterpret_cast<Address>(resource()->data()),
+ kExternalStringResourceDataTag);
+ }
}
void ExternalOneByteString::SetResource(
@@ -874,6 +902,23 @@ void ExternalOneByteString::set_resource(
const uint8_t* ExternalOneByteString::GetChars() {
DisallowGarbageCollection no_gc;
+ if (is_uncached()) {
+ if (resource()->IsCacheable()) {
+      // TODO(solanes): Teach TurboFan/CSA to not bail out to the runtime to
+ // avoid this call.
+ return reinterpret_cast<const uint8_t*>(resource()->cached_data());
+ }
+#if DEBUG
+ // Check that this method is called only from the main thread if we have an
+ // uncached string with an uncacheable resource.
+ {
+ Isolate* isolate;
+ DCHECK_IMPLIES(GetIsolateFromHeapObject(*this, &isolate),
+ ThreadId::Current() == isolate->thread_id());
+ }
+#endif
+ }
+
return reinterpret_cast<const uint8_t*>(resource()->data());
}
@@ -884,15 +929,23 @@ uint8_t ExternalOneByteString::Get(int index) {
DEF_GETTER(ExternalTwoByteString, resource,
const ExternalTwoByteString::Resource*) {
+ return mutable_resource();
+}
+
+DEF_GETTER(ExternalTwoByteString, mutable_resource,
+ ExternalTwoByteString::Resource*) {
return reinterpret_cast<Resource*>(resource_as_address(isolate));
}
void ExternalTwoByteString::update_data_cache(Isolate* isolate) {
- if (is_uncached()) return;
DisallowGarbageCollection no_gc;
- WriteExternalPointerField(kResourceDataOffset, isolate,
- reinterpret_cast<Address>(resource()->data()),
- kExternalStringResourceDataTag);
+ if (is_uncached()) {
+ if (resource()->IsCacheable()) mutable_resource()->UpdateDataCache();
+ } else {
+ WriteExternalPointerField(kResourceDataOffset, isolate,
+ reinterpret_cast<Address>(resource()->data()),
+ kExternalStringResourceDataTag);
+ }
}
void ExternalTwoByteString::SetResource(
@@ -914,6 +967,23 @@ void ExternalTwoByteString::set_resource(
const uint16_t* ExternalTwoByteString::GetChars() {
DisallowGarbageCollection no_gc;
+ if (is_uncached()) {
+ if (resource()->IsCacheable()) {
+      // TODO(solanes): Teach TurboFan/CSA to not bail out to the runtime to
+ // avoid this call.
+ return resource()->cached_data();
+ }
+#if DEBUG
+ // Check that this method is called only from the main thread if we have an
+ // uncached string with an uncacheable resource.
+ {
+ Isolate* isolate;
+ DCHECK_IMPLIES(GetIsolateFromHeapObject(*this, &isolate),
+ ThreadId::Current() == isolate->thread_id());
+ }
+#endif
+ }
+
return resource()->data();
}
@@ -948,6 +1018,28 @@ void ConsStringIterator::Pop() {
depth_--;
}
+class StringCharacterStream {
+ public:
+ inline explicit StringCharacterStream(String string, int offset = 0);
+ StringCharacterStream(const StringCharacterStream&) = delete;
+ StringCharacterStream& operator=(const StringCharacterStream&) = delete;
+ inline uint16_t GetNext();
+ inline bool HasMore();
+ inline void Reset(String string, int offset = 0);
+ inline void VisitOneByteString(const uint8_t* chars, int length);
+ inline void VisitTwoByteString(const uint16_t* chars, int length);
+
+ private:
+ ConsStringIterator iter_;
+ bool is_one_byte_;
+ union {
+ const uint8_t* buffer8_;
+ const uint16_t* buffer16_;
+ };
+ const uint8_t* end_;
+ SharedStringAccessGuardIfNeeded access_guard_;
+};
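A minimal usage sketch of the class above (illustrative only; `str` is assumed to be an existing String reachable from the current thread):

  // Iterate all characters of a possibly non-flat (cons/external) string.
  StringCharacterStream stream(str);
  while (stream.HasMore()) {
    uint16_t c = stream.GetNext();
    // ... process |c| ...
  }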
+
uint16_t StringCharacterStream::GetNext() {
DCHECK(buffer8_ != nullptr && end_ != nullptr);
// Advance cursor if needed.
@@ -956,19 +1048,25 @@ uint16_t StringCharacterStream::GetNext() {
return is_one_byte_ ? *buffer8_++ : *buffer16_++;
}
+// TODO(solanes, v8:7790, chromium:1166095): Assess if we need to use
+// Isolate/LocalIsolate and pipe them through, instead of using the slow
+// version of the SharedStringAccessGuardIfNeeded.
StringCharacterStream::StringCharacterStream(String string, int offset)
- : is_one_byte_(false) {
+ : is_one_byte_(false), access_guard_(string) {
Reset(string, offset);
}
void StringCharacterStream::Reset(String string, int offset) {
buffer8_ = nullptr;
end_ = nullptr;
- ConsString cons_string = String::VisitFlat(this, string, offset);
+
+ ConsString cons_string =
+ String::VisitFlat(this, string, offset, access_guard_);
iter_.Reset(cons_string, offset);
if (!cons_string.is_null()) {
string = iter_.Next(&offset);
- if (!string.is_null()) String::VisitFlat(this, string, offset);
+ if (!string.is_null())
+ String::VisitFlat(this, string, offset, access_guard_);
}
}
@@ -978,7 +1076,7 @@ bool StringCharacterStream::HasMore() {
String string = iter_.Next(&offset);
DCHECK_EQ(offset, 0);
if (string.is_null()) return false;
- String::VisitFlat(this, string);
+ String::VisitFlat(this, string, 0, access_guard_);
DCHECK(buffer8_ != end_);
return true;
}
diff --git a/deps/v8/src/objects/string-table.cc b/deps/v8/src/objects/string-table.cc
index 1f772f6edf..8d5b44c6c5 100644
--- a/deps/v8/src/objects/string-table.cc
+++ b/deps/v8/src/objects/string-table.cc
@@ -681,14 +681,14 @@ size_t StringTable::GetCurrentMemoryUsage() const {
void StringTable::IterateElements(RootVisitor* visitor) {
// This should only happen during garbage collection when background threads
// are paused, so the load can be relaxed.
- DCHECK_IMPLIES(FLAG_local_heaps, isolate_->heap()->safepoint()->IsActive());
+ DCHECK(isolate_->heap()->safepoint()->IsActive());
data_.load(std::memory_order_relaxed)->IterateElements(visitor);
}
void StringTable::DropOldData() {
// This should only happen during garbage collection when background threads
// are paused, so the load can be relaxed.
- DCHECK_IMPLIES(FLAG_local_heaps, isolate_->heap()->safepoint()->IsActive());
+ DCHECK(isolate_->heap()->safepoint()->IsActive());
DCHECK_NE(isolate_->heap()->gc_state(), Heap::NOT_IN_GC);
data_.load(std::memory_order_relaxed)->DropPreviousData();
}
@@ -696,7 +696,7 @@ void StringTable::DropOldData() {
void StringTable::NotifyElementsRemoved(int count) {
// This should only happen during garbage collection when background threads
// are paused, so the load can be relaxed.
- DCHECK_IMPLIES(FLAG_local_heaps, isolate_->heap()->safepoint()->IsActive());
+ DCHECK(isolate_->heap()->safepoint()->IsActive());
DCHECK_NE(isolate_->heap()->gc_state(), Heap::NOT_IN_GC);
data_.load(std::memory_order_relaxed)->ElementsRemoved(count);
}
diff --git a/deps/v8/src/objects/string.cc b/deps/v8/src/objects/string.cc
index 4abf15f333..b9413618ee 100644
--- a/deps/v8/src/objects/string.cc
+++ b/deps/v8/src/objects/string.cc
@@ -182,9 +182,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
ReadOnlyRoots roots(isolate);
if (size < ExternalString::kSizeOfAllExternalStrings) {
if (is_internalized) {
- // We do not support this case since accessing internal external
- // uncached strings is not thread-safe.
- return false;
+ new_map = roots.uncached_external_internalized_string_map();
} else {
new_map = roots.uncached_external_string_map();
}
@@ -261,13 +259,9 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
Map new_map;
ReadOnlyRoots roots(isolate);
if (size < ExternalString::kSizeOfAllExternalStrings) {
- if (is_internalized) {
- // We do not support this case since accessing internal external
- // uncached strings is not thread-safe.
- return false;
- } else {
- new_map = roots.uncached_external_one_byte_string_map();
- }
+ new_map = is_internalized
+ ? roots.uncached_external_one_byte_internalized_string_map()
+ : roots.uncached_external_one_byte_string_map();
} else {
new_map = is_internalized
? roots.external_one_byte_internalized_string_map()
@@ -315,11 +309,6 @@ bool String::SupportsExternalization() {
DCHECK_LE(ExternalString::kUncachedSize, this->Size());
#endif
- if (this->Size() < ExternalString::kSizeOfAllExternalStrings &&
- this->IsInternalizedString()) {
- return false;
- }
-
Isolate* isolate = GetIsolateFromWritableObject(*this);
return !isolate->heap()->IsInGCPostProcessing();
}
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index 043beb730e..5f235fa381 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -361,7 +361,7 @@ class String : public TorqueGeneratedString<String, Name> {
v8::String::ExternalStringResource* resource);
V8_EXPORT_PRIVATE bool MakeExternal(
v8::String::ExternalOneByteStringResource* resource);
- V8_EXPORT_PRIVATE bool SupportsExternalization();
+ bool SupportsExternalization();
// Conversion.
// "array index": an index allowed by the ES spec for JSArrays.
@@ -512,10 +512,17 @@ class String : public TorqueGeneratedString<String, Name> {
return NonOneByteStart(chars, length) >= length;
}
+ // May only be called when a SharedStringAccessGuard is not needed (i.e. on
+ // the main thread or on read-only strings).
template <class Visitor>
static inline ConsString VisitFlat(Visitor* visitor, String string,
int offset = 0);
+ template <class Visitor>
+ static inline ConsString VisitFlat(
+ Visitor* visitor, String string, int offset,
+ const SharedStringAccessGuardIfNeeded& access_guard);
+
template <typename LocalIsolate>
static Handle<FixedArray> CalculateLineEnds(LocalIsolate* isolate,
Handle<String> string,
@@ -857,6 +864,10 @@ class ExternalOneByteString : public ExternalString {
STATIC_ASSERT(kSize == kSizeOfAllExternalStrings);
OBJECT_CONSTRUCTORS(ExternalOneByteString, ExternalString);
+
+ private:
+ // The underlying resource as a non-const pointer.
+ DECL_GETTER(mutable_resource, Resource*)
};
// The ExternalTwoByteString class is an external string backed by a UTF-16
@@ -902,6 +913,10 @@ class ExternalTwoByteString : public ExternalString {
STATIC_ASSERT(kSize == kSizeOfAllExternalStrings);
OBJECT_CONSTRUCTORS(ExternalTwoByteString, ExternalString);
+
+ private:
+ // The underlying resource as a non-const pointer.
+ DECL_GETTER(mutable_resource, Resource*)
};
// A flat string reader provides random access to the contents of a
@@ -975,26 +990,7 @@ class ConsStringIterator {
int consumed_;
};
-class StringCharacterStream {
- public:
- inline explicit StringCharacterStream(String string, int offset = 0);
- StringCharacterStream(const StringCharacterStream&) = delete;
- StringCharacterStream& operator=(const StringCharacterStream&) = delete;
- inline uint16_t GetNext();
- inline bool HasMore();
- inline void Reset(String string, int offset = 0);
- inline void VisitOneByteString(const uint8_t* chars, int length);
- inline void VisitTwoByteString(const uint16_t* chars, int length);
-
- private:
- ConsStringIterator iter_;
- bool is_one_byte_;
- union {
- const uint8_t* buffer8_;
- const uint16_t* buffer16_;
- };
- const uint8_t* end_;
-};
+class StringCharacterStream;
template <typename Char>
struct CharTraits;
diff --git a/deps/v8/src/objects/string.tq b/deps/v8/src/objects/string.tq
index 542dddc3b9..ad845760ae 100644
--- a/deps/v8/src/objects/string.tq
+++ b/deps/v8/src/objects/string.tq
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include 'src/builtins/builtins-string-gen.h'
+
@abstract
@generateCppClass
@reserveBitsInInstanceType(6)
@@ -306,3 +308,87 @@ macro TwoStringsToSlices<Result: type, Functor: type>(
}
}
}
+
+macro StaticAssertStringLengthFitsSmi(): void {
+ const kMaxStringLengthFitsSmi: constexpr bool =
+ kStringMaxLengthUintptr < kSmiMaxValue;
+ static_assert(kMaxStringLengthFitsSmi);
+}
+
+extern macro StringBuiltinsAssembler::SearchOneByteStringInTwoByteString(
+ RawPtr<char16>, intptr, RawPtr<char8>, intptr, intptr): intptr;
+extern macro StringBuiltinsAssembler::SearchOneByteStringInOneByteString(
+ RawPtr<char8>, intptr, RawPtr<char8>, intptr, intptr): intptr;
+extern macro StringBuiltinsAssembler::SearchTwoByteStringInTwoByteString(
+ RawPtr<char16>, intptr, RawPtr<char16>, intptr, intptr): intptr;
+extern macro StringBuiltinsAssembler::SearchTwoByteStringInOneByteString(
+ RawPtr<char8>, intptr, RawPtr<char16>, intptr, intptr): intptr;
+extern macro StringBuiltinsAssembler::SearchOneByteInOneByteString(
+ RawPtr<char8>, intptr, RawPtr<char8>, intptr): intptr;
+
+macro AbstractStringIndexOf(
+ subject: RawPtr<char16>, subjectLen: intptr, search: RawPtr<char8>,
+ searchLen: intptr, fromIndex: intptr): intptr {
+ return SearchOneByteStringInTwoByteString(
+ subject, subjectLen, search, searchLen, fromIndex);
+}
+macro AbstractStringIndexOf(
+ subject: RawPtr<char8>, subjectLen: intptr, search: RawPtr<char8>,
+ searchLen: intptr, fromIndex: intptr): intptr {
+ if (searchLen == 1) {
+ return SearchOneByteInOneByteString(subject, subjectLen, search, fromIndex);
+ }
+ return SearchOneByteStringInOneByteString(
+ subject, subjectLen, search, searchLen, fromIndex);
+}
+macro AbstractStringIndexOf(
+ subject: RawPtr<char16>, subjectLen: intptr, search: RawPtr<char16>,
+ searchLen: intptr, fromIndex: intptr): intptr {
+ return SearchTwoByteStringInTwoByteString(
+ subject, subjectLen, search, searchLen, fromIndex);
+}
+macro AbstractStringIndexOf(
+ subject: RawPtr<char8>, subjectLen: intptr, search: RawPtr<char16>,
+ searchLen: intptr, fromIndex: intptr): intptr {
+ return SearchTwoByteStringInOneByteString(
+ subject, subjectLen, search, searchLen, fromIndex);
+}
+
+struct AbstractStringIndexOfFunctor {
+ fromIndex: Smi;
+}
+// Ideally, this would be a method of AbstractStringIndexOfFunctor, but
+// currently methods don't support templates.
+macro Call<A: type, B: type>(
+ self: AbstractStringIndexOfFunctor, string: ConstSlice<A>,
+ searchStr: ConstSlice<B>): Smi {
+ return Convert<Smi>(AbstractStringIndexOf(
+ string.GCUnsafeStartPointer(), string.length,
+ searchStr.GCUnsafeStartPointer(), searchStr.length,
+ Convert<intptr>(self.fromIndex)));
+}
+
+macro AbstractStringIndexOf(implicit context: Context)(
+ string: String, searchString: String, fromIndex: Smi): Smi {
+ // Special case the empty string.
+ const searchStringLength = searchString.length_intptr;
+ const stringLength = string.length_intptr;
+ if (searchStringLength == 0 && SmiUntag(fromIndex) <= stringLength) {
+ return fromIndex;
+ }
+
+ // Don't bother to search if the searchString would go past the end
+ // of the string. This is actually necessary because of runtime
+ // checks.
+ if (SmiUntag(fromIndex) + searchStringLength > stringLength) {
+ return -1;
+ }
+
+ return TwoStringsToSlices<Smi>(
+ string, searchString, AbstractStringIndexOfFunctor{fromIndex: fromIndex});
+}
+
+builtin StringIndexOf(implicit context: Context)(
+ s: String, searchString: String, start: Smi): Smi {
+ return AbstractStringIndexOf(s, searchString, SmiMax(start, 0));
+}
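The builtin's contract, restated as a plain C++ sketch over std::string for illustration (not the actual implementation, which works on string slices as shown above): the start index is clamped to zero, an empty search string matches at the clamped start as long as it is in bounds, and the search bails out early when the remaining subject is shorter than the search string.

#include <algorithm>
#include <string>

// Illustrative only: mirrors the semantics of the StringIndexOf builtin.
int StringIndexOfSketch(const std::string& subject, const std::string& search,
                        int start) {
  const int from = std::max(start, 0);
  const int subject_len = static_cast<int>(subject.size());
  const int search_len = static_cast<int>(search.size());
  if (search_len == 0 && from <= subject_len) return from;
  if (from + search_len > subject_len) return -1;
  const size_t pos = subject.find(search, from);
  return pos == std::string::npos ? -1 : static_cast<int>(pos);
}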
diff --git a/deps/v8/src/objects/swiss-hash-table-helpers.h b/deps/v8/src/objects/swiss-hash-table-helpers.h
new file mode 100644
index 0000000000..db4b2d807e
--- /dev/null
+++ b/deps/v8/src/objects/swiss-hash-table-helpers.h
@@ -0,0 +1,363 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Collection of Swiss Table helpers that are independent of a specific
+// container, like SwissNameDictionary. Taken almost verbatim from Abseil;
+// comments in this file indicate which parts are taken from which Abseil file.
+
+#include <cstdint>
+#include <type_traits>
+
+#include "src/base/bits.h"
+#include "src/base/logging.h"
+#include "src/base/memory.h"
+
+#ifndef V8_OBJECTS_SWISS_HASH_TABLE_HELPERS_H_
+#define V8_OBJECTS_SWISS_HASH_TABLE_HELPERS_H_
+
+// The following #defines are taken from Abseil's have_sse.h (but renamed). They
+// are only defined within this file. However, we also take cross-platform
+// snapshot creation into account, by only using SSE if the target supports it,
+// too. The SSE implementation uses a group width of 16, whereas the non-SSE
+// version uses 8. We therefore have to avoid building a snapshot that contains
+// Swiss Tables with one group size and using it in code that expects a
+// different group size.
+#ifndef SWISS_TABLE_HAVE_SSE2
+#if (defined(__SSE2__) || \
+ (defined(_MSC_VER) && \
+ (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2)))) && \
+ (defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64))
+#define SWISS_TABLE_HAVE_SSE2 1
+#else
+#define SWISS_TABLE_HAVE_SSE2 0
+#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
+// TODO(v8:11388) Currently, building on a non-SSE platform for an SSE target
+// means that we cannot use the (more performant) SSE implementations of Swiss
+// Tables, even if the target would support it, just because the host doesn't.
+// This is due to the difference in group sizes (see comment at the beginning of
+// the file). We can solve this by implementing a new non-SSE Group that
+// behaves like GroupSse2Impl (and uses group size 16) in the future.
+#warning "You should avoid building on a non-SSE platform for an SSE target!"
+#endif
+#endif
+#endif
+
+#ifndef SWISS_TABLE_HAVE_SSSE3
+#if defined(__SSSE3__) && \
+ (defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64))
+#define SWISS_TABLE_HAVE_SSSE3 1
+#else
+#define SWISS_TABLE_HAVE_SSSE3 0
+#endif
+#endif
+
+#if SWISS_TABLE_HAVE_SSSE3 && !SWISS_TABLE_HAVE_SSE2
+#error "Bad configuration!"
+#endif
+
+#if SWISS_TABLE_HAVE_SSE2
+#include <emmintrin.h>
+#endif
+
+#if SWISS_TABLE_HAVE_SSSE3
+#include <tmmintrin.h>
+#endif
+
+namespace v8 {
+namespace internal {
+namespace swiss_table {
+
+// All definitions below are taken from Abseil's raw_hash_set.h with only minor
+// changes, like using existing V8 versions of certain helper functions.
+
+// Denotes the group of the control table currently being probed.
+// Implements quadratic probing by advancing by i groups after the i-th
+// (unsuccessful) probe.
+template <size_t GroupSize>
+class ProbeSequence {
+ public:
+ ProbeSequence(uint32_t hash, uint32_t mask) {
+ // Mask must be a power of 2 minus 1.
+ DCHECK_EQ(0, ((mask + 1) & mask));
+ mask_ = mask;
+ offset_ = hash & mask_;
+ }
+ uint32_t offset() const { return offset_; }
+ uint32_t offset(int i) const { return (offset_ + i) & mask_; }
+
+ void next() {
+ index_ += GroupSize;
+ offset_ += index_;
+ offset_ &= mask_;
+ }
+
+ size_t index() const { return index_; }
+
+ private:
+ // Used for modulo calculation.
+ uint32_t mask_;
+
+ // The index/offset into the control table, meaning that {ctrl[offset_]} is
+ // the start of the group currently being probed, assuming that |ctrl| is the
+ // pointer to the beginning of the control table.
+ uint32_t offset_;
+
+ // States the number of probes that have been performed (starting at 0),
+ // multiplied by GroupSize.
+ uint32_t index_ = 0;
+};
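To make the probing schedule concrete (illustrative only, assuming a group width of 8 and that the sketch sits next to the class above): offset() starts at hash & mask and each next() advances by one additional group, so the i-th probe starts at (hash + 8 * i * (i + 1) / 2) & mask.

#include <cstdio>

// Illustrative only: prints the first few probe offsets for a given hash.
void PrintProbeOffsets(uint32_t hash, uint32_t mask) {
  ProbeSequence<8> seq(hash, mask);
  for (int i = 0; i < 4; ++i) {
    printf("probe %d starts at offset %u\n", i, seq.offset());
    seq.next();
  }
  // For hash = 0 and mask = 63 this prints offsets 0, 8, 24 and 48:
  // triangular multiples of the group width, modulo the table size.
}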
+
+// An abstraction over a bitmask. It provides an easy way to iterate through the
+// indexes of the set bits of a bitmask. When Shift=0 (platforms with SSE),
+// this is a true bitmask.
+// When Shift=3 (used on non-SSE platforms), we obtain a "byte mask", where each
+// logical bit is represented by a full byte. The logical bit 0 is represented
+// as 0x00, whereas 1 is represented as 0x80. Other values must not appear.
+//
+// For example:
+// for (int i : BitMask<uint32_t, 16>(0x5)) -> yields 0, 2
+// for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
+template <class T, int SignificantBits, int Shift = 0>
+class BitMask {
+ STATIC_ASSERT(std::is_unsigned<T>::value);
+ STATIC_ASSERT(Shift == 0 || Shift == 3);
+
+ public:
+ // These are useful for unit tests (gunit).
+ // using value_type = int;
+ // using iterator = BitMask;
+ // using const_iterator = BitMask;
+
+ explicit BitMask(T mask) : mask_(mask) {}
+ BitMask& operator++() {
+ // Clear the least significant bit that is set.
+ mask_ &= (mask_ - 1);
+ return *this;
+ }
+ explicit operator bool() const { return mask_ != 0; }
+ int operator*() const { return LowestBitSet(); }
+ int LowestBitSet() const { return TrailingZeros(); }
+ int HighestBitSet() const {
+ return (sizeof(T) * CHAR_BIT - base::bits::CountLeadingZeros(mask_) - 1) >>
+ Shift;
+ }
+
+ BitMask begin() const { return *this; }
+ BitMask end() const { return BitMask(0); }
+
+ int TrailingZeros() const {
+ DCHECK_NE(mask_, 0);
+ return base::bits::CountTrailingZerosNonZero(mask_) >> Shift;
+ }
+
+ int LeadingZeros() const {
+ constexpr int total_significant_bits = SignificantBits << Shift;
+ constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
+ return base::bits::CountLeadingZeros(mask_ << extra_bits) >> Shift;
+ }
+
+ private:
+ friend bool operator==(const BitMask& a, const BitMask& b) {
+ return a.mask_ == b.mask_;
+ }
+ friend bool operator!=(const BitMask& a, const BitMask& b) {
+ return a.mask_ != b.mask_;
+ }
+
+ T mask_;
+};
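A short usage sketch of the iteration interface, reusing the values from the comment above (illustrative only):

// True bitmask (SSE case, Shift = 0): bits 0 and 2 are set.
for (int i : BitMask<uint32_t, 16>(0x5)) {
  printf("match in slot %d\n", i);  // prints 0, then 2
}
// Byte mask (portable case, Shift = 3): bytes 2 and 3 hold 0x80.
for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000ULL)) {
  printf("match in slot %d\n", i);  // prints 2, then 3
}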
+
+using ctrl_t = signed char;
+using h2_t = uint8_t;
+
+// The values here are selected for maximum performance. See the static asserts
+// below for details.
+enum Ctrl : ctrl_t {
+ kEmpty = -128, // 0b10000000
+ kDeleted = -2, // 0b11111110
+ kSentinel = -1, // 0b11111111
+};
+static_assert(
+ kEmpty & kDeleted & kSentinel & 0x80,
+ "Special markers need to have the MSB to make checking for them efficient");
+static_assert(kEmpty < kSentinel && kDeleted < kSentinel,
+ "kEmpty and kDeleted must be smaller than kSentinel to make the "
+ "SIMD test of IsEmptyOrDeleted() efficient");
+static_assert(kSentinel == -1,
+ "kSentinel must be -1 to elide loading it from memory into SIMD "
+ "registers (pcmpeqd xmm, xmm)");
+static_assert(kEmpty == -128,
+ "kEmpty must be -128 to make the SIMD check for its "
+ "existence efficient (psignb xmm, xmm)");
+static_assert(~kEmpty & ~kDeleted & kSentinel & 0x7F,
+ "kEmpty and kDeleted must share an unset bit that is not shared "
+ "by kSentinel to make the scalar test for MatchEmptyOrDeleted() "
+ "efficient");
+static_assert(kDeleted == -2,
+ "kDeleted must be -2 to make the implementation of "
+ "ConvertSpecialToEmptyAndFullToDeleted efficient");
+
+// See below for an explanation of H2. This constant exists for documentation
+// purposes only; Swiss Table implementations rely on this being 7.
+static constexpr int kH2Bits = 7;
+
+// Extracts H1 from the given overall hash, which means discarding the lowest 7
+// bits of the overall hash. H1 is used to determine the first group to probe.
+inline static uint32_t H1(uint32_t hash) { return (hash >> kH2Bits); }
+
+// Extracts H2 from the given overall hash, which means using only the lowest 7
+// bits of the overall hash. H2 is stored in the control table byte for each
+// present entry.
+inline static swiss_table::ctrl_t H2(uint32_t hash) {
+ return hash & ((1 << kH2Bits) - 1);
+}
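A concrete split for an assumed hash value, to illustrate how the two halves are used:

// Illustrative only: the hash value 0x1234 is an assumption for the example.
uint32_t hash = 0x1234;
uint32_t h1 = swiss_table::H1(hash);             // 0x1234 >> 7   == 36
swiss_table::ctrl_t h2 = swiss_table::H2(hash);  // 0x1234 & 0x7F == 0x34
// h1 selects the first group to probe; h2 is the byte stored in the control
// table for a present entry, so Match(h2) can find candidates per group.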
+
+#if SWISS_TABLE_HAVE_SSE2
+// https://github.com/abseil/abseil-cpp/issues/209
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
+// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
+// Work around this by using the portable implementation of Group
+// when using -funsigned-char under GCC.
+inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
+#if defined(__GNUC__) && !defined(__clang__)
+ if (std::is_unsigned<char>::value) {
+ const __m128i mask = _mm_set1_epi8(0x80);
+ const __m128i diff = _mm_subs_epi8(b, a);
+ return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
+ }
+#endif
+ return _mm_cmpgt_epi8(a, b);
+}
+
+struct GroupSse2Impl {
+ static constexpr size_t kWidth = 16; // the number of slots per group
+
+ explicit GroupSse2Impl(const ctrl_t* pos) {
+ ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
+ }
+
+ // Returns a bitmask representing the positions of slots that match |hash|.
+ BitMask<uint32_t, kWidth> Match(h2_t hash) const {
+ auto match = _mm_set1_epi8(hash);
+ return BitMask<uint32_t, kWidth>(
+ _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)));
+ }
+
+ // Returns a bitmask representing the positions of empty slots.
+ BitMask<uint32_t, kWidth> MatchEmpty() const {
+#if SWISS_TABLE_HAVE_SSSE3
+ // This only works because kEmpty is -128.
+ return BitMask<uint32_t, kWidth>(
+ _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
+#else
+ return Match(static_cast<h2_t>(kEmpty));
+#endif
+ }
+
+ // Returns a bitmask representing the positions of empty or deleted slots.
+ BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const {
+ auto special = _mm_set1_epi8(kSentinel);
+ return BitMask<uint32_t, kWidth>(
+ _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)));
+ }
+
+ // Returns the number of trailing empty or deleted elements in the group.
+ uint32_t CountLeadingEmptyOrDeleted() const {
+ auto special = _mm_set1_epi8(kSentinel);
+ return base::bits::CountTrailingZerosNonZero(
+ _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1);
+ }
+
+ void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+ auto msbs = _mm_set1_epi8(static_cast<char>(-128));
+ auto x126 = _mm_set1_epi8(126);
+#if SWISS_TABLE_HAVE_SSSE3
+ auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
+#else
+ auto zero = _mm_setzero_si128();
+ auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
+ auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
+#endif
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
+ }
+
+ __m128i ctrl;
+};
+#endif // SWISS_TABLE_HAVE_SSE2
+
+struct GroupPortableImpl {
+ static constexpr size_t kWidth = 8; // the number of slots per group
+
+ explicit GroupPortableImpl(const ctrl_t* pos)
+ : ctrl(base::ReadLittleEndianValue<uint64_t>(
+ reinterpret_cast<uintptr_t>(const_cast<ctrl_t*>(pos)))) {}
+
+ // Returns a bitmask representing the positions of slots that match |hash|.
+ BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
+ // For the technique, see:
+ // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
+ // (Determine if a word has a byte equal to n).
+ //
+ // Caveat: there are false positives but:
+ // - they only occur if |hash| actually appears elsewhere in |ctrl|
+ // - they never occur on kEmpty, kDeleted, kSentinel
+ // - they will be handled gracefully by subsequent checks in code
+ //
+ // Example:
+ // v = 0x1716151413121110
+ // hash = 0x12
+ // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ constexpr uint64_t lsbs = 0x0101010101010101ULL;
+ auto x = ctrl ^ (lsbs * hash);
+ return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
+ }
+
+ // Returns a bitmask representing the positions of empty slots.
+ BitMask<uint64_t, kWidth, 3> MatchEmpty() const {
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) & msbs);
+ }
+
+ // Returns a bitmask representing the positions of empty or deleted slots.
+ BitMask<uint64_t, kWidth, 3> MatchEmptyOrDeleted() const {
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) & msbs);
+ }
+
+ // Returns the number of trailing empty or deleted elements in the group.
+ uint32_t CountLeadingEmptyOrDeleted() const {
+ constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL;
+ return (base::bits::CountTrailingZerosNonZero(
+ ((~ctrl & (ctrl >> 7)) | gaps) + 1) +
+ 7) >>
+ 3;
+ }
+
+ void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ constexpr uint64_t lsbs = 0x0101010101010101ULL;
+ auto x = ctrl & msbs;
+ auto res = (~x + (x >> 7)) & ~lsbs;
+ base::WriteLittleEndianValue(reinterpret_cast<uint64_t*>(dst), res);
+ }
+
+ uint64_t ctrl;
+};
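The portable Match() above is the standard SWAR "word contains a byte equal to n" trick; a standalone sketch of the same computation on a plain word, using the example values from the comment (illustrative only):

#include <cstdint>

// Returns a word with 0x80 set in every byte of |ctrl| that equals |hash|
// (plus the false positives described in the comment above).
uint64_t MatchBytes(uint64_t ctrl, uint8_t hash) {
  constexpr uint64_t kMsbs = 0x8080808080808080ULL;
  constexpr uint64_t kLsbs = 0x0101010101010101ULL;
  const uint64_t x = ctrl ^ (kLsbs * hash);
  return (x - kLsbs) & ~x & kMsbs;
}

// MatchBytes(0x1716151413121110ULL, 0x12) == 0x0000000080800000, i.e. the
// byte mask reports slots 2 and 3 (slot 3 being one such false positive).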
+
+// Determine which Group implementation SwissNameDictionary uses.
+#if SWISS_TABLE_HAVE_SSE2
+using Group = GroupSse2Impl;
+#else
+using Group = GroupPortableImpl;
+#endif
+
+#undef SWISS_TABLE_HAVE_SSE2
+#undef SWISS_TABLE_HAVE_SSSE3
+
+} // namespace swiss_table
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_SWISS_HASH_TABLE_HELPERS_H_
diff --git a/deps/v8/src/objects/swiss-name-dictionary-inl.h b/deps/v8/src/objects/swiss-name-dictionary-inl.h
new file mode 100644
index 0000000000..e6264a0bc3
--- /dev/null
+++ b/deps/v8/src/objects/swiss-name-dictionary-inl.h
@@ -0,0 +1,659 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_SWISS_NAME_DICTIONARY_INL_H_
+#define V8_OBJECTS_SWISS_NAME_DICTIONARY_INL_H_
+
+#include <algorithm>
+
+#include "src/base/macros.h"
+#include "src/execution/isolate-utils-inl.h"
+#include "src/heap/heap.h"
+#include "src/objects/fixed-array-inl.h"
+#include "src/objects/instance-type-inl.h"
+#include "src/objects/js-collection-iterator.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/smi.h"
+#include "src/objects/swiss-name-dictionary.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/objects/swiss-name-dictionary-tq-inl.inc"
+
+CAST_ACCESSOR(SwissNameDictionary)
+OBJECT_CONSTRUCTORS_IMPL(SwissNameDictionary, HeapObject)
+
+swiss_table::ctrl_t* SwissNameDictionary::CtrlTable() {
+ return reinterpret_cast<ctrl_t*>(
+ field_address(CtrlTableStartOffset(Capacity())));
+}
+
+uint8_t* SwissNameDictionary::PropertyDetailsTable() {
+ return reinterpret_cast<uint8_t*>(
+ field_address(PropertyDetailsTableStartOffset(Capacity())));
+}
+
+int SwissNameDictionary::Capacity() {
+ return ReadField<int32_t>(CapacityOffset());
+}
+
+void SwissNameDictionary::SetCapacity(int capacity) {
+ DCHECK(IsValidCapacity(capacity));
+
+ WriteField<int32_t>(CapacityOffset(), capacity);
+}
+
+int SwissNameDictionary::NumberOfElements() {
+ return GetMetaTableField(kMetaTableElementCountOffset);
+}
+
+int SwissNameDictionary::NumberOfDeletedElements() {
+ return GetMetaTableField(kMetaTableDeletedElementCountOffset);
+}
+
+void SwissNameDictionary::SetNumberOfElements(int elements) {
+ SetMetaTableField(kMetaTableElementCountOffset, elements);
+}
+
+void SwissNameDictionary::SetNumberOfDeletedElements(int deleted_elements) {
+ SetMetaTableField(kMetaTableDeletedElementCountOffset, deleted_elements);
+}
+
+int SwissNameDictionary::UsedCapacity() {
+ return NumberOfElements() + NumberOfDeletedElements();
+}
+
+// static
+constexpr bool SwissNameDictionary::IsValidCapacity(int capacity) {
+ return capacity == 0 || (capacity >= kInitialCapacity &&
+ // Must be power of 2.
+ ((capacity & (capacity - 1)) == 0));
+}
+
+// static
+constexpr int SwissNameDictionary::DataTableSize(int capacity) {
+ return capacity * kTaggedSize * kDataTableEntryCount;
+}
+
+// static
+constexpr int SwissNameDictionary::CtrlTableSize(int capacity) {
+  // We add |kGroupWidth| to account for the copy of the first group that is
+  // maintained at the end of the control table.
+ return (capacity + kGroupWidth) * kOneByteSize;
+}
+
+// static
+constexpr int SwissNameDictionary::SizeFor(int capacity) {
+ CONSTEXPR_DCHECK(IsValidCapacity(capacity));
+ return PropertyDetailsTableStartOffset(capacity) + capacity;
+}
+
+// We use 7/8th as maximum load factor for non-special cases.
+// For 16-wide groups, that gives an average of two empty slots per group.
+// Similar to Abseil's CapacityToGrowth.
+// static
+constexpr int SwissNameDictionary::MaxUsableCapacity(int capacity) {
+ CONSTEXPR_DCHECK(IsValidCapacity(capacity));
+
+ if (Group::kWidth == 8 && capacity == 4) {
+ // With a group size of 16 we could fully utilize capacity 4 (there would be
+ // enough kEmpty entries in the ctrl table), but with group size 8 we cannot.
+ return 3;
+ }
+ return capacity - capacity / 8;
+}
+
+// Returns |at_least_space_for| * 8/7 for non-special cases. Similar to Abseil's
+// GrowthToLowerboundCapacity.
+// static
+int SwissNameDictionary::CapacityFor(int at_least_space_for) {
+ if (at_least_space_for <= 4) {
+ if (at_least_space_for == 0) {
+ return 0;
+ } else if (at_least_space_for < 4) {
+ return 4;
+ } else if (kGroupWidth == 16) {
+ DCHECK_EQ(4, at_least_space_for);
+ return 4;
+ } else if (kGroupWidth == 8) {
+ DCHECK_EQ(4, at_least_space_for);
+ return 8;
+ }
+ }
+
+ int non_normalized = at_least_space_for + at_least_space_for / 7;
+ return base::bits::RoundUpToPowerOfTwo32(non_normalized);
+}
+
+int SwissNameDictionary::EntryForEnumerationIndex(int enumeration_index) {
+ DCHECK_LT(enumeration_index, UsedCapacity());
+ return GetMetaTableField(kMetaTableEnumerationTableStartOffset +
+ enumeration_index);
+}
+
+void SwissNameDictionary::SetEntryForEnumerationIndex(int enumeration_index,
+ int entry) {
+ DCHECK_LT(enumeration_index, UsedCapacity());
+ DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(Capacity()));
+ DCHECK(IsFull(GetCtrl(entry)));
+
+ SetMetaTableField(kMetaTableEnumerationTableStartOffset + enumeration_index,
+ entry);
+}
+
+template <typename LocalIsolate>
+InternalIndex SwissNameDictionary::FindEntry(LocalIsolate* isolate,
+ Object key) {
+ Name name = Name::cast(key);
+ DCHECK(name.IsUniqueName());
+ uint32_t hash = name.hash();
+
+ // We probe the hash table in groups of |kGroupWidth| buckets. One bucket
+ // corresponds to a 1-byte entry in the control table.
+ // Each group can be uniquely identified by the index of its first bucket,
+ // which must be a value between 0 (inclusive) and Capacity() (exclusive).
+ // Note that logically, groups wrap around after index Capacity() - 1. This
+ // means that probing the group starting at, for example, index Capacity() - 1
+ // means probing CtrlTable()[Capacity() - 1] followed by CtrlTable()[0] to
+ // CtrlTable()[6], assuming a group width of 8. However, in memory, this is
+ // achieved by maintaining an additional |kGroupWidth| bytes after the first
+ // Capacity() entries of the control table. These contain a copy of the first
+ // min(Capacity(), kGroupWidth) entries of the control table. If Capacity() <
+ // |kGroupWidth|, then the remaining |kGroupWidth| - Capacity() control bytes
+ // are left as |kEmpty|.
+ // This means that actually, probing the group starting
+ // at index Capacity() - 1 is achieved by probing CtrlTable()[Capacity() - 1],
+ // followed by CtrlTable()[Capacity()] to CtrlTable()[Capacity() + 7].
+
+ ctrl_t* ctrl = CtrlTable();
+ auto seq = probe(hash, Capacity());
+ // At this point, seq.offset() denotes the index of the first bucket in the
+ // first group to probe. Note that this doesn't have to be divisible by
+ // |kGroupWidth|, but can have any value between 0 (inclusive) and Capacity()
+ // (exclusive).
+ while (true) {
+ Group g{ctrl + seq.offset()};
+ for (int i : g.Match(swiss_table::H2(hash))) {
+ int candidate_entry = seq.offset(i);
+ Object candidate_key = KeyAt(candidate_entry);
+ // This key matching is SwissNameDictionary specific!
+ if (candidate_key == key) return InternalIndex(candidate_entry);
+ }
+ if (g.MatchEmpty()) return InternalIndex::NotFound();
+
+ // The following selects the next group to probe. Note that seq.offset()
+ // always advances by a multiple of |kGroupWidth|, modulo Capacity(). This
+ // is done in a way such that we visit Capacity() / |kGroupWidth|
+ // non-overlapping (!) groups before we would visit the same group (or
+ // bucket) again.
+ seq.next();
+
+ // If the following DCHECK weren't true, we would have probed all Capacity()
+ // different buckets without finding one containing |kEmpty| (which would
+ // have triggered the g.MatchEmpty() check above). This must not be the
+ // case because the maximum load factor of 7/8 guarantees that there must
+ // always remain empty buckets.
+ //
+ // The only exception from this rule are small tables, where 2 * Capacity()
+ // < |kGroupWidth|, in which case all Capacity() entries can be filled
+ // without leaving empty buckets. The layout of the control
+ // table guarantees that after the first Capacity() entries of the control
+ // table, the control table contains a copy of those first Capacity()
+ // entries, followed by kGroupWidth - 2 * Capacity() entries containing
+ // |kEmpty|. This guarantees that the g.MatchEmpty() check above will
+ // always trigger if the element wasn't found, correctly preventing us from
+ // probing more than one group in this special case.
+ DCHECK_LT(seq.index(), Capacity());
+ }
+}
+
+template <typename LocalIsolate>
+InternalIndex SwissNameDictionary::FindEntry(LocalIsolate* isolate,
+ Handle<Object> key) {
+ return FindEntry(isolate, *key);
+}
+
+Object SwissNameDictionary::LoadFromDataTable(int entry, int data_offset) {
+ return LoadFromDataTable(GetIsolateForPtrCompr(*this), entry, data_offset);
+}
+
+Object SwissNameDictionary::LoadFromDataTable(IsolateRoot isolate, int entry,
+ int data_offset) {
+ DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(Capacity()));
+ int offset = DataTableStartOffset() +
+ (entry * kDataTableEntryCount + data_offset) * kTaggedSize;
+ return TaggedField<Object>::Relaxed_Load(isolate, *this, offset);
+}
+
+void SwissNameDictionary::StoreToDataTable(int entry, int data_offset,
+ Object data) {
+ DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(Capacity()));
+
+ int offset = DataTableStartOffset() +
+ (entry * kDataTableEntryCount + data_offset) * kTaggedSize;
+
+ RELAXED_WRITE_FIELD(*this, offset, data);
+ WRITE_BARRIER(*this, offset, data);
+}
+
+void SwissNameDictionary::StoreToDataTableNoBarrier(int entry, int data_offset,
+ Object data) {
+ DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(Capacity()));
+
+ int offset = DataTableStartOffset() +
+ (entry * kDataTableEntryCount + data_offset) * kTaggedSize;
+
+ RELAXED_WRITE_FIELD(*this, offset, data);
+}
+
+void SwissNameDictionary::ClearDataTableEntry(Isolate* isolate, int entry) {
+ ReadOnlyRoots roots(isolate);
+
+ StoreToDataTable(entry, kDataTableKeyEntryIndex, roots.the_hole_value());
+ StoreToDataTable(entry, kDataTableValueEntryIndex, roots.the_hole_value());
+}
+
+void SwissNameDictionary::ValueAtPut(int entry, Object value) {
+ DCHECK(!value.IsTheHole());
+ StoreToDataTable(entry, kDataTableValueEntryIndex, value);
+}
+
+void SwissNameDictionary::ValueAtPut(InternalIndex entry, Object value) {
+ ValueAtPut(entry.as_int(), value);
+}
+
+void SwissNameDictionary::SetKey(int entry, Object key) {
+ DCHECK(!key.IsTheHole());
+ StoreToDataTable(entry, kDataTableKeyEntryIndex, key);
+}
+
+void SwissNameDictionary::DetailsAtPut(int entry, PropertyDetails details) {
+ DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(Capacity()));
+ uint8_t encoded_details = details.ToByte();
+ PropertyDetailsTable()[entry] = encoded_details;
+}
+
+void SwissNameDictionary::DetailsAtPut(InternalIndex entry,
+ PropertyDetails details) {
+ DetailsAtPut(entry.as_int(), details);
+}
+
+Object SwissNameDictionary::KeyAt(int entry) {
+ return LoadFromDataTable(entry, kDataTableKeyEntryIndex);
+}
+
+Object SwissNameDictionary::KeyAt(InternalIndex entry) {
+ return KeyAt(entry.as_int());
+}
+
+Name SwissNameDictionary::NameAt(InternalIndex entry) {
+ return Name::cast(KeyAt(entry));
+}
+
+// This version can be called on empty buckets.
+Object SwissNameDictionary::ValueAtRaw(int entry) {
+ return LoadFromDataTable(entry, kDataTableValueEntryIndex);
+}
+
+Object SwissNameDictionary::ValueAt(InternalIndex entry) {
+ DCHECK(IsFull(GetCtrl(entry.as_int())));
+ return ValueAtRaw(entry.as_int());
+}
+
+PropertyDetails SwissNameDictionary::DetailsAt(int entry) {
+ // GetCtrl(entry) does a bounds check for |entry| value.
+ DCHECK(IsFull(GetCtrl(entry)));
+
+ uint8_t encoded_details = PropertyDetailsTable()[entry];
+ return PropertyDetails::FromByte(encoded_details);
+}
+
+PropertyDetails SwissNameDictionary::DetailsAt(InternalIndex entry) {
+ return DetailsAt(entry.as_int());
+}
+
+swiss_table::ctrl_t SwissNameDictionary::GetCtrl(int entry) {
+ DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(Capacity()));
+
+ return CtrlTable()[entry];
+}
+
+void SwissNameDictionary::SetCtrl(int entry, ctrl_t h) {
+ int capacity = Capacity();
+ DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(capacity));
+
+ ctrl_t* ctrl = CtrlTable();
+ ctrl[entry] = h;
+
+ // The ctrl table contains a copy of the first group (i.e., the group starting
+ // at entry 0) after the first |capacity| entries of the ctrl table. This
+ // means that the ctrl table always has size |capacity| + |kGroupWidth|.
+ // However, note that we may have |capacity| < |kGroupWidth|. For example, if
+ // Capacity() == 8 and |kGroupWidth| == 16, then ctrl[0] is copied to ctrl[8],
+ // ctrl[1] to ctrl[9], etc. In this case, ctrl[16] to ctrl[23] remain unused,
+ // which means that their values are always Ctrl::kEmpty.
+ // We achieve the necessary copying without branching here using some bit
+ // magic: We set {copy_entry = entry} in those cases where we don't actually
+ // have to perform a copy (meaning that we just repeat the {ctrl[entry] = h}
+ // from above). If we do need to do some actual copying, we set {copy_entry =
+ // Capacity() + entry}.
+
+ int mask = capacity - 1;
+ int copy_entry =
+ ((entry - Group::kWidth) & mask) + 1 + ((Group::kWidth - 1) & mask);
+ DCHECK_IMPLIES(entry < static_cast<int>(Group::kWidth),
+ copy_entry == capacity + entry);
+ DCHECK_IMPLIES(entry >= static_cast<int>(Group::kWidth), copy_entry == entry);
+ ctrl[copy_entry] = h;
+}
+
+void SwissNameDictionary::SetMetaTableField(int field_index, int value) {
+ // See the STATIC_ASSERTs on |kMax1ByteMetaTableCapacity| and
+ // |kMax2ByteMetaTableCapacity| in the .cc file for an explanation of these
+ // constants.
+ int capacity = Capacity();
+ ByteArray meta_table = this->meta_table();
+ if (capacity <= kMax1ByteMetaTableCapacity) {
+ SetMetaTableField<uint8_t>(meta_table, field_index, value);
+ } else if (capacity <= kMax2ByteMetaTableCapacity) {
+ SetMetaTableField<uint16_t>(meta_table, field_index, value);
+ } else {
+ SetMetaTableField<uint32_t>(meta_table, field_index, value);
+ }
+}
+
+int SwissNameDictionary::GetMetaTableField(int field_index) {
+ // See the STATIC_ASSERTs on |kMax1ByteMetaTableCapacity| and
+ // |kMax2ByteMetaTableCapacity| in the .cc file for an explanation of these
+ // constants.
+ int capacity = Capacity();
+ ByteArray meta_table = this->meta_table();
+ if (capacity <= kMax1ByteMetaTableCapacity) {
+ return GetMetaTableField<uint8_t>(meta_table, field_index);
+ } else if (capacity <= kMax2ByteMetaTableCapacity) {
+ return GetMetaTableField<uint16_t>(meta_table, field_index);
+ } else {
+ return GetMetaTableField<uint32_t>(meta_table, field_index);
+ }
+}
+
+// static
+template <typename T>
+void SwissNameDictionary::SetMetaTableField(ByteArray meta_table,
+ int field_index, int value) {
+ STATIC_ASSERT((std::is_same<T, uint8_t>::value) ||
+ (std::is_same<T, uint16_t>::value) ||
+ (std::is_same<T, uint32_t>::value));
+ DCHECK_LE(value, std::numeric_limits<T>::max());
+ DCHECK_LT(meta_table.GetDataStartAddress() + field_index * sizeof(T),
+ meta_table.GetDataEndAddress());
+ T* raw_data = reinterpret_cast<T*>(meta_table.GetDataStartAddress());
+ raw_data[field_index] = value;
+}
+
+// static
+template <typename T>
+int SwissNameDictionary::GetMetaTableField(ByteArray meta_table,
+ int field_index) {
+ STATIC_ASSERT((std::is_same<T, uint8_t>::value) ||
+ (std::is_same<T, uint16_t>::value) ||
+ (std::is_same<T, uint32_t>::value));
+ DCHECK_LT(meta_table.GetDataStartAddress() + field_index * sizeof(T),
+ meta_table.GetDataEndAddress());
+ T* raw_data = reinterpret_cast<T*>(meta_table.GetDataStartAddress());
+ return raw_data[field_index];
+}
+
+constexpr int SwissNameDictionary::MetaTableSizePerEntryFor(int capacity) {
+ CONSTEXPR_DCHECK(IsValidCapacity(capacity));
+
+ // See the STATIC_ASSERTs on |kMax1ByteMetaTableCapacity| and
+ // |kMax2ByteMetaTableCapacity| in the .cc file for an explanation of these
+ // constants.
+ if (capacity <= kMax1ByteMetaTableCapacity) {
+ return sizeof(uint8_t);
+ } else if (capacity <= kMax2ByteMetaTableCapacity) {
+ return sizeof(uint16_t);
+ } else {
+ return sizeof(uint32_t);
+ }
+}
+
+constexpr int SwissNameDictionary::MetaTableSizeFor(int capacity) {
+ CONSTEXPR_DCHECK(IsValidCapacity(capacity));
+
+ int per_entry_size = MetaTableSizePerEntryFor(capacity);
+
+ // The enumeration table only needs to have as many slots as there can be
+ // present + deleted entries in the hash table (= maximum load factor *
+ // capacity). Two more slots are used to store the number of present and deleted
+ // entries.
+ return per_entry_size * (MaxUsableCapacity(capacity) + 2);
+}
+
+bool SwissNameDictionary::IsKey(ReadOnlyRoots roots, Object key_candidate) {
+ return key_candidate != roots.the_hole_value();
+}
+
+bool SwissNameDictionary::ToKey(ReadOnlyRoots roots, int entry,
+ Object* out_key) {
+ Object k = KeyAt(entry);
+ if (!IsKey(roots, k)) return false;
+ *out_key = k;
+ return true;
+}
+
+bool SwissNameDictionary::ToKey(ReadOnlyRoots roots, InternalIndex entry,
+ Object* out_key) {
+ return ToKey(roots, entry.as_int(), out_key);
+}
+
+template <typename LocalIsolate>
+void SwissNameDictionary::Initialize(LocalIsolate* isolate,
+ ByteArray meta_table, int capacity) {
+ DCHECK(IsValidCapacity(capacity));
+ DisallowHeapAllocation no_gc;
+ ReadOnlyRoots roots(isolate);
+
+ SetCapacity(capacity);
+ SetHash(PropertyArray::kNoHashSentinel);
+
+ memset(CtrlTable(), Ctrl::kEmpty, CtrlTableSize(capacity));
+
+ MemsetTagged(RawField(DataTableStartOffset()), roots.the_hole_value(),
+ capacity * kDataTableEntryCount);
+
+ set_meta_table(meta_table);
+
+ SetNumberOfElements(0);
+ SetNumberOfDeletedElements(0);
+
+ // We leave the enumeration table and the PropertyDetails table uninitialized.
+}
+
+SwissNameDictionary::IndexIterator::IndexIterator(
+ Handle<SwissNameDictionary> dict, int start)
+ : enum_index_{start}, dict_{dict} {
+ if (!COMPRESS_POINTERS_BOOL && dict.is_null()) {
+ used_capacity_ = 0;
+ } else {
+ used_capacity_ = dict->UsedCapacity();
+ }
+}
+
+SwissNameDictionary::IndexIterator&
+SwissNameDictionary::IndexIterator::operator++() {
+ DCHECK_LT(enum_index_, used_capacity_);
+ ++enum_index_;
+ return *this;
+}
+
+bool SwissNameDictionary::IndexIterator::operator==(
+ const SwissNameDictionary::IndexIterator& b) const {
+ DCHECK_LE(enum_index_, used_capacity_);
+ DCHECK_LE(b.enum_index_, used_capacity_);
+ DCHECK(dict_.equals(b.dict_));
+
+ return this->enum_index_ == b.enum_index_;
+}
+
+bool SwissNameDictionary::IndexIterator::operator!=(
+ const IndexIterator& b) const {
+ return !(*this == b);
+}
+
+InternalIndex SwissNameDictionary::IndexIterator::operator*() {
+ DCHECK_LE(enum_index_, used_capacity_);
+
+ if (enum_index_ == used_capacity_) return InternalIndex::NotFound();
+
+ return InternalIndex(dict_->EntryForEnumerationIndex(enum_index_));
+}
+
+SwissNameDictionary::IndexIterable::IndexIterable(
+ Handle<SwissNameDictionary> dict)
+ : dict_{dict} {}
+
+SwissNameDictionary::IndexIterator SwissNameDictionary::IndexIterable::begin() {
+ return IndexIterator(dict_, 0);
+}
+
+SwissNameDictionary::IndexIterator SwissNameDictionary::IndexIterable::end() {
+ if (!COMPRESS_POINTERS_BOOL && dict_.is_null()) {
+ return IndexIterator(dict_, 0);
+ } else {
+ DCHECK(!dict_.is_null());
+ return IndexIterator(dict_, dict_->UsedCapacity());
+ }
+}
+
+SwissNameDictionary::IndexIterable
+SwissNameDictionary::IterateEntriesOrdered() {
+ // If we are supposed to iterate the empty dictionary (which is non-writable)
+ // and pointer compression is disabled, we have no simple way to get the
+ // isolate, which we would need to create a handle.
+ // TODO(emrich): Consider always using roots.empty_swiss_dictionary_handle()
+ // in the condition once this function gets Isolate as a parameter in order to
+ // avoid empty dict checks.
+ if (!COMPRESS_POINTERS_BOOL && Capacity() == 0)
+ return IndexIterable(Handle<SwissNameDictionary>::null());
+
+ Isolate* isolate;
+ GetIsolateFromHeapObject(*this, &isolate);
+ DCHECK_NE(isolate, nullptr);
+ return IndexIterable(handle(*this, isolate));
+}
+
+SwissNameDictionary::IndexIterable SwissNameDictionary::IterateEntries() {
+ return IterateEntriesOrdered();
+}
+
+void SwissNameDictionary::SetHash(int32_t hash) {
+ WriteField(PrefixOffset(), hash);
+}
+
+int SwissNameDictionary::Hash() { return ReadField<int32_t>(PrefixOffset()); }
+
+// static
+constexpr int SwissNameDictionary::MaxCapacity() {
+ int const_size =
+ DataTableStartOffset() + ByteArray::kHeaderSize +
+ // Size for present and deleted element count at max capacity:
+ 2 * sizeof(uint32_t);
+ int per_entry_size =
+ // size of data table entries:
+ kDataTableEntryCount * kTaggedSize +
+ // ctrl table entry size:
+ kOneByteSize +
+ // PropertyDetails table entry size:
+ kOneByteSize +
+ // Enumeration table entry size at maximum capacity:
+ sizeof(uint32_t);
+
+ int result = (FixedArray::kMaxSize - const_size) / per_entry_size;
+ CONSTEXPR_DCHECK(result <= Smi::kMaxValue);
+
+ return result;
+}
+
+// static
+constexpr int SwissNameDictionary::PrefixOffset() {
+ return HeapObject::kHeaderSize;
+}
+
+// static
+constexpr int SwissNameDictionary::CapacityOffset() {
+ return PrefixOffset() + sizeof(uint32_t);
+}
+
+// static
+constexpr int SwissNameDictionary::MetaTablePointerOffset() {
+ return CapacityOffset() + sizeof(int32_t);
+}
+
+// static
+constexpr int SwissNameDictionary::DataTableStartOffset() {
+ return MetaTablePointerOffset() + kTaggedSize;
+}
+
+// static
+constexpr int SwissNameDictionary::DataTableEndOffset(int capacity) {
+ return CtrlTableStartOffset(capacity);
+}
+
+// static
+constexpr int SwissNameDictionary::CtrlTableStartOffset(int capacity) {
+ return DataTableStartOffset() + DataTableSize(capacity);
+}
+
+// static
+constexpr int SwissNameDictionary::PropertyDetailsTableStartOffset(
+ int capacity) {
+ return CtrlTableStartOffset(capacity) + CtrlTableSize(capacity);
+}
+
+// static
+bool SwissNameDictionary::IsEmpty(ctrl_t c) { return c == Ctrl::kEmpty; }
+
+// static
+bool SwissNameDictionary::IsFull(ctrl_t c) {
+ STATIC_ASSERT(Ctrl::kEmpty < 0);
+ STATIC_ASSERT(Ctrl::kDeleted < 0);
+ STATIC_ASSERT(Ctrl::kSentinel < 0);
+ return c >= 0;
+}
+
+// static
+bool SwissNameDictionary::IsDeleted(ctrl_t c) { return c == Ctrl::kDeleted; }
+
+// static
+bool SwissNameDictionary::IsEmptyOrDeleted(ctrl_t c) {
+ STATIC_ASSERT(Ctrl::kDeleted < Ctrl::kSentinel);
+ STATIC_ASSERT(Ctrl::kEmpty < Ctrl::kSentinel);
+ STATIC_ASSERT(Ctrl::kSentinel < 0);
+ return c < Ctrl::kSentinel;
+}
+
+// static
+swiss_table::ProbeSequence<SwissNameDictionary::kGroupWidth>
+SwissNameDictionary::probe(uint32_t hash, int capacity) {
+ // If |capacity| is 0, we must produce 1 here, such that the - 1 below
+ // yields 0, which is the correct modulo mask for a table of capacity 0.
+ int non_zero_capacity = capacity | (capacity == 0);
+ return swiss_table::ProbeSequence<SwissNameDictionary::kGroupWidth>(
+ swiss_table::H1(hash), static_cast<uint32_t>(non_zero_capacity - 1));
+}
+
+ACCESSORS_CHECKED2(SwissNameDictionary, meta_table, ByteArray,
+ MetaTablePointerOffset(), true,
+ value.length() >= kMetaTableEnumerationTableStartOffset)
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_SWISS_NAME_DICTIONARY_INL_H_
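A note on the branch-free mirror update in SetCtrl above: the copy_entry formula is easy to misread, so here is a small standalone check of it under an assumed group width of 16 (the SSE2 case). Only the formula itself is taken from the patch; everything else is scaffolding for the example.

// copy_entry = ((entry - kWidth) & mask) + 1 + ((kWidth - 1) & mask)
// For entry < kWidth this yields capacity + entry (write the mirror copy at
// the end of the ctrl table); for entry >= kWidth it yields entry again
// (no mirror copy needed, the original write already covered it).
#include <cassert>

constexpr int kWidth = 16;  // assumed Group::kWidth

constexpr int CopyEntry(int capacity, int entry) {
  int mask = capacity - 1;  // capacity is a power of two here
  return ((entry - kWidth) & mask) + 1 + ((kWidth - 1) & mask);
}

int main() {
  // Capacity 8 (< kWidth): every entry is mirrored into the trailing region.
  for (int e = 0; e < 8; ++e) assert(CopyEntry(8, e) == 8 + e);
  // Capacity 32 (>= kWidth): only the first kWidth entries need a mirror copy.
  for (int e = 0; e < 16; ++e) assert(CopyEntry(32, e) == 32 + e);
  for (int e = 16; e < 32; ++e) assert(CopyEntry(32, e) == e);
  return 0;
}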
diff --git a/deps/v8/src/objects/swiss-name-dictionary.cc b/deps/v8/src/objects/swiss-name-dictionary.cc
new file mode 100644
index 0000000000..89053d1818
--- /dev/null
+++ b/deps/v8/src/objects/swiss-name-dictionary.cc
@@ -0,0 +1,37 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Including only the -inl.h file directly makes the linter complain.
+#include "src/objects/swiss-name-dictionary.h"
+
+#include "src/objects/swiss-name-dictionary-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// The largest value we ever have to store in the enumeration table is
+// Capacity() - 1. The largest value we ever have to store for the present or
+// deleted element count is MaxUsableCapacity(Capacity()). All data in the
+// meta table is unsigned. Using this, we verify the values of the constants
+// |kMax1ByteMetaTableCapacity| and |kMax2ByteMetaTableCapacity|.
+STATIC_ASSERT(SwissNameDictionary::kMax1ByteMetaTableCapacity - 1 <=
+ std::numeric_limits<uint8_t>::max());
+STATIC_ASSERT(SwissNameDictionary::MaxUsableCapacity(
+ SwissNameDictionary::kMax1ByteMetaTableCapacity) <=
+ std::numeric_limits<uint8_t>::max());
+STATIC_ASSERT(SwissNameDictionary::kMax2ByteMetaTableCapacity - 1 <=
+ std::numeric_limits<uint16_t>::max());
+STATIC_ASSERT(SwissNameDictionary::MaxUsableCapacity(
+ SwissNameDictionary::kMax2ByteMetaTableCapacity) <=
+ std::numeric_limits<uint16_t>::max());
+
+template void SwissNameDictionary::Initialize(Isolate* isolate,
+ ByteArray meta_table,
+ int capacity);
+template void SwissNameDictionary::Initialize(LocalIsolate* isolate,
+ ByteArray meta_table,
+ int capacity);
+
+} // namespace internal
+} // namespace v8
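The STATIC_ASSERTs above are what let the meta table pick its per-slot width purely from the table's capacity. A minimal sketch of that selection, with the 256 and 2^16 thresholds taken from the header added below; the function name is local to this example.

// Per-slot width of the meta table, mirroring MetaTableSizePerEntryFor:
// every value stored there (element counts and bucket indices) must fit the
// chosen unsigned integer type, which the STATIC_ASSERTs above guarantee.
#include <cstddef>
#include <cstdint>

constexpr int kMax1ByteMetaTableCapacity = 1 << 8;   // 256
constexpr int kMax2ByteMetaTableCapacity = 1 << 16;  // 65536

constexpr size_t MetaTableEntrySize(int capacity) {
  return capacity <= kMax1ByteMetaTableCapacity
             ? sizeof(uint8_t)
             : capacity <= kMax2ByteMetaTableCapacity ? sizeof(uint16_t)
                                                      : sizeof(uint32_t);
}

static_assert(MetaTableEntrySize(256) == 1, "bucket indices 0..255 fit a byte");
static_assert(MetaTableEntrySize(512) == 2, "");
static_assert(MetaTableEntrySize(1 << 17) == 4, "");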
diff --git a/deps/v8/src/objects/swiss-name-dictionary.h b/deps/v8/src/objects/swiss-name-dictionary.h
new file mode 100644
index 0000000000..40466c441c
--- /dev/null
+++ b/deps/v8/src/objects/swiss-name-dictionary.h
@@ -0,0 +1,284 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_SWISS_NAME_DICTIONARY_H_
+#define V8_OBJECTS_SWISS_NAME_DICTIONARY_H_
+
+#include <cstdint>
+
+#include "src/base/export-template.h"
+#include "src/common/globals.h"
+#include "src/objects/fixed-array.h"
+#include "src/objects/internal-index.h"
+#include "src/objects/js-objects.h"
+#include "src/objects/swiss-hash-table-helpers.h"
+#include "src/roots/roots.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// A property backing store based on Swiss Tables/Abseil's flat_hash_map. The
+// implementation is heavily based on Abseil's raw_hash_set.h.
+//
+// Memory layout (see below for detailed description of parts):
+// Prefix: [table type dependent part, can have 0 size]
+// Capacity: 4 bytes, raw int32_t
+// Meta table pointer: kTaggedSize bytes
+// Data table: 2 * |capacity| * |kTaggedSize| bytes
+// Ctrl table: |capacity| + |kGroupWidth| uint8_t entries
+// PropertyDetails table: |capacity| uint8_t entries
+//
+// Note that because of |kInitialCapacity| == 4 there is no need for padding.
+//
+// Description of parts directly contained in SwissNameDictionary allocation:
+// Prefix:
+// In case of SwissNameDictionary:
+// identity hash: 4 bytes, raw int32_t
+// Meta table pointer: kTaggedSize bytes.
+// See below for explanation of the meta table.
+// Data table:
+// For each logical bucket of the hash table, contains the corresponding key
+// and value.
+// Ctrl table:
+// The control table is used to implement a Swiss Table: Each byte is either
+// Ctrl::kEmpty, Ctrl::kDeleted, or in case of a bucket denoting a present
+// entry in the hash table, the 7 lowest bits of the key's hash. The first
+// |capacity| entries are the actual control table. The additional
+// |kGroupWidth| bytes contain a copy of the first min(capacity,
+// kGroupWidth) bytes of the table.
+// PropertyDetails table:
+// Each byte contains the PropertyDetails for the corresponding bucket of
+// the ctrl table. Entries may contain uninitialized data if the corresponding
+// bucket hasn't been used before.
+//
+// Meta table:
+// The meta table (not to be confused with the control table used in any
+// Swiss Table design!) is a separate ByteArray. Here, the "X" in "uintX_t"
+// depends on the capacity of the swiss table. For capacities <= 256 we have X
+// = 8, for 256 < |capacity| <= 2^16 we have X = 16, and otherwise X = 32 (see
+// MetaTableSizePerEntryFor). It contains the following data:
+// Number of Entries: uintX_t.
+// Number of Deleted Entries: uintX_t.
+// Enumeration table: max_load_factor * Capacity() entries of type uintX_t:
+// The i-th entry in the enumeration table
+// contains the number of the bucket representing the i-th entry of the
+// table in enumeration order. Entries may contain uninitialized data if the
+// corresponding bucket hasn't been used before.
+class SwissNameDictionary : public HeapObject {
+ public:
+ using Group = swiss_table::Group;
+
+ template <typename LocalIsolate>
+ inline InternalIndex FindEntry(LocalIsolate* isolate, Object key);
+
+ // This is to make the interfaces of NameDictionary::FindEntry and
+ // OrderedNameDictionary::FindEntry compatible.
+ // TODO(emrich) clean this up: NameDictionary uses Handle<Object>
+ // for FindEntry keys due to its Key typedef, but that's also used
+ // for adding, where we do need handles.
+ template <typename LocalIsolate>
+ inline InternalIndex FindEntry(LocalIsolate* isolate, Handle<Object> key);
+
+ static inline bool IsKey(ReadOnlyRoots roots, Object key_candidate);
+ inline bool ToKey(ReadOnlyRoots roots, InternalIndex entry, Object* out_key);
+
+ inline Object KeyAt(InternalIndex entry);
+ inline Name NameAt(InternalIndex entry);
+ inline Object ValueAt(InternalIndex entry);
+ inline PropertyDetails DetailsAt(InternalIndex entry);
+
+ inline void ValueAtPut(InternalIndex entry, Object value);
+ inline void DetailsAtPut(InternalIndex entry, PropertyDetails value);
+
+ inline int NumberOfElements();
+ inline int NumberOfDeletedElements();
+
+ inline int Capacity();
+ inline int UsedCapacity();
+
+ template <typename LocalIsolate>
+ void Initialize(LocalIsolate* isolate, ByteArray meta_table, int capacity);
+
+ inline void SetHash(int hash);
+ inline int Hash();
+
+ class IndexIterator {
+ public:
+ inline IndexIterator(Handle<SwissNameDictionary> dict, int start);
+
+ inline IndexIterator& operator++();
+
+ inline bool operator==(const IndexIterator& b) const;
+ inline bool operator!=(const IndexIterator& b) const;
+
+ inline InternalIndex operator*();
+
+ private:
+ int used_capacity_;
+ int enum_index_;
+
+ // This may be an empty handle, but only if the capacity of the table is
+ // 0 and pointer compression is disabled.
+ Handle<SwissNameDictionary> dict_;
+ };
+
+ class IndexIterable {
+ public:
+ inline explicit IndexIterable(Handle<SwissNameDictionary> dict);
+
+ inline IndexIterator begin();
+ inline IndexIterator end();
+
+ private:
+ // This may be an empty handle, but only if the capacity of the table is
+ // 0 and pointer compression is disabled.
+ Handle<SwissNameDictionary> dict_;
+ };
+
+ inline IndexIterable IterateEntriesOrdered();
+ inline IndexIterable IterateEntries();
+
+ // For the given enumeration index, returns the entry (= bucket of the Swiss
+ // Table) containing the data for the mapping with that enumeration index.
+ // The returned bucket may be deleted.
+ inline int EntryForEnumerationIndex(int enumeration_index);
+
+ inline static constexpr bool IsValidCapacity(int capacity);
+ inline static int CapacityFor(int at_least_space_for);
+
+ // Given a capacity, how much of it can we fill before resizing?
+ inline static constexpr int MaxUsableCapacity(int capacity);
+
+ // The maximum allowed capacity for any SwissNameDictionary.
+ inline static constexpr int MaxCapacity();
+
+ // Returns total size in bytes required for a table of given capacity.
+ inline static constexpr int SizeFor(int capacity);
+
+ inline static constexpr int MetaTableSizePerEntryFor(int capacity);
+ inline static constexpr int MetaTableSizeFor(int capacity);
+
+ inline static constexpr int DataTableSize(int capacity);
+ inline static constexpr int CtrlTableSize(int capacity);
+
+ // Indicates that IterateEntries() returns entries ordered.
+ static constexpr bool kIsOrderedDictionaryType = true;
+
+ static const int kGroupWidth = Group::kWidth;
+
+ class BodyDescriptor;
+
+ // Note that 0 is also a valid capacity. Changing this value to a smaller one
+ // may make some padding necessary in the data layout.
+ static constexpr int kInitialCapacity = kSwissNameDictionaryInitialCapacity;
+
+ // Defines how many kTaggedSize-sized values are associated with each entry
+ // in the data table.
+ static constexpr int kDataTableEntryCount = 2;
+ static constexpr int kDataTableKeyEntryIndex = 0;
+ static constexpr int kDataTableValueEntryIndex = kDataTableKeyEntryIndex + 1;
+
+ static constexpr int kMetaTableElementCountOffset = 0;
+ static constexpr int kMetaTableDeletedElementCountOffset = 1;
+ static constexpr int kMetaTableEnumerationTableStartOffset = 2;
+
+ // The maximum capacity of any SwissNameDictionary whose meta table can use 1
+ // byte per entry.
+ static constexpr int kMax1ByteMetaTableCapacity = (1 << 8);
+ // The maximum capacity of any SwissNameDictionary whose meta table can use 2
+ // bytes per entry.
+ static constexpr int kMax2ByteMetaTableCapacity = (1 << 16);
+
+ // TODO(v8:11388) We would like to use Torque-generated constants here, but
+ // those are currently incorrect.
+ // Offset into the overall table, starting at HeapObject standard fields,
+ // in bytes. This means that the map is stored at offset 0.
+ using Offset = int;
+ inline static constexpr Offset PrefixOffset();
+ inline static constexpr Offset CapacityOffset();
+ inline static constexpr Offset MetaTablePointerOffset();
+ inline static constexpr Offset DataTableStartOffset();
+ inline static constexpr Offset DataTableEndOffset(int capacity);
+ inline static constexpr Offset CtrlTableStartOffset(int capacity);
+ inline static constexpr Offset PropertyDetailsTableStartOffset(int capacity);
+
+#if VERIFY_HEAP
+ void SwissNameDictionaryVerify(Isolate* isolate, bool slow_checks);
+#endif
+ DECL_VERIFIER(SwissNameDictionary)
+ DECL_PRINTER(SwissNameDictionary)
+ DECL_CAST(SwissNameDictionary)
+ OBJECT_CONSTRUCTORS(SwissNameDictionary, HeapObject);
+
+ private:
+ using ctrl_t = swiss_table::ctrl_t;
+ using Ctrl = swiss_table::Ctrl;
+
+ // Returns table of byte-encoded PropertyDetails (without enumeration index
+ // stored in PropertyDetails).
+ inline uint8_t* PropertyDetailsTable();
+
+ // Sets key and value to the hole for the given entry.
+ inline void ClearDataTableEntry(Isolate* isolate, int entry);
+ inline void SetKey(int entry, Object key);
+
+ inline void DetailsAtPut(int entry, PropertyDetails value);
+ inline void ValueAtPut(int entry, Object value);
+
+ inline PropertyDetails DetailsAt(int entry);
+ inline Object ValueAtRaw(int entry);
+ inline Object KeyAt(int entry);
+
+ inline bool ToKey(ReadOnlyRoots roots, int entry, Object* out_key);
+
+ // Use |set_ctrl| for modifications whenever possible, since that function
+ // correctly maintains the copy of the first group at the end of the ctrl
+ // table.
+ inline ctrl_t* CtrlTable();
+
+ inline static bool IsEmpty(ctrl_t c);
+ inline static bool IsFull(ctrl_t c);
+ inline static bool IsDeleted(ctrl_t c);
+ inline static bool IsEmptyOrDeleted(ctrl_t c);
+
+ // Sets a control byte, taking the necessary copying of the first group
+ // into account.
+ inline void SetCtrl(int entry, ctrl_t h);
+ inline ctrl_t GetCtrl(int entry);
+
+ inline Object LoadFromDataTable(int entry, int data_offset);
+ inline Object LoadFromDataTable(IsolateRoot root, int entry, int data_offset);
+ inline void StoreToDataTable(int entry, int data_offset, Object data);
+ inline void StoreToDataTableNoBarrier(int entry, int data_offset,
+ Object data);
+
+ inline void SetCapacity(int capacity);
+ inline void SetNumberOfElements(int elements);
+ inline void SetNumberOfDeletedElements(int deleted_elements);
+
+ static inline swiss_table::ProbeSequence<Group::kWidth> probe(uint32_t hash,
+ int capacity);
+
+ // Sets that the entry with the given |enumeration_index| is stored at the
+ // given bucket of the data table.
+ inline void SetEntryForEnumerationIndex(int enumeration_index, int entry);
+
+ DECL_ACCESSORS(meta_table, ByteArray)
+ inline void SetMetaTableField(int field_index, int value);
+ inline int GetMetaTableField(int field_index);
+
+ template <typename T>
+ inline static void SetMetaTableField(ByteArray meta_table, int field_index,
+ int value);
+ template <typename T>
+ inline static int GetMetaTableField(ByteArray meta_table, int field_index);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_SWISS_NAME_DICTIONARY_H_
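To make the layout comment at the top of this header concrete, here is the offset chain worked through for a capacity-8 table. The numeric constants (8-byte tagged values, an 8-byte HeapObject header, group width 16) are assumptions for the example and vary with the build configuration (pointer compression, SSE2).

// Offsets for one assumed configuration:
//   kTaggedSize = 8, HeapObject header = 8 bytes, kGroupWidth = 16.
constexpr int kTaggedSize = 8;           // assumed (no pointer compression)
constexpr int kHeaderSize = 8;           // assumed HeapObject::kHeaderSize
constexpr int kGroupWidth = 16;          // assumed SSE2 group width
constexpr int kDataTableEntryCount = 2;  // key + value per bucket

constexpr int PrefixOffset() { return kHeaderSize; }                     // 8
constexpr int CapacityOffset() { return PrefixOffset() + 4; }            // 12
constexpr int MetaTablePointerOffset() { return CapacityOffset() + 4; }  // 16
constexpr int DataTableStartOffset() {
  return MetaTablePointerOffset() + kTaggedSize;                         // 24
}
constexpr int CtrlTableStartOffset(int capacity) {
  return DataTableStartOffset() + capacity * kDataTableEntryCount * kTaggedSize;
}
constexpr int PropertyDetailsTableStartOffset(int capacity) {
  return CtrlTableStartOffset(capacity) + (capacity + kGroupWidth);
}
constexpr int SizeFor(int capacity) {
  return PropertyDetailsTableStartOffset(capacity) + capacity;
}

static_assert(CtrlTableStartOffset(8) == 152, "24 + 8 * 2 * 8");
static_assert(PropertyDetailsTableStartOffset(8) == 176, "152 + 8 + 16");
static_assert(SizeFor(8) == 184, "a capacity-8 table occupies 184 bytes");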
diff --git a/deps/v8/src/objects/swiss-name-dictionary.tq b/deps/v8/src/objects/swiss-name-dictionary.tq
new file mode 100644
index 0000000000..575d8bab46
--- /dev/null
+++ b/deps/v8/src/objects/swiss-name-dictionary.tq
@@ -0,0 +1,15 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/swiss-name-dictionary.h'
+
+@noVerifier
+extern class SwissNameDictionary extends HeapObject {
+ hash: uint32;
+ const capacity: int32;
+ meta_table: ByteArray;
+ data_table[Convert<intptr>(capacity) * 2]: JSAny|TheHole;
+ ctrl_table[Convert<intptr>(capacity) + kSwissNameDictionaryGroupWidth]: uint8;
+ property_details_table[Convert<intptr>(capacity)]: uint8;
+}
diff --git a/deps/v8/src/objects/synthetic-module.cc b/deps/v8/src/objects/synthetic-module.cc
index 61051eff7d..451dcc5160 100644
--- a/deps/v8/src/objects/synthetic-module.cc
+++ b/deps/v8/src/objects/synthetic-module.cc
@@ -116,7 +116,27 @@ MaybeHandle<Object> SyntheticModule::Evaluate(Isolate* isolate,
}
module->SetStatus(kEvaluated);
- return Utils::OpenHandle(*result);
+
+ Handle<Object> result_from_callback = Utils::OpenHandle(*result);
+
+ if (FLAG_harmony_top_level_await) {
+ Handle<JSPromise> capability;
+ if (result_from_callback->IsJSPromise()) {
+ capability = Handle<JSPromise>::cast(result_from_callback);
+ } else {
+ // The host's evaluation steps should have returned a resolved Promise,
+ // but as an allowance to hosts that have not yet finished the migration
+ // to top-level await, create a Promise if the callback result didn't give
+ // us one.
+ capability = isolate->factory()->NewJSPromise();
+ JSPromise::Resolve(capability, isolate->factory()->undefined_value())
+ .ToHandleChecked();
+ }
+
+ module->set_top_level_capability(*capability);
+ }
+
+ return result_from_callback;
}
} // namespace internal
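Embedder-side view of the change above: once top-level await is on, a synthetic module's evaluation steps are expected to return an already-resolved promise, and V8 only creates one on the host's behalf as a migration allowance. A hedged sketch of such a callback against the public v8.h API; the export name and value are made up for the example.

#include <v8.h>

// Illustrative v8::Module::SyntheticModuleEvaluationSteps callback that sets
// one export and returns a resolved promise, which SyntheticModule::Evaluate
// now stores as the module's top-level capability.
v8::MaybeLocal<v8::Value> EvaluateMySyntheticModule(
    v8::Local<v8::Context> context, v8::Local<v8::Module> module) {
  v8::Isolate* isolate = context->GetIsolate();

  // Hypothetical export: "answer" -> 42.
  v8::Local<v8::String> name =
      v8::String::NewFromUtf8Literal(isolate, "answer");
  if (module
          ->SetSyntheticModuleExport(isolate, name,
                                     v8::Integer::New(isolate, 42))
          .IsNothing()) {
    return v8::MaybeLocal<v8::Value>();  // exception already scheduled
  }

  // Hand back a resolved promise; a host that still returns undefined gets
  // one created for it by the code added above.
  v8::Local<v8::Promise::Resolver> resolver;
  if (!v8::Promise::Resolver::New(context).ToLocal(&resolver)) {
    return v8::MaybeLocal<v8::Value>();
  }
  resolver->Resolve(context, v8::Undefined(isolate)).Check();
  return resolver->GetPromise();
}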
diff --git a/deps/v8/src/objects/tagged-field.h b/deps/v8/src/objects/tagged-field.h
index 1c96cc9d92..8560c54cc4 100644
--- a/deps/v8/src/objects/tagged-field.h
+++ b/deps/v8/src/objects/tagged-field.h
@@ -48,7 +48,7 @@ class TaggedField : public AllStatic {
int offset = 0);
static inline void Relaxed_Store(HeapObject host, T value);
- static void Relaxed_Store(HeapObject host, int offset, T value);
+ static inline void Relaxed_Store(HeapObject host, int offset, T value);
static inline T Acquire_Load(HeapObject host, int offset = 0);
static inline T Acquire_Load(IsolateRoot isolate, HeapObject host,
diff --git a/deps/v8/src/objects/templates.tq b/deps/v8/src/objects/templates.tq
index 564d3569dc..d26b6dd5b7 100644
--- a/deps/v8/src/objects/templates.tq
+++ b/deps/v8/src/objects/templates.tq
@@ -51,7 +51,7 @@ extern class FunctionTemplateInfo extends TemplateInfo {
// If any of the setters declared by DECL_RARE_ACCESSORS are used then a
// FunctionTemplateRareData will be stored here. Until then this contains
// undefined.
- rare_data: FunctionTemplateRareData|Undefined;
+ @acquireRead @releaseWrite rare_data: FunctionTemplateRareData|Undefined;
shared_function_info: SharedFunctionInfo|Undefined;
// Internal field to store a flag bitfield.
flag: SmiTagged<FunctionTemplateInfoFlags>;
diff --git a/deps/v8/src/objects/transitions-inl.h b/deps/v8/src/objects/transitions-inl.h
index f60f9bd55a..fbdde538be 100644
--- a/deps/v8/src/objects/transitions-inl.h
+++ b/deps/v8/src/objects/transitions-inl.h
@@ -217,7 +217,7 @@ void TransitionsAccessor::Reload() {
int TransitionsAccessor::Capacity() { return transitions().Capacity(); }
void TransitionsAccessor::Initialize() {
- raw_transitions_ = map_.raw_transitions(isolate_);
+ raw_transitions_ = map_.raw_transitions(isolate_, kAcquireLoad);
HeapObject heap_object;
if (raw_transitions_->IsSmi() || raw_transitions_->IsCleared()) {
encoding_ = kUninitialized;
diff --git a/deps/v8/src/objects/transitions.cc b/deps/v8/src/objects/transitions.cc
index 0b81ad3726..75ca763a1b 100644
--- a/deps/v8/src/objects/transitions.cc
+++ b/deps/v8/src/objects/transitions.cc
@@ -429,13 +429,14 @@ void TransitionsAccessor::SetMigrationTarget(Map migration_target) {
// sake.
if (encoding() != kUninitialized) return;
DCHECK(map_.is_deprecated());
- map_.set_raw_transitions(MaybeObject::FromObject(migration_target));
+ map_.set_raw_transitions(MaybeObject::FromObject(migration_target),
+ kReleaseStore);
MarkNeedsReload();
}
Map TransitionsAccessor::GetMigrationTarget() {
if (encoding() == kMigrationTarget) {
- return map_.raw_transitions()->cast<Map>();
+ return map_.raw_transitions(kAcquireLoad)->cast<Map>();
}
return Map();
}
@@ -449,7 +450,7 @@ void TransitionsAccessor::ReplaceTransitions(MaybeObject new_transitions) {
DCHECK(old_transitions != new_transitions->GetHeapObjectAssumeStrong());
#endif
}
- map_.set_raw_transitions(new_transitions);
+ map_.set_raw_transitions(new_transitions, kReleaseStore);
MarkNeedsReload();
}
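The kAcquireLoad / kReleaseStore parameters introduced above (and the @acquireRead / @releaseWrite Torque annotations earlier in this patch) request the standard release-publish / acquire-observe pairing. A generic, standalone illustration with std::atomic, independent of V8's TaggedField machinery:

// Release-store on the writer publishes the fully initialized object;
// acquire-load on the reader guarantees the payload is visible once the
// pointer is observed.
#include <atomic>
#include <cassert>
#include <thread>

struct Transitions {
  int payload = 0;
};

std::atomic<Transitions*> raw_transitions{nullptr};

void Writer() {
  auto* t = new Transitions{42};  // initialize before publishing
  raw_transitions.store(t, std::memory_order_release);  // "kReleaseStore"
}

void Reader() {
  Transitions* t;
  while ((t = raw_transitions.load(std::memory_order_acquire)) == nullptr) {
    // spin until published ("kAcquireLoad")
  }
  assert(t->payload == 42);  // guaranteed to see the initialized payload
  delete t;
}

int main() {
  std::thread writer(Writer);
  std::thread reader(Reader);
  writer.join();
  reader.join();
  return 0;
}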
diff --git a/deps/v8/src/objects/value-serializer.cc b/deps/v8/src/objects/value-serializer.cc
index dc356d966a..4ecf483298 100644
--- a/deps/v8/src/objects/value-serializer.cc
+++ b/deps/v8/src/objects/value-serializer.cc
@@ -1667,6 +1667,9 @@ MaybeHandle<JSRegExp> ValueDeserializer::ReadJSRegExp() {
if (!FLAG_enable_experimental_regexp_engine) {
bad_flags_mask |= JSRegExp::kLinear;
}
+ if (!FLAG_harmony_regexp_match_indices) {
+ bad_flags_mask |= JSRegExp::kHasIndices;
+ }
if ((raw_flags & bad_flags_mask) ||
!JSRegExp::New(isolate_, pattern, static_cast<JSRegExp::Flags>(raw_flags))
.ToHandle(&regexp)) {
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index fd0dcb79c1..ec615c9c4f 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -214,6 +214,7 @@ class ParserBase {
using BreakableStatementT = typename Types::BreakableStatement;
using ClassLiteralPropertyT = typename Types::ClassLiteralProperty;
using ClassPropertyListT = typename Types::ClassPropertyList;
+ using ClassStaticElementListT = typename Types::ClassStaticElementList;
using ExpressionT = typename Types::Expression;
using ExpressionListT = typename Types::ExpressionList;
using FormalParametersT = typename Types::FormalParameters;
@@ -589,40 +590,44 @@ class ParserBase {
: extends(parser->impl()->NullExpression()),
public_members(parser->impl()->NewClassPropertyList(4)),
private_members(parser->impl()->NewClassPropertyList(4)),
- static_fields(parser->impl()->NewClassPropertyList(4)),
+ static_elements(parser->impl()->NewClassStaticElementList(4)),
instance_fields(parser->impl()->NewClassPropertyList(4)),
constructor(parser->impl()->NullExpression()),
has_seen_constructor(false),
has_name_static_property(false),
has_static_computed_names(false),
- has_static_class_fields(false),
+ has_static_elements(false),
has_static_private_methods(false),
+ has_static_blocks(false),
has_instance_members(false),
requires_brand(false),
is_anonymous(false),
has_private_methods(false),
- static_fields_scope(nullptr),
+ static_elements_scope(nullptr),
instance_members_scope(nullptr),
computed_field_count(0) {}
ExpressionT extends;
ClassPropertyListT public_members;
ClassPropertyListT private_members;
- ClassPropertyListT static_fields;
+ ClassStaticElementListT static_elements;
ClassPropertyListT instance_fields;
FunctionLiteralT constructor;
bool has_seen_constructor;
bool has_name_static_property;
bool has_static_computed_names;
- bool has_static_class_fields;
+ bool has_static_elements;
bool has_static_private_methods;
+ bool has_static_blocks;
bool has_instance_members;
bool requires_brand;
bool is_anonymous;
bool has_private_methods;
- DeclarationScope* static_fields_scope;
+ DeclarationScope* static_elements_scope;
DeclarationScope* instance_members_scope;
int computed_field_count;
+ Variable* home_object_variable = nullptr;
+ Variable* static_home_object_variable = nullptr;
};
enum class PropertyPosition { kObjectLiteral, kClassLiteral };
@@ -781,6 +786,12 @@ class ParserBase {
return zone()->template New<ClassScope>(zone(), parent, is_anonymous);
}
+ Scope* NewBlockScopeForObjectLiteral() {
+ Scope* scope = NewScope(BLOCK_SCOPE);
+ scope->set_is_block_scope_for_object_literal();
+ return scope;
+ }
+
Scope* NewScope(ScopeType scope_type) const {
return NewScopeWithParent(scope(), scope_type);
}
@@ -1049,6 +1060,10 @@ class ParserBase {
bool is_resumable() const {
return IsResumableFunction(function_state_->kind());
}
+ bool is_class_static_block() const {
+ return function_state_->kind() ==
+ FunctionKind::kClassStaticInitializerFunction;
+ }
bool is_await_allowed() const {
return is_async_function() || (flags().allow_harmony_top_level_await() &&
IsModule(function_state_->kind()));
@@ -1168,6 +1183,7 @@ class ParserBase {
bool* has_seen_constructor);
ExpressionT ParseMemberInitializer(ClassInfo* class_info, int beg_pos,
bool is_static);
+ BlockT ParseClassStaticBlock(ClassInfo* class_info);
ObjectLiteralPropertyT ParseObjectPropertyDefinition(
ParsePropertyInfo* prop_info, bool* has_seen_proto);
void ParseArguments(
@@ -1287,6 +1303,8 @@ class ParserBase {
StatementT ParseStatement(ZonePtrList<const AstRawString>* labels,
ZonePtrList<const AstRawString>* own_labels,
AllowLabelledFunctionStatement allow_function);
+ BlockT ParseBlock(ZonePtrList<const AstRawString>* labels,
+ Scope* block_scope);
BlockT ParseBlock(ZonePtrList<const AstRawString>* labels);
// Parse a SubStatement in strict mode, or with an extra block scope in
@@ -1371,10 +1389,13 @@ class ParserBase {
return true;
}
- FunctionKind FunctionKindForImpl(bool is_method, ParseFunctionFlags flags) {
+ enum SubFunctionKind { kFunction, kNonStaticMethod, kStaticMethod };
+
+ FunctionKind FunctionKindForImpl(SubFunctionKind sub_function_kind,
+ ParseFunctionFlags flags) {
static const FunctionKind kFunctionKinds[][2][2] = {
{
- // is_method=false
+ // SubFunctionKind::kFunction
{// is_generator=false
FunctionKind::kNormalFunction, FunctionKind::kAsyncFunction},
{// is_generator=true
@@ -1382,26 +1403,35 @@ class ParserBase {
FunctionKind::kAsyncGeneratorFunction},
},
{
- // is_method=true
+ // SubFunctionKind::kNonStaticMethod
{// is_generator=false
FunctionKind::kConciseMethod, FunctionKind::kAsyncConciseMethod},
{// is_generator=true
FunctionKind::kConciseGeneratorMethod,
FunctionKind::kAsyncConciseGeneratorMethod},
+ },
+ {
+ // SubFunctionKind::kStaticMethod
+ {// is_generator=false
+ FunctionKind::kStaticConciseMethod,
+ FunctionKind::kStaticAsyncConciseMethod},
+ {// is_generator=true
+ FunctionKind::kStaticConciseGeneratorMethod,
+ FunctionKind::kStaticAsyncConciseGeneratorMethod},
}};
- return kFunctionKinds[is_method]
+ return kFunctionKinds[sub_function_kind]
[(flags & ParseFunctionFlag::kIsGenerator) != 0]
[(flags & ParseFunctionFlag::kIsAsync) != 0];
}
inline FunctionKind FunctionKindFor(ParseFunctionFlags flags) {
- const bool kIsMethod = false;
- return FunctionKindForImpl(kIsMethod, flags);
+ return FunctionKindForImpl(SubFunctionKind::kFunction, flags);
}
- inline FunctionKind MethodKindFor(ParseFunctionFlags flags) {
- const bool kIsMethod = true;
- return FunctionKindForImpl(kIsMethod, flags);
+ inline FunctionKind MethodKindFor(bool is_static, ParseFunctionFlags flags) {
+ return FunctionKindForImpl(is_static ? SubFunctionKind::kStaticMethod
+ : SubFunctionKind::kNonStaticMethod,
+ flags);
}
// Keep track of eval() calls since they disable all local variable
@@ -1505,6 +1535,8 @@ class ParserBase {
// Parser base's protected field members.
Scope* scope_; // Scope stack.
+ // Stack of scopes for object literals we're currently parsing.
+ Scope* object_literal_scope_ = nullptr;
Scope* original_scope_; // The top scope for the current parsing item.
FunctionState* function_state_; // Function state stack.
v8::Extension* extension_;
@@ -1611,14 +1643,16 @@ ParserBase<Impl>::ParseAndClassifyIdentifier(Token::Value next) {
IdentifierT name = impl()->GetIdentifier();
if (V8_UNLIKELY(impl()->IsArguments(name) &&
scope()->ShouldBanArguments())) {
- ReportMessage(MessageTemplate::kArgumentsDisallowedInInitializer);
+ ReportMessage(
+ MessageTemplate::kArgumentsDisallowedInInitializerAndStaticBlock);
return impl()->EmptyIdentifierString();
}
return name;
}
if (!Token::IsValidIdentifier(next, language_mode(), is_generator(),
- flags().is_module() || is_async_function())) {
+ flags().is_module() || is_async_function() ||
+ is_class_static_block())) {
ReportUnexpectedToken(next);
return impl()->EmptyIdentifierString();
}
@@ -2332,7 +2366,8 @@ ParserBase<Impl>::ParseClassPropertyDefinition(ClassInfo* class_info,
&class_info->has_seen_constructor);
}
- FunctionKind kind = MethodKindFor(prop_info->function_flags);
+ FunctionKind kind =
+ MethodKindFor(prop_info->is_static, prop_info->function_flags);
if (!prop_info->is_static && impl()->IsConstructor(prop_info->name)) {
class_info->has_seen_constructor = true;
@@ -2369,8 +2404,14 @@ ParserBase<Impl>::ParseClassPropertyDefinition(ClassInfo* class_info,
prop_info->name, name_expression->position());
}
- FunctionKind kind = is_get ? FunctionKind::kGetterFunction
- : FunctionKind::kSetterFunction;
+ FunctionKind kind;
+ if (prop_info->is_static) {
+ kind = is_get ? FunctionKind::kStaticGetterFunction
+ : FunctionKind::kStaticSetterFunction;
+ } else {
+ kind = is_get ? FunctionKind::kGetterFunction
+ : FunctionKind::kSetterFunction;
+ }
FunctionLiteralT value = impl()->ParseFunctionLiteral(
prop_info->name, scanner()->location(), kSkipFunctionNameCheck, kind,
@@ -2404,12 +2445,14 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberInitializer(
ClassInfo* class_info, int beg_pos, bool is_static) {
FunctionParsingScope body_parsing_scope(impl());
DeclarationScope* initializer_scope =
- is_static ? class_info->static_fields_scope
+ is_static ? class_info->static_elements_scope
: class_info->instance_members_scope;
+ FunctionKind function_kind =
+ is_static ? FunctionKind::kClassStaticInitializerFunction
+ : FunctionKind::kClassMembersInitializerFunction;
if (initializer_scope == nullptr) {
- initializer_scope =
- NewFunctionScope(FunctionKind::kClassMembersInitializerFunction);
+ initializer_scope = NewFunctionScope(function_kind);
// TODO(gsathya): Make scopes be non contiguous.
initializer_scope->set_start_position(beg_pos);
initializer_scope->SetLanguageMode(LanguageMode::kStrict);
@@ -2428,8 +2471,8 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberInitializer(
initializer_scope->set_end_position(end_position());
if (is_static) {
- class_info->static_fields_scope = initializer_scope;
- class_info->has_static_class_fields = true;
+ class_info->static_elements_scope = initializer_scope;
+ class_info->has_static_elements = true;
} else {
class_info->instance_members_scope = initializer_scope;
class_info->has_instance_members = true;
@@ -2439,6 +2482,32 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberInitializer(
}
template <typename Impl>
+typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseClassStaticBlock(
+ ClassInfo* class_info) {
+ Consume(Token::STATIC);
+
+ DeclarationScope* initializer_scope = class_info->static_elements_scope;
+ if (initializer_scope == nullptr) {
+ initializer_scope =
+ NewFunctionScope(FunctionKind::kClassStaticInitializerFunction);
+ initializer_scope->set_start_position(position());
+ initializer_scope->SetLanguageMode(LanguageMode::kStrict);
+ class_info->static_elements_scope = initializer_scope;
+ }
+
+ FunctionState initializer_state(&function_state_, &scope_, initializer_scope);
+ AcceptINScope accept_in(this, true);
+
+ // Each static block has its own var and lexical scope, so make a new var
+ // block scope instead of using the synthetic members initializer function
+ // scope.
+ BlockT static_block = ParseBlock(nullptr, NewVarblockScope());
+ initializer_scope->set_end_position(end_position());
+ class_info->has_static_elements = true;
+ return static_block;
+}
+
+template <typename Impl>
typename ParserBase<Impl>::ObjectLiteralPropertyT
ParserBase<Impl>::ParseObjectPropertyDefinition(ParsePropertyInfo* prop_info,
bool* has_seen_proto) {
@@ -2553,7 +2622,13 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ParsePropertyInfo* prop_info,
Scanner::Location(next_loc.beg_pos, end_position()),
MessageTemplate::kInvalidDestructuringTarget);
- FunctionKind kind = MethodKindFor(function_flags);
+ std::unique_ptr<BlockState> block_state;
+ if (object_literal_scope_ != nullptr) {
+ DCHECK_EQ(object_literal_scope_->outer_scope(), scope_);
+ block_state.reset(new BlockState(&scope_, object_literal_scope_));
+ }
+ constexpr bool kIsStatic = false;
+ FunctionKind kind = MethodKindFor(kIsStatic, function_flags);
ExpressionT value = impl()->ParseFunctionLiteral(
name, scanner()->location(), kSkipFunctionNameCheck, kind,
@@ -2584,6 +2659,12 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ParsePropertyInfo* prop_info,
factory()->NewStringLiteral(name, name_expression->position());
}
+ std::unique_ptr<BlockState> block_state;
+ if (object_literal_scope_ != nullptr) {
+ DCHECK_EQ(object_literal_scope_->outer_scope(), scope_);
+ block_state.reset(new BlockState(&scope_, object_literal_scope_));
+ }
+
FunctionKind kind = is_get ? FunctionKind::kGetterFunction
: FunctionKind::kSetterFunction;
@@ -2628,6 +2709,11 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral() {
Consume(Token::LBRACE);
AccumulationScope accumulation_scope(expression_scope());
+ // If methods appear inside the object literal, we'll enter this scope.
+ Scope* block_scope = NewBlockScopeForObjectLiteral();
+ block_scope->set_start_position(pos);
+ BlockState object_literal_scope_state(&object_literal_scope_, block_scope);
+
while (!Check(Token::RBRACE)) {
FuncNameInferrerState fni_state(&fni_);
@@ -2660,6 +2746,15 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral() {
fni_.Infer();
}
+ Variable* home_object = nullptr;
+ if (block_scope->needs_home_object()) {
+ home_object = block_scope->DeclareHomeObjectVariable(ast_value_factory());
+ block_scope->set_end_position(end_position());
+ } else {
+ block_scope = block_scope->FinalizeBlockScope();
+ DCHECK_NULL(block_scope);
+ }
+
// In pattern rewriter, we rewrite rest property to call out to a
// runtime function passing all the other properties as arguments to
// this runtime function. Here, we make sure that the number of
@@ -2670,8 +2765,9 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral() {
MessageTemplate::kTooManyArguments);
}
- return impl()->InitializeObjectLiteral(factory()->NewObjectLiteral(
- properties, number_of_boilerplate_properties, pos, has_rest_property));
+ return impl()->InitializeObjectLiteral(
+ factory()->NewObjectLiteral(properties, number_of_boilerplate_properties,
+ pos, has_rest_property, home_object));
}
template <typename Impl>
@@ -3086,7 +3182,20 @@ template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBinaryExpression(
int prec) {
DCHECK_GE(prec, 4);
- ExpressionT x = ParseUnaryExpression();
+ ExpressionT x;
+ // "#foo in ShiftExpression" needs to be parsed separately, since private
+ // identifiers are not valid PrimaryExpressions.
+ if (V8_UNLIKELY(FLAG_harmony_private_brand_checks &&
+ peek() == Token::PRIVATE_NAME)) {
+ x = ParsePropertyOrPrivatePropertyName();
+ if (peek() != Token::IN) {
+ ReportUnexpectedToken(peek());
+ return impl()->FailureExpression();
+ }
+ } else {
+ x = ParseUnaryExpression();
+ }
+
int prec1 = Token::Precedence(peek(), accept_IN_);
if (prec1 >= prec) {
return ParseBinaryContinuation(x, prec, prec1);
@@ -4533,6 +4642,8 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
BlockState block_state(&scope_, class_scope);
RaiseLanguageMode(LanguageMode::kStrict);
+ BlockState object_literal_scope_state(&object_literal_scope_, nullptr);
+
ClassInfo class_info(this);
class_info.is_anonymous = is_anonymous;
@@ -4550,12 +4661,22 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
const bool has_extends = !impl()->IsNull(class_info.extends);
while (peek() != Token::RBRACE) {
if (Check(Token::SEMICOLON)) continue;
+
+ // Either we're parsing a `static { }` initialization block or a property.
+ if (FLAG_harmony_class_static_blocks && peek() == Token::STATIC &&
+ PeekAhead() == Token::LBRACE) {
+ BlockT static_block = ParseClassStaticBlock(&class_info);
+ impl()->AddClassStaticBlock(static_block, &class_info);
+ continue;
+ }
+
FuncNameInferrerState fni_state(&fni_);
// If we haven't seen the constructor yet, it potentially is the next
// property.
bool is_constructor = !class_info.has_seen_constructor;
ParsePropertyInfo prop_info(this);
prop_info.position = PropertyPosition::kClassLiteral;
+
ClassLiteralPropertyT property =
ParseClassPropertyDefinition(&class_info, &prop_info, has_extends);
@@ -4619,6 +4740,13 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
ast_value_factory(), IsStaticFlag::kNotStatic, kNoSourcePosition);
}
+ if (class_scope->needs_home_object()) {
+ class_info.home_object_variable =
+ class_scope->DeclareHomeObjectVariable(ast_value_factory());
+ class_info.static_home_object_variable =
+ class_scope->DeclareStaticHomeObjectVariable(ast_value_factory());
+ }
+
bool should_save_class_variable_index =
class_scope->should_save_class_variable_index();
if (!is_anonymous || should_save_class_variable_index) {
@@ -5116,7 +5244,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
template <typename Impl>
typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseBlock(
- ZonePtrList<const AstRawString>* labels) {
+ ZonePtrList<const AstRawString>* labels, Scope* block_scope) {
// Block ::
// '{' StatementList '}'
@@ -5127,7 +5255,7 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseBlock(
CheckStackOverflow();
{
- BlockState block_state(zone(), &scope_);
+ BlockState block_state(&scope_, block_scope);
scope()->set_start_position(peek_position());
Target target(this, body, labels, nullptr, Target::TARGET_FOR_NAMED_ONLY);
@@ -5154,6 +5282,12 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseBlock(
}
template <typename Impl>
+typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseBlock(
+ ZonePtrList<const AstRawString>* labels) {
+ return ParseBlock(labels, NewScope(BLOCK_SCOPE));
+}
+
+template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseScopedStatement(
ZonePtrList<const AstRawString>* labels) {
if (is_strict(language_mode()) || peek() != Token::FUNCTION) {
@@ -5861,12 +5995,18 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
ExpressionParsingScope parsing_scope(impl());
AcceptINScope scope(this, false);
expression = ParseExpressionCoverGrammar();
+ // `for (async of` is disallowed but `for (async.x of` is allowed, so
+ // check if the token is ASYNC after parsing the expression.
+ bool expression_is_async = scanner()->current_token() == Token::ASYNC;
// Initializer is reference followed by in/of.
lhs_end_pos = end_position();
is_for_each = CheckInOrOf(&for_info.mode);
if (is_for_each) {
- if (starts_with_let && for_info.mode == ForEachStatement::ITERATE) {
- impl()->ReportMessageAt(next_loc, MessageTemplate::kForOfLet);
+ if ((starts_with_let || expression_is_async) &&
+ for_info.mode == ForEachStatement::ITERATE) {
+ impl()->ReportMessageAt(next_loc, starts_with_let
+ ? MessageTemplate::kForOfLet
+ : MessageTemplate::kForOfAsync);
return impl()->NullStatement();
}
if (expression->IsPattern()) {
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index e96fe368d1..9366d195f3 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -281,14 +281,14 @@ Expression* Parser::NewThrowError(Runtime::FunctionId id,
}
Expression* Parser::NewSuperPropertyReference(int pos) {
- // this_function[home_object_symbol]
- VariableProxy* this_function_proxy =
- NewUnresolved(ast_value_factory()->this_function_string(), pos);
- Expression* home_object_symbol_literal = factory()->NewSymbolLiteral(
- AstSymbol::kHomeObjectSymbol, kNoSourcePosition);
- Expression* home_object = factory()->NewProperty(
- this_function_proxy, home_object_symbol_literal, pos);
- return factory()->NewSuperPropertyReference(home_object, pos);
+ const AstRawString* home_object_name;
+ if (IsStatic(scope()->GetReceiverScope()->function_kind())) {
+ home_object_name = ast_value_factory_->dot_static_home_object_string();
+ } else {
+ home_object_name = ast_value_factory_->dot_home_object_string();
+ }
+ return factory()->NewSuperPropertyReference(
+ NewUnresolved(home_object_name, pos), pos);
}
Expression* Parser::NewSuperCallReference(int pos) {
@@ -3026,7 +3026,8 @@ void Parser::DeclarePublicClassField(ClassScope* scope,
bool is_static, bool is_computed_name,
ClassInfo* class_info) {
if (is_static) {
- class_info->static_fields->Add(property, zone());
+ class_info->static_elements->Add(
+ factory()->NewClassLiteralStaticElement(property), zone());
} else {
class_info->instance_fields->Add(property, zone());
}
@@ -3049,7 +3050,8 @@ void Parser::DeclarePrivateClassMember(ClassScope* scope,
bool is_static, ClassInfo* class_info) {
if (kind == ClassLiteralProperty::Kind::FIELD) {
if (is_static) {
- class_info->static_fields->Add(property, zone());
+ class_info->static_elements->Add(
+ factory()->NewClassLiteralStaticElement(property), zone());
} else {
class_info->instance_fields->Add(property, zone());
}
@@ -3089,16 +3091,18 @@ void Parser::DeclarePublicClassMethod(const AstRawString* class_name,
class_info->public_members->Add(property, zone());
}
+void Parser::AddClassStaticBlock(Block* block, ClassInfo* class_info) {
+ DCHECK(class_info->has_static_elements);
+ class_info->static_elements->Add(
+ factory()->NewClassLiteralStaticElement(block), zone());
+}
+
FunctionLiteral* Parser::CreateInitializerFunction(
- const char* name, DeclarationScope* scope,
- ZonePtrList<ClassLiteral::Property>* fields) {
- DCHECK_EQ(scope->function_kind(),
- FunctionKind::kClassMembersInitializerFunction);
+ const char* name, DeclarationScope* scope, Statement* initializer_stmt) {
+ DCHECK(IsClassMembersInitializerFunction(scope->function_kind()));
// function() { .. class fields initializer .. }
ScopedPtrList<Statement> statements(pointer_buffer());
- InitializeClassMembersStatement* stmt =
- factory()->NewInitializeClassMembersStatement(fields, kNoSourcePosition);
- statements.Add(stmt);
+ statements.Add(initializer_stmt);
FunctionLiteral* result = factory()->NewFunctionLiteral(
ast_value_factory()->GetOneByteString(name), scope, statements, 0, 0, 0,
FunctionLiteral::kNoDuplicateParameters,
@@ -3139,18 +3143,20 @@ Expression* Parser::RewriteClassLiteral(ClassScope* block_scope,
block_scope->class_variable()->set_initializer_position(end_pos);
}
- FunctionLiteral* static_fields_initializer = nullptr;
- if (class_info->has_static_class_fields) {
- static_fields_initializer = CreateInitializerFunction(
- "<static_fields_initializer>", class_info->static_fields_scope,
- class_info->static_fields);
+ FunctionLiteral* static_initializer = nullptr;
+ if (class_info->has_static_elements) {
+ static_initializer = CreateInitializerFunction(
+ "<static_initializer>", class_info->static_elements_scope,
+ factory()->NewInitializeClassStaticElementsStatement(
+ class_info->static_elements, kNoSourcePosition));
}
FunctionLiteral* instance_members_initializer_function = nullptr;
if (class_info->has_instance_members) {
instance_members_initializer_function = CreateInitializerFunction(
"<instance_members_initializer>", class_info->instance_members_scope,
- class_info->instance_fields);
+ factory()->NewInitializeClassMembersStatement(
+ class_info->instance_fields, kNoSourcePosition));
class_info->constructor->set_requires_instance_members_initializer(true);
class_info->constructor->add_expected_properties(
class_info->instance_fields->length());
@@ -3165,10 +3171,11 @@ Expression* Parser::RewriteClassLiteral(ClassScope* block_scope,
ClassLiteral* class_literal = factory()->NewClassLiteral(
block_scope, class_info->extends, class_info->constructor,
class_info->public_members, class_info->private_members,
- static_fields_initializer, instance_members_initializer_function, pos,
- end_pos, class_info->has_name_static_property,
+ static_initializer, instance_members_initializer_function, pos, end_pos,
+ class_info->has_name_static_property,
class_info->has_static_computed_names, class_info->is_anonymous,
- class_info->has_private_methods);
+ class_info->has_private_methods, class_info->home_object_variable,
+ class_info->static_home_object_variable);
AddFunctionForNameInference(class_info->constructor);
return class_literal;
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index 3bc5bb0fe5..db7d5aed0e 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -103,7 +103,9 @@ struct ParserTypes<Parser> {
using Block = v8::internal::Block*;
using BreakableStatement = v8::internal::BreakableStatement*;
using ClassLiteralProperty = ClassLiteral::Property*;
+ using ClassLiteralStaticElement = ClassLiteral::StaticElement*;
using ClassPropertyList = ZonePtrList<ClassLiteral::Property>*;
+ using ClassStaticElementList = ZonePtrList<ClassLiteral::StaticElement>*;
using Expression = v8::internal::Expression*;
using ExpressionList = ScopedPtrList<v8::internal::Expression>;
using FormalParameters = ParserFormalParameters;
@@ -313,9 +315,9 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Variable* CreatePrivateNameVariable(ClassScope* scope, VariableMode mode,
IsStaticFlag is_static_flag,
const AstRawString* name);
- FunctionLiteral* CreateInitializerFunction(
- const char* name, DeclarationScope* scope,
- ZonePtrList<ClassLiteral::Property>* fields);
+ FunctionLiteral* CreateInitializerFunction(const char* name,
+ DeclarationScope* scope,
+ Statement* initializer_stmt);
bool IdentifierEquals(const AstRawString* identifier,
const AstRawString* other) {
@@ -347,6 +349,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
const AstRawString* property_name, bool is_static,
bool is_computed_name, bool is_private,
ClassInfo* class_info);
+ void AddClassStaticBlock(Block* block, ClassInfo* class_info);
Expression* RewriteClassLiteral(ClassScope* block_scope,
const AstRawString* name,
ClassInfo* class_info, int pos, int end_pos);
@@ -557,7 +560,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return property != nullptr && property->IsPrivateReference();
}
- // This returns true if the expression is an indentifier (wrapped
+ // This returns true if the expression is an identifier (wrapped
// inside a variable proxy). We exclude the case of 'this', which
// has been converted to a variable proxy.
V8_INLINE static bool IsIdentifier(Expression* expression) {
@@ -842,6 +845,10 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
int size) const {
return zone()->New<ZonePtrList<ClassLiteral::Property>>(size, zone());
}
+ V8_INLINE ZonePtrList<ClassLiteral::StaticElement>* NewClassStaticElementList(
+ int size) const {
+ return zone()->New<ZonePtrList<ClassLiteral::StaticElement>>(size, zone());
+ }
V8_INLINE ZonePtrList<Statement>* NewStatementList(int size) const {
return zone()->New<ZonePtrList<Statement>>(size, zone());
}
diff --git a/deps/v8/src/parsing/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
index aa49b55227..a085d55e1e 100644
--- a/deps/v8/src/parsing/preparse-data.cc
+++ b/deps/v8/src/parsing/preparse-data.cc
@@ -306,7 +306,7 @@ bool PreparseDataBuilder::SaveDataForSkippableFunction(
uint8_t language_and_super =
LanguageField::encode(function_scope->language_mode()) |
- UsesSuperField::encode(function_scope->NeedsHomeObject());
+ UsesSuperField::encode(function_scope->uses_super_property());
byte_data_.WriteQuarter(language_and_super);
return has_data;
}
@@ -361,7 +361,7 @@ void PreparseDataBuilder::SaveDataForScope(Scope* scope) {
byte_data_.WriteUint8(scope->scope_type());
#endif
- uint8_t eval_and_private_recalc =
+ uint8_t scope_data_flags =
ScopeSloppyEvalCanExtendVarsBit::encode(
scope->is_declaration_scope() &&
scope->AsDeclarationScope()->sloppy_eval_can_extend_vars()) |
@@ -374,7 +374,7 @@ void PreparseDataBuilder::SaveDataForScope(Scope* scope) {
scope->is_class_scope() &&
scope->AsClassScope()->should_save_class_variable_index());
byte_data_.Reserve(kUint8Size);
- byte_data_.WriteUint8(eval_and_private_recalc);
+ byte_data_.WriteUint8(scope_data_flags);
if (scope->is_function_scope()) {
Variable* function = scope->AsDeclarationScope()->function_var();
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index ff2b1ff157..3c8b01eb02 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -567,7 +567,7 @@ class PreParserFactory {
}
PreParserExpression NewObjectLiteral(
const PreParserExpressionList& properties, int boilerplate_properties,
- int pos, bool has_rest_property) {
+ int pos, bool has_rest_property, Variable* home_object = nullptr) {
return PreParserExpression::ObjectLiteral();
}
PreParserExpression NewVariableProxy(void* variable) {
@@ -876,6 +876,7 @@ struct ParserTypes<PreParser> {
// Return types for traversing functions.
using ClassLiteralProperty = PreParserExpression;
+ using ClassLiteralStaticElement = PreParserExpression;
using Expression = PreParserExpression;
using FunctionLiteral = PreParserExpression;
using ObjectLiteralProperty = PreParserExpression;
@@ -885,6 +886,7 @@ struct ParserTypes<PreParser> {
using FormalParameters = PreParserFormalParameters;
using Identifier = PreParserIdentifier;
using ClassPropertyList = PreParserPropertyList;
+ using ClassStaticElementList = PreParserPropertyList;
using StatementList = PreParserScopedStatementList;
using Block = PreParserBlock;
using BreakableStatement = PreParserStatement;
@@ -1239,6 +1241,11 @@ class PreParser : public ParserBase<PreParser> {
}
}
+ V8_INLINE void AddClassStaticBlock(PreParserBlock block,
+ ClassInfo* class_info) {
+ DCHECK(class_info->has_static_elements);
+ }
+
V8_INLINE PreParserExpression
RewriteClassLiteral(ClassScope* scope, const PreParserIdentifier& name,
ClassInfo* class_info, int pos, int end_pos) {
@@ -1260,7 +1267,7 @@ class PreParser : public ParserBase<PreParser> {
FunctionState function_state(&function_state_, &scope_, function_scope);
GetNextFunctionLiteralId();
}
- if (class_info->has_static_class_fields) {
+ if (class_info->has_static_elements) {
GetNextFunctionLiteralId();
}
if (class_info->has_instance_members) {
@@ -1544,9 +1551,6 @@ class PreParser : public ParserBase<PreParser> {
}
V8_INLINE PreParserExpression NewSuperPropertyReference(int pos) {
- scope()->NewUnresolved(factory()->ast_node_factory(),
- ast_value_factory()->this_function_string(), pos,
- NORMAL_VARIABLE);
return PreParserExpression::Default();
}
@@ -1604,6 +1608,10 @@ class PreParser : public ParserBase<PreParser> {
return PreParserPropertyList();
}
+ V8_INLINE PreParserPropertyList NewClassStaticElementList(int size) const {
+ return PreParserPropertyList();
+ }
+
V8_INLINE PreParserStatementList NewStatementList(int size) const {
return PreParserStatementList();
}
diff --git a/deps/v8/src/parsing/rewriter.cc b/deps/v8/src/parsing/rewriter.cc
index c8394a4281..dee0763280 100644
--- a/deps/v8/src/parsing/rewriter.cc
+++ b/deps/v8/src/parsing/rewriter.cc
@@ -360,6 +360,11 @@ void Processor::VisitInitializeClassMembersStatement(
replacement_ = node;
}
+void Processor::VisitInitializeClassStaticElementsStatement(
+ InitializeClassStaticElementsStatement* node) {
+ replacement_ = node;
+}
+
// Expressions are never visited.
#define DEF_VISIT(type) \
void Processor::Visit##type(type* expr) { UNREACHABLE(); }
diff --git a/deps/v8/src/profiler/cpu-profiler-inl.h b/deps/v8/src/profiler/cpu-profiler-inl.h
index 2e479f75b7..3e2e1e56c2 100644
--- a/deps/v8/src/profiler/cpu-profiler-inl.h
+++ b/deps/v8/src/profiler/cpu-profiler-inl.h
@@ -64,6 +64,10 @@ TickSample* SamplingEventsProcessor::StartTickSample() {
return &evt->sample;
}
+void BytecodeFlushEventRecord::UpdateCodeMap(CodeMap* code_map) {
+ code_map->ClearCodesInRange(instruction_start, instruction_start + 1);
+}
+
void SamplingEventsProcessor::FinishTickSample() {
ticks_buffer_.FinishEnqueue();
}
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index a18e16359e..a161f4bce4 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -32,12 +32,13 @@ class CpuSampler : public sampler::Sampler {
CpuSampler(Isolate* isolate, SamplingEventsProcessor* processor)
: sampler::Sampler(reinterpret_cast<v8::Isolate*>(isolate)),
processor_(processor),
- threadId_(ThreadId::Current()) {}
+ perThreadData_(isolate->FindPerThreadDataForThisThread()) {}
void SampleStack(const v8::RegisterState& regs) override {
Isolate* isolate = reinterpret_cast<Isolate*>(this->isolate());
- if (v8::Locker::IsActive() &&
- !isolate->thread_manager()->IsLockedByThread(threadId_)) {
+ if (v8::Locker::IsActive() && (!isolate->thread_manager()->IsLockedByThread(
+ perThreadData_->thread_id()) ||
+ perThreadData_->thread_state() != nullptr)) {
ProfilerStats::Instance()->AddReason(
ProfilerStats::Reason::kIsolateNotLocked);
return;
@@ -62,7 +63,7 @@ class CpuSampler : public sampler::Sampler {
private:
SamplingEventsProcessor* processor_;
- ThreadId threadId_;
+ Isolate::PerIsolateThreadData* perThreadData_;
};
ProfilingScope::ProfilingScope(Isolate* isolate, ProfilerListener* listener)
@@ -195,6 +196,7 @@ void ProfilerEventsProcessor::CodeEventHandler(
case CodeEventRecord::CODE_CREATION:
case CodeEventRecord::CODE_MOVE:
case CodeEventRecord::CODE_DISABLE_OPT:
+ case CodeEventRecord::BYTECODE_FLUSH:
Enqueue(evt_rec);
break;
case CodeEventRecord::CODE_DEOPT: {
@@ -543,9 +545,11 @@ void CpuProfiler::CollectSample() {
}
}
-CpuProfilingStatus CpuProfiler::StartProfiling(const char* title,
- CpuProfilingOptions options) {
- StartProfilingStatus status = profiles_->StartProfiling(title, options);
+CpuProfilingStatus CpuProfiler::StartProfiling(
+ const char* title, CpuProfilingOptions options,
+ std::unique_ptr<DiscardedSamplesDelegate> delegate) {
+ StartProfilingStatus status =
+ profiles_->StartProfiling(title, options, std::move(delegate));
// TODO(nicodubus): Revisit logic for if we want to do anything different for
// kAlreadyStarted
@@ -559,9 +563,11 @@ CpuProfilingStatus CpuProfiler::StartProfiling(const char* title,
return status;
}
-CpuProfilingStatus CpuProfiler::StartProfiling(String title,
- CpuProfilingOptions options) {
- return StartProfiling(profiles_->GetName(title), options);
+CpuProfilingStatus CpuProfiler::StartProfiling(
+ String title, CpuProfilingOptions options,
+ std::unique_ptr<DiscardedSamplesDelegate> delegate) {
+ return StartProfiling(profiles_->GetName(title), options,
+ std::move(delegate));
}
void CpuProfiler::StartProcessorIfNotStarted() {
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index 572fe853fb..25084a4265 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -34,7 +34,8 @@ class Symbolizer;
V(CODE_MOVE, CodeMoveEventRecord) \
V(CODE_DISABLE_OPT, CodeDisableOptEventRecord) \
V(CODE_DEOPT, CodeDeoptEventRecord) \
- V(REPORT_BUILTIN, ReportBuiltinEventRecord)
+ V(REPORT_BUILTIN, ReportBuiltinEventRecord) \
+ V(BYTECODE_FLUSH, BytecodeFlushEventRecord)
class CodeEventRecord {
public:
@@ -111,6 +112,13 @@ class TickSampleEventRecord {
TickSample sample;
};
+class BytecodeFlushEventRecord : public CodeEventRecord {
+ public:
+ Address instruction_start;
+
+ V8_INLINE void UpdateCodeMap(CodeMap* code_map);
+};
+
// A record type for sending code events (e.g. create, move, delete) to the
// profiling thread.
class CodeEventsContainer {
@@ -320,10 +328,12 @@ class V8_EXPORT_PRIVATE CpuProfiler {
void set_sampling_interval(base::TimeDelta value);
void set_use_precise_sampling(bool);
void CollectSample();
- StartProfilingStatus StartProfiling(const char* title,
- CpuProfilingOptions options = {});
- StartProfilingStatus StartProfiling(String title,
- CpuProfilingOptions options = {});
+ StartProfilingStatus StartProfiling(
+ const char* title, CpuProfilingOptions options = {},
+ std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
+ StartProfilingStatus StartProfiling(
+ String title, CpuProfilingOptions options = {},
+ std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
CpuProfile* StopProfiling(const char* title);
CpuProfile* StopProfiling(String title);
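
Illustrative sketch, not part of the patch: a delegate of the kind the new trailing StartProfiling parameter accepts. It assumes the v8::DiscardedSamplesDelegate interface (a pure virtual Notify()) that this update adds to include/v8-profiler.h; the DropNotifier name is hypothetical.

#include <memory>
#include "v8-profiler.h"  // deps/v8/include/v8-profiler.h

class DropNotifier : public v8::DiscardedSamplesDelegate {
 public:
  void Notify() override {
    // Runs on the isolate's foreground task runner the first time a sample is
    // discarded because the profile reached its max_samples limit.
  }
};

// Passed as the new default-null argument, for example:
//   profiler->StartProfiling(title, options, std::make_unique<DropNotifier>());
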
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index b6b67e38fd..436dbe7797 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -26,7 +26,6 @@
#include "src/objects/js-generator-inl.h"
#include "src/objects/js-promise-inl.h"
#include "src/objects/js-regexp-inl.h"
-#include "src/objects/layout-descriptor.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/objects-body-descriptors.h"
#include "src/objects/objects-inl.h"
@@ -636,7 +635,7 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject object) {
} else if (object.IsContext()) {
return AddEntry(object, HeapEntry::kObject, "system / Context");
} else if (object.IsFixedArray() || object.IsFixedDoubleArray() ||
- object.IsByteArray()) {
+ object.IsByteArray() || object.IsScopeInfo()) {
return AddEntry(object, HeapEntry::kArray, "");
} else if (object.IsHeapNumber()) {
return AddEntry(object, HeapEntry::kHeapNumber, "number");
@@ -646,6 +645,10 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject object) {
HeapEntry* V8HeapExplorer::AddEntry(HeapObject object, HeapEntry::Type type,
const char* name) {
+ if (FLAG_heap_profiler_show_hidden_objects && type == HeapEntry::kHidden) {
+ type = HeapEntry::kNative;
+ }
+
return AddEntry(object.address(), type, name, object.Size());
}
@@ -665,27 +668,35 @@ HeapEntry* V8HeapExplorer::AddEntry(Address address,
}
const char* V8HeapExplorer::GetSystemEntryName(HeapObject object) {
- switch (object.map().instance_type()) {
- case MAP_TYPE:
- switch (Map::cast(object).instance_type()) {
+ if (object.IsMap()) {
+ switch (Map::cast(object).instance_type()) {
#define MAKE_STRING_MAP_CASE(instance_type, size, name, Name) \
case instance_type: return "system / Map (" #Name ")";
STRING_TYPE_LIST(MAKE_STRING_MAP_CASE)
#undef MAKE_STRING_MAP_CASE
default: return "system / Map";
- }
- case CELL_TYPE: return "system / Cell";
- case PROPERTY_CELL_TYPE: return "system / PropertyCell";
- case FOREIGN_TYPE: return "system / Foreign";
- case ODDBALL_TYPE: return "system / Oddball";
- case ALLOCATION_SITE_TYPE:
- return "system / AllocationSite";
-#define MAKE_STRUCT_CASE(TYPE, Name, name) \
- case TYPE: \
+ }
+ }
+
+ switch (object.map().instance_type()) {
+#define MAKE_TORQUE_CASE(Name, TYPE) \
+ case TYPE: \
return "system / " #Name;
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- default: return "system";
+ // The following lists include every non-String instance type.
+ // This includes a few types that already have non-"system" names assigned
+ // by AddEntry, but this is a convenient way to avoid manual upkeep here.
+ TORQUE_INSTANCE_CHECKERS_SINGLE_FULLY_DEFINED(MAKE_TORQUE_CASE)
+ TORQUE_INSTANCE_CHECKERS_MULTIPLE_FULLY_DEFINED(MAKE_TORQUE_CASE)
+ TORQUE_INSTANCE_CHECKERS_SINGLE_ONLY_DECLARED(MAKE_TORQUE_CASE)
+ TORQUE_INSTANCE_CHECKERS_MULTIPLE_ONLY_DECLARED(MAKE_TORQUE_CASE)
+#undef MAKE_TORQUE_CASE
+
+ // Strings were already handled by AddEntry.
+#define MAKE_STRING_CASE(instance_type, size, name, Name) \
+ case instance_type: \
+ UNREACHABLE();
+ STRING_TYPE_LIST(MAKE_STRING_CASE)
+#undef MAKE_STRING_CASE
}
}
@@ -1073,29 +1084,24 @@ void V8HeapExplorer::ExtractMapReferences(HeapEntry* entry, Map map) {
Map::kInstanceDescriptorsOffset);
SetInternalReference(entry, "prototype", map.prototype(),
Map::kPrototypeOffset);
- if (FLAG_unbox_double_fields) {
- SetInternalReference(entry, "layout_descriptor",
- map.layout_descriptor(kAcquireLoad),
- Map::kLayoutDescriptorOffset);
- }
if (map.IsContextMap()) {
Object native_context = map.native_context();
TagObject(native_context, "(native context)");
SetInternalReference(entry, "native_context", native_context,
Map::kConstructorOrBackPointerOrNativeContextOffset);
} else {
- Object constructor_or_backpointer = map.constructor_or_backpointer();
- if (constructor_or_backpointer.IsMap()) {
- TagObject(constructor_or_backpointer, "(back pointer)");
- SetInternalReference(entry, "back_pointer", constructor_or_backpointer,
+ Object constructor_or_back_pointer = map.constructor_or_back_pointer();
+ if (constructor_or_back_pointer.IsMap()) {
+ TagObject(constructor_or_back_pointer, "(back pointer)");
+ SetInternalReference(entry, "back_pointer", constructor_or_back_pointer,
Map::kConstructorOrBackPointerOrNativeContextOffset);
- } else if (constructor_or_backpointer.IsFunctionTemplateInfo()) {
- TagObject(constructor_or_backpointer, "(constructor function data)");
+ } else if (constructor_or_back_pointer.IsFunctionTemplateInfo()) {
+ TagObject(constructor_or_back_pointer, "(constructor function data)");
SetInternalReference(entry, "constructor_function_data",
- constructor_or_backpointer,
+ constructor_or_back_pointer,
Map::kConstructorOrBackPointerOrNativeContextOffset);
} else {
- SetInternalReference(entry, "constructor", constructor_or_backpointer,
+ SetInternalReference(entry, "constructor", constructor_or_back_pointer,
Map::kConstructorOrBackPointerOrNativeContextOffset);
}
}
diff --git a/deps/v8/src/profiler/profile-generator-inl.h b/deps/v8/src/profiler/profile-generator-inl.h
index 8239bdb000..83c1108ddb 100644
--- a/deps/v8/src/profiler/profile-generator-inl.h
+++ b/deps/v8/src/profiler/profile-generator-inl.h
@@ -16,9 +16,10 @@ CodeEntry::CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
const char* resource_name, int line_number,
int column_number,
std::unique_ptr<SourcePositionTable> line_info,
- bool is_shared_cross_origin)
+ bool is_shared_cross_origin, CodeType code_type)
: bit_field_(TagField::encode(tag) |
BuiltinIdField::encode(Builtins::builtin_count) |
+ CodeTypeField::encode(code_type) |
SharedCrossOriginField::encode(is_shared_cross_origin)),
name_(name),
resource_name_(resource_name),
@@ -39,48 +40,6 @@ ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry,
tree_->EnqueueNode(this);
}
-inline CpuProfileNode::SourceType ProfileNode::source_type() const {
- // Handle metadata and VM state code entry types.
- if (entry_ == CodeEntry::program_entry() ||
- entry_ == CodeEntry::idle_entry() || entry_ == CodeEntry::gc_entry() ||
- entry_ == CodeEntry::root_entry()) {
- return CpuProfileNode::kInternal;
- }
- if (entry_ == CodeEntry::unresolved_entry())
- return CpuProfileNode::kUnresolved;
-
- // Otherwise, resolve based on logger tag.
- switch (entry_->tag()) {
- case CodeEventListener::EVAL_TAG:
- case CodeEventListener::SCRIPT_TAG:
- case CodeEventListener::LAZY_COMPILE_TAG:
- case CodeEventListener::FUNCTION_TAG:
- case CodeEventListener::INTERPRETED_FUNCTION_TAG:
- return CpuProfileNode::kScript;
- case CodeEventListener::BUILTIN_TAG:
- case CodeEventListener::HANDLER_TAG:
- case CodeEventListener::BYTECODE_HANDLER_TAG:
- case CodeEventListener::NATIVE_FUNCTION_TAG:
- case CodeEventListener::NATIVE_SCRIPT_TAG:
- case CodeEventListener::NATIVE_LAZY_COMPILE_TAG:
- return CpuProfileNode::kBuiltin;
- case CodeEventListener::CALLBACK_TAG:
- return CpuProfileNode::kCallback;
- case CodeEventListener::REG_EXP_TAG:
- case CodeEventListener::STUB_TAG:
- case CodeEventListener::CODE_CREATION_EVENT:
- case CodeEventListener::CODE_DISABLE_OPT_EVENT:
- case CodeEventListener::CODE_MOVE_EVENT:
- case CodeEventListener::CODE_DELETE_EVENT:
- case CodeEventListener::CODE_MOVING_GC:
- case CodeEventListener::SHARED_FUNC_MOVE_EVENT:
- case CodeEventListener::SNAPSHOT_CODE_NAME_EVENT:
- case CodeEventListener::TICK_EVENT:
- case CodeEventListener::NUMBER_OF_LOG_EVENTS:
- return CpuProfileNode::kInternal;
- }
-}
-
inline Isolate* ProfileNode::isolate() const { return tree_->isolate(); }
} // namespace internal
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index fa6c452ba7..9f150f1e48 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -6,6 +6,8 @@
#include <algorithm>
+#include "include/v8-profiler.h"
+#include "src/base/lazy-instance.h"
#include "src/codegen/source-position.h"
#include "src/objects/shared-function-info-inl.h"
#include "src/profiler/cpu-profiler.h"
@@ -81,49 +83,58 @@ const char* const CodeEntry::kGarbageCollectorEntryName = "(garbage collector)";
const char* const CodeEntry::kUnresolvedFunctionName = "(unresolved function)";
const char* const CodeEntry::kRootEntryName = "(root)";
-base::LazyDynamicInstance<CodeEntry, CodeEntry::ProgramEntryCreateTrait>::type
- CodeEntry::kProgramEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
-
-base::LazyDynamicInstance<CodeEntry, CodeEntry::IdleEntryCreateTrait>::type
- CodeEntry::kIdleEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
-
-base::LazyDynamicInstance<CodeEntry, CodeEntry::GCEntryCreateTrait>::type
- CodeEntry::kGCEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
-
-base::LazyDynamicInstance<CodeEntry,
- CodeEntry::UnresolvedEntryCreateTrait>::type
- CodeEntry::kUnresolvedEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
-
-base::LazyDynamicInstance<CodeEntry, CodeEntry::RootEntryCreateTrait>::type
- CodeEntry::kRootEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
-
-CodeEntry* CodeEntry::ProgramEntryCreateTrait::Create() {
- return new CodeEntry(CodeEventListener::FUNCTION_TAG,
- CodeEntry::kProgramEntryName);
-}
-
-CodeEntry* CodeEntry::IdleEntryCreateTrait::Create() {
- return new CodeEntry(CodeEventListener::FUNCTION_TAG,
- CodeEntry::kIdleEntryName);
-}
-
-CodeEntry* CodeEntry::GCEntryCreateTrait::Create() {
- return new CodeEntry(CodeEventListener::BUILTIN_TAG,
- CodeEntry::kGarbageCollectorEntryName);
-}
-
-CodeEntry* CodeEntry::UnresolvedEntryCreateTrait::Create() {
- return new CodeEntry(CodeEventListener::FUNCTION_TAG,
- CodeEntry::kUnresolvedFunctionName);
-}
-
-CodeEntry* CodeEntry::RootEntryCreateTrait::Create() {
- return new CodeEntry(CodeEventListener::FUNCTION_TAG,
- CodeEntry::kRootEntryName);
+// static
+CodeEntry* CodeEntry::program_entry() {
+ static base::LeakyObject<CodeEntry> kProgramEntry(
+ CodeEventListener::FUNCTION_TAG, CodeEntry::kProgramEntryName,
+ CodeEntry::kEmptyResourceName, v8::CpuProfileNode::kNoLineNumberInfo,
+ v8::CpuProfileNode::kNoColumnNumberInfo, nullptr, false,
+ CodeEntry::CodeType::OTHER);
+ return kProgramEntry.get();
+}
+
+// static
+CodeEntry* CodeEntry::idle_entry() {
+ static base::LeakyObject<CodeEntry> kIdleEntry(
+ CodeEventListener::FUNCTION_TAG, CodeEntry::kIdleEntryName,
+ CodeEntry::kEmptyResourceName, v8::CpuProfileNode::kNoLineNumberInfo,
+ v8::CpuProfileNode::kNoColumnNumberInfo, nullptr, false,
+ CodeEntry::CodeType::OTHER);
+ return kIdleEntry.get();
+}
+
+// static
+CodeEntry* CodeEntry::gc_entry() {
+ static base::LeakyObject<CodeEntry> kGcEntry(
+ CodeEventListener::BUILTIN_TAG, CodeEntry::kGarbageCollectorEntryName,
+ CodeEntry::kEmptyResourceName, v8::CpuProfileNode::kNoLineNumberInfo,
+ v8::CpuProfileNode::kNoColumnNumberInfo, nullptr, false,
+ CodeEntry::CodeType::OTHER);
+ return kGcEntry.get();
+}
+
+// static
+CodeEntry* CodeEntry::unresolved_entry() {
+ static base::LeakyObject<CodeEntry> kUnresolvedEntry(
+ CodeEventListener::FUNCTION_TAG, CodeEntry::kUnresolvedFunctionName,
+ CodeEntry::kEmptyResourceName, v8::CpuProfileNode::kNoLineNumberInfo,
+ v8::CpuProfileNode::kNoColumnNumberInfo, nullptr, false,
+ CodeEntry::CodeType::OTHER);
+ return kUnresolvedEntry.get();
+}
+
+// static
+CodeEntry* CodeEntry::root_entry() {
+ static base::LeakyObject<CodeEntry> kRootEntry(
+ CodeEventListener::FUNCTION_TAG, CodeEntry::kRootEntryName,
+ CodeEntry::kEmptyResourceName, v8::CpuProfileNode::kNoLineNumberInfo,
+ v8::CpuProfileNode::kNoColumnNumberInfo, nullptr, false,
+ CodeEntry::CodeType::OTHER);
+ return kRootEntry.get();
}
uint32_t CodeEntry::GetHash() const {
- uint32_t hash = ComputeUnseededHash(tag());
+ uint32_t hash = 0;
if (script_id_ != v8::UnboundScript::kNoScriptId) {
hash ^= ComputeUnseededHash(static_cast<uint32_t>(script_id_));
hash ^= ComputeUnseededHash(static_cast<uint32_t>(position_));
@@ -287,6 +298,49 @@ void CodeEntry::print() const {
base::OS::Print("\n");
}
+CpuProfileNode::SourceType ProfileNode::source_type() const {
+ // Handle metadata and VM state code entry types.
+ if (entry_ == CodeEntry::program_entry() ||
+ entry_ == CodeEntry::idle_entry() || entry_ == CodeEntry::gc_entry() ||
+ entry_ == CodeEntry::root_entry()) {
+ return CpuProfileNode::kInternal;
+ }
+ if (entry_ == CodeEntry::unresolved_entry())
+ return CpuProfileNode::kUnresolved;
+
+ // Otherwise, resolve based on logger tag.
+ switch (entry_->tag()) {
+ case CodeEventListener::EVAL_TAG:
+ case CodeEventListener::SCRIPT_TAG:
+ case CodeEventListener::LAZY_COMPILE_TAG:
+ case CodeEventListener::FUNCTION_TAG:
+ case CodeEventListener::INTERPRETED_FUNCTION_TAG:
+ return CpuProfileNode::kScript;
+ case CodeEventListener::BUILTIN_TAG:
+ case CodeEventListener::HANDLER_TAG:
+ case CodeEventListener::BYTECODE_HANDLER_TAG:
+ case CodeEventListener::NATIVE_FUNCTION_TAG:
+ case CodeEventListener::NATIVE_SCRIPT_TAG:
+ case CodeEventListener::NATIVE_LAZY_COMPILE_TAG:
+ return CpuProfileNode::kBuiltin;
+ case CodeEventListener::CALLBACK_TAG:
+ return CpuProfileNode::kCallback;
+ case CodeEventListener::REG_EXP_TAG:
+ case CodeEventListener::STUB_TAG:
+ case CodeEventListener::CODE_CREATION_EVENT:
+ case CodeEventListener::CODE_DISABLE_OPT_EVENT:
+ case CodeEventListener::CODE_MOVE_EVENT:
+ case CodeEventListener::CODE_DELETE_EVENT:
+ case CodeEventListener::CODE_MOVING_GC:
+ case CodeEventListener::SHARED_FUNC_MOVE_EVENT:
+ case CodeEventListener::SNAPSHOT_CODE_NAME_EVENT:
+ case CodeEventListener::TICK_EVENT:
+ case CodeEventListener::BYTECODE_FLUSH_EVENT:
+ case CodeEventListener::NUMBER_OF_LOG_EVENTS:
+ return CpuProfileNode::kInternal;
+ }
+}
+
void ProfileNode::CollectDeoptInfo(CodeEntry* entry) {
deopt_infos_.push_back(entry->GetDeoptInfo());
entry->clear_deopt_info();
@@ -486,9 +540,11 @@ using v8::tracing::TracedValue;
std::atomic<uint32_t> CpuProfile::last_id_;
CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
- CpuProfilingOptions options)
+ CpuProfilingOptions options,
+ std::unique_ptr<DiscardedSamplesDelegate> delegate)
: title_(title),
options_(options),
+ delegate_(std::move(delegate)),
start_time_(base::TimeTicks::HighResolutionNow()),
top_down_(profiler->isolate()),
profiler_(profiler),
@@ -535,8 +591,19 @@ void CpuProfile::AddPath(base::TimeTicks timestamp,
(options_.max_samples() == CpuProfilingOptions::kNoSampleLimit ||
samples_.size() < options_.max_samples());
- if (should_record_sample)
+ if (should_record_sample) {
samples_.push_back({top_frame_node, timestamp, src_line});
+ }
+
+ if (!should_record_sample && delegate_ != nullptr) {
+ const auto task_runner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
+ reinterpret_cast<v8::Isolate*>(profiler_->isolate()));
+
+ task_runner->PostTask(std::make_unique<CpuProfileMaxSamplesCallbackTask>(
+ std::move(delegate_)));
+ // std::move ensures that the delegate_ will be null on the next sample,
+ // so we don't post a task multiple times.
+ }
const int kSamplesFlushCount = 100;
const int kNodesFlushCount = 10;
@@ -562,6 +629,7 @@ void BuildNodeValue(const ProfileNode* node, TracedValue* value) {
if (entry->column_number()) {
value->SetInteger("columnNumber", entry->column_number() - 1);
}
+ value->SetString("codeType", entry->code_type_string());
value->EndDictionary();
value->SetInteger("id", node->id());
if (node->parent()) {
@@ -748,7 +816,8 @@ CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
: profiler_(nullptr), current_profiles_semaphore_(1) {}
CpuProfilingStatus CpuProfilesCollection::StartProfiling(
- const char* title, CpuProfilingOptions options) {
+ const char* title, CpuProfilingOptions options,
+ std::unique_ptr<DiscardedSamplesDelegate> delegate) {
current_profiles_semaphore_.Wait();
if (static_cast<int>(current_profiles_.size()) >= kMaxSimultaneousProfiles) {
@@ -764,7 +833,9 @@ CpuProfilingStatus CpuProfilesCollection::StartProfiling(
return CpuProfilingStatus::kAlreadyStarted;
}
}
- current_profiles_.emplace_back(new CpuProfile(profiler_, title, options));
+
+ current_profiles_.emplace_back(
+ new CpuProfile(profiler_, title, options, std::move(delegate)));
current_profiles_semaphore_.Signal();
return CpuProfilingStatus::kStarted;
}
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index 74f1bee35a..44a4007445 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -59,6 +59,8 @@ struct CodeEntryAndLineNumber;
class CodeEntry {
public:
+ enum class CodeType { JS, WASM, OTHER };
+
// CodeEntry may reference strings (|name|, |resource_name|) managed by a
// StringsStorage instance. These must be freed via ReleaseStrings.
inline CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
@@ -66,7 +68,8 @@ class CodeEntry {
int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
int column_number = v8::CpuProfileNode::kNoColumnNumberInfo,
std::unique_ptr<SourcePositionTable> line_info = nullptr,
- bool is_shared_cross_origin = false);
+ bool is_shared_cross_origin = false,
+ CodeType code_type = CodeType::JS);
CodeEntry(const CodeEntry&) = delete;
CodeEntry& operator=(const CodeEntry&) = delete;
@@ -102,6 +105,17 @@ class CodeEntry {
void mark_used() { bit_field_ = UsedField::update(bit_field_, true); }
bool used() const { return UsedField::decode(bit_field_); }
+ const char* code_type_string() const {
+ switch (CodeTypeField::decode(bit_field_)) {
+ case CodeType::JS:
+ return "JS";
+ case CodeType::WASM:
+ return "wasm";
+ case CodeType::OTHER:
+ return "other";
+ }
+ }
+
void FillFunctionInfo(SharedFunctionInfo shared);
void SetBuiltinId(Builtins::Name id);
@@ -148,21 +162,17 @@ class CodeEntry {
V8_EXPORT_PRIVATE static const char* const kProgramEntryName;
V8_EXPORT_PRIVATE static const char* const kIdleEntryName;
- static const char* const kGarbageCollectorEntryName;
+ V8_EXPORT_PRIVATE static const char* const kGarbageCollectorEntryName;
// Used to represent frames for which we have no reliable way to
// detect function.
V8_EXPORT_PRIVATE static const char* const kUnresolvedFunctionName;
V8_EXPORT_PRIVATE static const char* const kRootEntryName;
- V8_INLINE static CodeEntry* program_entry() {
- return kProgramEntry.Pointer();
- }
- V8_INLINE static CodeEntry* idle_entry() { return kIdleEntry.Pointer(); }
- V8_INLINE static CodeEntry* gc_entry() { return kGCEntry.Pointer(); }
- V8_INLINE static CodeEntry* unresolved_entry() {
- return kUnresolvedEntry.Pointer();
- }
- V8_INLINE static CodeEntry* root_entry() { return kRootEntry.Pointer(); }
+ V8_EXPORT_PRIVATE static CodeEntry* program_entry();
+ V8_EXPORT_PRIVATE static CodeEntry* idle_entry();
+ V8_EXPORT_PRIVATE static CodeEntry* gc_entry();
+ V8_EXPORT_PRIVATE static CodeEntry* unresolved_entry();
+ V8_EXPORT_PRIVATE static CodeEntry* root_entry();
// Releases strings owned by this CodeEntry, which may be allocated in the
// provided StringsStorage instance. This instance is not stored directly
@@ -185,41 +195,17 @@ class CodeEntry {
RareData* EnsureRareData();
- struct V8_EXPORT_PRIVATE ProgramEntryCreateTrait {
- static CodeEntry* Create();
- };
- struct V8_EXPORT_PRIVATE IdleEntryCreateTrait {
- static CodeEntry* Create();
- };
- struct V8_EXPORT_PRIVATE GCEntryCreateTrait {
- static CodeEntry* Create();
- };
- struct V8_EXPORT_PRIVATE UnresolvedEntryCreateTrait {
- static CodeEntry* Create();
- };
- struct V8_EXPORT_PRIVATE RootEntryCreateTrait {
- static CodeEntry* Create();
- };
-
- V8_EXPORT_PRIVATE static base::LazyDynamicInstance<
- CodeEntry, ProgramEntryCreateTrait>::type kProgramEntry;
- V8_EXPORT_PRIVATE static base::LazyDynamicInstance<
- CodeEntry, IdleEntryCreateTrait>::type kIdleEntry;
- V8_EXPORT_PRIVATE static base::LazyDynamicInstance<
- CodeEntry, GCEntryCreateTrait>::type kGCEntry;
- V8_EXPORT_PRIVATE static base::LazyDynamicInstance<
- CodeEntry, UnresolvedEntryCreateTrait>::type kUnresolvedEntry;
- V8_EXPORT_PRIVATE static base::LazyDynamicInstance<
- CodeEntry, RootEntryCreateTrait>::type kRootEntry;
-
using TagField = base::BitField<CodeEventListener::LogEventsAndTags, 0, 8>;
- using BuiltinIdField = base::BitField<Builtins::Name, 8, 22>;
+ using BuiltinIdField = base::BitField<Builtins::Name, 8, 20>;
static_assert(Builtins::builtin_count <= BuiltinIdField::kNumValues,
"builtin_count exceeds size of bitfield");
+ using CodeTypeField = base::BitField<CodeType, 28, 2>;
using UsedField = base::BitField<bool, 30, 1>;
using SharedCrossOriginField = base::BitField<bool, 31, 1>;
- uint32_t bit_field_;
+ // Atomic because Used is written from the profiler thread while CodeType is
+ // read from the main thread.
+ std::atomic<std::uint32_t> bit_field_;
const char* name_;
const char* resource_name_;
int line_number_;
@@ -358,8 +344,9 @@ class CpuProfile {
int line;
};
- V8_EXPORT_PRIVATE CpuProfile(CpuProfiler* profiler, const char* title,
- CpuProfilingOptions options);
+ V8_EXPORT_PRIVATE CpuProfile(
+ CpuProfiler* profiler, const char* title, CpuProfilingOptions options,
+ std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
CpuProfile(const CpuProfile&) = delete;
CpuProfile& operator=(const CpuProfile&) = delete;
@@ -395,6 +382,7 @@ class CpuProfile {
const char* title_;
const CpuProfilingOptions options_;
+ std::unique_ptr<DiscardedSamplesDelegate> delegate_;
base::TimeTicks start_time_;
base::TimeTicks end_time_;
std::deque<SampleInfo> samples_;
@@ -409,6 +397,18 @@ class CpuProfile {
static std::atomic<uint32_t> last_id_;
};
+class CpuProfileMaxSamplesCallbackTask : public v8::Task {
+ public:
+ CpuProfileMaxSamplesCallbackTask(
+ std::unique_ptr<DiscardedSamplesDelegate> delegate)
+ : delegate_(std::move(delegate)) {}
+
+ void Run() override { delegate_->Notify(); }
+
+ private:
+ std::unique_ptr<DiscardedSamplesDelegate> delegate_;
+};
+
class V8_EXPORT_PRIVATE CodeMap {
public:
// Creates a new CodeMap with an associated StringsStorage to store the
@@ -420,6 +420,7 @@ class V8_EXPORT_PRIVATE CodeMap {
void AddCode(Address addr, CodeEntry* entry, unsigned size);
void MoveCode(Address from, Address to);
+ void ClearCodesInRange(Address start, Address end);
CodeEntry* FindEntry(Address addr, Address* out_instruction_start = nullptr);
void Print();
@@ -431,7 +432,6 @@ class V8_EXPORT_PRIVATE CodeMap {
unsigned size;
};
- void ClearCodesInRange(Address start, Address end);
void DeleteCodeEntry(CodeEntry*);
std::map<Address, CodeEntryMapInfo> code_map_;
@@ -447,8 +447,9 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection {
CpuProfilesCollection& operator=(const CpuProfilesCollection&) = delete;
void set_cpu_profiler(CpuProfiler* profiler) { profiler_ = profiler; }
- CpuProfilingStatus StartProfiling(const char* title,
- CpuProfilingOptions options = {});
+ CpuProfilingStatus StartProfiling(
+ const char* title, CpuProfilingOptions options = {},
+ std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
CpuProfile* StopProfiling(const char* title);
std::vector<std::unique_ptr<CpuProfile>>* profiles() {
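
Illustrative sketch, not part of the patch: the comment above explains why bit_field_ becomes std::atomic<uint32_t>; the profiler thread marks entries used while the main thread reads the code type out of the same word. A stripped-down version of that packing, assuming the same bit layout (bits 28 and 29 for the code type, bit 30 for the used flag); the patch itself goes through the base::BitField helpers rather than raw masks:

#include <atomic>
#include <cstdint>

std::atomic<uint32_t> bit_field{0};

constexpr uint32_t kCodeTypeShift = 28;
constexpr uint32_t kCodeTypeMask = 3u << kCodeTypeShift;
constexpr uint32_t kUsedBit = 1u << 30;

void MarkUsed() {  // profiler thread
  bit_field.fetch_or(kUsedBit, std::memory_order_relaxed);
}

uint32_t LoadCodeType() {  // main thread
  return (bit_field.load(std::memory_order_relaxed) & kCodeTypeMask) >>
         kCodeTypeShift;
}
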
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
index bf76b54193..a851a97019 100644
--- a/deps/v8/src/profiler/profiler-listener.cc
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -11,6 +11,7 @@
#include "src/deoptimizer/deoptimizer.h"
#include "src/handles/handles-inl.h"
#include "src/objects/code-inl.h"
+#include "src/objects/code.h"
#include "src/objects/objects-inl.h"
#include "src/objects/script-inl.h"
#include "src/objects/shared-function-info-inl.h"
@@ -115,21 +116,37 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
is_shared_cross_origin = script->origin_options().IsSharedCrossOrigin();
+ // TODO(v8:11429,cbruni): improve iteration for baseline code
+ bool is_baseline = abstract_code->kind() == CodeKind::BASELINE;
+ Handle<ByteArray> source_position_table(
+ abstract_code->source_position_table(), isolate_);
+ if (is_baseline) {
+ source_position_table = handle(
+ shared->GetBytecodeArray(isolate_).SourcePositionTable(), isolate_);
+ }
// Add each position to the source position table and store inlining stacks
// for inline positions. We store almost the same information in the
// profiler as is stored on the code object, except that we transform source
// positions to line numbers here, because we only care about attributing
// ticks to a given line.
- for (SourcePositionTableIterator it(
- handle(abstract_code->source_position_table(), isolate_));
- !it.done(); it.Advance()) {
+ for (SourcePositionTableIterator it(source_position_table); !it.done();
+ it.Advance()) {
int position = it.source_position().ScriptOffset();
int inlining_id = it.source_position().InliningId();
+ int code_offset = it.code_offset();
+ if (is_baseline) {
+ // Use the bytecode offset to calculate pc offset for baseline code.
+ // TODO(v8:11429,cbruni): Speed this up.
+ code_offset = static_cast<int>(
+ abstract_code->GetCode().GetBaselinePCForBytecodeOffset(code_offset,
+ false));
+ }
if (inlining_id == SourcePosition::kNotInlined) {
int line_number = script->GetLineNumber(position) + 1;
- line_table->SetPosition(it.code_offset(), line_number, inlining_id);
+ line_table->SetPosition(code_offset, line_number, inlining_id);
} else {
+ DCHECK(!is_baseline);
DCHECK(abstract_code->IsCode());
Handle<Code> code = handle(abstract_code->GetCode(), isolate_);
std::vector<SourcePositionInfo> stack =
@@ -140,7 +157,7 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
// then the script of the inlined frames may be different to the script
// of |shared|.
int line_number = stack.front().line + 1;
- line_table->SetPosition(it.code_offset(), line_number, inlining_id);
+ line_table->SetPosition(code_offset, line_number, inlining_id);
std::vector<CodeEntryAndLineNumber> inline_stack;
for (SourcePositionInfo& pos_info : stack) {
@@ -204,12 +221,16 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
wasm::WasmName name,
const char* source_url, int code_offset,
int script_id) {
- DCHECK_NOT_NULL(source_url);
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = code->instruction_start();
- rec->entry = new CodeEntry(tag, GetName(name), GetName(source_url), 1,
- code_offset + 1, nullptr, true);
+ // Wasm modules always have a source URL. Asm.js modules never have one.
+ DCHECK_EQ(code->native_module()->module()->origin == wasm::kWasmOrigin,
+ source_url != nullptr);
+ rec->entry = new CodeEntry(
+ tag, GetName(name),
+ source_url ? GetName(source_url) : CodeEntry::kEmptyResourceName, 1,
+ code_offset + 1, nullptr, true, CodeEntry::CodeType::WASM);
rec->entry->set_script_id(script_id);
rec->entry->set_position(code_offset);
rec->instruction_size = code->instructions().length();
@@ -298,6 +319,14 @@ void ProfilerListener::CodeDeoptEvent(Handle<Code> code, DeoptimizeKind kind,
DispatchCodeEvent(evt_rec);
}
+void ProfilerListener::BytecodeFlushEvent(Address compiled_data_start) {
+ CodeEventsContainer evt_rec(CodeEventRecord::BYTECODE_FLUSH);
+ BytecodeFlushEventRecord* rec = &evt_rec.BytecodeFlushEventRecord_;
+ rec->instruction_start = compiled_data_start + BytecodeArray::kHeaderSize;
+
+ DispatchCodeEvent(evt_rec);
+}
+
const char* ProfilerListener::GetName(Vector<const char> name) {
// TODO(all): Change {StringsStorage} to accept non-null-terminated strings.
OwnedVector<char> null_terminated = OwnedVector<char>::New(name.size() + 1);
diff --git a/deps/v8/src/profiler/profiler-listener.h b/deps/v8/src/profiler/profiler-listener.h
index c264ef3e66..d4fd34a006 100644
--- a/deps/v8/src/profiler/profiler-listener.h
+++ b/deps/v8/src/profiler/profiler-listener.h
@@ -62,6 +62,7 @@ class V8_EXPORT_PRIVATE ProfilerListener : public CodeEventListener {
void CodeDependencyChangeEvent(Handle<Code> code,
Handle<SharedFunctionInfo> sfi,
const char* reason) override {}
+ void BytecodeFlushEvent(Address compiled_data_start) override;
const char* GetName(Name name) {
return function_and_resource_names_.GetName(name);
diff --git a/deps/v8/src/profiler/strings-storage.cc b/deps/v8/src/profiler/strings-storage.cc
index bc59403122..f149920820 100644
--- a/deps/v8/src/profiler/strings-storage.cc
+++ b/deps/v8/src/profiler/strings-storage.cc
@@ -27,6 +27,7 @@ StringsStorage::~StringsStorage() {
}
const char* StringsStorage::GetCopy(const char* src) {
+ base::MutexGuard guard(&mutex_);
int len = static_cast<int>(strlen(src));
base::HashMap::Entry* entry = GetEntry(src, len);
if (entry->value == nullptr) {
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
index 7cffbd3cda..638aa5545a 100644
--- a/deps/v8/src/profiler/tick-sample.cc
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -125,6 +125,13 @@ bool SimulatorHelper::FillRegisters(Isolate* isolate,
state->sp = reinterpret_cast<void*>(simulator->get_register(Simulator::sp));
state->fp = reinterpret_cast<void*>(simulator->get_register(Simulator::fp));
state->lr = reinterpret_cast<void*>(simulator->get_register(Simulator::ra));
+#elif V8_TARGET_ARCH_RISCV64
+ if (!simulator->has_bad_pc()) {
+ state->pc = reinterpret_cast<void*>(simulator->get_pc());
+ }
+ state->sp = reinterpret_cast<void*>(simulator->get_register(Simulator::sp));
+ state->fp = reinterpret_cast<void*>(simulator->get_register(Simulator::fp));
+ state->lr = reinterpret_cast<void*>(simulator->get_register(Simulator::ra));
#endif
if (state->sp == 0 || state->fp == 0) {
// It possible that the simulator is interrupted while it is updating
diff --git a/deps/v8/src/regexp/experimental/experimental.cc b/deps/v8/src/regexp/experimental/experimental.cc
index d23c34c573..500269c40e 100644
--- a/deps/v8/src/regexp/experimental/experimental.cc
+++ b/deps/v8/src/regexp/experimental/experimental.cc
@@ -214,7 +214,8 @@ int32_t ExperimentalRegExp::MatchForCallFromJs(
MaybeHandle<Object> ExperimentalRegExp::Exec(
Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
- int subject_index, Handle<RegExpMatchInfo> last_match_info) {
+ int subject_index, Handle<RegExpMatchInfo> last_match_info,
+ RegExp::ExecQuirks exec_quirks) {
DCHECK(FLAG_enable_experimental_regexp_engine);
DCHECK_EQ(regexp->TypeTag(), JSRegExp::EXPERIMENTAL);
#ifdef VERIFY_HEAP
@@ -248,6 +249,11 @@ MaybeHandle<Object> ExperimentalRegExp::Exec(
if (num_matches > 0) {
DCHECK_EQ(num_matches, 1);
+ if (exec_quirks == RegExp::ExecQuirks::kTreatMatchAtEndAsFailure) {
+ if (output_registers[0] >= subject->length()) {
+ return isolate->factory()->null_value();
+ }
+ }
return RegExp::SetLastMatchInfo(isolate, last_match_info, subject,
capture_count, output_registers);
} else if (num_matches == 0) {
@@ -285,7 +291,8 @@ int32_t ExperimentalRegExp::OneshotExecRaw(Isolate* isolate,
MaybeHandle<Object> ExperimentalRegExp::OneshotExec(
Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
- int subject_index, Handle<RegExpMatchInfo> last_match_info) {
+ int subject_index, Handle<RegExpMatchInfo> last_match_info,
+ RegExp::ExecQuirks exec_quirks) {
DCHECK(FLAG_enable_experimental_regexp_engine_on_excessive_backtracks);
DCHECK_NE(regexp->TypeTag(), JSRegExp::NOT_COMPILED);
@@ -306,6 +313,11 @@ MaybeHandle<Object> ExperimentalRegExp::OneshotExec(
if (num_matches > 0) {
DCHECK_EQ(num_matches, 1);
+ if (exec_quirks == RegExp::ExecQuirks::kTreatMatchAtEndAsFailure) {
+ if (output_registers[0] >= subject->length()) {
+ return isolate->factory()->null_value();
+ }
+ }
return RegExp::SetLastMatchInfo(isolate, last_match_info, subject,
capture_count, output_registers);
} else if (num_matches == 0) {
diff --git a/deps/v8/src/regexp/experimental/experimental.h b/deps/v8/src/regexp/experimental/experimental.h
index a0ee8d1081..1b44100cc8 100644
--- a/deps/v8/src/regexp/experimental/experimental.h
+++ b/deps/v8/src/regexp/experimental/experimental.h
@@ -36,9 +36,10 @@ class ExperimentalRegExp final : public AllStatic {
Address backtrack_stack,
RegExp::CallOrigin call_origin,
Isolate* isolate, Address regexp);
- static MaybeHandle<Object> Exec(Isolate* isolate, Handle<JSRegExp> regexp,
- Handle<String> subject, int index,
- Handle<RegExpMatchInfo> last_match_info);
+ static MaybeHandle<Object> Exec(
+ Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
+ int index, Handle<RegExpMatchInfo> last_match_info,
+ RegExp::ExecQuirks exec_quirks = RegExp::ExecQuirks::kNone);
static int32_t ExecRaw(Isolate* isolate, RegExp::CallOrigin call_origin,
JSRegExp regexp, String subject,
int32_t* output_registers,
@@ -48,7 +49,8 @@ class ExperimentalRegExp final : public AllStatic {
// its type tag. The regexp itself is not changed (apart from lastIndex).
static MaybeHandle<Object> OneshotExec(
Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
- int index, Handle<RegExpMatchInfo> last_match_info);
+ int index, Handle<RegExpMatchInfo> last_match_info,
+ RegExp::ExecQuirks exec_quirks = RegExp::ExecQuirks::kNone);
static int32_t OneshotExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
Handle<String> subject,
int32_t* output_registers,
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator-inl.h b/deps/v8/src/regexp/regexp-bytecode-generator-inl.h
index bd906fea15..2a6ffec929 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator-inl.h
+++ b/deps/v8/src/regexp/regexp-bytecode-generator-inl.h
@@ -14,13 +14,13 @@ namespace v8 {
namespace internal {
void RegExpBytecodeGenerator::Emit(uint32_t byte, uint32_t twenty_four_bits) {
- uint32_t word = ((twenty_four_bits << BYTECODE_SHIFT) | byte);
- DCHECK(pc_ <= buffer_.length());
- if (pc_ + 3 >= buffer_.length()) {
- Expand();
- }
- *reinterpret_cast<uint32_t*>(buffer_.begin() + pc_) = word;
- pc_ += 4;
+ DCHECK(is_uint24(twenty_four_bits));
+ Emit32((twenty_four_bits << BYTECODE_SHIFT) | byte);
+}
+
+void RegExpBytecodeGenerator::Emit(uint32_t byte, int32_t twenty_four_bits) {
+ DCHECK(is_int24(twenty_four_bits));
+ Emit32((static_cast<uint32_t>(twenty_four_bits) << BYTECODE_SHIFT) | byte);
}
void RegExpBytecodeGenerator::Emit16(uint32_t word) {
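
Illustrative sketch, not part of the patch: the new Emit(uint32_t, int32_t) overload above wins ordinary C++ overload resolution for plain int arguments (an exact match for the second parameter), so signed payloads such as a negative AdvanceCurrentPosition delta are now range-checked with is_int24 instead of being wrapped through an implicit conversion to uint32_t. Stand-in names, compilable on its own:

#include <cstdint>
#include <cstdio>

// Only the parameter types matter here; the bodies stand in for the generator.
void Emit(uint32_t bc, uint32_t arg) { std::printf("unsigned payload\n"); }
void Emit(uint32_t bc, int32_t arg) { std::printf("signed payload\n"); }

int main() {
  int by = -3;      // e.g. an AdvanceCurrentPosition(-3) style argument
  Emit(0x14, by);   // int is an exact match for int32_t: signed overload
  Emit(0x14, 42u);  // unsigned argument: unsigned overload
  return 0;         // 0x14 is an arbitrary stand-in bytecode value
}
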
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator.cc b/deps/v8/src/regexp/regexp-bytecode-generator.cc
index 262d788068..dbfaab5cb3 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator.cc
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.cc
@@ -165,8 +165,10 @@ bool RegExpBytecodeGenerator::Succeed() {
void RegExpBytecodeGenerator::Fail() { Emit(BC_FAIL, 0); }
void RegExpBytecodeGenerator::AdvanceCurrentPosition(int by) {
- DCHECK_LE(kMinCPOffset, by);
- DCHECK_GE(kMaxCPOffset, by);
+ // TODO(chromium:1166138): Turn back into DCHECKs once the underlying issue
+ // is fixed.
+ CHECK_LE(kMinCPOffset, by);
+ CHECK_GE(kMaxCPOffset, by);
advance_current_start_ = pc_;
advance_current_offset_ = by;
Emit(BC_ADVANCE_CP, by);
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator.h b/deps/v8/src/regexp/regexp-bytecode-generator.h
index 9c4b6057c2..6307a802d9 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator.h
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.h
@@ -86,6 +86,7 @@ class V8_EXPORT_PRIVATE RegExpBytecodeGenerator : public RegExpMacroAssembler {
inline void Emit16(uint32_t x);
inline void Emit8(uint32_t x);
inline void Emit(uint32_t bc, uint32_t arg);
+ inline void Emit(uint32_t bc, int32_t arg);
// Bytecode buffer.
int length();
void Copy(byte* a);
diff --git a/deps/v8/src/regexp/regexp-compiler.cc b/deps/v8/src/regexp/regexp-compiler.cc
index 2b712d3c3c..c743ee2563 100644
--- a/deps/v8/src/regexp/regexp-compiler.cc
+++ b/deps/v8/src/regexp/regexp-compiler.cc
@@ -954,17 +954,18 @@ static void EmitDoubleBoundaryTest(RegExpMacroAssembler* masm, int first,
// even_label is for ranges[i] to ranges[i + 1] where i - start_index is even.
// odd_label is for ranges[i] to ranges[i + 1] where i - start_index is odd.
static void EmitUseLookupTable(RegExpMacroAssembler* masm,
- ZoneList<int>* ranges, int start_index,
- int end_index, int min_char, Label* fall_through,
- Label* even_label, Label* odd_label) {
- static const int kSize = RegExpMacroAssembler::kTableSize;
- static const int kMask = RegExpMacroAssembler::kTableMask;
-
- int base = (min_char & ~kMask);
+ ZoneList<uc32>* ranges, uint32_t start_index,
+ uint32_t end_index, uc32 min_char,
+ Label* fall_through, Label* even_label,
+ Label* odd_label) {
+ static const uint32_t kSize = RegExpMacroAssembler::kTableSize;
+ static const uint32_t kMask = RegExpMacroAssembler::kTableMask;
+
+ uc32 base = (min_char & ~kMask);
USE(base);
// Assert that everything is on one kTableSize page.
- for (int i = start_index; i <= end_index; i++) {
+ for (uint32_t i = start_index; i <= end_index; i++) {
DCHECK_EQ(ranges->at(i) & ~kMask, base);
}
DCHECK(start_index == 0 || (ranges->at(start_index - 1) & ~kMask) <= base);
@@ -982,33 +983,35 @@ static void EmitUseLookupTable(RegExpMacroAssembler* masm,
on_bit_clear = odd_label;
bit = 0;
}
- for (int i = 0; i < (ranges->at(start_index) & kMask) && i < kSize; i++) {
+ for (uint32_t i = 0; i < (ranges->at(start_index) & kMask) && i < kSize;
+ i++) {
templ[i] = bit;
}
- int j = 0;
+ uint32_t j = 0;
bit ^= 1;
- for (int i = start_index; i < end_index; i++) {
+ for (uint32_t i = start_index; i < end_index; i++) {
for (j = (ranges->at(i) & kMask); j < (ranges->at(i + 1) & kMask); j++) {
templ[j] = bit;
}
bit ^= 1;
}
- for (int i = j; i < kSize; i++) {
+ for (uint32_t i = j; i < kSize; i++) {
templ[i] = bit;
}
Factory* factory = masm->isolate()->factory();
// TODO(erikcorry): Cache these.
Handle<ByteArray> ba = factory->NewByteArray(kSize, AllocationType::kOld);
- for (int i = 0; i < kSize; i++) {
+ for (uint32_t i = 0; i < kSize; i++) {
ba->set(i, templ[i]);
}
masm->CheckBitInTable(ba, on_bit_set);
if (on_bit_clear != fall_through) masm->GoTo(on_bit_clear);
}
-static void CutOutRange(RegExpMacroAssembler* masm, ZoneList<int>* ranges,
- int start_index, int end_index, int cut_index,
- Label* even_label, Label* odd_label) {
+static void CutOutRange(RegExpMacroAssembler* masm, ZoneList<uc32>* ranges,
+ uint32_t start_index, uint32_t end_index,
+ uint32_t cut_index, Label* even_label,
+ Label* odd_label) {
bool odd = (((cut_index - start_index) & 1) == 1);
Label* in_range_label = odd ? odd_label : even_label;
Label dummy;
@@ -1019,24 +1022,24 @@ static void CutOutRange(RegExpMacroAssembler* masm, ZoneList<int>* ranges,
// Cut out the single range by rewriting the array. This creates a new
// range that is a merger of the two ranges on either side of the one we
// are cutting out. The oddity of the labels is preserved.
- for (int j = cut_index; j > start_index; j--) {
+ for (uint32_t j = cut_index; j > start_index; j--) {
ranges->at(j) = ranges->at(j - 1);
}
- for (int j = cut_index + 1; j < end_index; j++) {
+ for (uint32_t j = cut_index + 1; j < end_index; j++) {
ranges->at(j) = ranges->at(j + 1);
}
}
// Unicode case. Split the search space into kSize spaces that are handled
// with recursion.
-static void SplitSearchSpace(ZoneList<int>* ranges, int start_index,
- int end_index, int* new_start_index,
- int* new_end_index, int* border) {
- static const int kSize = RegExpMacroAssembler::kTableSize;
- static const int kMask = RegExpMacroAssembler::kTableMask;
+static void SplitSearchSpace(ZoneList<uc32>* ranges, uint32_t start_index,
+ uint32_t end_index, uint32_t* new_start_index,
+ uint32_t* new_end_index, uc32* border) {
+ static const uint32_t kSize = RegExpMacroAssembler::kTableSize;
+ static const uint32_t kMask = RegExpMacroAssembler::kTableMask;
- int first = ranges->at(start_index);
- int last = ranges->at(end_index) - 1;
+ uc32 first = ranges->at(start_index);
+ uc32 last = ranges->at(end_index) - 1;
*new_start_index = start_index;
*border = (ranges->at(start_index) & ~kMask) + kSize;
@@ -1055,7 +1058,7 @@ static void SplitSearchSpace(ZoneList<int>* ranges, int start_index,
// 128-character space can take up a lot of space in the ranges array if,
// for example, we only want to match every second character (eg. the lower
// case characters on some Unicode pages).
- int binary_chop_index = (end_index + start_index) / 2;
+ uint32_t binary_chop_index = (end_index + start_index) / 2;
// The first test ensures that we get to the code that handles the Latin1
// range with a single not-taken branch, speeding up this important
// character range (even non-Latin1 charset-based text has spaces and
@@ -1064,8 +1067,8 @@ static void SplitSearchSpace(ZoneList<int>* ranges, int start_index,
end_index - start_index > (*new_start_index - start_index) * 2 &&
last - first > kSize * 2 && binary_chop_index > *new_start_index &&
ranges->at(binary_chop_index) >= first + 2 * kSize) {
- int scan_forward_for_section_border = binary_chop_index;
- int new_border = (ranges->at(binary_chop_index) | kMask) + 1;
+ uint32_t scan_forward_for_section_border = binary_chop_index;
+ uint32_t new_border = (ranges->at(binary_chop_index) | kMask) + 1;
while (scan_forward_for_section_border < end_index) {
if (ranges->at(scan_forward_for_section_border) > new_border) {
@@ -1095,15 +1098,15 @@ static void SplitSearchSpace(ZoneList<int>* ranges, int start_index,
// know that the character is in the range of min_char to max_char inclusive.
// Either label can be nullptr indicating backtracking. Either label can also
// be equal to the fall_through label.
-static void GenerateBranches(RegExpMacroAssembler* masm, ZoneList<int>* ranges,
- int start_index, int end_index, uc32 min_char,
- uc32 max_char, Label* fall_through,
+static void GenerateBranches(RegExpMacroAssembler* masm, ZoneList<uc32>* ranges,
+ uint32_t start_index, uint32_t end_index,
+ uc32 min_char, uc32 max_char, Label* fall_through,
Label* even_label, Label* odd_label) {
DCHECK_LE(min_char, String::kMaxUtf16CodeUnit);
DCHECK_LE(max_char, String::kMaxUtf16CodeUnit);
- int first = ranges->at(start_index);
- int last = ranges->at(end_index) - 1;
+ uc32 first = ranges->at(start_index);
+ uc32 last = ranges->at(end_index) - 1;
DCHECK_LT(min_char, first);
@@ -1127,9 +1130,9 @@ static void GenerateBranches(RegExpMacroAssembler* masm, ZoneList<int>* ranges,
if (end_index - start_index <= 6) {
// It is faster to test for individual characters, so we look for those
// first, then try arbitrary ranges in the second round.
- static int kNoCutIndex = -1;
- int cut = kNoCutIndex;
- for (int i = start_index; i < end_index; i++) {
+ static uint32_t kNoCutIndex = -1;
+ uint32_t cut = kNoCutIndex;
+ for (uint32_t i = start_index; i < end_index; i++) {
if (ranges->at(i) == ranges->at(i + 1) - 1) {
cut = i;
break;
@@ -1154,16 +1157,16 @@ static void GenerateBranches(RegExpMacroAssembler* masm, ZoneList<int>* ranges,
return;
}
- if ((min_char >> kBits) != static_cast<uc32>(first >> kBits)) {
+ if ((min_char >> kBits) != first >> kBits) {
masm->CheckCharacterLT(first, odd_label);
GenerateBranches(masm, ranges, start_index + 1, end_index, first, max_char,
fall_through, odd_label, even_label);
return;
}
- int new_start_index = 0;
- int new_end_index = 0;
- int border = 0;
+ uint32_t new_start_index = 0;
+ uint32_t new_end_index = 0;
+ uc32 border = 0;
SplitSearchSpace(ranges, start_index, end_index, &new_start_index,
&new_end_index, &border);
@@ -1260,9 +1263,8 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
// entry at zero which goes to the failure label, but if there
// was already one there we fall through for success on that entry.
// Subsequent entries have alternating meaning (success/failure).
- // TODO(jgruber,v8:10568): Change `range_boundaries` to a ZoneList<uc32>.
- ZoneList<int>* range_boundaries =
- zone->New<ZoneList<int>>(last_valid_range, zone);
+ ZoneList<uc32>* range_boundaries =
+ zone->New<ZoneList<uc32>>(last_valid_range, zone);
bool zeroth_entry_is_failure = !cc->is_negated();
@@ -1277,7 +1279,7 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
range_boundaries->Add(range.to() + 1, zone);
}
int end_index = range_boundaries->length() - 1;
- if (static_cast<uc32>(range_boundaries->at(end_index)) > max_char) {
+ if (range_boundaries->at(end_index) > max_char) {
end_index--;
}
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-arch.h b/deps/v8/src/regexp/regexp-macro-assembler-arch.h
index 8ec12a0ae6..5d5e3e6a44 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-arch.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-arch.h
@@ -23,6 +23,8 @@
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/regexp/s390/regexp-macro-assembler-s390.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/regexp/riscv64/regexp-macro-assembler-riscv64.h"
#else
#error Unsupported target architecture.
#endif
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index f1dc57db64..3c2c06f64b 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -44,6 +44,7 @@ class RegExpMacroAssembler {
kARMImplementation,
kARM64Implementation,
kMIPSImplementation,
+ kRISCVImplementation,
kS390Implementation,
kPPCImplementation,
kX64Implementation,
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index 3c7d909c54..dc8711d8a7 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -1829,6 +1829,15 @@ bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
return success;
}
+bool RegExpParser::VerifyRegExpSyntax(Isolate* isolate, Zone* zone,
+ FlatStringReader* input,
+ JSRegExp::Flags flags,
+ RegExpCompileData* result,
+ const DisallowGarbageCollection& no_gc) {
+ RegExpParser parser(input, flags, isolate, zone);
+ return parser.Parse(result, no_gc);
+}
+
RegExpBuilder::RegExpBuilder(Zone* zone, JSRegExp::Flags flags)
: zone_(zone),
pending_empty_(false),
diff --git a/deps/v8/src/regexp/regexp-parser.h b/deps/v8/src/regexp/regexp-parser.h
index 33389690ca..c33a5d8115 100644
--- a/deps/v8/src/regexp/regexp-parser.h
+++ b/deps/v8/src/regexp/regexp-parser.h
@@ -160,6 +160,12 @@ class V8_EXPORT_PRIVATE RegExpParser {
static bool ParseRegExp(Isolate* isolate, Zone* zone, FlatStringReader* input,
JSRegExp::Flags flags, RegExpCompileData* result);
+ // Used by the SpiderMonkey embedding of irregexp.
+ static bool VerifyRegExpSyntax(Isolate* isolate, Zone* zone,
+ FlatStringReader* input, JSRegExp::Flags flags,
+ RegExpCompileData* result,
+ const DisallowGarbageCollection& nogc);
+
private:
bool Parse(RegExpCompileData* result, const DisallowGarbageCollection&);
diff --git a/deps/v8/src/regexp/regexp.cc b/deps/v8/src/regexp/regexp.cc
index f4497842e2..5f83269a8f 100644
--- a/deps/v8/src/regexp/regexp.cc
+++ b/deps/v8/src/regexp/regexp.cc
@@ -76,7 +76,8 @@ class RegExpImpl final : public AllStatic {
// Returns an empty handle in case of an exception.
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> IrregexpExec(
Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
- int index, Handle<RegExpMatchInfo> last_match_info);
+ int index, Handle<RegExpMatchInfo> last_match_info,
+ RegExp::ExecQuirks exec_quirks = RegExp::ExecQuirks::kNone);
static bool CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
Handle<String> sample_subject, bool is_one_byte);
@@ -268,15 +269,17 @@ bool RegExp::EnsureFullyCompiled(Isolate* isolate, Handle<JSRegExp> re,
// static
MaybeHandle<Object> RegExp::ExperimentalOneshotExec(
Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
- int index, Handle<RegExpMatchInfo> last_match_info) {
+ int index, Handle<RegExpMatchInfo> last_match_info,
+ RegExp::ExecQuirks exec_quirks) {
return ExperimentalRegExp::OneshotExec(isolate, regexp, subject, index,
- last_match_info);
+ last_match_info, exec_quirks);
}
// static
MaybeHandle<Object> RegExp::Exec(Isolate* isolate, Handle<JSRegExp> regexp,
Handle<String> subject, int index,
- Handle<RegExpMatchInfo> last_match_info) {
+ Handle<RegExpMatchInfo> last_match_info,
+ ExecQuirks exec_quirks) {
switch (regexp->TypeTag()) {
case JSRegExp::NOT_COMPILED:
UNREACHABLE();
@@ -285,10 +288,10 @@ MaybeHandle<Object> RegExp::Exec(Isolate* isolate, Handle<JSRegExp> regexp,
last_match_info);
case JSRegExp::IRREGEXP:
return RegExpImpl::IrregexpExec(isolate, regexp, subject, index,
- last_match_info);
+ last_match_info, exec_quirks);
case JSRegExp::EXPERIMENTAL:
return ExperimentalRegExp::Exec(isolate, regexp, subject, index,
- last_match_info);
+ last_match_info, exec_quirks);
}
}
@@ -641,7 +644,8 @@ int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
MaybeHandle<Object> RegExpImpl::IrregexpExec(
Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
- int previous_index, Handle<RegExpMatchInfo> last_match_info) {
+ int previous_index, Handle<RegExpMatchInfo> last_match_info,
+ RegExp::ExecQuirks exec_quirks) {
DCHECK_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
subject = String::Flatten(isolate, subject);
@@ -691,6 +695,11 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(
output_registers, required_registers);
if (res == RegExp::RE_SUCCESS) {
+ if (exec_quirks == RegExp::ExecQuirks::kTreatMatchAtEndAsFailure) {
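+      // output_registers[0] holds the match start. For the split() fast path
+      // (see ExecQuirks), a match that begins at the end of the subject is
+      // reported as a failure.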
+ if (output_registers[0] >= subject->length()) {
+ return isolate->factory()->null_value();
+ }
+ }
int capture_count = regexp->CaptureCount();
return RegExp::SetLastMatchInfo(isolate, last_match_info, subject,
capture_count, output_registers);
@@ -847,6 +856,9 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
#elif V8_TARGET_ARCH_MIPS64
macro_assembler.reset(new RegExpMacroAssemblerMIPS(isolate, zone, mode,
output_register_count));
+#elif V8_TARGET_ARCH_RISCV64
+ macro_assembler.reset(new RegExpMacroAssemblerRISCV(isolate, zone, mode,
+ output_register_count));
#else
#error "Unsupported architecture"
#endif
diff --git a/deps/v8/src/regexp/regexp.h b/deps/v8/src/regexp/regexp.h
index 3e20b5f80c..40fe832fd7 100644
--- a/deps/v8/src/regexp/regexp.h
+++ b/deps/v8/src/regexp/regexp.h
@@ -86,16 +86,28 @@ class RegExp final : public AllStatic {
kFromJs = 1,
};
+ enum class ExecQuirks {
+ kNone,
+ // Used to work around an issue in the RegExpPrototypeSplit fast path,
+ // which diverges from the spec by not creating a sticky copy of the RegExp
+ // instance and calling `exec` in a loop. If called in this context, we
+ // must not update the last_match_info on a successful match at the subject
+ // string end. See crbug.com/1075514 for more information.
+ kTreatMatchAtEndAsFailure,
+ };
+
// See ECMA-262 section 15.10.6.2.
// This function calls the garbage collector if necessary.
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Exec(
Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
- int index, Handle<RegExpMatchInfo> last_match_info);
+ int index, Handle<RegExpMatchInfo> last_match_info,
+ ExecQuirks exec_quirks = ExecQuirks::kNone);
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
ExperimentalOneshotExec(Isolate* isolate, Handle<JSRegExp> regexp,
Handle<String> subject, int index,
- Handle<RegExpMatchInfo> last_match_info);
+ Handle<RegExpMatchInfo> last_match_info,
+ ExecQuirks exec_quirks = ExecQuirks::kNone);
// Integral return values used throughout regexp code layers.
static constexpr int kInternalRegExpFailure = 0;
diff --git a/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc b/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc
new file mode 100644
index 0000000000..ddd7444c25
--- /dev/null
+++ b/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc
@@ -0,0 +1,1269 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_RISCV64
+
+#include "src/regexp/riscv64/regexp-macro-assembler-riscv64.h"
+
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/logging/log.h"
+#include "src/objects/objects-inl.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp-stack.h"
+#include "src/snapshot/embedded/embedded-data.h"
+#include "src/strings/unicode.h"
+
+namespace v8 {
+namespace internal {
+
+/* clang-format off
+ *
+ * This assembler uses the following register assignment convention
+ * - t4 : Temporarily stores the index of capture start after a matching pass
+ * for a global regexp.
+ * - a5 : Pointer to current Code object including heap object tag.
+ * - a6 : Current position in input, as negative offset from end of string.
+ * Please notice that this is the byte offset, not the character offset!
+ * - a7 : Currently loaded character. Must be loaded using
+ * LoadCurrentCharacter before using any of the dispatch methods.
+ * - t0 : Points to tip of backtrack stack
+ * - t1 : Unused.
+ * - t2 : End of input (points to byte after last character in input).
+ * - fp : Frame pointer. Used to access arguments, local variables and
+ * RegExp registers.
+ * - sp : Points to tip of C stack.
+ *
+ * The remaining registers are free for computations.
+ * Each call to a public method should retain this convention.
+ *
+ * The stack will have the following structure:
+ *
+ * - fp[80] Isolate* isolate (address of the current isolate) kIsolate
+ * kStackFrameHeader
+ * --- sp when called ---
+ * - fp[72] ra Return from RegExp code (ra). kReturnAddress
+ * - fp[64] s9, old-fp Old fp, callee saved(s9).
+ * - fp[0..63] fp..s7 Callee-saved registers fp..s7.
+ * --- frame pointer ----
+ * - fp[-8] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCall
+ * - fp[-16] stack_base (Top of backtracking stack). kStackHighEnd
+ * - fp[-24] capture array size (may fit multiple sets of matches) kNumOutputRegisters
+ * - fp[-32] int* capture_array (int[num_saved_registers_], for output). kRegisterOutput
+ * - fp[-40] end of input (address of end of string). kInputEnd
+ * - fp[-48] start of input (address of first character in string). kInputStart
+ * - fp[-56] start index (character index of start). kStartIndex
+ * - fp[-64] void* input_string (location of a handle containing the string). kInputString
+ * - fp[-72] success counter (only for global regexps to count matches). kSuccessfulCaptures
+ * - fp[-80] Offset of location before start of input (effectively character kStringStartMinusOne
+ * position -1). Used to initialize capture registers to a
+ * non-position.
+ * --------- The following output registers are 32-bit values. ---------
+ * - fp[-88] register 0 (Only positions must be stored in the first kRegisterZero
+ * - register 1 num_saved_registers_ registers)
+ * - ...
+ * - register num_registers-1
+ * --- sp ---
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code and the remaining arguments are passed in registers, e.g. by calling the
+ * code entry as cast to a function with the signature:
+ * int (*match)(String input_string,
+ * int start_index,
+ * Address start,
+ * Address end,
+ * int* capture_output_array,
+ * int num_capture_registers,
+ * byte* stack_area_base,
+ * bool direct_call = false,
+ * Isolate* isolate);
+ * The call is performed by NativeRegExpMacroAssembler::Execute()
+ * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper.
+ *
+ * clang-format on
+ */
+
+#define __ ACCESS_MASM(masm_)
+
+const int RegExpMacroAssemblerRISCV::kRegExpCodeSize;
+
+RegExpMacroAssemblerRISCV::RegExpMacroAssemblerRISCV(Isolate* isolate,
+ Zone* zone, Mode mode,
+ int registers_to_save)
+ : NativeRegExpMacroAssembler(isolate, zone),
+ masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
+ mode_(mode),
+ num_registers_(registers_to_save),
+ num_saved_registers_(registers_to_save),
+ entry_label_(),
+ start_label_(),
+ success_label_(),
+ backtrack_label_(),
+ exit_label_(),
+ internal_failure_label_() {
+ masm_->set_root_array_available(false);
+
+ DCHECK_EQ(0, registers_to_save % 2);
+ __ jmp(&entry_label_); // We'll write the entry code later.
+ // If the code gets too big or corrupted, an internal exception will be
+ // raised, and we will exit right away.
+ __ bind(&internal_failure_label_);
+ __ li(a0, Operand(FAILURE));
+ __ Ret();
+ __ bind(&start_label_); // And then continue from here.
+}
+
+RegExpMacroAssemblerRISCV::~RegExpMacroAssemblerRISCV() {
+ delete masm_;
+ // Unuse labels in case we throw away the assembler without calling GetCode.
+ entry_label_.Unuse();
+ start_label_.Unuse();
+ success_label_.Unuse();
+ backtrack_label_.Unuse();
+ exit_label_.Unuse();
+ check_preempt_label_.Unuse();
+ stack_overflow_label_.Unuse();
+ internal_failure_label_.Unuse();
+ fallback_label_.Unuse();
+}
+
+int RegExpMacroAssemblerRISCV::stack_limit_slack() {
+ return RegExpStack::kStackLimitSlack;
+}
+
+void RegExpMacroAssemblerRISCV::AdvanceCurrentPosition(int by) {
+ if (by != 0) {
+ __ Add64(current_input_offset(), current_input_offset(),
+ Operand(by * char_size()));
+ }
+}
+
+void RegExpMacroAssemblerRISCV::AdvanceRegister(int reg, int by) {
+ DCHECK_LE(0, reg);
+ DCHECK_GT(num_registers_, reg);
+ if (by != 0) {
+ __ Ld(a0, register_location(reg));
+ __ Add64(a0, a0, Operand(by));
+ __ Sd(a0, register_location(reg));
+ }
+}
+
+void RegExpMacroAssemblerRISCV::Backtrack() {
+ CheckPreemption();
+ if (has_backtrack_limit()) {
+ Label next;
+ __ Ld(a0, MemOperand(frame_pointer(), kBacktrackCount));
+ __ Add64(a0, a0, Operand(1));
+ __ Sd(a0, MemOperand(frame_pointer(), kBacktrackCount));
+ __ Branch(&next, ne, a0, Operand(backtrack_limit()));
+
+ // Backtrack limit exceeded.
+ if (can_fallback()) {
+ __ jmp(&fallback_label_);
+ } else {
+ // Can't fallback, so we treat it as a failed match.
+ Fail();
+ }
+
+ __ bind(&next);
+ }
+ // Pop Code offset from backtrack stack, add Code and jump to location.
+ Pop(a0);
+ __ Add64(a0, a0, code_pointer());
+ __ Jump(a0);
+}
+
+void RegExpMacroAssemblerRISCV::Bind(Label* label) { __ bind(label); }
+
+void RegExpMacroAssemblerRISCV::CheckCharacter(uint32_t c, Label* on_equal) {
+ BranchOrBacktrack(on_equal, eq, current_character(), Operand(c));
+}
+
+void RegExpMacroAssemblerRISCV::CheckCharacterGT(uc16 limit,
+ Label* on_greater) {
+ BranchOrBacktrack(on_greater, gt, current_character(), Operand(limit));
+}
+
+void RegExpMacroAssemblerRISCV::CheckAtStart(int cp_offset,
+ Label* on_at_start) {
+ __ Ld(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Add64(a0, current_input_offset(),
+ Operand(-char_size() + cp_offset * char_size()));
+ BranchOrBacktrack(on_at_start, eq, a0, Operand(a1));
+}
+
+void RegExpMacroAssemblerRISCV::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ __ Ld(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Add64(a0, current_input_offset(),
+ Operand(-char_size() + cp_offset * char_size()));
+ BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1));
+}
+
+void RegExpMacroAssemblerRISCV::CheckCharacterLT(uc16 limit, Label* on_less) {
+ BranchOrBacktrack(on_less, lt, current_character(), Operand(limit));
+}
+
+void RegExpMacroAssemblerRISCV::CheckGreedyLoop(Label* on_equal) {
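+  // If the value on top of the backtrack stack equals the current input
+  // position, pop it and branch to on_equal; otherwise leave the stack
+  // untouched and fall through.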
+ Label backtrack_non_equal;
+ __ Lw(a0, MemOperand(backtrack_stackpointer(), 0));
+ __ Branch(&backtrack_non_equal, ne, current_input_offset(), Operand(a0));
+ __ Add64(backtrack_stackpointer(), backtrack_stackpointer(),
+ Operand(kIntSize));
+ __ bind(&backtrack_non_equal);
+ BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0));
+}
+
+void RegExpMacroAssemblerRISCV::CheckNotBackReferenceIgnoreCase(
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
+ Label fallthrough;
+ __ Ld(a0, register_location(start_reg)); // Index of start of capture.
+ __ Ld(a1, register_location(start_reg + 1)); // Index of end of capture.
+ __ Sub64(a1, a1, a0); // Length of capture.
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
+ __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+
+ if (read_backward) {
+ __ Ld(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Add64(t1, t1, a1);
+ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1));
+ } else {
+ __ Add64(t1, a1, current_input_offset());
+ // Check that there are enough characters left in the input.
+ BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg));
+ }
+
+ if (mode_ == LATIN1) {
+ Label success;
+ Label fail;
+ Label loop_check;
+
+ // a0 - offset of start of capture.
+ // a1 - length of capture.
+ __ Add64(a0, a0, Operand(end_of_input_address()));
+ __ Add64(a2, end_of_input_address(), Operand(current_input_offset()));
+ if (read_backward) {
+ __ Sub64(a2, a2, Operand(a1));
+ }
+ __ Add64(a1, a0, Operand(a1));
+
+ // a0 - Address of start of capture.
+ // a1 - Address of end of capture.
+ // a2 - Address of current input position.
+
+ Label loop;
+ __ bind(&loop);
+ __ Lbu(a3, MemOperand(a0, 0));
+ __ addi(a0, a0, char_size());
+ __ Lbu(a4, MemOperand(a2, 0));
+ __ addi(a2, a2, char_size());
+
+ __ Branch(&loop_check, eq, a4, Operand(a3));
+
+ // Mismatch, try case-insensitive match (converting letters to lower-case).
+ __ Or(a3, a3, Operand(0x20)); // Convert capture character to lower-case.
+ __ Or(a4, a4, Operand(0x20)); // Also convert input character.
+ __ Branch(&fail, ne, a4, Operand(a3));
+ __ Sub64(a3, a3, Operand('a'));
+ __ Branch(&loop_check, Uless_equal, a3, Operand('z' - 'a'));
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ Sub64(a3, a3, Operand(224 - 'a'));
+ // Weren't Latin-1 letters.
+ __ Branch(&fail, Ugreater, a3, Operand(254 - 224));
+ // Check for 247.
+ __ Branch(&fail, eq, a3, Operand(247 - 224));
+
+ __ bind(&loop_check);
+ __ Branch(&loop, lt, a0, Operand(a1));
+ __ jmp(&success);
+
+ __ bind(&fail);
+ GoTo(on_no_match);
+
+ __ bind(&success);
+ // Compute new value of character position after the matched part.
+ __ Sub64(current_input_offset(), a2, end_of_input_address());
+ if (read_backward) {
+ __ Ld(t1, register_location(start_reg)); // Index of start of capture.
+ __ Ld(a2, register_location(start_reg + 1)); // Index of end of capture.
+ __ Add64(current_input_offset(), current_input_offset(), Operand(t1));
+ __ Sub64(current_input_offset(), current_input_offset(), Operand(a2));
+ }
+ } else {
+ DCHECK(mode_ == UC16);
+ // Put regexp engine registers on stack.
+ RegList regexp_registers_to_retain = current_input_offset().bit() |
+ current_character().bit() |
+ backtrack_stackpointer().bit();
+ __ MultiPush(regexp_registers_to_retain);
+
+ int argument_count = 4;
+ __ PrepareCallCFunction(argument_count, a2);
+
+ // a0 - offset of start of capture.
+ // a1 - length of capture.
+
+ // Put arguments into arguments registers.
+ // Parameters are
+ // a0: Address byte_offset1 - Address captured substring's start.
+ // a1: Address byte_offset2 - Address of current character position.
+ // a2: size_t byte_length - length of capture in bytes(!).
+ // a3: Isolate* isolate.
+
+ // Address of start of capture.
+ __ Add64(a0, a0, Operand(end_of_input_address()));
+ // Length of capture.
+ __ mv(a2, a1);
+ // Save length in callee-save register for use on return.
+ __ mv(s3, a1);
+ // Address of current input position.
+ __ Add64(a1, current_input_offset(), Operand(end_of_input_address()));
+ if (read_backward) {
+ __ Sub64(a1, a1, Operand(s3));
+ }
+ // Isolate.
+ __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
+
+ {
+ AllowExternalCallThatCantCauseGC scope(masm_);
+ ExternalReference function =
+ unicode ? ExternalReference::re_case_insensitive_compare_unicode(
+ isolate())
+ : ExternalReference::re_case_insensitive_compare_non_unicode(
+ isolate());
+ __ CallCFunction(function, argument_count);
+ }
+
+ // Restore regexp engine registers.
+ __ MultiPop(regexp_registers_to_retain);
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+ __ Ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+
+ // Check if function returned non-zero for success or zero for failure.
+ BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg));
+ // On success, increment position by length of capture.
+ if (read_backward) {
+ __ Sub64(current_input_offset(), current_input_offset(), Operand(s3));
+ } else {
+ __ Add64(current_input_offset(), current_input_offset(), Operand(s3));
+ }
+ }
+
+ __ bind(&fallthrough);
+}
+
+void RegExpMacroAssemblerRISCV::CheckNotBackReference(int start_reg,
+ bool read_backward,
+ Label* on_no_match) {
+ Label fallthrough;
+
+ // Find length of back-referenced capture.
+ __ Ld(a0, register_location(start_reg));
+ __ Ld(a1, register_location(start_reg + 1));
+ __ Sub64(a1, a1, a0); // Length to check.
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
+ __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+
+ if (read_backward) {
+ __ Ld(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Add64(t1, t1, a1);
+ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1));
+ } else {
+ __ Add64(t1, a1, current_input_offset());
+ // Check that there are enough characters left in the input.
+ BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg));
+ }
+
+ // Compute pointers to match string and capture string.
+ __ Add64(a0, a0, Operand(end_of_input_address()));
+ __ Add64(a2, end_of_input_address(), Operand(current_input_offset()));
+ if (read_backward) {
+ __ Sub64(a2, a2, Operand(a1));
+ }
+ __ Add64(a1, a1, Operand(a0));
+
+ Label loop;
+ __ bind(&loop);
+ if (mode_ == LATIN1) {
+ __ Lbu(a3, MemOperand(a0, 0));
+ __ addi(a0, a0, char_size());
+ __ Lbu(a4, MemOperand(a2, 0));
+ __ addi(a2, a2, char_size());
+ } else {
+ DCHECK(mode_ == UC16);
+ __ Lhu(a3, MemOperand(a0, 0));
+ __ addi(a0, a0, char_size());
+ __ Lhu(a4, MemOperand(a2, 0));
+ __ addi(a2, a2, char_size());
+ }
+ BranchOrBacktrack(on_no_match, ne, a3, Operand(a4));
+ __ Branch(&loop, lt, a0, Operand(a1));
+
+ // Move current character position to position after match.
+ __ Sub64(current_input_offset(), a2, end_of_input_address());
+ if (read_backward) {
+ __ Ld(t1, register_location(start_reg)); // Index of start of capture.
+ __ Ld(a2, register_location(start_reg + 1)); // Index of end of capture.
+ __ Add64(current_input_offset(), current_input_offset(), Operand(t1));
+ __ Sub64(current_input_offset(), current_input_offset(), Operand(a2));
+ }
+ __ bind(&fallthrough);
+}
+
+void RegExpMacroAssemblerRISCV::CheckNotCharacter(uint32_t c,
+ Label* on_not_equal) {
+ BranchOrBacktrack(on_not_equal, ne, current_character(), Operand(c));
+}
+
+void RegExpMacroAssemblerRISCV::CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal) {
+ __ And(a0, current_character(), Operand(mask));
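+  // When c is 0, compare directly against the zero register instead of a
+  // zero immediate operand.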
+ Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c);
+ BranchOrBacktrack(on_equal, eq, a0, rhs);
+}
+
+void RegExpMacroAssemblerRISCV::CheckNotCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_not_equal) {
+ __ And(a0, current_character(), Operand(mask));
+ Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c);
+ BranchOrBacktrack(on_not_equal, ne, a0, rhs);
+}
+
+void RegExpMacroAssemblerRISCV::CheckNotCharacterAfterMinusAnd(
+ uc16 c, uc16 minus, uc16 mask, Label* on_not_equal) {
+ DCHECK_GT(String::kMaxUtf16CodeUnit, minus);
+ __ Sub64(a0, current_character(), Operand(minus));
+ __ And(a0, a0, Operand(mask));
+ BranchOrBacktrack(on_not_equal, ne, a0, Operand(c));
+}
+
+void RegExpMacroAssemblerRISCV::CheckCharacterInRange(uc16 from, uc16 to,
+ Label* on_in_range) {
+ __ Sub64(a0, current_character(), Operand(from));
+ // Unsigned lower-or-same condition.
+ BranchOrBacktrack(on_in_range, Uless_equal, a0, Operand(to - from));
+}
+
+void RegExpMacroAssemblerRISCV::CheckCharacterNotInRange(
+ uc16 from, uc16 to, Label* on_not_in_range) {
+ __ Sub64(a0, current_character(), Operand(from));
+ // Unsigned higher condition.
+ BranchOrBacktrack(on_not_in_range, Ugreater, a0, Operand(to - from));
+}
+
+void RegExpMacroAssemblerRISCV::CheckBitInTable(Handle<ByteArray> table,
+ Label* on_bit_set) {
+ __ li(a0, Operand(table));
+ if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
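+    // The table has kTableSize entries, so mask the character down to a valid
+    // index. In Latin-1 mode with a full-sized table the character itself is
+    // already a valid index (see the else branch below).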
+ __ And(a1, current_character(), Operand(kTableSize - 1));
+ __ Add64(a0, a0, a1);
+ } else {
+ __ Add64(a0, a0, current_character());
+ }
+
+ __ Lbu(a0, FieldMemOperand(a0, ByteArray::kHeaderSize));
+ BranchOrBacktrack(on_bit_set, ne, a0, Operand(zero_reg));
+}
+
+bool RegExpMacroAssemblerRISCV::CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match) {
+ // Range checks (c in min..max) are generally implemented by an unsigned
+ // (c - min) <= (max - min) check.
+ switch (type) {
+ case 's':
+ // Match space-characters.
+ if (mode_ == LATIN1) {
+ // One byte space characters are '\t'..'\r', ' ' and \u00a0.
+ Label success;
+ __ Branch(&success, eq, current_character(), Operand(' '));
+ // Check range 0x09..0x0D.
+ __ Sub64(a0, current_character(), Operand('\t'));
+ __ Branch(&success, Uless_equal, a0, Operand('\r' - '\t'));
+ // \u00a0 (NBSP).
+ BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00A0 - '\t'));
+ __ bind(&success);
+ return true;
+ }
+ return false;
+ case 'S':
+ // The emitted code for generic character classes is good enough.
+ return false;
+ case 'd':
+ // Match Latin1 digits ('0'..'9').
+ __ Sub64(a0, current_character(), Operand('0'));
+ BranchOrBacktrack(on_no_match, Ugreater, a0, Operand('9' - '0'));
+ return true;
+ case 'D':
+ // Match non Latin1-digits.
+ __ Sub64(a0, current_character(), Operand('0'));
+ BranchOrBacktrack(on_no_match, Uless_equal, a0, Operand('9' - '0'));
+ return true;
+ case '.': {
+ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029).
+ __ Xor(a0, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C.
+ __ Sub64(a0, a0, Operand(0x0B));
+ BranchOrBacktrack(on_no_match, Uless_equal, a0, Operand(0x0C - 0x0B));
+ if (mode_ == UC16) {
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ Sub64(a0, a0, Operand(0x2028 - 0x0B));
+ BranchOrBacktrack(on_no_match, Uless_equal, a0, Operand(1));
+ }
+ return true;
+ }
+ case 'n': {
+ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029).
+ __ Xor(a0, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C.
+ __ Sub64(a0, a0, Operand(0x0B));
+ if (mode_ == LATIN1) {
+ BranchOrBacktrack(on_no_match, Ugreater, a0, Operand(0x0C - 0x0B));
+ } else {
+ Label done;
+ BranchOrBacktrack(&done, Uless_equal, a0, Operand(0x0C - 0x0B));
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ Sub64(a0, a0, Operand(0x2028 - 0x0B));
+ BranchOrBacktrack(on_no_match, Ugreater, a0, Operand(1));
+ __ bind(&done);
+ }
+ return true;
+ }
+ case 'w': {
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
+ BranchOrBacktrack(on_no_match, Ugreater, current_character(),
+ Operand('z'));
+ }
+ ExternalReference map =
+ ExternalReference::re_word_character_map(isolate());
+ __ li(a0, Operand(map));
+ __ Add64(a0, a0, current_character());
+ __ Lbu(a0, MemOperand(a0, 0));
+ BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg));
+ return true;
+ }
+ case 'W': {
+ Label done;
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
+ __ Branch(&done, Ugreater, current_character(), Operand('z'));
+ }
+ ExternalReference map =
+ ExternalReference::re_word_character_map(isolate());
+ __ li(a0, Operand(map));
+ __ Add64(a0, a0, current_character());
+ __ Lbu(a0, MemOperand(a0, 0));
+ BranchOrBacktrack(on_no_match, ne, a0, Operand(zero_reg));
+ if (mode_ != LATIN1) {
+ __ bind(&done);
+ }
+ return true;
+ }
+ case '*':
+ // Match any character.
+ return true;
+ // No custom implementation (yet): s(UC16), S(UC16).
+ default:
+ return false;
+ }
+}
+
+void RegExpMacroAssemblerRISCV::Fail() {
+ __ li(a0, Operand(FAILURE));
+ __ jmp(&exit_label_);
+}
+
+Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
+ Label return_a0;
+ if (masm_->has_exception()) {
+ // If the code gets corrupted due to long regular expressions and lack of
+ // space on trampolines, an internal exception flag is set. If this case
+    // is detected, we jump straight to the exit sequence.
+ __ bind_to(&entry_label_, internal_failure_label_.pos());
+ } else {
+    // Finalize code - write the entry point code now that we know how many
+    // registers we need.
+
+ // Entry code:
+ __ bind(&entry_label_);
+
+    // Tell the system that we have a stack frame. Because the type is MANUAL,
+    // no code is generated.
+ FrameScope scope(masm_, StackFrame::MANUAL);
+
+ // Actually emit code to start a new stack frame.
+ // Push arguments
+ // Save callee-save registers.
+ // Start new stack frame.
+ // Store link register in existing stack-cell.
+ // Order here should correspond to order of offset constants in header file.
+    // TODO(plind): we save fp..s11, but ONLY use s3 here - either use the
+    // other saved registers or don't save them at all.
+ RegList registers_to_retain =
+ fp.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() |
+ s6.bit() | s7.bit() | s8.bit() /*| s9.bit() | s10.bit() | s11.bit()*/;
+ DCHECK(NumRegs(registers_to_retain) == kNumCalleeRegsToRetain);
+
+    // The remaining arguments are passed in registers, e.g. by calling the code
+ // entry as cast to a function with the signature:
+ //
+    //   int (*match)(String input_string,          // a0
+    //                int start_index,              // a1
+    //                Address start,                // a2
+    //                Address end,                  // a3
+    //                int* capture_output_array,    // a4
+    //                int num_capture_registers,    // a5
+    //                byte* stack_area_base,        // a6
+    //                bool direct_call = false,     // a7
+    //                Isolate* isolate);            // on the stack
+ RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit() |
+ a4.bit() | a5.bit() | a6.bit() | a7.bit();
+
+    // According to the MultiPush implementation, registers are pushed in the
+    // order ra, fp, then s8..s1, and finally a7..a0.
+ __ MultiPush(ra.bit() | registers_to_retain | argument_registers);
+
+ // Set frame pointer in space for it if this is not a direct call
+ // from generated code.
+ __ Add64(frame_pointer(), sp,
+ Operand(NumRegs(argument_registers) * kPointerSize));
+
+ STATIC_ASSERT(kSuccessfulCaptures == kInputString - kSystemPointerSize);
+ __ mv(a0, zero_reg);
+ __ push(a0); // Make room for success counter and initialize it to 0.
+ STATIC_ASSERT(kStringStartMinusOne ==
+ kSuccessfulCaptures - kSystemPointerSize);
+ __ push(a0); // Make room for "string start - 1" constant.
+ STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
+ __ push(a0); // The backtrack counter
+
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit;
+ Label stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(masm_->isolate());
+ __ li(a0, Operand(stack_limit));
+ __ Ld(a0, MemOperand(a0));
+ __ Sub64(a0, sp, a0);
+ // Handle it if the stack pointer is already below the stack limit.
+ __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg));
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ Branch(&stack_ok, Ugreater_equal, a0,
+ Operand(num_registers_ * kPointerSize));
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ li(a0, Operand(EXCEPTION));
+ __ jmp(&return_a0);
+
+ __ bind(&stack_limit_hit);
+ CallCheckStackGuardState(a0);
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ Branch(&return_a0, ne, a0, Operand(zero_reg));
+
+ __ bind(&stack_ok);
+ // Allocate space on stack for registers.
+ __ Sub64(sp, sp, Operand(num_registers_ * kPointerSize));
+ // Load string end.
+ __ Ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ // Load input start.
+ __ Ld(a0, MemOperand(frame_pointer(), kInputStart));
+ // Find negative length (offset of start relative to end).
+ __ Sub64(current_input_offset(), a0, end_of_input_address());
+ // Set a0 to address of char before start of the input string
+ // (effectively string position -1).
+ __ Ld(a1, MemOperand(frame_pointer(), kStartIndex));
+ __ Sub64(a0, current_input_offset(), Operand(char_size()));
+ __ slli(t1, a1, (mode_ == UC16) ? 1 : 0);
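+    // t1 is the start index converted to a byte offset (UC16 characters are
+    // two bytes wide).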
+ __ Sub64(a0, a0, t1);
+ // Store this value in a local variable, for use when clearing
+ // position registers.
+ __ Sd(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+
+ // Initialize code pointer register
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+
+ Label load_char_start_regexp, start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg));
+ __ li(current_character(), Operand('\n'));
+ __ jmp(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+
+ // Initialize on-stack registers.
+ if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
+ // Fill saved registers with initial value = start offset - 1.
+ if (num_saved_registers_ > 8) {
+ // Address of register 0.
+ __ Add64(a1, frame_pointer(), Operand(kRegisterZero));
+ __ li(a2, Operand(num_saved_registers_));
+ Label init_loop;
+ __ bind(&init_loop);
+ __ Sd(a0, MemOperand(a1));
+ __ Add64(a1, a1, Operand(-kPointerSize));
+ __ Sub64(a2, a2, Operand(1));
+ __ Branch(&init_loop, ne, a2, Operand(zero_reg));
+ } else {
+ for (int i = 0; i < num_saved_registers_; i++) {
+ __ Sd(a0, register_location(i));
+ }
+ }
+ }
+
+ // Initialize backtrack stack pointer.
+ __ Ld(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
+
+ __ jmp(&start_label_);
+
+ // Exit code:
+ if (success_label_.is_linked()) {
+ // Save captures when successful.
+ __ bind(&success_label_);
+ if (num_saved_registers_ > 0) {
+ // Copy captures to output.
+ __ Ld(a1, MemOperand(frame_pointer(), kInputStart));
+ __ Ld(a0, MemOperand(frame_pointer(), kRegisterOutput));
+ __ Ld(a2, MemOperand(frame_pointer(), kStartIndex));
+ __ Sub64(a1, end_of_input_address(), a1);
+ // a1 is length of input in bytes.
+ if (mode_ == UC16) {
+ __ srli(a1, a1, 1);
+ }
+ // a1 is length of input in characters.
+ __ Add64(a1, a1, Operand(a2));
+ // a1 is length of string in characters.
+
+ DCHECK_EQ(0, num_saved_registers_ % 2);
+ // Always an even number of capture registers. This allows us to
+ // unroll the loop once to add an operation between a load of a register
+ // and the following use of that register.
+ for (int i = 0; i < num_saved_registers_; i += 2) {
+ __ Ld(a2, register_location(i));
+ __ Ld(a3, register_location(i + 1));
+ if (i == 0 && global_with_zero_length_check()) {
+          // Keep capture start in t4 for the zero-length check later.
+ __ mv(t4, a2);
+ }
+ if (mode_ == UC16) {
+ __ srai(a2, a2, 1);
+ __ Add64(a2, a2, a1);
+ __ srai(a3, a3, 1);
+ __ Add64(a3, a3, a1);
+ } else {
+ __ Add64(a2, a1, Operand(a2));
+ __ Add64(a3, a1, Operand(a3));
+ }
+ // V8 expects the output to be an int32_t array.
+ __ Sw(a2, MemOperand(a0));
+ __ Add64(a0, a0, kIntSize);
+ __ Sw(a3, MemOperand(a0));
+ __ Add64(a0, a0, kIntSize);
+ }
+ }
+
+ if (global()) {
+ // Restart matching if the regular expression is flagged as global.
+ __ Ld(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ Ld(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ __ Ld(a2, MemOperand(frame_pointer(), kRegisterOutput));
+ // Increment success counter.
+ __ Add64(a0, a0, 1);
+ __ Sd(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ // Capture results have been stored, so the number of remaining global
+ // output registers is reduced by the number of stored captures.
+ __ Sub64(a1, a1, num_saved_registers_);
+ // Check whether we have enough room for another set of capture results.
+ __ Branch(&return_a0, lt, a1, Operand(num_saved_registers_));
+
+ __ Sd(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ // Advance the location for output.
+ __ Add64(a2, a2, num_saved_registers_ * kIntSize);
+ __ Sd(a2, MemOperand(frame_pointer(), kRegisterOutput));
+
+ // Prepare a0 to initialize registers with its value in the next run.
+ __ Ld(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+
+ if (global_with_zero_length_check()) {
+ // Special case for zero-length matches.
+ // t4: capture start index
+ // Not a zero-length match, restart.
+ __ Branch(&load_char_start_regexp, ne, current_input_offset(),
+ Operand(t4));
+ // Offset from the end is zero if we already reached the end.
+ __ Branch(&exit_label_, eq, current_input_offset(),
+ Operand(zero_reg));
+ // Advance current position after a zero-length match.
+ Label advance;
+ __ bind(&advance);
+ __ Add64(current_input_offset(), current_input_offset(),
+ Operand((mode_ == UC16) ? 2 : 1));
+ if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
+ }
+
+ __ Branch(&load_char_start_regexp);
+ } else {
+ __ li(a0, Operand(SUCCESS));
+ }
+ }
+ // Exit and return a0.
+ __ bind(&exit_label_);
+ if (global()) {
+ __ Ld(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ }
+
+ __ bind(&return_a0);
+    // Skip sp past regexp registers and local variables.
+ __ mv(sp, frame_pointer());
+
+    // Restore the callee-saved registers and return (restoring ra to pc).
+ __ MultiPop(registers_to_retain | ra.bit());
+
+ __ Ret();
+
+ // Backtrack code (branch target for conditional backtracks).
+ if (backtrack_label_.is_linked()) {
+ __ bind(&backtrack_label_);
+ Backtrack();
+ }
+
+ Label exit_with_exception;
+
+ // Preempt-code.
+ if (check_preempt_label_.is_linked()) {
+ SafeCallTarget(&check_preempt_label_);
+ // Put regexp engine registers on stack.
+ RegList regexp_registers_to_retain = current_input_offset().bit() |
+ current_character().bit() |
+ backtrack_stackpointer().bit();
+ __ MultiPush(regexp_registers_to_retain);
+ CallCheckStackGuardState(a0);
+ __ MultiPop(regexp_registers_to_retain);
+ // If returning non-zero, we should end execution with the given
+ // result as return value.
+ __ Branch(&return_a0, ne, a0, Operand(zero_reg));
+
+ // String might have moved: Reload end of string from frame.
+ __ Ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+ SafeReturn();
+ }
+
+ // Backtrack stack overflow code.
+ if (stack_overflow_label_.is_linked()) {
+ SafeCallTarget(&stack_overflow_label_);
+ // Reached if the backtrack-stack limit has been hit.
+ // Put regexp engine registers on stack first.
+ RegList regexp_registers =
+ current_input_offset().bit() | current_character().bit();
+ __ MultiPush(regexp_registers);
+
+ // Call GrowStack(backtrack_stackpointer(), &stack_base)
+ static const int num_arguments = 3;
+ __ PrepareCallCFunction(num_arguments, a0);
+ __ mv(a0, backtrack_stackpointer());
+ __ Add64(a1, frame_pointer(), Operand(kStackHighEnd));
+ __ li(a2, Operand(ExternalReference::isolate_address(masm_->isolate())));
+ ExternalReference grow_stack =
+ ExternalReference::re_grow_stack(masm_->isolate());
+ __ CallCFunction(grow_stack, num_arguments);
+ // Restore regexp registers.
+ __ MultiPop(regexp_registers);
+      // If nullptr is returned, we have failed to grow the stack and must
+      // exit with a stack-overflow exception.
+ __ Branch(&exit_with_exception, eq, a0, Operand(zero_reg));
+ // Otherwise use return value as new stack pointer.
+ __ mv(backtrack_stackpointer(), a0);
+ // Restore saved registers and continue.
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+ __ Ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ SafeReturn();
+ }
+
+ if (exit_with_exception.is_linked()) {
+ // If any of the code above needed to exit with an exception.
+ __ bind(&exit_with_exception);
+ // Exit with Result EXCEPTION(-1) to signal thrown exception.
+ __ li(a0, Operand(EXCEPTION));
+ __ jmp(&return_a0);
+ }
+
+ if (fallback_label_.is_linked()) {
+ __ bind(&fallback_label_);
+ __ li(a0, Operand(FALLBACK_TO_EXPERIMENTAL));
+ __ jmp(&return_a0);
+ }
+ }
+
+ CodeDesc code_desc;
+ masm_->GetCode(isolate(), &code_desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate(), code_desc, CodeKind::REGEXP)
+ .set_self_reference(masm_->CodeObject())
+ .Build();
+ LOG(masm_->isolate(),
+ RegExpCodeCreateEvent(Handle<AbstractCode>::cast(code), source));
+ return Handle<HeapObject>::cast(code);
+}
+
+void RegExpMacroAssemblerRISCV::GoTo(Label* to) {
+ if (to == nullptr) {
+ Backtrack();
+ return;
+ }
+ __ jmp(to);
+ return;
+}
+
+void RegExpMacroAssemblerRISCV::IfRegisterGE(int reg, int comparand,
+ Label* if_ge) {
+ __ Ld(a0, register_location(reg));
+ BranchOrBacktrack(if_ge, ge, a0, Operand(comparand));
+}
+
+void RegExpMacroAssemblerRISCV::IfRegisterLT(int reg, int comparand,
+ Label* if_lt) {
+ __ Ld(a0, register_location(reg));
+ BranchOrBacktrack(if_lt, lt, a0, Operand(comparand));
+}
+
+void RegExpMacroAssemblerRISCV::IfRegisterEqPos(int reg, Label* if_eq) {
+ __ Ld(a0, register_location(reg));
+ BranchOrBacktrack(if_eq, eq, a0, Operand(current_input_offset()));
+}
+
+RegExpMacroAssembler::IrregexpImplementation
+RegExpMacroAssemblerRISCV::Implementation() {
+ return kRISCVImplementation;
+}
+
+void RegExpMacroAssemblerRISCV::PopCurrentPosition() {
+ Pop(current_input_offset());
+}
+
+void RegExpMacroAssemblerRISCV::PopRegister(int register_index) {
+ Pop(a0);
+ __ Sd(a0, register_location(register_index));
+}
+
+void RegExpMacroAssemblerRISCV::PushBacktrack(Label* label) {
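+  // Push the offset of the label within the Code object onto the backtrack
+  // stack. For an unbound label, emit a placeholder word in the instruction
+  // stream and load from it; label_at_put patches the word once the label is
+  // bound.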
+ if (label->is_bound()) {
+ int target = label->pos();
+ __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+ } else {
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ Label after_constant;
+ __ Branch(&after_constant);
+ int offset = masm_->pc_offset();
+ int cp_offset = offset + Code::kHeaderSize - kHeapObjectTag;
+ __ emit(0);
+ masm_->label_at_put(label, offset);
+ __ bind(&after_constant);
+ if (is_int16(cp_offset)) {
+ __ Lwu(a0, MemOperand(code_pointer(), cp_offset));
+ } else {
+ __ Add64(a0, code_pointer(), cp_offset);
+ __ Lwu(a0, MemOperand(a0, 0));
+ }
+ }
+ Push(a0);
+ CheckStackLimit();
+}
+
+void RegExpMacroAssemblerRISCV::PushCurrentPosition() {
+ Push(current_input_offset());
+}
+
+void RegExpMacroAssemblerRISCV::PushRegister(int register_index,
+ StackCheckFlag check_stack_limit) {
+ __ Ld(a0, register_location(register_index));
+ Push(a0);
+ if (check_stack_limit) CheckStackLimit();
+}
+
+void RegExpMacroAssemblerRISCV::ReadCurrentPositionFromRegister(int reg) {
+ __ Ld(current_input_offset(), register_location(reg));
+}
+
+void RegExpMacroAssemblerRISCV::ReadStackPointerFromRegister(int reg) {
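+  // The register holds the backtrack stack pointer as an offset from the
+  // stack base (kStackHighEnd); add the base back to recover the absolute
+  // pointer.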
+ __ Ld(backtrack_stackpointer(), register_location(reg));
+ __ Ld(a0, MemOperand(frame_pointer(), kStackHighEnd));
+ __ Add64(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0));
+}
+
+void RegExpMacroAssemblerRISCV::SetCurrentPositionFromEnd(int by) {
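+  // If the current position is already within the last `by` characters of the
+  // input, leave it unchanged; otherwise set it to exactly `by` characters
+  // from the end and reload the current character.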
+ Label after_position;
+ __ Branch(&after_position, ge, current_input_offset(),
+ Operand(-by * char_size()));
+ __ li(current_input_offset(), -by * char_size());
+ // On RegExp code entry (where this operation is used), the character before
+ // the current position is expected to be already loaded.
+ // We have advanced the position, so it's safe to read backwards.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&after_position);
+}
+
+void RegExpMacroAssemblerRISCV::SetRegister(int register_index, int to) {
+ DCHECK(register_index >= num_saved_registers_); // Reserved for positions!
+ __ li(a0, Operand(to));
+ __ Sd(a0, register_location(register_index));
+}
+
+bool RegExpMacroAssemblerRISCV::Succeed() {
+ __ jmp(&success_label_);
+ return global();
+}
+
+void RegExpMacroAssemblerRISCV::WriteCurrentPositionToRegister(int reg,
+ int cp_offset) {
+ if (cp_offset == 0) {
+ __ Sd(current_input_offset(), register_location(reg));
+ } else {
+ __ Add64(a0, current_input_offset(), Operand(cp_offset * char_size()));
+ __ Sd(a0, register_location(reg));
+ }
+}
+
+void RegExpMacroAssemblerRISCV::ClearRegisters(int reg_from, int reg_to) {
+ DCHECK(reg_from <= reg_to);
+ __ Ld(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ for (int reg = reg_from; reg <= reg_to; reg++) {
+ __ Sd(a0, register_location(reg));
+ }
+}
+
+void RegExpMacroAssemblerRISCV::WriteStackPointerToRegister(int reg) {
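+  // Store the backtrack stack pointer as an offset from the stack base
+  // (kStackHighEnd) so the saved value stays meaningful if the backtrack
+  // stack is grown later.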
+ __ Ld(a1, MemOperand(frame_pointer(), kStackHighEnd));
+ __ Sub64(a0, backtrack_stackpointer(), a1);
+ __ Sd(a0, register_location(reg));
+}
+
+bool RegExpMacroAssemblerRISCV::CanReadUnaligned() { return false; }
+
+// Private methods:
+
+void RegExpMacroAssemblerRISCV::CallCheckStackGuardState(Register scratch) {
+ DCHECK(!isolate()->IsGeneratingEmbeddedBuiltins());
+ DCHECK(!masm_->options().isolate_independent_code);
+
+ int stack_alignment = base::OS::ActivationFrameAlignment();
+
+ // Align the stack pointer and save the original sp value on the stack.
+ __ mv(scratch, sp);
+ __ Sub64(sp, sp, Operand(kPointerSize));
+ DCHECK(base::bits::IsPowerOfTwo(stack_alignment));
+ __ And(sp, sp, Operand(-stack_alignment));
+ __ Sd(scratch, MemOperand(sp));
+
+ __ mv(a2, frame_pointer());
+ // Code of self.
+ __ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
+
+ // We need to make room for the return address on the stack.
+ DCHECK(IsAligned(stack_alignment, kPointerSize));
+ __ Sub64(sp, sp, Operand(stack_alignment));
+
+  // The stack pointer now points to the cell where the return address will be
+ // written. Arguments are in registers, meaning we treat the return address as
+ // argument 5. Since DirectCEntry will handle allocating space for the C
+ // argument slots, we don't need to care about that here. This is how the
+ // stack will look (sp meaning the value of sp at this moment):
+ // [sp + 3] - empty slot if needed for alignment.
+ // [sp + 2] - saved sp.
+ // [sp + 1] - second word reserved for return value.
+ // [sp + 0] - first word reserved for return value.
+
+ // a0 will point to the return address, placed by DirectCEntry.
+ __ mv(a0, sp);
+
+ ExternalReference stack_guard_check =
+ ExternalReference::re_check_stack_guard_state(masm_->isolate());
+ __ li(t6, Operand(stack_guard_check));
+
+ EmbeddedData d = EmbeddedData::FromBlob();
+ CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry));
+ Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry);
+ __ li(kScratchReg, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ __ Call(kScratchReg);
+
+  // DirectCEntry allocated space for the C argument slots, so we drop them,
+  // together with the return address, by reloading the saved sp. At this
+  // point the stack must look like this:
+ // [sp + 7] - empty slot if needed for alignment.
+ // [sp + 6] - saved sp.
+ // [sp + 5] - second word reserved for return value.
+ // [sp + 4] - first word reserved for return value.
+ // [sp + 3] - C argument slot.
+ // [sp + 2] - C argument slot.
+ // [sp + 1] - C argument slot.
+ // [sp + 0] - C argument slot.
+ __ Ld(sp, MemOperand(sp, stack_alignment + kCArgsSlotsSize));
+
+ __ li(code_pointer(), Operand(masm_->CodeObject()));
+}
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+ return reinterpret_cast<T&>(Memory<int32_t>(re_frame + frame_offset));
+}
+
+template <typename T>
+static T* frame_entry_address(Address re_frame, int frame_offset) {
+ return reinterpret_cast<T*>(re_frame + frame_offset);
+}
+
+int64_t RegExpMacroAssemblerRISCV::CheckStackGuardState(Address* return_address,
+ Address raw_code,
+ Address re_frame) {
+ Code re_code = Code::cast(Object(raw_code));
+ return NativeRegExpMacroAssembler::CheckStackGuardState(
+ frame_entry<Isolate*>(re_frame, kIsolate),
+ static_cast<int>(frame_entry<int64_t>(re_frame, kStartIndex)),
+ static_cast<RegExp::CallOrigin>(
+ frame_entry<int64_t>(re_frame, kDirectCall)),
+ return_address, re_code,
+ frame_entry_address<Address>(re_frame, kInputString),
+ frame_entry_address<const byte*>(re_frame, kInputStart),
+ frame_entry_address<const byte*>(re_frame, kInputEnd));
+}
+
+MemOperand RegExpMacroAssemblerRISCV::register_location(int register_index) {
+ DCHECK(register_index < (1 << 30));
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+ return MemOperand(frame_pointer(),
+ kRegisterZero - register_index * kPointerSize);
+}
+
+void RegExpMacroAssemblerRISCV::CheckPosition(int cp_offset,
+ Label* on_outside_input) {
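+  // current_input_offset() is a negative byte offset from the end of the
+  // input. For cp_offset >= 0 the position is outside the input once the
+  // offset reaches zero; for negative cp_offset it is outside if it falls at
+  // or before the position just before the string start.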
+ if (cp_offset >= 0) {
+ BranchOrBacktrack(on_outside_input, ge, current_input_offset(),
+ Operand(-cp_offset * char_size()));
+ } else {
+ __ Ld(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Add64(a0, current_input_offset(), Operand(cp_offset * char_size()));
+ BranchOrBacktrack(on_outside_input, le, a0, Operand(a1));
+ }
+}
+
+void RegExpMacroAssemblerRISCV::BranchOrBacktrack(Label* to,
+ Condition condition,
+ Register rs,
+ const Operand& rt) {
+ if (condition == al) { // Unconditional.
+ if (to == nullptr) {
+ Backtrack();
+ return;
+ }
+ __ jmp(to);
+ return;
+ }
+ if (to == nullptr) {
+ __ Branch(&backtrack_label_, condition, rs, rt);
+ return;
+ }
+ __ Branch(to, condition, rs, rt);
+}
+
+void RegExpMacroAssemblerRISCV::SafeCall(Label* to, Condition cond, Register rs,
+ const Operand& rt) {
+ __ BranchAndLink(to, cond, rs, rt);
+}
+
+void RegExpMacroAssemblerRISCV::SafeReturn() {
+ __ pop(ra);
+ __ Add64(t1, ra, Operand(masm_->CodeObject()));
+ __ Jump(t1);
+}
+
+void RegExpMacroAssemblerRISCV::SafeCallTarget(Label* name) {
+ __ bind(name);
+ __ Sub64(ra, ra, Operand(masm_->CodeObject()));
+ __ push(ra);
+}
+
+void RegExpMacroAssemblerRISCV::Push(Register source) {
+ DCHECK(source != backtrack_stackpointer());
+ __ Add64(backtrack_stackpointer(), backtrack_stackpointer(),
+ Operand(-kIntSize));
+ __ Sw(source, MemOperand(backtrack_stackpointer()));
+}
+
+void RegExpMacroAssemblerRISCV::Pop(Register target) {
+ DCHECK(target != backtrack_stackpointer());
+ __ Lw(target, MemOperand(backtrack_stackpointer()));
+ __ Add64(backtrack_stackpointer(), backtrack_stackpointer(), kIntSize);
+}
+
+void RegExpMacroAssemblerRISCV::CheckPreemption() {
+ // Check for preemption.
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(masm_->isolate());
+ __ li(a0, Operand(stack_limit));
+ __ Ld(a0, MemOperand(a0));
+ SafeCall(&check_preempt_label_, Uless_equal, sp, Operand(a0));
+}
+
+void RegExpMacroAssemblerRISCV::CheckStackLimit() {
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit_address(
+ masm_->isolate());
+
+ __ li(a0, Operand(stack_limit));
+ __ Ld(a0, MemOperand(a0));
+ SafeCall(&stack_overflow_label_, Uless_equal, backtrack_stackpointer(),
+ Operand(a0));
+}
+
+void RegExpMacroAssemblerRISCV::LoadCurrentCharacterUnchecked(int cp_offset,
+ int characters) {
+ Register offset = current_input_offset();
+ if (cp_offset != 0) {
+ // t4 is not being used to store the capture start index at this point.
+ __ Add64(t4, current_input_offset(), Operand(cp_offset * char_size()));
+ offset = t4;
+ }
+ // We assume that we cannot do unaligned loads on RISC-V, so this function
+ // must only be used to load a single character at a time.
+ DCHECK_EQ(1, characters);
+ __ Add64(t1, end_of_input_address(), Operand(offset));
+ if (mode_ == LATIN1) {
+ __ Lbu(current_character(), MemOperand(t1, 0));
+ } else {
+ DCHECK(mode_ == UC16);
+ __ Lhu(current_character(), MemOperand(t1, 0));
+ }
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.h b/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.h
new file mode 100644
index 0000000000..7311d41189
--- /dev/null
+++ b/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.h
@@ -0,0 +1,214 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_RISCV64_REGEXP_MACRO_ASSEMBLER_RISCV64_H_
+#define V8_REGEXP_RISCV64_REGEXP_MACRO_ASSEMBLER_RISCV64_H_
+
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/riscv64/assembler-riscv64.h"
+#include "src/regexp/regexp-macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class V8_EXPORT_PRIVATE RegExpMacroAssemblerRISCV
+ : public NativeRegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerRISCV(Isolate* isolate, Zone* zone, Mode mode,
+ int registers_to_save);
+ virtual ~RegExpMacroAssemblerRISCV();
+ virtual int stack_limit_slack();
+ virtual void AdvanceCurrentPosition(int by);
+ virtual void AdvanceRegister(int reg, int by);
+ virtual void Backtrack();
+ virtual void Bind(Label* label);
+ virtual void CheckAtStart(int cp_offset, Label* on_at_start);
+ virtual void CheckCharacter(uint32_t c, Label* on_equal);
+ virtual void CheckCharacterAfterAnd(uint32_t c, uint32_t mask,
+ Label* on_equal);
+ virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+ virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+ // A "greedy loop" is a loop that is both greedy and with a simple
+ // body. It has a particularly simple implementation.
+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
+ virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward, bool unicode,
+ Label* on_no_match);
+ virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
+ virtual void CheckNotCharacterAfterAnd(uint32_t c, uint32_t mask,
+ Label* on_not_equal);
+ virtual void CheckNotCharacterAfterMinusAnd(uc16 c, uc16 minus, uc16 mask,
+ Label* on_not_equal);
+ virtual void CheckCharacterInRange(uc16 from, uc16 to, Label* on_in_range);
+ virtual void CheckCharacterNotInRange(uc16 from, uc16 to,
+ Label* on_not_in_range);
+ virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
+ // Checks whether the given offset from the current position is before
+ // the end of the string.
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+ virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
+ virtual void Fail();
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
+ virtual void GoTo(Label* label);
+ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+ virtual void IfRegisterEqPos(int reg, Label* if_eq);
+ virtual IrregexpImplementation Implementation();
+ virtual void LoadCurrentCharacterUnchecked(int cp_offset,
+ int character_count);
+ virtual void PopCurrentPosition();
+ virtual void PopRegister(int register_index);
+ virtual void PushBacktrack(Label* label);
+ virtual void PushCurrentPosition();
+ virtual void PushRegister(int register_index,
+ StackCheckFlag check_stack_limit);
+ virtual void ReadCurrentPositionFromRegister(int reg);
+ virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetCurrentPositionFromEnd(int by);
+ virtual void SetRegister(int register_index, int to);
+ virtual bool Succeed();
+ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+ virtual void ClearRegisters(int reg_from, int reg_to);
+ virtual void WriteStackPointerToRegister(int reg);
+ virtual bool CanReadUnaligned();
+
+ // Called from RegExp if the stack-guard is triggered.
+ // If the code object is relocated, the return address is fixed before
+ // returning.
+ // {raw_code} is an Address because this is called via ExternalReference.
+ static int64_t CheckStackGuardState(Address* return_address, Address raw_code,
+ Address re_frame);
+
+ void print_regexp_frame_constants();
+
+ private:
+ // Offsets from frame_pointer() of function parameters and stored registers.
+ static const int kFramePointer = 0;
+
+ // Above the frame pointer - Stored registers and stack passed parameters.
+ // Registers s1 to s8, fp, and ra.
+ static const int kStoredRegisters = kFramePointer;
+ // Return address (stored from link register, read into pc on return).
+
+ // These 9 registers are the 8 s-regs (s1..s8) plus fp.
+ static const int kNumCalleeRegsToRetain = 9;
+ static const int kReturnAddress =
+ kStoredRegisters + kNumCalleeRegsToRetain * kPointerSize;
+
+ // Stack frame header.
+ static const int kStackFrameHeader = kReturnAddress;
+ // Stack parameters placed by caller.
+ static const int kIsolate = kStackFrameHeader + kPointerSize;
+
+ // Below the frame pointer.
+ // Register parameters stored by setup code.
+ static const int kDirectCall = kFramePointer - kPointerSize;
+ static const int kStackHighEnd = kDirectCall - kPointerSize;
+ static const int kNumOutputRegisters = kStackHighEnd - kPointerSize;
+ static const int kRegisterOutput = kNumOutputRegisters - kPointerSize;
+ static const int kInputEnd = kRegisterOutput - kPointerSize;
+ static const int kInputStart = kInputEnd - kPointerSize;
+ static const int kStartIndex = kInputStart - kPointerSize;
+ static const int kInputString = kStartIndex - kPointerSize;
+ // When adding local variables remember to push space for them in
+ // the frame in GetCode.
+ static const int kSuccessfulCaptures = kInputString - kPointerSize;
+ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ // First register address. Following registers are below it on the stack.
+ static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
+
+ // Initial size of code buffer.
+ static const int kRegExpCodeSize = 1024;
+
+ // Check whether preemption has been requested.
+ void CheckPreemption();
+
+ // Check whether we are exceeding the stack limit on the backtrack stack.
+ void CheckStackLimit();
+
+ // Generate a call to CheckStackGuardState.
+ void CallCheckStackGuardState(Register scratch);
+
+ // The frame-pointer-relative location of a regexp register.
+ MemOperand register_location(int register_index);
+
+ // Register holding the current input position as a negative offset from
+ // the end of the string.
+ inline Register current_input_offset() { return a6; }
+
+ // The register containing the current character after LoadCurrentCharacter.
+ inline Register current_character() { return a7; }
+
+ // Register holding address of the end of the input string.
+ inline Register end_of_input_address() { return t2; }
+
+ // Register holding the frame address. Local variables, parameters and
+ // regexp registers are addressed relative to this.
+ inline Register frame_pointer() { return fp; }
+
+ // The register containing the backtrack stack top. Provides a meaningful
+ // name to the register.
+ inline Register backtrack_stackpointer() { return t0; }
+
+ // Register holding pointer to the current code object.
+ inline Register code_pointer() { return a5; }
+
+ // Byte size of chars in the string to match (decided by the Mode argument).
+ inline int char_size() { return static_cast<int>(mode_); }
+
+ // Equivalent to a conditional branch to the label, unless the label
+ // is nullptr, in which case it is a conditional Backtrack.
+ void BranchOrBacktrack(Label* to, Condition condition, Register rs,
+ const Operand& rt);
+
+ // Call and return internally in the generated code in a way that
+ // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
+ inline void SafeCall(Label* to, Condition cond, Register rs,
+ const Operand& rt);
+ inline void SafeReturn();
+ inline void SafeCallTarget(Label* name);
+
+ // Pushes the value of a register on the backtrack stack. Decrements the
+ // stack pointer by a word size and stores the register's value there.
+ inline void Push(Register source);
+
+ // Pops a value from the backtrack stack. Reads the word at the stack pointer
+ // and increments it by a word size.
+ inline void Pop(Register target);
+
+ Isolate* isolate() const { return masm_->isolate(); }
+
+ MacroAssembler* masm_;
+
+ // Which mode to generate code for (Latin1 or UC16).
+ Mode mode_;
+
+ // One greater than maximal register index actually used.
+ int num_registers_;
+
+ // Number of registers to output at the end (the saved registers
+ // are always 0..num_saved_registers_-1).
+ int num_saved_registers_;
+
+ // Labels used internally.
+ Label entry_label_;
+ Label start_label_;
+ Label success_label_;
+ Label backtrack_label_;
+ Label exit_label_;
+ Label check_preempt_label_;
+ Label stack_overflow_label_;
+ Label internal_failure_label_;
+ Label fallback_label_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REGEXP_RISCV64_REGEXP_MACRO_ASSEMBLER_RISCV64_H_
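The frame-offset constants declared in this header form two chains around frame_pointer(): callee-saved registers and the return address above it, register parameters and regexp registers below it. The following stand-alone sketch restates those constants and spot-checks the arithmetic; the 8-byte pointer size is an assumption for the 64-bit target, and nothing else is new.

    #include <cassert>

    int main() {
      const int kPointerSize = 8;  // assumption: 64-bit target
      const int kFramePointer = 0;
      // Above fp: 8 s-regs (s1..s8) plus fp, then the return address.
      const int kStoredRegisters = kFramePointer;
      const int kNumCalleeRegsToRetain = 9;
      const int kReturnAddress =
          kStoredRegisters + kNumCalleeRegsToRetain * kPointerSize;
      const int kStackFrameHeader = kReturnAddress;
      const int kIsolate = kStackFrameHeader + kPointerSize;
      // Below fp: register parameters stored by the setup code.
      const int kDirectCall = kFramePointer - kPointerSize;
      const int kStackHighEnd = kDirectCall - kPointerSize;

      assert(kReturnAddress == 72);  // 9 * 8 bytes above the frame pointer
      assert(kIsolate == 80);
      assert(kStackHighEnd == -16);
      return 0;
    }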
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index bd9ef127dd..61a5e6e642 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -1273,25 +1273,13 @@ void RegExpMacroAssemblerS390::LoadCurrentCharacterUnchecked(int cp_offset,
if (mode_ == LATIN1) {
// using load reverse for big-endian platforms
if (characters == 4) {
-#if V8_TARGET_LITTLE_ENDIAN
- __ LoadU32(current_character(),
- MemOperand(current_input_offset(), end_of_input_address(),
- cp_offset * char_size()));
-#else
- __ LoadLogicalReversedWordP(current_character(),
- MemOperand(current_input_offset(), end_of_input_address(),
- cp_offset * char_size()));
-#endif
+ __ LoadU32LE(current_character(),
+ MemOperand(current_input_offset(), end_of_input_address(),
+ cp_offset * char_size()));
} else if (characters == 2) {
-#if V8_TARGET_LITTLE_ENDIAN
- __ LoadU16(current_character(),
- MemOperand(current_input_offset(), end_of_input_address(),
- cp_offset * char_size()));
-#else
- __ LoadLogicalReversedHalfWordP(current_character(),
- MemOperand(current_input_offset(), end_of_input_address(),
- cp_offset * char_size()));
-#endif
+ __ LoadU16LE(current_character(),
+ MemOperand(current_input_offset(), end_of_input_address(),
+ cp_offset * char_size()));
} else {
DCHECK_EQ(1, characters);
__ LoadU8(current_character(),
diff --git a/deps/v8/src/roots/roots-inl.h b/deps/v8/src/roots/roots-inl.h
index 83a9feafb4..3ca41b29af 100644
--- a/deps/v8/src/roots/roots-inl.h
+++ b/deps/v8/src/roots/roots-inl.h
@@ -21,6 +21,7 @@
#include "src/objects/scope-info.h"
#include "src/objects/slots.h"
#include "src/objects/string.h"
+#include "src/objects/swiss-name-dictionary.h"
#include "src/roots/roots.h"
namespace v8 {
diff --git a/deps/v8/src/roots/roots.h b/deps/v8/src/roots/roots.h
index 28531ee579..547cb0cc8c 100644
--- a/deps/v8/src/roots/roots.h
+++ b/deps/v8/src/roots/roots.h
@@ -108,6 +108,7 @@ class Symbol;
V(Map, small_ordered_hash_set_map, SmallOrderedHashSetMap) \
V(Map, small_ordered_name_dictionary_map, SmallOrderedNameDictionaryMap) \
V(Map, source_text_module_map, SourceTextModuleMap) \
+ V(Map, swiss_name_dictionary_map, SwissNameDictionaryMap) \
V(Map, synthetic_module_map, SyntheticModuleMap) \
V(Map, wasm_type_info_map, WasmTypeInfoMap) \
V(Map, weak_fixed_array_map, WeakFixedArrayMap) \
@@ -129,6 +130,10 @@ class Symbol;
V(Map, external_internalized_string_map, ExternalInternalizedStringMap) \
V(Map, external_one_byte_internalized_string_map, \
ExternalOneByteInternalizedStringMap) \
+ V(Map, uncached_external_internalized_string_map, \
+ UncachedExternalInternalizedStringMap) \
+ V(Map, uncached_external_one_byte_internalized_string_map, \
+ UncachedExternalOneByteInternalizedStringMap) \
V(Map, uncached_external_one_byte_string_map, \
UncachedExternalOneByteStringMap) \
/* Oddball maps */ \
@@ -159,10 +164,11 @@ class Symbol;
V(OrderedHashMap, empty_ordered_hash_map, EmptyOrderedHashMap) \
V(OrderedHashSet, empty_ordered_hash_set, EmptyOrderedHashSet) \
V(FeedbackMetadata, empty_feedback_metadata, EmptyFeedbackMetadata) \
- V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
V(NameDictionary, empty_property_dictionary, EmptyPropertyDictionary) \
V(OrderedNameDictionary, empty_ordered_property_dictionary, \
EmptyOrderedPropertyDictionary) \
+ V(SwissNameDictionary, empty_swiss_property_dictionary, \
+ EmptySwissPropertyDictionary) \
V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo) \
V(WeakFixedArray, empty_weak_fixed_array, EmptyWeakFixedArray) \
V(WeakArrayList, empty_weak_array_list, EmptyWeakArrayList) \
@@ -197,11 +203,6 @@ class Symbol;
/* Maps */ \
V(Map, external_map, ExternalMap) \
V(Map, message_object_map, JSMessageObjectMap) \
- V(Map, wasm_rttcanon_eqref_map, WasmRttEqrefMap) \
- V(Map, wasm_rttcanon_externref_map, WasmRttExternrefMap) \
- V(Map, wasm_rttcanon_funcref_map, WasmRttFuncrefMap) \
- V(Map, wasm_rttcanon_i31ref_map, WasmRttI31refMap) \
- V(Map, wasm_rttcanon_anyref_map, WasmRttAnyrefMap) \
/* Canonical empty values */ \
V(Script, empty_script, EmptyScript) \
V(FeedbackCell, many_closures_cell, ManyClosuresCell) \
@@ -309,7 +310,6 @@ class Symbol;
/* To distinguish the function templates, so that we can find them in the */ \
/* function cache of the native context. */ \
V(Smi, next_template_serial_number, NextTemplateSerialNumber) \
- V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
V(Smi, construct_stub_create_deopt_pc_offset, \
ConstructStubCreateDeoptPCOffset) \
V(Smi, construct_stub_invoke_deopt_pc_offset, \
diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc
index 2b17ad52b1..6ea4e04d63 100644
--- a/deps/v8/src/runtime/runtime-atomics.cc
+++ b/deps/v8/src/runtime/runtime-atomics.cc
@@ -19,7 +19,8 @@ namespace internal {
// Other platforms have CSA support, see builtins-sharedarraybuffer-gen.h.
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
- V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X || \
+ V8_TARGET_ARCH_RISCV64
namespace {
@@ -568,6 +569,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsXor) { UNREACHABLE(); }
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
// || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
-
+ // || V8_TARGET_ARCH_RISCV64
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 3e9b9bfd09..87456ad3a5 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -113,11 +113,6 @@ RUNTIME_FUNCTION(Runtime_ThrowNotSuperConstructor) {
return ThrowNotSuperConstructor(isolate, constructor, function);
}
-RUNTIME_FUNCTION(Runtime_HomeObjectSymbol) {
- DCHECK_EQ(0, args.length());
- return ReadOnlyRoots(isolate).home_object_symbol();
-}
-
namespace {
template <typename Dictionary>
@@ -134,35 +129,17 @@ Handle<Name> KeyToName<NumberDictionary>(Isolate* isolate, Handle<Object> key) {
return isolate->factory()->NumberToString(key);
}
-inline void SetHomeObject(Isolate* isolate, JSFunction method,
- JSObject home_object) {
- if (method.shared().needs_home_object()) {
- const InternalIndex kPropertyIndex(
- JSFunction::kMaybeHomeObjectDescriptorIndex);
- CHECK_EQ(
- method.map().instance_descriptors(kRelaxedLoad).GetKey(kPropertyIndex),
- ReadOnlyRoots(isolate).home_object_symbol());
-
- FieldIndex field_index =
- FieldIndex::ForDescriptor(method.map(), kPropertyIndex);
- method.RawFastPropertyAtPut(field_index, home_object);
- }
-}
-
// Gets |index|'th argument which may be a class constructor object, a class
// prototype object or a class method. In the latter case the following
// post-processing may be required:
-// 1) set [[HomeObject]] slot to given |home_object| value if the method's
-// shared function info indicates that the method requires that;
-// 2) set method's name to a concatenation of |name_prefix| and |key| if the
+// 1) set method's name to a concatenation of |name_prefix| and |key| if the
// method's shared function info indicates that method does not have a
// shared name.
template <typename Dictionary>
-MaybeHandle<Object> GetMethodAndSetHomeObjectAndName(
+MaybeHandle<Object> GetMethodAndSetName(
Isolate* isolate,
RuntimeArguments& args, // NOLINT(runtime/references)
- Smi index, Handle<JSObject> home_object, Handle<String> name_prefix,
- Handle<Object> key) {
+ Smi index, Handle<String> name_prefix, Handle<Object> key) {
int int_index = index.value();
// Class constructor and prototype values do not require post processing.
@@ -172,8 +149,6 @@ MaybeHandle<Object> GetMethodAndSetHomeObjectAndName(
Handle<JSFunction> method = args.at<JSFunction>(int_index);
- SetHomeObject(isolate, *method, *home_object);
-
if (!method->shared().HasSharedName()) {
// TODO(ishell): method does not have a shared name at this point only if
// the key is a computed property name. However, the bytecode generator
@@ -189,17 +164,14 @@ MaybeHandle<Object> GetMethodAndSetHomeObjectAndName(
}
// Gets |index|'th argument which may be a class constructor object, a class
-// prototype object or a class method. In the latter case the following
-// post-processing may be required:
-// 1) set [[HomeObject]] slot to given |home_object| value if the method's
-// shared function info indicates that the method requires that;
-// This is a simplified version of GetMethodWithSharedNameAndSetHomeObject()
+// prototype object or a class method.
+// This is a simplified version of GetMethodAndSetName()
// function above that is used when it's guaranteed that the method has
// shared name.
-Object GetMethodWithSharedNameAndSetHomeObject(
+Object GetMethodWithSharedName(
Isolate* isolate,
RuntimeArguments& args, // NOLINT(runtime/references)
- Object index, JSObject home_object) {
+ Object index) {
DisallowGarbageCollection no_gc;
int int_index = Smi::ToInt(index);
@@ -209,9 +181,6 @@ Object GetMethodWithSharedNameAndSetHomeObject(
}
Handle<JSFunction> method = args.at<JSFunction>(int_index);
-
- SetHomeObject(isolate, *method, home_object);
-
DCHECK(method->shared().HasSharedName());
return *method;
}
@@ -237,7 +206,6 @@ Handle<Dictionary> ShallowCopyDictionaryTemplate(
template <typename Dictionary>
bool SubstituteValues(Isolate* isolate, Handle<Dictionary> dictionary,
- Handle<JSObject> receiver,
RuntimeArguments& args, // NOLINT(runtime/references)
bool* install_name_accessor = nullptr) {
Handle<Name> name_string = isolate->factory()->name_string();
@@ -260,9 +228,9 @@ bool SubstituteValues(Isolate* isolate, Handle<Dictionary> dictionary,
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, result,
- GetMethodAndSetHomeObjectAndName<Dictionary>(
- isolate, args, Smi::cast(tmp), receiver,
- isolate->factory()->get_string(), key),
+ GetMethodAndSetName<Dictionary>(isolate, args, Smi::cast(tmp),
+ isolate->factory()->get_string(),
+ key),
false);
pair->set_getter(*result);
}
@@ -271,9 +239,9 @@ bool SubstituteValues(Isolate* isolate, Handle<Dictionary> dictionary,
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, result,
- GetMethodAndSetHomeObjectAndName<Dictionary>(
- isolate, args, Smi::cast(tmp), receiver,
- isolate->factory()->set_string(), key),
+ GetMethodAndSetName<Dictionary>(isolate, args, Smi::cast(tmp),
+ isolate->factory()->set_string(),
+ key),
false);
pair->set_setter(*result);
}
@@ -281,9 +249,9 @@ bool SubstituteValues(Isolate* isolate, Handle<Dictionary> dictionary,
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, result,
- GetMethodAndSetHomeObjectAndName<Dictionary>(
- isolate, args, Smi::cast(*value), receiver,
- isolate->factory()->empty_string(), key),
+ GetMethodAndSetName<Dictionary>(isolate, args, Smi::cast(*value),
+ isolate->factory()->empty_string(),
+ key),
false);
dictionary->ValueAtPut(i, *result);
}
@@ -359,8 +327,7 @@ bool AddDescriptorsByTemplate(
if (details.location() == kDescriptor) {
if (details.kind() == kData) {
if (value.IsSmi()) {
- value = GetMethodWithSharedNameAndSetHomeObject(isolate, args, value,
- *receiver);
+ value = GetMethodWithSharedName(isolate, args, value);
}
details = details.CopyWithRepresentation(
value.OptimalRepresentation(isolate));
@@ -370,13 +337,11 @@ bool AddDescriptorsByTemplate(
AccessorPair pair = AccessorPair::cast(value);
Object tmp = pair.getter();
if (tmp.IsSmi()) {
- pair.set_getter(GetMethodWithSharedNameAndSetHomeObject(
- isolate, args, tmp, *receiver));
+ pair.set_getter(GetMethodWithSharedName(isolate, args, tmp));
}
tmp = pair.setter();
if (tmp.IsSmi()) {
- pair.set_setter(GetMethodWithSharedNameAndSetHomeObject(
- isolate, args, tmp, *receiver));
+ pair.set_setter(GetMethodWithSharedName(isolate, args, tmp));
}
}
}
@@ -401,11 +366,10 @@ bool AddDescriptorsByTemplate(
UpdateProtectors(isolate, receiver, descriptors_template);
- map->InitializeDescriptors(isolate, *descriptors,
- LayoutDescriptor::FastPointerLayout());
+ map->InitializeDescriptors(isolate, *descriptors);
if (elements_dictionary->NumberOfElements() > 0) {
if (!SubstituteValues<NumberDictionary>(isolate, elements_dictionary,
- receiver, args)) {
+ args)) {
return false;
}
map->set_elements_kind(DICTIONARY_ELEMENTS);
@@ -480,14 +444,15 @@ bool AddDescriptorsByTemplate(
}
// Replace all indices with proper methods.
- if (!SubstituteValues<Dictionary>(isolate, properties_dictionary, receiver,
- args, &install_name_accessor)) {
+ if (!SubstituteValues<Dictionary>(isolate, properties_dictionary, args,
+ &install_name_accessor)) {
return false;
}
if (install_name_accessor) {
PropertyAttributes attribs =
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
- PropertyDetails details(kAccessor, attribs, PropertyCellType::kNoCell);
+ PropertyDetails details(kAccessor, attribs,
+ PropertyDetails::kConstIfDictConstnessTracking);
Handle<Dictionary> dict = ToHandle(Dictionary::Add(
isolate, properties_dictionary, isolate->factory()->name_string(),
isolate->factory()->function_name_accessor(), details));
@@ -498,7 +463,7 @@ bool AddDescriptorsByTemplate(
if (elements_dictionary->NumberOfElements() > 0) {
if (!SubstituteValues<NumberDictionary>(isolate, elements_dictionary,
- receiver, args)) {
+ args)) {
return false;
}
map->set_elements_kind(DICTIONARY_ELEMENTS);
@@ -594,6 +559,8 @@ bool InitClassConstructor(
// Set map's prototype without enabling prototype setup mode for superclass
// because it does not make sense.
Map::SetPrototype(isolate, map, constructor_parent, false);
+ // Ensure that setup mode will never be enabled for superclass.
+ JSObject::MakePrototypesFast(constructor_parent, kStartAtReceiver, isolate);
}
Handle<NumberDictionary> elements_dictionary_template(
@@ -615,8 +582,7 @@ bool InitClassConstructor(
} else {
map->set_is_dictionary_map(true);
map->InitializeDescriptors(isolate,
- ReadOnlyRoots(isolate).empty_descriptor_array(),
- LayoutDescriptor::FastPointerLayout());
+ ReadOnlyRoots(isolate).empty_descriptor_array());
map->set_is_migration_target(false);
map->set_may_have_interesting_symbols(true);
map->set_construction_counter(Map::kNoSlackTracking);
@@ -695,7 +661,7 @@ MaybeHandle<Object> DefineClass(
DCHECK(isolate->has_pending_exception());
return MaybeHandle<Object>();
}
- if (FLAG_trace_maps) {
+ if (FLAG_log_maps) {
Handle<Map> empty_map;
LOG(isolate,
MapEvent("InitialMap", empty_map, handle(constructor->map(), isolate),
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 5377e80548..0897f685fc 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -3,19 +3,25 @@
// found in the LICENSE file.
#include "src/asmjs/asm-js.h"
+#include "src/baseline/baseline.h"
#include "src/codegen/compilation-cache.h"
#include "src/codegen/compiler.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/common/assert-scope.h"
+#include "src/common/globals.h"
#include "src/common/message-template.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
+#include "src/compiler/pipeline.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/arguments-inl.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/v8threads.h"
#include "src/execution/vm-state-inl.h"
+#include "src/heap/parked-scope.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/shared-function-info.h"
#include "src/runtime/runtime-utils.h"
namespace v8 {
@@ -25,27 +31,10 @@ namespace {
// Returns false iff an exception was thrown.
bool MaybeSpawnNativeContextIndependentCompilationJob(
- Handle<JSFunction> function, ConcurrencyMode mode) {
- if (!FLAG_turbo_nci || FLAG_turbo_nci_as_midtier) {
- return true; // Nothing to do.
- }
-
- // If delayed codegen is enabled, the first optimization request does not
- // trigger NCI compilation, since we try to avoid compiling Code that
- // remains unused in the future. Repeated optimization (possibly in
- // different native contexts) is taken as a signal that this SFI will
- // continue to be used in the future, thus we trigger NCI compilation.
- if (!FLAG_turbo_nci_delayed_codegen ||
- function->shared().has_optimized_at_least_once()) {
- if (!Compiler::CompileOptimized(function, mode,
- CodeKind::NATIVE_CONTEXT_INDEPENDENT)) {
- return false;
- }
- } else {
- function->shared().set_has_optimized_at_least_once(true);
- }
-
- return true;
+ Isolate* isolate, Handle<JSFunction> function, ConcurrencyMode mode) {
+ if (!FLAG_turbo_nci) return true; // Nothing to do.
+ return Compiler::CompileOptimized(isolate, function, mode,
+ CodeKind::NATIVE_CONTEXT_INDEPENDENT);
}
Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
@@ -56,12 +45,14 @@ Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
}
// Compile for the next tier.
- if (!Compiler::CompileOptimized(function, mode, function->NextTier())) {
+ if (!Compiler::CompileOptimized(isolate, function, mode,
+ function->NextTier())) {
return ReadOnlyRoots(isolate).exception();
}
// Possibly compile for NCI caching.
- if (!MaybeSpawnNativeContextIndependentCompilationJob(function, mode)) {
+ if (!MaybeSpawnNativeContextIndependentCompilationJob(isolate, function,
+ mode)) {
return ReadOnlyRoots(isolate).exception();
}
@@ -81,7 +72,7 @@ void TryInstallNCICode(Isolate* isolate, Handle<JSFunction> function,
Handle<Code> code;
if (sfi->TryGetCachedCode(isolate).ToHandle(&code)) {
- function->set_code(*code);
+ function->set_code(*code, kReleaseStore);
JSFunction::EnsureFeedbackVector(function, is_compiled_scope);
if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceHit(sfi, code);
}
@@ -109,7 +100,7 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
return isolate->StackOverflow();
}
IsCompiledScope is_compiled_scope;
- if (!Compiler::Compile(function, Compiler::KEEP_EXCEPTION,
+ if (!Compiler::Compile(isolate, function, Compiler::KEEP_EXCEPTION,
&is_compiled_scope)) {
return ReadOnlyRoots(isolate).exception();
}
@@ -120,6 +111,22 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
return function->code();
}
+RUNTIME_FUNCTION(Runtime_InstallBaselineCode) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ Handle<SharedFunctionInfo> sfi(function->shared(), isolate);
+ DCHECK(sfi->HasBaselineData());
+ IsCompiledScope is_compiled_scope(*sfi, isolate);
+ DCHECK(!function->HasAvailableOptimizedCode());
+ DCHECK(!function->HasOptimizationMarker());
+ DCHECK(!function->has_feedback_vector());
+ JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
+ Code baseline_code = sfi->baseline_data().baseline_code();
+ function->set_code(baseline_code);
+ return baseline_code;
+}
+
RUNTIME_FUNCTION(Runtime_TryInstallNCICode) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -174,7 +181,8 @@ RUNTIME_FUNCTION(Runtime_HealOptimizedCodeSlot) {
DCHECK(function->shared().is_compiled());
function->feedback_vector().EvictOptimizedCodeMarkedForDeoptimization(
- function->shared(), "Runtime_HealOptimizedCodeSlot");
+ function->raw_feedback_cell(), function->shared(),
+ "Runtime_HealOptimizedCodeSlot");
return function->code();
}
@@ -197,10 +205,12 @@ RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
}
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
if (shared->HasAsmWasmData()) {
+#if V8_ENABLE_WEBASSEMBLY
Handle<AsmWasmData> data(shared->asm_wasm_data(), isolate);
MaybeHandle<Object> result = AsmJs::InstantiateAsmWasm(
isolate, shared, data, stdlib, foreign, memory);
if (!result.is_null()) return *result.ToHandleChecked();
+#endif
// Remove wasm data, mark as broken for asm->wasm, replace function code
// with UncompiledData, and return a smi 0 to indicate failure.
SharedFunctionInfo::DiscardCompiled(isolate, shared);
@@ -257,6 +267,14 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
return ReadOnlyRoots(isolate).undefined_value();
}
+RUNTIME_FUNCTION(Runtime_ObserveNode) {
+ // The %ObserveNode intrinsic only tracks the changes to an observed node in
+ // code compiled by TurboFan.
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
+ return *obj;
+}
static bool IsSuitableForOnStackReplacement(Isolate* isolate,
Handle<JSFunction> function) {
@@ -283,24 +301,30 @@ static bool IsSuitableForOnStackReplacement(Isolate* isolate,
namespace {
-BailoutId DetermineEntryAndDisarmOSRForInterpreter(JavaScriptFrame* frame) {
- InterpretedFrame* iframe = reinterpret_cast<InterpretedFrame*>(frame);
+BytecodeOffset DetermineEntryAndDisarmOSRForUnoptimized(
+ JavaScriptFrame* js_frame) {
+ UnoptimizedFrame* frame = reinterpret_cast<UnoptimizedFrame*>(js_frame);
// Note that the bytecode array active on the stack might be different from
// the one installed on the function (e.g. patched by debugger). This however
- // is fine because we guarantee the layout to be in sync, hence any BailoutId
- // representing the entry point will be valid for any copy of the bytecode.
- Handle<BytecodeArray> bytecode(iframe->GetBytecodeArray(), iframe->isolate());
-
- DCHECK(frame->LookupCode().is_interpreter_trampoline_builtin());
+ // is fine because we guarantee the layout to be in sync, hence any
+ // BytecodeOffset representing the entry point will be valid for any copy of
+ // the bytecode.
+ Handle<BytecodeArray> bytecode(frame->GetBytecodeArray(), frame->isolate());
+
+ DCHECK_IMPLIES(frame->is_interpreted(),
+ frame->LookupCode().is_interpreter_trampoline_builtin());
+ DCHECK_IMPLIES(frame->is_baseline(),
+ frame->LookupCode().kind() == CodeKind::BASELINE);
+ DCHECK(frame->is_unoptimized());
DCHECK(frame->function().shared().HasBytecodeArray());
- DCHECK(frame->is_interpreted());
// Reset the OSR loop nesting depth to disarm back edges.
bytecode->set_osr_loop_nesting_level(0);
- // Return a BailoutId representing the bytecode offset of the back branch.
- return BailoutId(iframe->GetBytecodeOffset());
+ // Return a BytecodeOffset representing the bytecode offset of the back
+ // branch.
+ return BytecodeOffset(frame->GetBytecodeOffset());
}
} // namespace
@@ -315,12 +339,12 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
// Determine frame triggering OSR request.
JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
- DCHECK(frame->is_interpreted());
+ DCHECK(frame->is_unoptimized());
// Determine the entry point for which this OSR request has been fired and
// also disarm all back edges in the calling code to stop new requests.
- BailoutId ast_id = DetermineEntryAndDisarmOSRForInterpreter(frame);
- DCHECK(!ast_id.IsNone());
+ BytecodeOffset osr_offset = DetermineEntryAndDisarmOSRForUnoptimized(frame);
+ DCHECK(!osr_offset.IsNone());
MaybeHandle<Code> maybe_result;
Handle<JSFunction> function(frame->function(), isolate);
@@ -329,15 +353,17 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[OSR - Compiling: ");
function->PrintName(scope.file());
- PrintF(scope.file(), " at AST id %d]\n", ast_id.ToInt());
+ PrintF(scope.file(), " at OSR bytecode offset %d]\n", osr_offset.ToInt());
}
- maybe_result = Compiler::GetOptimizedCodeForOSR(function, ast_id, frame);
+ maybe_result =
+ Compiler::GetOptimizedCodeForOSR(function, osr_offset, frame);
// Possibly compile for NCI caching.
if (!MaybeSpawnNativeContextIndependentCompilationJob(
- function, isolate->concurrent_recompilation_enabled()
- ? ConcurrencyMode::kConcurrent
- : ConcurrencyMode::kNotConcurrent)) {
+ isolate, function,
+ isolate->concurrent_recompilation_enabled()
+ ? ConcurrencyMode::kConcurrent
+ : ConcurrencyMode::kNotConcurrent)) {
return Object();
}
}
@@ -350,12 +376,13 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
DeoptimizationData::cast(result->deoptimization_data());
if (data.OsrPcOffset().value() >= 0) {
- DCHECK(BailoutId(data.OsrBytecodeOffset().value()) == ast_id);
+ DCHECK(BytecodeOffset(data.OsrBytecodeOffset().value()) == osr_offset);
if (FLAG_trace_osr) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(),
- "[OSR - Entry at AST id %d, offset %d in optimized code]\n",
- ast_id.ToInt(), data.OsrPcOffset().value());
+ "[OSR - Entry at OSR bytecode offset %d, offset %d in optimized "
+ "code]\n",
+ osr_offset.ToInt(), data.OsrPcOffset().value());
}
DCHECK(result->is_turbofanned());
@@ -399,11 +426,11 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[OSR - Failed: ");
function->PrintName(scope.file());
- PrintF(scope.file(), " at AST id %d]\n", ast_id.ToInt());
+ PrintF(scope.file(), " at OSR bytecode offset %d]\n", osr_offset.ToInt());
}
if (!function->HasAttachedOptimizedCode()) {
- function->set_code(function->shared().GetCode());
+ function->set_code(function->shared().GetCode(), kReleaseStore);
}
return Object();
}
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index ce91550684..e7ebfa2c18 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -10,6 +10,7 @@
#include "src/debug/debug-evaluate.h"
#include "src/debug/debug-frames.h"
#include "src/debug/debug-scopes.h"
+#include "src/debug/debug-wasm-objects.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
#include "src/execution/arguments-inl.h"
@@ -325,7 +326,7 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
kExternalInt32Array,
};
Handle<FixedArray> result =
- factory->NewFixedArray((2 + arraysize(kTypes)) * 2);
+ factory->NewFixedArray((3 + arraysize(kTypes)) * 2);
int index = 0;
for (auto type : kTypes) {
switch (type) {
@@ -349,36 +350,41 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
}
Handle<String> byte_length_str =
factory->NewStringFromAsciiChecked("[[ArrayBufferByteLength]]");
- result->set(index++, *byte_length_str);
Handle<Object> byte_length_obj = factory->NewNumberFromSize(byte_length);
+ result->set(index++, *byte_length_str);
result->set(index++, *byte_length_obj);
+
Handle<String> buffer_data_str =
factory->NewStringFromAsciiChecked("[[ArrayBufferData]]");
- result->set(index++, *buffer_data_str);
// Use the backing store pointer as a unique ID
EmbeddedVector<char, 32> buffer_data_vec;
int len =
SNPrintF(buffer_data_vec, V8PRIxPTR_FMT,
reinterpret_cast<Address>(js_array_buffer->backing_store()));
- Handle<String> buffer_id =
+ Handle<String> buffer_data_obj =
factory->InternalizeUtf8String(buffer_data_vec.SubVector(0, len));
- result->set(index++, *buffer_id);
+ result->set(index++, *buffer_data_str);
+ result->set(index++, *buffer_data_obj);
+
+ Handle<Symbol> memory_symbol = factory->array_buffer_wasm_memory_symbol();
+ Handle<Object> memory_object =
+ JSObject::GetDataProperty(js_array_buffer, memory_symbol);
+ if (!memory_object->IsUndefined(isolate)) {
+ Handle<String> buffer_memory_str =
+ factory->NewStringFromAsciiChecked("[[WebAssemblyMemory]]");
+ Handle<WasmMemoryObject> buffer_memory_obj =
+ Handle<WasmMemoryObject>::cast(memory_object);
+ result->set(index++, *buffer_memory_str);
+ result->set(index++, *buffer_memory_obj);
+ }
return factory->NewJSArrayWithElements(result, PACKED_ELEMENTS, index);
+ } else if (object->IsWasmInstanceObject()) {
+ return GetWasmInstanceObjectInternalProperties(
+ Handle<WasmInstanceObject>::cast(object));
} else if (object->IsWasmModuleObject()) {
- auto module_object = Handle<WasmModuleObject>::cast(object);
- Handle<FixedArray> result = factory->NewFixedArray(2 * 2);
- Handle<String> exports_str =
- factory->NewStringFromStaticChars("[[Exports]]");
- Handle<JSArray> exports_obj = wasm::GetExports(isolate, module_object);
- result->set(0, *exports_str);
- result->set(1, *exports_obj);
- Handle<String> imports_str =
- factory->NewStringFromStaticChars("[[Imports]]");
- Handle<JSArray> imports_obj = wasm::GetImports(isolate, module_object);
- result->set(2, *imports_str);
- result->set(3, *imports_obj);
- return factory->NewJSArrayWithElements(result, PACKED_ELEMENTS);
+ return GetWasmModuleObjectInternalProperties(
+ Handle<WasmModuleObject>::cast(object));
}
return factory->NewJSArray(0);
}
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index e51085c00e..3eebf507f0 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -7,6 +7,7 @@
#include "src/api/api.h"
#include "src/ast/ast-traversal-visitor.h"
#include "src/ast/prettyprinter.h"
+#include "src/baseline/baseline.h"
#include "src/builtins/builtins.h"
#include "src/common/message-template.h"
#include "src/debug/debug.h"
@@ -330,11 +331,16 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptFromBytecode) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- function->raw_feedback_cell().set_interrupt_budget(FLAG_interrupt_budget);
+ function->SetInterruptBudget();
if (!function->has_feedback_vector()) {
IsCompiledScope is_compiled_scope(
function->shared().is_compiled_scope(isolate));
JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
+ DCHECK(is_compiled_scope.is_compiled());
+ if (FLAG_sparkplug) {
+ Compiler::CompileBaseline(isolate, function, Compiler::CLEAR_EXCEPTION,
+ &is_compiled_scope);
+ }
// Also initialize the invocation count here. This is only really needed for
// OSR. When we OSR functions with lazy feedback allocation we want to have
// a non zero invocation count so we can inline functions.
@@ -356,7 +362,7 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptFromCode) {
DCHECK(feedback_cell->value().IsFeedbackVector());
- feedback_cell->set_interrupt_budget(FLAG_interrupt_budget);
+ FeedbackVector::SetInterruptBudget(*feedback_cell);
SealHandleScope shs(isolate);
isolate->counters()->runtime_profiler_ticks()->Increment();
@@ -505,7 +511,7 @@ RUNTIME_FUNCTION(Runtime_IncrementUseCounter) {
RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
HandleScope scope(isolate);
-
+ DCHECK_LE(args.length(), 2);
// Append any worker thread runtime call stats to the main table before
// printing.
isolate->counters()->worker_thread_runtime_call_stats()->AddToMainTable(
@@ -513,47 +519,42 @@ RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
if (args.length() == 0) {
// Without arguments, the result is returned as a string.
- DCHECK_EQ(0, args.length());
std::stringstream stats_stream;
isolate->counters()->runtime_call_stats()->Print(stats_stream);
Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(
stats_stream.str().c_str());
isolate->counters()->runtime_call_stats()->Reset();
return *result;
+ }
+
+ std::FILE* f;
+ if (args[0].IsString()) {
+ // With a string argument, the results are appended to that file.
+ CONVERT_ARG_HANDLE_CHECKED(String, filename, 0);
+ f = std::fopen(filename->ToCString().get(), "a");
+ DCHECK_NOT_NULL(f);
} else {
- DCHECK_LE(args.length(), 2);
- std::FILE* f;
- if (args[0].IsString()) {
- // With a string argument, the results are appended to that file.
- CONVERT_ARG_HANDLE_CHECKED(String, arg0, 0);
- DisallowGarbageCollection no_gc;
- String::FlatContent flat = arg0->GetFlatContent(no_gc);
- const char* filename =
- reinterpret_cast<const char*>(&(flat.ToOneByteVector()[0]));
- f = std::fopen(filename, "a");
- DCHECK_NOT_NULL(f);
- } else {
- // With an integer argument, the results are written to stdout/stderr.
- CONVERT_SMI_ARG_CHECKED(fd, 0);
- DCHECK(fd == 1 || fd == 2);
- f = fd == 1 ? stdout : stderr;
- }
- // The second argument (if any) is a message header to be printed.
- if (args.length() >= 2) {
- CONVERT_ARG_HANDLE_CHECKED(String, arg1, 1);
- arg1->PrintOn(f);
- std::fputc('\n', f);
- std::fflush(f);
- }
- OFStream stats_stream(f);
- isolate->counters()->runtime_call_stats()->Print(stats_stream);
- isolate->counters()->runtime_call_stats()->Reset();
- if (args[0].IsString())
- std::fclose(f);
- else
- std::fflush(f);
- return ReadOnlyRoots(isolate).undefined_value();
+ // With an integer argument, the results are written to stdout/stderr.
+ CONVERT_SMI_ARG_CHECKED(fd, 0);
+ DCHECK(fd == 1 || fd == 2);
+ f = fd == 1 ? stdout : stderr;
+ }
+ // The second argument (if any) is a message header to be printed.
+ if (args.length() >= 2) {
+ CONVERT_ARG_HANDLE_CHECKED(String, message, 1);
+ message->PrintOn(f);
+ std::fputc('\n', f);
+ std::fflush(f);
}
+ OFStream stats_stream(f);
+ isolate->counters()->runtime_call_stats()->Print(stats_stream);
+ isolate->counters()->runtime_call_stats()->Reset();
+ if (args[0].IsString()) {
+ std::fclose(f);
+ } else {
+ std::fflush(f);
+ }
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_OrdinaryHasInstance) {
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index dfe738cdba..d0ca45d15a 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -120,7 +120,6 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
FieldIndex index = FieldIndex::ForPropertyIndex(
copy->map(isolate), details.field_index(),
details.representation());
- if (copy->IsUnboxedDoubleField(isolate, index)) continue;
Object raw = copy->RawFastPropertyAt(isolate, index);
if (raw.IsJSObject(isolate)) {
Handle<JSObject> value(JSObject::cast(raw), isolate);
@@ -521,7 +520,7 @@ Handle<JSObject> CreateArrayLiteral(
inline DeepCopyHints DecodeCopyHints(int flags) {
DeepCopyHints copy_hints =
(flags & AggregateLiteral::kIsShallow) ? kObjectIsShallow : kNoHints;
- if (FLAG_track_double_fields && !FLAG_unbox_double_fields) {
+ if (FLAG_track_double_fields) {
// Make sure we properly clone mutable heap numbers on 32-bit platforms.
copy_hints = kNoHints;
}
@@ -666,36 +665,46 @@ RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
- Handle<FeedbackVector> vector;
- if (maybe_vector->IsFeedbackVector()) {
- vector = Handle<FeedbackVector>::cast(maybe_vector);
- } else {
- DCHECK(maybe_vector->IsUndefined());
- }
- if (vector.is_null()) {
- Handle<JSRegExp> new_regexp;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, new_regexp,
- JSRegExp::New(isolate, pattern, JSRegExp::Flags(flags)));
- return *new_regexp;
+ if (maybe_vector->IsUndefined()) {
+ // We don't have a vector; don't create a boilerplate, simply construct a
+ // plain JSRegExp instance and return it.
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSRegExp::New(isolate, pattern, JSRegExp::Flags(flags)));
}
- // This function assumes that the boilerplate does not yet exist.
+ Handle<FeedbackVector> vector = Handle<FeedbackVector>::cast(maybe_vector);
FeedbackSlot literal_slot(FeedbackVector::ToSlot(index));
Handle<Object> literal_site(vector->Get(literal_slot)->cast<Object>(),
isolate);
+
+ // This function must not be called when a boilerplate already exists (if it
+ // exists, callers should instead copy the boilerplate into a new JSRegExp
+ // instance).
CHECK(!HasBoilerplate(literal_site));
- Handle<JSRegExp> boilerplate;
+ Handle<JSRegExp> regexp_instance;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, boilerplate,
+ isolate, regexp_instance,
JSRegExp::New(isolate, pattern, JSRegExp::Flags(flags)));
+
+ // JSRegExp literal sites are initialized in a two-step process:
+ // Uninitialized -> Preinitialized, and Preinitialized -> Initialized.
if (IsUninitializedLiteralSite(*literal_site)) {
PreInitializeLiteralSite(vector, literal_slot);
- return *boilerplate;
+ return *regexp_instance;
}
+
+ Handle<FixedArray> data(FixedArray::cast(regexp_instance->data()), isolate);
+ Handle<String> source(String::cast(regexp_instance->source()), isolate);
+ Handle<RegExpBoilerplateDescription> boilerplate =
+ isolate->factory()->NewRegExpBoilerplateDescription(
+ data, source, Smi::cast(regexp_instance->flags()));
+
vector->SynchronizedSet(literal_slot, *boilerplate);
- return *JSRegExp::Copy(boilerplate);
+ DCHECK(HasBoilerplate(
+ handle(vector->Get(literal_slot)->cast<Object>(), isolate)));
+
+ return *regexp_instance;
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-module.cc b/deps/v8/src/runtime/runtime-module.cc
index dd15f64be5..1862b504fe 100644
--- a/deps/v8/src/runtime/runtime-module.cc
+++ b/deps/v8/src/runtime/runtime-module.cc
@@ -14,19 +14,26 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_DynamicImportCall) {
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
+ DCHECK_LE(2, args.length());
+ DCHECK_GE(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, specifier, 1);
+ MaybeHandle<Object> import_assertions;
+ if (args.length() == 3) {
+ CHECK(args[2].IsObject());
+ import_assertions = args.at<Object>(2);
+ }
+
Handle<Script> script(Script::cast(function->shared().script()), isolate);
while (script->has_eval_from_shared()) {
script = handle(Script::cast(script->eval_from_shared().script()), isolate);
}
- RETURN_RESULT_OR_FAILURE(
- isolate,
- isolate->RunHostImportModuleDynamicallyCallback(script, specifier));
+ RETURN_RESULT_OR_FAILURE(isolate,
+ isolate->RunHostImportModuleDynamicallyCallback(
+ script, specifier, import_assertions));
}
RUNTIME_FUNCTION(Runtime_GetModuleNamespace) {
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 2bb98e674c..af7a26e869 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -164,12 +164,12 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
receiver->SetProperties(ReadOnlyRoots(isolate).empty_fixed_array());
} else {
Object filler = ReadOnlyRoots(isolate).one_pointer_filler_map();
- JSObject::cast(*receiver).RawFastPropertyAtPut(index, filler);
+ JSObject::cast(*receiver).FastPropertyAtPut(index, filler);
// We must clear any recorded slot for the deleted property, because
// subsequent object modifications might put a raw double there.
// Slot clearing is the reason why this entire function cannot currently
// be implemented in the DeleteProperty stub.
- if (index.is_inobject() && !receiver_map->IsUnboxedDoubleField(index)) {
+ if (index.is_inobject()) {
// We need to clear the recorded slot in this case because in-object
// slack tracking might not be finished. This ensures that we don't
// have recorded slots in free space.
@@ -358,6 +358,38 @@ RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
return ReadOnlyRoots(isolate).false_value();
}
+RUNTIME_FUNCTION(Runtime_HasOwnConstDataProperty) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, property, 1);
+
+ bool success;
+ LookupIterator::Key key(isolate, property, &success);
+ if (!success) return ReadOnlyRoots(isolate).undefined_value();
+
+ if (object->IsJSObject()) {
+ Handle<JSObject> js_obj = Handle<JSObject>::cast(object);
+ LookupIterator it(isolate, js_obj, key, js_obj, LookupIterator::OWN);
+
+ switch (it.state()) {
+ case LookupIterator::NOT_FOUND:
+ return isolate->heap()->ToBoolean(false);
+ case LookupIterator::DATA:
+ return isolate->heap()->ToBoolean(it.constness() ==
+ PropertyConstness::kConst);
+ default:
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+ }
+
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_IsDictPropertyConstTrackingEnabled) {
+ return isolate->heap()->ToBoolean(V8_DICT_PROPERTY_CONST_TRACKING_BOOL);
+}
+
RUNTIME_FUNCTION(Runtime_AddDictionaryProperty) {
HandleScope scope(isolate);
Handle<JSObject> receiver = args.at<JSObject>(0);
@@ -366,7 +398,8 @@ RUNTIME_FUNCTION(Runtime_AddDictionaryProperty) {
DCHECK(name->IsUniqueName());
- PropertyDetails property_details(kData, NONE, PropertyCellType::kNoCell);
+ PropertyDetails property_details(
+ kData, NONE, PropertyDetails::kConstIfDictConstnessTracking);
if (V8_DICT_MODE_PROTOTYPES_BOOL) {
Handle<OrderedNameDictionary> dictionary(
receiver->property_dictionary_ordered(), isolate);
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index ec3a60dcfc..403d83bef9 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -861,6 +861,36 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
return *result;
}
+namespace {
+
+MaybeHandle<Object> RegExpExec(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject, int32_t index,
+ Handle<RegExpMatchInfo> last_match_info,
+ RegExp::ExecQuirks exec_quirks) {
+ // Due to the way the JS calls are constructed this must be less than the
+ // length of a string, i.e. it is always a Smi. We check anyway for security.
+ CHECK_LE(0, index);
+ CHECK_GE(subject->length(), index);
+ isolate->counters()->regexp_entry_runtime()->Increment();
+ return RegExp::Exec(isolate, regexp, subject, index, last_match_info,
+ exec_quirks);
+}
+
+MaybeHandle<Object> ExperimentalOneshotExec(
+ Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
+ int32_t index, Handle<RegExpMatchInfo> last_match_info,
+ RegExp::ExecQuirks exec_quirks) {
+ // Due to the way the JS calls are constructed this must be less than the
+ // length of a string, i.e. it is always a Smi. We check anyway for security.
+ CHECK_LE(0, index);
+ CHECK_GE(subject->length(), index);
+ isolate->counters()->regexp_entry_runtime()->Increment();
+ return RegExp::ExperimentalOneshotExec(isolate, regexp, subject, index,
+ last_match_info, exec_quirks);
+}
+
+} // namespace
+
RUNTIME_FUNCTION(Runtime_RegExpExec) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
@@ -868,13 +898,21 @@ RUNTIME_FUNCTION(Runtime_RegExpExec) {
CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
CONVERT_INT32_ARG_CHECKED(index, 2);
CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, last_match_info, 3);
- // Due to the way the JS calls are constructed this must be less than the
- // length of a string, i.e. it is always a Smi. We check anyway for security.
- CHECK_LE(0, index);
- CHECK_GE(subject->length(), index);
- isolate->counters()->regexp_entry_runtime()->Increment();
RETURN_RESULT_OR_FAILURE(
- isolate, RegExp::Exec(isolate, regexp, subject, index, last_match_info));
+ isolate, RegExpExec(isolate, regexp, subject, index, last_match_info,
+ RegExp::ExecQuirks::kNone));
+}
+
+RUNTIME_FUNCTION(Runtime_RegExpExecTreatMatchAtEndAsFailure) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
+ CONVERT_INT32_ARG_CHECKED(index, 2);
+ CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, last_match_info, 3);
+ RETURN_RESULT_OR_FAILURE(
+ isolate, RegExpExec(isolate, regexp, subject, index, last_match_info,
+ RegExp::ExecQuirks::kTreatMatchAtEndAsFailure));
}
RUNTIME_FUNCTION(Runtime_RegExpExperimentalOneshotExec) {
@@ -884,14 +922,39 @@ RUNTIME_FUNCTION(Runtime_RegExpExperimentalOneshotExec) {
CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
CONVERT_INT32_ARG_CHECKED(index, 2);
CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, last_match_info, 3);
- // Due to the way the JS calls are constructed this must be less than the
- // length of a string, i.e. it is always a Smi. We check anyway for security.
- CHECK_LE(0, index);
- CHECK_GE(subject->length(), index);
- isolate->counters()->regexp_entry_runtime()->Increment();
RETURN_RESULT_OR_FAILURE(
- isolate, RegExp::ExperimentalOneshotExec(isolate, regexp, subject, index,
- last_match_info));
+ isolate,
+ ExperimentalOneshotExec(isolate, regexp, subject, index, last_match_info,
+ RegExp::ExecQuirks::kNone));
+}
+
+RUNTIME_FUNCTION(
+ Runtime_RegExpExperimentalOneshotExecTreatMatchAtEndAsFailure) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
+ CONVERT_INT32_ARG_CHECKED(index, 2);
+ CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, last_match_info, 3);
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ ExperimentalOneshotExec(isolate, regexp, subject, index, last_match_info,
+ RegExp::ExecQuirks::kTreatMatchAtEndAsFailure));
+}
+
+RUNTIME_FUNCTION(Runtime_RegExpBuildIndices) {
+ DCHECK(FLAG_harmony_regexp_match_indices);
+
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, match_info, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, maybe_names, 2);
+#ifdef DEBUG
+ CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+ DCHECK(regexp->GetFlags() & JSRegExp::kHasIndices);
+#endif
+
+ return *JSRegExpResultIndices::BuildIndices(isolate, match_info, maybe_names);
}
namespace {
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 275fd2f529..457f372955 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -380,17 +380,7 @@ std::unique_ptr<Handle<Object>[]> GetCallerArguments(Isolate* isolate,
return param_data;
} else {
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
int args_count = frame->GetActualArgumentCount();
-#else
- if (it.frame()->has_adapted_arguments()) {
- it.AdvanceOneFrame();
- DCHECK(it.frame()->is_arguments_adaptor());
- }
- frame = it.frame();
- int args_count = frame->ComputeParametersCount();
-#endif
-
*total_argc = args_count;
std::unique_ptr<Handle<Object>[]> param_data(
NewArray<Handle<Object>>(*total_argc));
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 2e761c4bfc..0acf6334d4 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -139,72 +139,6 @@ RUNTIME_FUNCTION(Runtime_StringReplaceOneCharWithString) {
return isolate->StackOverflow();
}
-// ES6 #sec-string.prototype.includes
-// String.prototype.includes(searchString [, position])
-RUNTIME_FUNCTION(Runtime_StringIncludes) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
-
- Handle<Object> receiver = args.at(0);
- if (receiver->IsNullOrUndefined(isolate)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
- isolate->factory()->NewStringFromAsciiChecked(
- "String.prototype.includes")));
- }
- Handle<String> receiver_string;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver_string,
- Object::ToString(isolate, receiver));
-
- // Check if the search string is a regExp and fail if it is.
- Handle<Object> search = args.at(1);
- Maybe<bool> is_reg_exp = RegExpUtils::IsRegExp(isolate, search);
- if (is_reg_exp.IsNothing()) {
- DCHECK(isolate->has_pending_exception());
- return ReadOnlyRoots(isolate).exception();
- }
- if (is_reg_exp.FromJust()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kFirstArgumentNotRegExp,
- isolate->factory()->NewStringFromStaticChars(
- "String.prototype.includes")));
- }
- Handle<String> search_string;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, search_string,
- Object::ToString(isolate, args.at(1)));
- Handle<Object> position;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
- Object::ToInteger(isolate, args.at(2)));
-
- uint32_t index = receiver_string->ToValidIndex(*position);
- int index_in_str =
- String::IndexOf(isolate, receiver_string, search_string, index);
- return *isolate->factory()->ToBoolean(index_in_str != -1);
-}
-
-// ES6 #sec-string.prototype.indexof
-// String.prototype.indexOf(searchString [, position])
-RUNTIME_FUNCTION(Runtime_StringIndexOf) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- return String::IndexOf(isolate, args.at(0), args.at(1), args.at(2));
-}
-
-// ES6 #sec-string.prototype.indexof
-// String.prototype.indexOf(searchString, position)
-// Fast version that assumes that does not perform conversions of the incoming
-// arguments.
-RUNTIME_FUNCTION(Runtime_StringIndexOfUnchecked) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- Handle<String> receiver_string = args.at<String>(0);
- Handle<String> search_string = args.at<String>(1);
- int index = std::min(std::max(args.smi_at(2), 0), receiver_string->length());
-
- return Smi::FromInt(String::IndexOf(isolate, receiver_string, search_string,
- static_cast<uint32_t>(index)));
-}
-
RUNTIME_FUNCTION(Runtime_StringLastIndexOf) {
HandleScope handle_scope(isolate);
return String::LastIndexOf(isolate, args.at(0), args.at(1),
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 202a0a8785..802c7f2997 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -256,9 +256,25 @@ RUNTIME_FUNCTION(Runtime_DynamicCheckMapsEnabled) {
return isolate->heap()->ToBoolean(FLAG_turbo_dynamic_map_checks);
}
-RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
- HandleScope scope(isolate);
+RUNTIME_FUNCTION(Runtime_IsTopTierTurboprop) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(0, args.length());
+ return isolate->heap()->ToBoolean(FLAG_turboprop_as_toptier);
+}
+
+RUNTIME_FUNCTION(Runtime_IsMidTierTurboprop) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(0, args.length());
+ return isolate->heap()->ToBoolean(FLAG_turboprop &&
+ !FLAG_turboprop_as_toptier);
+}
+
+namespace {
+
+enum class TierupKind { kTierupBytecode, kTierupBytecodeOrMidTier };
+Object OptimizeFunctionOnNextCall(RuntimeArguments& args, Isolate* isolate,
+ TierupKind tierup_kind) {
if (args.length() != 1 && args.length() != 2) {
return CrashUnlessFuzzing(isolate);
}
@@ -278,7 +294,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
IsCompiledScope is_compiled_scope(
function->shared().is_compiled_scope(isolate));
if (!is_compiled_scope.is_compiled() &&
- !Compiler::Compile(function, Compiler::CLEAR_EXCEPTION,
+ !Compiler::Compile(isolate, function, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope)) {
return CrashUnlessFuzzing(isolate);
}
@@ -297,7 +313,10 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
PendingOptimizationTable::MarkedForOptimization(isolate, function);
}
- if (function->HasAvailableOptimizedCode()) {
+ CodeKind kind = CodeKindForTopTier();
+ if ((tierup_kind == TierupKind::kTierupBytecode &&
+ function->HasAvailableOptimizedCode()) ||
+ function->HasAvailableCodeKind(kind)) {
DCHECK(function->HasAttachedOptimizedCode() ||
function->ChecksOptimizationMarker());
if (FLAG_testing_d8_test_runner) {
@@ -337,9 +356,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
return ReadOnlyRoots(isolate).undefined_value();
}
-namespace {
-
-bool EnsureFeedbackVector(Handle<JSFunction> function) {
+bool EnsureFeedbackVector(Isolate* isolate, Handle<JSFunction> function) {
// Check function allows lazy compilation.
if (!function->shared().allows_lazy_compilation()) return false;
@@ -355,7 +372,7 @@ bool EnsureFeedbackVector(Handle<JSFunction> function) {
bool needs_compilation =
!function->is_compiled() && !function->has_closure_feedback_cell_array();
if (needs_compilation &&
- !Compiler::Compile(function, Compiler::CLEAR_EXCEPTION,
+ !Compiler::Compile(isolate, function, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope)) {
return false;
}
@@ -368,11 +385,49 @@ bool EnsureFeedbackVector(Handle<JSFunction> function) {
} // namespace
+RUNTIME_FUNCTION(Runtime_CompileBaseline) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+
+ IsCompiledScope is_compiled_scope =
+ function->shared(isolate).is_compiled_scope(isolate);
+
+ if (!function->shared(isolate).IsUserJavaScript()) {
+ return CrashUnlessFuzzing(isolate);
+ }
+
+ // First compile the bytecode, if we have to.
+ if (!is_compiled_scope.is_compiled() &&
+ !Compiler::Compile(isolate, function, Compiler::KEEP_EXCEPTION,
+ &is_compiled_scope)) {
+ return CrashUnlessFuzzing(isolate);
+ }
+
+ if (!Compiler::CompileBaseline(isolate, function, Compiler::KEEP_EXCEPTION,
+ &is_compiled_scope)) {
+ return CrashUnlessFuzzing(isolate);
+ }
+
+ return *function;
+}
+
+RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
+ HandleScope scope(isolate);
+ return OptimizeFunctionOnNextCall(args, isolate, TierupKind::kTierupBytecode);
+}
+
+RUNTIME_FUNCTION(Runtime_TierupFunctionOnNextCall) {
+ HandleScope scope(isolate);
+ return OptimizeFunctionOnNextCall(args, isolate,
+ TierupKind::kTierupBytecodeOrMidTier);
+}
+
RUNTIME_FUNCTION(Runtime_EnsureFeedbackVectorForFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- EnsureFeedbackVector(function);
+ EnsureFeedbackVector(isolate, function);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -394,7 +449,7 @@ RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
}
}
- if (!EnsureFeedbackVector(function)) {
+ if (!EnsureFeedbackVector(isolate, function)) {
return CrashUnlessFuzzing(isolate);
}
@@ -474,16 +529,15 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
function->MarkForOptimization(ConcurrencyMode::kNotConcurrent);
// Make the profiler arm all back edges in unoptimized code.
- if (it.frame()->type() == StackFrame::INTERPRETED) {
+ if (it.frame()->is_unoptimized()) {
isolate->runtime_profiler()->AttemptOnStackReplacement(
- InterpretedFrame::cast(it.frame()),
+ UnoptimizedFrame::cast(it.frame()),
AbstractCode::kMaxLoopNestingMarker);
}
return ReadOnlyRoots(isolate).undefined_value();
}
-
RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -567,6 +621,9 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
status |= static_cast<int>(OptimizationStatus::kTurboFanned);
}
}
+ if (function->HasAttachedCodeKind(CodeKind::BASELINE)) {
+ status |= static_cast<int>(OptimizationStatus::kBaseline);
+ }
if (function->ActiveTierIsIgnition()) {
status |= static_cast<int>(OptimizationStatus::kInterpreted);
}
@@ -680,7 +737,6 @@ RUNTIME_FUNCTION(Runtime_NotifyContextDisposed) {
return ReadOnlyRoots(isolate).undefined_value();
}
-
RUNTIME_FUNCTION(Runtime_SetAllocationTimeout) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 2 || args.length() == 3);
@@ -875,7 +931,6 @@ RUNTIME_FUNCTION(Runtime_GlobalPrint) {
return string;
}
-
RUNTIME_FUNCTION(Runtime_SystemBreak) {
// The code below doesn't create handles, but when breaking here in GDB
// having a handle scope might be useful.
@@ -885,7 +940,6 @@ RUNTIME_FUNCTION(Runtime_SystemBreak) {
return ReadOnlyRoots(isolate).undefined_value();
}
-
RUNTIME_FUNCTION(Runtime_SetForceSlowPath) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
@@ -943,7 +997,8 @@ RUNTIME_FUNCTION(Runtime_DisassembleFunction) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
IsCompiledScope is_compiled_scope;
CHECK(func->is_compiled() ||
- Compiler::Compile(func, Compiler::KEEP_EXCEPTION, &is_compiled_scope));
+ Compiler::Compile(isolate, func, Compiler::KEEP_EXCEPTION,
+ &is_compiled_scope));
StdoutStream os;
func->code().Print(os);
os << std::endl;
@@ -1061,22 +1116,22 @@ RUNTIME_FUNCTION(Runtime_WasmTraceExit) {
if (num_returns == 1) {
wasm::ValueType return_type = sig->GetReturn(0);
switch (return_type.kind()) {
- case wasm::ValueType::kI32: {
+ case wasm::kI32: {
int32_t value = ReadUnalignedValue<int32_t>(value_addr_smi.ptr());
PrintF(" -> %d\n", value);
break;
}
- case wasm::ValueType::kI64: {
+ case wasm::kI64: {
int64_t value = ReadUnalignedValue<int64_t>(value_addr_smi.ptr());
PrintF(" -> %" PRId64 "\n", value);
break;
}
- case wasm::ValueType::kF32: {
+ case wasm::kF32: {
float_t value = ReadUnalignedValue<float_t>(value_addr_smi.ptr());
PrintF(" -> %f\n", value);
break;
}
- case wasm::ValueType::kF64: {
+ case wasm::kF64: {
double_t value = ReadUnalignedValue<double_t>(value_addr_smi.ptr());
PrintF(" -> %f\n", value);
break;
@@ -1234,22 +1289,6 @@ RUNTIME_FUNCTION(Runtime_GetWasmExceptionValues) {
return *isolate->factory()->NewJSArrayWithElements(values);
}
-namespace {
-bool EnableWasmThreads(v8::Local<v8::Context> context) { return true; }
-bool DisableWasmThreads(v8::Local<v8::Context> context) { return false; }
-} // namespace
-
-// This runtime function enables WebAssembly threads through an embedder
-// callback and thereby bypasses the value in FLAG_experimental_wasm_threads.
-RUNTIME_FUNCTION(Runtime_SetWasmThreadsEnabled) {
- DCHECK_EQ(1, args.length());
- CONVERT_BOOLEAN_ARG_CHECKED(flag, 0);
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- v8_isolate->SetWasmThreadsEnabledCallback(flag ? EnableWasmThreads
- : DisableWasmThreads);
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_RegexpHasBytecode) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
@@ -1571,25 +1610,17 @@ RUNTIME_FUNCTION(Runtime_WasmTierUpFunction) {
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_WasmTierDownModule) {
+RUNTIME_FUNCTION(Runtime_WasmTierDown) {
HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- auto* native_module = instance->module_object().native_module();
- native_module->SetTieringState(wasm::kTieredDown);
- native_module->RecompileForTiering();
- CHECK(!native_module->compilation_state()->failed());
+ DCHECK_EQ(0, args.length());
+ isolate->wasm_engine()->TierDownAllModulesPerIsolate(isolate);
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_WasmTierUpModule) {
+RUNTIME_FUNCTION(Runtime_WasmTierUp) {
HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- auto* native_module = instance->module_object().native_module();
- native_module->SetTieringState(wasm::kTieredUp);
- native_module->RecompileForTiering();
- CHECK(!native_module->compilation_state()->failed());
+ DCHECK_EQ(0, args.length());
+ isolate->wasm_engine()->TierUpAllModulesPerIsolate(isolate);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -1675,6 +1706,7 @@ RUNTIME_FUNCTION(Runtime_EnableCodeLoggingForTesting) {
void CodeDependencyChangeEvent(Handle<Code> code,
Handle<SharedFunctionInfo> shared,
const char* reason) final {}
+ void BytecodeFlushEvent(Address compiled_data_start) final {}
bool is_listening_to_code_events() final { return true; }
};
diff --git a/deps/v8/src/runtime/runtime-interpreter.cc b/deps/v8/src/runtime/runtime-trace.cc
index 7cb9b067cf..8cd141d33e 100644
--- a/deps/v8/src/runtime/runtime-interpreter.cc
+++ b/deps/v8/src/runtime/runtime-trace.cc
@@ -21,7 +21,7 @@
namespace v8 {
namespace internal {
-#ifdef V8_TRACE_IGNITION
+#ifdef V8_TRACE_UNOPTIMIZED
namespace {
@@ -40,9 +40,9 @@ void AdvanceToOffsetForTracing(
interpreter::OperandScale::kSingle));
}
-void PrintRegisters(Isolate* isolate, std::ostream& os, bool is_input,
- interpreter::BytecodeArrayIterator&
- bytecode_iterator, // NOLINT(runtime/references)
+void PrintRegisters(UnoptimizedFrame* frame, std::ostream& os, bool is_input,
+ interpreter::BytecodeArrayAccessor&
+ bytecode_accessor, // NOLINT(runtime/references)
Handle<Object> accumulator) {
static const char kAccumulator[] = "accumulator";
static const int kRegFieldWidth = static_cast<int>(sizeof(kAccumulator) - 1);
@@ -54,7 +54,7 @@ void PrintRegisters(Isolate* isolate, std::ostream& os, bool is_input,
os << (is_input ? kInputColourCode : kOutputColourCode);
}
- interpreter::Bytecode bytecode = bytecode_iterator.current_bytecode();
+ interpreter::Bytecode bytecode = bytecode_accessor.current_bytecode();
// Print accumulator.
if ((is_input && interpreter::Bytecodes::ReadsAccumulator(bytecode)) ||
@@ -65,9 +65,6 @@ void PrintRegisters(Isolate* isolate, std::ostream& os, bool is_input,
}
// Print the registers.
- JavaScriptFrameIterator frame_iterator(isolate);
- InterpretedFrame* frame =
- reinterpret_cast<InterpretedFrame*>(frame_iterator.frame());
int operand_count = interpreter::Bytecodes::NumberOfOperands(bytecode);
for (int operand_index = 0; operand_index < operand_count; operand_index++) {
interpreter::OperandType operand_type =
@@ -78,14 +75,14 @@ void PrintRegisters(Isolate* isolate, std::ostream& os, bool is_input,
: interpreter::Bytecodes::IsRegisterOutputOperandType(operand_type);
if (should_print) {
interpreter::Register first_reg =
- bytecode_iterator.GetRegisterOperand(operand_index);
- int range = bytecode_iterator.GetRegisterOperandRange(operand_index);
+ bytecode_accessor.GetRegisterOperand(operand_index);
+ int range = bytecode_accessor.GetRegisterOperandRange(operand_index);
for (int reg_index = first_reg.index();
reg_index < first_reg.index() + range; reg_index++) {
Object reg_object = frame->ReadInterpreterRegister(reg_index);
os << " [ " << std::setw(kRegFieldWidth)
<< interpreter::Register(reg_index).ToString(
- bytecode_iterator.bytecode_array()->parameter_count())
+ bytecode_accessor.bytecode_array()->parameter_count())
<< kArrowDirection;
reg_object.ShortPrint(os);
os << " ]" << std::endl;
@@ -99,8 +96,19 @@ void PrintRegisters(Isolate* isolate, std::ostream& os, bool is_input,
} // namespace
-RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeEntry) {
- if (!FLAG_trace_ignition) {
+RUNTIME_FUNCTION(Runtime_TraceUnoptimizedBytecodeEntry) {
+ if (!FLAG_trace_ignition && !FLAG_trace_baseline_exec) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+
+ JavaScriptFrameIterator frame_iterator(isolate);
+ UnoptimizedFrame* frame =
+ reinterpret_cast<UnoptimizedFrame*>(frame_iterator.frame());
+
+ if (frame->is_interpreted() && !FLAG_trace_ignition) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+ if (frame->is_baseline() && !FLAG_trace_baseline_exec) {
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -120,21 +128,38 @@ RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeEntry) {
const uint8_t* base_address = reinterpret_cast<const uint8_t*>(
bytecode_array->GetFirstBytecodeAddress());
const uint8_t* bytecode_address = base_address + offset;
- os << " -> " << static_cast<const void*>(bytecode_address) << " @ "
- << std::setw(4) << offset << " : ";
+
+ if (frame->is_baseline()) {
+ os << "B-> ";
+ } else {
+ os << " -> ";
+ }
+ os << static_cast<const void*>(bytecode_address) << " @ " << std::setw(4)
+ << offset << " : ";
interpreter::BytecodeDecoder::Decode(os, bytecode_address,
bytecode_array->parameter_count());
os << std::endl;
// Print all input registers and accumulator.
- PrintRegisters(isolate, os, true, bytecode_iterator, accumulator);
+ PrintRegisters(frame, os, true, bytecode_iterator, accumulator);
os << std::flush;
}
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeExit) {
- if (!FLAG_trace_ignition) {
+RUNTIME_FUNCTION(Runtime_TraceUnoptimizedBytecodeExit) {
+ if (!FLAG_trace_ignition && !FLAG_trace_baseline_exec) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+
+ JavaScriptFrameIterator frame_iterator(isolate);
+ UnoptimizedFrame* frame =
+ reinterpret_cast<UnoptimizedFrame*>(frame_iterator.frame());
+
+ if (frame->is_interpreted() && !FLAG_trace_ignition) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+ if (frame->is_baseline() && !FLAG_trace_baseline_exec) {
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -154,8 +179,9 @@ RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeExit) {
interpreter::OperandScale::kSingle ||
offset > bytecode_iterator.current_offset()) {
StdoutStream os;
+
// Print all output registers and accumulator.
- PrintRegisters(isolate, os, false, bytecode_iterator, accumulator);
+ PrintRegisters(frame, os, false, bytecode_iterator, accumulator);
os << std::flush;
}
return ReadOnlyRoots(isolate).undefined_value();
@@ -165,7 +191,7 @@ RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeExit) {
#ifdef V8_TRACE_FEEDBACK_UPDATES
-RUNTIME_FUNCTION(Runtime_InterpreterTraceUpdateFeedback) {
+RUNTIME_FUNCTION(Runtime_TraceUpdateFeedback) {
if (!FLAG_trace_feedback_updates) {
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index db6420f295..9ba26c23ad 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -12,7 +12,6 @@
#include "src/heap/factory.h"
#include "src/logging/counters.h"
#include "src/numbers/conversions.h"
-#include "src/objects/frame-array-inl.h"
#include "src/objects/objects-inl.h"
#include "src/runtime/runtime-utils.h"
#include "src/trap-handler/trap-handler.h"
@@ -66,7 +65,7 @@ Context GetNativeContextFromWasmInstanceOnStackTop(Isolate* isolate) {
class V8_NODISCARD ClearThreadInWasmScope {
public:
- ClearThreadInWasmScope() {
+ explicit ClearThreadInWasmScope(Isolate* isolate) : isolate_(isolate) {
DCHECK_IMPLIES(trap_handler::IsTrapHandlerEnabled(),
trap_handler::IsThreadInWasm());
trap_handler::ClearThreadInWasm();
@@ -74,8 +73,15 @@ class V8_NODISCARD ClearThreadInWasmScope {
~ClearThreadInWasmScope() {
DCHECK_IMPLIES(trap_handler::IsTrapHandlerEnabled(),
!trap_handler::IsThreadInWasm());
- trap_handler::SetThreadInWasm();
+ if (!isolate_->has_pending_exception()) {
+ trap_handler::SetThreadInWasm();
+ }
+ // Otherwise we only want to set the flag if the exception is caught in
+ // wasm. This is handled by the unwinder.
}
+
+ private:
+ Isolate* isolate_;
};
Object ThrowWasmError(Isolate* isolate, MessageTemplate message) {
@@ -109,7 +115,7 @@ RUNTIME_FUNCTION(Runtime_WasmIsValidRefValue) {
}
RUNTIME_FUNCTION(Runtime_WasmMemoryGrow) {
- ClearThreadInWasmScope flag_scope;
+ ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
@@ -125,59 +131,68 @@ RUNTIME_FUNCTION(Runtime_WasmMemoryGrow) {
}
RUNTIME_FUNCTION(Runtime_ThrowWasmError) {
- ClearThreadInWasmScope clear_wasm_flag;
+ ClearThreadInWasmScope flag_scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_SMI_ARG_CHECKED(message_id, 0);
return ThrowWasmError(isolate, MessageTemplateFromInt(message_id));
}
RUNTIME_FUNCTION(Runtime_ThrowWasmStackOverflow) {
- ClearThreadInWasmScope clear_wasm_flag;
+ ClearThreadInWasmScope clear_wasm_flag(isolate);
SealHandleScope shs(isolate);
DCHECK_LE(0, args.length());
return isolate->StackOverflow();
}
RUNTIME_FUNCTION(Runtime_WasmThrowJSTypeError) {
- // This runtime function is called both from wasm and from e.g. js-to-js
- // functions. Hence the "thread in wasm" flag can be either set or not. Both
- // is OK, since throwing will trigger unwinding anyway, which sets the flag
- // correctly depending on the handler.
+ // The caller may be wasm or JS. Only clear the thread_in_wasm flag if the
+ // caller is wasm, and let the unwinder set it back depending on the handler.
+ if (trap_handler::IsTrapHandlerEnabled() && trap_handler::IsThreadInWasm()) {
+ trap_handler::ClearThreadInWasm();
+ }
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kWasmTrapJSTypeError));
}
-RUNTIME_FUNCTION(Runtime_WasmThrowCreate) {
- ClearThreadInWasmScope clear_wasm_flag;
- // TODO(kschimpf): Can this be replaced with equivalent TurboFan code/calls.
+RUNTIME_FUNCTION(Runtime_WasmThrow) {
+ ClearThreadInWasmScope clear_wasm_flag(isolate);
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- DCHECK(isolate->context().is_null());
isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
+
CONVERT_ARG_CHECKED(WasmExceptionTag, tag_raw, 0);
- CONVERT_SMI_ARG_CHECKED(size, 1);
+ CONVERT_ARG_CHECKED(FixedArray, values_raw, 1);
// TODO(wasm): Manually box because parameters are not visited yet.
- Handle<Object> tag(tag_raw, isolate);
+ Handle<WasmExceptionTag> tag(tag_raw, isolate);
+ Handle<FixedArray> values(values_raw, isolate);
+
Handle<Object> exception = isolate->factory()->NewWasmRuntimeError(
MessageTemplate::kWasmExceptionError);
- CHECK(!Object::SetProperty(isolate, exception,
- isolate->factory()->wasm_exception_tag_symbol(),
- tag, StoreOrigin::kMaybeKeyed,
- Just(ShouldThrow::kThrowOnError))
- .is_null());
- Handle<FixedArray> values = isolate->factory()->NewFixedArray(size);
- CHECK(!Object::SetProperty(isolate, exception,
- isolate->factory()->wasm_exception_values_symbol(),
- values, StoreOrigin::kMaybeKeyed,
- Just(ShouldThrow::kThrowOnError))
- .is_null());
- return *exception;
+ Object::SetProperty(
+ isolate, exception, isolate->factory()->wasm_exception_tag_symbol(), tag,
+ StoreOrigin::kMaybeKeyed, Just(ShouldThrow::kThrowOnError))
+ .Check();
+ Object::SetProperty(
+ isolate, exception, isolate->factory()->wasm_exception_values_symbol(),
+ values, StoreOrigin::kMaybeKeyed, Just(ShouldThrow::kThrowOnError))
+ .Check();
+
+ isolate->wasm_engine()->SampleThrowEvent(isolate);
+ return isolate->Throw(*exception);
+}
+
+RUNTIME_FUNCTION(Runtime_WasmReThrow) {
+ ClearThreadInWasmScope clear_wasm_flag(isolate);
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ isolate->wasm_engine()->SampleRethrowEvent(isolate);
+ return isolate->ReThrow(args[0]);
}
RUNTIME_FUNCTION(Runtime_WasmStackGuard) {
- ClearThreadInWasmScope wasm_flag;
+ ClearThreadInWasmScope wasm_flag(isolate);
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
@@ -189,7 +204,7 @@ RUNTIME_FUNCTION(Runtime_WasmStackGuard) {
}
RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
- ClearThreadInWasmScope wasm_flag;
+ ClearThreadInWasmScope wasm_flag(isolate);
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
@@ -295,7 +310,7 @@ RUNTIME_FUNCTION(Runtime_WasmTriggerTierUp) {
}
RUNTIME_FUNCTION(Runtime_WasmAtomicNotify) {
- ClearThreadInWasmScope clear_wasm_flag;
+ ClearThreadInWasmScope clear_wasm_flag(isolate);
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
@@ -311,7 +326,7 @@ RUNTIME_FUNCTION(Runtime_WasmAtomicNotify) {
}
RUNTIME_FUNCTION(Runtime_WasmI32AtomicWait) {
- ClearThreadInWasmScope clear_wasm_flag;
+ ClearThreadInWasmScope clear_wasm_flag(isolate);
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
@@ -334,7 +349,7 @@ RUNTIME_FUNCTION(Runtime_WasmI32AtomicWait) {
}
RUNTIME_FUNCTION(Runtime_WasmI64AtomicWait) {
- ClearThreadInWasmScope clear_wasm_flag;
+ ClearThreadInWasmScope clear_wasm_flag(isolate);
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
@@ -372,7 +387,7 @@ Object ThrowTableOutOfBounds(Isolate* isolate,
} // namespace
RUNTIME_FUNCTION(Runtime_WasmRefFunc) {
- ClearThreadInWasmScope flag_scope;
+ ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
@@ -386,7 +401,7 @@ RUNTIME_FUNCTION(Runtime_WasmRefFunc) {
}
RUNTIME_FUNCTION(Runtime_WasmFunctionTableGet) {
- ClearThreadInWasmScope flag_scope;
+ ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
@@ -410,7 +425,7 @@ RUNTIME_FUNCTION(Runtime_WasmFunctionTableGet) {
}
RUNTIME_FUNCTION(Runtime_WasmFunctionTableSet) {
- ClearThreadInWasmScope flag_scope;
+ ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
@@ -437,7 +452,7 @@ RUNTIME_FUNCTION(Runtime_WasmFunctionTableSet) {
}
RUNTIME_FUNCTION(Runtime_WasmTableInit) {
- ClearThreadInWasmScope flag_scope;
+ ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
DCHECK_EQ(6, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
@@ -459,7 +474,7 @@ RUNTIME_FUNCTION(Runtime_WasmTableInit) {
}
RUNTIME_FUNCTION(Runtime_WasmTableCopy) {
- ClearThreadInWasmScope flag_scope;
+ ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
DCHECK_EQ(6, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
@@ -481,7 +496,7 @@ RUNTIME_FUNCTION(Runtime_WasmTableCopy) {
}
RUNTIME_FUNCTION(Runtime_WasmTableGrow) {
- ClearThreadInWasmScope flag_scope;
+ ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
auto instance =
@@ -500,7 +515,7 @@ RUNTIME_FUNCTION(Runtime_WasmTableGrow) {
}
RUNTIME_FUNCTION(Runtime_WasmTableFill) {
- ClearThreadInWasmScope flag_scope;
+ ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
auto instance =
@@ -533,40 +548,73 @@ RUNTIME_FUNCTION(Runtime_WasmTableFill) {
}
RUNTIME_FUNCTION(Runtime_WasmDebugBreak) {
- ClearThreadInWasmScope flag_scope;
+ ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
FrameFinder<WasmFrame, StackFrame::EXIT, StackFrame::WASM_DEBUG_BREAK>
frame_finder(isolate);
auto instance = handle(frame_finder.frame()->wasm_instance(), isolate);
- int position = frame_finder.frame()->position();
+ auto script = handle(instance->module_object().script(), isolate);
+ WasmFrame* frame = frame_finder.frame();
+ int position = frame->position();
+ auto frame_id = frame->id();
+ auto* debug_info = frame->native_module()->GetDebugInfo();
isolate->set_context(instance->native_context());
+ // Stepping can repeatedly create code, and code GC requires stack guards to
+ // be executed on all involved isolates. Proactively do this here.
+ StackLimitCheck check(isolate);
+ if (check.InterruptRequested()) isolate->stack_guard()->HandleInterrupts();
+
// Enter the debugger.
DebugScope debug_scope(isolate->debug());
- WasmFrame* frame = frame_finder.frame();
- auto* debug_info = frame->native_module()->GetDebugInfo();
+ // Check for instrumentation breakpoint.
+ DCHECK_EQ(script->break_on_entry(), instance->break_on_entry());
+ if (script->break_on_entry()) {
+ MaybeHandle<FixedArray> maybe_on_entry_breakpoints =
+ WasmScript::CheckBreakPoints(
+ isolate, script, WasmScript::kOnEntryBreakpointPosition, frame_id);
+ script->set_break_on_entry(false);
+ // Update the "break_on_entry" flag on all live instances.
+ i::WeakArrayList weak_instance_list = script->wasm_weak_instance_list();
+ for (int i = 0; i < weak_instance_list.length(); ++i) {
+ if (weak_instance_list.Get(i)->IsCleared()) continue;
+ i::WasmInstanceObject instance = i::WasmInstanceObject::cast(
+ weak_instance_list.Get(i)->GetHeapObject());
+ instance.set_break_on_entry(false);
+ }
+ DCHECK(!instance->break_on_entry());
+ Handle<FixedArray> on_entry_breakpoints;
+ if (maybe_on_entry_breakpoints.ToHandle(&on_entry_breakpoints)) {
+ debug_info->ClearStepping(isolate);
+ StepAction step_action = isolate->debug()->last_step_action();
+ isolate->debug()->ClearStepping();
+ isolate->debug()->OnDebugBreak(on_entry_breakpoints, step_action);
+ // Don't process regular breakpoints.
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+ }
+
if (debug_info->IsStepping(frame)) {
debug_info->ClearStepping(isolate);
- StepAction stepAction = isolate->debug()->last_step_action();
+ StepAction step_action = isolate->debug()->last_step_action();
isolate->debug()->ClearStepping();
isolate->debug()->OnDebugBreak(isolate->factory()->empty_fixed_array(),
- stepAction);
+ step_action);
return ReadOnlyRoots(isolate).undefined_value();
}
// Check whether we hit a breakpoint.
- Handle<Script> script(instance->module_object().script(), isolate);
Handle<FixedArray> breakpoints;
- if (WasmScript::CheckBreakPoints(isolate, script, position)
+ if (WasmScript::CheckBreakPoints(isolate, script, position, frame_id)
.ToHandle(&breakpoints)) {
debug_info->ClearStepping(isolate);
- StepAction stepAction = isolate->debug()->last_step_action();
+ StepAction step_action = isolate->debug()->last_step_action();
isolate->debug()->ClearStepping();
if (isolate->debug()->break_points_active()) {
// We hit one or several breakpoints. Notify the debug listeners.
- isolate->debug()->OnDebugBreak(breakpoints, stepAction);
+ isolate->debug()->OnDebugBreak(breakpoints, step_action);
}
}
@@ -574,7 +622,7 @@ RUNTIME_FUNCTION(Runtime_WasmDebugBreak) {
}
RUNTIME_FUNCTION(Runtime_WasmAllocateRtt) {
- ClearThreadInWasmScope flag_scope;
+ ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
CONVERT_UINT32_ARG_CHECKED(type_index, 0);
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 311238c9d5..912808ab3c 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -82,7 +82,6 @@ namespace internal {
#define FOR_EACH_INTRINSIC_CLASSES(F, I) \
F(DefineClass, -1 /* >= 3 */, 1) \
- F(HomeObjectSymbol, 0, 1) \
F(LoadFromSuper, 3, 1) \
F(LoadKeyedFromSuper, 3, 1) \
F(StoreKeyedToSuper, 4, 1) \
@@ -106,12 +105,15 @@ namespace internal {
#define FOR_EACH_INTRINSIC_COMPILER(F, I) \
F(CompileForOnStackReplacement, 0, 1) \
F(CompileLazy, 1, 1) \
+ F(CompileBaseline, 1, 1) \
F(CompileOptimized_Concurrent, 1, 1) \
F(CompileOptimized_NotConcurrent, 1, 1) \
+ F(InstallBaselineCode, 1, 1) \
F(HealOptimizedCodeSlot, 1, 1) \
F(FunctionFirstExecution, 1, 1) \
F(InstantiateAsmJs, 4, 1) \
F(NotifyDeoptimized, 0, 1) \
+ F(ObserveNode, 1, 1) \
F(ResolvePossiblyDirectEval, 6, 1) \
F(TryInstallNCICode, 1, 1)
@@ -150,24 +152,23 @@ namespace internal {
F(ForInEnumerate, 1, 1) \
F(ForInHasProperty, 2, 1)
-#ifdef V8_TRACE_IGNITION
-#define FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F, I) \
- F(InterpreterTraceBytecodeEntry, 3, 1) \
- F(InterpreterTraceBytecodeExit, 3, 1)
+#ifdef V8_TRACE_UNOPTIMIZED
+#define FOR_EACH_INTRINSIC_TRACE_UNOPTIMIZED(F, I) \
+ F(TraceUnoptimizedBytecodeEntry, 3, 1) \
+ F(TraceUnoptimizedBytecodeExit, 3, 1)
#else
-#define FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F, I)
+#define FOR_EACH_INTRINSIC_TRACE_UNOPTIMIZED(F, I)
#endif
#ifdef V8_TRACE_FEEDBACK_UPDATES
-#define FOR_EACH_INTRINSIC_INTERPRETER_TRACE_FEEDBACK(F, I) \
- F(InterpreterTraceUpdateFeedback, 3, 1)
+#define FOR_EACH_INTRINSIC_TRACE_FEEDBACK(F, I) F(TraceUpdateFeedback, 3, 1)
#else
-#define FOR_EACH_INTRINSIC_INTERPRETER_TRACE_FEEDBACK(F, I)
+#define FOR_EACH_INTRINSIC_TRACE_FEEDBACK(F, I)
#endif
-#define FOR_EACH_INTRINSIC_INTERPRETER(F, I) \
- FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F, I) \
- FOR_EACH_INTRINSIC_INTERPRETER_TRACE_FEEDBACK(F, I)
+#define FOR_EACH_INTRINSIC_TRACE(F, I) \
+ FOR_EACH_INTRINSIC_TRACE_UNOPTIMIZED(F, I) \
+ FOR_EACH_INTRINSIC_TRACE_FEEDBACK(F, I)
#define FOR_EACH_INTRINSIC_FUNCTION(F, I) \
I(Call, -1 /* >= 2 */, 1) \
@@ -266,9 +267,9 @@ namespace internal {
F(CreateObjectLiteralWithoutAllocationSite, 2, 1) \
F(CreateRegExpLiteral, 4, 1)
-#define FOR_EACH_INTRINSIC_MODULE(F, I) \
- F(DynamicImportCall, 2, 1) \
- I(GetImportMetaObject, 0, 1) \
+#define FOR_EACH_INTRINSIC_MODULE(F, I) \
+ F(DynamicImportCall, -1 /* [2, 3] */, 1) \
+ I(GetImportMetaObject, 0, 1) \
F(GetModuleNamespace, 1, 1)
#define FOR_EACH_INTRINSIC_NUMBERS(F, I) \
@@ -384,15 +385,18 @@ namespace internal {
F(JSProxyGetTarget, 1, 1) \
F(SetPropertyWithReceiver, 4, 1)
-#define FOR_EACH_INTRINSIC_REGEXP(F, I) \
- I(IsRegExp, 1, 1) \
- F(RegExpExec, 4, 1) \
- F(RegExpExperimentalOneshotExec, 4, 1) \
- F(RegExpExecMultiple, 4, 1) \
- F(RegExpInitializeAndCompile, 3, 1) \
- F(RegExpReplaceRT, 3, 1) \
- F(RegExpSplit, 3, 1) \
- F(StringReplaceNonGlobalRegExpWithFunction, 3, 1) \
+#define FOR_EACH_INTRINSIC_REGEXP(F, I) \
+ I(IsRegExp, 1, 1) \
+ F(RegExpBuildIndices, 3, 1) \
+ F(RegExpExec, 4, 1) \
+ F(RegExpExecTreatMatchAtEndAsFailure, 4, 1) \
+ F(RegExpExperimentalOneshotExec, 4, 1) \
+ F(RegExpExperimentalOneshotExecTreatMatchAtEndAsFailure, 4, 1) \
+ F(RegExpExecMultiple, 4, 1) \
+ F(RegExpInitializeAndCompile, 3, 1) \
+ F(RegExpReplaceRT, 3, 1) \
+ F(RegExpSplit, 3, 1) \
+ F(StringReplaceNonGlobalRegExpWithFunction, 3, 1) \
F(StringSplit, 3, 1)
#define FOR_EACH_INTRINSIC_SCOPES(F, I) \
@@ -430,9 +434,6 @@ namespace internal {
F(StringEscapeQuotes, 1, 1) \
F(StringGreaterThan, 2, 1) \
F(StringGreaterThanOrEqual, 2, 1) \
- F(StringIncludes, 3, 1) \
- F(StringIndexOf, 3, 1) \
- F(StringIndexOfUnchecked, 3, 1) \
F(StringLastIndexOf, 2, 1) \
F(StringLessThan, 2, 1) \
F(StringLessThanOrEqual, 2, 1) \
@@ -469,6 +470,8 @@ namespace internal {
F(DisallowWasmCodegen, 1, 1) \
F(DisassembleFunction, 1, 1) \
F(DynamicCheckMapsEnabled, 0, 1) \
+ F(IsTopTierTurboprop, 0, 1) \
+ F(IsMidTierTurboprop, 0, 1) \
F(EnableCodeLoggingForTesting, 0, 1) \
F(EnsureFeedbackVectorForFunction, 1, 1) \
F(FreezeWasmLazyCompilation, 1, 1) \
@@ -485,6 +488,7 @@ namespace internal {
F(HasElementsInALargeObjectSpace, 1, 1) \
F(HasFastElements, 1, 1) \
F(HasFastProperties, 1, 1) \
+ F(HasOwnConstDataProperty, 2, 1) \
F(HasFixedBigInt64Elements, 1, 1) \
F(HasFixedBigUint64Elements, 1, 1) \
F(HasFixedFloat32Elements, 1, 1) \
@@ -510,6 +514,7 @@ namespace internal {
F(IsAsmWasmCode, 1, 1) \
F(IsBeingInterpreted, 0, 1) \
F(IsConcurrentRecompilationSupported, 0, 1) \
+ F(IsDictPropertyConstTrackingEnabled, 0, 1) \
F(IsLiftoffFunction, 1, 1) \
F(IsThreadInWasm, 0, 1) \
F(IsWasmCode, 1, 1) \
@@ -523,6 +528,7 @@ namespace internal {
F(NeverOptimizeFunction, 1, 1) \
F(NotifyContextDisposed, 0, 1) \
F(OptimizeFunctionOnNextCall, -1, 1) \
+ F(TierupFunctionOnNextCall, -1, 1) \
F(OptimizeOsr, -1, 1) \
F(NewRegExpWithBacktrackLimit, 3, 1) \
F(PrepareFunctionForOptimization, -1, 1) \
@@ -536,7 +542,6 @@ namespace internal {
F(SetIteratorProtector, 0, 1) \
F(SetWasmCompileControls, 2, 1) \
F(SetWasmInstantiateControls, 0, 1) \
- F(SetWasmThreadsEnabled, 1, 1) \
F(SimulateNewspaceFull, 0, 1) \
F(ScheduleGCInStackCheck, 0, 1) \
F(StringIteratorProtector, 0, 1) \
@@ -548,9 +553,9 @@ namespace internal {
F(UnblockConcurrentRecompilation, 0, 1) \
F(WasmGetNumberOfInstances, 1, 1) \
F(WasmNumCodeSpaces, 1, 1) \
- F(WasmTierDownModule, 1, 1) \
+ F(WasmTierDown, 0, 1) \
+ F(WasmTierUp, 0, 1) \
F(WasmTierUpFunction, 2, 1) \
- F(WasmTierUpModule, 1, 1) \
F(WasmTraceEnter, 0, 1) \
F(WasmTraceExit, 1, 1) \
F(WasmTraceMemory, 1, 1) \
@@ -566,28 +571,29 @@ namespace internal {
F(TypedArraySet, 2, 1) \
F(TypedArraySortFast, 1, 1)
-#define FOR_EACH_INTRINSIC_WASM(F, I) \
- F(ThrowWasmError, 1, 1) \
- F(ThrowWasmStackOverflow, 0, 1) \
- F(WasmI32AtomicWait, 4, 1) \
- F(WasmI64AtomicWait, 5, 1) \
- F(WasmAtomicNotify, 3, 1) \
- F(WasmMemoryGrow, 2, 1) \
- F(WasmStackGuard, 0, 1) \
- F(WasmThrowCreate, 2, 1) \
- F(WasmThrowJSTypeError, 0, 1) \
- F(WasmRefFunc, 1, 1) \
- F(WasmFunctionTableGet, 3, 1) \
- F(WasmFunctionTableSet, 4, 1) \
- F(WasmTableInit, 6, 1) \
- F(WasmTableCopy, 6, 1) \
- F(WasmTableGrow, 3, 1) \
- F(WasmTableFill, 4, 1) \
- F(WasmIsValidRefValue, 3, 1) \
- F(WasmCompileLazy, 2, 1) \
- F(WasmCompileWrapper, 2, 1) \
- F(WasmTriggerTierUp, 1, 1) \
- F(WasmDebugBreak, 0, 1) \
+#define FOR_EACH_INTRINSIC_WASM(F, I) \
+ F(ThrowWasmError, 1, 1) \
+ F(ThrowWasmStackOverflow, 0, 1) \
+ F(WasmI32AtomicWait, 4, 1) \
+ F(WasmI64AtomicWait, 5, 1) \
+ F(WasmAtomicNotify, 3, 1) \
+ F(WasmMemoryGrow, 2, 1) \
+ F(WasmStackGuard, 0, 1) \
+ F(WasmThrow, 2, 1) \
+ F(WasmReThrow, 1, 1) \
+ F(WasmThrowJSTypeError, 0, 1) \
+ F(WasmRefFunc, 1, 1) \
+ F(WasmFunctionTableGet, 3, 1) \
+ F(WasmFunctionTableSet, 4, 1) \
+ F(WasmTableInit, 6, 1) \
+ F(WasmTableCopy, 6, 1) \
+ F(WasmTableGrow, 3, 1) \
+ F(WasmTableFill, 4, 1) \
+ F(WasmIsValidRefValue, 3, 1) \
+ F(WasmCompileLazy, 2, 1) \
+ F(WasmCompileWrapper, 2, 1) \
+ F(WasmTriggerTierUp, 1, 1) \
+ F(WasmDebugBreak, 0, 1) \
F(WasmAllocateRtt, 2, 1)
#define FOR_EACH_INTRINSIC_WEAKREF(F, I) \
@@ -640,7 +646,7 @@ namespace internal {
FOR_EACH_INTRINSIC_GENERATOR(F, I) \
FOR_EACH_INTRINSIC_IC(F, I) \
FOR_EACH_INTRINSIC_INTERNAL(F, I) \
- FOR_EACH_INTRINSIC_INTERPRETER(F, I) \
+ FOR_EACH_INTRINSIC_TRACE(F, I) \
FOR_EACH_INTRINSIC_INTL(F, I) \
FOR_EACH_INTRINSIC_LITERALS(F, I) \
FOR_EACH_INTRINSIC_MODULE(F, I) \
@@ -841,6 +847,7 @@ enum class OptimizationStatus {
kTopmostFrameIsTurboFanned = 1 << 11,
kLiteMode = 1 << 12,
kMarkedForDeoptimization = 1 << 13,
+ kBaseline = 1 << 14,
};
} // namespace internal
diff --git a/deps/v8/src/snapshot/context-serializer.cc b/deps/v8/src/snapshot/context-serializer.cc
index 72a5572c7c..80059e1e95 100644
--- a/deps/v8/src/snapshot/context-serializer.cc
+++ b/deps/v8/src/snapshot/context-serializer.cc
@@ -176,7 +176,9 @@ void ContextSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
// serialize optimized code anyway.
Handle<JSFunction> closure = Handle<JSFunction>::cast(obj);
closure->ResetIfBytecodeFlushed();
- if (closure->is_compiled()) closure->set_code(closure->shared().GetCode());
+ if (closure->is_compiled()) {
+ closure->set_code(closure->shared().GetCode(), kReleaseStore);
+ }
}
CheckRehashability(*obj);
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index b1e1d61588..c378653a27 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -282,7 +282,7 @@ void Deserializer::DeserializeDeferredObjects() {
void Deserializer::LogNewMapEvents() {
DisallowGarbageCollection no_gc;
for (Handle<Map> map : new_maps_) {
- DCHECK(FLAG_trace_maps);
+ DCHECK(FLAG_log_maps);
LOG(isolate(), MapCreate(*map));
LOG(isolate(), MapDetails(*map));
}
@@ -387,7 +387,7 @@ void Deserializer::PostProcessNewObject(Handle<Map> map, Handle<HeapObject> obj,
new_code_objects_.push_back(Handle<Code>::cast(obj));
}
} else if (InstanceTypeChecker::IsMap(instance_type)) {
- if (FLAG_trace_maps) {
+ if (FLAG_log_maps) {
// Keep track of all seen Maps to log them later since they might be only
// partially initialized at this point.
new_maps_.push_back(Handle<Map>::cast(obj));
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index e0409a4fd6..8debb06d8a 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -28,9 +28,10 @@ class Object;
// Used for platforms with embedded constant pools to trigger deserialization
// of objects found in code.
-#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
- defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_S390) || \
- defined(V8_TARGET_ARCH_PPC64) || V8_EMBEDDED_CONSTANT_POOL
+#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
+ defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_S390) || \
+ defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_RISCV64) || \
+ V8_EMBEDDED_CONSTANT_POOL
#define V8_CODE_EMBEDS_OBJECT_POINTER 1
#else
#define V8_CODE_EMBEDS_OBJECT_POINTER 0
diff --git a/deps/v8/src/snapshot/embedded/embedded-file-writer.cc b/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
index b472841cc6..6bf5f84088 100644
--- a/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
@@ -78,10 +78,9 @@ void EmbeddedFileWriter::WriteBuiltin(PlatformEmbeddedFileWriterBase* w,
CHECK(positions.done()); // Release builds must not contain debug infos.
#endif
- // Some builtins (ArgumentsAdaptorTrampoline and JSConstructStubGeneric) have
- // entry points located in the middle of them, we need to store their
- // addresses since they are part of the list of allowed return addresses in
- // the deoptimizer.
+ // Some builtins (JSConstructStubGeneric) have entry points located in the
+  // middle of them; we need to store their addresses since they are part of
+ // the list of allowed return addresses in the deoptimizer.
const std::vector<LabelInfo>& current_labels = label_info_[builtin_id];
auto label = current_labels.begin();
@@ -297,14 +296,12 @@ void EmbeddedFileWriter::PrepareBuiltinSourcePositionMap(Builtins* builtins) {
}
}
-void EmbeddedFileWriter::PrepareBuiltinLabelInfoMap(
- int create_offset, int invoke_offset, int arguments_adaptor_offset) {
+void EmbeddedFileWriter::PrepareBuiltinLabelInfoMap(int create_offset,
+ int invoke_offset) {
label_info_[Builtins::kJSConstructStubGeneric].push_back(
{create_offset, "construct_stub_create_deopt_addr"});
label_info_[Builtins::kJSConstructStubGeneric].push_back(
{invoke_offset, "construct_stub_invoke_deopt_addr"});
- label_info_[Builtins::kArgumentsAdaptorTrampoline].push_back(
- {arguments_adaptor_offset, "arguments_adaptor_deopt_addr"});
}
} // namespace internal
diff --git a/deps/v8/src/snapshot/embedded/embedded-file-writer.h b/deps/v8/src/snapshot/embedded/embedded-file-writer.h
index 3ed9208f41..6e7ec59f44 100644
--- a/deps/v8/src/snapshot/embedded/embedded-file-writer.h
+++ b/deps/v8/src/snapshot/embedded/embedded-file-writer.h
@@ -42,8 +42,8 @@ class EmbeddedFileWriterInterface {
// compiled builtin Code objects with trampolines.
virtual void PrepareBuiltinSourcePositionMap(Builtins* builtins) = 0;
- virtual void PrepareBuiltinLabelInfoMap(int create_offset, int invoke_offset,
- int arguments_adaptor_offset) = 0;
+ virtual void PrepareBuiltinLabelInfoMap(int create_offset,
+ int invoke_offset) = 0;
#if defined(V8_OS_WIN64)
virtual void SetBuiltinUnwindData(
@@ -69,8 +69,8 @@ class EmbeddedFileWriter : public EmbeddedFileWriterInterface {
void PrepareBuiltinSourcePositionMap(Builtins* builtins) override;
- void PrepareBuiltinLabelInfoMap(int create_offset, int invoke_create,
- int arguments_adaptor_offset) override;
+ void PrepareBuiltinLabelInfoMap(int create_offset,
+ int invoke_create) override;
#if defined(V8_OS_WIN64)
void SetBuiltinUnwindData(
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
index 891dbd94d3..3dff5d34d1 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
@@ -566,8 +566,15 @@ int PlatformEmbeddedFileWriterWin::IndentedDataDirective(
#else
+// The directives for text section prefix come from the COFF
+// (Common Object File Format) standards:
+// https://llvm.org/docs/Extensions.html
+//
+// .text$hot means this section contains hot code.
+// x means executable section.
+// r means read-only section.
void PlatformEmbeddedFileWriterWin::SectionText() {
- fprintf(fp_, ".section .text\n");
+ fprintf(fp_, ".section .text$hot,\"xr\"\n");
}
void PlatformEmbeddedFileWriterWin::SectionData() {
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index c10c097e65..720ffbe741 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -121,6 +121,12 @@ void Serializer::SerializeObject(Handle<HeapObject> obj) {
// indirection and serialize the actual string directly.
if (obj->IsThinString(isolate())) {
obj = handle(ThinString::cast(*obj).actual(isolate()), isolate());
+ } else if (obj->IsBaselineData()) {
+ // For now just serialize the BytecodeArray instead of baseline data.
+ // TODO(v8:11429,pthier): Handle BaselineData in cases we want to serialize
+ // Baseline code.
+ obj = handle(Handle<BaselineData>::cast(obj)->GetActiveBytecodeArray(),
+ isolate());
}
SerializeObjectImpl(obj);
}
@@ -638,7 +644,7 @@ void Serializer::ObjectSerializer::Serialize() {
RecursionScope recursion(serializer_);
// Defer objects as "pending" if they cannot be serialized now, or if we
- // exceed a certain recursion depth. Some objects cannot be deferred
+ // exceed a certain recursion depth. Some objects cannot be deferred.
if ((recursion.ExceedsMaximum() && CanBeDeferred(*object_)) ||
serializer_->MustBeDeferred(*object_)) {
DCHECK(CanBeDeferred(*object_));
diff --git a/deps/v8/src/snapshot/snapshot.cc b/deps/v8/src/snapshot/snapshot.cc
index 86d0544667..360bddc4b9 100644
--- a/deps/v8/src/snapshot/snapshot.cc
+++ b/deps/v8/src/snapshot/snapshot.cc
@@ -628,7 +628,7 @@ bool RunExtraCode(v8::Isolate* isolate, v8::Local<v8::Context> context,
}
v8::Local<v8::String> resource_name =
v8::String::NewFromUtf8(isolate, name).ToLocalChecked();
- v8::ScriptOrigin origin(resource_name);
+ v8::ScriptOrigin origin(isolate, resource_name);
v8::ScriptCompiler::Source source(source_string, origin);
v8::Local<v8::Script> script;
if (!v8::ScriptCompiler::Compile(context, &source).ToLocal(&script))
diff --git a/deps/v8/src/snapshot/startup-deserializer.cc b/deps/v8/src/snapshot/startup-deserializer.cc
index b019091ee9..dcf7905eee 100644
--- a/deps/v8/src/snapshot/startup-deserializer.cc
+++ b/deps/v8/src/snapshot/startup-deserializer.cc
@@ -99,7 +99,7 @@ void StartupDeserializer::DeserializeStringTable() {
}
void StartupDeserializer::LogNewMapEvents() {
- if (FLAG_trace_maps) LOG(isolate(), LogAllMaps());
+ if (FLAG_log_maps) LOG(isolate(), LogAllMaps());
}
void StartupDeserializer::FlushICache() {
diff --git a/deps/v8/src/strings/string-stream.cc b/deps/v8/src/strings/string-stream.cc
index bcde4d7951..84756896af 100644
--- a/deps/v8/src/strings/string-stream.cc
+++ b/deps/v8/src/strings/string-stream.cc
@@ -317,13 +317,8 @@ void StringStream::PrintUsingMap(JSObject js_object) {
}
Add(": ");
FieldIndex index = FieldIndex::ForDescriptor(map, i);
- if (js_object.IsUnboxedDoubleField(index)) {
- double value = js_object.RawFastDoublePropertyAt(index);
- Add("<unboxed double> %.16g\n", FmtElm(value));
- } else {
- Object value = js_object.RawFastPropertyAt(index);
- Add("%o\n", value);
- }
+ Object value = js_object.RawFastPropertyAt(index);
+ Add("%o\n", value);
}
}
}
diff --git a/deps/v8/src/torque/OWNERS b/deps/v8/src/torque/OWNERS
index 03fa4c9daa..11e743943f 100644
--- a/deps/v8/src/torque/OWNERS
+++ b/deps/v8/src/torque/OWNERS
@@ -1,6 +1,2 @@
-danno@chromium.org
-jarin@chromium.org
mvstanton@chromium.org
-sigurds@chromium.org
-szuend@chromium.org
-tebbi@chromium.org
+nicohartmann@chromium.org
diff --git a/deps/v8/src/torque/ast.h b/deps/v8/src/torque/ast.h
index 0b73c1c42f..db4f80c32d 100644
--- a/deps/v8/src/torque/ast.h
+++ b/deps/v8/src/torque/ast.h
@@ -931,7 +931,8 @@ struct ClassFieldExpression {
bool weak;
bool const_qualified;
bool generate_verify;
- bool relaxed_write;
+ FieldSynchronization read_synchronization;
+ FieldSynchronization write_synchronization;
};
struct LabelAndTypes {
diff --git a/deps/v8/src/torque/cc-generator.cc b/deps/v8/src/torque/cc-generator.cc
index 53170817a1..5eea56654d 100644
--- a/deps/v8/src/torque/cc-generator.cc
+++ b/deps/v8/src/torque/cc-generator.cc
@@ -21,22 +21,6 @@ base::Optional<Stack<std::string>> CCGenerator::EmitGraph(
parameters.Peek(i));
}
- // C++ doesn't have parameterized labels like CSA, so we must pre-declare all
- // phi values so they're in scope for both the blocks that define them and the
- // blocks that read them.
- for (Block* block : cfg_.blocks()) {
- if (block->IsDead()) continue;
-
- DCHECK_EQ(block->InputTypes().Size(), block->InputDefinitions().Size());
- for (BottomOffset i = {0}; i < block->InputTypes().AboveTop(); ++i) {
- DefinitionLocation input_def = block->InputDefinitions().Peek(i);
- if (block->InputDefinitions().Peek(i).IsPhiFromBlock(block)) {
- out() << " " << block->InputTypes().Peek(i)->GetRuntimeType() << " "
- << DefinitionToVariable(input_def) << ";\n";
- }
- }
- }
-
// Redirect the output of non-declarations into a buffer and only output
// declarations right away.
std::stringstream out_buffer;
@@ -74,8 +58,10 @@ Stack<std::string> CCGenerator::EmitBlock(const Block* block) {
const auto& def = block->InputDefinitions().Peek(i);
stack.Push(DefinitionToVariable(def));
if (def.IsPhiFromBlock(block)) {
- decls() << " " << block->InputTypes().Peek(i)->GetRuntimeType() << " "
- << stack.Top() << "{}; USE(" << stack.Top() << ");\n";
+ decls() << " "
+ << (is_cc_debug_ ? block->InputTypes().Peek(i)->GetDebugType()
+ : block->InputTypes().Peek(i)->GetRuntimeType())
+ << " " << stack.Top() << "{}; USE(" << stack.Top() << ");\n";
}
}
@@ -151,8 +137,10 @@ void CCGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
for (std::size_t i = 0; i < lowered.size(); ++i) {
results.push_back(DefinitionToVariable(instruction.GetValueDefinition(i)));
stack->Push(results.back());
- decls() << " " << lowered[i]->GetRuntimeType() << " " << stack->Top()
- << "{}; USE(" << stack->Top() << ");\n";
+ decls() << " "
+ << (is_cc_debug_ ? lowered[i]->GetDebugType()
+ : lowered[i]->GetRuntimeType())
+ << " " << stack->Top() << "{}; USE(" << stack->Top() << ");\n";
}
out() << " ";
@@ -194,7 +182,16 @@ void CCGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
if (return_type->IsConstexpr()) {
ReportError("%FromConstexpr must return a non-constexpr type");
}
- // Nothing to do here; constexpr expressions are already valid C++.
+ if (return_type->IsSubtypeOf(TypeOracle::GetSmiType())) {
+ if (is_cc_debug_) {
+ out() << "Internals::IntToSmi";
+ } else {
+ out() << "Smi::FromInt";
+ }
+ }
+ // Wrap the raw constexpr value in a static_cast to ensure that
+  // enums get properly cast to their backing integral value.
+ out() << "(CastToUnderlyingTypeIfEnum";
} else {
ReportError("no built in intrinsic with name " +
instruction.intrinsic->ExternalName());
@@ -202,6 +199,9 @@ void CCGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
out() << "(";
PrintCommaSeparatedList(out(), args);
+ if (instruction.intrinsic->ExternalName() == "%FromConstexpr") {
+ out() << ")";
+ }
out() << ");\n";
}
@@ -220,29 +220,39 @@ void CCGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
for (std::size_t i = 0; i < lowered.size(); ++i) {
results.push_back(DefinitionToVariable(instruction.GetValueDefinition(i)));
stack->Push(results.back());
- decls() << " " << lowered[i]->GetRuntimeType() << " " << stack->Top()
- << "{}; USE(" << stack->Top() << ");\n";
+ decls() << " "
+ << (is_cc_debug_ ? lowered[i]->GetDebugType()
+ : lowered[i]->GetRuntimeType())
+ << " " << stack->Top() << "{}; USE(" << stack->Top() << ");\n";
}
// We should have inlined any calls requiring complex control flow.
CHECK(!instruction.catch_block);
- out() << " ";
+ out() << (is_cc_debug_ ? " ASSIGN_OR_RETURN(" : " ");
if (return_type->StructSupertype().has_value()) {
out() << "std::tie(";
PrintCommaSeparatedList(out(), results);
- out() << ") = ";
+ out() << (is_cc_debug_ ? "), " : ") = ");
} else {
if (results.size() == 1) {
- out() << results[0] << " = ";
+ out() << results[0] << (is_cc_debug_ ? ", " : " = ");
} else {
DCHECK_EQ(0, results.size());
}
}
- out() << instruction.macro->CCName() << "(isolate";
- if (!args.empty()) out() << ", ";
+ if (is_cc_debug_) {
+ out() << instruction.macro->CCDebugName() << "(accessor";
+ if (!args.empty()) out() << ", ";
+ } else {
+ out() << instruction.macro->CCName() << "(";
+ }
PrintCommaSeparatedList(out(), args);
- out() << ");\n";
+ if (is_cc_debug_) {
+ out() << "));\n";
+ } else {
+ out() << ");\n";
+ }
}
void CCGenerator::EmitInstruction(
@@ -251,6 +261,11 @@ void CCGenerator::EmitInstruction(
ReportError("Not supported in C++ output: CallCsaMacroAndBranch");
}
+void CCGenerator::EmitInstruction(const MakeLazyNodeInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: MakeLazyNode");
+}
+
void CCGenerator::EmitInstruction(const CallBuiltinInstruction& instruction,
Stack<std::string>* stack) {
ReportError("Not supported in C++ output: CallBuiltin");
@@ -364,16 +379,44 @@ void CCGenerator::EmitInstruction(const LoadReferenceInstruction& instruction,
std::string object = stack->Pop();
stack->Push(result_name);
- std::string result_type = instruction.type->GetRuntimeType();
- decls() << " " << result_type << " " << result_name << "{}; USE("
- << result_name << ");\n";
- out() << " " << result_name << " = ";
- if (instruction.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
- out() << "TaggedField<" << result_type << ">::load(isolate, " << object
- << ", static_cast<int>(" << offset << "));\n";
+ if (!is_cc_debug_) {
+ std::string result_type = instruction.type->GetRuntimeType();
+ decls() << " " << result_type << " " << result_name << "{}; USE("
+ << result_name << ");\n";
+ out() << " " << result_name << " = ";
+ if (instruction.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ // Currently, all of the tagged loads we emit are for smi values, so there
+ // is no point in providing an IsolateRoot. If at some point we start
+ // emitting loads for tagged fields which might be HeapObjects, then we
+ // should plumb an IsolateRoot through the generated functions that need
+ // it.
+ if (!instruction.type->IsSubtypeOf(TypeOracle::GetSmiType())) {
+ Error(
+ "Not supported in C++ output: LoadReference on non-smi tagged "
+ "value");
+ }
+
+ // References and slices can cause some values to have the Torque type
+ // HeapObject|TaggedZeroPattern, which is output as "Object". TaggedField
+ // requires HeapObject, so we need a cast.
+ out() << "TaggedField<" << result_type
+ << ">::load(*static_cast<HeapObject*>(&" << object
+ << "), static_cast<int>(" << offset << "));\n";
+ } else {
+ out() << "(" << object << ").ReadField<" << result_type << ">(" << offset
+ << ");\n";
+ }
} else {
- out() << "(" << object << ").ReadField<" << result_type << ">(" << offset
- << ");\n";
+ std::string result_type = instruction.type->GetDebugType();
+ decls() << " " << result_type << " " << result_name << "{}; USE("
+ << result_name << ");\n";
+ if (instruction.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ out() << " READ_TAGGED_FIELD_OR_FAIL(" << result_name << ", accessor, "
+ << object << ", static_cast<int>(" << offset << "));\n";
+ } else {
+ out() << " READ_FIELD_OR_FAIL(" << result_type << ", " << result_name
+ << ", accessor, " << object << ", " << offset << ");\n";
+ }
}
}
@@ -411,13 +454,17 @@ void CCGenerator::EmitInstruction(const LoadBitFieldInstruction& instruction,
Type::MatchUnaryGeneric(struct_type, TypeOracle::GetSmiTaggedGeneric());
if (smi_tagged_type) {
// Get the untagged value and its type.
- bit_field_struct = bit_field_struct + ".value()";
+ if (is_cc_debug_) {
+ bit_field_struct = "Internals::SmiValue(" + bit_field_struct + ")";
+ } else {
+ bit_field_struct = bit_field_struct + ".value()";
+ }
struct_type = *smi_tagged_type;
}
- out() << " " << result_name << " = "
+ out() << " " << result_name << " = CastToUnderlyingTypeIfEnum("
<< GetBitFieldSpecialization(struct_type, instruction.bit_field)
- << "::decode(" << bit_field_struct << ");\n";
+ << "::decode(" << bit_field_struct << "));\n";
}
void CCGenerator::EmitInstruction(const StoreBitFieldInstruction& instruction,
diff --git a/deps/v8/src/torque/cc-generator.h b/deps/v8/src/torque/cc-generator.h
index 5626f3f7fa..2b3031f228 100644
--- a/deps/v8/src/torque/cc-generator.h
+++ b/deps/v8/src/torque/cc-generator.h
@@ -13,14 +13,17 @@ namespace torque {
class CCGenerator : public TorqueCodeGenerator {
public:
- CCGenerator(const ControlFlowGraph& cfg, std::ostream& out)
- : TorqueCodeGenerator(cfg, out) {}
+ CCGenerator(const ControlFlowGraph& cfg, std::ostream& out,
+ bool is_cc_debug = false)
+ : TorqueCodeGenerator(cfg, out), is_cc_debug_(is_cc_debug) {}
base::Optional<Stack<std::string>> EmitGraph(Stack<std::string> parameters);
static void EmitCCValue(VisitResult result, const Stack<std::string>& values,
std::ostream& out);
private:
+ bool is_cc_debug_;
+
void EmitSourcePosition(SourcePosition pos,
bool always_emit = false) override;
diff --git a/deps/v8/src/torque/cfg.h b/deps/v8/src/torque/cfg.h
index a340a697b4..33f803cc3a 100644
--- a/deps/v8/src/torque/cfg.h
+++ b/deps/v8/src/torque/cfg.h
@@ -116,13 +116,18 @@ class ControlFlowGraph {
Block* start() const { return start_; }
base::Optional<Block*> end() const { return end_; }
void set_end(Block* end) { end_ = end; }
- void SetReturnType(const Type* t) {
+ void SetReturnType(TypeVector t) {
if (!return_type_) {
return_type_ = t;
return;
}
if (t != *return_type_) {
- ReportError("expected return type ", **return_type_, " instead of ", *t);
+ std::stringstream message;
+ message << "expected return type ";
+ PrintCommaSeparatedList(message, *return_type_);
+ message << " instead of ";
+ PrintCommaSeparatedList(message, t);
+ ReportError(message.str());
}
}
const std::vector<Block*>& blocks() const { return placed_blocks_; }
@@ -136,7 +141,7 @@ class ControlFlowGraph {
Block* start_;
std::vector<Block*> placed_blocks_;
base::Optional<Block*> end_;
- base::Optional<const Type*> return_type_;
+ base::Optional<TypeVector> return_type_;
size_t next_block_id_ = 0;
};
diff --git a/deps/v8/src/torque/class-debug-reader-generator.cc b/deps/v8/src/torque/class-debug-reader-generator.cc
index 8a0575cc57..fe2a85fcd0 100644
--- a/deps/v8/src/torque/class-debug-reader-generator.cc
+++ b/deps/v8/src/torque/class-debug-reader-generator.cc
@@ -334,20 +334,24 @@ void GenerateFieldValueAccessor(const Field& field,
// 0, // Bitfield size (0=not a bitfield)
// 0)); // Bitfield shift
// // The line above is repeated for other struct fields. Omitted here.
-// Value<uint16_t> indexed_field_count =
-// GetNumberOfAllDescriptorsValue(accessor); // Fetch the array length.
-// result.push_back(std::make_unique<ObjectProperty>(
+// // Fetch the slice.
+// auto indexed_field_slice_descriptors =
+// TqDebugFieldSliceDescriptorArrayDescriptors(accessor, address_);
+// if (indexed_field_slice_descriptors.validity == d::MemoryAccessResult::kOk) {
+// result.push_back(std::make_unique<ObjectProperty>(
// "descriptors", // Field name
// "", // Field type
// "", // Decompressed type
-// GetDescriptorsAddress(), // Field address
-// indexed_field_count.value, // Number of values
-// 24, // Size of value
+// address_ - i::kHeapObjectTag +
+// std::get<1>(indexed_field_slice_descriptors.value), // Field address
+// std::get<2>(indexed_field_slice_descriptors.value), // Number of values
+// 12, // Size of value
// std::move(descriptors_struct_field_list), // Struct fields
-// GetArrayKind(indexed_field_count.validity))); // Field kind
+// GetArrayKind(indexed_field_slice_descriptors.validity))); // Field kind
+// }
void GenerateGetPropsChunkForField(const Field& field,
- base::Optional<NameAndType> array_length,
- std::ostream& get_props_impl) {
+ std::ostream& get_props_impl,
+ std::string class_name) {
DebugFieldType debug_field_type(field);
// If the current field is a struct or bitfield struct, create a vector
@@ -376,27 +380,31 @@ void GenerateGetPropsChunkForField(const Field& field,
// If the field is indexed, emit a fetch of the array length, and change
// count_value and property_kind to be the correct values for an array.
- if (array_length) {
- const Type* index_type = array_length->type;
- std::string index_type_name;
- if (index_type == TypeOracle::GetSmiType()) {
- index_type_name = "uintptr_t";
- count_value =
- "i::PlatformSmiTagging::SmiToInt(indexed_field_count.value)";
- } else if (!index_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
- index_type_name = index_type->GetConstexprGeneratedTypeName();
- count_value = "indexed_field_count.value";
- } else {
- Error("Unsupported index type: ", index_type);
- return;
- }
- get_props_impl << " Value<" << index_type_name
- << "> indexed_field_count = Get"
- << CamelifyString(array_length->name)
- << "Value(accessor);\n";
- property_kind = "GetArrayKind(indexed_field_count.validity)";
+ if (field.index) {
+ std::string indexed_field_slice =
+ "indexed_field_slice_" + field.name_and_type.name;
+ get_props_impl << " auto " << indexed_field_slice << " = "
+ << "TqDebugFieldSlice" << class_name
+ << CamelifyString(field.name_and_type.name)
+ << "(accessor, address_);\n";
+ std::string validity = indexed_field_slice + ".validity";
+ std::string value = indexed_field_slice + ".value";
+ property_kind = "GetArrayKind(" + validity + ")";
+
+ get_props_impl << " if (" << validity
+ << " == d::MemoryAccessResult::kOk) {\n"
+ << " result.push_back(std::make_unique<ObjectProperty>(\""
+ << field.name_and_type.name << "\", "
+ << debug_field_type.GetTypeString(kAsStoredInHeap) << ", "
+ << debug_field_type.GetTypeString(kUncompressed) << ", "
+ << "address_ - i::kHeapObjectTag + std::get<1>(" << value
+ << "), "
+ << "std::get<2>(" << value << ")"
+ << ", " << debug_field_type.GetSize() << ", "
+ << struct_field_list << ", " << property_kind << "));\n"
+ << " }\n";
+ return;
}
-
get_props_impl << " result.push_back(std::make_unique<ObjectProperty>(\""
<< field.name_and_type.name << "\", "
<< debug_field_type.GetTypeString(kAsStoredInHeap) << ", "
@@ -499,21 +507,11 @@ void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents,
for (const Field& field : type.fields()) {
if (field.name_and_type.type == TypeOracle::GetVoidType()) continue;
- if (!field.offset.has_value()) {
- // Fields with dynamic offset are currently unsupported.
- continue;
- }
- GenerateFieldAddressAccessor(field, name, h_contents, cc_contents);
- GenerateFieldValueAccessor(field, name, h_contents, cc_contents);
- base::Optional<NameAndType> array_length;
- if (field.index) {
- array_length = ExtractSimpleFieldArraySize(type, *field.index);
- if (!array_length) {
- // Unsupported complex array length, skipping this field.
- continue;
- }
+ if (field.offset.has_value()) {
+ GenerateFieldAddressAccessor(field, name, h_contents, cc_contents);
+ GenerateFieldValueAccessor(field, name, h_contents, cc_contents);
}
- GenerateGetPropsChunkForField(field, array_length, get_props_impl);
+ GenerateGetPropsChunkForField(field, get_props_impl, name);
}
h_contents << "};\n";
@@ -556,6 +554,9 @@ void ImplementationVisitor::GenerateClassDebugReaders(
cc_contents << "#include " << StringLiteralQuote(include_path) << "\n";
}
cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n";
+ cc_contents << "#include \"torque-generated/"
+ << "debug-macros"
+ << ".h\"\n";
cc_contents << "#include \"include/v8-internal.h\"\n\n";
cc_contents << "namespace i = v8::internal;\n\n";
diff --git a/deps/v8/src/torque/constants.h b/deps/v8/src/torque/constants.h
index f8b57efaa0..bd720bf0ac 100644
--- a/deps/v8/src/torque/constants.h
+++ b/deps/v8/src/torque/constants.h
@@ -73,6 +73,7 @@ static const char* const MUTABLE_SLICE_TYPE_STRING = "MutableSlice";
static const char* const CONST_SLICE_TYPE_STRING = "ConstSlice";
static const char* const WEAK_TYPE_STRING = "Weak";
static const char* const SMI_TAGGED_TYPE_STRING = "SmiTagged";
+static const char* const LAZY_TYPE_STRING = "Lazy";
static const char* const UNINITIALIZED_ITERATOR_TYPE_STRING =
"UninitializedIterator";
static const char* const GENERIC_TYPE_INSTANTIATION_NAMESPACE_STRING =
@@ -106,7 +107,14 @@ static const char* const ANNOTATION_DO_NOT_GENERATE_CAST = "@doNotGenerateCast";
static const char* const ANNOTATION_USE_PARENT_TYPE_CHECKER =
"@useParentTypeChecker";
// Generate C++ accessors with relaxed write semantics.
+// Weak<T> and MaybeObject fields always use relaxed write.
static const char* const ANNOTATION_RELAXED_WRITE = "@relaxedWrite";
+// Generate C++ accessors with relaxed read semantics.
+static const char* const ANNOTATION_RELAXED_READ = "@relaxedRead";
+// Generate C++ accessors with release write semantics.
+static const char* const ANNOTATION_RELEASE_WRITE = "@releaseWrite";
+// Generate C++ accessors with acquire read semantics.
+static const char* const ANNOTATION_ACQUIRE_READ = "@acquireRead";
inline bool IsConstexprName(const std::string& name) {
return name.substr(0, std::strlen(CONSTEXPR_TYPE_PREFIX)) ==
@@ -155,6 +163,12 @@ using ClassFlags = base::Flags<ClassFlag>;
enum class StructFlag { kNone = 0, kExport = 1 << 0 };
using StructFlags = base::Flags<StructFlag>;
+enum class FieldSynchronization {
+ kNone,
+ kRelaxed,
+ kAcquireRelease,
+};
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/csa-generator.cc b/deps/v8/src/torque/csa-generator.cc
index 93e8d47df4..9254a7ea6b 100644
--- a/deps/v8/src/torque/csa-generator.cc
+++ b/deps/v8/src/torque/csa-generator.cc
@@ -102,7 +102,7 @@ void CSAGenerator::EmitSourcePosition(SourcePosition pos, bool always_emit) {
void CSAGenerator::EmitInstruction(
const PushUninitializedInstruction& instruction,
Stack<std::string>* stack) {
- // TODO(tebbi): This can trigger an error in CSA if it is used. Instead, we
+ // TODO(turbofan): This can trigger an error in CSA if it is used. Instead, we
// should prevent usage of uninitialized in the type system. This
// requires "if constexpr" being evaluated at Torque time.
const std::string str = "ca_.Uninitialized<" +
@@ -479,6 +479,41 @@ void CSAGenerator::EmitInstruction(
}
}
+void CSAGenerator::EmitInstruction(const MakeLazyNodeInstruction& instruction,
+ Stack<std::string>* stack) {
+ TypeVector parameter_types =
+ instruction.macro->signature().parameter_types.types;
+ std::vector<std::string> args = ProcessArgumentsCommon(
+ parameter_types, instruction.constexpr_arguments, stack);
+
+ std::string result_name =
+ DefinitionToVariable(instruction.GetValueDefinition());
+
+ stack->Push(result_name);
+
+ decls() << " " << instruction.result_type->GetGeneratedTypeName() << " "
+ << result_name << ";\n";
+
+ // We assume here that the CodeAssemblerState will outlive any usage of
+ // the generated std::function that binds it. Likewise, copies of TNode values
+ // are only valid during generation of the current builtin.
+ out() << " " << result_name << " = [=] () { return ";
+ bool first = true;
+ if (const ExternMacro* extern_macro =
+ ExternMacro::DynamicCast(instruction.macro)) {
+ out() << extern_macro->external_assembler_name() << "(state_)."
+ << extern_macro->ExternalName() << "(";
+ } else {
+ out() << instruction.macro->ExternalName() << "(state_";
+ first = false;
+ }
+ if (!args.empty()) {
+ if (!first) out() << ", ";
+ PrintCommaSeparatedList(out(), args);
+ }
+ out() << "); };\n";
+}
+
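MakeLazyNodeInstruction defers evaluation of a getter macro by wrapping the call in a value-capturing lambda, relying on the assumption noted in the comment that the CodeAssemblerState outlives the lambda. For a Torque-defined getter the emitted CSA code looks roughly like this sketch (variable and macro names are illustrative; the generated type is assumed to be the LazyNode alias for std::function<TNode<T>()>):

    // decls():
    LazyNode<Smi> tmp2;
    // out():
    tmp2 = [=] () { return ExampleGetterMacro_0(state_, tmp1); };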
void CSAGenerator::EmitInstruction(const CallBuiltinInstruction& instruction,
Stack<std::string>* stack) {
std::vector<std::string> arguments = stack->PopMany(instruction.argc);
@@ -496,23 +531,47 @@ void CSAGenerator::EmitInstruction(const CallBuiltinInstruction& instruction,
}
out() << ");\n";
} else {
- std::string result_name;
- if (result_types.size() == 1) {
- result_name = DefinitionToVariable(instruction.GetValueDefinition(0));
- decls() << " TNode<" << result_types[0]->GetGeneratedTNodeTypeName()
- << "> " << result_name << ";\n";
+ std::vector<std::string> result_names(result_types.size());
+ for (size_t i = 0; i < result_types.size(); ++i) {
+ result_names[i] = DefinitionToVariable(instruction.GetValueDefinition(i));
+ decls() << " TNode<" << result_types[i]->GetGeneratedTNodeTypeName()
+ << "> " << result_names[i] << ";\n";
}
+
+ std::string lhs_name;
+ std::string lhs_type;
+ switch (result_types.size()) {
+ case 1:
+ lhs_name = result_names[0];
+ lhs_type = result_types[0]->GetGeneratedTNodeTypeName();
+ break;
+ case 2:
+ // If a builtin returns two values, the return type is represented as a
+ // TNode containing a pair. We need a temporary place to store that
+ // result so we can unpack it into separate TNodes.
+ lhs_name = result_names[0] + "_and_" + result_names[1];
+ lhs_type = "PairT<" + result_types[0]->GetGeneratedTNodeTypeName() +
+ ", " + result_types[1]->GetGeneratedTNodeTypeName() + ">";
+ decls() << " TNode<" << lhs_type << "> " << lhs_name << ";\n";
+ break;
+ default:
+ ReportError(
+ "Torque can only call builtins that return one or two values, not ",
+ result_types.size());
+ }
+
std::string catch_name =
PreCallableExceptionPreparation(instruction.catch_block);
Stack<std::string> pre_call_stack = *stack;
- DCHECK_EQ(1, result_types.size());
std::string generated_type = result_types[0]->GetGeneratedTNodeTypeName();
- stack->Push(result_name);
- out() << " " << result_name << " = ";
- if (generated_type != "Object") out() << "TORQUE_CAST(";
- out() << "CodeStubAssembler(state_).CallBuiltin(Builtins::k"
- << instruction.builtin->ExternalName();
+ for (const std::string& name : result_names) {
+ stack->Push(name);
+ }
+ out() << " " << lhs_name << " = ";
+ out() << "ca_.CallStub<" << lhs_type
+ << ">(Builtins::CallableFor(ca_.isolate(), Builtins::k"
+ << instruction.builtin->ExternalName() << ")";
if (!instruction.builtin->signature().HasContextParameter()) {
// Add dummy context parameter to satisfy the CallBuiltin signature.
out() << ", TNode<Object>()";
@@ -520,9 +579,15 @@ void CSAGenerator::EmitInstruction(const CallBuiltinInstruction& instruction,
for (const std::string& argument : arguments) {
out() << ", " << argument;
}
- if (generated_type != "Object") out() << ")";
out() << ");\n";
+ if (result_types.size() > 1) {
+ for (size_t i = 0; i < result_types.size(); ++i) {
+ out() << " " << result_names[i] << " = ca_.Projection<" << i << ">("
+ << lhs_name << ");\n";
+ }
+ }
+
PostCallableExceptionPreparation(
catch_name,
result_types.size() == 0 ? TypeOracle::GetVoidType() : result_types[0],
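For a stub-linkage builtin that returns two values, the call is now emitted through CallStub with a PairT result that is then split with Projection. A rough sketch of the generated code (builtin name, argument, and types are illustrative):

    TNode<Object> tmp1;
    TNode<IntPtrT> tmp2;
    TNode<PairT<Object, IntPtrT>> tmp1_and_tmp2;
    tmp1_and_tmp2 = ca_.CallStub<PairT<Object, IntPtrT>>(
        Builtins::CallableFor(ca_.isolate(), Builtins::kExampleBuiltin),
        TNode<Object>() /* dummy context */, tmp0);
    tmp1 = ca_.Projection<0>(tmp1_and_tmp2);
    tmp2 = ca_.Projection<1>(tmp1_and_tmp2);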
@@ -771,7 +836,9 @@ void CSAGenerator::EmitInstruction(const ReturnInstruction& instruction,
} else {
out() << " CodeStubAssembler(state_).Return(";
}
- out() << stack->Pop() << ");\n";
+ std::vector<std::string> values = stack->PopMany(instruction.count);
+ PrintCommaSeparatedList(out(), values);
+ out() << ");\n";
}
void CSAGenerator::EmitInstruction(
@@ -980,7 +1047,7 @@ void CSAGenerator::EmitCSAValue(VisitResult result,
out << "}";
} else {
DCHECK_EQ(1, result.stack_range().Size());
- out << "TNode<" << result.type()->GetGeneratedTNodeTypeName() << ">{"
+ out << result.type()->GetGeneratedTypeName() << "{"
<< values.Peek(result.stack_range().begin()) << "}";
}
}
diff --git a/deps/v8/src/torque/declarable.cc b/deps/v8/src/torque/declarable.cc
index 7ed9d4c5e1..479a1249b3 100644
--- a/deps/v8/src/torque/declarable.cc
+++ b/deps/v8/src/torque/declarable.cc
@@ -18,6 +18,18 @@ namespace torque {
DEFINE_CONTEXTUAL_VARIABLE(CurrentScope)
+QualifiedName QualifiedName::Parse(std::string qualified_name) {
+ std::vector<std::string> qualifications;
+ while (true) {
+ size_t namespace_delimiter_index = qualified_name.find("::");
+ if (namespace_delimiter_index == std::string::npos) break;
+ qualifications.push_back(
+ qualified_name.substr(0, namespace_delimiter_index));
+ qualified_name = qualified_name.substr(namespace_delimiter_index + 2);
+ }
+ return QualifiedName(qualifications, qualified_name);
+}
+
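Parse splits a fully qualified name on "::" into its namespace qualifications and the trailing identifier; for example (hypothetical name):

    QualifiedName name = QualifiedName::Parse("foo::bar::ExampleMacro");
    // name.namespace_qualification == {"foo", "bar"}; name.name == "ExampleMacro"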
std::ostream& operator<<(std::ostream& os, const QualifiedName& name) {
for (const std::string& qualifier : name.namespace_qualification) {
os << qualifier << "::";
diff --git a/deps/v8/src/torque/declarable.h b/deps/v8/src/torque/declarable.h
index 27edf79636..f8c878d329 100644
--- a/deps/v8/src/torque/declarable.h
+++ b/deps/v8/src/torque/declarable.h
@@ -36,6 +36,8 @@ struct QualifiedName {
explicit QualifiedName(std::string name)
: QualifiedName({}, std::move(name)) {}
+ static QualifiedName Parse(std::string qualified_name);
+
bool HasNamespaceQualification() const {
return !namespace_qualification.empty();
}
@@ -294,6 +296,7 @@ class ExternConstant : public Value {
enum class OutputType {
kCSA,
kCC,
+ kCCDebug,
};
class Callable : public Scope {
@@ -329,11 +332,23 @@ class Callable : public Scope {
return "TqRuntime" + name;
}
+ static std::string PrefixNameForCCDebugOutput(const std::string& name) {
+ // If a Torque macro requires a C++ debug function to be generated, then
+ // the generated function begins with this prefix to avoid any naming
+ // collisions with the generated CSA function for the same macro.
+ return "TqDebug" + name;
+ }
+
// Name to use in runtime C++ code.
virtual std::string CCName() const {
return PrefixNameForCCOutput(ExternalName());
}
+ // Name to use in debug C++ code.
+ virtual std::string CCDebugName() const {
+ return PrefixNameForCCDebugOutput(ExternalName());
+ }
+
protected:
Callable(Declarable::Kind kind, std::string external_name,
std::string readable_name, Signature signature,
@@ -403,6 +418,11 @@ class ExternMacro : public Macro {
"::" + ExternalName();
}
+ std::string CCDebugName() const override {
+ return "TorqueDebugMacroShims::" + external_assembler_name() +
+ "::" + ExternalName();
+ }
+
private:
friend class Declarations;
ExternMacro(const std::string& name, std::string external_assembler_name,
@@ -424,6 +444,12 @@ class TorqueMacro : public Macro {
return PrefixNameForCCOutput(IsExportedToCSA() ? ReadableName()
: ExternalName());
}
+ std::string CCDebugName() const override {
+ // Exported functions must have unique and C++-friendly readable names, so
+ // prefer those wherever possible.
+ return PrefixNameForCCDebugOutput(IsExportedToCSA() ? ReadableName()
+ : ExternalName());
+ }
protected:
TorqueMacro(Declarable::Kind kind, std::string external_name,
diff --git a/deps/v8/src/torque/declaration-visitor.cc b/deps/v8/src/torque/declaration-visitor.cc
index 1b60f52b27..faf46b18e9 100644
--- a/deps/v8/src/torque/declaration-visitor.cc
+++ b/deps/v8/src/torque/declaration-visitor.cc
@@ -98,9 +98,11 @@ Builtin* DeclarationVisitor::CreateBuiltin(BuiltinDeclaration* decl,
}
}
- if (signature.return_type->StructSupertype()) {
- Error("Builtins cannot return structs, but the return type is ",
- *signature.return_type, ".");
+ if (signature.return_type->StructSupertype() && javascript) {
+ Error(
+ "Builtins with JS linkage cannot return structs, but the return type "
+ "is ",
+ *signature.return_type, ".");
}
if (signature.return_type == TypeOracle::GetVoidType()) {
diff --git a/deps/v8/src/torque/declarations.h b/deps/v8/src/torque/declarations.h
index d21e7563af..d417e45ca2 100644
--- a/deps/v8/src/torque/declarations.h
+++ b/deps/v8/src/torque/declarations.h
@@ -33,6 +33,15 @@ std::vector<T*> FilterDeclarables(const std::vector<Declarable*> list) {
return result;
}
+inline std::string UnwrapTNodeTypeName(const std::string& generates) {
+ if (generates.length() < 7 || generates.substr(0, 6) != "TNode<" ||
+ generates.substr(generates.length() - 1, 1) != ">") {
+ ReportError("generated type \"", generates,
+ "\" should be of the form \"TNode<...>\"");
+ }
+ return generates.substr(6, generates.length() - 7);
+}
+
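UnwrapTNodeTypeName extracts the element type from a "generates" string of the form TNode<...> and reports an error for anything else; for example:

    std::string inner = UnwrapTNodeTypeName("TNode<FixedArray>");  // "FixedArray"
    // UnwrapTNodeTypeName("FixedArray") would hit ReportError(...).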
class Declarations {
public:
static std::vector<Declarable*> TryLookup(const QualifiedName& name) {
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index 9e2e4faef3..8769e8c9a9 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -149,6 +149,49 @@ void ImplementationVisitor::EndGeneratedFiles() {
}
}
+void ImplementationVisitor::BeginDebugMacrosFile() {
+ std::ostream& source = debug_macros_cc_;
+ std::ostream& header = debug_macros_h_;
+
+ source << "#include \"torque-generated/debug-macros.h\"\n\n";
+ source << "#include \"tools/debug_helper/debug-macro-shims.h\"\n";
+ source << "#include \"include/v8-internal.h\"\n";
+ source << "\n";
+
+ source << "namespace v8 {\n"
+ << "namespace internal {\n"
+ << "namespace debug_helper_internal {\n"
+ << "\n";
+
+ const char* kHeaderDefine = "V8_GEN_TORQUE_GENERATED_DEBUG_MACROS_H_";
+ header << "#ifndef " << kHeaderDefine << "\n";
+ header << "#define " << kHeaderDefine << "\n\n";
+ header << "#include \"src/builtins/torque-csa-header-includes.h\"\n";
+ header << "#include \"tools/debug_helper/debug-helper-internal.h\"\n";
+ header << "\n";
+
+ header << "namespace v8 {\n"
+ << "namespace internal {\n"
+ << "namespace debug_helper_internal{\n"
+ << "\n";
+}
+
+void ImplementationVisitor::EndDebugMacrosFile() {
+ std::ostream& source = debug_macros_cc_;
+ std::ostream& header = debug_macros_h_;
+
+ source << "} // namespace internal\n"
+ << "} // namespace v8\n"
+ << "} // namespace debug_helper_internal\n"
+ << "\n";
+
+ header << "\n} // namespace internal\n"
+ << "} // namespace v8\n"
+ << "} // namespace debug_helper_internal\n"
+ << "\n";
+ header << "#endif // V8_GEN_TORQUE_GENERATED_DEBUG_MACROS_H_\n";
+}
+
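Begin/EndDebugMacrosFile emit the framing of the new torque-generated/debug-macros.h and .cc files. The header skeleton comes out roughly as follows (the example declaration is illustrative; its exact name and types depend on the macro being generated):

    #ifndef V8_GEN_TORQUE_GENERATED_DEBUG_MACROS_H_
    #define V8_GEN_TORQUE_GENERATED_DEBUG_MACROS_H_

    #include "src/builtins/torque-csa-header-includes.h"
    #include "tools/debug_helper/debug-helper-internal.h"

    namespace v8 {
    namespace internal {
    namespace debug_helper_internal {

    // One declaration per debug macro, e.g.:
    // Value<uintptr_t> TqDebugExampleMacro(d::MemoryAccessor accessor,
    //                                      uintptr_t o);

    }  // namespace debug_helper_internal
    }  // namespace internal
    }  // namespace v8

    #endif  // V8_GEN_TORQUE_GENERATED_DEBUG_MACROS_H_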
void ImplementationVisitor::Visit(NamespaceConstant* decl) {
Signature signature{{}, base::nullopt, {{}, false}, 0, decl->type(),
{}, false};
@@ -322,6 +365,11 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
if (output_type_ == OutputType::kCC) {
csa_ccfile() << "#ifndef V8_INTERNAL_DEFINED_" << macro->CCName() << "\n";
csa_ccfile() << "#define V8_INTERNAL_DEFINED_" << macro->CCName() << "\n";
+ } else if (output_type_ == OutputType::kCCDebug) {
+ csa_ccfile() << "#ifndef V8_INTERNAL_DEFINED_" << macro->CCDebugName()
+ << "\n";
+ csa_ccfile() << "#define V8_INTERNAL_DEFINED_" << macro->CCDebugName()
+ << "\n";
}
GenerateMacroFunctionDeclaration(csa_ccfile(), macro);
@@ -331,7 +379,7 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
// For now, generated C++ is only for field offset computations. If we ever
// generate C++ code that can allocate, then it should be handlified.
csa_ccfile() << " DisallowGarbageCollection no_gc;\n";
- } else {
+ } else if (output_type_ == OutputType::kCSA) {
csa_ccfile() << " compiler::CodeAssembler ca_(state_);\n";
csa_ccfile()
<< " compiler::CodeAssembler::SourcePositionScope pos_scope(&ca_);\n";
@@ -421,6 +469,9 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
if (output_type_ == OutputType::kCC) {
CCGenerator cc_generator{assembler().Result(), csa_ccfile()};
values = cc_generator.EmitGraph(lowered_parameters);
+ } else if (output_type_ == OutputType::kCCDebug) {
+ CCGenerator cc_generator{assembler().Result(), csa_ccfile(), true};
+ values = cc_generator.EmitGraph(lowered_parameters);
} else {
CSAGenerator csa_generator{assembler().Result(), csa_ccfile()};
values = csa_generator.EmitGraph(lowered_parameters);
@@ -430,7 +481,11 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
if (has_return_value) {
csa_ccfile() << " return ";
- if (output_type_ == OutputType::kCC) {
+ if (output_type_ == OutputType::kCCDebug) {
+ csa_ccfile() << "{d::MemoryAccessResult::kOk, ";
+ CCGenerator::EmitCCValue(return_value, *values, csa_ccfile());
+ csa_ccfile() << "}";
+ } else if (output_type_ == OutputType::kCC) {
CCGenerator::EmitCCValue(return_value, *values, csa_ccfile());
} else {
CSAGenerator::EmitCSAValue(return_value, *values, csa_ccfile());
@@ -441,6 +496,9 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
if (output_type_ == OutputType::kCC) {
csa_ccfile() << "#endif // V8_INTERNAL_DEFINED_" << macro->CCName()
<< "\n";
+ } else if (output_type_ == OutputType::kCCDebug) {
+ csa_ccfile() << "#endif // V8_INTERNAL_DEFINED_" << macro->CCDebugName()
+ << "\n";
}
csa_ccfile() << "\n";
}
@@ -1198,7 +1256,8 @@ const Type* ImplementationVisitor::Visit(ReturnStatement* stmt) {
SetReturnValue(return_result);
}
} else if (current_callable->IsBuiltin()) {
- assembler().Emit(ReturnInstruction{});
+ assembler().Emit(ReturnInstruction{
+ LoweredSlotCount(current_callable->signature().return_type)});
} else {
UNREACHABLE();
}
@@ -1677,14 +1736,20 @@ void ImplementationVisitor::GenerateImplementation(const std::string& dir) {
streams.class_definition_inline_headerfile.str());
WriteFile(base_filename + "-tq.cc", streams.class_definition_ccfile.str());
}
+
+ WriteFile(dir + "/debug-macros.h", debug_macros_h_.str());
+ WriteFile(dir + "/debug-macros.cc", debug_macros_cc_.str());
}
void ImplementationVisitor::GenerateMacroFunctionDeclaration(std::ostream& o,
Macro* macro) {
- GenerateFunctionDeclaration(
- o, "",
- output_type_ == OutputType::kCC ? macro->CCName() : macro->ExternalName(),
- macro->signature(), macro->parameter_names());
+ GenerateFunctionDeclaration(o, "",
+ output_type_ == OutputType::kCC
+ ? macro->CCName()
+ : output_type_ == OutputType::kCCDebug
+ ? macro->CCDebugName()
+ : macro->ExternalName(),
+ macro->signature(), macro->parameter_names());
}
std::vector<std::string> ImplementationVisitor::GenerateFunctionDeclaration(
@@ -1698,17 +1763,21 @@ std::vector<std::string> ImplementationVisitor::GenerateFunctionDeclaration(
if (signature.return_type->IsVoidOrNever()) {
o << "void";
} else {
- o << (output_type_ == OutputType::kCC
- ? signature.return_type->GetRuntimeType()
- : signature.return_type->GetGeneratedTypeName());
+ if (output_type_ == OutputType::kCCDebug) {
+ o << "Value<" << signature.return_type->GetDebugType() << ">";
+ } else {
+ o << (output_type_ == OutputType::kCC
+ ? signature.return_type->GetRuntimeType()
+ : signature.return_type->GetGeneratedTypeName());
+ }
}
o << " " << macro_prefix << name << "(";
bool first = true;
- if (output_type_ == OutputType::kCC) {
+ if (output_type_ == OutputType::kCCDebug) {
first = false;
- o << "Isolate* isolate";
- } else if (pass_code_assembler_state) {
+ o << "d::MemoryAccessor accessor";
+ } else if (output_type_ == OutputType::kCSA && pass_code_assembler_state) {
first = false;
o << "compiler::CodeAssemblerState* state_";
}
@@ -1721,7 +1790,9 @@ std::vector<std::string> ImplementationVisitor::GenerateFunctionDeclaration(
const std::string& generated_type_name =
output_type_ == OutputType::kCC
? parameter_type->GetRuntimeType()
- : parameter_type->GetGeneratedTypeName();
+ : output_type_ == OutputType::kCCDebug
+ ? parameter_type->GetDebugType()
+ : parameter_type->GetGeneratedTypeName();
generated_parameter_names.push_back(ExternalParameterName(
i < parameter_names.size() ? parameter_names[i]->value
@@ -1730,7 +1801,8 @@ std::vector<std::string> ImplementationVisitor::GenerateFunctionDeclaration(
}
for (const LabelDeclaration& label_info : signature.labels) {
- if (output_type_ == OutputType::kCC) {
+ if (output_type_ == OutputType::kCC ||
+ output_type_ == OutputType::kCCDebug) {
ReportError("Macros that generate runtime code can't have label exits");
}
if (!first) o << ", ";
@@ -1925,6 +1997,7 @@ Callable* ImplementationVisitor::LookupCallable(
for (size_t candidate : candidates) {
if (candidate != best && !is_better_candidate(best, candidate)) {
std::vector<Signature> candidate_signatures;
+ candidate_signatures.reserve(candidates.size());
for (size_t i : candidates) {
candidate_signatures.push_back(overload_signatures[i]);
}
@@ -2639,9 +2712,21 @@ VisitResult ImplementationVisitor::GenerateCall(
return VisitResult::NeverResult();
} else {
size_t slot_count = LoweredSlotCount(return_type);
- DCHECK_LE(slot_count, 1);
- // TODO(tebbi): Actually, builtins have to return a value, so we should
- // assert slot_count == 1 here.
+ if (builtin->IsStub()) {
+ if (slot_count < 1 || slot_count > 2) {
+ ReportError(
+ "Builtin with stub linkage is expected to return one or two "
+ "values but returns ",
+ slot_count);
+ }
+ } else {
+ if (slot_count != 1) {
+ ReportError(
+ "Builtin with JS linkage is expected to return one value but "
+ "returns ",
+ slot_count);
+ }
+ }
return VisitResult(return_type, assembler().TopRange(slot_count));
}
} else if (auto* macro = Macro::DynamicCast(callable)) {
@@ -2764,7 +2849,7 @@ VisitResult ImplementationVisitor::GenerateCall(
} else {
size_t slot_count = LoweredSlotCount(return_type);
DCHECK_LE(slot_count, 1);
- // TODO(tebbi): Actually, runtime functions have to return a value, so
+ // TODO(turbofan): Actually, runtime functions have to return a value, so
// we should assert slot_count == 1 here.
return VisitResult(return_type, assembler().TopRange(slot_count));
}
@@ -2850,6 +2935,68 @@ VisitResult ImplementationVisitor::GenerateCall(
const Field& field =
class_type->LookupField(StringLiteralUnquote(constexpr_arguments[0]));
return GenerateArrayLength(VisitResult(type, argument_range), field);
+ } else if (intrinsic->ExternalName() == "%MakeLazy") {
+ if (specialization_types[0]->IsStructType()) {
+ ReportError("%MakeLazy can't use macros that return structs");
+ }
+ std::string getter_name = StringLiteralUnquote(constexpr_arguments[0]);
+
+ // Normally the parser would split namespace names for us, but we
+ // sidestepped it by putting the macro name in a string literal.
+ QualifiedName qualified_getter_name = QualifiedName::Parse(getter_name);
+
+ // converted_arguments contains all of the arguments to %MakeLazy. We're
+ // looking for a function that takes all but the first.
+ Arguments arguments_to_getter;
+ arguments_to_getter.parameters.insert(
+ arguments_to_getter.parameters.begin(),
+ converted_arguments.begin() + 1, converted_arguments.end());
+
+ Callable* callable = LookupCallable(
+ qualified_getter_name, Declarations::Lookup(qualified_getter_name),
+ arguments_to_getter, {});
+ Macro* getter = Macro::DynamicCast(callable);
+ if (!getter || getter->IsMethod()) {
+ ReportError(
+ "%MakeLazy expects a macro, not builtin or other type of callable");
+ }
+ if (!getter->signature().labels.empty()) {
+ ReportError("%MakeLazy requires a macro with no labels");
+ }
+ if (!getter->signature().return_type->IsSubtypeOf(
+ specialization_types[0])) {
+ ReportError("%MakeLazy expected return type ", *specialization_types[0],
+ " but found ", *getter->signature().return_type);
+ }
+ if (getter->signature().implicit_count > 0) {
+ ReportError("Implicit parameters are not yet supported in %MakeLazy");
+ }
+
+ getter->SetUsed(); // Prevent warnings about unused macros.
+
+ // Now that we've looked up the getter macro, we have to convert the
+ // arguments again, so that, for example, constexpr arguments can be
+ // coerced to non-constexpr types and put on the stack.
+
+ std::vector<VisitResult> converted_arguments_for_getter;
+ StackRange argument_range_for_getter = assembler().TopRange(0);
+ std::vector<std::string> constexpr_arguments_for_getter;
+
+ size_t current = 0;
+ for (auto arg : arguments_to_getter.parameters) {
+ DCHECK_LT(current, getter->signature().types().size());
+ const Type* to_type = getter->signature().types()[current++];
+ AddCallParameter(getter, arg, to_type, &converted_arguments_for_getter,
+ &argument_range_for_getter,
+ &constexpr_arguments_for_getter,
+ /*inline_macro=*/false);
+ }
+
+ // Now that the arguments are prepared, emit the instruction that consumes
+ // them.
+ assembler().Emit(MakeLazyNodeInstruction{getter, return_type,
+ constexpr_arguments_for_getter});
+ return VisitResult(return_type, assembler().TopRange(1));
} else {
assembler().Emit(CallIntrinsicInstruction{intrinsic, specialization_types,
constexpr_arguments});
@@ -3163,6 +3310,17 @@ void ImplementationVisitor::VisitAllDeclarables() {
// Recover from compile errors here. The error is recorded already.
}
}
+
+ // Do the same for macros which generate C++ debug code.
+ // The set of macros is the same as for the C++ output.
+ output_type_ = OutputType::kCCDebug;
+ for (size_t i = 0; i < cc_macros.size(); ++i) {
+ try {
+ Visit(static_cast<Declarable*>(cc_macros[i].first), cc_macros[i].second);
+ } catch (TorqueAbortCompilation&) {
+ // Recover from compile errors here. The error is recorded already.
+ }
+ }
output_type_ = OutputType::kCSA;
}
@@ -3243,19 +3401,23 @@ void ImplementationVisitor::GenerateBuiltinDefinitionsAndInterfaceDescriptors(
size_t kFirstNonContextParameter = has_context_parameter ? 1 : 0;
size_t parameter_count =
builtin->parameter_names().size() - kFirstNonContextParameter;
+ TypeVector return_types = LowerType(builtin->signature().return_type);
interface_descriptors
<< "class " << descriptor_name
- << " : public TorqueInterfaceDescriptor<" << parameter_count << ", "
+ << " : public TorqueInterfaceDescriptor<" << return_types.size()
+ << ", " << parameter_count << ", "
<< (has_context_parameter ? "true" : "false") << "> {\n";
interface_descriptors << " DECLARE_DESCRIPTOR_WITH_BASE("
<< descriptor_name
<< ", TorqueInterfaceDescriptor)\n";
- interface_descriptors << " MachineType ReturnType() override {\n";
interface_descriptors
- << " return "
- << MachineTypeString(builtin->signature().return_type) << ";\n";
+ << " std::vector<MachineType> ReturnType() override {\n";
+ interface_descriptors << " return {{";
+ PrintCommaSeparatedList(interface_descriptors, return_types,
+ MachineTypeString);
+ interface_descriptors << "}};\n";
interface_descriptors << " }\n";
interface_descriptors << " std::array<MachineType, " << parameter_count
@@ -3357,6 +3519,7 @@ class FieldOffsetsGenerator {
explicit FieldOffsetsGenerator(const ClassType* type) : type_(type) {}
virtual void WriteField(const Field& f, const std::string& size_string) = 0;
+ virtual void WriteFieldOffsetGetter(const Field& f) = 0;
virtual void WriteMarker(const std::string& marker) = 0;
virtual ~FieldOffsetsGenerator() { CHECK(is_finished_); }
@@ -3378,7 +3541,11 @@ class FieldOffsetsGenerator {
size_t field_size;
std::tie(field_size, size_string) = f.GetFieldSizeInformation();
}
- WriteField(f, size_string);
+ if (f.offset.has_value()) {
+ WriteField(f, size_string);
+ } else {
+ WriteFieldOffsetGetter(f);
+ }
}
void Finish() {
@@ -3484,6 +3651,9 @@ class MacroFieldOffsetsGenerator : public FieldOffsetsGenerator {
out_ << "V(k" << CamelifyString(f.name_and_type.name) << "Offset, "
<< size_string << ") \\\n";
}
+ void WriteFieldOffsetGetter(const Field& f) override {
+ // Can't do anything here.
+ }
void WriteMarker(const std::string& marker) override {
out_ << "V(" << marker << ", 0) \\\n";
}
@@ -3611,10 +3781,13 @@ namespace {
class ClassFieldOffsetGenerator : public FieldOffsetsGenerator {
public:
- ClassFieldOffsetGenerator(std::ostream& header, const ClassType* type)
+ ClassFieldOffsetGenerator(std::ostream& header, std::ostream& inline_header,
+ const ClassType* type, std::string gen_name_T)
: FieldOffsetsGenerator(type),
hdr_(header),
- previous_field_end_("P::kHeaderSize") {}
+ inl_(inline_header),
+ previous_field_end_("P::kHeaderSize"),
+ gen_name_T_(gen_name_T) {}
void WriteField(const Field& f, const std::string& size_string) override {
std::string field = "k" + CamelifyString(f.name_and_type.name) + "Offset";
std::string field_end = field + "End";
@@ -3624,6 +3797,22 @@ class ClassFieldOffsetGenerator : public FieldOffsetsGenerator {
<< size_string << " - 1;\n";
previous_field_end_ = field_end + " + 1";
}
+ void WriteFieldOffsetGetter(const Field& f) override {
+ // A static constexpr int is more convenient than a getter if the offset is
+ // known.
+ DCHECK(!f.offset.has_value());
+
+ std::string function_name = CamelifyString(f.name_and_type.name) + "Offset";
+
+ hdr_ << " inline int " << function_name << "() const;\n";
+ inl_ << "template <class D, class P>\n";
+ inl_ << "int " << gen_name_T_ << "::" << function_name << "() const {\n";
+ // Item 1 in a flattened slice is the offset.
+ inl_ << " return static_cast<int>(std::get<1>("
+ << Callable::PrefixNameForCCOutput(type_->GetSliceMacroName(f))
+ << "(*static_cast<const D*>(this))));\n";
+ inl_ << "}\n\n";
+ }
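When a field's offset is only known dynamically, the generator now emits a per-field offset getter instead of a constant: it calls the field's slice macro and takes element 1 of the flattened slice. A sketch of the resulting code (class, field, and slice-macro names are illustrative):

    template <class D, class P>
    int TorqueGeneratedExample<D, P>::ExampleFieldOffset() const {
      // Element 1 of the flattened slice is the offset within the object.
      return static_cast<int>(std::get<1>(
          TqRuntimeFieldSliceExampleField(*static_cast<const D*>(this))));
    }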
void WriteMarker(const std::string& marker) override {
hdr_ << " static constexpr int " << marker << " = " << previous_field_end_
<< ";\n";
@@ -3631,7 +3820,9 @@ class ClassFieldOffsetGenerator : public FieldOffsetsGenerator {
private:
std::ostream& hdr_;
+ std::ostream& inl_;
std::string previous_field_end_;
+ std::string gen_name_T_;
};
class CppClassGenerator {
@@ -3655,13 +3846,28 @@ class CppClassGenerator {
private:
void GenerateClassConstructors();
- void GenerateFieldAccessor(const Field& f);
- void GenerateFieldAccessorForUntagged(const Field& f);
- void GenerateFieldAccessorForSmi(const Field& f);
- void GenerateFieldAccessorForTagged(const Field& f);
+
+ // Generates getter and setter runtime member functions for the given class
+ // field. Traverses depth-first through any nested struct fields to generate
+ // accessors for them also; struct_fields represents the stack of currently
+ // active struct fields.
+ void GenerateFieldAccessors(const Field& class_field,
+ std::vector<const Field*>& struct_fields);
+ void EmitLoadFieldStatement(const Field& class_field,
+ std::vector<const Field*>& struct_fields);
+ void EmitStoreFieldStatement(const Field& class_field,
+ std::vector<const Field*>& struct_fields);
void GenerateClassCasts();
+ std::string GetFieldOffsetForAccessor(const Field& f);
+
+ // Gets the C++ type name that should be used in accessors for referring to
+ // the value of a class field.
+ std::string GetTypeNameForAccessor(const Field& f);
+
+ bool CanContainHeapObjects(const Type* t);
+
const ClassType* type_;
const ClassType* super_;
const std::string name_;
@@ -3722,7 +3928,8 @@ void CppClassGenerator::GenerateClass() {
hdr_ << " protected: // not extern or @export\n";
}
for (const Field& f : type_->fields()) {
- GenerateFieldAccessor(f);
+ std::vector<const Field*> struct_fields;
+ GenerateFieldAccessors(f, struct_fields);
}
if (!type_->ShouldExport() && !type_->IsExtern()) {
hdr_ << " public:\n";
@@ -3750,7 +3957,7 @@ void CppClassGenerator::GenerateClass() {
}
hdr_ << "\n";
- ClassFieldOffsetGenerator g(hdr_, type_);
+ ClassFieldOffsetGenerator g(hdr_, inl_, type_, gen_name_T_);
for (auto f : type_->fields()) {
CurrentSourcePosition::Scope scope(f.pos);
g.RecordOffsetFor(f);
@@ -3763,6 +3970,21 @@ void CppClassGenerator::GenerateClass() {
if (!index_fields.has_value()) {
hdr_ << " // SizeFor implementations not generated due to complex array "
"lengths\n\n";
+
+ const Field& last_field = type_->LastField();
+ std::string last_field_item_size =
+ std::get<1>(*SizeOf(last_field.name_and_type.type));
+ hdr_ << " inline int AllocatedSize() const;\n\n";
+ inl_ << "template <class D, class P>\n";
+ inl_ << "int " << gen_name_T_ << "::AllocatedSize() const {\n";
+ inl_ << " auto slice = "
+ << Callable::PrefixNameForCCOutput(
+ type_->GetSliceMacroName(last_field))
+ << "(*static_cast<const D*>(this));\n";
+ inl_ << " return static_cast<int>(std::get<1>(slice)) + "
+ << last_field_item_size
+ << " * static_cast<int>(std::get<2>(slice));\n";
+ inl_ << "}\n\n";
} else if (type_->ShouldGenerateBodyDescriptor() ||
(!type_->IsAbstract() &&
!type_->IsSubtypeOf(TypeOracle::GetJSObjectType()))) {
@@ -3794,7 +4016,7 @@ void CppClassGenerator::GenerateClass() {
}
hdr_ << " return size;\n";
hdr_ << " }\n\n";
- hdr_ << " V8_INLINE int32_t AllocatedSize() {\n";
+ hdr_ << " V8_INLINE int32_t AllocatedSize() const {\n";
hdr_ << " return SizeFor(";
first = true;
for (auto field : *index_fields) {
@@ -3899,237 +4121,286 @@ std::string GenerateRuntimeTypeCheck(const Type* type,
void GenerateBoundsDCheck(std::ostream& os, const std::string& index,
const ClassType* type, const Field& f) {
os << " DCHECK_GE(" << index << ", 0);\n";
+ std::string length_expression;
if (base::Optional<NameAndType> array_length =
ExtractSimpleFieldArraySize(*type, *f.index)) {
- os << " DCHECK_LT(" << index << ", this->" << array_length->name
- << "());\n";
+ length_expression = "this ->" + array_length->name + "()";
+ } else {
+ // The length is element 2 in the flattened field slice.
+ length_expression =
+ "static_cast<int>(std::get<2>(" +
+ Callable::PrefixNameForCCOutput(type->GetSliceMacroName(f)) +
+ "(*static_cast<const D*>(this))))";
}
+ os << " DCHECK_LT(" << index << ", " << length_expression << ");\n";
}
} // namespace
// TODO(sigurds): Keep in sync with DECL_ACCESSORS and ACCESSORS macro.
-void CppClassGenerator::GenerateFieldAccessor(const Field& f) {
- const Type* field_type = f.name_and_type.type;
+void CppClassGenerator::GenerateFieldAccessors(
+ const Field& class_field, std::vector<const Field*>& struct_fields) {
+ const Field& innermost_field =
+ struct_fields.empty() ? class_field : *struct_fields.back();
+ const Type* field_type = innermost_field.name_and_type.type;
if (field_type == TypeOracle::GetVoidType()) return;
- // TODO(danno): Support generation of struct accessors
- if (f.name_and_type.type->IsStructType()) return;
-
- // TODO(v8:10391) Generate accessors for external pointers
- if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetExternalPointerType())) {
+ // float64_or_hole should be treated like float64. For now, no accessors are
+ // generated for it.
+ if (field_type == TypeOracle::GetFloat64OrHoleType()) {
return;
}
- if (!f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
- return GenerateFieldAccessorForUntagged(f);
- }
- if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType())) {
- return GenerateFieldAccessorForSmi(f);
+ if (const StructType* struct_type = StructType::DynamicCast(field_type)) {
+ struct_fields.resize(struct_fields.size() + 1);
+ for (const Field& struct_field : struct_type->fields()) {
+ struct_fields[struct_fields.size() - 1] = &struct_field;
+ GenerateFieldAccessors(class_field, struct_fields);
+ }
+ struct_fields.resize(struct_fields.size() - 1);
+ return;
}
- if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
- return GenerateFieldAccessorForTagged(f);
+
+ // TODO(v8:10391) Generate accessors for external pointers
+ if (field_type->IsSubtypeOf(TypeOracle::GetExternalPointerType())) {
+ return;
}
- Error("Generation of field accessor for ", type_->name(),
- "::", f.name_and_type.name, " failed (type ", *field_type,
- " is not supported).")
- .Position(f.pos);
-}
+ bool indexed = class_field.index.has_value();
+ std::string type_name = GetTypeNameForAccessor(innermost_field);
+ bool can_contain_heap_objects = CanContainHeapObjects(field_type);
-void CppClassGenerator::GenerateFieldAccessorForUntagged(const Field& f) {
- DCHECK(!f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType()));
- const Type* field_type = f.name_and_type.type;
- if (field_type == TypeOracle::GetVoidType()) return;
- const Type* constexpr_version = field_type->ConstexprVersion();
- if (!constexpr_version) {
- Error("Field accessor for ", type_->name(), ":: ", f.name_and_type.name,
- " cannot be generated because its type ", *field_type,
- " is neither a subclass of Object nor does the type have a constexpr "
- "version.")
- .Position(f.pos);
- return;
+ // Assemble an accessor name by accumulating together all of the nested field
+ // names.
+ std::string name = class_field.name_and_type.name;
+ for (const Field* nested_struct_field : struct_fields) {
+ name += "_" + nested_struct_field->name_and_type.name;
}
- const std::string& name = f.name_and_type.name;
- const std::string type = constexpr_version->GetGeneratedTypeName();
- std::string offset = "k" + CamelifyString(name) + "Offset";
// Generate declarations in header.
- if (f.index) {
- hdr_ << " inline " << type << " " << name << "(int i) const;\n";
- hdr_ << " inline void set_" << name << "(int i, " << type
- << " value);\n\n";
- } else {
- hdr_ << " inline " << type << " " << name << "() const;\n";
- hdr_ << " inline void set_" << name << "(" << type << " value);\n\n";
+ if (can_contain_heap_objects && !field_type->IsClassType() &&
+ !field_type->IsStructType() &&
+ field_type != TypeOracle::GetObjectType()) {
+ hdr_ << " // Torque type: " << field_type->ToString() << "\n";
}
- // Generate implementation in inline header.
- inl_ << "template <class D, class P>\n";
- inl_ << type << " " << gen_name_ << "<D, P>::" << name << "(";
- if (f.index) {
- inl_ << "int i";
+ hdr_ << " inline " << type_name << " " << name << "("
+ << (indexed ? "int i" : "") << ") const;\n";
+ if (can_contain_heap_objects) {
+ hdr_ << " inline " << type_name << " " << name << "(IsolateRoot isolate"
+ << (indexed ? ", int i" : "") << ") const;\n";
+ }
+ hdr_ << " inline void set_" << name << "(" << (indexed ? "int i, " : "")
+ << type_name << " value"
+ << (can_contain_heap_objects
+ ? ", WriteBarrierMode mode = UPDATE_WRITE_BARRIER"
+ : "")
+ << ");\n\n";
+
+ // For tagged data, generate the extra getter that derives an IsolateRoot from
+ // the current object's pointer.
+ if (can_contain_heap_objects) {
+ inl_ << "template <class D, class P>\n";
+ inl_ << type_name << " " << gen_name_ << "<D, P>::" << name << "("
+ << (indexed ? "int i" : "") << ") const {\n";
+ inl_ << " IsolateRoot isolate = GetIsolateForPtrCompr(*this);\n";
+ inl_ << " return " << gen_name_ << "::" << name << "(isolate"
+ << (indexed ? ", i" : "") << ");\n";
+ inl_ << "}\n";
}
+
+ // Generate the getter implementation.
+ inl_ << "template <class D, class P>\n";
+ inl_ << type_name << " " << gen_name_ << "<D, P>::" << name << "(";
+ if (can_contain_heap_objects) inl_ << "IsolateRoot isolate";
+ if (can_contain_heap_objects && indexed) inl_ << ", ";
+ if (indexed) inl_ << "int i";
inl_ << ") const {\n";
- if (f.index) {
- GenerateBoundsDCheck(inl_, "i", type_, f);
- size_t field_size;
- std::string size_string;
- std::tie(field_size, size_string) = f.GetFieldSizeInformation();
- inl_ << " int offset = " << offset << " + i * " << field_size << ";\n";
- inl_ << " return this->template ReadField<" << type << ">(offset);\n";
- } else {
- inl_ << " return this->template ReadField<" << type << ">(" << offset
- << ");\n";
- }
+
+ inl_ << " " << type_name << " value;\n";
+ EmitLoadFieldStatement(class_field, struct_fields);
+ inl_ << " return value;\n";
inl_ << "}\n";
+ // Generate the setter implementation.
inl_ << "template <class D, class P>\n";
inl_ << "void " << gen_name_ << "<D, P>::set_" << name << "(";
- if (f.index) {
+ if (indexed) {
inl_ << "int i, ";
}
- inl_ << type << " value) {\n";
- if (f.index) {
- GenerateBoundsDCheck(inl_, "i", type_, f);
- size_t field_size;
- std::string size_string;
- std::tie(field_size, size_string) = f.GetFieldSizeInformation();
- inl_ << " int offset = " << offset << " + i * " << field_size << ";\n";
- inl_ << " this->template WriteField<" << type << ">(offset, value);\n";
- } else {
- inl_ << " this->template WriteField<" << type << ">(" << offset
- << ", value);\n";
+ inl_ << type_name << " value";
+ if (can_contain_heap_objects) {
+ inl_ << ", WriteBarrierMode mode";
}
+ inl_ << ") {\n";
+ EmitStoreFieldStatement(class_field, struct_fields);
inl_ << "}\n\n";
}
-void CppClassGenerator::GenerateFieldAccessorForSmi(const Field& f) {
- DCHECK(f.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType()));
- // Follow the convention to create Smi accessors with type int.
- const std::string type = "int";
- const std::string& name = f.name_and_type.name;
- const std::string offset = "k" + CamelifyString(name) + "Offset";
-
- // Generate declarations in header.
- if (f.index) {
- hdr_ << " inline " << type << " " << name << "(int i) const;\n";
- hdr_ << " inline void set_" << name << "(int i, " << type
- << " value);\n\n";
+std::string CppClassGenerator::GetFieldOffsetForAccessor(const Field& f) {
+ if (f.offset.has_value()) {
+ return "k" + CamelifyString(f.name_and_type.name) + "Offset";
}
- hdr_ << " inline " << type << " " << name << "() const;\n";
- hdr_ << " inline void set_" << name << "(" << type << " value);\n\n";
+ return CamelifyString(f.name_and_type.name) + "Offset()";
+}
- // Generate implementation in inline header.
- inl_ << "template <class D, class P>\n";
- inl_ << type << " " << gen_name_ << "<D, P>::" << name << "(";
- if (f.index) {
- inl_ << "int i";
+std::string CppClassGenerator::GetTypeNameForAccessor(const Field& f) {
+ const Type* field_type = f.name_and_type.type;
+ if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ const Type* constexpr_version = field_type->ConstexprVersion();
+ if (!constexpr_version) {
+ Error("Field accessor for ", type_->name(), ":: ", f.name_and_type.name,
+ " cannot be generated because its type ", *field_type,
+ " is neither a subclass of Object nor does the type have a "
+ "constexpr "
+ "version.")
+ .Position(f.pos)
+ .Throw();
+ }
+ return constexpr_version->GetGeneratedTypeName();
}
- inl_ << ") const {\n";
- if (f.index) {
- GenerateBoundsDCheck(inl_, "i", type_, f);
- inl_ << " int offset = " << offset << " + i * kTaggedSize;\n";
- inl_ << " return this->template ReadField<Smi>(offset).value();\n";
- inl_ << "}\n";
- } else {
- inl_ << " return TaggedField<Smi, " << offset
- << ">::load(*this).value();\n";
- inl_ << "}\n";
+ if (field_type->IsSubtypeOf(TypeOracle::GetSmiType())) {
+ // Follow the convention to create Smi accessors with type int.
+ return "int";
}
+ return field_type->UnhandlifiedCppTypeName();
+}
- inl_ << "template <class D, class P>\n";
- inl_ << "void " << gen_name_ << "<D, P>::set_" << name << "(";
- if (f.index) {
- inl_ << "int i, ";
- }
- inl_ << type << " value) {\n";
- const char* write_macro =
- f.relaxed_write ? "RELAXED_WRITE_FIELD" : "WRITE_FIELD";
- if (f.index) {
- GenerateBoundsDCheck(inl_, "i", type_, f);
- inl_ << " int offset = " << offset << " + i * kTaggedSize;\n";
- inl_ << " " << write_macro << "(*this, offset, Smi::FromInt(value));\n";
- } else {
- inl_ << " " << write_macro << "(*this, " << offset
- << ", Smi::FromInt(value));\n";
- }
- inl_ << "}\n\n";
+bool CppClassGenerator::CanContainHeapObjects(const Type* t) {
+ return t->IsSubtypeOf(TypeOracle::GetTaggedType()) &&
+ !t->IsSubtypeOf(TypeOracle::GetSmiType());
}
-void CppClassGenerator::GenerateFieldAccessorForTagged(const Field& f) {
- const Type* field_type = f.name_and_type.type;
- DCHECK(field_type->IsSubtypeOf(TypeOracle::GetTaggedType()));
- const std::string& name = f.name_and_type.name;
- std::string offset = "k" + CamelifyString(name) + "Offset";
- bool strong_pointer = field_type->IsSubtypeOf(TypeOracle::GetObjectType());
+void CppClassGenerator::EmitLoadFieldStatement(
+ const Field& class_field, std::vector<const Field*>& struct_fields) {
+ const Field& innermost_field =
+ struct_fields.empty() ? class_field : *struct_fields.back();
+ const Type* field_type = innermost_field.name_and_type.type;
+ std::string type_name = GetTypeNameForAccessor(innermost_field);
+ const std::string class_field_size =
+ std::get<1>(class_field.GetFieldSizeInformation());
- std::string type = field_type->UnhandlifiedCppTypeName();
- // Generate declarations in header.
- if (!field_type->IsClassType() && field_type != TypeOracle::GetObjectType()) {
- hdr_ << " // Torque type: " << field_type->ToString() << "\n";
+ // field_offset contains both the offset from the beginning of the object to
+ // the class field and the combined offsets of any nested struct fields
+ // within, but not the index adjustment.
+ std::string field_offset = GetFieldOffsetForAccessor(class_field);
+ for (const Field* nested_struct_field : struct_fields) {
+ field_offset += " + " + std::to_string(*nested_struct_field->offset);
}
- hdr_ << " inline " << type << " " << name << "(" << (f.index ? "int i" : "")
- << ") const;\n";
- hdr_ << " inline " << type << " " << name << "(IsolateRoot isolates"
- << (f.index ? ", int i" : "") << ") const;\n";
- hdr_ << " inline void set_" << name << "(" << (f.index ? "int i, " : "")
- << type << " value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);\n\n";
-
- std::string type_check = GenerateRuntimeTypeCheck(field_type, "value");
-
- // Generate implementation in inline header.
- inl_ << "template <class D, class P>\n";
- inl_ << type << " " << gen_name_ << "<D, P>::" << name << "("
- << (f.index ? "int i" : "") << ") const {\n";
- inl_ << " IsolateRoot isolate = GetIsolateForPtrCompr(*this);\n";
- inl_ << " return " << gen_name_ << "::" << name << "(isolate"
- << (f.index ? ", i" : "") << ");\n";
- inl_ << "}\n";
+ std::string offset = field_offset;
+ if (class_field.index) {
+ GenerateBoundsDCheck(inl_, "i", type_, class_field);
+ inl_ << " int offset = " << field_offset << " + i * " << class_field_size
+ << ";\n";
+ offset = "offset";
+ }
- inl_ << "template <class D, class P>\n";
- inl_ << type << " " << gen_name_ << "<D, P>::" << name
- << "(IsolateRoot isolate" << (f.index ? ", int i" : "") << ") const {\n";
+ inl_ << " value = ";
- if (f.index) {
- GenerateBoundsDCheck(inl_, "i", type_, f);
- inl_ << " int offset = " << offset << " + i * kTaggedSize;\n";
- inl_ << " auto value = TaggedField<" << type
- << ">::Relaxed_Load(isolate, *this, offset);\n";
+ if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ if (class_field.read_synchronization ==
+ FieldSynchronization::kAcquireRelease) {
+ ReportError("Torque doesn't support @acquireRead on untagged data");
+ } else if (class_field.read_synchronization ==
+ FieldSynchronization::kRelaxed) {
+ ReportError("Torque doesn't support @relaxedRead on untagged data");
+ }
+ inl_ << "this->template ReadField<" << type_name << ">(" << offset
+ << ");\n";
} else {
- inl_ << " auto value = TaggedField<" << type << ", " << offset
- << ">::load(isolate, *this);\n";
- }
- if (!type_check.empty()) {
- inl_ << " DCHECK(" << type_check << ");\n";
+ const char* load;
+ switch (class_field.read_synchronization) {
+ case FieldSynchronization::kNone:
+ load = "load";
+ break;
+ case FieldSynchronization::kRelaxed:
+ load = "Relaxed_Load";
+ break;
+ case FieldSynchronization::kAcquireRelease:
+ load = "Acquire_Load";
+ break;
+ }
+ bool is_smi = field_type->IsSubtypeOf(TypeOracle::GetSmiType());
+ const std::string load_type = is_smi ? "Smi" : type_name;
+ const char* postfix = is_smi ? ".value()" : "";
+ const char* optional_isolate = is_smi ? "" : "isolate, ";
+
+ inl_ << "TaggedField<" << load_type << ">::" << load << "("
+ << optional_isolate << "*this, " << offset << ")" << postfix << ";\n";
+ }
+
+ if (CanContainHeapObjects(field_type)) {
+ inl_ << " DCHECK(" << GenerateRuntimeTypeCheck(field_type, "value")
+ << ");\n";
}
- inl_ << " return value;\n";
- inl_ << "}\n";
+}
- inl_ << "template <class D, class P>\n";
- inl_ << "void " << gen_name_ << "<D, P>::set_" << name << "(";
- if (f.index) {
- inl_ << "int i, ";
- }
- inl_ << type << " value, WriteBarrierMode mode) {\n";
- if (!type_check.empty()) {
- inl_ << " SLOW_DCHECK(" << type_check << ");\n";
+void CppClassGenerator::EmitStoreFieldStatement(
+ const Field& class_field, std::vector<const Field*>& struct_fields) {
+ const Field& innermost_field =
+ struct_fields.empty() ? class_field : *struct_fields.back();
+ const Type* field_type = innermost_field.name_and_type.type;
+ std::string type_name = GetTypeNameForAccessor(innermost_field);
+ const std::string class_field_size =
+ std::get<1>(class_field.GetFieldSizeInformation());
+
+ // field_offset contains both the offset from the beginning of the object to
+ // the class field and the combined offsets of any nested struct fields
+ // within, but not the index adjustment.
+ std::string field_offset = GetFieldOffsetForAccessor(class_field);
+ for (const Field* nested_struct_field : struct_fields) {
+ field_offset += " + " + std::to_string(*nested_struct_field->offset);
}
- const char* write_macro =
- strong_pointer ? (f.relaxed_write ? "RELAXED_WRITE_FIELD" : "WRITE_FIELD")
- : "RELAXED_WRITE_WEAK_FIELD";
- if (f.index) {
- GenerateBoundsDCheck(inl_, "i", type_, f);
- inl_ << " int offset = " << offset << " + i * kTaggedSize;\n";
+
+ std::string offset = field_offset;
+ if (class_field.index) {
+ GenerateBoundsDCheck(inl_, "i", type_, class_field);
+ inl_ << " int offset = " << field_offset << " + i * " << class_field_size
+ << ";\n";
offset = "offset";
- inl_ << " " << write_macro << "(*this, offset, value);\n";
+ }
+
+ if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ inl_ << " this->template WriteField<" << type_name << ">(" << offset
+ << ", value);\n";
} else {
- inl_ << " " << write_macro << "(*this, " << offset << ", value);\n";
+ bool strong_pointer = field_type->IsSubtypeOf(TypeOracle::GetObjectType());
+ bool is_smi = field_type->IsSubtypeOf(TypeOracle::GetSmiType());
+ const char* write_macro;
+ if (!strong_pointer) {
+ if (class_field.write_synchronization ==
+ FieldSynchronization::kAcquireRelease) {
+ ReportError("Torque doesn't support @releaseWrite on weak fields");
+ }
+ write_macro = "RELAXED_WRITE_WEAK_FIELD";
+ } else {
+ switch (class_field.write_synchronization) {
+ case FieldSynchronization::kNone:
+ write_macro = "WRITE_FIELD";
+ break;
+ case FieldSynchronization::kRelaxed:
+ write_macro = "RELAXED_WRITE_FIELD";
+ break;
+ case FieldSynchronization::kAcquireRelease:
+ write_macro = "RELEASE_WRITE_FIELD";
+ break;
+ }
+ }
+ const std::string value_to_write = is_smi ? "Smi::FromInt(value)" : "value";
+
+ if (!is_smi) {
+ inl_ << " SLOW_DCHECK(" << GenerateRuntimeTypeCheck(field_type, "value")
+ << ");\n";
+ }
+ inl_ << " " << write_macro << "(*this, " << offset << ", "
+ << value_to_write << ");\n";
+ if (!is_smi) {
+ const char* write_barrier = strong_pointer
+ ? "CONDITIONAL_WRITE_BARRIER"
+ : "CONDITIONAL_WEAK_WRITE_BARRIER";
+ inl_ << " " << write_barrier << "(*this, " << offset
+ << ", value, mode);\n";
+ }
}
- const char* write_barrier = strong_pointer ? "CONDITIONAL_WRITE_BARRIER"
- : "CONDITIONAL_WEAK_WRITE_BARRIER";
- inl_ << " " << write_barrier << "(*this, " << offset << ", value, mode);\n";
- inl_ << "}\n\n";
}
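EmitStoreFieldStatement picks the write macro from the field's strength and write_synchronization: weak fields always use RELAXED_WRITE_FIELD for weak references, strong fields map kNone/kRelaxed/kAcquireRelease to WRITE_FIELD/RELAXED_WRITE_FIELD/RELEASE_WRITE_FIELD, Smi values are boxed with Smi::FromInt, and non-Smi tagged stores get a type check and a conditional write barrier. A rough sketch of a generated setter for a strong tagged field annotated @releaseWrite (class, field, and check expression are illustrative):

    template <class D, class P>
    void TorqueGeneratedExample<D, P>::set_example_field(HeapObject value,
                                                         WriteBarrierMode mode) {
      SLOW_DCHECK(value.IsHeapObject());  // runtime type check, exact form varies
      RELEASE_WRITE_FIELD(*this, kExampleFieldOffset, value);
      CONDITIONAL_WRITE_BARRIER(*this, kExampleFieldOffset, value, mode);
    }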
void GenerateStructLayoutDescription(std::ostream& header,
@@ -4320,7 +4591,7 @@ void GeneratePrintDefinitionsForClass(std::ostream& impl, const ClassType* type,
!f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
impl << " os << \"\\n - " << f.name_and_type.name << ": \" << ";
if (f.name_and_type.type->StructSupertype()) {
- // TODO(tebbi): Print struct fields too.
+ // TODO(turbofan): Print struct fields too.
impl << "\" <struct field printing still unimplemented>\";\n";
} else {
impl << "this->" << f.name_and_type.name << "();\n";
@@ -4608,7 +4879,7 @@ void GenerateClassFieldVerifier(const std::string& class_name,
<< length << ") = "
<< Callable::PrefixNameForCCOutput(
class_type.GetSliceMacroName(f))
- << "(isolate, o);\n";
+ << "(o);\n";
// Slices use intptr, but TaggedField<T>.load() uses int, so verify that
// such a cast is valid.
@@ -4784,6 +5055,7 @@ void ImplementationVisitor::GenerateExportedMacrosAssembler(
cc_contents << "#include \"src/objects/js-regexp-string-iterator.h\"\n";
cc_contents << "#include \"src/objects/ordered-hash-table.h\"\n";
cc_contents << "#include \"src/objects/property-descriptor-object.h\"\n";
+ cc_contents << "#include \"src/objects/swiss-name-dictionary.h\"\n";
cc_contents << "#include \"src/objects/synthetic-module.h\"\n";
cc_contents << "#include \"src/objects/template-objects.h\"\n";
{
diff --git a/deps/v8/src/torque/implementation-visitor.h b/deps/v8/src/torque/implementation-visitor.h
index 037697e698..8b32de0788 100644
--- a/deps/v8/src/torque/implementation-visitor.h
+++ b/deps/v8/src/torque/implementation-visitor.h
@@ -561,6 +561,8 @@ class ImplementationVisitor {
void BeginGeneratedFiles();
void EndGeneratedFiles();
+ void BeginDebugMacrosFile();
+ void EndDebugMacrosFile();
void GenerateImplementation(const std::string& dir);
@@ -768,19 +770,31 @@ class ImplementationVisitor {
std::ostream& csa_ccfile() {
if (auto* streams = CurrentFileStreams::Get()) {
- return output_type_ == OutputType::kCSA
- ? streams->csa_ccfile
- : streams
- ->class_definition_inline_headerfile_macro_definitions;
+ switch (output_type_) {
+ case OutputType::kCSA:
+ return streams->csa_ccfile;
+ case OutputType::kCC:
+ return streams->class_definition_inline_headerfile_macro_definitions;
+ case OutputType::kCCDebug:
+ return debug_macros_cc_;
+ default:
+ UNREACHABLE();
+ }
}
return null_stream_;
}
std::ostream& csa_headerfile() {
if (auto* streams = CurrentFileStreams::Get()) {
- return output_type_ == OutputType::kCSA
- ? streams->csa_headerfile
- : streams
- ->class_definition_inline_headerfile_macro_declarations;
+ switch (output_type_) {
+ case OutputType::kCSA:
+ return streams->csa_headerfile;
+ case OutputType::kCC:
+ return streams->class_definition_inline_headerfile_macro_declarations;
+ case OutputType::kCCDebug:
+ return debug_macros_h_;
+ default:
+ UNREACHABLE();
+ }
}
return null_stream_;
}
@@ -832,6 +846,11 @@ class ImplementationVisitor {
std::unordered_map<const Expression*, const Identifier*>
bitfield_expressions_;
+ // The contents of the debug macros output files. These contain all Torque
+ // macros that have been generated using the C++ backend for debugging purposes.
+ std::stringstream debug_macros_cc_;
+ std::stringstream debug_macros_h_;
+
OutputType output_type_ = OutputType::kCSA;
};
diff --git a/deps/v8/src/torque/instance-type-generator.cc b/deps/v8/src/torque/instance-type-generator.cc
index 1e2423deba..7625cc50d6 100644
--- a/deps/v8/src/torque/instance-type-generator.cc
+++ b/deps/v8/src/torque/instance-type-generator.cc
@@ -297,6 +297,9 @@ std::unique_ptr<InstanceTypeTree> AssignInstanceTypes() {
// - only_declared_single_instance_types: This list is pairs of class name and
// instance type, for classes which have a single corresponding instance type
// and do not have layout definitions in Torque.
+// - only_declared_multiple_instance_types: This list is pairs of class name and
+// instance type, for classes which have subclasses but also have a single
+// corresponding instance type, and do not have layout definitions in Torque.
// - fully_defined_range_instance_types: This list is triples of class name,
// first instance type, and last instance type, for classes which have defined
// layouts and multiple corresponding instance types.
@@ -309,6 +312,7 @@ void PrintInstanceTypes(InstanceTypeTree* root, std::ostream& definitions,
std::ostream& fully_defined_single_instance_types,
std::ostream& fully_defined_multiple_instance_types,
std::ostream& only_declared_single_instance_types,
+ std::ostream& only_declared_multiple_instance_types,
std::ostream& fully_defined_range_instance_types,
std::ostream& only_declared_range_instance_types,
const std::string& indent) {
@@ -325,21 +329,23 @@ void PrintInstanceTypes(InstanceTypeTree* root, std::ostream& definitions,
definitions << inner_indent << "V(" << type_name << ", " << root->value
<< ") \\\n";
values << " V(" << type_name << ") \\\n";
- if (root->num_values == 1) {
- std::ostream& single_instance_types =
- root->type->HasUndefinedLayout()
- ? only_declared_single_instance_types
- : fully_defined_single_instance_types;
- single_instance_types << " V(" << root->type->name() << ", " << type_name
- << ") \\\n";
- }
+ std::ostream& type_checker_list =
+ root->type->HasUndefinedLayout()
+ ? (root->num_values == 1 ? only_declared_single_instance_types
+ : only_declared_multiple_instance_types)
+ : (root->num_values == 1 ? fully_defined_single_instance_types
+ : fully_defined_multiple_instance_types);
+ type_checker_list << " V(" << root->type->name() << ", " << type_name
+ << ") \\\n";
}
for (auto& child : root->children) {
- PrintInstanceTypes(
- child.get(), definitions, values, fully_defined_single_instance_types,
- fully_defined_multiple_instance_types,
- only_declared_single_instance_types, fully_defined_range_instance_types,
- only_declared_range_instance_types, inner_indent);
+ PrintInstanceTypes(child.get(), definitions, values,
+ fully_defined_single_instance_types,
+ fully_defined_multiple_instance_types,
+ only_declared_single_instance_types,
+ only_declared_multiple_instance_types,
+ fully_defined_range_instance_types,
+ only_declared_range_instance_types, inner_indent);
}
if (root->num_values > 1) {
// We can't emit LAST_STRING_TYPE because it's not a valid flags
@@ -358,11 +364,6 @@ void PrintInstanceTypes(InstanceTypeTree* root, std::ostream& definitions,
: fully_defined_range_instance_types;
range_instance_types << " V(" << root->type->name() << ", FIRST_"
<< type_name << ", LAST_" << type_name << ") \\\n";
- if (!root->type->IsExtern() && !root->type->IsAbstract() &&
- !root->type->HasUndefinedLayout()) {
- fully_defined_multiple_instance_types << " V(" << root->type->name()
- << ", " << type_name << ") \\\n";
- }
}
}
}
@@ -384,6 +385,7 @@ void ImplementationVisitor::GenerateInstanceTypes(
std::stringstream fully_defined_single_instance_types;
std::stringstream fully_defined_multiple_instance_types;
std::stringstream only_declared_single_instance_types;
+ std::stringstream only_declared_multiple_instance_types;
std::stringstream fully_defined_range_instance_types;
std::stringstream only_declared_range_instance_types;
if (instance_types != nullptr) {
@@ -391,6 +393,7 @@ void ImplementationVisitor::GenerateInstanceTypes(
fully_defined_single_instance_types,
fully_defined_multiple_instance_types,
only_declared_single_instance_types,
+ only_declared_multiple_instance_types,
fully_defined_range_instance_types,
only_declared_range_instance_types, " ");
}
@@ -422,6 +425,14 @@ void ImplementationVisitor::GenerateInstanceTypes(
header << only_declared_single_instance_types.str();
header << "\n";
+ header << "// Pairs of (ClassName, INSTANCE_TYPE) for classes that are\n";
+ header << "// declared but not defined in Torque, and have subclasses.\n";
+ header << "// These classes may correspond with actual C++ classes, but\n";
+ header << "// they are not guaranteed to.\n";
+ header << "#define TORQUE_INSTANCE_CHECKERS_MULTIPLE_ONLY_DECLARED(V) \\\n";
+ header << only_declared_multiple_instance_types.str();
+ header << "\n";
+
header << "// Triples of (ClassName, FIRST_TYPE, LAST_TYPE) for classes\n";
header << "// that have full Torque definitions.\n";
header << "#define TORQUE_INSTANCE_CHECKERS_RANGE_FULLY_DEFINED(V) \\\n";
diff --git a/deps/v8/src/torque/instructions.cc b/deps/v8/src/torque/instructions.cc
index 90f392e000..ea7676ea44 100644
--- a/deps/v8/src/torque/instructions.cc
+++ b/deps/v8/src/torque/instructions.cc
@@ -423,8 +423,8 @@ void CallBuiltinPointerInstruction::TypeInstruction(
ReportError("wrong argument types");
}
DCHECK_EQ(type, f);
- // TODO(tebbi): Only invalidate transient types if the function pointer type
- // is transitioning.
+ // TODO(turbofan): Only invalidate transient types if the function pointer
+ // type is transitioning.
InvalidateTransientTypes(stack);
stack->PushMany(LowerType(f->return_type()));
}
@@ -558,12 +558,12 @@ void GotoExternalInstruction::RecomputeDefinitionLocations(
void ReturnInstruction::TypeInstruction(Stack<const Type*>* stack,
ControlFlowGraph* cfg) const {
- cfg->SetReturnType(stack->Pop());
+ cfg->SetReturnType(stack->PopMany(count));
}
void ReturnInstruction::RecomputeDefinitionLocations(
Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
- locations->Pop();
+ locations->PopMany(count);
}
void PrintConstantStringInstruction::TypeInstruction(
@@ -663,6 +663,36 @@ DefinitionLocation StoreBitFieldInstruction::GetValueDefinition() const {
return DefinitionLocation::Instruction(this, 0);
}
+void MakeLazyNodeInstruction::TypeInstruction(Stack<const Type*>* stack,
+ ControlFlowGraph* cfg) const {
+ std::vector<const Type*> parameter_types =
+ LowerParameterTypes(macro->signature().parameter_types);
+ for (intptr_t i = parameter_types.size() - 1; i >= 0; --i) {
+ const Type* arg_type = stack->Pop();
+ const Type* parameter_type = parameter_types.back();
+ parameter_types.pop_back();
+ if (arg_type != parameter_type) {
+ ReportError("parameter ", i, ": expected type ", *parameter_type,
+ " but found type ", *arg_type);
+ }
+ }
+
+ stack->Push(result_type);
+}
+
+void MakeLazyNodeInstruction::RecomputeDefinitionLocations(
+ Stack<DefinitionLocation>* locations, Worklist<Block*>* worklist) const {
+ auto parameter_types =
+ LowerParameterTypes(macro->signature().parameter_types);
+ locations->PopMany(parameter_types.size());
+
+ locations->Push(GetValueDefinition());
+}
+
+DefinitionLocation MakeLazyNodeInstruction::GetValueDefinition() const {
+ return DefinitionLocation::Instruction(this, 0);
+}
+
bool CallRuntimeInstruction::IsBlockTerminator() const {
return is_tailcall || runtime_function->signature().return_type ==
TypeOracle::GetNeverType();
diff --git a/deps/v8/src/torque/instructions.h b/deps/v8/src/torque/instructions.h
index 69dfbd8fc3..85fd7f897c 100644
--- a/deps/v8/src/torque/instructions.h
+++ b/deps/v8/src/torque/instructions.h
@@ -49,6 +49,7 @@ class RuntimeFunction;
V(ConstexprBranchInstruction) \
V(GotoInstruction) \
V(GotoExternalInstruction) \
+ V(MakeLazyNodeInstruction) \
V(ReturnInstruction) \
V(PrintConstantStringInstruction) \
V(AbortInstruction) \
@@ -457,6 +458,21 @@ struct CallCsaMacroAndBranchInstruction : InstructionBase {
base::Optional<Block*> catch_block;
};
+struct MakeLazyNodeInstruction : InstructionBase {
+ TORQUE_INSTRUCTION_BOILERPLATE()
+ MakeLazyNodeInstruction(Macro* macro, const Type* result_type,
+ std::vector<std::string> constexpr_arguments)
+ : macro(macro),
+ result_type(result_type),
+ constexpr_arguments(std::move(constexpr_arguments)) {}
+
+ DefinitionLocation GetValueDefinition() const;
+
+ Macro* macro;
+ const Type* result_type;
+ std::vector<std::string> constexpr_arguments;
+};
+
struct CallBuiltinInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
bool IsBlockTerminator() const override { return is_tailcall; }
@@ -578,7 +594,10 @@ struct GotoExternalInstruction : InstructionBase {
struct ReturnInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
+ explicit ReturnInstruction(size_t count) : count(count) {}
bool IsBlockTerminator() const override { return true; }
+
+ size_t count; // How many values to return.
};
struct PrintConstantStringInstruction : InstructionBase {
diff --git a/deps/v8/src/torque/runtime-macro-shims.h b/deps/v8/src/torque/runtime-macro-shims.h
index a7c017f93d..04b09a7334 100644
--- a/deps/v8/src/torque/runtime-macro-shims.h
+++ b/deps/v8/src/torque/runtime-macro-shims.h
@@ -13,26 +13,26 @@
namespace v8 {
namespace internal {
-class Isolate;
-
namespace TorqueRuntimeMacroShims {
namespace CodeStubAssembler {
-inline intptr_t ChangeInt32ToIntPtr(Isolate* isolate, int32_t i) { return i; }
-inline uintptr_t ChangeUint32ToWord(Isolate* isolate, uint32_t u) { return u; }
-inline intptr_t IntPtrAdd(Isolate* isolate, intptr_t a, intptr_t b) {
- return a + b;
-}
-inline intptr_t IntPtrMul(Isolate* isolate, intptr_t a, intptr_t b) {
- return a * b;
-}
-inline intptr_t Signed(Isolate* isolate, uintptr_t u) {
- return static_cast<intptr_t>(u);
-}
+inline bool BoolConstant(bool b) { return b; }
+inline intptr_t ChangeInt32ToIntPtr(int32_t i) { return i; }
+inline uintptr_t ChangeUint32ToWord(uint32_t u) { return u; }
+inline intptr_t IntPtrAdd(intptr_t a, intptr_t b) { return a + b; }
+inline intptr_t IntPtrMul(intptr_t a, intptr_t b) { return a * b; }
+inline intptr_t Signed(uintptr_t u) { return static_cast<intptr_t>(u); }
template <typename Smi>
-inline int32_t SmiUntag(Isolate* isolate, Smi s) {
+inline int32_t SmiUntag(Smi s) {
return s.value();
}
+inline bool UintPtrLessThan(uintptr_t a, uintptr_t b) { return a < b; }
+inline uint32_t Unsigned(int32_t s) { return static_cast<uint32_t>(s); }
+#if V8_HOST_ARCH_64_BIT
+inline uintptr_t Unsigned(intptr_t s) { return static_cast<uintptr_t>(s); }
+#endif
+inline bool Word32Equal(uint32_t a, uint32_t b) { return a == b; }
+inline bool Word32NotEqual(uint32_t a, uint32_t b) { return a != b; }
} // namespace CodeStubAssembler
} // namespace TorqueRuntimeMacroShims
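
// Illustrative sketch only: the shims above are pure value helpers, which is
// why the Isolate* parameter could be dropped. Generated C++ debug macros call
// them with plain integers; this standalone mirror just shows the semantics.
#include <cassert>
#include <cstdint>

namespace shims {
inline intptr_t IntPtrAdd(intptr_t a, intptr_t b) { return a + b; }
inline uint32_t Unsigned(int32_t s) { return static_cast<uint32_t>(s); }
inline bool Word32Equal(uint32_t a, uint32_t b) { return a == b; }
}  // namespace shims

int main() {
  assert(shims::IntPtrAdd(40, 2) == 42);
  assert(shims::Unsigned(-1) == 0xFFFFFFFFu);
  assert(shims::Word32Equal(7u, 7u));
  return 0;
}
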
diff --git a/deps/v8/src/torque/source-positions.cc b/deps/v8/src/torque/source-positions.cc
index 69be0e0911..94f2579749 100644
--- a/deps/v8/src/torque/source-positions.cc
+++ b/deps/v8/src/torque/source-positions.cc
@@ -58,6 +58,7 @@ SourceId SourceFileMap::GetSourceId(const std::string& path) {
std::vector<SourceId> SourceFileMap::AllSources() {
SourceFileMap& self = Get();
std::vector<SourceId> result;
+ result.reserve(static_cast<int>(self.sources_.size()));
for (int i = 0; i < static_cast<int>(self.sources_.size()); ++i) {
result.push_back(SourceId(i));
}
diff --git a/deps/v8/src/torque/torque-compiler.cc b/deps/v8/src/torque/torque-compiler.cc
index 88b3b94566..6da18dd526 100644
--- a/deps/v8/src/torque/torque-compiler.cc
+++ b/deps/v8/src/torque/torque-compiler.cc
@@ -76,6 +76,7 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
implementation_visitor.GenerateInstanceTypes(output_directory);
implementation_visitor.BeginGeneratedFiles();
+ implementation_visitor.BeginDebugMacrosFile();
implementation_visitor.VisitAllDeclarables();
@@ -95,6 +96,7 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
implementation_visitor.GenerateCSATypes(output_directory);
implementation_visitor.EndGeneratedFiles();
+ implementation_visitor.EndDebugMacrosFile();
implementation_visitor.GenerateImplementation(output_directory);
if (GlobalContext::collect_language_server_data()) {
diff --git a/deps/v8/src/torque/torque-parser.cc b/deps/v8/src/torque/torque-parser.cc
index 46b4c5c915..428ee9e804 100644
--- a/deps/v8/src/torque/torque-parser.cc
+++ b/deps/v8/src/torque/torque-parser.cc
@@ -47,7 +47,6 @@ class BuildFlags : public ContextualClass<BuildFlags> {
BuildFlags() {
build_flags_["V8_SFI_HAS_UNIQUE_ID"] = V8_SFI_HAS_UNIQUE_ID;
build_flags_["TAGGED_SIZE_8_BYTES"] = TAGGED_SIZE_8_BYTES;
- build_flags_["V8_DOUBLE_FIELDS_UNBOXING"] = V8_DOUBLE_FIELDS_UNBOXING;
build_flags_["TRUE_FOR_TESTING"] = true;
build_flags_["FALSE_FOR_TESTING"] = false;
}
@@ -983,7 +982,7 @@ base::Optional<ParseResult> MakeClassDeclaration(
std::vector<Declaration*> result;
result.push_back(MakeNode<ClassDeclaration>(
- name, flags, extends, std::move(generates), std::move(methods), fields,
+ name, flags, extends, generates, std::move(methods), fields,
MakeInstanceTypeConstraints(annotations)));
Identifier* constexpr_name =
@@ -993,7 +992,8 @@ base::Optional<ParseResult> MakeClassDeclaration(
AbstractTypeFlags abstract_type_flags(AbstractTypeFlag::kConstexpr);
if (transient) abstract_type_flags |= AbstractTypeFlag::kTransient;
TypeDeclaration* constexpr_decl = MakeNode<AbstractTypeDeclaration>(
- constexpr_name, abstract_type_flags, constexpr_extends, name->value);
+ constexpr_name, abstract_type_flags, constexpr_extends,
+ generates ? UnwrapTNodeTypeName(*generates) : name->value);
constexpr_decl->pos = name->pos;
result.push_back(constexpr_decl);
@@ -1311,6 +1311,7 @@ base::Optional<ParseResult> MakeEnumDeclaration(
name_type_expression->pos = name_identifier->pos;
std::vector<Declaration*> entry_decls;
+ entry_decls.reserve(entries.size());
for (const auto& entry : entries) {
entry_decls.push_back(MakeNode<AbstractTypeDeclaration>(
entry.name, AbstractTypeFlag::kNone,
@@ -1829,22 +1830,11 @@ base::Optional<ParseResult> MakeAssignmentExpression(
base::Optional<ParseResult> MakeNumberLiteralExpression(
ParseResultIterator* child_results) {
auto number = child_results->NextAs<std::string>();
- // TODO(tebbi): Support 64bit literals.
+ // TODO(turbofan): Support 64bit literals.
// Meanwhile, we type it as constexpr float64 when out of int32 range.
double value = 0;
try {
-#if defined(V8_OS_SOLARIS)
- // stod() on Solaris does not currently support hex strings. Use strtol()
- // specifically for hex literals until stod() support is available.
- if (number.find("0x") == std::string::npos &&
- number.find("0X") == std::string::npos) {
- value = std::stod(number);
- } else {
- value = static_cast<double>(strtol(number.c_str(), nullptr, 0));
- }
-#else
value = std::stod(number);
-#endif // !defined(V8_OS_SOLARIS)
} catch (const std::out_of_range&) {
Error("double literal out-of-range").Throw();
}
@@ -1954,10 +1944,23 @@ base::Optional<ParseResult> MakeAnnotation(ParseResultIterator* child_results) {
base::Optional<ParseResult> MakeClassField(ParseResultIterator* child_results) {
AnnotationSet annotations(child_results,
- {ANNOTATION_NO_VERIFIER, ANNOTATION_RELAXED_WRITE},
+ {ANNOTATION_NO_VERIFIER, ANNOTATION_RELAXED_WRITE,
+ ANNOTATION_RELAXED_READ, ANNOTATION_RELEASE_WRITE,
+ ANNOTATION_ACQUIRE_READ},
{ANNOTATION_IF, ANNOTATION_IFNOT});
bool generate_verify = !annotations.Contains(ANNOTATION_NO_VERIFIER);
- bool relaxed_write = annotations.Contains(ANNOTATION_RELAXED_WRITE);
+ FieldSynchronization write_synchronization = FieldSynchronization::kNone;
+ if (annotations.Contains(ANNOTATION_RELEASE_WRITE)) {
+ write_synchronization = FieldSynchronization::kAcquireRelease;
+ } else if (annotations.Contains(ANNOTATION_RELAXED_WRITE)) {
+ write_synchronization = FieldSynchronization::kRelaxed;
+ }
+ FieldSynchronization read_synchronization = FieldSynchronization::kNone;
+ if (annotations.Contains(ANNOTATION_ACQUIRE_READ)) {
+ read_synchronization = FieldSynchronization::kAcquireRelease;
+ } else if (annotations.Contains(ANNOTATION_RELAXED_READ)) {
+ read_synchronization = FieldSynchronization::kRelaxed;
+ }
std::vector<ConditionalAnnotation> conditions;
base::Optional<std::string> if_condition =
annotations.GetStringParam(ANNOTATION_IF);
@@ -1981,7 +1984,8 @@ base::Optional<ParseResult> MakeClassField(ParseResultIterator* child_results) {
weak,
const_qualified,
generate_verify,
- relaxed_write}};
+ read_synchronization,
+ write_synchronization}};
}
base::Optional<ParseResult> MakeStructField(
diff --git a/deps/v8/src/torque/type-oracle.h b/deps/v8/src/torque/type-oracle.h
index 88b4b829d9..e0d6741501 100644
--- a/deps/v8/src/torque/type-oracle.h
+++ b/deps/v8/src/torque/type-oracle.h
@@ -114,6 +114,10 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Declarations::LookupGlobalUniqueGenericType(SMI_TAGGED_TYPE_STRING);
}
+ static GenericType* GetLazyGeneric() {
+ return Declarations::LookupGlobalUniqueGenericType(LAZY_TYPE_STRING);
+ }
+
static const Type* GetReferenceType(const Type* referenced_type,
bool is_const) {
return GetGenericTypeInstance(GetReferenceGeneric(is_const),
diff --git a/deps/v8/src/torque/type-visitor.cc b/deps/v8/src/torque/type-visitor.cc
index b0441399c0..2673faf32d 100644
--- a/deps/v8/src/torque/type-visitor.cc
+++ b/deps/v8/src/torque/type-visitor.cc
@@ -63,12 +63,7 @@ std::string ComputeGeneratesType(base::Optional<std::string> opt_gen,
if (!opt_gen) return "";
const std::string& generates = *opt_gen;
if (enforce_tnode_type) {
- if (generates.length() < 7 || generates.substr(0, 6) != "TNode<" ||
- generates.substr(generates.length() - 1, 1) != ">") {
- ReportError("generated type \"", generates,
- "\" should be of the form \"TNode<...>\"");
- }
- return generates.substr(6, generates.length() - 7);
+ return UnwrapTNodeTypeName(generates);
}
return generates;
}
@@ -213,7 +208,8 @@ const StructType* TypeVisitor::ComputeType(
false,
field.const_qualified,
false,
- false};
+ FieldSynchronization::kNone,
+ FieldSynchronization::kNone};
auto optional_size = SizeOf(f.name_and_type.type);
struct_type->RegisterField(f);
// Offsets are assigned based on an assumption of no space between members.
@@ -434,7 +430,8 @@ void TypeVisitor::VisitClassFieldsAndMethods(
field_expression.weak,
field_expression.const_qualified,
field_expression.generate_verify,
- field_expression.relaxed_write});
+ field_expression.read_synchronization,
+ field_expression.write_synchronization});
ResidueClass field_size = std::get<0>(field.GetFieldSizeInformation());
if (field.index) {
// Validate that a value at any index in a packed array is aligned
diff --git a/deps/v8/src/torque/types.cc b/deps/v8/src/torque/types.cc
index 848fb21e08..ff2531ebd1 100644
--- a/deps/v8/src/torque/types.cc
+++ b/deps/v8/src/torque/types.cc
@@ -184,6 +184,23 @@ std::string Type::GetGeneratedTNodeTypeName() const {
return result;
}
+std::string AbstractType::GetGeneratedTypeNameImpl() const {
+ // A special case that is not very well represented by the "generates"
+ // syntax in the .tq files: Lazy<T> represents a std::function that
+ // produces a TNode of the wrapped type.
+ if (base::Optional<const Type*> type_wrapped_in_lazy =
+ Type::MatchUnaryGeneric(this, TypeOracle::GetLazyGeneric())) {
+ DCHECK(!IsConstexpr());
+ return "std::function<" + (*type_wrapped_in_lazy)->GetGeneratedTypeName() +
+ "()>";
+ }
+
+ if (generated_type_.empty()) {
+ return parent()->GetGeneratedTypeName();
+ }
+ return IsConstexpr() ? generated_type_ : "TNode<" + generated_type_ + ">";
+}
+
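
// Illustrative sketch only: Lazy<T> lowers to std::function<TNode<T>()> in the
// generated code, i.e. a thunk that produces the node on demand. TNode and Smi
// below are minimal stand-ins, not the real CSA types.
#include <functional>
#include <iostream>

template <typename T>
struct TNode { T value; };
struct Smi { int value; };

int main() {
  // What Torque's Lazy<Smi> roughly maps to: a callable producing TNode<Smi>.
  std::function<TNode<Smi>()> lazy_smi = [] {
    std::cout << "materializing node\n";
    return TNode<Smi>{Smi{42}};
  };
  // Nothing is materialized until the consumer actually invokes the thunk.
  TNode<Smi> node = lazy_smi();
  std::cout << node.value.value << "\n";
  return 0;
}
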
std::string AbstractType::GetGeneratedTNodeTypeNameImpl() const {
if (generated_type_.empty()) return parent()->GetGeneratedTNodeTypeName();
return generated_type_;
@@ -1302,6 +1319,26 @@ std::string Type::GetRuntimeType() const {
return ConstexprVersion()->GetGeneratedTypeName();
}
+std::string Type::GetDebugType() const {
+ if (IsSubtypeOf(TypeOracle::GetSmiType())) return "uintptr_t";
+ if (IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ return "uintptr_t";
+ }
+ if (base::Optional<const StructType*> struct_type = StructSupertype()) {
+ std::stringstream result;
+ result << "std::tuple<";
+ bool first = true;
+ for (const Type* field_type : LowerType(*struct_type)) {
+ if (!first) result << ", ";
+ first = false;
+ result << field_type->GetDebugType();
+ }
+ result << ">";
+ return result.str();
+ }
+ return ConstexprVersion()->GetGeneratedTypeName();
+}
+
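
// Illustrative sketch only: the debug type that GetDebugType() above would
// produce for a hypothetical struct with one tagged field and one intptr
// field. Tagged values are lowered to raw uintptr_t for the debug interface.
#include <cstdint>
#include <tuple>

// e.g. struct { Object o; intptr i; }  =>  std::tuple<uintptr_t, intptr_t>
using HypotheticalStructDebugType = std::tuple<std::uintptr_t, std::intptr_t>;

static_assert(std::tuple_size<HypotheticalStructDebugType>::value == 2,
              "two lowered fields");
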
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/types.h b/deps/v8/src/torque/types.h
index 115bc5350d..ac6c5e3263 100644
--- a/deps/v8/src/torque/types.h
+++ b/deps/v8/src/torque/types.h
@@ -142,6 +142,7 @@ class V8_EXPORT_PRIVATE Type : public TypeBase {
base::Optional<const AggregateType*> AggregateSupertype() const;
virtual std::vector<TypeChecker> GetTypeCheckers() const { return {}; }
virtual std::string GetRuntimeType() const;
+ virtual std::string GetDebugType() const;
static const Type* CommonSupertype(const Type* a, const Type* b);
void AddAlias(std::string alias) const { aliases_.insert(std::move(alias)); }
size_t id() const { return id_; }
@@ -228,7 +229,8 @@ struct Field {
bool is_weak;
bool const_qualified;
bool generate_verify;
- bool relaxed_write;
+ FieldSynchronization read_synchronization;
+ FieldSynchronization write_synchronization;
};
std::ostream& operator<<(std::ostream& os, const Field& name_and_type);
@@ -266,12 +268,7 @@ class AbstractType final : public Type {
DECLARE_TYPE_BOILERPLATE(AbstractType)
const std::string& name() const { return name_; }
std::string ToExplicitString() const override { return name(); }
- std::string GetGeneratedTypeNameImpl() const override {
- if (generated_type_.empty()) {
- return parent()->GetGeneratedTypeName();
- }
- return IsConstexpr() ? generated_type_ : "TNode<" + generated_type_ + ">";
- }
+ std::string GetGeneratedTypeNameImpl() const override;
std::string GetGeneratedTNodeTypeNameImpl() const override;
bool IsConstexpr() const final {
const bool is_constexpr = flags_ & AbstractTypeFlag::kConstexpr;
@@ -398,6 +395,7 @@ class V8_EXPORT_PRIVATE UnionType final : public Type {
std::string GetRuntimeType() const override {
return parent()->GetRuntimeType();
}
+ std::string GetDebugType() const override { return parent()->GetDebugType(); }
friend size_t hash_value(const UnionType& p) {
size_t result = 0;
@@ -574,6 +572,16 @@ class AggregateType : public Type {
return {{name_, ""}};
}
+ const Field& LastField() const {
+ for (base::Optional<const AggregateType*> current = this;
+ current.has_value();
+ current = (*current)->parent()->AggregateSupertype()) {
+ const std::vector<Field>& fields = (*current)->fields_;
+ if (!fields.empty()) return fields[fields.size() - 1];
+ }
+ ReportError("Can't get last field of empty aggregate type");
+ }
+
protected:
AggregateType(Kind kind, const Type* parent, Namespace* nspace,
const std::string& name,
@@ -745,7 +753,7 @@ class ClassType final : public AggregateType {
SourcePosition GetPosition() const { return decl_->pos; }
SourceId AttributedToFile() const;
- // TODO(tebbi): We should no longer pass around types as const pointers, so
+ // TODO(turbofan): We should no longer pass around types as const pointers, so
// that we can avoid mutable fields and const initializers for
// late-initialized portions of types like this one.
void InitializeInstanceTypes(base::Optional<int> own,
diff --git a/deps/v8/src/tracing/trace-event.h b/deps/v8/src/tracing/trace-event.h
index c2fe38849e..b5a16c8590 100644
--- a/deps/v8/src/tracing/trace-event.h
+++ b/deps/v8/src/tracing/trace-event.h
@@ -8,6 +8,9 @@
#include <stddef.h>
#include <memory>
+// Include first to ensure that V8_USE_PERFETTO can be defined before use.
+#include "v8config.h" // NOLINT(build/include_directory)
+
#if defined(V8_USE_PERFETTO)
#include "protos/perfetto/trace/track_event/debug_annotation.pbzero.h"
#include "src/tracing/trace-categories.h"
diff --git a/deps/v8/src/trap-handler/handler-inside-posix.cc b/deps/v8/src/trap-handler/handler-inside-posix.cc
index a59803a0b2..908c8c832c 100644
--- a/deps/v8/src/trap-handler/handler-inside-posix.cc
+++ b/deps/v8/src/trap-handler/handler-inside-posix.cc
@@ -106,20 +106,22 @@ bool TryHandleSignal(int signum, siginfo_t* info, void* context) {
SigUnmaskStack unmask(sigs);
ucontext_t* uc = reinterpret_cast<ucontext_t*>(context);
-#if V8_OS_LINUX
- auto* context_rip = &uc->uc_mcontext.gregs[REG_RIP];
-#elif V8_OS_MACOSX
- auto* context_rip = &uc->uc_mcontext->__ss.__rip;
-#elif V8_OS_FREEBSD
- auto* context_rip = &uc->uc_mcontext.mc_rip;
+#if V8_OS_LINUX && V8_TARGET_ARCH_X64
+ auto* context_ip = &uc->uc_mcontext.gregs[REG_RIP];
+#elif V8_OS_MACOSX && V8_TARGET_ARCH_ARM64
+ auto* context_ip = &uc->uc_mcontext->__ss.__pc;
+#elif V8_OS_MACOSX && V8_TARGET_ARCH_X64
+ auto* context_ip = &uc->uc_mcontext->__ss.__rip;
+#elif V8_OS_FREEBSD && V8_TARGET_ARCH_X64
+ auto* context_ip = &uc->uc_mcontext.mc_rip;
#else
#error Unsupported platform
#endif
- uintptr_t fault_addr = *context_rip;
+ uintptr_t fault_addr = *context_ip;
uintptr_t landing_pad = 0;
if (TryFindLandingPad(fault_addr, &landing_pad)) {
// Tell the caller to return to the landing pad.
- *context_rip = landing_pad;
+ *context_ip = landing_pad;
// We will return to wasm code, so restore the g_thread_in_wasm_code flag.
g_thread_in_wasm_code = true;
return true;
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
index e75355decd..fcdc256a38 100644
--- a/deps/v8/src/trap-handler/trap-handler.h
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -27,6 +27,8 @@ namespace trap_handler {
#define V8_TRAP_HANDLER_SUPPORTED true
#elif V8_TARGET_ARCH_X64 && V8_OS_FREEBSD
#define V8_TRAP_HANDLER_SUPPORTED true
+#elif V8_HOST_ARCH_ARM64 && V8_TARGET_ARCH_ARM64 && V8_OS_MACOSX
+#define V8_TRAP_HANDLER_SUPPORTED true
#else
#define V8_TRAP_HANDLER_SUPPORTED false
#endif
diff --git a/deps/v8/src/utils/allocation.h b/deps/v8/src/utils/allocation.h
index 283685b45e..6adefccf8d 100644
--- a/deps/v8/src/utils/allocation.h
+++ b/deps/v8/src/utils/allocation.h
@@ -57,6 +57,14 @@ void DeleteArray(T* array) {
delete[] array;
}
+template <typename T>
+struct ArrayDeleter {
+ void operator()(T* array) { DeleteArray(array); }
+};
+
+template <typename T>
+using ArrayUniquePtr = std::unique_ptr<T, ArrayDeleter<T>>;
+
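
// Illustrative sketch only: how a unique_ptr with a custom array deleter like
// ArrayUniquePtr above is typically used. DeleteArray here is a simplified
// standalone stand-in for the allocation helper in this header.
#include <memory>

template <typename T>
void DeleteArray(T* array) { delete[] array; }

template <typename T>
struct ArrayDeleter {
  void operator()(T* array) { DeleteArray(array); }
};

template <typename T>
using ArrayUniquePtr = std::unique_ptr<T, ArrayDeleter<T>>;

int main() {
  // The deleter calls DeleteArray (delete[]) instead of plain delete.
  ArrayUniquePtr<int> buffer(new int[16]());
  buffer.get()[3] = 7;
  return buffer.get()[3] == 7 ? 0 : 1;
}
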
// The normal strdup functions use malloc. These versions of StrDup
// and StrNDup uses new and calls the FatalProcessOutOfMemory handler
// if allocation fails.
diff --git a/deps/v8/src/utils/memcopy.h b/deps/v8/src/utils/memcopy.h
index 5f51625f5d..335f12f648 100644
--- a/deps/v8/src/utils/memcopy.h
+++ b/deps/v8/src/utils/memcopy.h
@@ -255,6 +255,11 @@ inline void MemsetPointer(T** dest, U* value, size_t counter) {
reinterpret_cast<Address>(value), counter);
}
+template <typename T>
+inline void MemsetPointer(T** dest, std::nullptr_t, size_t counter) {
+ MemsetPointer(reinterpret_cast<Address*>(dest), Address{0}, counter);
+}
+
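
// Illustrative sketch only: the nullptr overload above lets callers clear a
// pointer array without spelling out the pointee type. This standalone mirror
// shows the intended call shape; the real overload forwards to the
// Address-based MemsetPointer.
#include <cassert>
#include <cstddef>

template <typename T>
void MemsetPointer(T** dest, std::nullptr_t, size_t counter) {
  for (size_t i = 0; i < counter; ++i) dest[i] = nullptr;
}

int main() {
  int a = 1, b = 2;
  int* slots[2] = {&a, &b};
  MemsetPointer(slots, nullptr, 2);
  assert(slots[0] == nullptr && slots[1] == nullptr);
  return 0;
}
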
// Copy from 8bit/16bit chars to 8bit/16bit chars. Values are zero-extended if
// needed. Ranges are not allowed to overlap.
// The separate declaration is needed for the V8_NONNULL, which is not allowed
diff --git a/deps/v8/src/utils/utils.cc b/deps/v8/src/utils/utils.cc
index de4bdcdd1a..203476b157 100644
--- a/deps/v8/src/utils/utils.cc
+++ b/deps/v8/src/utils/utils.cc
@@ -80,12 +80,12 @@ std::ostream& operator<<(std::ostream& os, FeedbackSlot slot) {
return os << "#" << slot.id_;
}
-size_t hash_value(BailoutId id) {
+size_t hash_value(BytecodeOffset id) {
base::hash<int> h;
return h(id.id_);
}
-std::ostream& operator<<(std::ostream& os, BailoutId id) {
+std::ostream& operator<<(std::ostream& os, BytecodeOffset id) {
return os << id.id_;
}
diff --git a/deps/v8/src/utils/utils.h b/deps/v8/src/utils/utils.h
index ae963250e7..e22fbd547e 100644
--- a/deps/v8/src/utils/utils.h
+++ b/deps/v8/src/utils/utils.h
@@ -8,6 +8,7 @@
#include <limits.h>
#include <stdlib.h>
#include <string.h>
+
#include <cmath>
#include <string>
#include <type_traits>
@@ -17,6 +18,7 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
+#include "src/base/safe_conversions.h"
#include "src/base/v8-fallthrough.h"
#include "src/common/globals.h"
#include "src/utils/allocation.h"
@@ -123,15 +125,6 @@ inline double Modulo(double x, double y) {
}
template <typename T>
-T Saturate(int64_t value) {
- static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
- int64_t min = static_cast<int64_t>(std::numeric_limits<T>::min());
- int64_t max = static_cast<int64_t>(std::numeric_limits<T>::max());
- int64_t clamped = std::max(min, std::min(max, value));
- return static_cast<T>(clamped);
-}
-
-template <typename T>
T SaturateAdd(T a, T b) {
if (std::is_signed<T>::value) {
if (a > 0 && b > 0) {
@@ -187,7 +180,7 @@ T SaturateRoundingQMul(T a, T b) {
int64_t product = a * b;
product += round_const;
product >>= (size_in_bits - 1);
- return Saturate<T>(product);
+ return base::saturated_cast<T>(product);
}
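
// Illustrative sketch only: what the base::saturated_cast<T>() call above does
// for the Q-format multiply - it clamps an int64_t intermediate into T's
// range. saturated_cast itself lives in src/base/safe_conversions.h; this is a
// minimal standalone equivalent for int16_t.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>

int16_t SaturateToInt16(int64_t value) {
  constexpr int64_t kMin = std::numeric_limits<int16_t>::min();
  constexpr int64_t kMax = std::numeric_limits<int16_t>::max();
  return static_cast<int16_t>(std::clamp(value, kMin, kMax));
}

int main() {
  // A product far above INT16_MAX (or below INT16_MIN) saturates to the bound.
  assert(SaturateToInt16(int64_t{1} << 40) == INT16_MAX);
  assert(SaturateToInt16(-(int64_t{1} << 40)) == INT16_MIN);
  return 0;
}
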
// Multiply two numbers, returning a result that is twice as wide, no overflow.
@@ -569,29 +562,34 @@ class FeedbackSlot {
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, FeedbackSlot);
-class BailoutId {
+class BytecodeOffset {
public:
- explicit BailoutId(int id) : id_(id) {}
+ explicit BytecodeOffset(int id) : id_(id) {}
int ToInt() const { return id_; }
- static BailoutId None() { return BailoutId(kNoneId); }
+ static BytecodeOffset None() { return BytecodeOffset(kNoneId); }
// Special bailout id support for deopting into the {JSConstructStub} stub.
// The following hard-coded deoptimization points are supported by the stub:
// - {ConstructStubCreate} maps to {construct_stub_create_deopt_pc_offset}.
// - {ConstructStubInvoke} maps to {construct_stub_invoke_deopt_pc_offset}.
- static BailoutId ConstructStubCreate() { return BailoutId(1); }
- static BailoutId ConstructStubInvoke() { return BailoutId(2); }
+ static BytecodeOffset ConstructStubCreate() { return BytecodeOffset(1); }
+ static BytecodeOffset ConstructStubInvoke() { return BytecodeOffset(2); }
bool IsValidForConstructStub() const {
return id_ == ConstructStubCreate().ToInt() ||
id_ == ConstructStubInvoke().ToInt();
}
bool IsNone() const { return id_ == kNoneId; }
- bool operator==(const BailoutId& other) const { return id_ == other.id_; }
- bool operator!=(const BailoutId& other) const { return id_ != other.id_; }
- friend size_t hash_value(BailoutId);
- V8_EXPORT_PRIVATE friend std::ostream& operator<<(std::ostream&, BailoutId);
+ bool operator==(const BytecodeOffset& other) const {
+ return id_ == other.id_;
+ }
+ bool operator!=(const BytecodeOffset& other) const {
+ return id_ != other.id_;
+ }
+ friend size_t hash_value(BytecodeOffset);
+ V8_EXPORT_PRIVATE friend std::ostream& operator<<(std::ostream&,
+ BytecodeOffset);
private:
friend class Builtins;
@@ -600,7 +598,7 @@ class BailoutId {
// Using 0 could disguise errors.
// Builtin continuations bailout ids start here. If you need to add a
- // non-builtin BailoutId, add it before this id so that this Id has the
+ // non-builtin BytecodeOffset, add it before this id so that this Id has the
// highest number.
static const int kFirstBuiltinContinuationId = 1;
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index bee45ad9af..b8c4911722 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -6,6 +6,7 @@
#define V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_H_
#include "src/base/platform/wrappers.h"
+#include "src/codegen/arm/register-arm.h"
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/baseline/liftoff-register.h"
@@ -292,34 +293,35 @@ inline void F64x2Compare(LiftoffAssembler* assm, LiftoffRegister dst,
}
inline void Store(LiftoffAssembler* assm, LiftoffRegister src, MemOperand dst,
- ValueType type) {
+ ValueKind kind) {
#ifdef DEBUG
// The {str} instruction needs a temp register when the immediate in the
// provided MemOperand does not fit into 12 bits. This happens for large stack
// frames. This DCHECK checks that the temp register is available when needed.
DCHECK(UseScratchRegisterScope{assm}.CanAcquire());
#endif
- switch (type.kind()) {
- case ValueType::kI32:
- case ValueType::kOptRef:
- case ValueType::kRef:
- case ValueType::kRtt:
+ switch (kind) {
+ case kI32:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
assm->str(src.gp(), dst);
break;
- case ValueType::kI64:
+ case kI64:
// Positive offsets should be lowered to kI32.
assm->str(src.low_gp(), MemOperand(dst.rn(), dst.offset()));
assm->str(
src.high_gp(),
MemOperand(dst.rn(), dst.offset() + liftoff::kHalfStackSlotSize));
break;
- case ValueType::kF32:
+ case kF32:
assm->vstr(liftoff::GetFloatRegister(src.fp()), dst);
break;
- case ValueType::kF64:
+ case kF64:
assm->vstr(src.fp(), dst);
break;
- case ValueType::kS128: {
+ case kS128: {
UseScratchRegisterScope temps(assm);
Register addr = liftoff::CalculateActualAddress(assm, &temps, dst.rn(),
no_reg, dst.offset());
@@ -332,27 +334,28 @@ inline void Store(LiftoffAssembler* assm, LiftoffRegister src, MemOperand dst,
}
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
- ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
- case ValueType::kOptRef:
- case ValueType::kRef:
- case ValueType::kRtt:
+ ValueKind kind) {
+ switch (kind) {
+ case kI32:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
assm->ldr(dst.gp(), src);
break;
- case ValueType::kI64:
+ case kI64:
assm->ldr(dst.low_gp(), MemOperand(src.rn(), src.offset()));
assm->ldr(
dst.high_gp(),
MemOperand(src.rn(), src.offset() + liftoff::kHalfStackSlotSize));
break;
- case ValueType::kF32:
+ case kF32:
assm->vldr(liftoff::GetFloatRegister(dst.fp()), src);
break;
- case ValueType::kF64:
+ case kF64:
assm->vldr(dst.fp(), src);
break;
- case ValueType::kS128: {
+ case kS128: {
// Get memory address of slot to fill from.
UseScratchRegisterScope temps(assm);
Register addr = liftoff::CalculateActualAddress(assm, &temps, src.rn(),
@@ -531,26 +534,26 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset;
}
-int LiftoffAssembler::SlotSizeForType(ValueType type) {
- switch (type.kind()) {
- case ValueType::kS128:
- return type.element_size_bytes();
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
+ switch (kind) {
+ case kS128:
+ return element_size_bytes(kind);
default:
return kStackSlotSize;
}
}
-bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- return (type.kind() == ValueType::kS128 || type.is_reference_type());
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+ return kind == kS128 || is_reference_type(kind);
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
switch (value.type().kind()) {
- case ValueType::kI32:
+ case kI32:
TurboAssembler::Move(reg.gp(), Operand(value.to_i32(), rmode));
break;
- case ValueType::kI64: {
+ case kI64: {
DCHECK(RelocInfo::IsNone(rmode));
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
@@ -558,10 +561,10 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
TurboAssembler::Move(reg.high_gp(), Operand(high_word));
break;
}
- case ValueType::kF32:
+ case kF32:
vmov(liftoff::GetFloatRegister(reg.fp()), value.to_f32_boxed());
break;
- case ValueType::kF64: {
+ case kF64: {
Register extra_scratch = GetUnusedRegister(kGpReg, {}).gp();
vmov(reg.fp(), Double(value.to_f64_boxed().get_bits()), extra_scratch);
break;
@@ -571,15 +574,31 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
- DCHECK_LE(0, offset);
- DCHECK_EQ(4, size);
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
ldr(dst, liftoff::GetInstanceOperand());
- ldr(dst, MemOperand(dst, offset));
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
- LoadFromInstance(dst, offset, kTaggedSize);
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+ int offset, int size) {
+ DCHECK_LE(0, offset);
+ MemOperand src{instance, offset};
+ switch (size) {
+ case 1:
+ ldrb(dst, src);
+ break;
+ case 4:
+ ldr(dst, src);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ Register instance,
+ int offset) {
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ ldr(dst, MemOperand{instance, offset});
}
void LiftoffAssembler::SpillInstance(Register instance) {
@@ -701,15 +720,23 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
- LiftoffRegList pinned) {
+ LiftoffRegList pinned,
+ SkipWriteBarrier skip_write_barrier) {
STATIC_ASSERT(kTaggedSize == kInt32Size);
- {
- // Store the value.
- UseScratchRegisterScope temps(this);
- MemOperand dst_op =
- liftoff::GetMemOp(this, &temps, dst_addr, offset_reg, offset_imm);
- str(src.gp(), dst_op);
+ Register actual_offset_reg = offset_reg;
+ if (offset_reg != no_reg && offset_imm != 0) {
+ if (cache_state()->is_used(LiftoffRegister(offset_reg))) {
+ actual_offset_reg = GetUnusedRegister(kGpReg, pinned).gp();
+ }
+ add(actual_offset_reg, offset_reg, Operand(offset_imm));
}
+ MemOperand dst_op = actual_offset_reg == no_reg
+ ? MemOperand(dst_addr, offset_imm)
+ : MemOperand(dst_addr, actual_offset_reg);
+ str(src.gp(), dst_op);
+
+ if (skip_write_barrier) return;
+
// The write barrier.
Label write_barrier;
Label exit;
@@ -720,8 +747,11 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
JumpIfSmi(src.gp(), &exit);
CheckPageFlag(src.gp(), MemoryChunk::kPointersToHereAreInterestingMask, eq,
&exit);
- CallRecordWriteStub(dst_addr, Operand(offset_imm), EMIT_REMEMBERED_SET,
- kSaveFPRegs, wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStub(dst_addr,
+ actual_offset_reg == no_reg ? Operand(offset_imm)
+ : Operand(actual_offset_reg),
+ EMIT_REMEMBERED_SET, kSaveFPRegs,
+ wasm::WasmCode::kRecordWrite);
bind(&exit);
}
@@ -1041,12 +1071,7 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
// simpler, even though other register pairs would also be possible.
constexpr Register dst_low = r8;
constexpr Register dst_high = r9;
- if (cache_state()->is_used(LiftoffRegister(dst_low))) {
- SpillRegister(LiftoffRegister(dst_low));
- }
- if (cache_state()->is_used(LiftoffRegister(dst_high))) {
- SpillRegister(LiftoffRegister(dst_high));
- }
+ SpillRegisters(dst_low, dst_high);
{
UseScratchRegisterScope temps(this);
Register actual_addr = liftoff::CalculateActualAddress(
@@ -1056,7 +1081,7 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
}
ParallelRegisterMove(
- {{dst, LiftoffRegister::ForPair(dst_low, dst_high), kWasmI64}});
+ {{dst, LiftoffRegister::ForPair(dst_low, dst_high), kI64}});
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
@@ -1178,11 +1203,10 @@ inline void AtomicI64CompareExchange(LiftoffAssembler* lasm,
__ ParallelRegisterMove(
{{LiftoffRegister::ForPair(new_value_low, new_value_high), new_value,
- kWasmI64},
- {LiftoffRegister::ForPair(expected_low, expected_high), expected,
- kWasmI64},
- {dst_addr, dst_addr_reg, kWasmI32},
- {offset, offset_reg != no_reg ? offset_reg : offset, kWasmI32}});
+ kI64},
+ {LiftoffRegister::ForPair(expected_low, expected_high), expected, kI64},
+ {dst_addr, dst_addr_reg, kI32},
+ {offset, offset_reg != no_reg ? offset_reg : offset, kI32}});
{
UseScratchRegisterScope temps(lasm);
@@ -1210,7 +1234,7 @@ inline void AtomicI64CompareExchange(LiftoffAssembler* lasm,
__ bind(&done);
__ ParallelRegisterMove(
- {{result, LiftoffRegister::ForPair(result_low, result_high), kWasmI64}});
+ {{result, LiftoffRegister::ForPair(result_low, result_high), kI64}});
}
#undef __
} // namespace liftoff
@@ -1321,52 +1345,52 @@ void LiftoffAssembler::AtomicFence() { dmb(ISH); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
MemOperand src(fp, (caller_slot_idx + 1) * kSystemPointerSize);
- liftoff::Load(this, dst, src, type);
+ liftoff::Load(this, dst, src, kind);
}
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
MemOperand dst(fp, (caller_slot_idx + 1) * kSystemPointerSize);
- liftoff::Store(this, src, dst, type);
+ liftoff::Store(this, src, dst, kind);
}
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
- ValueType type) {
+ ValueKind kind) {
MemOperand src(sp, offset);
- liftoff::Load(this, dst, src, type);
+ liftoff::Load(this, dst, src, kind);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
- ValueType type) {
+ ValueKind kind) {
DCHECK_NE(dst_offset, src_offset);
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
- Fill(reg, src_offset, type);
- Spill(dst_offset, reg, type);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(kind), {});
+ Fill(reg, src_offset, kind);
+ Spill(dst_offset, reg, kind);
}
-void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src);
- DCHECK(type == kWasmI32 || type.is_reference_type());
+ DCHECK(kind == kI32 || is_reference_type(kind));
TurboAssembler::Move(dst, src);
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
- ValueType type) {
+ ValueKind kind) {
DCHECK_NE(dst, src);
- if (type == kWasmF32) {
+ if (kind == kF32) {
vmov(liftoff::GetFloatRegister(dst), liftoff::GetFloatRegister(src));
- } else if (type == kWasmF64) {
+ } else if (kind == kF64) {
vmov(dst, src);
} else {
- DCHECK_EQ(kWasmS128, type);
+ DCHECK_EQ(kS128, kind);
vmov(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(src));
}
}
-void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
// The {str} instruction needs a temp register when the immediate in the
// provided MemOperand does not fit into 12 bits. This happens for large stack
// frames. This DCHECK checks that the temp register is available when needed.
@@ -1374,7 +1398,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
DCHECK_LT(0, offset);
RecordUsedSpillOffset(offset);
MemOperand dst(fp, -offset);
- liftoff::Store(this, reg, dst, type);
+ liftoff::Store(this, reg, dst, kind);
}
void LiftoffAssembler::Spill(int offset, WasmValue value) {
@@ -1390,11 +1414,11 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
src = temps.Acquire();
}
switch (value.type().kind()) {
- case ValueType::kI32:
+ case kI32:
mov(src, Operand(value.to_i32()));
str(src, dst);
break;
- case ValueType::kI64: {
+ case kI64: {
int32_t low_word = value.to_i64();
mov(src, Operand(low_word));
str(src, liftoff::GetHalfStackSlot(offset, kLowWord));
@@ -1409,8 +1433,8 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
}
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
- liftoff::Load(this, reg, liftoff::GetStackSlot(offset), type);
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
+ liftoff::Load(this, reg, liftoff::GetStackSlot(offset), kind);
}
void LiftoffAssembler::FillI64Half(Register reg, int offset, RegPairHalf half) {
@@ -2161,16 +2185,16 @@ void LiftoffAssembler::emit_jump(Label* label) { b(label); }
void LiftoffAssembler::emit_jump(Register target) { bx(target); }
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueType type,
+ Label* label, ValueKind kind,
Register lhs, Register rhs) {
Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs == no_reg) {
- DCHECK_EQ(type, kWasmI32);
+ DCHECK_EQ(kind, kI32);
cmp(lhs, Operand(0));
} else {
- DCHECK(type == kWasmI32 ||
- (type.is_reference_type() &&
+ DCHECK(kind == kI32 ||
+ (is_reference_type(kind) &&
(liftoff_cond == kEqual || liftoff_cond == kUnequal)));
cmp(lhs, rhs);
}
@@ -2279,7 +2303,7 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
LiftoffRegister false_value,
- ValueType type) {
+ ValueKind kind) {
return false;
}
@@ -2362,7 +2386,33 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
Register addr, Register offset_reg,
uintptr_t offset_imm, LoadType type,
uint8_t laneidx, uint32_t* protected_load_pc) {
- bailout(kSimd, "loadlane");
+ UseScratchRegisterScope temps(this);
+ Register actual_src_addr = liftoff::CalculateActualAddress(
+ this, &temps, addr, offset_reg, offset_imm);
+ TurboAssembler::Move(liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
+ *protected_load_pc = pc_offset();
+ LoadStoreLaneParams load_params(type.mem_type().representation(), laneidx);
+ NeonListOperand dst_op =
+ NeonListOperand(load_params.low_op ? dst.low_fp() : dst.high_fp());
+ TurboAssembler::LoadLane(load_params.sz, dst_op, load_params.laneidx,
+ NeonMemOperand(actual_src_addr));
+}
+
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t laneidx,
+ uint32_t* protected_store_pc) {
+ UseScratchRegisterScope temps(this);
+ Register actual_dst_addr =
+ liftoff::CalculateActualAddress(this, &temps, dst, offset, offset_imm);
+ *protected_store_pc = pc_offset();
+
+ LoadStoreLaneParams store_params(type.mem_rep(), laneidx);
+ NeonListOperand src_op =
+ NeonListOperand(store_params.low_op ? src.low_fp() : src.high_fp());
+ TurboAssembler::StoreLane(store_params.sz, src_op, store_params.laneidx,
+ NeonMemOperand(actual_dst_addr));
}
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
@@ -2548,6 +2598,27 @@ void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
vmov(dest.high(), right.high(), gt);
}
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ LowDwVfpRegister src_d = LowDwVfpRegister::from_code(src.low_fp().code());
+ vcvt_f64_s32(dst.low_fp(), src_d.low());
+ vcvt_f64_s32(dst.high_fp(), src_d.high());
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ LowDwVfpRegister src_d = LowDwVfpRegister::from_code(src.low_fp().code());
+ vcvt_f64_u32(dst.low_fp(), src_d.low());
+ vcvt_f64_u32(dst.high_fp(), src_d.high());
+}
+
+void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ LowDwVfpRegister src_d = LowDwVfpRegister::from_code(src.low_fp().code());
+ vcvt_f64_f32(dst.low_fp(), src_d.low());
+ vcvt_f64_f32(dst.high_fp(), src_d.high());
+}
+
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
vdup(Neon32, liftoff::GetSimd128Register(dst), src.fp(), 0);
@@ -2767,6 +2838,11 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
liftoff::GetSimd128Register(src));
}
+void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ V64x2AllTrue(dst.gp(), liftoff::GetSimd128Register(src));
+}
+
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdShift<liftoff::kLeft, NeonS64, Neon32>(this, dst, lhs, rhs);
@@ -2890,7 +2966,27 @@ void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i64x2_bitmask");
+ I64x2BitMask(dst.gp(), liftoff::GetSimd128Register(src));
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vmovl(NeonS32, liftoff::GetSimd128Register(dst), src.low_fp());
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vmovl(NeonS32, liftoff::GetSimd128Register(dst), src.high_fp());
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vmovl(NeonU32, liftoff::GetSimd128Register(dst), src.low_fp());
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vmovl(NeonU32, liftoff::GetSimd128Register(dst), src.high_fp());
}
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
@@ -2920,11 +3016,6 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
liftoff::GetSimd128Register(src));
}
-void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- liftoff::EmitAnyTrue(this, dst, src);
-}
-
void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
UseScratchRegisterScope temps(this);
@@ -3058,6 +3149,16 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
vpadd(Neon32, dest.high(), scratch.low(), scratch.high());
}
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.extadd_pairwise_i16x8_s");
+}
+
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.extadd_pairwise_i16x8_u");
+}
+
void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -3097,11 +3198,6 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
liftoff::GetSimd128Register(src));
}
-void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- liftoff::EmitAnyTrue(this, dst, src);
-}
-
void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
UseScratchRegisterScope temps(this);
@@ -3271,6 +3367,16 @@ void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
imm_lane_idx);
}
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i16x8.extadd_pairwise_i8x16_s");
+}
+
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i16x8.extadd_pairwise_i8x16_u");
+}
+
void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -3297,6 +3403,14 @@ void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
src2.high_fp());
}
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ vqrdmulh(NeonS16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src1),
+ liftoff::GetSimd128Register(src2));
+}
+
void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs,
@@ -3349,6 +3463,11 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
}
}
+void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vcnt(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(src));
+}
+
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
vdup(Neon8, liftoff::GetSimd128Register(dst), src.gp());
@@ -3381,8 +3500,8 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
liftoff::GetSimd128Register(src));
}
-void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
liftoff::EmitAnyTrue(this, dst, src);
}
@@ -3644,6 +3763,29 @@ void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ I64x2Eq(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(lhs),
+ liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2_ne");
+}
+
+void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ I64x2GtS(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(lhs),
+ liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ I64x2GeS(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(lhs),
+ liftoff::GetSimd128Register(rhs));
+}
+
void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
vceq(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(lhs),
@@ -3754,6 +3896,14 @@ void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
liftoff::GetSimd128Register(src));
}
+void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ LowDwVfpRegister dst_d = LowDwVfpRegister::from_code(dst.low_fp().code());
+ vcvt_f32_f64(dst_d.low(), src.low_fp());
+ vcvt_f32_f64(dst_d.high(), src.high_fp());
+ vmov(dst.high_fp(), 0);
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -3818,6 +3968,22 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
vmovl(NeonU16, liftoff::GetSimd128Register(dst), src.high_fp());
}
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ LowDwVfpRegister dst_d = LowDwVfpRegister::from_code(dst.low_fp().code());
+ vcvt_s32_f64(dst_d.low(), src.low_fp());
+ vcvt_s32_f64(dst_d.high(), src.high_fp());
+ vmov(dst.high_fp(), 0);
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ LowDwVfpRegister dst_d = LowDwVfpRegister::from_code(dst.low_fp().code());
+ vcvt_u32_f64(dst_d.low(), src.low_fp());
+ vcvt_u32_f64(dst_d.high(), src.high_fp());
+ vmov(dst.high_fp(), 0);
+}
+
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -3857,6 +4023,11 @@ void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
liftoff::GetSimd128Register(src));
}
+void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2.abs");
+}
+
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
ldr(limit_address, MemOperand(limit_address));
cmp(sp, limit_address);
@@ -3942,10 +4113,10 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
Ret();
}
-void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
- ValueType out_argument_type, int stack_bytes,
+ ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
// Arguments are passed by pushing them all to the stack and then passing
// a pointer to them.
@@ -3954,22 +4125,22 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
AllocateStackSpace(stack_bytes);
int arg_bytes = 0;
- for (ValueType param_type : sig->parameters()) {
- switch (param_type.kind()) {
- case ValueType::kI32:
+ for (ValueKind param_kind : sig->parameters()) {
+ switch (param_kind) {
+ case kI32:
str(args->gp(), MemOperand(sp, arg_bytes));
break;
- case ValueType::kI64:
+ case kI64:
str(args->low_gp(), MemOperand(sp, arg_bytes));
str(args->high_gp(), MemOperand(sp, arg_bytes + kSystemPointerSize));
break;
- case ValueType::kF32:
+ case kF32:
vstr(liftoff::GetFloatRegister(args->fp()), MemOperand(sp, arg_bytes));
break;
- case ValueType::kF64:
+ case kF64:
vstr(args->fp(), MemOperand(sp, arg_bytes));
break;
- case ValueType::kS128:
+ case kS128:
vstr(args->low_fp(), MemOperand(sp, arg_bytes));
vstr(args->high_fp(),
MemOperand(sp, arg_bytes + 2 * kSystemPointerSize));
@@ -3978,7 +4149,7 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
UNREACHABLE();
}
args++;
- arg_bytes += param_type.element_size_bytes();
+ arg_bytes += element_size_bytes(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
@@ -4002,22 +4173,22 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
}
// Load potential output value from the buffer on the stack.
- if (out_argument_type != kWasmStmt) {
- switch (out_argument_type.kind()) {
- case ValueType::kI32:
+ if (out_argument_kind != kStmt) {
+ switch (out_argument_kind) {
+ case kI32:
ldr(result_reg->gp(), MemOperand(sp));
break;
- case ValueType::kI64:
+ case kI64:
ldr(result_reg->low_gp(), MemOperand(sp));
ldr(result_reg->high_gp(), MemOperand(sp, kSystemPointerSize));
break;
- case ValueType::kF32:
+ case kF32:
vldr(liftoff::GetFloatRegister(result_reg->fp()), MemOperand(sp));
break;
- case ValueType::kF64:
+ case kF64:
vldr(result_reg->fp(), MemOperand(sp));
break;
- case ValueType::kS128:
+ case kS128:
vld1(Neon8, NeonListOperand(result_reg->low_fp(), 2),
NeonMemOperand(sp));
break;
@@ -4036,7 +4207,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
Jump(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
DCHECK(target != no_reg);
@@ -4068,25 +4239,27 @@ void LiftoffStackSlots::Construct() {
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack: {
- switch (src.type().kind()) {
+ switch (src.kind()) {
// i32 and i64 can be treated as similar cases, i64 being previously
// split into two i32 registers
- case ValueType::kI32:
- case ValueType::kI64:
- case ValueType::kF32: {
+ case kI32:
+ case kI64:
+ case kF32:
+ case kRef:
+ case kOptRef: {
UseScratchRegisterScope temps(asm_);
Register scratch = temps.Acquire();
asm_->ldr(scratch,
liftoff::GetHalfStackSlot(slot.src_offset_, slot.half_));
asm_->Push(scratch);
} break;
- case ValueType::kF64: {
+ case kF64: {
UseScratchRegisterScope temps(asm_);
DwVfpRegister scratch = temps.AcquireD();
asm_->vldr(scratch, liftoff::GetStackSlot(slot.src_offset_));
asm_->vpush(scratch);
} break;
- case ValueType::kS128: {
+ case kS128: {
MemOperand mem_op = liftoff::GetStackSlot(slot.src_offset_);
UseScratchRegisterScope temps(asm_);
Register addr = liftoff::CalculateActualAddress(
@@ -4102,24 +4275,24 @@ void LiftoffStackSlots::Construct() {
break;
}
case LiftoffAssembler::VarState::kRegister:
- switch (src.type().kind()) {
- case ValueType::kI64: {
+ switch (src.kind()) {
+ case kI64: {
LiftoffRegister reg =
slot.half_ == kLowWord ? src.reg().low() : src.reg().high();
asm_->push(reg.gp());
} break;
- case ValueType::kI32:
- case ValueType::kRef:
- case ValueType::kOptRef:
+ case kI32:
+ case kRef:
+ case kOptRef:
asm_->push(src.reg().gp());
break;
- case ValueType::kF32:
+ case kF32:
asm_->vpush(liftoff::GetFloatRegister(src.reg().fp()));
break;
- case ValueType::kF64:
+ case kF64:
asm_->vpush(src.reg().fp());
break;
- case ValueType::kS128:
+ case kS128:
asm_->vpush(liftoff::GetSimd128Register(src.reg()));
break;
default:
@@ -4127,7 +4300,7 @@ void LiftoffStackSlots::Construct() {
}
break;
case LiftoffAssembler::VarState::kIntConst: {
- DCHECK(src.type() == kWasmI32 || src.type() == kWasmI64);
+ DCHECK(src.kind() == kI32 || src.kind() == kI64);
UseScratchRegisterScope temps(asm_);
Register scratch = temps.Acquire();
// The high word is the sign extension of the low word.
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 815586ecd1..a2fe80891c 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -72,20 +72,21 @@ inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
-inline CPURegister GetRegFromType(const LiftoffRegister& reg, ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
+inline CPURegister GetRegFromType(const LiftoffRegister& reg, ValueKind kind) {
+ switch (kind) {
+ case kI32:
return reg.gp().W();
- case ValueType::kI64:
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kRtt:
+ case kI64:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
return reg.gp().X();
- case ValueType::kF32:
+ case kF32:
return reg.fp().S();
- case ValueType::kF64:
+ case kF64:
return reg.fp().D();
- case ValueType::kS128:
+ case kS128:
return reg.fp().Q();
default:
UNREACHABLE();
@@ -103,15 +104,15 @@ inline CPURegList PadVRegList(RegList list) {
}
inline CPURegister AcquireByType(UseScratchRegisterScope* temps,
- ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
+ ValueKind kind) {
+ switch (kind) {
+ case kI32:
return temps->AcquireW();
- case ValueType::kI64:
+ case kI64:
return temps->AcquireX();
- case ValueType::kF32:
+ case kF32:
return temps->AcquireS();
- case ValueType::kF64:
+ case kF64:
return temps->AcquireD();
default:
UNREACHABLE();
@@ -124,15 +125,37 @@ inline MemOperand GetMemOp(LiftoffAssembler* assm,
Register offset, T offset_imm) {
if (offset.is_valid()) {
if (offset_imm == 0) return MemOperand(addr.X(), offset.W(), UXTW);
- Register tmp = temps->AcquireW();
- // TODO(clemensb): Do a 64-bit addition if memory64 is used.
+ Register tmp = temps->AcquireX();
DCHECK_GE(kMaxUInt32, offset_imm);
- assm->Add(tmp, offset.W(), offset_imm);
- return MemOperand(addr.X(), tmp, UXTW);
+ assm->Add(tmp, offset.X(), offset_imm);
+ return MemOperand(addr.X(), tmp);
}
return MemOperand(addr.X(), offset_imm);
}
+// Certain load instructions do not support offset (register or immediate).
+// This creates a MemOperand that is suitable for such instructions by adding
+// |addr|, |offset| (if needed), and |offset_imm| into a temporary.
+inline MemOperand GetMemOpWithImmOffsetZero(LiftoffAssembler* assm,
+ UseScratchRegisterScope* temps,
+ Register addr, Register offset,
+ uintptr_t offset_imm) {
+ Register tmp = temps->AcquireX();
+ if (offset.is_valid()) {
+ // offset has passed BoundsCheckMem in liftoff-compiler and has been
+ // zero-extended, so it is fine to use the full width of the register.
+ assm->Add(tmp, addr, offset);
+ if (offset_imm != 0) {
+ assm->Add(tmp, tmp, offset_imm);
+ }
+ } else {
+ if (offset_imm != 0) {
+ assm->Add(tmp, addr, offset_imm);
+ }
+ }
+ return MemOperand(tmp.X(), 0);
+}
+
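// Illustrative sketch (not from the V8 sources): the address that
// GetMemOpWithImmOffsetZero materializes into a single register. ld1r/ld1/st1
// only accept a bare base register (no register or immediate offset), so base,
// index, and immediate are folded into one temporary up front.
#include <cstdint>

uintptr_t EffectiveAddress(uintptr_t base, bool has_index, uintptr_t index,
                           uintptr_t offset_imm) {
  uintptr_t addr = base;
  if (has_index) addr += index;  // index was already bounds-checked and extended
  return addr + offset_imm;
}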
enum class ShiftDirection : bool { kLeft, kRight };
enum class ShiftSign : bool { kSigned, kUnsigned };
@@ -334,34 +357,34 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset;
}
-int LiftoffAssembler::SlotSizeForType(ValueType type) {
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
// TODO(zhin): Unaligned accesses typically take additional cycles; we should do
// some performance testing to see how big an effect it will have.
- switch (type.kind()) {
- case ValueType::kS128:
- return type.element_size_bytes();
+ switch (kind) {
+ case kS128:
+ return element_size_bytes(kind);
default:
return kStackSlotSize;
}
}
-bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- return type.kind() == ValueType::kS128 || type.is_reference_type();
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+ return kind == kS128 || is_reference_type(kind);
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
switch (value.type().kind()) {
- case ValueType::kI32:
+ case kI32:
Mov(reg.gp().W(), Immediate(value.to_i32(), rmode));
break;
- case ValueType::kI64:
+ case kI64:
Mov(reg.gp().X(), Immediate(value.to_i64(), rmode));
break;
- case ValueType::kF32:
+ case kF32:
Fmov(reg.fp().S(), value.to_f32_boxed().get_scalar());
break;
- case ValueType::kF64:
+ case kF64:
Fmov(reg.fp().D(), value.to_f64_boxed().get_scalar());
break;
default:
@@ -369,21 +392,34 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
- DCHECK_LE(0, offset);
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
Ldr(dst, liftoff::GetInstanceOperand());
- DCHECK(size == 4 || size == 8);
- if (size == 4) {
- Ldr(dst.W(), MemOperand(dst, offset));
- } else {
- Ldr(dst, MemOperand(dst, offset));
+}
+
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+ int offset, int size) {
+ DCHECK_LE(0, offset);
+ MemOperand src{instance, offset};
+ switch (size) {
+ case 1:
+ Ldrb(dst.W(), src);
+ break;
+ case 4:
+ Ldr(dst.W(), src);
+ break;
+ case 8:
+ Ldr(dst, src);
+ break;
+ default:
+ UNIMPLEMENTED();
}
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ Register instance,
+ int offset) {
DCHECK_LE(0, offset);
- Ldr(dst, liftoff::GetInstanceOperand());
- LoadTaggedPointerField(dst, MemOperand(dst, offset));
+ LoadTaggedPointerField(dst, MemOperand{instance, offset});
}
void LiftoffAssembler::SpillInstance(Register instance) {
@@ -408,12 +444,16 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
- LiftoffRegList pinned) {
+ LiftoffRegList pinned,
+ SkipWriteBarrier skip_write_barrier) {
// Store the value.
UseScratchRegisterScope temps(this);
MemOperand dst_op =
liftoff::GetMemOp(this, &temps, dst_addr, offset_reg, offset_imm);
StoreTaggedField(src.gp(), dst_op);
+
+ if (skip_write_barrier) return;
+
// The write barrier.
Label write_barrier;
Label exit;
@@ -427,8 +467,11 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
}
CheckPageFlag(src.gp(), MemoryChunk::kPointersToHereAreInterestingMask, ne,
&exit);
- CallRecordWriteStub(dst_addr, Operand(offset_imm), EMIT_REMEMBERED_SET,
- kSaveFPRegs, wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStub(
+ dst_addr,
+ dst_op.IsRegisterOffset() ? Operand(dst_op.regoffset().X())
+ : Operand(dst_op.offset()),
+ EMIT_REMEMBERED_SET, kSaveFPRegs, wasm::WasmCode::kRecordWrite);
bind(&exit);
}
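// Illustrative sketch (not from the V8 sources): a model of the decision the
// emitted write barrier makes. The struct and field names are made up; the
// host-side page-flag test sits just above this hunk in the original function
// and is assumed here.
#include <cstdint>

struct PageFlags {
  bool pointers_from_here_are_interesting;  // flag on the host object's page
  bool pointers_to_here_are_interesting;    // flag on the stored value's page
};

bool NeedsRecordWriteCall(const PageFlags& host, const PageFlags& value,
                          bool skip_write_barrier) {
  if (skip_write_barrier) return false;  // caller proved the store needs no barrier
  return host.pointers_from_here_are_interesting &&
         value.pointers_to_here_are_interesting;
}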
@@ -797,56 +840,56 @@ void LiftoffAssembler::AtomicFence() { Dmb(InnerShareable, BarrierAll); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
int32_t offset = (caller_slot_idx + 1) * LiftoffAssembler::kStackSlotSize;
- Ldr(liftoff::GetRegFromType(dst, type), MemOperand(fp, offset));
+ Ldr(liftoff::GetRegFromType(dst, kind), MemOperand(fp, offset));
}
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
int32_t offset = (caller_slot_idx + 1) * LiftoffAssembler::kStackSlotSize;
- Str(liftoff::GetRegFromType(src, type), MemOperand(fp, offset));
+ Str(liftoff::GetRegFromType(src, kind), MemOperand(fp, offset));
}
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
- ValueType type) {
- Ldr(liftoff::GetRegFromType(dst, type), MemOperand(sp, offset));
+ ValueKind kind) {
+ Ldr(liftoff::GetRegFromType(dst, kind), MemOperand(sp, offset));
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
- ValueType type) {
+ ValueKind kind) {
UseScratchRegisterScope temps(this);
- CPURegister scratch = liftoff::AcquireByType(&temps, type);
+ CPURegister scratch = liftoff::AcquireByType(&temps, kind);
Ldr(scratch, liftoff::GetStackSlot(src_offset));
Str(scratch, liftoff::GetStackSlot(dst_offset));
}
-void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
- if (type == kWasmI32) {
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
+ if (kind == kI32) {
Mov(dst.W(), src.W());
} else {
- DCHECK(kWasmI64 == type || type.is_reference_type());
+ DCHECK(kI64 == kind || is_reference_type(kind));
Mov(dst.X(), src.X());
}
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
- ValueType type) {
- if (type == kWasmF32) {
+ ValueKind kind) {
+ if (kind == kF32) {
Fmov(dst.S(), src.S());
- } else if (type == kWasmF64) {
+ } else if (kind == kF64) {
Fmov(dst.D(), src.D());
} else {
- DCHECK_EQ(kWasmS128, type);
+ DCHECK_EQ(kS128, kind);
Mov(dst.Q(), src.Q());
}
}
-void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
- Str(liftoff::GetRegFromType(reg, type), dst);
+ Str(liftoff::GetRegFromType(reg, kind), dst);
}
void LiftoffAssembler::Spill(int offset, WasmValue value) {
@@ -855,7 +898,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
UseScratchRegisterScope temps(this);
CPURegister src = CPURegister::no_reg();
switch (value.type().kind()) {
- case ValueType::kI32:
+ case kI32:
if (value.to_i32() == 0) {
src = wzr;
} else {
@@ -863,7 +906,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
Mov(src.W(), value.to_i32());
}
break;
- case ValueType::kI64:
+ case kI64:
if (value.to_i64() == 0) {
src = xzr;
} else {
@@ -878,9 +921,9 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
Str(src, dst);
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
MemOperand src = liftoff::GetStackSlot(offset);
- Ldr(liftoff::GetRegFromType(reg, type), src);
+ Ldr(liftoff::GetRegFromType(reg, kind), src);
}
void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
@@ -1463,24 +1506,25 @@ void LiftoffAssembler::emit_jump(Label* label) { B(label); }
void LiftoffAssembler::emit_jump(Register target) { Br(target); }
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueType type,
+ Label* label, ValueKind kind,
Register lhs, Register rhs) {
Condition cond = liftoff::ToCondition(liftoff_cond);
- switch (type.kind()) {
- case ValueType::kI32:
+ switch (kind) {
+ case kI32:
if (rhs.is_valid()) {
Cmp(lhs.W(), rhs.W());
} else {
Cmp(lhs.W(), wzr);
}
break;
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kRtt:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
DCHECK(rhs.is_valid());
DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
V8_FALLTHROUGH;
- case ValueType::kI64:
+ case kI64:
if (rhs.is_valid()) {
Cmp(lhs.X(), rhs.X());
} else {
@@ -1554,7 +1598,7 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
LiftoffRegister false_value,
- ValueType type) {
+ ValueKind kind) {
return false;
}
@@ -1572,7 +1616,10 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
uint32_t* protected_load_pc) {
UseScratchRegisterScope temps(this);
MemOperand src_op =
- liftoff::GetMemOp(this, &temps, src_addr, offset_reg, offset_imm);
+ transform == LoadTransformationKind::kSplat
+ ? liftoff::GetMemOpWithImmOffsetZero(this, &temps, src_addr,
+ offset_reg, offset_imm)
+ : liftoff::GetMemOp(this, &temps, src_addr, offset_reg, offset_imm);
*protected_load_pc = pc_offset();
MachineType memtype = type.mem_type();
@@ -1604,20 +1651,7 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
Ldr(dst.fp().D(), src_op);
}
} else {
- // ld1r only allows no offset or post-index, so emit an add.
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
- if (src_op.IsRegisterOffset()) {
- // We have 2 tmp gps, so it's okay to acquire 1 more here, and actually
- // doesn't matter if we acquire the same one.
- Register tmp = temps.AcquireX();
- Add(tmp, src_op.base(), src_op.regoffset().X());
- src_op = MemOperand(tmp.X(), 0);
- } else if (src_op.IsImmediateOffset() && src_op.offset() != 0) {
- Register tmp = temps.AcquireX();
- Add(tmp, src_op.base(), src_op.offset());
- src_op = MemOperand(tmp.X(), 0);
- }
-
if (memtype == MachineType::Int8()) {
ld1r(dst.fp().V16B(), src_op);
} else if (memtype == MachineType::Int16()) {
@@ -1634,7 +1668,49 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
Register addr, Register offset_reg,
uintptr_t offset_imm, LoadType type,
uint8_t laneidx, uint32_t* protected_load_pc) {
- bailout(kSimd, "loadlane");
+ UseScratchRegisterScope temps(this);
+ MemOperand src_op = liftoff::GetMemOpWithImmOffsetZero(
+ this, &temps, addr, offset_reg, offset_imm);
+ *protected_load_pc = pc_offset();
+
+ MachineType mem_type = type.mem_type();
+ if (dst != src) {
+ Mov(dst.fp().Q(), src.fp().Q());
+ }
+
+ if (mem_type == MachineType::Int8()) {
+ ld1(dst.fp().B(), laneidx, src_op);
+ } else if (mem_type == MachineType::Int16()) {
+ ld1(dst.fp().H(), laneidx, src_op);
+ } else if (mem_type == MachineType::Int32()) {
+ ld1(dst.fp().S(), laneidx, src_op);
+ } else if (mem_type == MachineType::Int64()) {
+ ld1(dst.fp().D(), laneidx, src_op);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc) {
+ UseScratchRegisterScope temps(this);
+ MemOperand dst_op =
+ liftoff::GetMemOpWithImmOffsetZero(this, &temps, dst, offset, offset_imm);
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+
+ MachineRepresentation rep = type.mem_rep();
+ if (rep == MachineRepresentation::kWord8) {
+ st1(src.fp().B(), lane, dst_op);
+ } else if (rep == MachineRepresentation::kWord16) {
+ st1(src.fp().H(), lane, dst_op);
+ } else if (rep == MachineRepresentation::kWord32) {
+ st1(src.fp().S(), lane, dst_op);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kWord64, rep);
+ st1(src.fp().D(), lane, dst_op);
+ }
}
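// Illustrative sketch (not from the V8 sources): scalar model of
// v128.load32_lane, which LoadLane above implements with ld1. One scalar is
// read from memory and replaces a single lane of the existing vector, which is
// why src is first copied into dst when the registers differ. StoreLane is the
// inverse: it extracts one lane and writes only that scalar to memory.
#include <array>
#include <cstdint>
#include <cstring>

std::array<uint32_t, 4> Load32Lane(std::array<uint32_t, 4> vec,
                                   const uint8_t* mem, int lane) {
  uint32_t scalar;
  std::memcpy(&scalar, mem, sizeof(scalar));
  vec[lane] = scalar;  // all other lanes keep their previous value
  return vec;
}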
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
@@ -1767,6 +1843,23 @@ void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
}
}
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Sxtl(dst.fp(), src.fp().V2S());
+ Scvtf(dst.fp(), dst.fp());
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Uxtl(dst.fp(), src.fp().V2S());
+ Ucvtf(dst.fp(), dst.fp());
+}
+
+void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Fcvtl(dst.fp().V2D(), src.fp().V2S());
+}
+
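// Illustrative sketch (not from the V8 sources): scalar model of the
// conversions above. Both f64x2.convert_low_i32x4_s and
// f64x2.promote_low_f32x4 read only the two low lanes of the source vector.
#include <array>
#include <cstdint>

std::array<double, 2> ConvertLowI32x4S(const std::array<int32_t, 4>& v) {
  return {static_cast<double>(v[0]), static_cast<double>(v[1])};
}

std::array<double, 2> PromoteLowF32x4(const std::array<float, 4>& v) {
  return {static_cast<double>(v[0]), static_cast<double>(v[1])};
}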
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
Dup(dst.fp().V4S(), src.fp().S(), 0);
@@ -1917,6 +2010,11 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
Neg(dst.fp().V2D(), src.fp().V2D());
}
+void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ V64x2AllTrue(dst.gp(), src.fp());
+}
+
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdShift<liftoff::ShiftDirection::kLeft>(
@@ -2014,7 +2112,27 @@ void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i64x2_bitmask");
+ I64x2BitMask(dst.gp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Sxtl(dst.fp().V2D(), src.fp().V2S());
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Sxtl2(dst.fp().V2D(), src.fp().V4S());
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Uxtl(dst.fp().V2D(), src.fp().V2S());
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Uxtl2(dst.fp().V2D(), src.fp().V4S());
}
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
@@ -2043,11 +2161,6 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
Neg(dst.fp().V4S(), src.fp().V4S());
}
-void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- liftoff::EmitAnyTrue(this, dst, src);
-}
-
void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue(this, dst, src, kFormat4S);
@@ -2158,6 +2271,16 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
Addp(dst.fp().V4S(), tmp1, tmp2);
}
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.extadd_pairwise_i16x8_s");
+}
+
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.extadd_pairwise_i16x8_u");
+}
+
void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -2214,11 +2337,6 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
Neg(dst.fp().V8H(), src.fp().V8H());
}
-void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- liftoff::EmitAnyTrue(this, dst, src);
-}
-
void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue(this, dst, src, kFormat8H);
@@ -2383,6 +2501,11 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
}
}
+void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Cnt(dst.fp().V16B(), src.fp().V16B());
+}
+
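// Illustrative sketch (not from the V8 sources): i8x16.popcnt is a per-byte
// population count, which is exactly what the single CNT above performs.
#include <array>
#include <bitset>
#include <cstdint>

std::array<uint8_t, 16> I8x16PopcntRef(const std::array<uint8_t, 16>& v) {
  std::array<uint8_t, 16> out;
  for (int i = 0; i < 16; ++i) {
    out[i] = static_cast<uint8_t>(std::bitset<8>(v[i]).count());
  }
  return out;
}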
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
Dup(dst.fp().V16B(), src.gp().W());
@@ -2415,8 +2538,8 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
Neg(dst.fp().V16B(), src.fp().V16B());
}
-void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
liftoff::EmitAnyTrue(this, dst, src);
}
@@ -2638,6 +2761,27 @@ void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
Cmhs(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
}
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmeq(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
+}
+
+void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmeq(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
+ Mvn(dst.fp().V2D(), dst.fp().V2D());
+}
+
+void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmgt(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
+}
+
+void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmge(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
+}
+
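// Illustrative sketch (not from the V8 sources): single-lane model of the
// comparisons above. Wasm SIMD compares produce an all-ones or all-zeros mask
// per lane; NEON has no 64-bit "compare not equal", so i64x2.ne is lowered as
// CMEQ followed by a bitwise NOT (MVN).
#include <cstdint>

uint64_t I64x2LaneNe(uint64_t a, uint64_t b) {
  uint64_t eq_mask = (a == b) ? ~uint64_t{0} : uint64_t{0};  // CMEQ
  return ~eq_mask;                                           // MVN
}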
void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
Fcmeq(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
@@ -2736,6 +2880,11 @@ void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
Ucvtf(dst.fp().V4S(), src.fp().V4S());
}
+void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Fcvtn(dst.fp().V2S(), src.fp().V2D());
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2832,6 +2981,18 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
Uxtl2(dst.fp().V4S(), src.fp().V8H());
}
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Fcvtzs(dst.fp().V2D(), src.fp().V2D());
+ Sqxtn(dst.fp().V2S(), dst.fp().V2D());
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Fcvtzu(dst.fp().V2D(), src.fp().V2D());
+ Uqxtn(dst.fp().V2S(), dst.fp().V2D());
+}
+
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2860,6 +3021,16 @@ void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
Abs(dst.fp().V8H(), src.fp().V8H());
}
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i16x8.extadd_pairwise_i8x16_s");
+}
+
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i16x8.extadd_pairwise_i8x16_u");
+}
+
void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -2884,11 +3055,22 @@ void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
Umull2(dst.fp().V8H(), src1.fp().V16B(), src2.fp().V16B());
}
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ Sqrdmulh(dst.fp().V8H(), src1.fp().V8H(), src2.fp().V8H());
+}
+
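// Illustrative sketch (not from the V8 sources): scalar model of
// i16x8.q15mulr_sat_s. SQRDMULH computes sat((2*a*b + 2^15) >> 16) per lane,
// which equals the Wasm definition sat((a*b + 2^14) >> 15); the only input
// that saturates is a == b == -32768.
#include <algorithm>
#include <cstdint>

int16_t Q15MulRSatS(int16_t a, int16_t b) {
  int32_t result = (int32_t{a} * int32_t{b} + 0x4000) >> 15;
  result = std::clamp(result, int32_t{INT16_MIN}, int32_t{INT16_MAX});
  return static_cast<int16_t>(result);
}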
void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
Abs(dst.fp().V4S(), src.fp().V4S());
}
+void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2.abs");
+}
+
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
Ldr(limit_address, MemOperand(limit_address));
Cmp(sp, limit_address);
@@ -2942,10 +3124,10 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
Ret();
}
-void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
- ValueType out_argument_type, int stack_bytes,
+ ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
// The stack pointer is required to be quadword aligned.
int total_size = RoundUp(stack_bytes, kQuadWordSizeInBytes);
@@ -2953,9 +3135,9 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
Claim(total_size, 1);
int arg_bytes = 0;
- for (ValueType param_type : sig->parameters()) {
- Poke(liftoff::GetRegFromType(*args++, param_type), arg_bytes);
- arg_bytes += param_type.element_size_bytes();
+ for (ValueKind param_kind : sig->parameters()) {
+ Poke(liftoff::GetRegFromType(*args++, param_kind), arg_bytes);
+ arg_bytes += element_size_bytes(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
@@ -2978,8 +3160,8 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
}
// Load potential output value from the buffer on the stack.
- if (out_argument_type != kWasmStmt) {
- Peek(liftoff::GetRegFromType(*next_result_reg, out_argument_type), 0);
+ if (out_argument_kind != kStmt) {
+ Peek(liftoff::GetRegFromType(*next_result_reg, out_argument_kind), 0);
}
Drop(total_size, 1);
@@ -2993,7 +3175,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
Jump(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
// For Arm64, we have more cache registers than wasm parameters. That means
@@ -3035,34 +3217,34 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
void LiftoffStackSlots::Construct() {
size_t num_slots = 0;
for (auto& slot : slots_) {
- num_slots += slot.src_.type() == kWasmS128 ? 2 : 1;
+ num_slots += slot.src_.kind() == kS128 ? 2 : 1;
}
// The stack pointer is required to be quadword aligned.
asm_->Claim(RoundUp(num_slots, 2));
size_t poke_offset = num_slots * kXRegSize;
for (auto& slot : slots_) {
- poke_offset -= slot.src_.type() == kWasmS128 ? kXRegSize * 2 : kXRegSize;
+ poke_offset -= slot.src_.kind() == kS128 ? kXRegSize * 2 : kXRegSize;
switch (slot.src_.loc()) {
case LiftoffAssembler::VarState::kStack: {
UseScratchRegisterScope temps(asm_);
- CPURegister scratch = liftoff::AcquireByType(&temps, slot.src_.type());
+ CPURegister scratch = liftoff::AcquireByType(&temps, slot.src_.kind());
asm_->Ldr(scratch, liftoff::GetStackSlot(slot.src_offset_));
asm_->Poke(scratch, poke_offset);
break;
}
case LiftoffAssembler::VarState::kRegister:
- asm_->Poke(liftoff::GetRegFromType(slot.src_.reg(), slot.src_.type()),
+ asm_->Poke(liftoff::GetRegFromType(slot.src_.reg(), slot.src_.kind()),
poke_offset);
break;
case LiftoffAssembler::VarState::kIntConst:
- DCHECK(slot.src_.type() == kWasmI32 || slot.src_.type() == kWasmI64);
+ DCHECK(slot.src_.kind() == kI32 || slot.src_.kind() == kI64);
if (slot.src_.i32_const() == 0) {
- Register zero_reg = slot.src_.type() == kWasmI32 ? wzr : xzr;
+ Register zero_reg = slot.src_.kind() == kI32 ? wzr : xzr;
asm_->Poke(zero_reg, poke_offset);
} else {
UseScratchRegisterScope temps(asm_);
- Register scratch = slot.src_.type() == kWasmI32 ? temps.AcquireW()
- : temps.AcquireX();
+ Register scratch =
+ slot.src_.kind() == kI32 ? temps.AcquireW() : temps.AcquireX();
asm_->Mov(scratch, int64_t{slot.src_.i32_const()});
asm_->Poke(scratch, poke_offset);
}
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 890337fe12..ec52468a1a 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -65,26 +65,27 @@ static constexpr LiftoffRegList kByteRegs =
LiftoffRegList::FromBits<Register::ListOf(eax, ecx, edx)>();
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
- int32_t offset, ValueType type) {
+ int32_t offset, ValueKind kind) {
Operand src(base, offset);
- switch (type.kind()) {
- case ValueType::kI32:
- case ValueType::kOptRef:
- case ValueType::kRef:
- case ValueType::kRtt:
+ switch (kind) {
+ case kI32:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
assm->mov(dst.gp(), src);
break;
- case ValueType::kI64:
+ case kI64:
assm->mov(dst.low_gp(), src);
assm->mov(dst.high_gp(), Operand(base, offset + 4));
break;
- case ValueType::kF32:
+ case kF32:
assm->movss(dst.fp(), src);
break;
- case ValueType::kF64:
+ case kF64:
assm->movsd(dst.fp(), src);
break;
- case ValueType::kS128:
+ case kS128:
assm->movdqu(dst.fp(), src);
break;
default:
@@ -93,23 +94,23 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
}
inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
- LiftoffRegister src, ValueType type) {
+ LiftoffRegister src, ValueKind kind) {
Operand dst(base, offset);
- switch (type.kind()) {
- case ValueType::kI32:
+ switch (kind) {
+ case kI32:
assm->mov(dst, src.gp());
break;
- case ValueType::kI64:
+ case kI64:
assm->mov(dst, src.low_gp());
assm->mov(Operand(base, offset + 4), src.high_gp());
break;
- case ValueType::kF32:
+ case kF32:
assm->movss(dst, src.fp());
break;
- case ValueType::kF64:
+ case kF64:
assm->movsd(dst, src.fp());
break;
- case ValueType::kS128:
+ case kS128:
assm->movdqu(dst, src.fp());
break;
default:
@@ -117,26 +118,26 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
}
}
-inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
- case ValueType::kRef:
- case ValueType::kOptRef:
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
+ switch (kind) {
+ case kI32:
+ case kRef:
+ case kOptRef:
assm->push(reg.gp());
break;
- case ValueType::kI64:
+ case kI64:
assm->push(reg.high_gp());
assm->push(reg.low_gp());
break;
- case ValueType::kF32:
+ case kF32:
assm->AllocateStackSpace(sizeof(float));
assm->movss(Operand(esp, 0), reg.fp());
break;
- case ValueType::kF64:
+ case kF64:
assm->AllocateStackSpace(sizeof(double));
assm->movsd(Operand(esp, 0), reg.fp());
break;
- case ValueType::kS128:
+ case kS128:
assm->AllocateStackSpace(sizeof(double) * 2);
assm->movdqu(Operand(esp, 0), reg.fp());
break;
@@ -145,13 +146,6 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
}
}
-template <typename... Regs>
-inline void SpillRegisters(LiftoffAssembler* assm, Regs... regs) {
- for (LiftoffRegister r : {LiftoffRegister(regs)...}) {
- if (assm->cache_state()->is_used(r)) assm->SpillRegister(r);
- }
-}
-
inline void SignExtendI32ToI64(Assembler* assm, LiftoffRegister reg) {
assm->mov(reg.high_gp(), reg.low_gp());
assm->sar(reg.high_gp(), 31);
@@ -163,7 +157,7 @@ inline Register GetTmpByteRegister(LiftoffAssembler* assm, Register candidate) {
if (candidate.is_byte_register()) return candidate;
// {GetUnusedRegister()} may insert move instructions to spill registers to
// the stack. This is OK because {mov} does not change the status flags.
- return assm->GetUnusedRegister(liftoff::kByteRegs, {}).gp();
+ return assm->GetUnusedRegister(liftoff::kByteRegs).gp();
}
inline void MoveStackValue(LiftoffAssembler* assm, const Operand& src,
@@ -267,22 +261,22 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset;
}
-int LiftoffAssembler::SlotSizeForType(ValueType type) {
- return type.is_reference_type() ? kSystemPointerSize
- : type.element_size_bytes();
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
+ return is_reference_type(kind) ? kSystemPointerSize
+ : element_size_bytes(kind);
}
-bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- return type.is_reference_type();
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+ return is_reference_type(kind);
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
switch (value.type().kind()) {
- case ValueType::kI32:
+ case kI32:
TurboAssembler::Move(reg.gp(), Immediate(value.to_i32(), rmode));
break;
- case ValueType::kI64: {
+ case kI64: {
DCHECK(RelocInfo::IsNone(rmode));
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
@@ -290,10 +284,10 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
TurboAssembler::Move(reg.high_gp(), Immediate(high_word));
break;
}
- case ValueType::kF32:
+ case kF32:
TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
break;
- case ValueType::kF64:
+ case kF64:
TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
break;
default:
@@ -301,15 +295,31 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
- DCHECK_LE(0, offset);
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
mov(dst, liftoff::GetInstanceOperand());
- DCHECK_EQ(4, size);
- mov(dst, Operand(dst, offset));
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
- LoadFromInstance(dst, offset, kTaggedSize);
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+ int offset, int size) {
+ DCHECK_LE(0, offset);
+ Operand src{instance, offset};
+ switch (size) {
+ case 1:
+ movzx_b(dst, src);
+ break;
+ case 4:
+ mov(dst, src);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ Register instance,
+ int offset) {
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ mov(dst, Operand{instance, offset});
}
void LiftoffAssembler::SpillInstance(Register instance) {
@@ -334,16 +344,19 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
- LiftoffRegList pinned) {
+ LiftoffRegList pinned,
+ SkipWriteBarrier skip_write_barrier) {
DCHECK_GE(offset_imm, 0);
DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
STATIC_ASSERT(kTaggedSize == kInt32Size);
- Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Operand dst_op = offset_reg == no_reg
? Operand(dst_addr, offset_imm)
: Operand(dst_addr, offset_reg, times_1, offset_imm);
mov(dst_op, src.gp());
+ if (skip_write_barrier) return;
+
+ Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Label write_barrier;
Label exit;
CheckPageFlag(dst_addr, scratch,
@@ -468,7 +481,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
LiftoffRegList pinned_byte = pinned | LiftoffRegList::ForRegs(dst_addr);
if (offset_reg != no_reg) pinned_byte.set(offset_reg);
Register byte_src =
- GetUnusedRegister(liftoff::kByteRegs, pinned_byte).gp();
+ GetUnusedRegister(liftoff::kByteRegs.MaskOut(pinned_byte)).gp();
mov(byte_src, src.gp());
mov_b(dst_op, byte_src);
}
@@ -562,11 +575,12 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
// If there are no unused candidate registers, but {src} is a candidate,
// then spill other uses of {src}. Otherwise spill any candidate register
// and use that.
- if (!cache_state_.has_unused_register(src_candidates, pinned) &&
+ LiftoffRegList unpinned_candidates = src_candidates.MaskOut(pinned);
+ if (!cache_state_.has_unused_register(unpinned_candidates) &&
src_candidates.has(src)) {
SpillRegister(src);
} else {
- Register safe_src = GetUnusedRegister(src_candidates, pinned).gp();
+ Register safe_src = GetUnusedRegister(unpinned_candidates).gp();
mov(safe_src, src_gp);
src_gp = safe_src;
}
@@ -614,7 +628,7 @@ inline void AtomicAddOrSubOrExchange32(LiftoffAssembler* lasm, Binop binop,
// Ensure that {value_reg} is a valid register.
if (is_byte_store && !liftoff::kByteRegs.has(value_reg)) {
Register safe_value_reg =
- __ GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
+ __ GetUnusedRegister(liftoff::kByteRegs.MaskOut(pinned)).gp();
__ mov(safe_value_reg, value_reg);
value_reg = safe_value_reg;
}
@@ -811,10 +825,10 @@ inline void AtomicBinop64(LiftoffAssembler* lasm, Binop op, Register dst_addr,
std::swap(dst_addr, offset_reg);
}
// Spill all these registers if they are still holding other values.
- liftoff::SpillRegisters(lasm, old_hi, old_lo, new_hi, base, offset);
+ __ SpillRegisters(old_hi, old_lo, new_hi, base, offset);
__ ParallelRegisterMove(
{{LiftoffRegister::ForPair(base, offset),
- LiftoffRegister::ForPair(dst_addr, offset_reg), kWasmI64}});
+ LiftoffRegister::ForPair(dst_addr, offset_reg), kI64}});
Operand dst_op_lo = Operand(base, offset, times_1, offset_imm);
Operand dst_op_hi = Operand(base, offset, times_1, offset_imm + 4);
@@ -863,7 +877,7 @@ inline void AtomicBinop64(LiftoffAssembler* lasm, Binop op, Register dst_addr,
// Move the result into the correct registers.
__ ParallelRegisterMove(
- {{result, LiftoffRegister::ForPair(old_lo, old_hi), kWasmI64}});
+ {{result, LiftoffRegister::ForPair(old_lo, old_hi), kI64}});
}
#undef __
@@ -981,7 +995,8 @@ void LiftoffAssembler::AtomicCompareExchange(
// Ensure that {value_reg} is a valid register.
if (is_byte_store && !liftoff::kByteRegs.has(value_reg)) {
Register safe_value_reg =
- pinned.set(GetUnusedRegister(liftoff::kByteRegs, pinned)).gp();
+ pinned.set(GetUnusedRegister(liftoff::kByteRegs.MaskOut(pinned)))
+ .gp();
mov(safe_value_reg, value_reg);
value_reg = safe_value_reg;
pinned.clear(LiftoffRegister(value_reg));
@@ -1041,7 +1056,7 @@ void LiftoffAssembler::AtomicCompareExchange(
Register address = esi;
// Spill all these registers if they are still holding other values.
- liftoff::SpillRegisters(this, expected_hi, expected_lo, new_hi, address);
+ SpillRegisters(expected_hi, expected_lo, new_hi, address);
// We have to set new_lo specially, because it's the root register. We do it
// before setting all other registers so that the original value does not get
@@ -1050,9 +1065,9 @@ void LiftoffAssembler::AtomicCompareExchange(
// Move all other values into the right register.
ParallelRegisterMove(
- {{LiftoffRegister(address), LiftoffRegister(dst_addr), kWasmI32},
- {LiftoffRegister::ForPair(expected_lo, expected_hi), expected, kWasmI64},
- {LiftoffRegister(new_hi), new_value.high(), kWasmI32}});
+ {{LiftoffRegister(address), LiftoffRegister(dst_addr), kI32},
+ {LiftoffRegister::ForPair(expected_lo, expected_hi), expected, kI64},
+ {LiftoffRegister(new_hi), new_value.high(), kI32}});
Operand dst_op = Operand(address, offset_imm);
@@ -1064,33 +1079,33 @@ void LiftoffAssembler::AtomicCompareExchange(
// Move the result into the correct registers.
ParallelRegisterMove(
- {{result, LiftoffRegister::ForPair(expected_lo, expected_hi), kWasmI64}});
+ {{result, LiftoffRegister::ForPair(expected_lo, expected_hi), kI64}});
}
void LiftoffAssembler::AtomicFence() { mfence(); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
liftoff::Load(this, dst, ebp, kSystemPointerSize * (caller_slot_idx + 1),
- type);
+ kind);
}
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister reg, int offset,
- ValueType type) {
- liftoff::Load(this, reg, esp, offset, type);
+ ValueKind kind) {
+ liftoff::Load(this, reg, esp, offset, kind);
}
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
liftoff::Store(this, ebp, kSystemPointerSize * (caller_slot_idx + 1), src,
- type);
+ kind);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
- ValueType type) {
- if (needs_gp_reg_pair(type)) {
+ ValueKind kind) {
+ if (needs_gp_reg_pair(kind)) {
liftoff::MoveStackValue(this,
liftoff::GetHalfStackSlot(src_offset, kLowWord),
liftoff::GetHalfStackSlot(dst_offset, kLowWord));
@@ -1103,46 +1118,47 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
}
}
-void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src);
- DCHECK(kWasmI32 == type || type.is_reference_type());
+ DCHECK(kI32 == kind || is_reference_type(kind));
mov(dst, src);
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
- ValueType type) {
+ ValueKind kind) {
DCHECK_NE(dst, src);
- if (type == kWasmF32) {
+ if (kind == kF32) {
movss(dst, src);
- } else if (type == kWasmF64) {
+ } else if (kind == kF64) {
movsd(dst, src);
} else {
- DCHECK_EQ(kWasmS128, type);
+ DCHECK_EQ(kS128, kind);
Movaps(dst, src);
}
}
-void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
RecordUsedSpillOffset(offset);
Operand dst = liftoff::GetStackSlot(offset);
- switch (type.kind()) {
- case ValueType::kI32:
- case ValueType::kOptRef:
- case ValueType::kRef:
- case ValueType::kRtt:
+ switch (kind) {
+ case kI32:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
mov(dst, reg.gp());
break;
- case ValueType::kI64:
+ case kI64:
mov(liftoff::GetHalfStackSlot(offset, kLowWord), reg.low_gp());
mov(liftoff::GetHalfStackSlot(offset, kHighWord), reg.high_gp());
break;
- case ValueType::kF32:
+ case kF32:
movss(dst, reg.fp());
break;
- case ValueType::kF64:
+ case kF64:
movsd(dst, reg.fp());
break;
- case ValueType::kS128:
+ case kS128:
movdqu(dst, reg.fp());
break;
default:
@@ -1154,10 +1170,10 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
RecordUsedSpillOffset(offset);
Operand dst = liftoff::GetStackSlot(offset);
switch (value.type().kind()) {
- case ValueType::kI32:
+ case kI32:
mov(dst, Immediate(value.to_i32()));
break;
- case ValueType::kI64: {
+ case kI64: {
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
mov(liftoff::GetHalfStackSlot(offset, kLowWord), Immediate(low_word));
@@ -1170,8 +1186,8 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
}
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
- liftoff::Load(this, reg, ebp, -offset, type);
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
+ liftoff::Load(this, reg, ebp, -offset, kind);
}
void LiftoffAssembler::FillI64Half(Register reg, int offset, RegPairHalf half) {
@@ -1290,7 +1306,7 @@ void EmitInt32DivOrRem(LiftoffAssembler* assm, Register dst, Register lhs,
// another temporary register.
// Do all this before any branch, such that the code is executed
// unconditionally, as the cache state will also be modified unconditionally.
- liftoff::SpillRegisters(assm, eax, edx);
+ assm->SpillRegisters(eax, edx);
if (rhs == eax || rhs == edx) {
LiftoffRegList unavailable = LiftoffRegList::ForRegs(eax, edx, lhs);
Register tmp = assm->GetUnusedRegister(kGpReg, unavailable).gp();
@@ -1501,7 +1517,7 @@ inline void OpWithCarry(LiftoffAssembler* assm, LiftoffRegister dst,
// If necessary, move result into the right registers.
LiftoffRegister tmp_result = LiftoffRegister::ForPair(dst_low, dst_high);
- if (tmp_result != dst) assm->Move(dst, tmp_result, kWasmI64);
+ if (tmp_result != dst) assm->Move(dst, tmp_result, kI64);
}
template <void (Assembler::*op)(Register, const Immediate&),
@@ -1557,12 +1573,11 @@ void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
Register rhs_lo = esi;
// Spill all these registers if they are still holding other values.
- liftoff::SpillRegisters(this, dst_hi, dst_lo, lhs_hi, rhs_lo);
+ SpillRegisters(dst_hi, dst_lo, lhs_hi, rhs_lo);
// Move lhs and rhs into the respective registers.
- ParallelRegisterMove(
- {{LiftoffRegister::ForPair(lhs_lo, lhs_hi), lhs, kWasmI64},
- {LiftoffRegister::ForPair(rhs_lo, rhs_hi), rhs, kWasmI64}});
+ ParallelRegisterMove({{LiftoffRegister::ForPair(lhs_lo, lhs_hi), lhs, kI64},
+ {LiftoffRegister::ForPair(rhs_lo, rhs_hi), rhs, kI64}});
// First mul: lhs_hi' = lhs_hi * rhs_lo.
imul(lhs_hi, rhs_lo);
@@ -1577,7 +1592,7 @@ void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
// Finally, move back the temporary result to the actual dst register pair.
LiftoffRegister dst_tmp = LiftoffRegister::ForPair(dst_lo, dst_hi);
- if (dst != dst_tmp) Move(dst, dst_tmp, kWasmI64);
+ if (dst != dst_tmp) Move(dst, dst_tmp, kI64);
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1644,11 +1659,11 @@ inline void Emit64BitShiftOperation(
(assm->cache_state()->is_used(LiftoffRegister(ecx)) ||
pinned.has(LiftoffRegister(ecx)))) {
ecx_replace = assm->GetUnusedRegister(kGpReg, pinned).gp();
- reg_moves.emplace_back(ecx_replace, ecx, kWasmI32);
+ reg_moves.emplace_back(ecx_replace, ecx, kI32);
}
- reg_moves.emplace_back(dst, src, kWasmI64);
- reg_moves.emplace_back(ecx, amount, kWasmI32);
+ reg_moves.emplace_back(dst, src, kI64);
+ reg_moves.emplace_back(ecx, amount, kI32);
assm->ParallelRegisterMove(VectorOf(reg_moves));
// Do the actual shift.
@@ -1673,7 +1688,7 @@ void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
if (amount != 32) shl(dst.high_gp(), amount - 32);
xor_(dst.low_gp(), dst.low_gp());
} else {
- if (dst != src) Move(dst, src, kWasmI64);
+ if (dst != src) Move(dst, src, kI64);
ShlPair(dst.high_gp(), dst.low_gp(), amount);
}
}
@@ -1693,7 +1708,7 @@ void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
if (amount != 32) sar(dst.low_gp(), amount - 32);
sar(dst.high_gp(), 31);
} else {
- if (dst != src) Move(dst, src, kWasmI64);
+ if (dst != src) Move(dst, src, kI64);
SarPair(dst.high_gp(), dst.low_gp(), amount);
}
}
@@ -1711,7 +1726,7 @@ void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
if (amount != 32) shr(dst.low_gp(), amount - 32);
xor_(dst.high_gp(), dst.high_gp());
} else {
- if (dst != src) Move(dst, src, kWasmI64);
+ if (dst != src) Move(dst, src, kI64);
ShrPair(dst.high_gp(), dst.low_gp(), amount);
}
}
@@ -2386,24 +2401,25 @@ void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
void LiftoffAssembler::emit_jump(Register target) { jmp(target); }
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueType type,
+ Label* label, ValueKind kind,
Register lhs, Register rhs) {
Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs != no_reg) {
- switch (type.kind()) {
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kRtt:
+ switch (kind) {
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
V8_FALLTHROUGH;
- case ValueType::kI32:
+ case kI32:
cmp(lhs, rhs);
break;
default:
UNREACHABLE();
}
} else {
- DCHECK_EQ(type, kWasmI32);
+ DCHECK_EQ(kind, kI32);
test(lhs, lhs);
}
@@ -2555,7 +2571,7 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
LiftoffRegister false_value,
- ValueType type) {
+ ValueKind kind) {
return false;
}
@@ -2617,7 +2633,7 @@ template <void (Assembler::*avx_op)(XMMRegister, XMMRegister, XMMRegister),
void (Assembler::*sse_op)(XMMRegister, XMMRegister), uint8_t width>
void EmitSimdShiftOp(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister operand, LiftoffRegister count) {
- static constexpr RegClass tmp_rc = reg_class_for(ValueType::kI32);
+ static constexpr RegClass tmp_rc = reg_class_for(kI32);
LiftoffRegister tmp =
assm->GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(count));
constexpr int mask = (1 << width) - 1;
@@ -2695,7 +2711,11 @@ inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
template <void (TurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
- LiftoffRegister src) {
+ LiftoffRegister src,
+ base::Optional<CpuFeature> feature = base::nullopt) {
+ base::Optional<CpuFeatureScope> sse_scope;
+ if (feature.has_value()) sse_scope.emplace(assm, *feature);
+
Register tmp =
assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst)).gp();
XMMRegister tmp_simd = liftoff::kScratchDoubleReg;
@@ -2763,7 +2783,52 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
Register addr, Register offset_reg,
uintptr_t offset_imm, LoadType type,
uint8_t laneidx, uint32_t* protected_load_pc) {
- bailout(kSimd, "loadlane");
+ DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
+ Operand src_op{addr, offset_reg, times_1, static_cast<int32_t>(offset_imm)};
+ *protected_load_pc = pc_offset();
+
+ MachineType mem_type = type.mem_type();
+ if (mem_type == MachineType::Int8()) {
+ Pinsrb(dst.fp(), src.fp(), src_op, laneidx);
+ } else if (mem_type == MachineType::Int16()) {
+ Pinsrw(dst.fp(), src.fp(), src_op, laneidx);
+ } else if (mem_type == MachineType::Int32()) {
+ Pinsrd(dst.fp(), src.fp(), src_op, laneidx);
+ } else {
+ DCHECK_EQ(MachineType::Int64(), mem_type);
+ if (laneidx == 0) {
+ Movlps(dst.fp(), src.fp(), src_op);
+ } else {
+ DCHECK_EQ(1, laneidx);
+ Movhps(dst.fp(), src.fp(), src_op);
+ }
+ }
+}
+
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc) {
+ DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
+ Operand dst_op = Operand(dst, offset, times_1, offset_imm);
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+
+ MachineRepresentation rep = type.mem_rep();
+ if (rep == MachineRepresentation::kWord8) {
+ Pextrb(dst_op, src.fp(), lane);
+ } else if (rep == MachineRepresentation::kWord16) {
+ Pextrw(dst_op, src.fp(), lane);
+ } else if (rep == MachineRepresentation::kWord32) {
+ S128Store32Lane(dst_op, src.fp(), lane);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kWord64, rep);
+ if (lane == 0) {
+ Movlps(dst_op, src.fp());
+ } else {
+ DCHECK_EQ(1, lane);
+ Movhps(dst_op, src.fp());
+ }
+ }
}
void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
@@ -2826,6 +2891,15 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
Pshufb(dst.fp(), lhs.fp(), mask);
}
+void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Register scratch = GetUnusedRegister(RegClass::kGpReg, {}).gp();
+ XMMRegister tmp =
+ GetUnusedRegister(RegClass::kFpReg, LiftoffRegList::ForRegs(dst, src))
+ .fp();
+ I8x16Popcnt(dst.fp(), src.fp(), liftoff::kScratchDoubleReg, tmp, scratch);
+}
+
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
Movd(dst.fp(), src.gp());
@@ -3048,6 +3122,75 @@ void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
Pcmpeqd(dst.fp(), ref);
}
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpcmpeqq, &Assembler::pcmpeqq>(
+ this, dst, lhs, rhs, SSE4_1);
+}
+
+void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpcmpeqq, &Assembler::pcmpeqq>(
+ this, dst, lhs, rhs, SSE4_1);
+ Pcmpeqq(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Pxor(dst.fp(), liftoff::kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // Different register alias requirements depending on CpuFeatures supported:
+ if (CpuFeatures::IsSupported(AVX)) {
+ // 1. AVX, no requirements.
+ I64x2GtS(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
+ } else if (CpuFeatures::IsSupported(SSE4_2)) {
+ // 2. SSE4_2, dst == lhs.
+ if (dst != lhs) {
+ movdqa(dst.fp(), lhs.fp());
+ }
+ I64x2GtS(dst.fp(), dst.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
+ } else {
+ // 3. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
+ if (dst == lhs || dst == rhs) {
+ LiftoffRegister tmp = GetUnusedRegister(
+ RegClass::kFpReg, LiftoffRegList::ForRegs(lhs, rhs));
+ I64x2GtS(tmp.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
+ movaps(dst.fp(), tmp.fp());
+ } else {
+ I64x2GtS(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
+ }
+ }
+}
+
+void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // Different register alias requirements depending on CpuFeatures supported:
+ if (CpuFeatures::IsSupported(AVX)) {
+ // 1. AVX, no requirements.
+ I64x2GeS(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
+ } else if (CpuFeatures::IsSupported(SSE4_2)) {
+ // 2. SSE4_2, dst != lhs.
+ if (dst == lhs) {
+ LiftoffRegister tmp = GetUnusedRegister(RegClass::kFpReg, {rhs},
+ LiftoffRegList::ForRegs(lhs));
+ // macro-assembler uses kScratchDoubleReg, so don't use it.
+ I64x2GeS(tmp.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
+ movdqa(dst.fp(), tmp.fp());
+ } else {
+ I64x2GeS(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
+ }
+ } else {
+ // 3. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
+ if (dst == lhs || dst == rhs) {
+ LiftoffRegister tmp = GetUnusedRegister(
+ RegClass::kFpReg, LiftoffRegList::ForRegs(lhs, rhs));
+ I64x2GeS(tmp.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
+ movaps(dst.fp(), tmp.fp());
+ } else {
+ I64x2GeS(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
+ }
+ }
+}
+
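// Illustrative sketch (not from the V8 sources): the aliasing constraints the
// three branches above encode for i64x2.gt_s. The AVX form is non-destructive,
// the SSE4.2 pcmpgtq overwrites its first operand, and the baseline sequence
// must not have dst alias either input.
enum class Isa { kAVX, kSSE4_2, kBaseline };

bool NeedsFixupMove(Isa isa, int dst, int lhs, int rhs) {
  switch (isa) {
    case Isa::kAVX:
      return false;  // dst, lhs, rhs may alias freely
    case Isa::kSSE4_2:
      return dst != lhs;  // lhs must first be moved into dst
    case Isa::kBaseline:
      return dst == lhs || dst == rhs;  // compute into a temp, then move to dst
  }
  return false;
}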
void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vcmpeqps, &Assembler::cmpeqps>(
@@ -3171,8 +3314,8 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
}
}
-void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
liftoff::EmitAnyTrue(this, dst, src);
}
@@ -3188,8 +3331,8 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(ValueType::kI32);
- static constexpr RegClass tmp_simd_rc = reg_class_for(ValueType::kS128);
+ static constexpr RegClass tmp_rc = reg_class_for(kI32);
+ static constexpr RegClass tmp_simd_rc = reg_class_for(kS128);
LiftoffRegister tmp = GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(rhs));
LiftoffRegister tmp_simd =
GetUnusedRegister(tmp_simd_rc, LiftoffRegList::ForRegs(dst, lhs));
@@ -3216,7 +3359,7 @@ void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(ValueType::kI32);
+ static constexpr RegClass tmp_rc = reg_class_for(kI32);
LiftoffRegister tmp = GetUnusedRegister(tmp_rc, {});
byte shift = static_cast<byte>(rhs & 0x7);
if (CpuFeatures::IsSupported(AVX)) {
@@ -3316,7 +3459,7 @@ void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(ValueType::kS128);
+ static constexpr RegClass tmp_rc = reg_class_for(kS128);
LiftoffRegister tmp =
GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
if (CpuFeatures::IsSupported(AVX)) {
@@ -3415,11 +3558,6 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
}
}
-void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- liftoff::EmitAnyTrue(this, dst, src);
-}
-
void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqw>(this, dst, src);
@@ -3546,6 +3684,18 @@ void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
}
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I16x8ExtAddPairwiseI8x16S(dst.fp(), src.fp(), liftoff::kScratchDoubleReg,
+ GetUnusedRegister(kGpReg, {}).gp());
+}
+
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I16x8ExtAddPairwiseI8x16U(dst.fp(), src.fp(),
+ GetUnusedRegister(kGpReg, {}).gp());
+}
+
void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -3574,6 +3724,12 @@ void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
/*low=*/false, /*is_signed=*/false);
}
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ I16x8Q15MulRSatS(dst.fp(), src1.fp(), src2.fp(), liftoff::kScratchDoubleReg);
+}
+
void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
@@ -3585,11 +3741,6 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
}
}
-void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- liftoff::EmitAnyTrue(this, dst, src);
-}
-
void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqd>(this, dst, src);
@@ -3691,6 +3842,17 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I32x4ExtAddPairwiseI16x8S(dst.fp(), src.fp(),
+ GetUnusedRegister(kGpReg, {}).gp());
+}
+
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I32x4ExtAddPairwiseI16x8U(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
+}
+
namespace liftoff {
// Helper function that checks for register aliasing and AVX support, and
// moves registers around before calling the actual macro-assembler function.
@@ -3760,6 +3922,11 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
}
}
+void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqq>(this, dst, src, SSE4_1);
+}
+
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdShiftOp<&Assembler::vpsllq, &Assembler::psllq, 6>(this, dst,
@@ -3845,7 +4012,7 @@ void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(ValueType::kS128);
+ static constexpr RegClass tmp_rc = reg_class_for(kS128);
LiftoffRegister tmp1 =
GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
LiftoffRegister tmp2 =
@@ -3903,6 +4070,26 @@ void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
Movmskpd(dst.gp(), src.fp());
}
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pmovsxdq(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I64x2SConvertI32x4High(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pmovzxdq(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I64x2UConvertI32x4High(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
+}
+
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
@@ -4215,6 +4402,22 @@ void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, rhs, lhs);
}
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Cvtdq2pd(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ F64x2ConvertLowI32x4U(dst.fp(), src.fp(), tmp);
+}
+
+void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Cvtps2pd(dst.fp(), src.fp());
+}
+
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
// NAN->0
@@ -4241,7 +4444,7 @@ void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- static constexpr RegClass tmp_rc = reg_class_for(ValueType::kS128);
+ static constexpr RegClass tmp_rc = reg_class_for(kS128);
DoubleRegister tmp =
GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, src)).fp();
// NAN->0, negative->0.
@@ -4306,6 +4509,11 @@ void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
liftoff::kScratchDoubleReg); // Add hi and lo, may round.
}
+void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Cvtpd2ps(dst.fp(), src.fp());
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -4345,8 +4553,7 @@ void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
LiftoffRegister src) {
- Palignr(dst.fp(), src.fp(), static_cast<uint8_t>(8));
- Pmovsxbw(dst.fp(), dst.fp());
+ I16x8SConvertI8x16High(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
@@ -4356,8 +4563,7 @@ void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
LiftoffRegister src) {
- Palignr(dst.fp(), src.fp(), static_cast<uint8_t>(8));
- Pmovzxbw(dst.fp(), dst.fp());
+ I16x8UConvertI8x16High(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
@@ -4367,8 +4573,7 @@ void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
LiftoffRegister src) {
- Palignr(dst.fp(), src.fp(), static_cast<uint8_t>(8));
- Pmovsxwd(dst.fp(), dst.fp());
+ I32x4SConvertI16x8High(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
@@ -4378,8 +4583,19 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
LiftoffRegister src) {
- Palignr(dst.fp(), src.fp(), static_cast<uint8_t>(8));
- Pmovzxwd(dst.fp(), dst.fp());
+ I32x4UConvertI16x8High(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ I32x4TruncSatF64x2SZero(dst.fp(), src.fp(), liftoff::kScratchDoubleReg, tmp);
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ I32x4TruncSatF64x2UZero(dst.fp(), src.fp(), liftoff::kScratchDoubleReg, tmp);
}
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
@@ -4418,6 +4634,11 @@ void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
Pabsd(dst.fp(), src.fp());
}
+void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I64x2Abs(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
+}
+
void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
@@ -4658,17 +4879,17 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
ret(static_cast<int>(num_stack_slots * kSystemPointerSize));
}
-void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
- ValueType out_argument_type, int stack_bytes,
+ ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
AllocateStackSpace(stack_bytes);
int arg_bytes = 0;
- for (ValueType param_type : sig->parameters()) {
- liftoff::Store(this, esp, arg_bytes, *args++, param_type);
- arg_bytes += param_type.element_size_bytes();
+ for (ValueKind param_kind : sig->parameters()) {
+ liftoff::Store(this, esp, arg_bytes, *args++, param_kind);
+ arg_bytes += element_size_bytes(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
@@ -4697,8 +4918,8 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
}
// Load potential output value from the buffer on the stack.
- if (out_argument_type != kWasmStmt) {
- liftoff::Load(this, *next_result_reg, esp, 0, out_argument_type);
+ if (out_argument_kind != kStmt) {
+ liftoff::Load(this, *next_result_reg, esp, 0, out_argument_kind);
}
add(esp, Immediate(stack_bytes));
@@ -4712,7 +4933,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
jmp(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
// Since we have more cache registers than parameter registers, the
@@ -4758,26 +4979,26 @@ void LiftoffStackSlots::Construct() {
case LiftoffAssembler::VarState::kStack:
// The combination of AllocateStackSpace and 2 movdqu is usually smaller
// in code size than doing 4 pushes.
- if (src.type() == kWasmS128) {
+ if (src.kind() == kS128) {
asm_->AllocateStackSpace(sizeof(double) * 2);
asm_->movdqu(liftoff::kScratchDoubleReg,
liftoff::GetStackSlot(slot.src_offset_));
asm_->movdqu(Operand(esp, 0), liftoff::kScratchDoubleReg);
break;
}
- if (src.type() == kWasmF64) {
+ if (src.kind() == kF64) {
DCHECK_EQ(kLowWord, slot.half_);
asm_->push(liftoff::GetHalfStackSlot(slot.src_offset_, kHighWord));
}
asm_->push(liftoff::GetHalfStackSlot(slot.src_offset_, slot.half_));
break;
case LiftoffAssembler::VarState::kRegister:
- if (src.type() == kWasmI64) {
+ if (src.kind() == kI64) {
liftoff::push(
asm_, slot.half_ == kLowWord ? src.reg().low() : src.reg().high(),
- kWasmI32);
+ kI32);
} else {
- liftoff::push(asm_, src.reg(), src.type());
+ liftoff::push(asm_, src.reg(), src.kind());
}
break;
case LiftoffAssembler::VarState::kIntConst:
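
Throughout the ia32 hunks above, the Liftoff interface moves from full wasm::ValueType values to the lighter ValueKind tags (ValueKindSig is just Signature<ValueKind>, see the liftoff-assembler.h diff below); CallC, for instance, now sizes its outgoing stack arguments with element_size_bytes(param_kind). A minimal, self-contained sketch of the idea follows; the enumerator set and the sizes are illustrative assumptions, not the V8 definitions.

#include <cstdint>

// Sketch only: a kind is a plain tag, so slot sizes can be computed without
// carrying a full type object around.
enum ValueKind : uint8_t { kStmt, kI32, kI64, kF32, kF64, kS128 };

constexpr int element_size_bytes(ValueKind kind) {
  switch (kind) {
    case kI32:
    case kF32:
      return 4;
    case kI64:
    case kF64:
      return 8;
    case kS128:
      return 16;
    default:
      return 0;  // kStmt carries no value.
  }
}

static_assert(element_size_bytes(kS128) == 16, "a SIMD value fills two 8-byte slots");
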
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
index a4d7fd1221..11b2e4993c 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -77,6 +77,26 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs =
constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12);
+#elif V8_TARGET_ARCH_PPC64
+
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf(r3, r4, r5, r6, r7, r8, r9, r10, r11);
+
+constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
+ d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12);
+
+#elif V8_TARGET_ARCH_RISCV64
+
+// Any change of kLiftoffAssemblerGpCacheRegs also needs to update
+// kPushedGpRegs in frame-constants-riscv64.h
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf(a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, s7);
+
+// Any change of kLiftoffAssemblerFpCacheRegs also needs to update
+// kPushedFpRegs in frame-constants-riscv64.h
+constexpr RegList kLiftoffAssemblerFpCacheRegs =
+ DoubleRegister::ListOf(ft0, ft1, ft2, ft3, ft4, ft5, ft6, ft7, fa0, fa1,
+ fa2, fa3, fa4, fa5, fa6, fa7, ft8, ft9, ft10, ft11);
#else
constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
@@ -84,7 +104,6 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
constexpr RegList kLiftoffAssemblerFpCacheRegs = 0xff;
#endif
-
} // namespace wasm
} // namespace internal
} // namespace v8
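
The PPC64 and RISC-V blocks above only declare which registers Liftoff may use for caching values; a RegList is conceptually a bitmask indexed by register code. A rough standalone sketch of what a ListOf helper amounts to, under that assumed representation (not the V8 implementation):

#include <cstdint>

using RegList = uint64_t;  // assumption: one bit per register code

template <typename... Codes>
constexpr RegList ListOf(Codes... codes) {
  RegList list = 0;
  ((list |= RegList{1} << codes), ...);  // set the bit for each register code
  return list;
}

static_assert(ListOf(3, 4, 5) == 0b111000, "r3, r4 and r5 are in the list");
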
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index 587430a107..3a8d7ba01e 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -23,17 +23,18 @@ namespace internal {
namespace wasm {
using VarState = LiftoffAssembler::VarState;
+using ValueKindSig = LiftoffAssembler::ValueKindSig;
-constexpr ValueType LiftoffAssembler::kWasmIntPtr;
+constexpr ValueKind LiftoffAssembler::kIntPtr;
namespace {
class StackTransferRecipe {
struct RegisterMove {
LiftoffRegister src;
- ValueType type;
- constexpr RegisterMove(LiftoffRegister src, ValueType type)
- : src(src), type(type) {}
+ ValueKind kind;
+ constexpr RegisterMove(LiftoffRegister src, ValueKind kind)
+ : src(src), kind(kind) {}
};
struct RegisterLoad {
@@ -45,34 +46,34 @@ class StackTransferRecipe {
kHighHalfStack // fill a register from the high half of a stack slot.
};
- LoadKind kind;
- ValueType type;
+ LoadKind load_kind;
+ ValueKind kind;
int32_t value; // i32 constant value or stack offset, depending on kind.
// Named constructors.
static RegisterLoad Const(WasmValue constant) {
- if (constant.type() == kWasmI32) {
- return {kConstant, kWasmI32, constant.to_i32()};
+ if (constant.type().kind() == kI32) {
+ return {kConstant, kI32, constant.to_i32()};
}
- DCHECK_EQ(kWasmI64, constant.type());
- DCHECK_EQ(constant.to_i32_unchecked(), constant.to_i64_unchecked());
- return {kConstant, kWasmI64, constant.to_i32_unchecked()};
+ DCHECK_EQ(kI64, constant.type().kind());
+ int32_t i32_const = static_cast<int32_t>(constant.to_i64());
+ DCHECK_EQ(constant.to_i64(), i32_const);
+ return {kConstant, kI64, i32_const};
}
- static RegisterLoad Stack(int32_t offset, ValueType type) {
- return {kStack, type, offset};
+ static RegisterLoad Stack(int32_t offset, ValueKind kind) {
+ return {kStack, kind, offset};
}
static RegisterLoad HalfStack(int32_t offset, RegPairHalf half) {
- return {half == kLowWord ? kLowHalfStack : kHighHalfStack, kWasmI32,
- offset};
+ return {half == kLowWord ? kLowHalfStack : kHighHalfStack, kI32, offset};
}
static RegisterLoad Nop() {
- // ValueType does not matter.
- return {kNop, kWasmI32, 0};
+ // ValueKind does not matter.
+ return {kNop, kI32, 0};
}
private:
- RegisterLoad(LoadKind kind, ValueType type, int32_t value)
- : kind(kind), type(type), value(value) {}
+ RegisterLoad(LoadKind load_kind, ValueKind kind, int32_t value)
+ : load_kind(load_kind), kind(kind), value(value) {}
};
public:
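
RegisterLoad::Const above now narrows a kI64 constant to a 32-bit payload and asserts that the narrowing is lossless; this holds by construction, because VarState only ever stores 32-bit constants and rebuilds the i64 by sign extension (see constant() in the liftoff-assembler.h diff below). A self-contained sketch of the round-trip check, with an illustrative helper name:

#include <cassert>
#include <cstdint>

// True if a 64-bit constant can be rebuilt from its low 32 bits by sign
// extension, i.e. keeping only an int32_t payload loses nothing.
constexpr bool FitsInSignExtendedInt32(int64_t value) {
  return static_cast<int64_t>(static_cast<int32_t>(value)) == value;
}

int main() {
  assert(FitsInSignExtendedInt32(-1));                 // all-ones pattern survives
  assert(!FitsInSignExtendedInt32(int64_t{1} << 32));  // high bits would be lost
}
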
@@ -90,8 +91,23 @@ class StackTransferRecipe {
DCHECK(load_dst_regs_.is_empty());
}
+#if DEBUG
+ bool CheckCompatibleStackSlotTypes(ValueKind dst, ValueKind src) {
+ if (is_object_reference_type(dst)) {
+ // Since Liftoff doesn't do accurate type tracking (e.g. on loop back
+ // edges), we only care that pointer types stay amongst pointer types.
+ // It's fine if ref/optref overwrite each other.
+ DCHECK(is_object_reference_type(src));
+ } else {
+ // All other types (primitive numbers, RTTs, bottom/stmt) must be equal.
+ DCHECK_EQ(dst, src);
+ }
+ return true; // Dummy so this can be called via DCHECK.
+ }
+#endif
+
V8_INLINE void TransferStackSlot(const VarState& dst, const VarState& src) {
- DCHECK_EQ(dst.type(), src.type());
+ DCHECK(CheckCompatibleStackSlotTypes(dst.kind(), src.kind()));
if (dst.is_reg()) {
LoadIntoRegister(dst.reg(), src, src.offset());
return;
@@ -104,11 +120,11 @@ class StackTransferRecipe {
switch (src.loc()) {
case VarState::kStack:
if (src.offset() != dst.offset()) {
- asm_->MoveStackValue(dst.offset(), src.offset(), src.type());
+ asm_->MoveStackValue(dst.offset(), src.offset(), src.kind());
}
break;
case VarState::kRegister:
- asm_->Spill(dst.offset(), src.reg(), src.type());
+ asm_->Spill(dst.offset(), src.reg(), src.kind());
break;
case VarState::kIntConst:
asm_->Spill(dst.offset(), src.constant());
@@ -121,11 +137,11 @@ class StackTransferRecipe {
uint32_t src_offset) {
switch (src.loc()) {
case VarState::kStack:
- LoadStackSlot(dst, src_offset, src.type());
+ LoadStackSlot(dst, src_offset, src.kind());
break;
case VarState::kRegister:
DCHECK_EQ(dst.reg_class(), src.reg_class());
- if (dst != src.reg()) MoveRegister(dst, src.reg(), src.type());
+ if (dst != src.reg()) MoveRegister(dst, src.reg(), src.kind());
break;
case VarState::kIntConst:
LoadConstant(dst, src.constant());
@@ -139,7 +155,7 @@ class StackTransferRecipe {
// Use CHECK such that the remaining code is statically dead if
// {kNeedI64RegPair} is false.
CHECK(kNeedI64RegPair);
- DCHECK_EQ(kWasmI64, src.type());
+ DCHECK_EQ(kI64, src.kind());
switch (src.loc()) {
case VarState::kStack:
LoadI64HalfStackSlot(dst, offset, half);
@@ -147,7 +163,7 @@ class StackTransferRecipe {
case VarState::kRegister: {
LiftoffRegister src_half =
half == kLowWord ? src.reg().low() : src.reg().high();
- if (dst != src_half) MoveRegister(dst, src_half, kWasmI32);
+ if (dst != src_half) MoveRegister(dst, src_half, kI32);
break;
}
case VarState::kIntConst:
@@ -159,45 +175,44 @@ class StackTransferRecipe {
}
}
- void MoveRegister(LiftoffRegister dst, LiftoffRegister src, ValueType type) {
+ void MoveRegister(LiftoffRegister dst, LiftoffRegister src, ValueKind kind) {
DCHECK_NE(dst, src);
DCHECK_EQ(dst.reg_class(), src.reg_class());
- DCHECK_EQ(reg_class_for(type), src.reg_class());
+ DCHECK_EQ(reg_class_for(kind), src.reg_class());
if (src.is_gp_pair()) {
- DCHECK_EQ(kWasmI64, type);
- if (dst.low() != src.low()) MoveRegister(dst.low(), src.low(), kWasmI32);
- if (dst.high() != src.high())
- MoveRegister(dst.high(), src.high(), kWasmI32);
+ DCHECK_EQ(kI64, kind);
+ if (dst.low() != src.low()) MoveRegister(dst.low(), src.low(), kI32);
+ if (dst.high() != src.high()) MoveRegister(dst.high(), src.high(), kI32);
return;
}
if (src.is_fp_pair()) {
- DCHECK_EQ(kWasmS128, type);
+ DCHECK_EQ(kS128, kind);
if (dst.low() != src.low()) {
- MoveRegister(dst.low(), src.low(), kWasmF64);
- MoveRegister(dst.high(), src.high(), kWasmF64);
+ MoveRegister(dst.low(), src.low(), kF64);
+ MoveRegister(dst.high(), src.high(), kF64);
}
return;
}
if (move_dst_regs_.has(dst)) {
DCHECK_EQ(register_move(dst)->src, src);
// Non-fp registers can only occur with the exact same type.
- DCHECK_IMPLIES(!dst.is_fp(), register_move(dst)->type == type);
+ DCHECK_IMPLIES(!dst.is_fp(), register_move(dst)->kind == kind);
// It can happen that one fp register holds both the f32 zero and the f64
// zero, as the initial value for local variables. Move the value as f64
// in that case.
- if (type == kWasmF64) register_move(dst)->type = kWasmF64;
+ if (kind == kF64) register_move(dst)->kind = kF64;
return;
}
move_dst_regs_.set(dst);
++*src_reg_use_count(src);
- *register_move(dst) = {src, type};
+ *register_move(dst) = {src, kind};
}
void LoadConstant(LiftoffRegister dst, WasmValue value) {
DCHECK(!load_dst_regs_.has(dst));
load_dst_regs_.set(dst);
if (dst.is_gp_pair()) {
- DCHECK_EQ(kWasmI64, value.type());
+ DCHECK_EQ(kI64, value.type().kind());
int64_t i64 = value.to_i64();
*register_load(dst.low()) =
RegisterLoad::Const(WasmValue(static_cast<int32_t>(i64)));
@@ -209,7 +224,7 @@ class StackTransferRecipe {
}
void LoadStackSlot(LiftoffRegister dst, uint32_t stack_offset,
- ValueType type) {
+ ValueKind kind) {
if (load_dst_regs_.has(dst)) {
// It can happen that we spilled the same register to different stack
// slots, and then we reload them later into the same dst register.
@@ -218,20 +233,20 @@ class StackTransferRecipe {
}
load_dst_regs_.set(dst);
if (dst.is_gp_pair()) {
- DCHECK_EQ(kWasmI64, type);
+ DCHECK_EQ(kI64, kind);
*register_load(dst.low()) =
RegisterLoad::HalfStack(stack_offset, kLowWord);
*register_load(dst.high()) =
RegisterLoad::HalfStack(stack_offset, kHighWord);
} else if (dst.is_fp_pair()) {
- DCHECK_EQ(kWasmS128, type);
+ DCHECK_EQ(kS128, kind);
// Only need register_load for low_gp since we load 128 bits at one go.
// Both low and high need to be set in load_dst_regs_ but when iterating
// over it, both low and high will be cleared, so we won't load twice.
- *register_load(dst.low()) = RegisterLoad::Stack(stack_offset, type);
+ *register_load(dst.low()) = RegisterLoad::Stack(stack_offset, kind);
*register_load(dst.high()) = RegisterLoad::Nop();
} else {
- *register_load(dst) = RegisterLoad::Stack(stack_offset, type);
+ *register_load(dst) = RegisterLoad::Stack(stack_offset, kind);
}
}
@@ -279,7 +294,7 @@ class StackTransferRecipe {
void ExecuteMove(LiftoffRegister dst) {
RegisterMove* move = register_move(dst);
DCHECK_EQ(0, *src_reg_use_count(dst));
- asm_->Move(dst, move->src, move->type);
+ asm_->Move(dst, move->src, move->kind);
ClearExecutedMove(dst);
}
@@ -313,11 +328,11 @@ class StackTransferRecipe {
// TODO(clemensb): Use an unused register if available.
LiftoffRegister dst = move_dst_regs_.GetFirstRegSet();
RegisterMove* move = register_move(dst);
- last_spill_offset += LiftoffAssembler::SlotSizeForType(move->type);
+ last_spill_offset += LiftoffAssembler::SlotSizeForType(move->kind);
LiftoffRegister spill_reg = move->src;
- asm_->Spill(last_spill_offset, spill_reg, move->type);
+ asm_->Spill(last_spill_offset, spill_reg, move->kind);
// Remember to reload into the destination register later.
- LoadStackSlot(dst, last_spill_offset, move->type);
+ LoadStackSlot(dst, last_spill_offset, move->kind);
ClearExecutedMove(dst);
}
}
@@ -325,20 +340,20 @@ class StackTransferRecipe {
void ExecuteLoads() {
for (LiftoffRegister dst : load_dst_regs_) {
RegisterLoad* load = register_load(dst);
- switch (load->kind) {
+ switch (load->load_kind) {
case RegisterLoad::kNop:
break;
case RegisterLoad::kConstant:
- asm_->LoadConstant(dst, load->type == kWasmI64
+ asm_->LoadConstant(dst, load->kind == kI64
? WasmValue(int64_t{load->value})
: WasmValue(int32_t{load->value}));
break;
case RegisterLoad::kStack:
- if (kNeedS128RegPair && load->type == kWasmS128) {
+ if (kNeedS128RegPair && load->kind == kS128) {
asm_->Fill(LiftoffRegister::ForFpPair(dst.fp()), load->value,
- load->type);
+ load->kind);
} else {
- asm_->Fill(dst, load->value, load->type);
+ asm_->Fill(dst, load->value, load->kind);
}
break;
case RegisterLoad::kLowHalfStack:
@@ -415,18 +430,18 @@ void InitMergeRegion(LiftoffAssembler::CacheState* state,
reg = register_reuse_map.Lookup(source->reg());
}
// Third try: Use any free register.
- RegClass rc = reg_class_for(source->type());
+ RegClass rc = reg_class_for(source->kind());
if (!reg && state->has_unused_register(rc, used_regs)) {
reg = state->unused_register(rc, used_regs);
}
if (!reg) {
// No free register; make this a stack slot.
- *target = VarState(source->type(), source->offset());
+ *target = VarState(source->kind(), source->offset());
continue;
}
if (reuse_registers) register_reuse_map.Add(source->reg(), *reg);
state->inc_used(*reg);
- *target = VarState(source->type(), *reg, source->offset());
+ *target = VarState(source->kind(), *reg, source->offset());
}
}
@@ -440,6 +455,10 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
// |------locals------|---(in between)----|--(discarded)--|----merge----|
// <-- num_locals --> <-- stack_depth -->^stack_base <-- arity -->
+ if (source.cached_instance != no_reg) {
+ SetInstanceCacheRegister(source.cached_instance);
+ }
+
uint32_t stack_base = stack_depth + num_locals;
uint32_t target_height = stack_base + arity;
uint32_t discarded = source.stack_height() - target_height;
@@ -514,7 +533,7 @@ void LiftoffAssembler::CacheState::GetTaggedSlotsForOOLCode(
ZoneVector<int>* slots, LiftoffRegList* spills,
SpillLocation spill_location) {
for (const auto& slot : stack_state) {
- if (!slot.type().is_reference_type()) continue;
+ if (!is_reference_type(slot.kind())) continue;
if (spill_location == SpillLocation::kTopOfStack && slot.is_reg()) {
// Registers get spilled just before the call to the runtime. In {spills}
@@ -533,7 +552,7 @@ void LiftoffAssembler::CacheState::DefineSafepoint(Safepoint& safepoint) {
for (const auto& slot : stack_state) {
DCHECK(!slot.is_reg());
- if (slot.type().is_reference_type()) {
+ if (is_reference_type(slot.kind())) {
safepoint.DefinePointerSlot(GetSafepointIndexForStackSlot(slot));
}
}
@@ -571,12 +590,12 @@ LiftoffAssembler::~LiftoffAssembler() {
LiftoffRegister LiftoffAssembler::LoadToRegister(VarState slot,
LiftoffRegList pinned) {
if (slot.is_reg()) return slot.reg();
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(slot.type()), pinned);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(slot.kind()), pinned);
if (slot.is_const()) {
LoadConstant(reg, slot.constant());
} else {
DCHECK(slot.is_stack());
- Fill(reg, slot.offset(), slot.type());
+ Fill(reg, slot.offset(), slot.kind());
}
return reg;
}
@@ -627,7 +646,7 @@ void LiftoffAssembler::PrepareLoopArgs(int num) {
for (int i = 0; i < num; ++i) {
VarState& slot = cache_state_.stack_state.end()[-1 - i];
if (slot.is_stack()) continue;
- RegClass rc = reg_class_for(slot.type());
+ RegClass rc = reg_class_for(slot.kind());
if (slot.is_reg()) {
if (cache_state_.get_use_count(slot.reg()) > 1) {
// If the register is used more than once, we cannot use it for the
@@ -635,7 +654,7 @@ void LiftoffAssembler::PrepareLoopArgs(int num) {
LiftoffRegList pinned;
pinned.set(slot.reg());
LiftoffRegister dst_reg = GetUnusedRegister(rc, pinned);
- Move(dst_reg, slot.reg(), slot.type());
+ Move(dst_reg, slot.reg(), slot.kind());
cache_state_.dec_used(slot.reg());
cache_state_.inc_used(dst_reg);
slot.MakeRegister(dst_reg);
@@ -657,7 +676,7 @@ void LiftoffAssembler::MaterializeMergedConstants(uint32_t arity) {
VectorOf(stack_base, num_locals())}) {
for (VarState& slot : slots) {
if (!slot.is_const()) continue;
- RegClass rc = reg_class_for(slot.type());
+ RegClass rc = reg_class_for(slot.kind());
if (cache_state_.has_unused_register(rc)) {
LiftoffRegister reg = cache_state_.unused_register(rc);
LoadConstant(reg, slot.constant());
@@ -671,7 +690,7 @@ void LiftoffAssembler::MaterializeMergedConstants(uint32_t arity) {
}
}
-void LiftoffAssembler::MergeFullStackWith(const CacheState& target,
+void LiftoffAssembler::MergeFullStackWith(CacheState& target,
const CacheState& source) {
DCHECK_EQ(source.stack_height(), target.stack_height());
// TODO(clemensb): Reuse the same StackTransferRecipe object to save some
@@ -680,10 +699,16 @@ void LiftoffAssembler::MergeFullStackWith(const CacheState& target,
for (uint32_t i = 0, e = source.stack_height(); i < e; ++i) {
transfers.TransferStackSlot(target.stack_state[i], source.stack_state[i]);
}
+
+ if (source.cached_instance != target.cached_instance) {
+ // Backward jumps (to loop headers) do not have a cached instance anyway, so
+ // ignore this. On forward jumps, just reset the cached instance in the
+ // target state.
+ target.ClearCachedInstanceRegister();
+ }
}
-void LiftoffAssembler::MergeStackWith(const CacheState& target,
- uint32_t arity) {
+void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity) {
// Before: ----------------|----- (discarded) ----|--- arity ---|
// ^target_stack_height ^stack_base ^stack_height
// After: ----|-- arity --|
@@ -704,6 +729,13 @@ void LiftoffAssembler::MergeStackWith(const CacheState& target,
transfers.TransferStackSlot(target.stack_state[target_stack_base + i],
cache_state_.stack_state[stack_base + i]);
}
+
+ if (cache_state_.cached_instance != target.cached_instance) {
+ // Backward jumps (to loop headers) do not have a cached instance anyway, so
+ // ignore this. On forward jumps, just reset the cached instance in the
+ // target state.
+ target.ClearCachedInstanceRegister();
+ }
}
void LiftoffAssembler::Spill(VarState* slot) {
@@ -711,7 +743,7 @@ void LiftoffAssembler::Spill(VarState* slot) {
case VarState::kStack:
return;
case VarState::kRegister:
- Spill(slot->offset(), slot->reg(), slot->type());
+ Spill(slot->offset(), slot->reg(), slot->kind());
cache_state_.dec_used(slot->reg());
break;
case VarState::kIntConst:
@@ -731,15 +763,20 @@ void LiftoffAssembler::SpillAllRegisters() {
for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
auto& slot = cache_state_.stack_state[i];
if (!slot.is_reg()) continue;
- Spill(slot.offset(), slot.reg(), slot.type());
+ Spill(slot.offset(), slot.reg(), slot.kind());
slot.MakeStack();
}
+ cache_state_.ClearCachedInstanceRegister();
cache_state_.reset_used_registers();
}
void LiftoffAssembler::ClearRegister(
Register reg, std::initializer_list<Register*> possible_uses,
LiftoffRegList pinned) {
+ if (reg == cache_state()->cached_instance) {
+ cache_state()->ClearCachedInstanceRegister();
+ return;
+ }
if (cache_state()->is_used(LiftoffRegister(reg))) {
SpillRegister(LiftoffRegister(reg));
}
@@ -748,7 +785,7 @@ void LiftoffAssembler::ClearRegister(
if (reg != *use) continue;
if (replacement == no_reg) {
replacement = GetUnusedRegister(kGpReg, pinned).gp();
- Move(replacement, reg, LiftoffAssembler::kWasmIntPtr);
+ Move(replacement, reg, LiftoffAssembler::kIntPtr);
}
// We cannot leave this loop early. There may be multiple uses of {reg}.
*use = replacement;
@@ -756,7 +793,7 @@ void LiftoffAssembler::ClearRegister(
}
namespace {
-void PrepareStackTransfers(const FunctionSig* sig,
+void PrepareStackTransfers(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
const VarState* slots,
LiftoffStackSlots* stack_slots,
@@ -769,8 +806,8 @@ void PrepareStackTransfers(const FunctionSig* sig,
uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
for (uint32_t i = num_params; i > 0; --i) {
const uint32_t param = i - 1;
- ValueType type = sig->GetParam(param);
- const bool is_gp_pair = kNeedI64RegPair && type == kWasmI64;
+ ValueKind kind = sig->GetParam(param);
+ const bool is_gp_pair = kNeedI64RegPair && kind == kI64;
const int num_lowered_params = is_gp_pair ? 2 : 1;
const VarState& slot = slots[param];
const uint32_t stack_offset = slot.offset();
@@ -784,10 +821,10 @@ void PrepareStackTransfers(const FunctionSig* sig,
call_descriptor->GetInputLocation(call_desc_input_idx);
if (loc.IsRegister()) {
DCHECK(!loc.IsAnyRegister());
- RegClass rc = is_gp_pair ? kGpReg : reg_class_for(type);
+ RegClass rc = is_gp_pair ? kGpReg : reg_class_for(kind);
int reg_code = loc.AsRegister();
LiftoffRegister reg =
- LiftoffRegister::from_external_code(rc, type, reg_code);
+ LiftoffRegister::from_external_code(rc, kind, reg_code);
param_regs->set(reg);
if (is_gp_pair) {
stack_transfers->LoadI64HalfIntoRegister(reg, slot, stack_offset,
@@ -806,7 +843,7 @@ void PrepareStackTransfers(const FunctionSig* sig,
} // namespace
void LiftoffAssembler::PrepareBuiltinCall(
- const FunctionSig* sig, compiler::CallDescriptor* call_descriptor,
+ const ValueKindSig* sig, compiler::CallDescriptor* call_descriptor,
std::initializer_list<VarState> params) {
LiftoffStackSlots stack_slots(this);
StackTransferRecipe stack_transfers(this);
@@ -825,7 +862,7 @@ void LiftoffAssembler::PrepareBuiltinCall(
cache_state_.reset_used_registers();
}
-void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
+void LiftoffAssembler::PrepareCall(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register* target,
Register* target_instance) {
@@ -834,12 +871,13 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
constexpr size_t kInputShift = 1;
// Spill all cache slots which are not being used as parameters.
+ cache_state_.ClearCachedInstanceRegister();
for (VarState* it = cache_state_.stack_state.end() - 1 - num_params;
it >= cache_state_.stack_state.begin() &&
!cache_state_.used_registers.is_empty();
--it) {
if (!it->is_reg()) continue;
- Spill(it->offset(), it->reg(), it->type());
+ Spill(it->offset(), it->reg(), it->kind());
cache_state_.dec_used(it->reg());
it->MakeStack();
}
@@ -856,8 +894,7 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
param_regs.set(instance_reg);
if (target_instance && *target_instance != instance_reg) {
stack_transfers.MoveRegister(LiftoffRegister(instance_reg),
- LiftoffRegister(*target_instance),
- kWasmIntPtr);
+ LiftoffRegister(*target_instance), kIntPtr);
}
if (num_params) {
@@ -875,10 +912,10 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
if (!free_regs.is_empty()) {
LiftoffRegister new_target = free_regs.GetFirstRegSet();
stack_transfers.MoveRegister(new_target, LiftoffRegister(*target),
- kWasmIntPtr);
+ kIntPtr);
*target = new_target.gp();
} else {
- stack_slots.Add(LiftoffAssembler::VarState(LiftoffAssembler::kWasmIntPtr,
+ stack_slots.Add(LiftoffAssembler::VarState(LiftoffAssembler::kIntPtr,
LiftoffRegister(*target), 0));
*target = no_reg;
}
@@ -900,15 +937,15 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
}
}
-void LiftoffAssembler::FinishCall(const FunctionSig* sig,
+void LiftoffAssembler::FinishCall(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor) {
int call_desc_return_idx = 0;
- for (ValueType return_type : sig->returns()) {
+ for (ValueKind return_kind : sig->returns()) {
DCHECK_LT(call_desc_return_idx, call_descriptor->ReturnCount());
- const bool needs_gp_pair = needs_gp_reg_pair(return_type);
+ const bool needs_gp_pair = needs_gp_reg_pair(return_kind);
const int num_lowered_params = 1 + needs_gp_pair;
- const ValueType lowered_type = needs_gp_pair ? kWasmI32 : return_type;
- const RegClass rc = reg_class_for(lowered_type);
+ const ValueKind lowered_kind = needs_gp_pair ? kI32 : return_kind;
+ const RegClass rc = reg_class_for(lowered_kind);
// Initialize to anything, will be set in the loop and used afterwards.
LiftoffRegister reg_pair[2] = {kGpCacheRegList.GetFirstRegSet(),
kGpCacheRegList.GetFirstRegSet()};
@@ -919,7 +956,7 @@ void LiftoffAssembler::FinishCall(const FunctionSig* sig,
if (loc.IsRegister()) {
DCHECK(!loc.IsAnyRegister());
reg_pair[pair_idx] = LiftoffRegister::from_external_code(
- rc, lowered_type, loc.AsRegister());
+ rc, lowered_kind, loc.AsRegister());
} else {
DCHECK(loc.IsCallerFrameSlot());
reg_pair[pair_idx] = GetUnusedRegister(rc, pinned);
@@ -927,16 +964,16 @@ void LiftoffAssembler::FinishCall(const FunctionSig* sig,
int offset = call_descriptor->GetOffsetToReturns();
int return_slot = -loc.GetLocation() - offset - 1;
LoadReturnStackSlot(reg_pair[pair_idx],
- return_slot * kSystemPointerSize, lowered_type);
+ return_slot * kSystemPointerSize, lowered_kind);
}
if (pair_idx == 0) {
pinned.set(reg_pair[0]);
}
}
if (num_lowered_params == 1) {
- PushRegister(return_type, reg_pair[0]);
+ PushRegister(return_kind, reg_pair[0]);
} else {
- PushRegister(return_type, LiftoffRegister::ForPair(reg_pair[0].gp(),
+ PushRegister(return_kind, LiftoffRegister::ForPair(reg_pair[0].gp(),
reg_pair[1].gp()));
}
}
@@ -945,21 +982,21 @@ void LiftoffAssembler::FinishCall(const FunctionSig* sig,
}
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src,
- ValueType type) {
+ ValueKind kind) {
DCHECK_EQ(dst.reg_class(), src.reg_class());
DCHECK_NE(dst, src);
if (kNeedI64RegPair && dst.is_gp_pair()) {
// Use the {StackTransferRecipe} to move pairs, as the registers in the
// pairs might overlap.
- StackTransferRecipe(this).MoveRegister(dst, src, type);
+ StackTransferRecipe(this).MoveRegister(dst, src, kind);
} else if (kNeedS128RegPair && dst.is_fp_pair()) {
- // Calling low_fp is fine, Move will automatically check the type and
+ // Calling low_fp is fine, Move will automatically check the kind and
// convert this FP to its SIMD register, and use a SIMD move.
- Move(dst.low_fp(), src.low_fp(), type);
+ Move(dst.low_fp(), src.low_fp(), kind);
} else if (dst.is_gp()) {
- Move(dst.gp(), src.gp(), type);
+ Move(dst.gp(), src.gp(), kind);
} else {
- Move(dst.fp(), src.fp(), type);
+ Move(dst.fp(), src.fp(), kind);
}
}
@@ -968,7 +1005,7 @@ void LiftoffAssembler::ParallelRegisterMove(
StackTransferRecipe stack_transfers(this);
for (auto tuple : tuples) {
if (tuple.dst == tuple.src) continue;
- stack_transfers.MoveRegister(tuple.dst, tuple.src, tuple.type);
+ stack_transfers.MoveRegister(tuple.dst, tuple.src, tuple.kind);
}
}
@@ -976,19 +1013,19 @@ void LiftoffAssembler::MoveToReturnLocations(
const FunctionSig* sig, compiler::CallDescriptor* descriptor) {
StackTransferRecipe stack_transfers(this);
if (sig->return_count() == 1) {
- ValueType return_type = sig->GetReturn(0);
- // Defaults to a gp reg, will be set below if return type is not gp.
+ ValueKind return_kind = sig->GetReturn(0).kind();
+ // Defaults to a gp reg, will be set below if return kind is not gp.
LiftoffRegister return_reg = LiftoffRegister(kGpReturnRegisters[0]);
- if (needs_gp_reg_pair(return_type)) {
+ if (needs_gp_reg_pair(return_kind)) {
return_reg = LiftoffRegister::ForPair(kGpReturnRegisters[0],
kGpReturnRegisters[1]);
- } else if (needs_fp_reg_pair(return_type)) {
+ } else if (needs_fp_reg_pair(return_kind)) {
return_reg = LiftoffRegister::ForFpPair(kFpReturnRegisters[0]);
- } else if (reg_class_for(return_type) == kFpReg) {
+ } else if (reg_class_for(return_kind) == kFpReg) {
return_reg = LiftoffRegister(kFpReturnRegisters[0]);
} else {
- DCHECK_EQ(kGpReg, reg_class_for(return_type));
+ DCHECK_EQ(kGpReg, reg_class_for(return_kind));
}
stack_transfers.LoadIntoRegister(return_reg,
cache_state_.stack_state.back(),
@@ -1003,8 +1040,8 @@ void LiftoffAssembler::MoveToReturnLocations(
// Fill return frame slots first to ensure that all potential spills happen
// before we prepare the stack transfers.
for (size_t i = 0; i < sig->return_count(); ++i) {
- ValueType return_type = sig->GetReturn(i);
- bool needs_gp_pair = needs_gp_reg_pair(return_type);
+ ValueKind return_kind = sig->GetReturn(i).kind();
+ bool needs_gp_pair = needs_gp_reg_pair(return_kind);
int num_lowered_params = 1 + needs_gp_pair;
for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
compiler::LinkageLocation loc =
@@ -1015,16 +1052,16 @@ void LiftoffAssembler::MoveToReturnLocations(
LiftoffRegister reg = needs_gp_pair
? LoadI64HalfIntoRegister(slot, half)
: LoadToRegister(slot, {});
- ValueType lowered_type = needs_gp_pair ? kWasmI32 : return_type;
- StoreCallerFrameSlot(reg, -loc.AsCallerFrameSlot(), lowered_type);
+ ValueKind lowered_kind = needs_gp_pair ? kI32 : return_kind;
+ StoreCallerFrameSlot(reg, -loc.AsCallerFrameSlot(), lowered_kind);
}
}
}
// Prepare and execute stack transfers.
call_desc_return_idx = 0;
for (size_t i = 0; i < sig->return_count(); ++i) {
- ValueType return_type = sig->GetReturn(i);
- bool needs_gp_pair = needs_gp_reg_pair(return_type);
+ ValueKind return_kind = sig->GetReturn(i).kind();
+ bool needs_gp_pair = needs_gp_reg_pair(return_kind);
int num_lowered_params = 1 + needs_gp_pair;
for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
RegPairHalf half = pair_idx == 0 ? kLowWord : kHighWord;
@@ -1033,10 +1070,10 @@ void LiftoffAssembler::MoveToReturnLocations(
if (loc.IsRegister()) {
DCHECK(!loc.IsAnyRegister());
int reg_code = loc.AsRegister();
- ValueType lowered_type = needs_gp_pair ? kWasmI32 : return_type;
- RegClass rc = reg_class_for(lowered_type);
+ ValueKind lowered_kind = needs_gp_pair ? kI32 : return_kind;
+ RegClass rc = reg_class_for(lowered_kind);
LiftoffRegister reg =
- LiftoffRegister::from_external_code(rc, return_type, reg_code);
+ LiftoffRegister::from_external_code(rc, return_kind, reg_code);
VarState& slot = slots[i];
if (needs_gp_pair) {
stack_transfers.LoadI64HalfIntoRegister(reg, slot, slot.offset(),
@@ -1064,6 +1101,14 @@ bool LiftoffAssembler::ValidateCacheState() const {
}
used_regs.set(reg);
}
+ if (cache_state_.cached_instance != no_reg) {
+ DCHECK(!used_regs.has(cache_state_.cached_instance));
+ int liftoff_code =
+ LiftoffRegister{cache_state_.cached_instance}.liftoff_code();
+ used_regs.set(cache_state_.cached_instance);
+ DCHECK_EQ(0, register_use_count[liftoff_code]);
+ register_use_count[liftoff_code] = 1;
+ }
bool valid = memcmp(register_use_count, cache_state_.register_use_count,
sizeof(register_use_count)) == 0 &&
used_regs == cache_state_.used_registers;
@@ -1079,10 +1124,9 @@ bool LiftoffAssembler::ValidateCacheState() const {
}
#endif
-LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates,
- LiftoffRegList pinned) {
+LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates) {
// Spill one cached value to free a register.
- LiftoffRegister spill_reg = cache_state_.GetNextSpillReg(candidates, pinned);
+ LiftoffRegister spill_reg = cache_state_.GetNextSpillReg(candidates);
SpillRegister(spill_reg);
return spill_reg;
}
@@ -1114,7 +1158,7 @@ LiftoffRegister LiftoffAssembler::SpillAdjacentFpRegisters(
// b. If used, spill it.
// We spill one register in 2 and 3a, and two registers in 3b.
- LiftoffRegister first_reg = GetUnusedRegister(kFpCacheRegList, pinned);
+ LiftoffRegister first_reg = GetUnusedRegister(kFpReg, pinned);
LiftoffRegister second_reg = first_reg, low_reg = first_reg;
if (first_reg.fp().code() % 2 == 0) {
@@ -1148,7 +1192,7 @@ void LiftoffAssembler::SpillRegister(LiftoffRegister reg) {
cache_state_.last_spilled_regs.set(slot->reg().low());
cache_state_.last_spilled_regs.set(slot->reg().high());
}
- Spill(slot->offset(), slot->reg(), slot->type());
+ Spill(slot->offset(), slot->reg(), slot->kind());
slot->MakeStack();
if (--remaining_uses == 0) break;
}
@@ -1160,14 +1204,14 @@ void LiftoffAssembler::set_num_locals(uint32_t num_locals) {
DCHECK_EQ(0, num_locals_); // only call this once.
num_locals_ = num_locals;
if (num_locals > kInlineLocalTypes) {
- more_local_types_ = reinterpret_cast<ValueType*>(
- base::Malloc(num_locals * sizeof(ValueType)));
+ more_local_types_ = reinterpret_cast<ValueKind*>(
+ base::Malloc(num_locals * sizeof(ValueKind)));
DCHECK_NOT_NULL(more_local_types_);
}
}
std::ostream& operator<<(std::ostream& os, VarState slot) {
- os << slot.type().name() << ":";
+ os << name(slot.kind()) << ":";
switch (slot.loc()) {
case VarState::kStack:
return os << "s";
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 94f91ab0fd..13c0d45c1e 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -73,25 +73,26 @@ class LiftoffAssembler : public TurboAssembler {
// Each slot in our stack frame currently has exactly 8 bytes.
static constexpr int kStackSlotSize = 8;
- static constexpr ValueType kWasmIntPtr =
- kSystemPointerSize == 8 ? kWasmI64 : kWasmI32;
+ static constexpr ValueKind kIntPtr = kSystemPointerSize == 8 ? kI64 : kI32;
+
+ using ValueKindSig = Signature<ValueKind>;
class VarState {
public:
enum Location : uint8_t { kStack, kRegister, kIntConst };
- explicit VarState(ValueType type, int offset)
- : loc_(kStack), type_(type), spill_offset_(offset) {}
- explicit VarState(ValueType type, LiftoffRegister r, int offset)
- : loc_(kRegister), type_(type), reg_(r), spill_offset_(offset) {
- DCHECK_EQ(r.reg_class(), reg_class_for(type));
+ explicit VarState(ValueKind kind, int offset)
+ : loc_(kStack), kind_(kind), spill_offset_(offset) {}
+ explicit VarState(ValueKind kind, LiftoffRegister r, int offset)
+ : loc_(kRegister), kind_(kind), reg_(r), spill_offset_(offset) {
+ DCHECK_EQ(r.reg_class(), reg_class_for(kind));
}
- explicit VarState(ValueType type, int32_t i32_const, int offset)
+ explicit VarState(ValueKind kind, int32_t i32_const, int offset)
: loc_(kIntConst),
- type_(type),
+ kind_(kind),
i32_const_(i32_const),
spill_offset_(offset) {
- DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
+ DCHECK(kind_ == kI32 || kind_ == kI64);
}
bool is_stack() const { return loc_ == kStack; }
@@ -100,7 +101,7 @@ class LiftoffAssembler : public TurboAssembler {
bool is_reg() const { return loc_ == kRegister; }
bool is_const() const { return loc_ == kIntConst; }
- ValueType type() const { return type_; }
+ ValueKind kind() const { return kind_; }
Location loc() const { return loc_; }
@@ -109,10 +110,10 @@ class LiftoffAssembler : public TurboAssembler {
return i32_const_;
}
WasmValue constant() const {
- DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
+ DCHECK(kind_ == kI32 || kind_ == kI64);
DCHECK_EQ(loc_, kIntConst);
- return type_ == kWasmI32 ? WasmValue(i32_const_)
- : WasmValue(int64_t{i32_const_});
+ return kind_ == kI32 ? WasmValue(i32_const_)
+ : WasmValue(int64_t{i32_const_});
}
int offset() const { return spill_offset_; }
@@ -133,7 +134,7 @@ class LiftoffAssembler : public TurboAssembler {
}
void MakeConstant(int32_t i32_const) {
- DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
+ DCHECK(kind_ == kI32 || kind_ == kI64);
loc_ = kIntConst;
i32_const_ = i32_const;
}
@@ -142,7 +143,7 @@ class LiftoffAssembler : public TurboAssembler {
// from different stack states.
void Copy(VarState src) {
loc_ = src.loc();
- type_ = src.type();
+ kind_ = src.kind();
if (loc_ == kRegister) {
reg_ = src.reg();
} else if (loc_ == kIntConst) {
@@ -154,7 +155,7 @@ class LiftoffAssembler : public TurboAssembler {
Location loc_;
// TODO(wasm): This is redundant, the decoder already knows the type of each
// stack value. Try to collapse.
- ValueType type_;
+ ValueKind kind_;
union {
LiftoffRegister reg_; // used if loc_ == kRegister
@@ -192,6 +193,7 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegList used_registers;
uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
LiftoffRegList last_spilled_regs;
+ Register cached_instance = no_reg;
bool has_unused_register(RegClass rc, LiftoffRegList pinned = {}) const {
if (kNeedI64RegPair && rc == kGpRegPair) {
@@ -205,13 +207,11 @@ class LiftoffAssembler : public TurboAssembler {
}
DCHECK(rc == kGpReg || rc == kFpReg);
LiftoffRegList candidates = GetCacheRegList(rc);
- return has_unused_register(candidates, pinned);
+ return has_unused_register(candidates.MaskOut(pinned));
}
- bool has_unused_register(LiftoffRegList candidates,
- LiftoffRegList pinned = {}) const {
- LiftoffRegList available_regs =
- candidates.MaskOut(used_registers).MaskOut(pinned);
+ bool has_unused_register(LiftoffRegList candidates) const {
+ LiftoffRegList available_regs = candidates.MaskOut(used_registers);
return !available_regs.is_empty();
}
@@ -241,6 +241,52 @@ class LiftoffAssembler : public TurboAssembler {
return available_regs.GetFirstRegSet();
}
+ // Volatile registers are registers which are used for caching values that
+ // can easily be reloaded. Those are returned first if we run out of free
+ // registers.
+ // Note: This interface is a bit more generic than currently needed, in
+ // anticipation of more "volatile registers" being added later.
+ bool has_volatile_register(LiftoffRegList candidates) {
+ return cached_instance != no_reg && candidates.has(cached_instance);
+ }
+
+ LiftoffRegister take_volatile_register(LiftoffRegList candidates) {
+ DCHECK(candidates.has(cached_instance));
+ LiftoffRegister ret{cached_instance};
+ DCHECK_EQ(1, register_use_count[ret.liftoff_code()]);
+ register_use_count[ret.liftoff_code()] = 0;
+ used_registers.clear(ret);
+ cached_instance = no_reg;
+ return ret;
+ }
+
+ void SetInstanceCacheRegister(Register reg) {
+ DCHECK_EQ(no_reg, cached_instance);
+ cached_instance = reg;
+ int liftoff_code = LiftoffRegister{reg}.liftoff_code();
+ DCHECK_EQ(0, register_use_count[liftoff_code]);
+ register_use_count[liftoff_code] = 1;
+ used_registers.set(reg);
+ }
+
+ Register TrySetCachedInstanceRegister(LiftoffRegList pinned) {
+ DCHECK_EQ(no_reg, cached_instance);
+ LiftoffRegList candidates = kGpCacheRegList.MaskOut(pinned);
+ if (!has_unused_register(candidates)) return no_reg;
+ SetInstanceCacheRegister(unused_register(candidates).gp());
+ DCHECK_NE(no_reg, cached_instance);
+ return cached_instance;
+ }
+
+ void ClearCachedInstanceRegister() {
+ if (cached_instance == no_reg) return;
+ int liftoff_code = LiftoffRegister{cached_instance}.liftoff_code();
+ DCHECK_EQ(1, register_use_count[liftoff_code]);
+ register_use_count[liftoff_code] = 0;
+ used_registers.clear(cached_instance);
+ cached_instance = no_reg;
+ }
+
void inc_used(LiftoffRegister reg) {
if (reg.is_pair()) {
inc_used(reg.low());
@@ -294,15 +340,13 @@ class LiftoffAssembler : public TurboAssembler {
memset(register_use_count, 0, sizeof(register_use_count));
}
- LiftoffRegister GetNextSpillReg(LiftoffRegList candidates,
- LiftoffRegList pinned = {}) {
- LiftoffRegList unpinned = candidates.MaskOut(pinned);
- DCHECK(!unpinned.is_empty());
+ LiftoffRegister GetNextSpillReg(LiftoffRegList candidates) {
+ DCHECK(!candidates.is_empty());
// This method should only be called if none of the candidates is free.
- DCHECK(unpinned.MaskOut(used_registers).is_empty());
- LiftoffRegList unspilled = unpinned.MaskOut(last_spilled_regs);
+ DCHECK(candidates.MaskOut(used_registers).is_empty());
+ LiftoffRegList unspilled = candidates.MaskOut(last_spilled_regs);
if (unspilled.is_empty()) {
- unspilled = unpinned;
+ unspilled = candidates;
last_spilled_regs = {};
}
LiftoffRegister reg = unspilled.GetFirstRegSet();
@@ -345,13 +389,13 @@ class LiftoffAssembler : public TurboAssembler {
// Use this to pop a value into a register that has no other uses, so it
// can be modified.
LiftoffRegister PopToModifiableRegister(LiftoffRegList pinned = {}) {
- ValueType type = cache_state_.stack_state.back().type();
+ ValueKind kind = cache_state_.stack_state.back().kind();
LiftoffRegister reg = PopToRegister(pinned);
if (cache_state()->is_free(reg)) return reg;
pinned.set(reg);
LiftoffRegister new_reg = GetUnusedRegister(reg.reg_class(), pinned);
- Move(new_reg, reg, type);
+ Move(new_reg, reg, kind);
return new_reg;
}
@@ -370,10 +414,10 @@ class LiftoffAssembler : public TurboAssembler {
// stack, so that we can merge different values on the back-edge.
void PrepareLoopArgs(int num);
- int NextSpillOffset(ValueType type) {
- int offset = TopSpillOffset() + SlotSizeForType(type);
- if (NeedsAlignment(type)) {
- offset = RoundUp(offset, SlotSizeForType(type));
+ int NextSpillOffset(ValueKind kind) {
+ int offset = TopSpillOffset() + SlotSizeForType(kind);
+ if (NeedsAlignment(kind)) {
+ offset = RoundUp(offset, SlotSizeForType(kind));
}
return offset;
}
@@ -384,25 +428,25 @@ class LiftoffAssembler : public TurboAssembler {
: cache_state_.stack_state.back().offset();
}
- void PushRegister(ValueType type, LiftoffRegister reg) {
- DCHECK_EQ(reg_class_for(type), reg.reg_class());
+ void PushRegister(ValueKind kind, LiftoffRegister reg) {
+ DCHECK_EQ(reg_class_for(kind), reg.reg_class());
cache_state_.inc_used(reg);
- cache_state_.stack_state.emplace_back(type, reg, NextSpillOffset(type));
+ cache_state_.stack_state.emplace_back(kind, reg, NextSpillOffset(kind));
}
- void PushConstant(ValueType type, int32_t i32_const) {
- DCHECK(type == kWasmI32 || type == kWasmI64);
- cache_state_.stack_state.emplace_back(type, i32_const,
- NextSpillOffset(type));
+ void PushConstant(ValueKind kind, int32_t i32_const) {
+ DCHECK(kind == kI32 || kind == kI64);
+ cache_state_.stack_state.emplace_back(kind, i32_const,
+ NextSpillOffset(kind));
}
- void PushStack(ValueType type) {
- cache_state_.stack_state.emplace_back(type, NextSpillOffset(type));
+ void PushStack(ValueKind kind) {
+ cache_state_.stack_state.emplace_back(kind, NextSpillOffset(kind));
}
void SpillRegister(LiftoffRegister);
- uint32_t GetNumUses(LiftoffRegister reg) {
+ uint32_t GetNumUses(LiftoffRegister reg) const {
return cache_state_.get_use_count(reg);
}
@@ -421,9 +465,9 @@ class LiftoffAssembler : public TurboAssembler {
// Get an unused register for class {rc}, potentially spilling to free one.
LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned) {
if (kNeedI64RegPair && rc == kGpRegPair) {
- LiftoffRegList candidates = kGpCacheRegList;
- Register low = pinned.set(GetUnusedRegister(candidates, pinned)).gp();
- Register high = GetUnusedRegister(candidates, pinned).gp();
+ LiftoffRegList candidates = kGpCacheRegList.MaskOut(pinned);
+ Register low = candidates.clear(GetUnusedRegister(candidates)).gp();
+ Register high = GetUnusedRegister(candidates).gp();
return LiftoffRegister::ForPair(low, high);
} else if (kNeedS128RegPair && rc == kFpRegPair) {
// kFpRegPair specific logic here because we need adjacent registers, not
@@ -435,23 +479,26 @@ class LiftoffAssembler : public TurboAssembler {
return LiftoffRegister::ForFpPair(low_fp);
}
DCHECK(rc == kGpReg || rc == kFpReg);
- LiftoffRegList candidates = GetCacheRegList(rc);
- return GetUnusedRegister(candidates, pinned);
+ LiftoffRegList candidates = GetCacheRegList(rc).MaskOut(pinned);
+ return GetUnusedRegister(candidates);
}
// Get an unused register of {candidates}, potentially spilling to free one.
- LiftoffRegister GetUnusedRegister(LiftoffRegList candidates,
- LiftoffRegList pinned = {}) {
- if (cache_state_.has_unused_register(candidates, pinned)) {
- return cache_state_.unused_register(candidates, pinned);
+ LiftoffRegister GetUnusedRegister(LiftoffRegList candidates) {
+ DCHECK(!candidates.is_empty());
+ if (cache_state_.has_unused_register(candidates)) {
+ return cache_state_.unused_register(candidates);
}
- return SpillOneRegister(candidates, pinned);
+ if (cache_state_.has_volatile_register(candidates)) {
+ return cache_state_.take_volatile_register(candidates);
+ }
+ return SpillOneRegister(candidates);
}
void MaterializeMergedConstants(uint32_t arity);
- void MergeFullStackWith(const CacheState& target, const CacheState& source);
- void MergeStackWith(const CacheState& target, uint32_t arity);
+ void MergeFullStackWith(CacheState& target, const CacheState& source);
+ void MergeStackWith(CacheState& target, uint32_t arity);
void Spill(VarState* slot);
void SpillLocals();
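
With the cached-instance changes above, GetUnusedRegister tries three sources in order: a genuinely free register, then the "volatile" cached-instance register (cheap to reclaim, since the instance pointer can always be reloaded from the frame), and only then spilling a live value. A toy, self-contained model of that priority; it mirrors the shape of has_volatile_register/take_volatile_register but none of the V8 data structures:

#include <cassert>
#include <cstdint>
#include <optional>

// Toy model: registers are small integer codes, `used` is a bitmask, and at
// most one register caches the instance pointer.
struct ToyCacheState {
  uint32_t used = 0;
  std::optional<int> cached_instance;

  // Returns a register code from {candidates}, or -1 if a spill is needed.
  int Allocate(uint32_t candidates) {
    uint32_t free = candidates & ~used;
    for (int code = 0; code < 32; ++code) {
      if (free & (uint32_t{1} << code)) {  // 1. a completely unused register
        used |= uint32_t{1} << code;
        return code;
      }
    }
    if (cached_instance && ((candidates >> *cached_instance) & 1)) {
      int code = *cached_instance;  // 2. the volatile register: drop the
      cached_instance.reset();      //    cache, no spill code is emitted
      return code;
    }
    return -1;  // 3. all candidates hold real values; one must be spilled
  }
};

int main() {
  ToyCacheState state;
  state.used = 0b0111;        // three registers are occupied
  state.cached_instance = 2;  // one of them merely caches the instance
  assert(state.Allocate(0b0111) == 2);   // reclaimed without spilling
  assert(state.Allocate(0b0111) == -1);  // only real values remain
}
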
@@ -469,7 +516,12 @@ class LiftoffAssembler : public TurboAssembler {
template <typename... Regs>
void SpillRegisters(Regs... regs) {
for (LiftoffRegister r : {LiftoffRegister(regs)...}) {
- if (cache_state()->is_used(r)) SpillRegister(r);
+ if (cache_state_.is_free(r)) continue;
+ if (r.is_gp() && cache_state_.cached_instance == r.gp()) {
+ cache_state_.ClearCachedInstanceRegister();
+ } else {
+ SpillRegister(r);
+ }
}
}
@@ -484,32 +536,32 @@ class LiftoffAssembler : public TurboAssembler {
}
// Load parameters into the right registers / stack slots for the call.
- void PrepareBuiltinCall(const FunctionSig* sig,
+ void PrepareBuiltinCall(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
std::initializer_list<VarState> params);
// Load parameters into the right registers / stack slots for the call.
// Move {*target} into another register if needed and update {*target} to that
// register, or {no_reg} if target was spilled to the stack.
- void PrepareCall(const FunctionSig*, compiler::CallDescriptor*,
+ void PrepareCall(const ValueKindSig*, compiler::CallDescriptor*,
Register* target = nullptr,
Register* target_instance = nullptr);
// Process return values of the call.
- void FinishCall(const FunctionSig*, compiler::CallDescriptor*);
+ void FinishCall(const ValueKindSig*, compiler::CallDescriptor*);
// Move {src} into {dst}. {src} and {dst} must be different.
- void Move(LiftoffRegister dst, LiftoffRegister src, ValueType);
+ void Move(LiftoffRegister dst, LiftoffRegister src, ValueKind);
- // Parallel register move: For a list of tuples <dst, src, type>, move the
- // {src} register of type {type} into {dst}. If {src} equals {dst}, ignore
+ // Parallel register move: For a list of tuples <dst, src, kind>, move the
+ // {src} register of kind {kind} into {dst}. If {src} equals {dst}, ignore
// that tuple.
struct ParallelRegisterMoveTuple {
LiftoffRegister dst;
LiftoffRegister src;
- ValueType type;
+ ValueKind kind;
template <typename Dst, typename Src>
- ParallelRegisterMoveTuple(Dst dst, Src src, ValueType type)
- : dst(dst), src(src), type(type) {}
+ ParallelRegisterMoveTuple(Dst dst, Src src, ValueKind kind)
+ : dst(dst), src(src), kind(kind) {}
};
void ParallelRegisterMove(Vector<const ParallelRegisterMoveTuple>);
@@ -543,33 +595,45 @@ class LiftoffAssembler : public TurboAssembler {
inline void FinishCode();
inline void AbortCompilation();
inline static constexpr int StaticStackFrameSize();
- inline static int SlotSizeForType(ValueType type);
- inline static bool NeedsAlignment(ValueType type);
+ inline static int SlotSizeForType(ValueKind kind);
+ inline static bool NeedsAlignment(ValueKind kind);
inline void LoadConstant(LiftoffRegister, WasmValue,
RelocInfo::Mode rmode = RelocInfo::NONE);
- inline void LoadFromInstance(Register dst, int offset, int size);
- inline void LoadTaggedPointerFromInstance(Register dst, int offset);
+ inline void LoadInstanceFromFrame(Register dst);
+ inline void LoadFromInstance(Register dst, Register instance, int offset,
+ int size);
+ inline void LoadTaggedPointerFromInstance(Register dst, Register instance,
+ int offset);
inline void SpillInstance(Register instance);
inline void FillInstanceInto(Register dst);
inline void LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg, int32_t offset_imm,
LiftoffRegList pinned);
+ enum SkipWriteBarrier : bool {
+ kSkipWriteBarrier = true,
+ kNoSkipWriteBarrier = false
+ };
inline void StoreTaggedPointer(Register dst_addr, Register offset_reg,
int32_t offset_imm, LiftoffRegister src,
- LiftoffRegList pinned);
+ LiftoffRegList pinned,
+ SkipWriteBarrier = kNoSkipWriteBarrier);
inline void LoadFixedArrayLengthAsInt32(LiftoffRegister dst, Register array,
LiftoffRegList pinned) {
int offset = FixedArray::kLengthOffset - kHeapObjectTag;
+ LoadTaggedSignedAsInt32(dst, array, offset, pinned);
+ }
+ inline void LoadTaggedSignedAsInt32(LiftoffRegister dst, Register src_addr,
+ int32_t offset, LiftoffRegList pinned) {
if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
DCHECK_EQ(kSmiShiftSize + kSmiTagSize, 4 * kBitsPerByte);
offset += 4;
#endif
- Load(dst, array, no_reg, offset, LoadType::kI32Load, pinned);
+ Load(dst, src_addr, no_reg, offset, LoadType::kI32Load, pinned);
} else {
DCHECK(SmiValuesAre31Bits());
- Load(dst, array, no_reg, offset, LoadType::kI32Load, pinned);
+ Load(dst, src_addr, no_reg, offset, LoadType::kI32Load, pinned);
emit_i32_sari(dst.gp(), dst.gp(), kSmiTagSize);
}
}
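
Editor's note on LoadTaggedSignedAsInt32 above: with 32-bit Smi payloads the value sits in the upper half of the tagged word, so the load simply reads the high 32-bit word (hence offset += 4 on little-endian); with 31-bit Smis a single arithmetic shift by the tag size recovers the value. A standalone sketch of the two decodings follows; it is an editor's illustration with simplified constants, not code from the patch.

// Editor's illustration only; constants are simplified stand-ins.
#include <cassert>
#include <cstdint>

// SmiValuesAre32Bits(): value occupies bits 32..63 of the tagged word,
// so loading the upper 32-bit word yields the value directly.
int32_t DecodeSmi32BitPayload(uint64_t tagged) {
  return static_cast<int32_t>(tagged >> 32);
}

// SmiValuesAre31Bits(): value occupies bits 1..31; shift the tag bit out
// arithmetically (the emit_i32_sari call above).
int32_t DecodeSmi31BitPayload(uint32_t tagged) {
  return static_cast<int32_t>(tagged) >> 1;
}

int main() {
  assert(DecodeSmi32BitPayload(uint64_t{42} << 32) == 42);
  assert(DecodeSmi31BitPayload(static_cast<uint32_t>(-7) << 1) == -7);
  return 0;
}
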
@@ -622,19 +686,19 @@ class LiftoffAssembler : public TurboAssembler {
inline void AtomicFence();
inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
- ValueType);
+ ValueKind);
inline void StoreCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
- ValueType);
- inline void LoadReturnStackSlot(LiftoffRegister, int offset, ValueType);
+ ValueKind);
+ inline void LoadReturnStackSlot(LiftoffRegister, int offset, ValueKind);
inline void MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
- ValueType);
+ ValueKind);
- inline void Move(Register dst, Register src, ValueType);
- inline void Move(DoubleRegister dst, DoubleRegister src, ValueType);
+ inline void Move(Register dst, Register src, ValueKind);
+ inline void Move(DoubleRegister dst, DoubleRegister src, ValueKind);
- inline void Spill(int offset, LiftoffRegister, ValueType);
+ inline void Spill(int offset, LiftoffRegister, ValueKind);
inline void Spill(int offset, WasmValue);
- inline void Fill(LiftoffRegister, int offset, ValueType);
+ inline void Fill(LiftoffRegister, int offset, ValueKind);
// Only used on 32-bit systems: Fill a register from a "half stack slot", i.e.
// 4 bytes on the stack holding half of a 64-bit value.
inline void FillI64Half(Register, int offset, RegPairHalf);
@@ -777,7 +841,7 @@ class LiftoffAssembler : public TurboAssembler {
emit_type_conversion(kExprI64UConvertI32, LiftoffRegister(dst),
LiftoffRegister(src));
} else if (dst != src) {
- Move(dst, src, kWasmI32);
+ Move(dst, src, kI32);
}
}
@@ -843,7 +907,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_jump(Label*);
inline void emit_jump(Register);
- inline void emit_cond_jump(LiftoffCondition, Label*, ValueType value,
+ inline void emit_cond_jump(LiftoffCondition, Label*, ValueKind value,
Register lhs, Register rhs = no_reg);
inline void emit_i32_cond_jumpi(LiftoffCondition liftoff_cond, Label* label,
Register lhs, int imm);
@@ -863,7 +927,7 @@ class LiftoffAssembler : public TurboAssembler {
// should be emitted instead.
inline bool emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
- LiftoffRegister false_value, ValueType type);
+ LiftoffRegister false_value, ValueKind kind);
enum SmiCheckMode { kJumpOnSmi, kJumpOnNotSmi };
inline void emit_smi_check(Register obj, Label* target, SmiCheckMode mode);
@@ -875,11 +939,15 @@ class LiftoffAssembler : public TurboAssembler {
inline void LoadLane(LiftoffRegister dst, LiftoffRegister src, Register addr,
Register offset_reg, uintptr_t offset_imm, LoadType type,
uint8_t lane, uint32_t* protected_load_pc);
+ inline void StoreLane(Register dst, Register offset, uintptr_t offset_imm,
+ LiftoffRegister src, StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc);
inline void emit_i8x16_shuffle(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs, const uint8_t shuffle[16],
bool is_swizzle);
inline void emit_i8x16_swizzle(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_i8x16_popcnt(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i8x16_splat(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i16x8_splat(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i32x4_splat(LiftoffRegister dst, LiftoffRegister src);
@@ -922,6 +990,14 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs);
inline void emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
inline void emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
@@ -949,7 +1025,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_s128_select(LiftoffRegister dst, LiftoffRegister src1,
LiftoffRegister src2, LiftoffRegister mask);
inline void emit_i8x16_neg(LiftoffRegister dst, LiftoffRegister src);
- inline void emit_v8x16_anytrue(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_v128_anytrue(LiftoffRegister dst, LiftoffRegister src);
inline void emit_v8x16_alltrue(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i8x16_bitmask(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
@@ -987,7 +1063,6 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i8x16_max_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i16x8_neg(LiftoffRegister dst, LiftoffRegister src);
- inline void emit_v16x8_anytrue(LiftoffRegister dst, LiftoffRegister src);
inline void emit_v16x8_alltrue(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i16x8_bitmask(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1024,6 +1099,10 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs);
inline void emit_i16x8_max_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src);
inline void emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2);
@@ -1036,8 +1115,10 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2);
+ inline void emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2);
inline void emit_i32x4_neg(LiftoffRegister dst, LiftoffRegister src);
- inline void emit_v32x4_anytrue(LiftoffRegister dst, LiftoffRegister src);
inline void emit_v32x4_alltrue(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i32x4_bitmask(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1068,6 +1149,10 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs);
inline void emit_i32x4_dot_i16x8_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src);
inline void emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2);
@@ -1081,6 +1166,7 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister src1,
LiftoffRegister src2);
inline void emit_i64x2_neg(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_v64x2_alltrue(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1112,6 +1198,14 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister src1,
LiftoffRegister src2);
inline void emit_i64x2_bitmask(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src);
inline void emit_f32x4_abs(LiftoffRegister dst, LiftoffRegister src);
inline void emit_f32x4_neg(LiftoffRegister dst, LiftoffRegister src);
inline void emit_f32x4_sqrt(LiftoffRegister dst, LiftoffRegister src);
@@ -1158,6 +1252,18 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs);
inline void emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src);
inline void emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src);
inline void emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
@@ -1205,6 +1311,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i8x16_abs(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i16x8_abs(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i32x4_abs(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i64x2_abs(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i8x16_extract_lane_s(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx);
@@ -1261,18 +1368,18 @@ class LiftoffAssembler : public TurboAssembler {
inline void DropStackSlotsAndRet(uint32_t num_stack_slots);
// Execute a C call. Arguments are pushed to the stack and a pointer to this
- // region is passed to the C function. If {out_argument_type != kWasmStmt},
+ // region is passed to the C function. If {out_argument_kind != kStmt},
// this is the return value of the C function, stored in {rets[0]}. Further
// outputs (specified in {sig->returns()}) are read from the buffer and stored
// in the remaining {rets} registers.
- inline void CallC(const FunctionSig* sig, const LiftoffRegister* args,
- const LiftoffRegister* rets, ValueType out_argument_type,
+ inline void CallC(const ValueKindSig* sig, const LiftoffRegister* args,
+ const LiftoffRegister* rets, ValueKind out_argument_kind,
int stack_bytes, ExternalReference ext_ref);
inline void CallNativeWasmCode(Address addr);
inline void TailCallNativeWasmCode(Address addr);
// Indirect call: If {target == no_reg}, then pop the target from the stack.
- inline void CallIndirect(const FunctionSig* sig,
+ inline void CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target);
inline void TailCallIndirect(Register target);
@@ -1293,17 +1400,17 @@ class LiftoffAssembler : public TurboAssembler {
int GetTotalFrameSize() const { return max_used_spill_offset_; }
- ValueType local_type(uint32_t index) {
+ ValueKind local_type(uint32_t index) {
DCHECK_GT(num_locals_, index);
- ValueType* locals =
+ ValueKind* locals =
num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
return locals[index];
}
- void set_local_type(uint32_t index, ValueType type) {
- ValueType* locals =
+ void set_local_type(uint32_t index, ValueKind kind) {
+ ValueKind* locals =
num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
- locals[index] = type;
+ locals[index] = kind;
}
CacheState* cache_state() { return &cache_state_; }
@@ -1325,13 +1432,13 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister LoadI64HalfIntoRegister(VarState slot, RegPairHalf half);
uint32_t num_locals_ = 0;
- static constexpr uint32_t kInlineLocalTypes = 8;
+ static constexpr uint32_t kInlineLocalTypes = 16;
union {
- ValueType local_types_[kInlineLocalTypes];
- ValueType* more_local_types_;
+ ValueKind local_types_[kInlineLocalTypes];
+ ValueKind* more_local_types_;
};
- static_assert(sizeof(ValueType) == 4,
- "Reconsider this inlining if ValueType gets bigger");
+ static_assert(sizeof(ValueKind) == 1,
+ "Reconsider this inlining if ValueKind gets bigger");
CacheState cache_state_;
// The maximum spill offset for slots in the value stack.
int max_used_spill_offset_ = StaticStackFrameSize();
@@ -1340,8 +1447,7 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffBailoutReason bailout_reason_ = kSuccess;
const char* bailout_detail_ = nullptr;
- V8_NOINLINE LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
- LiftoffRegList pinned);
+ V8_NOINLINE LiftoffRegister SpillOneRegister(LiftoffRegList candidates);
// Spill one or two fp registers to get a pair of adjacent fp registers.
LiftoffRegister SpillAdjacentFpRegisters(LiftoffRegList pinned);
};
@@ -1378,7 +1484,7 @@ void EmitI64IndependentHalfOperation(LiftoffAssembler* assm,
assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
(assm->*op)(tmp, lhs.low_gp(), rhs.low_gp());
(assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
- assm->Move(dst.low_gp(), tmp, kWasmI32);
+ assm->Move(dst.low_gp(), tmp, kI32);
}
template <void (LiftoffAssembler::*op)(Register, Register, int32_t)>
@@ -1406,7 +1512,7 @@ void EmitI64IndependentHalfOperationImm(LiftoffAssembler* assm,
assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs)).gp();
(assm->*op)(tmp, lhs.low_gp(), low_word);
(assm->*op)(dst.high_gp(), lhs.high_gp(), high_word);
- assm->Move(dst.low_gp(), tmp, kWasmI32);
+ assm->Move(dst.low_gp(), tmp, kI32);
}
} // namespace liftoff
@@ -1506,6 +1612,8 @@ class LiftoffStackSlots {
#include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/wasm/baseline/s390/liftoff-assembler-s390.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h"
#else
#error Unsupported architecture.
#endif
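
Editor's note on the header changes above: ValueKind is a one-byte enum (the static_assert changes from sizeof == 4 to sizeof == 1), which is why kInlineLocalTypes can grow from 8 to 16 while the inline buffer shrinks from 32 to 16 bytes. The storage pattern is the usual small-count-inline, large-count-on-heap union; a simplified sketch follows, as an editor's illustration with stand-in names rather than the real LiftoffAssembler fields.

// Editor's illustration only; not the actual LiftoffAssembler members.
#include <cstdint>

enum class Kind : uint8_t { kI32, kI64, kF32, kF64, kRef };
static_assert(sizeof(Kind) == 1, "reconsider the inline count if Kind grows");

struct LocalKinds {
  static constexpr uint32_t kInline = 16;
  uint32_t count = 0;
  union {
    Kind inline_kinds[kInline];  // used when count <= kInline
    Kind* heap_kinds;            // used when count > kInline
  };
  Kind* data() { return count <= kInline ? inline_kinds : heap_kinds; }
};

Doubling the inline capacity keeps more functions off the heap-allocated path at lower cost than before: 16 one-byte kinds take half the space of the previous 8 four-byte ValueTypes.
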
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 9a42bbf50c..01264e4e38 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -62,15 +62,16 @@ struct assert_field_size {
#define WASM_INSTANCE_OBJECT_FIELD_SIZE(name) \
FIELD_SIZE(WasmInstanceObject::k##name##Offset)
-#define LOAD_INSTANCE_FIELD(dst, name, load_size) \
- __ LoadFromInstance(dst, WASM_INSTANCE_OBJECT_FIELD_OFFSET(name), \
+#define LOAD_INSTANCE_FIELD(dst, name, load_size, pinned) \
+ __ LoadFromInstance(dst, LoadInstanceIntoRegister(pinned, dst), \
+ WASM_INSTANCE_OBJECT_FIELD_OFFSET(name), \
assert_field_size<WASM_INSTANCE_OBJECT_FIELD_SIZE(name), \
load_size>::size);
-#define LOAD_TAGGED_PTR_INSTANCE_FIELD(dst, name) \
- static_assert(WASM_INSTANCE_OBJECT_FIELD_SIZE(name) == kTaggedSize, \
- "field in WasmInstance does not have the expected size"); \
- __ LoadTaggedPointerFromInstance(dst, \
+#define LOAD_TAGGED_PTR_INSTANCE_FIELD(dst, name, pinned) \
+ static_assert(WASM_INSTANCE_OBJECT_FIELD_SIZE(name) == kTaggedSize, \
+ "field in WasmInstance does not have the expected size"); \
+ __ LoadTaggedPointerFromInstance(dst, LoadInstanceIntoRegister(pinned, dst), \
WASM_INSTANCE_OBJECT_FIELD_OFFSET(name));
#ifdef DEBUG
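
Editor's note on the two macros above: both now route through LoadInstanceIntoRegister(pinned, dst), so the instance is taken from a cached register when one is available and only reloaded otherwise. A rough sketch of that caching contract follows; it is an editor's illustration with heavily simplified types, not the helper's real signature.

// Editor's illustration only; simplified stand-ins for CacheState/Register.
#include <optional>

struct InstanceCache {
  std::optional<int> cached_reg;  // register currently holding the instance

  void SetInstanceCacheRegister(int reg) { cached_reg = reg; }
  void ClearCachedInstanceRegister() { cached_reg.reset(); }

  // Mirrors the idea behind LoadInstanceIntoRegister(pinned, dst) above:
  // reuse the cached register if present, otherwise reload the instance
  // (FillInstanceInto in the assembler) into {fallback} and cache it.
  int LoadInstanceIntoRegister(int fallback) {
    if (cached_reg) return *cached_reg;
    SetInstanceCacheRegister(fallback);
    return fallback;
  }
};

Because the cached register can be clobbered, the patch clears it after stack checks, after the tier-up runtime call, and at loop headers (the ClearCachedInstanceRegister calls further down in this file).
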
@@ -85,8 +86,13 @@ struct assert_field_size {
constexpr LoadType::LoadTypeValue kPointerLoadType =
kSystemPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
-constexpr ValueType kPointerValueType =
- kSystemPointerSize == 8 ? kWasmI64 : kWasmI32;
+constexpr ValueKind kPointerValueType = kSystemPointerSize == 8 ? kI64 : kI32;
+
+#if V8_TARGET_ARCH_32_BIT || defined(V8_COMPRESS_POINTERS)
+constexpr ValueKind kSmiValueType = kI32;
+#else
+constexpr ValueKind kSmiValueType = kI64;
+#endif
#if V8_TARGET_ARCH_ARM64
// On ARM64, the Assembler keeps track of pointers to Labels to resolve
@@ -156,6 +162,9 @@ constexpr LiftoffCondition GetCompareCondition(WasmOpcode opcode) {
// Builds a {DebugSideTable}.
class DebugSideTableBuilder {
+ using Entry = DebugSideTable::Entry;
+ using Value = Entry::Value;
+
public:
enum AssumeSpilling {
// All register values will be spilled before the pc covered by the debug
@@ -170,12 +179,28 @@ class DebugSideTableBuilder {
class EntryBuilder {
public:
- explicit EntryBuilder(int pc_offset,
- std::vector<DebugSideTable::Entry::Value> values)
- : pc_offset_(pc_offset), values_(std::move(values)) {}
+ explicit EntryBuilder(int pc_offset, int stack_height,
+ std::vector<Value> changed_values)
+ : pc_offset_(pc_offset),
+ stack_height_(stack_height),
+ changed_values_(std::move(changed_values)) {}
+
+ Entry ToTableEntry() {
+ return Entry{pc_offset_, stack_height_, std::move(changed_values_)};
+ }
- DebugSideTable::Entry ToTableEntry() {
- return DebugSideTable::Entry{pc_offset_, std::move(values_)};
+ void MinimizeBasedOnPreviousStack(const std::vector<Value>& last_values) {
+ auto dst = changed_values_.begin();
+ auto end = changed_values_.end();
+ for (auto src = dst; src != end; ++src) {
+ if (src->index < static_cast<int>(last_values.size()) &&
+ *src == last_values[src->index]) {
+ continue;
+ }
+ if (dst != src) *dst = *src;
+ ++dst;
+ }
+ changed_values_.erase(dst, end);
}
int pc_offset() const { return pc_offset_; }
@@ -183,67 +208,182 @@ class DebugSideTableBuilder {
private:
int pc_offset_;
- std::vector<DebugSideTable::Entry::Value> values_;
+ int stack_height_;
+ std::vector<Value> changed_values_;
};
- // Adds a new entry, and returns a pointer to a builder for modifying that
- // entry ({stack_height} includes {num_locals}).
- EntryBuilder* NewEntry(int pc_offset, int num_locals, int stack_height,
- LiftoffAssembler::VarState* stack_state,
- AssumeSpilling assume_spilling) {
- DCHECK_LE(num_locals, stack_height);
- // Record stack types.
- std::vector<DebugSideTable::Entry::Value> values(stack_height);
- for (int i = 0; i < stack_height; ++i) {
- const auto& slot = stack_state[i];
- values[i].type = slot.type();
- values[i].stack_offset = slot.offset();
+ // Adds a new entry in regular code.
+ void NewEntry(int pc_offset, Vector<LiftoffAssembler::VarState> stack_state,
+ AssumeSpilling assume_spilling) {
+ entries_.emplace_back(
+ pc_offset, static_cast<int>(stack_state.size()),
+ GetChangedStackValues(last_values_, stack_state, assume_spilling));
+ }
+
+ // Adds a new entry for OOL code, and returns a pointer to a builder for
+ // modifying that entry.
+ EntryBuilder* NewOOLEntry(Vector<LiftoffAssembler::VarState> stack_state,
+ AssumeSpilling assume_spilling) {
+ constexpr int kNoPcOffsetYet = -1;
+ ool_entries_.emplace_back(
+ kNoPcOffsetYet, static_cast<int>(stack_state.size()),
+ GetChangedStackValues(last_ool_values_, stack_state, assume_spilling));
+ return &ool_entries_.back();
+ }
+
+ void SetNumLocals(int num_locals) {
+ DCHECK_EQ(-1, num_locals_);
+ DCHECK_LE(0, num_locals);
+ num_locals_ = num_locals;
+ }
+
+ std::unique_ptr<DebugSideTable> GenerateDebugSideTable() {
+ DCHECK_LE(0, num_locals_);
+
+ // Connect {entries_} and {ool_entries_} by removing redundant stack
+ // information from the first {ool_entries_} entry (based on
+ // {last_values_}).
+ if (!entries_.empty() && !ool_entries_.empty()) {
+ ool_entries_.front().MinimizeBasedOnPreviousStack(last_values_);
+ }
+
+ std::vector<Entry> entries;
+ entries.reserve(entries_.size() + ool_entries_.size());
+ for (auto& entry : entries_) entries.push_back(entry.ToTableEntry());
+ for (auto& entry : ool_entries_) entries.push_back(entry.ToTableEntry());
+ DCHECK(std::is_sorted(
+ entries.begin(), entries.end(),
+ [](Entry& a, Entry& b) { return a.pc_offset() < b.pc_offset(); }));
+ return std::make_unique<DebugSideTable>(num_locals_, std::move(entries));
+ }
+
+ private:
+ static std::vector<Value> GetChangedStackValues(
+ std::vector<Value>& last_values,
+ Vector<LiftoffAssembler::VarState> stack_state,
+ AssumeSpilling assume_spilling) {
+ std::vector<Value> changed_values;
+ int old_stack_size = static_cast<int>(last_values.size());
+ last_values.resize(stack_state.size());
+
+ int index = 0;
+ for (const auto& slot : stack_state) {
+ Value new_value;
+ new_value.index = index;
+ new_value.kind = slot.kind();
switch (slot.loc()) {
case kIntConst:
- values[i].kind = DebugSideTable::Entry::kConstant;
- values[i].i32_const = slot.i32_const();
+ new_value.storage = Entry::kConstant;
+ new_value.i32_const = slot.i32_const();
break;
case kRegister:
DCHECK_NE(kDidSpill, assume_spilling);
if (assume_spilling == kAllowRegisters) {
- values[i].kind = DebugSideTable::Entry::kRegister;
- values[i].reg_code = slot.reg().liftoff_code();
+ new_value.storage = Entry::kRegister;
+ new_value.reg_code = slot.reg().liftoff_code();
break;
}
DCHECK_EQ(kAssumeSpilling, assume_spilling);
V8_FALLTHROUGH;
case kStack:
- values[i].kind = DebugSideTable::Entry::kStack;
- values[i].stack_offset = slot.offset();
+ new_value.storage = Entry::kStack;
+ new_value.stack_offset = slot.offset();
break;
}
+
+ if (index >= old_stack_size || last_values[index] != new_value) {
+ changed_values.push_back(new_value);
+ last_values[index] = new_value;
+ }
+ ++index;
}
- entries_.emplace_back(pc_offset, std::move(values));
- return &entries_.back();
+ return changed_values;
}
- void SetNumLocals(int num_locals) {
- DCHECK_EQ(-1, num_locals_);
- DCHECK_LE(0, num_locals);
- num_locals_ = num_locals;
+ int num_locals_ = -1;
+ // Keep a snapshot of the stack of the last entry, to generate a delta to the
+ // next entry.
+ std::vector<Value> last_values_;
+ std::vector<EntryBuilder> entries_;
+ // Keep OOL code entries separate so we can do proper delta-encoding (more
+ // entries might be added between the existing {entries_} and the
+ // {ool_entries_}). Store the entries in a list so the pointer is not
+ // invalidated by adding more entries.
+ std::vector<Value> last_ool_values_;
+ std::list<EntryBuilder> ool_entries_;
+};
+
+void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail,
+ const CompilationEnv* env) {
+ // Decode errors are ok.
+ if (reason == kDecodeError) return;
+
+ // Missing CPU features are also generally OK for now.
+ if (reason == kMissingCPUFeature) return;
+
+ // --liftoff-only ensures that tests actually exercise the Liftoff path
+ // without bailing out. Bailing out due to (simulated) lack of CPU support
+ // is okay though (see above).
+ if (FLAG_liftoff_only) {
+ FATAL("--liftoff-only: treating bailout as fatal error. Cause: %s", detail);
}
- std::unique_ptr<DebugSideTable> GenerateDebugSideTable() {
- DCHECK_LE(0, num_locals_);
- std::vector<DebugSideTable::Entry> entries;
- entries.reserve(entries_.size());
- for (auto& entry : entries_) entries.push_back(entry.ToTableEntry());
- std::sort(entries.begin(), entries.end(),
- [](DebugSideTable::Entry& a, DebugSideTable::Entry& b) {
- return a.pc_offset() < b.pc_offset();
- });
- return std::make_unique<DebugSideTable>(num_locals_, std::move(entries));
+ // If --enable-testing-opcode-in-wasm is set, we are expected to bailout with
+ // "testing opcode".
+ if (FLAG_enable_testing_opcode_in_wasm &&
+ strcmp(detail, "testing opcode") == 0) {
+ return;
}
- private:
- int num_locals_ = -1;
- std::list<EntryBuilder> entries_;
-};
+ // Some externally maintained architectures don't fully implement Liftoff yet.
+#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_S390X || \
+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+ return;
+#endif
+
+ // TODO(11235): On arm and arm64 there is still a limit on the size of
+ // supported stack frames.
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
+ if (strstr(detail, "Stack limited to 512 bytes")) return;
+#endif
+
+#define LIST_FEATURE(name, ...) kFeature_##name,
+ constexpr WasmFeatures kExperimentalFeatures{
+ FOREACH_WASM_EXPERIMENTAL_FEATURE_FLAG(LIST_FEATURE)};
+ constexpr WasmFeatures kStagedFeatures{
+ FOREACH_WASM_STAGING_FEATURE_FLAG(LIST_FEATURE)};
+#undef LIST_FEATURE
+
+ // Bailout is allowed if any experimental feature is enabled.
+ if (env->enabled_features.contains_any(kExperimentalFeatures)) return;
+
+ // Staged features should be feature complete in Liftoff according to
+ // https://v8.dev/docs/wasm-shipping-checklist. Some are not though. They are
+ // listed here explicitly, with a bug assigned to each of them.
+
+ // TODO(6020): Fully implement SIMD in Liftoff.
+ STATIC_ASSERT(kStagedFeatures.has_simd());
+ if (reason == kSimd) {
+ DCHECK(env->enabled_features.has_simd());
+ return;
+ }
+
+ // TODO(7581): Fully implement reftypes in Liftoff.
+ STATIC_ASSERT(kStagedFeatures.has_reftypes());
+ if (reason == kRefTypes) {
+ DCHECK(env->enabled_features.has_reftypes());
+ return;
+ }
+
+ // TODO(v8:8091): Implement exception handling in Liftoff.
+ if (reason == kExceptionHandling) {
+ DCHECK(env->enabled_features.has_eh());
+ return;
+ }
+
+ // Otherwise, bailout is not allowed.
+ FATAL("Liftoff bailout should not happen. Cause: %s\n", detail);
+}
class LiftoffCompiler {
public:
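
Editor's note on the DebugSideTableBuilder changes in the hunk above: entries switch from storing the full stack layout to delta encoding, where each entry records only the values that changed relative to the previous entry (GetChangedStackValues), and the first OOL entry is minimized against the last regular entry. A compact sketch of the delta step follows; it is an editor's illustration with simplified types, not the actual Entry/Value definitions.

// Editor's illustration only; Value is a simplified stand-in.
#include <vector>

struct Value {
  int index = 0;
  int stack_offset = 0;
  bool operator==(const Value& other) const {
    return index == other.index && stack_offset == other.stack_offset;
  }
};

// Record only the values that differ from {last}, and update {last} so the
// next entry is encoded against this one.
std::vector<Value> GetChangedValues(std::vector<Value>& last,
                                    const std::vector<Value>& current) {
  std::vector<Value> changed;
  size_t old_size = last.size();
  last.resize(current.size());
  for (size_t i = 0; i < current.size(); ++i) {
    if (i >= old_size || !(last[i] == current[i])) {
      changed.push_back(current[i]);
      last[i] = current[i];
    }
  }
  return changed;
}

A consumer can rebuild the full stack at a given pc by applying the recorded changes in order, which is why the generated entries are checked to be sorted by pc offset in GenerateDebugSideTable above.
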
@@ -252,12 +392,6 @@ class LiftoffCompiler {
using Value = ValueBase<validate>;
- static constexpr auto kI32 = ValueType::kI32;
- static constexpr auto kI64 = ValueType::kI64;
- static constexpr auto kF32 = ValueType::kF32;
- static constexpr auto kF64 = ValueType::kF64;
- static constexpr auto kS128 = ValueType::kS128;
-
struct ElseState {
MovableLabel label;
LiftoffAssembler::CacheState state;
@@ -276,6 +410,7 @@ class LiftoffCompiler {
};
using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>;
+ using ValueKindSig = LiftoffAssembler::ValueKindSig;
// For debugging, we need to spill registers before a trap or a stack check to
// be able to inspect them.
@@ -283,7 +418,7 @@ class LiftoffCompiler {
struct Entry {
int offset;
LiftoffRegister reg;
- ValueType type;
+ ValueKind kind;
};
ZoneVector<Entry> entries;
@@ -352,7 +487,7 @@ class LiftoffCompiler {
std::unique_ptr<AssemblerBuffer> buffer,
DebugSideTableBuilder* debug_sidetable_builder,
ForDebugging for_debugging, int func_index,
- Vector<int> breakpoints = {}, int dead_breakpoint = 0)
+ Vector<const int> breakpoints = {}, int dead_breakpoint = 0)
: asm_(std::move(buffer)),
descriptor_(
GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
@@ -403,13 +538,7 @@ class LiftoffCompiler {
decoder->errorf(decoder->pc_offset(), "unsupported liftoff operation: %s",
detail);
UnuseLabels(decoder);
- // --liftoff-only ensures that tests actually exercise the Liftoff path
- // without bailing out. Bailing out due to (simulated) lack of CPU support
- // is okay though.
- if (FLAG_liftoff_only && reason != kMissingCPUFeature) {
- FATAL("--liftoff-only: treating bailout as fatal error. Cause: %s",
- detail);
- }
+ CheckBailoutAllowed(reason, detail, env_);
}
bool DidAssemblerBailout(FullDecoder* decoder) {
@@ -418,37 +547,34 @@ class LiftoffCompiler {
return true;
}
- bool CheckSupportedType(FullDecoder* decoder, ValueType type,
+ bool CheckSupportedType(FullDecoder* decoder, ValueKind kind,
const char* context) {
LiftoffBailoutReason bailout_reason = kOtherReason;
- switch (type.kind()) {
- case ValueType::kI32:
- case ValueType::kI64:
- case ValueType::kF32:
- case ValueType::kF64:
+ switch (kind) {
+ case kI32:
+ case kI64:
+ case kF32:
+ case kF64:
return true;
- case ValueType::kS128:
+ case kS128:
if (CpuFeatures::SupportsWasmSimd128()) return true;
bailout_reason = kMissingCPUFeature;
break;
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kRtt:
- case ValueType::kI8:
- case ValueType::kI16:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ case kI8:
+ case kI16:
if (FLAG_experimental_liftoff_extern_ref) return true;
- if (type.is_reference_to(HeapType::kExn)) {
- bailout_reason = kExceptionHandling;
- } else {
- bailout_reason = kRefTypes;
- }
+ bailout_reason = kRefTypes;
break;
- case ValueType::kBottom:
- case ValueType::kStmt:
+ case kBottom:
+ case kStmt:
UNREACHABLE();
}
EmbeddedVector<char, 128> buffer;
- SNPrintF(buffer, "%s %s", type.name().c_str(), context);
+ SNPrintF(buffer, "%s %s", name(kind), context);
unsupported(decoder, bailout_reason, buffer.begin());
return false;
}
@@ -479,27 +605,27 @@ class LiftoffCompiler {
int num_locals = decoder->num_locals();
__ set_num_locals(num_locals);
for (int i = 0; i < num_locals; ++i) {
- ValueType type = decoder->local_type(i);
- __ set_local_type(i, type);
+ ValueKind kind = decoder->local_type(i).kind();
+ __ set_local_type(i, kind);
}
}
// Returns the number of inputs processed (1 or 2).
- uint32_t ProcessParameter(ValueType type, uint32_t input_idx) {
- const bool needs_pair = needs_gp_reg_pair(type);
- const ValueType reg_type = needs_pair ? kWasmI32 : type;
- const RegClass rc = reg_class_for(reg_type);
+ uint32_t ProcessParameter(ValueKind kind, uint32_t input_idx) {
+ const bool needs_pair = needs_gp_reg_pair(kind);
+ const ValueKind reg_kind = needs_pair ? kI32 : kind;
+ const RegClass rc = reg_class_for(reg_kind);
- auto LoadToReg = [this, reg_type, rc](compiler::LinkageLocation location,
+ auto LoadToReg = [this, reg_kind, rc](compiler::LinkageLocation location,
LiftoffRegList pinned) {
if (location.IsRegister()) {
DCHECK(!location.IsAnyRegister());
- return LiftoffRegister::from_external_code(rc, reg_type,
+ return LiftoffRegister::from_external_code(rc, reg_kind,
location.AsRegister());
}
DCHECK(location.IsCallerFrameSlot());
LiftoffRegister reg = __ GetUnusedRegister(rc, pinned);
- __ LoadCallerFrameSlot(reg, -location.AsCallerFrameSlot(), reg_type);
+ __ LoadCallerFrameSlot(reg, -location.AsCallerFrameSlot(), reg_kind);
return reg;
};
@@ -511,7 +637,7 @@ class LiftoffCompiler {
LiftoffRegList::ForRegs(reg));
reg = LiftoffRegister::ForPair(reg.gp(), reg2.gp());
}
- __ PushRegister(type, reg);
+ __ PushRegister(kind, reg);
return needs_pair ? 2 : 1;
}
@@ -536,11 +662,16 @@ class LiftoffCompiler {
}
out_of_line_code_.push_back(OutOfLineCode::StackCheck(
position, regs_to_save, spilled_regs, safepoint_info,
- RegisterDebugSideTableEntry(DebugSideTableBuilder::kAssumeSpilling)));
+ RegisterOOLDebugSideTableEntry()));
OutOfLineCode& ool = out_of_line_code_.back();
- LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize,
+ {});
__ StackCheck(ool.label.get(), limit_address);
__ bind(ool.continuation.get());
+ // If the stack check triggers, we lose the cached instance register.
+ // TODO(clemensb): Restore that register in the OOL code so it's always
+ // available at the beginning of the actual function code.
+ __ cache_state()->ClearCachedInstanceRegister();
}
bool SpillLocalsInitially(FullDecoder* decoder, uint32_t num_params) {
@@ -555,8 +686,8 @@ class LiftoffCompiler {
// because other types cannot be initialized to constants.
for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) {
- ValueType type = decoder->local_type(param_idx);
- if (type != kWasmI32 && type != kWasmI64) return true;
+ ValueKind kind = __ local_type(param_idx);
+ if (kind != kI32 && kind != kI64) return true;
}
return false;
}
@@ -580,16 +711,6 @@ class LiftoffCompiler {
if (!CheckSupportedType(decoder, __ local_type(i), "param")) return;
}
- // Input 0 is the call target, the instance is at 1.
- constexpr int kInstanceParameterIndex = 1;
- // Store the instance parameter to a special stack slot.
- compiler::LinkageLocation instance_loc =
- descriptor_->GetInputLocation(kInstanceParameterIndex);
- DCHECK(instance_loc.IsRegister());
- DCHECK(!instance_loc.IsAnyRegister());
- Register instance_reg = Register::from_code(instance_loc.AsRegister());
- DCHECK_EQ(kWasmInstanceRegister, instance_reg);
-
// Parameter 0 is the instance parameter.
uint32_t num_params =
static_cast<uint32_t>(decoder->sig_->parameter_count());
@@ -608,9 +729,19 @@ class LiftoffCompiler {
// LiftoffAssembler methods.
if (DidAssemblerBailout(decoder)) return;
+ // Input 0 is the call target, the instance is at 1.
+ constexpr int kInstanceParameterIndex = 1;
+ // Check that {kWasmInstanceRegister} matches our call descriptor.
+ DCHECK_EQ(kWasmInstanceRegister,
+ Register::from_code(
+ descriptor_->GetInputLocation(kInstanceParameterIndex)
+ .AsRegister()));
+ // Store the instance parameter to a special stack slot.
+ __ SpillInstance(kWasmInstanceRegister);
+ __ cache_state()->SetInstanceCacheRegister(kWasmInstanceRegister);
+
// Process parameters.
if (num_params) DEBUG_CODE_COMMENT("process parameters");
- __ SpillInstance(instance_reg);
// Input 0 is the code target, 1 is the instance. First parameter at 2.
uint32_t input_idx = kInstanceParameterIndex + 1;
for (uint32_t param_idx = 0; param_idx < num_params; ++param_idx) {
@@ -624,28 +755,32 @@ class LiftoffCompiler {
if (SpillLocalsInitially(decoder, num_params)) {
for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) {
- ValueType type = decoder->local_type(param_idx);
- __ PushStack(type);
+ ValueKind kind = __ local_type(param_idx);
+ __ PushStack(kind);
}
int spill_size = __ TopSpillOffset() - params_size;
__ FillStackSlotsWithZero(params_size, spill_size);
} else {
for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) {
- ValueType type = decoder->local_type(param_idx);
- __ PushConstant(type, int32_t{0});
+ ValueKind kind = __ local_type(param_idx);
+ __ PushConstant(kind, int32_t{0});
}
}
if (FLAG_experimental_liftoff_extern_ref) {
// Initialize all reference type locals with ref.null.
- for (uint32_t param_idx = num_params; param_idx < __ num_locals();
- ++param_idx) {
- ValueType type = decoder->local_type(param_idx);
- if (type.is_reference_type()) {
- LiftoffRegister result = __ GetUnusedRegister(kGpReg, {});
- LoadNullValue(result.gp(), {});
- __ Spill(__ cache_state()->stack_state.back().offset(), result, type);
+ Register null_ref_reg = no_reg;
+ for (uint32_t local_index = num_params; local_index < __ num_locals();
+ ++local_index) {
+ ValueKind kind = __ local_type(local_index);
+ if (is_reference_type(kind)) {
+ if (null_ref_reg == no_reg) {
+ null_ref_reg = __ GetUnusedRegister(kGpReg, {}).gp();
+ LoadNullValue(null_ref_reg, {});
+ }
+ __ Spill(__ cache_state()->stack_state[local_index].offset(),
+ LiftoffRegister(null_ref_reg), kind);
}
}
}
@@ -669,7 +804,7 @@ class LiftoffCompiler {
LiftoffRegister array_address =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LOAD_INSTANCE_FIELD(array_address.gp(), NumLiftoffFunctionCallsArray,
- kSystemPointerSize);
+ kSystemPointerSize, pinned);
// Compute the correct offset in the array.
uint32_t offset =
@@ -692,9 +827,12 @@ class LiftoffCompiler {
__ emit_i32_and(old_number_of_calls.gp(), old_number_of_calls.gp(),
new_number_of_calls.gp());
// Unary "unequal" means "different from zero".
- __ emit_cond_jump(kUnequal, &no_tierup, kWasmI32,
- old_number_of_calls.gp());
+ __ emit_cond_jump(kUnequal, &no_tierup, kI32, old_number_of_calls.gp());
TierUpFunction(decoder);
+ // After the runtime call, the instance cache register is clobbered (we
+ // reset it already in {SpillAllRegisters} above, but then we still access
+ // the instance afterwards).
+ __ cache_state()->ClearCachedInstanceRegister();
__ bind(&no_tierup);
}
@@ -735,15 +873,14 @@ class LiftoffCompiler {
__ PushRegisters(ool->regs_to_save);
} else if (V8_UNLIKELY(ool->spilled_registers != nullptr)) {
for (auto& entry : ool->spilled_registers->entries) {
- __ Spill(entry.offset, entry.reg, entry.type);
+ __ Spill(entry.offset, entry.reg, entry.kind);
}
}
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(ool->position), true);
__ CallRuntimeStub(ool->stub);
- Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(
- &asm_, Safepoint::kNoLazyDeopt);
+ Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
if (ool->safepoint_info) {
for (auto index : ool->safepoint_info->slots) {
@@ -774,7 +911,7 @@ class LiftoffCompiler {
if (V8_UNLIKELY(ool->spilled_registers != nullptr)) {
DCHECK(for_debugging_);
for (auto& entry : ool->spilled_registers->entries) {
- __ Fill(entry.reg, entry.offset, entry.type);
+ __ Fill(entry.reg, entry.offset, entry.kind);
}
}
__ emit_jump(ool->continuation.get());
@@ -831,19 +968,29 @@ class LiftoffCompiler {
}
if (has_breakpoint) {
EmitBreakpoint(decoder);
- // Once we emitted a breakpoint, we don't need to check the "hook on
- // function call" any more.
- checked_hook_on_function_call_ = true;
- } else if (!checked_hook_on_function_call_) {
- checked_hook_on_function_call_ = true;
- // Check the "hook on function call" flag. If set, trigger a break.
- DEBUG_CODE_COMMENT("check hook on function call");
- Register flag = __ GetUnusedRegister(kGpReg, {}).gp();
- LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress, kSystemPointerSize);
+ // Once we emitted an unconditional breakpoint, we don't need to check
+ // function entry breaks any more.
+ did_function_entry_break_checks_ = true;
+ } else if (!did_function_entry_break_checks_) {
+ did_function_entry_break_checks_ = true;
+ DEBUG_CODE_COMMENT("check function entry break");
+ Label do_break;
Label no_break;
+ Register flag = __ GetUnusedRegister(kGpReg, {}).gp();
+
+ // Check the "hook on function call" flag. If set, trigger a break.
+ LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress, kSystemPointerSize,
+ {});
__ Load(LiftoffRegister{flag}, flag, no_reg, 0, LoadType::kI32Load8U, {});
+ // Unary "unequal" means "not equals zero".
+ __ emit_cond_jump(kUnequal, &do_break, kI32, flag);
+
+ // Check if we should stop on "script entry".
+ LOAD_INSTANCE_FIELD(flag, BreakOnEntry, kUInt8Size, {});
// Unary "equal" means "equals zero".
- __ emit_cond_jump(kEqual, &no_break, kWasmI32, flag);
+ __ emit_cond_jump(kEqual, &no_break, kI32, flag);
+
+ __ bind(&do_break);
EmitBreakpoint(decoder);
__ bind(&no_break);
} else if (dead_breakpoint_ == decoder->position()) {
@@ -882,7 +1029,7 @@ class LiftoffCompiler {
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallRuntimeStub(WasmCode::kWasmDebugBreak);
// TODO(ahaas): Define a proper safepoint here.
- safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
+ safepoint_table_builder_.DefineSafepoint(&asm_);
RegisterDebugSideTableEntry(DebugSideTableBuilder::kAllowRegisters);
}
@@ -895,6 +1042,8 @@ class LiftoffCompiler {
// TODO(clemensb): Come up with a better strategy here, involving
// pre-analysis of the function.
__ SpillLocals();
+ // Same for the cached instance register.
+ __ cache_state()->ClearCachedInstanceRegister();
__ PrepareLoopArgs(loop->start_merge.arity);
@@ -939,8 +1088,7 @@ class LiftoffCompiler {
// Test the condition, jump to else if zero.
Register value = __ PopToRegister().gp();
- __ emit_cond_jump(kEqual, if_block->else_state->label.get(), kWasmI32,
- value);
+ __ emit_cond_jump(kEqual, if_block->else_state->label.get(), kI32, value);
// Store the state (after popping the value) for executing the else branch.
if_block->else_state->state.Split(*__ cache_state());
@@ -1009,8 +1157,8 @@ class LiftoffCompiler {
void EndControl(FullDecoder* decoder, Control* c) {}
- void GenerateCCall(const LiftoffRegister* result_regs, const FunctionSig* sig,
- ValueType out_argument_type,
+ void GenerateCCall(const LiftoffRegister* result_regs,
+ const ValueKindSig* sig, ValueKind out_argument_kind,
const LiftoffRegister* arg_regs,
ExternalReference ext_ref) {
// Before making a call, spill all cache registers.
@@ -1018,14 +1166,13 @@ class LiftoffCompiler {
// Store arguments on our stack, then align the stack for calling to C.
int param_bytes = 0;
- for (ValueType param_type : sig->parameters()) {
- param_bytes += param_type.element_size_bytes();
+ for (ValueKind param_kind : sig->parameters()) {
+ param_bytes += element_size_bytes(param_kind);
}
- int out_arg_bytes = out_argument_type == kWasmStmt
- ? 0
- : out_argument_type.element_size_bytes();
+ int out_arg_bytes =
+ out_argument_kind == kStmt ? 0 : element_size_bytes(out_argument_kind);
int stack_bytes = std::max(param_bytes, out_arg_bytes);
- __ CallC(sig, arg_regs, result_regs, out_argument_type, stack_bytes,
+ __ CallC(sig, arg_regs, result_regs, out_argument_kind, stack_bytes,
ext_ref);
}
@@ -1075,38 +1222,38 @@ class LiftoffCompiler {
CallEmitFn(bound_fn.fn, bound_fn.first_arg, ConvertAssemblerArg(args)...);
}
- template <ValueType::Kind src_type, ValueType::Kind result_type, class EmitFn>
+ template <ValueKind src_kind, ValueKind result_kind, class EmitFn>
void EmitUnOp(EmitFn fn) {
- constexpr RegClass src_rc = reg_class_for(src_type);
- constexpr RegClass result_rc = reg_class_for(result_type);
+ constexpr RegClass src_rc = reg_class_for(src_kind);
+ constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
? __ GetUnusedRegister(result_rc, {src}, {})
: __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src);
- __ PushRegister(ValueType::Primitive(result_type), dst);
+ __ PushRegister(result_kind, dst);
}
- template <ValueType::Kind type>
+ template <ValueKind kind>
void EmitFloatUnOpWithCFallback(
bool (LiftoffAssembler::*emit_fn)(DoubleRegister, DoubleRegister),
ExternalReference (*fallback_fn)()) {
auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
if ((asm_.*emit_fn)(dst.fp(), src.fp())) return;
ExternalReference ext_ref = fallback_fn();
- ValueType sig_reps[] = {ValueType::Primitive(type)};
- FunctionSig sig(0, 1, sig_reps);
- GenerateCCall(&dst, &sig, ValueType::Primitive(type), &src, ext_ref);
+ ValueKind sig_reps[] = {kind};
+ ValueKindSig sig(0, 1, sig_reps);
+ GenerateCCall(&dst, &sig, kind, &src, ext_ref);
};
- EmitUnOp<type, type>(emit_with_c_fallback);
+ EmitUnOp<kind, kind>(emit_with_c_fallback);
}
enum TypeConversionTrapping : bool { kCanTrap = true, kNoTrap = false };
- template <ValueType::Kind dst_type, ValueType::Kind src_type,
+ template <ValueKind dst_type, ValueKind src_kind,
TypeConversionTrapping can_trap>
void EmitTypeConversion(WasmOpcode opcode, ExternalReference (*fallback_fn)(),
WasmCodePosition trap_position) {
- static constexpr RegClass src_rc = reg_class_for(src_type);
+ static constexpr RegClass src_rc = reg_class_for(src_kind);
static constexpr RegClass dst_rc = reg_class_for(dst_type);
LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = src_rc == dst_rc
@@ -1122,22 +1269,20 @@ class LiftoffCompiler {
ExternalReference ext_ref = fallback_fn();
if (can_trap) {
// External references for potentially trapping conversions return int.
- ValueType sig_reps[] = {kWasmI32, ValueType::Primitive(src_type)};
- FunctionSig sig(1, 1, sig_reps);
+ ValueKind sig_reps[] = {kI32, src_kind};
+ ValueKindSig sig(1, 1, sig_reps);
LiftoffRegister ret_reg =
__ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
LiftoffRegister dst_regs[] = {ret_reg, dst};
- GenerateCCall(dst_regs, &sig, ValueType::Primitive(dst_type), &src,
- ext_ref);
- __ emit_cond_jump(kEqual, trap, kWasmI32, ret_reg.gp());
+ GenerateCCall(dst_regs, &sig, dst_type, &src, ext_ref);
+ __ emit_cond_jump(kEqual, trap, kI32, ret_reg.gp());
} else {
- ValueType sig_reps[] = {ValueType::Primitive(src_type)};
- FunctionSig sig(0, 1, sig_reps);
- GenerateCCall(&dst, &sig, ValueType::Primitive(dst_type), &src,
- ext_ref);
+ ValueKind sig_reps[] = {src_kind};
+ ValueKindSig sig(0, 1, sig_reps);
+ GenerateCCall(&dst, &sig, dst_type, &src, ext_ref);
}
}
- __ PushRegister(ValueType::Primitive(dst_type), dst);
+ __ PushRegister(dst_type, dst);
}
void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
@@ -1148,16 +1293,16 @@ class LiftoffCompiler {
#define CASE_I64_UNOP(opcode, fn) \
case kExpr##opcode: \
return EmitUnOp<kI64, kI64>(&LiftoffAssembler::emit_##fn);
-#define CASE_FLOAT_UNOP(opcode, type, fn) \
+#define CASE_FLOAT_UNOP(opcode, kind, fn) \
case kExpr##opcode: \
- return EmitUnOp<k##type, k##type>(&LiftoffAssembler::emit_##fn);
-#define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, type, fn) \
+ return EmitUnOp<k##kind, k##kind>(&LiftoffAssembler::emit_##fn);
+#define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, kind, fn) \
case kExpr##opcode: \
- return EmitFloatUnOpWithCFallback<k##type>(&LiftoffAssembler::emit_##fn, \
+ return EmitFloatUnOpWithCFallback<k##kind>(&LiftoffAssembler::emit_##fn, \
&ExternalReference::wasm_##fn);
-#define CASE_TYPE_CONVERSION(opcode, dst_type, src_type, ext_ref, can_trap) \
+#define CASE_TYPE_CONVERSION(opcode, dst_type, src_kind, ext_ref, can_trap) \
case kExpr##opcode: \
- return EmitTypeConversion<k##dst_type, k##src_type, can_trap>( \
+ return EmitTypeConversion<k##dst_type, k##src_kind, can_trap>( \
kExpr##opcode, ext_ref, can_trap ? decoder->position() : 0);
switch (opcode) {
CASE_I32_UNOP(I32Clz, i32_clz)
@@ -1246,9 +1391,9 @@ class LiftoffCompiler {
return EmitUnOp<kI32, kI32>(
[=](LiftoffRegister dst, LiftoffRegister src) {
if (__ emit_i32_popcnt(dst.gp(), src.gp())) return;
- ValueType sig_i_i_reps[] = {kWasmI32, kWasmI32};
- FunctionSig sig_i_i(1, 1, sig_i_i_reps);
- GenerateCCall(&dst, &sig_i_i, kWasmStmt, &src,
+ ValueKind sig_i_i_reps[] = {kI32, kI32};
+ ValueKindSig sig_i_i(1, 1, sig_i_i_reps);
+ GenerateCCall(&dst, &sig_i_i, kStmt, &src,
ExternalReference::wasm_word32_popcnt());
});
case kExprI64Popcnt:
@@ -1256,10 +1401,10 @@ class LiftoffCompiler {
[=](LiftoffRegister dst, LiftoffRegister src) {
if (__ emit_i64_popcnt(dst, src)) return;
// The c function returns i32. We will zero-extend later.
- ValueType sig_i_l_reps[] = {kWasmI32, kWasmI64};
- FunctionSig sig_i_l(1, 1, sig_i_l_reps);
+ ValueKind sig_i_l_reps[] = {kI32, kI64};
+ ValueKindSig sig_i_l(1, 1, sig_i_l_reps);
LiftoffRegister c_call_dst = kNeedI64RegPair ? dst.low() : dst;
- GenerateCCall(&c_call_dst, &sig_i_l, kWasmStmt, &src,
+ GenerateCCall(&c_call_dst, &sig_i_l, kStmt, &src,
ExternalReference::wasm_word64_popcnt());
// Now zero-extend the result to i64.
__ emit_type_conversion(kExprI64UConvertI32, dst, c_call_dst,
@@ -1278,7 +1423,7 @@ class LiftoffCompiler {
// of the comparison.
LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {ref, null}, {});
__ emit_ptrsize_set_cond(kEqual, dst.gp(), ref, null);
- __ PushRegister(kWasmI32, dst);
+ __ PushRegister(kI32, dst);
return;
}
default:
@@ -1291,11 +1436,11 @@ class LiftoffCompiler {
#undef CASE_TYPE_CONVERSION
}
- template <ValueType::Kind src_type, ValueType::Kind result_type,
- typename EmitFn, typename EmitFnImm>
+ template <ValueKind src_kind, ValueKind result_kind, typename EmitFn,
+ typename EmitFnImm>
void EmitBinOpImm(EmitFn fn, EmitFnImm fnImm) {
- static constexpr RegClass src_rc = reg_class_for(src_type);
- static constexpr RegClass result_rc = reg_class_for(result_type);
+ static constexpr RegClass src_rc = reg_class_for(src_kind);
+ static constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back();
// Check if the RHS is an immediate.
@@ -1312,18 +1457,18 @@ class LiftoffCompiler {
: __ GetUnusedRegister(result_rc, pinned);
CallEmitFn(fnImm, dst, lhs, imm);
- __ PushRegister(ValueType::Primitive(result_type), dst);
+ __ PushRegister(result_kind, dst);
} else {
// The RHS was not an immediate.
- EmitBinOp<src_type, result_type>(fn);
+ EmitBinOp<src_kind, result_kind>(fn);
}
}
- template <ValueType::Kind src_type, ValueType::Kind result_type,
+ template <ValueKind src_kind, ValueKind result_kind,
bool swap_lhs_rhs = false, typename EmitFn>
void EmitBinOp(EmitFn fn) {
- static constexpr RegClass src_rc = reg_class_for(src_type);
- static constexpr RegClass result_rc = reg_class_for(result_type);
+ static constexpr RegClass src_rc = reg_class_for(src_kind);
+ static constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffRegister rhs = __ PopToRegister();
LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
LiftoffRegister dst = src_rc == result_rc
@@ -1333,7 +1478,7 @@ class LiftoffCompiler {
if (swap_lhs_rhs) std::swap(lhs, rhs);
CallEmitFn(fn, dst, lhs, rhs);
- __ PushRegister(ValueType::Primitive(result_type), dst);
+ __ PushRegister(result_kind, dst);
}
void EmitDivOrRem64CCall(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1347,16 +1492,15 @@ class LiftoffCompiler {
__ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst, ret));
LiftoffRegister arg_regs[] = {lhs, rhs};
LiftoffRegister result_regs[] = {ret, dst};
- ValueType sig_types[] = {kWasmI32, kWasmI64, kWasmI64};
+ ValueKind sig_kinds[] = {kI32, kI64, kI64};
// <i64, i64> -> i32 (with i64 output argument)
- FunctionSig sig(1, 2, sig_types);
- GenerateCCall(result_regs, &sig, kWasmI64, arg_regs, ext_ref);
+ ValueKindSig sig(1, 2, sig_kinds);
+ GenerateCCall(result_regs, &sig, kI64, arg_regs, ext_ref);
__ LoadConstant(tmp, WasmValue(int32_t{0}));
- __ emit_cond_jump(kEqual, trap_by_zero, kWasmI32, ret.gp(), tmp.gp());
+ __ emit_cond_jump(kEqual, trap_by_zero, kI32, ret.gp(), tmp.gp());
if (trap_unrepresentable) {
__ LoadConstant(tmp, WasmValue(int32_t{-1}));
- __ emit_cond_jump(kEqual, trap_unrepresentable, kWasmI32, ret.gp(),
- tmp.gp());
+ __ emit_cond_jump(kEqual, trap_unrepresentable, kI32, ret.gp(), tmp.gp());
}
}
@@ -1383,17 +1527,17 @@ class LiftoffCompiler {
amount.is_gp_pair() ? amount.low_gp() : amount.gp()); \
}, \
&LiftoffAssembler::emit_##fn##i);
-#define CASE_CCALL_BINOP(opcode, type, ext_ref_fn) \
+#define CASE_CCALL_BINOP(opcode, kind, ext_ref_fn) \
case kExpr##opcode: \
- return EmitBinOp<k##type, k##type>( \
+ return EmitBinOp<k##kind, k##kind>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
LiftoffRegister args[] = {lhs, rhs}; \
auto ext_ref = ExternalReference::ext_ref_fn(); \
- ValueType sig_reps[] = {kWasm##type, kWasm##type, kWasm##type}; \
- const bool out_via_stack = kWasm##type == kWasmI64; \
- FunctionSig sig(out_via_stack ? 0 : 1, 2, sig_reps); \
- ValueType out_arg_type = out_via_stack ? kWasmI64 : kWasmStmt; \
- GenerateCCall(&dst, &sig, out_arg_type, args, ext_ref); \
+ ValueKind sig_reps[] = {k##kind, k##kind, k##kind}; \
+ const bool out_via_stack = k##kind == kI64; \
+ ValueKindSig sig(out_via_stack ? 0 : 1, 2, sig_reps); \
+ ValueKind out_arg_kind = out_via_stack ? kI64 : kStmt; \
+ GenerateCCall(&dst, &sig, out_arg_kind, args, ext_ref); \
});
switch (opcode) {
case kExprI32Add:
@@ -1650,7 +1794,7 @@ class LiftoffCompiler {
}
});
case kExprRefEq: {
- return EmitBinOp<ValueType::kOptRef, kI32>(
+ return EmitBinOp<kOptRef, kI32>(
BindFirst(&LiftoffAssembler::emit_ptrsize_set_cond, kEqual));
}
@@ -1662,7 +1806,7 @@ class LiftoffCompiler {
}
void I32Const(FullDecoder* decoder, Value* result, int32_t value) {
- __ PushConstant(kWasmI32, value);
+ __ PushConstant(kI32, value);
}
void I64Const(FullDecoder* decoder, Value* result, int64_t value) {
@@ -1672,24 +1816,24 @@ class LiftoffCompiler {
// a register immediately.
int32_t value_i32 = static_cast<int32_t>(value);
if (value_i32 == value) {
- __ PushConstant(kWasmI64, value_i32);
+ __ PushConstant(kI64, value_i32);
} else {
- LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64), {});
+ LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kI64), {});
__ LoadConstant(reg, WasmValue(value));
- __ PushRegister(kWasmI64, reg);
+ __ PushRegister(kI64, reg);
}
}
void F32Const(FullDecoder* decoder, Value* result, float value) {
LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
__ LoadConstant(reg, WasmValue(value));
- __ PushRegister(kWasmF32, reg);
+ __ PushRegister(kF32, reg);
}
void F64Const(FullDecoder* decoder, Value* result, double value) {
LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
__ LoadConstant(reg, WasmValue(value));
- __ PushRegister(kWasmF64, reg);
+ __ PushRegister(kF64, reg);
}
void RefNull(FullDecoder* decoder, ValueType type, Value*) {
@@ -1699,18 +1843,29 @@ class LiftoffCompiler {
}
LiftoffRegister null = __ GetUnusedRegister(kGpReg, {});
LoadNullValue(null.gp(), {});
- __ PushRegister(type, null);
+ __ PushRegister(type.kind(), null);
}
void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) {
- unsupported(decoder, kRefTypes, "func");
+ WasmCode::RuntimeStubId target = WasmCode::kWasmRefFunc;
+ compiler::CallDescriptor* call_descriptor =
+ GetBuiltinCallDescriptor<WasmRefFuncDescriptor>(compilation_zone_);
+ ValueKind sig_reps[] = {kRef, kI32};
+ ValueKindSig sig(1, 1, sig_reps);
+ LiftoffRegister func_index_reg = __ GetUnusedRegister(kGpReg, {});
+ __ LoadConstant(func_index_reg, WasmValue(function_index));
+ LiftoffAssembler::VarState func_index_var(kI32, func_index_reg, 0);
+ __ PrepareBuiltinCall(&sig, call_descriptor, {func_index_var});
+ __ CallRuntimeStub(target);
+ DefineSafepoint();
+ __ PushRegister(kRef, LiftoffRegister(kReturnRegister0));
}
void RefAsNonNull(FullDecoder* decoder, const Value& arg, Value* result) {
LiftoffRegList pinned;
LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, obj.gp(), pinned, arg.type);
- __ PushRegister(ValueType::Ref(arg.type.heap_type(), kNonNullable), obj);
+ __ PushRegister(kRef, obj);
}
void Drop(FullDecoder* decoder) { __ DropValues(1); }
@@ -1728,11 +1883,11 @@ class LiftoffCompiler {
// are not handled yet.
size_t num_returns = decoder->sig_->return_count();
if (num_returns == 1) {
- ValueType return_type = decoder->sig_->GetReturn(0);
+ ValueKind return_kind = decoder->sig_->GetReturn(0).kind();
LiftoffRegister return_reg =
__ LoadToRegister(__ cache_state()->stack_state.back(), pinned);
__ Store(info.gp(), no_reg, 0, return_reg,
- StoreType::ForValueType(return_type), pinned);
+ StoreType::ForValueKind(return_kind), pinned);
}
// Put the parameter in its place.
WasmTraceExitDescriptor descriptor;
@@ -1740,7 +1895,7 @@ class LiftoffCompiler {
DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
Register param_reg = descriptor.GetRegisterParameter(0);
if (info.gp() != param_reg) {
- __ Move(param_reg, info.gp(), LiftoffAssembler::kWasmIntPtr);
+ __ Move(param_reg, info.gp(), LiftoffAssembler::kIntPtr);
}
source_position_table_builder_.AddPosition(
@@ -1751,7 +1906,7 @@ class LiftoffCompiler {
__ DeallocateStackSlot(sizeof(int64_t));
}
- void ReturnImpl(FullDecoder* decoder) {
+ void DoReturn(FullDecoder* decoder) {
if (FLAG_trace_wasm) TraceFunctionExit(decoder);
size_t num_returns = decoder->sig_->return_count();
if (num_returns > 0) __ MoveToReturnLocations(decoder->sig_, descriptor_);
@@ -1761,15 +1916,11 @@ class LiftoffCompiler {
static_cast<uint32_t>(descriptor_->StackParameterCount()));
}
- void DoReturn(FullDecoder* decoder, Vector<Value> /*values*/) {
- ReturnImpl(decoder);
- }
-
void LocalGet(FullDecoder* decoder, Value* result,
const LocalIndexImmediate<validate>& imm) {
auto local_slot = __ cache_state()->stack_state[imm.index];
__ cache_state()->stack_state.emplace_back(
- local_slot.type(), __ NextSpillOffset(local_slot.type()));
+ local_slot.kind(), __ NextSpillOffset(local_slot.kind()));
auto* slot = &__ cache_state()->stack_state.back();
if (local_slot.is_reg()) {
__ cache_state()->inc_used(local_slot.reg());
@@ -1778,11 +1929,11 @@ class LiftoffCompiler {
slot->MakeConstant(local_slot.i32_const());
} else {
DCHECK(local_slot.is_stack());
- auto rc = reg_class_for(local_slot.type());
+ auto rc = reg_class_for(local_slot.kind());
LiftoffRegister reg = __ GetUnusedRegister(rc, {});
__ cache_state()->inc_used(reg);
slot->MakeRegister(reg);
- __ Fill(reg, local_slot.offset(), local_slot.type());
+ __ Fill(reg, local_slot.offset(), local_slot.kind());
}
}
@@ -1790,21 +1941,21 @@ class LiftoffCompiler {
uint32_t local_index) {
auto& state = *__ cache_state();
auto& src_slot = state.stack_state.back();
- ValueType type = dst_slot->type();
+ ValueKind kind = dst_slot->kind();
if (dst_slot->is_reg()) {
LiftoffRegister slot_reg = dst_slot->reg();
if (state.get_use_count(slot_reg) == 1) {
- __ Fill(dst_slot->reg(), src_slot.offset(), type);
+ __ Fill(dst_slot->reg(), src_slot.offset(), kind);
return;
}
state.dec_used(slot_reg);
dst_slot->MakeStack();
}
- DCHECK_EQ(type, __ local_type(local_index));
- RegClass rc = reg_class_for(type);
+ DCHECK_EQ(kind, __ local_type(local_index));
+ RegClass rc = reg_class_for(kind);
LiftoffRegister dst_reg = __ GetUnusedRegister(rc, {});
- __ Fill(dst_reg, src_slot.offset(), type);
- *dst_slot = LiftoffAssembler::VarState(type, dst_reg, dst_slot->offset());
+ __ Fill(dst_reg, src_slot.offset(), kind);
+ *dst_slot = LiftoffAssembler::VarState(kind, dst_reg, dst_slot->offset());
__ cache_state()->inc_used(dst_reg);
}
@@ -1853,69 +2004,117 @@ class LiftoffCompiler {
LiftoffRegList* pinned, uint32_t* offset) {
Register addr = pinned->set(__ GetUnusedRegister(kGpReg, {})).gp();
if (global->mutability && global->imported) {
- LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize,
+ *pinned);
__ Load(LiftoffRegister(addr), addr, no_reg,
global->index * sizeof(Address), kPointerLoadType, *pinned);
*offset = 0;
} else {
- LOAD_INSTANCE_FIELD(addr, GlobalsStart, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(addr, GlobalsStart, kSystemPointerSize, *pinned);
*offset = global->offset;
}
return addr;
}
+ void GetBaseAndOffsetForImportedMutableExternRefGlobal(
+ const WasmGlobal* global, LiftoffRegList* pinned, Register* base,
+ Register* offset) {
+ Register globals_buffer =
+ pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer,
+ ImportedMutableGlobalsBuffers, *pinned);
+ *base = globals_buffer;
+ __ LoadTaggedPointer(
+ *base, globals_buffer, no_reg,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(global->offset),
+ *pinned);
+
+ // For the offset we need the index of the global in the buffer, and
+ // then calculate the actual offset from the index. Load the index from
+ // the ImportedMutableGlobals array of the instance.
+ Register imported_mutable_globals =
+ pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
+
+ LOAD_INSTANCE_FIELD(imported_mutable_globals, ImportedMutableGlobals,
+ kSystemPointerSize, *pinned);
+ *offset = imported_mutable_globals;
+ __ Load(LiftoffRegister(*offset), imported_mutable_globals, no_reg,
+ global->index * sizeof(Address),
+ kSystemPointerSize == 4 ? LoadType::kI32Load : LoadType::kI64Load,
+ *pinned);
+ __ emit_i32_shli(*offset, *offset, kTaggedSizeLog2);
+ __ emit_i32_addi(*offset, *offset,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0));
+ }
+
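The comment in the helper above describes a two-step address computation: the instance's ImportedMutableGlobals array holds, per imported mutable externref global, an index into a FixedArray of tagged values, and the byte offset is that index scaled by the tagged-pointer size plus the array header. A standalone sketch of the arithmetic, using assumed constants (4-byte tagged slots, a two-slot header) rather than V8's actual object layout:

#include <cassert>
#include <cstdint>

// Illustrative constants (assumed, not V8's real values).
constexpr uint32_t kTaggedSize = 4;           // e.g. with pointer compression
constexpr uint32_t kTaggedSizeLog2 = 2;
constexpr uint32_t kFixedArrayHeaderSize = 2 * kTaggedSize;  // map + length

// Byte offset of element `index` inside a FixedArray-like object.
constexpr uint32_t ElementOffsetInTaggedFixedArray(uint32_t index) {
  return kFixedArrayHeaderSize + index * kTaggedSize;
}

int main() {
  // The generated code computes: offset = (index << kTaggedSizeLog2) + header,
  // exactly the emit_i32_shli / emit_i32_addi pair above.
  uint32_t buffer_index = 3;
  uint32_t offset =
      (buffer_index << kTaggedSizeLog2) + ElementOffsetInTaggedFixedArray(0);
  assert(offset == ElementOffsetInTaggedFixedArray(buffer_index));
  return 0;
}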
void GlobalGet(FullDecoder* decoder, Value* result,
const GlobalIndexImmediate<validate>& imm) {
const auto* global = &env_->module->globals[imm.index];
- if (!CheckSupportedType(decoder, global->type, "global")) {
+ ValueKind kind = global->type.kind();
+ if (!CheckSupportedType(decoder, kind, "global")) {
return;
}
- if (global->type.is_reference_type()) {
+ if (is_reference_type(kind)) {
if (global->mutability && global->imported) {
- unsupported(decoder, kRefTypes, "imported mutable globals");
+ LiftoffRegList pinned;
+ Register base = no_reg;
+ Register offset = no_reg;
+ GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &pinned,
+ &base, &offset);
+ __ LoadTaggedPointer(base, base, offset, 0, pinned);
+ __ PushRegister(kind, LiftoffRegister(base));
return;
}
LiftoffRegList pinned;
Register globals_buffer =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer, TaggedGlobalsBuffer);
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer, TaggedGlobalsBuffer,
+ pinned);
Register value = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ LoadTaggedPointer(value, globals_buffer, no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
imm.global->offset),
pinned);
- __ PushRegister(global->type, LiftoffRegister(value));
+ __ PushRegister(kind, LiftoffRegister(value));
return;
}
LiftoffRegList pinned;
uint32_t offset = 0;
Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
LiftoffRegister value =
- pinned.set(__ GetUnusedRegister(reg_class_for(global->type), pinned));
- LoadType type = LoadType::ForValueType(global->type);
+ pinned.set(__ GetUnusedRegister(reg_class_for(kind), pinned));
+ LoadType type = LoadType::ForValueKind(kind);
__ Load(value, addr, no_reg, offset, type, pinned, nullptr, true);
- __ PushRegister(global->type, value);
+ __ PushRegister(kind, value);
}
void GlobalSet(FullDecoder* decoder, const Value& value,
const GlobalIndexImmediate<validate>& imm) {
auto* global = &env_->module->globals[imm.index];
- if (!CheckSupportedType(decoder, global->type, "global")) {
+ ValueKind kind = global->type.kind();
+ if (!CheckSupportedType(decoder, kind, "global")) {
return;
}
- if (global->type.is_reference_type()) {
+ if (is_reference_type(kind)) {
if (global->mutability && global->imported) {
- unsupported(decoder, kRefTypes, "imported mutable globals");
+ LiftoffRegList pinned;
+ LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
+ Register base = no_reg;
+ Register offset = no_reg;
+ GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &pinned,
+ &base, &offset);
+ __ StoreTaggedPointer(base, offset, 0, value, pinned);
return;
}
LiftoffRegList pinned;
Register globals_buffer =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer, TaggedGlobalsBuffer);
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer, TaggedGlobalsBuffer,
+ pinned);
LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
__ StoreTaggedPointer(globals_buffer, no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
@@ -1927,7 +2126,7 @@ class LiftoffCompiler {
uint32_t offset = 0;
Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
LiftoffRegister reg = pinned.set(__ PopToRegister(pinned));
- StoreType type = StoreType::ForValueType(global->type);
+ StoreType type = StoreType::ForValueKind(kind);
__ Store(addr, no_reg, offset, reg, type, {}, nullptr, true);
}
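GlobalGet and GlobalSet above pick between two storage areas: numeric globals live as raw bytes in an untagged buffer reached via GlobalsStart, while reference globals live in a tagged FixedArray (TaggedGlobalsBuffer) so the GC can find them, with imported mutable ones additionally indirected through a per-import buffer and index. A toy model of that split, with invented names purely for illustration:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Toy instance: one untagged byte buffer for numeric globals, one vector of
// tagged slots for reference globals. Not V8's real data structures.
struct ToyInstance {
  std::vector<uint8_t> untagged_globals;
  std::vector<const void*> tagged_globals;
};

// Numeric global: plain load at a precomputed byte offset.
int32_t GetI32Global(const ToyInstance& inst, uint32_t byte_offset) {
  int32_t value;
  std::memcpy(&value, inst.untagged_globals.data() + byte_offset, sizeof(value));
  return value;
}

// Reference global: indexed slot in the tagged buffer; the real code uses
// StoreTaggedPointer here so the GC write barrier is applied.
void SetRefGlobal(ToyInstance* inst, uint32_t index, const void* ref) {
  inst->tagged_globals[index] = ref;
}

int main() {
  ToyInstance inst{std::vector<uint8_t>(16, 0), std::vector<const void*>(4, nullptr)};
  int32_t v = 42;
  std::memcpy(inst.untagged_globals.data() + 8, &v, sizeof(v));
  SetRefGlobal(&inst, 2, &inst);
  assert(GetI32Global(inst, 8) == 42 && inst.tagged_globals[2] == &inst);
  return 0;
}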
@@ -1947,9 +2146,9 @@ class LiftoffCompiler {
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmTableGetDescriptor>(compilation_zone_);
- ValueType result_type = env_->module->tables[imm.index].type;
- ValueType sig_reps[] = {result_type, kWasmI32, kWasmI32};
- FunctionSig sig(1, 2, sig_reps);
+ ValueKind result_kind = env_->module->tables[imm.index].type.kind();
+ ValueKind sig_reps[] = {result_kind, kI32, kI32};
+ ValueKindSig sig(1, 2, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor, {table_index, index});
__ CallRuntimeStub(target);
@@ -1960,7 +2159,7 @@ class LiftoffCompiler {
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
- __ PushRegister(result_type, LiftoffRegister(kReturnRegister0));
+ __ PushRegister(result_kind, LiftoffRegister(kReturnRegister0));
}
void TableSet(FullDecoder* decoder, const Value&, const Value&,
@@ -1980,9 +2179,9 @@ class LiftoffCompiler {
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmTableSetDescriptor>(compilation_zone_);
- ValueType sig_reps[] = {kWasmI32, kWasmI32,
- env_->module->tables[imm.index].type};
- FunctionSig sig(0, 3, sig_reps);
+ ValueKind table_kind = env_->module->tables[imm.index].type.kind();
+ ValueKind sig_reps[] = {kI32, kI32, table_kind};
+ ValueKindSig sig(0, 3, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor, {table_index, index, value});
__ CallRuntimeStub(target);
@@ -2001,29 +2200,33 @@ class LiftoffCompiler {
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
+ void NopForTestingUnsupportedInLiftoff(FullDecoder* decoder) {
+ unsupported(decoder, kOtherReason, "testing opcode");
+ }
+
void Select(FullDecoder* decoder, const Value& cond, const Value& fval,
const Value& tval, Value* result) {
LiftoffRegList pinned;
Register condition = pinned.set(__ PopToRegister()).gp();
- ValueType type = __ cache_state()->stack_state.end()[-1].type();
- DCHECK_EQ(type, __ cache_state()->stack_state.end()[-2].type());
+ ValueKind kind = __ cache_state()->stack_state.end()[-1].kind();
+ DCHECK_EQ(kind, __ cache_state()->stack_state.end()[-2].kind());
LiftoffRegister false_value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister true_value = __ PopToRegister(pinned);
LiftoffRegister dst = __ GetUnusedRegister(true_value.reg_class(),
{true_value, false_value}, {});
- if (!__ emit_select(dst, condition, true_value, false_value, type)) {
+ if (!__ emit_select(dst, condition, true_value, false_value, kind)) {
// Emit generic code (using branches) instead.
Label cont;
Label case_false;
- __ emit_cond_jump(kEqual, &case_false, kWasmI32, condition);
- if (dst != true_value) __ Move(dst, true_value, type);
+ __ emit_cond_jump(kEqual, &case_false, kI32, condition);
+ if (dst != true_value) __ Move(dst, true_value, kind);
__ emit_jump(&cont);
__ bind(&case_false);
- if (dst != false_value) __ Move(dst, false_value, type);
+ if (dst != false_value) __ Move(dst, false_value, kind);
__ bind(&cont);
}
- __ PushRegister(type, dst);
+ __ PushRegister(kind, dst);
}
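When the target has no suitable emit_select (conditional move), the fallback above lowers select to a compare-against-zero plus two branches. Written as plain C++ the operation is just the following; note the operand order, since the condition is popped first, then the false value, then the true value:

#include <cassert>
#include <cstdint>

// Semantics of the branch-based fallback: jump to the false case when the
// i32 condition is zero, otherwise keep the true value.
int32_t WasmSelect(int32_t condition, int32_t true_value, int32_t false_value) {
  if (condition == 0) return false_value;  // emit_cond_jump(kEqual, &case_false, ...)
  return true_value;
}

int main() {
  assert(WasmSelect(1, 10, 20) == 10);
  assert(WasmSelect(0, 10, 20) == 20);
  return 0;
}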
void BrImpl(Control* target) {
@@ -2038,7 +2241,7 @@ class LiftoffCompiler {
void BrOrRet(FullDecoder* decoder, uint32_t depth) {
if (depth == decoder->control_depth() - 1) {
- ReturnImpl(decoder);
+ DoReturn(decoder);
} else {
BrImpl(decoder->control_at(depth));
}
@@ -2058,17 +2261,17 @@ class LiftoffCompiler {
if (!has_outstanding_op()) {
// Unary "equal" means "equals zero".
- __ emit_cond_jump(kEqual, &cont_false, kWasmI32, value);
+ __ emit_cond_jump(kEqual, &cont_false, kI32, value);
} else if (outstanding_op_ == kExprI32Eqz) {
// Unary "unequal" means "not equals zero".
- __ emit_cond_jump(kUnequal, &cont_false, kWasmI32, value);
+ __ emit_cond_jump(kUnequal, &cont_false, kI32, value);
outstanding_op_ = kNoOutstandingOp;
} else {
// Otherwise, it's an i32 compare opcode.
LiftoffCondition cond = Negate(GetCompareCondition(outstanding_op_));
Register rhs = value;
Register lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs)).gp();
- __ emit_cond_jump(cond, &cont_false, kWasmI32, lhs, rhs);
+ __ emit_cond_jump(cond, &cont_false, kI32, lhs, rhs);
outstanding_op_ = kNoOutstandingOp;
}
@@ -2106,7 +2309,7 @@ class LiftoffCompiler {
uint32_t split = min + (max - min) / 2;
Label upper_half;
__ LoadConstant(tmp, WasmValue(split));
- __ emit_cond_jump(kUnsignedGreaterEqual, &upper_half, kWasmI32, value.gp(),
+ __ emit_cond_jump(kUnsignedGreaterEqual, &upper_half, kI32, value.gp(),
tmp.gp());
// Emit br table for lower half:
GenerateBrTable(decoder, tmp, value, min, split, table_iterator,
@@ -2130,8 +2333,8 @@ class LiftoffCompiler {
LiftoffRegister tmp = __ GetUnusedRegister(kGpReg, pinned);
__ LoadConstant(tmp, WasmValue(uint32_t{imm.table_count}));
Label case_default;
- __ emit_cond_jump(kUnsignedGreaterEqual, &case_default, kWasmI32,
- value.gp(), tmp.gp());
+ __ emit_cond_jump(kUnsignedGreaterEqual, &case_default, kI32, value.gp(),
+ tmp.gp());
GenerateBrTable(decoder, tmp, value, 0, imm.table_count, &table_iterator,
&br_targets);
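The two hunks above show how Liftoff dispatches br_table: after a bounds check against the table count, GenerateBrTable recursively splits the index range at split = min + (max - min) / 2 and branches to the upper half on kUnsignedGreaterEqual. The sketch below models the resulting control flow iteratively as an ordinary function returning the chosen target; names and the default-target handling are illustrative.

#include <cassert>
#include <cstdint>
#include <vector>

uint32_t BrTableTarget(const std::vector<uint32_t>& targets,
                       uint32_t default_target, uint32_t value) {
  // Out-of-range indices take the default target (the bounds check emitted
  // before the first GenerateBrTable call).
  if (value >= targets.size()) return default_target;
  uint32_t min = 0, max = static_cast<uint32_t>(targets.size());
  while (max - min > 1) {
    uint32_t split = min + (max - min) / 2;
    if (value >= split) {   // kUnsignedGreaterEqual -> upper half
      min = split;
    } else {                // otherwise continue in the lower half
      max = split;
    }
  }
  return targets[min];
}

int main() {
  std::vector<uint32_t> targets = {7, 8, 9, 10};
  assert(BrTableTarget(targets, 99, 2) == 9);   // in range
  assert(BrTableTarget(targets, 99, 5) == 99);  // out of range -> default
  return 0;
}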
@@ -2169,7 +2372,7 @@ class LiftoffCompiler {
auto& slot = __ cache_state()->stack_state[i];
if (!slot.is_reg()) continue;
spilled->entries.push_back(SpilledRegistersForInspection::Entry{
- slot.offset(), slot.reg(), slot.type()});
+ slot.offset(), slot.reg(), slot.kind()});
__ RecordUsedSpillOffset(slot.offset());
}
return spilled;
@@ -2194,8 +2397,7 @@ class LiftoffCompiler {
stub, position,
V8_UNLIKELY(for_debugging_) ? GetSpilledRegistersForInspection()
: nullptr,
- safepoint_info, pc,
- RegisterDebugSideTableEntry(DebugSideTableBuilder::kAssumeSpilling)));
+ safepoint_info, pc, RegisterOOLDebugSideTableEntry()));
return out_of_line_code_.back().label.get();
}
@@ -2250,7 +2452,7 @@ class LiftoffCompiler {
} else if (kSystemPointerSize == kInt32Size) {
DCHECK_GE(kMaxUInt32, env_->max_memory_size);
// Unary "unequal" means "not equals zero".
- __ emit_cond_jump(kUnequal, trap_label, kWasmI32, index.high_gp());
+ __ emit_cond_jump(kUnequal, trap_label, kI32, index.high_gp());
}
uintptr_t end_offset = offset + access_size - 1u;
@@ -2259,7 +2461,7 @@ class LiftoffCompiler {
LiftoffRegister end_offset_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LiftoffRegister mem_size = __ GetUnusedRegister(kGpReg, pinned);
- LOAD_INSTANCE_FIELD(mem_size.gp(), MemorySize, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(mem_size.gp(), MemorySize, kSystemPointerSize, pinned);
__ LoadConstant(end_offset_reg, WasmValue::ForUintPtr(end_offset));
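This hunk belongs to the explicit bounds check Liftoff emits when it cannot rely on the trap handler. Written out as a plain predicate, the logic being assembled here (a sketch of the condition, not of the emitted instruction sequence) is:

#include <cassert>
#include <cstdint>

// An access of `access_size` bytes at `index + offset` is in bounds iff
//   end_offset < mem_size  and  index < mem_size - end_offset,
// where end_offset = offset + access_size - 1 and all values are unsigned.
bool AccessInBounds(uintptr_t index, uintptr_t offset, uintptr_t access_size,
                    uintptr_t mem_size) {
  uintptr_t end_offset = offset + access_size - 1;  // last touched byte, relative to index
  if (end_offset >= mem_size) return false;         // static part already too large
  return index < mem_size - end_offset;             // dynamic part must fit as well
}

int main() {
  // 4-byte load at index 10 with static offset 2 touches bytes 12..15.
  assert(AccessInBounds(10, 2, 4, 16));    // 16-byte memory: in bounds
  assert(!AccessInBounds(10, 2, 4, 15));   // 15-byte memory: out of bounds
  return 0;
}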
@@ -2298,12 +2500,12 @@ class LiftoffCompiler {
// {emit_cond_jump} to use the "test" instruction without the "and" here.
// Then we can also avoid using the temp register here.
__ emit_i32_andi(address, index, align_mask);
- __ emit_cond_jump(kUnequal, trap_label, kWasmI32, address);
+ __ emit_cond_jump(kUnequal, trap_label, kI32, address);
} else {
// For alignment checks we only look at the lower 32-bits in {offset}.
__ emit_i32_addi(address, index, static_cast<uint32_t>(offset));
__ emit_i32_andi(address, address, align_mask);
- __ emit_cond_jump(kUnequal, trap_label, kWasmI32, address);
+ __ emit_cond_jump(kUnequal, trap_label, kI32, address);
}
}
@@ -2353,7 +2555,7 @@ class LiftoffCompiler {
DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
Register param_reg = descriptor.GetRegisterParameter(0);
if (info.gp() != param_reg) {
- __ Move(param_reg, info.gp(), LiftoffAssembler::kWasmIntPtr);
+ __ Move(param_reg, info.gp(), LiftoffAssembler::kIntPtr);
}
source_position_table_builder_.AddPosition(__ pc_offset(),
@@ -2380,7 +2582,7 @@ class LiftoffCompiler {
}
}
Register tmp = __ GetUnusedRegister(kGpReg, *pinned).gp();
- LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize, *pinned);
if (*offset) __ emit_ptrsize_addi(index, index, *offset);
__ emit_ptrsize_and(index, index, tmp);
*offset = 0;
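The masking step above (used when untrusted-code mitigations are enabled) folds the static offset into the index and ANDs the result with a mask loaded from the instance, so the later access at base + index + 0 cannot reach past the masked region. A sketch of the arithmetic; the mask value below is an assumption for illustration, of the form 2^k - 1:

#include <cassert>
#include <cstdint>

uintptr_t MaskIndex(uintptr_t index, uintptr_t* offset, uintptr_t memory_mask) {
  index += *offset;      // emit_ptrsize_addi(index, index, *offset)
  index &= memory_mask;  // emit_ptrsize_and(index, index, tmp)
  *offset = 0;           // the static offset has been consumed
  return index;
}

int main() {
  uintptr_t offset = 8;
  uintptr_t masked = MaskIndex(0x12345, &offset, 0xffff);  // 64 KiB mask, assumed
  assert(masked == ((0x12345 + 8) & 0xffff) && offset == 0);
  return 0;
}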
@@ -2396,8 +2598,8 @@ class LiftoffCompiler {
void LoadMem(FullDecoder* decoder, LoadType type,
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, Value* result) {
- ValueType value_type = type.value_type();
- if (!CheckSupportedType(decoder, value_type, "load")) return;
+ ValueKind kind = type.value_type().kind();
+ if (!CheckSupportedType(decoder, kind, "load")) return;
LiftoffRegister full_index = __ PopToRegister();
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, {}, kDontForceCheck);
@@ -2408,8 +2610,8 @@ class LiftoffCompiler {
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("load from memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
- RegClass rc = reg_class_for(value_type);
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
+ RegClass rc = reg_class_for(kind);
LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
uint32_t protected_load_pc = 0;
__ Load(value, addr, index, offset, type, pinned, &protected_load_pc, true);
@@ -2418,7 +2620,7 @@ class LiftoffCompiler {
WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_load_pc);
}
- __ PushRegister(value_type, value);
+ __ PushRegister(kind, value);
if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(false, type.mem_type().representation(), index,
@@ -2432,7 +2634,7 @@ class LiftoffCompiler {
const Value& index_val, Value* result) {
// LoadTransform requires SIMD support, so check for it here. If
// unsupported, bailout and let TurboFan lower the code.
- if (!CheckSupportedType(decoder, kWasmS128, "LoadTransform")) {
+ if (!CheckSupportedType(decoder, kS128, "LoadTransform")) {
return;
}
@@ -2451,7 +2653,7 @@ class LiftoffCompiler {
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("load with transformation");
Register addr = __ GetUnusedRegister(kGpReg, pinned).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
LiftoffRegister value = __ GetUnusedRegister(reg_class_for(kS128), {});
uint32_t protected_load_pc = 0;
__ LoadTransform(value, addr, index, offset, type, transform,
@@ -2462,7 +2664,7 @@ class LiftoffCompiler {
WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_load_pc);
}
- __ PushRegister(ValueType::Primitive(kS128), value);
+ __ PushRegister(kS128, value);
if (FLAG_trace_wasm_memory) {
// Again load extend is different.
@@ -2477,7 +2679,7 @@ class LiftoffCompiler {
void LoadLane(FullDecoder* decoder, LoadType type, const Value& _value,
const Value& _index, const MemoryAccessImmediate<validate>& imm,
const uint8_t laneidx, Value* _result) {
- if (!CheckSupportedType(decoder, kWasmS128, "LoadLane")) {
+ if (!CheckSupportedType(decoder, kS128, "LoadLane")) {
return;
}
@@ -2493,7 +2695,7 @@ class LiftoffCompiler {
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("load lane");
Register addr = __ GetUnusedRegister(kGpReg, pinned).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
LiftoffRegister result = __ GetUnusedRegister(reg_class_for(kS128), {});
uint32_t protected_load_pc = 0;
@@ -2505,7 +2707,7 @@ class LiftoffCompiler {
protected_load_pc);
}
- __ PushRegister(ValueType::Primitive(kS128), result);
+ __ PushRegister(kS128, result);
if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(false, type.mem_type().representation(), index,
@@ -2516,8 +2718,8 @@ class LiftoffCompiler {
void StoreMem(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, const Value& value_val) {
- ValueType value_type = type.value_type();
- if (!CheckSupportedType(decoder, value_type, "store")) return;
+ ValueKind kind = type.value_type().kind();
+ if (!CheckSupportedType(decoder, kind, "store")) return;
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
LiftoffRegister full_index = __ PopToRegister(pinned);
@@ -2530,7 +2732,7 @@ class LiftoffCompiler {
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("store to memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
uint32_t protected_store_pc = 0;
LiftoffRegList outer_pinned;
if (FLAG_trace_wasm_memory) outer_pinned.set(index);
@@ -2548,16 +2750,48 @@ class LiftoffCompiler {
}
void StoreLane(FullDecoder* decoder, StoreType type,
- const MemoryAccessImmediate<validate>& imm, const Value& index,
- const Value& value, const uint8_t laneidx) {
- unsupported(decoder, kSimd, "simd load lane");
+ const MemoryAccessImmediate<validate>& imm,
+ const Value& _index, const Value& _value, const uint8_t lane) {
+ if (!CheckSupportedType(decoder, kS128, "StoreLane")) return;
+ LiftoffRegList pinned;
+ LiftoffRegister value = pinned.set(__ PopToRegister());
+ LiftoffRegister full_index = __ PopToRegister(pinned);
+ Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
+ full_index, pinned, kDontForceCheck);
+ if (index == no_reg) return;
+
+ uintptr_t offset = imm.offset;
+ pinned.set(index);
+ index = AddMemoryMasking(index, &offset, &pinned);
+ DEBUG_CODE_COMMENT("store lane to memory");
+ Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
+ uint32_t protected_store_pc = 0;
+ __ StoreLane(addr, index, offset, value, type, lane, &protected_store_pc);
+ if (env_->use_trap_handler) {
+ AddOutOfLineTrap(decoder->position(),
+ WasmCode::kThrowWasmTrapMemOutOfBounds,
+ protected_store_pc);
+ }
+ if (FLAG_trace_wasm_memory) {
+ TraceMemoryOperation(true, type.mem_rep(), index, offset,
+ decoder->position());
+ }
}
- void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
+ void CurrentMemoryPages(FullDecoder* /* decoder */, Value* /* result */) {
Register mem_size = __ GetUnusedRegister(kGpReg, {}).gp();
- LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize, {});
__ emit_ptrsize_shri(mem_size, mem_size, kWasmPageSizeLog2);
- __ PushRegister(kWasmI32, LiftoffRegister(mem_size));
+ LiftoffRegister result{mem_size};
+ if (env_->module->is_memory64 && kNeedI64RegPair) {
+ LiftoffRegister high_word =
+ __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(mem_size));
+ // The high word is always 0 on 32-bit systems.
+ __ LoadConstant(high_word, WasmValue{uint32_t{0}});
+ result = LiftoffRegister::ForPair(mem_size, high_word.gp());
+ }
+ __ PushRegister(env_->module->is_memory64 ? kI64 : kI32, result);
}
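CurrentMemoryPages above converts the memory byte size into a page count by shifting right by kWasmPageSizeLog2 (Wasm pages are 64 KiB). With memory64 the result is pushed as an i64, which on 32-bit targets means a register pair whose high word is the constant 0. Numerically that is just the same page count widened to 64 bits, as in this sketch:

#include <cassert>
#include <cstdint>

constexpr uint32_t kWasmPageSizeLog2 = 16;  // 64 KiB pages

uint64_t CurrentPages(uint64_t memory_byte_size, bool is_memory64) {
  uint64_t pages = memory_byte_size >> kWasmPageSizeLog2;
  // memory32 truncates to 32 bits; memory64 keeps the full (zero-extended) value.
  return is_memory64 ? pages : static_cast<uint32_t>(pages);
}

int main() {
  assert(CurrentPages(1 << 20, false) == 16);  // 1 MiB = 16 pages
  assert(CurrentPages(1 << 20, true) == 16);
  return 0;
}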
void MemoryGrow(FullDecoder* decoder, const Value& value, Value* result_val) {
@@ -2575,29 +2809,35 @@ class LiftoffCompiler {
WasmMemoryGrowDescriptor descriptor;
DCHECK_EQ(0, descriptor.GetStackParameterCount());
DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
- DCHECK_EQ(kWasmI32.machine_type(), descriptor.GetParameterType(0));
+ DCHECK_EQ(machine_type(kI32), descriptor.GetParameterType(0));
Register param_reg = descriptor.GetRegisterParameter(0);
- if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kWasmI32);
+ if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kI32);
__ CallRuntimeStub(WasmCode::kWasmMemoryGrow);
DefineSafepoint();
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
if (kReturnRegister0 != result.gp()) {
- __ Move(result.gp(), kReturnRegister0, kWasmI32);
+ __ Move(result.gp(), kReturnRegister0, kI32);
}
- __ PushRegister(kWasmI32, result);
+ __ PushRegister(kI32, result);
}
- DebugSideTableBuilder::EntryBuilder* RegisterDebugSideTableEntry(
+ void RegisterDebugSideTableEntry(
DebugSideTableBuilder::AssumeSpilling assume_spilling) {
+ if (V8_LIKELY(!debug_sidetable_builder_)) return;
+ debug_sidetable_builder_->NewEntry(__ pc_offset(),
+ VectorOf(__ cache_state()->stack_state),
+ assume_spilling);
+ }
+
+ DebugSideTableBuilder::EntryBuilder* RegisterOOLDebugSideTableEntry() {
if (V8_LIKELY(!debug_sidetable_builder_)) return nullptr;
- int stack_height = static_cast<int>(__ cache_state()->stack_height());
- return debug_sidetable_builder_->NewEntry(
- __ pc_offset(), __ num_locals(), stack_height,
- __ cache_state()->stack_state.begin(), assume_spilling);
+ return debug_sidetable_builder_->NewOOLEntry(
+ VectorOf(__ cache_state()->stack_state),
+ DebugSideTableBuilder::kAssumeSpilling);
}
enum CallKind : bool { kReturnCall = true, kNoReturnCall = false };
@@ -2617,7 +2857,7 @@ class LiftoffCompiler {
void CallRef(FullDecoder* decoder, const Value& func_ref,
const FunctionSig* sig, uint32_t sig_index, const Value args[],
Value returns[]) {
- unsupported(decoder, kRefTypes, "call_ref");
+ CallRef(decoder, func_ref.type, sig, kNoReturnCall);
}
void ReturnCall(FullDecoder* decoder,
@@ -2635,7 +2875,7 @@ class LiftoffCompiler {
void ReturnCallRef(FullDecoder* decoder, const Value& func_ref,
const FunctionSig* sig, uint32_t sig_index,
const Value args[]) {
- unsupported(decoder, kRefTypes, "call_ref");
+ CallRef(decoder, func_ref.type, sig, kReturnCall);
}
void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth) {
@@ -2649,21 +2889,20 @@ class LiftoffCompiler {
Label cont_false;
LiftoffRegList pinned;
LiftoffRegister ref = pinned.set(__ PopToRegister(pinned));
- Register null = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ Register null = __ GetUnusedRegister(kGpReg, pinned).gp();
LoadNullValue(null, pinned);
- __ emit_cond_jump(kUnequal, &cont_false, ref_object.type, ref.gp(), null);
+ __ emit_cond_jump(kUnequal, &cont_false, ref_object.type.kind(), ref.gp(),
+ null);
BrOrRet(decoder, depth);
__ bind(&cont_false);
- __ PushRegister(ValueType::Ref(ref_object.type.heap_type(), kNonNullable),
- ref);
+ __ PushRegister(kRef, ref);
}
- template <ValueType::Kind src_type, ValueType::Kind result_type,
- typename EmitFn>
+ template <ValueKind src_kind, ValueKind result_kind, typename EmitFn>
void EmitTerOp(EmitFn fn) {
- static constexpr RegClass src_rc = reg_class_for(src_type);
- static constexpr RegClass result_rc = reg_class_for(result_type);
+ static constexpr RegClass src_rc = reg_class_for(src_kind);
+ static constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffRegister src3 = __ PopToRegister();
LiftoffRegister src2 = __ PopToRegister(LiftoffRegList::ForRegs(src3));
LiftoffRegister src1 =
@@ -2676,12 +2915,12 @@ class LiftoffCompiler {
LiftoffRegList::ForRegs(src1, src2))
: __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src1, src2, src3);
- __ PushRegister(ValueType::Primitive(result_type), dst);
+ __ PushRegister(result_kind, dst);
}
template <typename EmitFn, typename EmitFnImm>
void EmitSimdShiftOp(EmitFn fn, EmitFnImm fnImm) {
- static constexpr RegClass result_rc = reg_class_for(ValueType::kS128);
+ static constexpr RegClass result_rc = reg_class_for(kS128);
LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back();
// Check if the RHS is an immediate.
@@ -2693,30 +2932,30 @@ class LiftoffCompiler {
LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {});
CallEmitFn(fnImm, dst, operand, imm);
- __ PushRegister(kWasmS128, dst);
+ __ PushRegister(kS128, dst);
} else {
LiftoffRegister count = __ PopToRegister();
LiftoffRegister operand = __ PopToRegister();
LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {});
CallEmitFn(fn, dst, operand, count);
- __ PushRegister(kWasmS128, dst);
+ __ PushRegister(kS128, dst);
}
}
void EmitSimdFloatRoundingOpWithCFallback(
bool (LiftoffAssembler::*emit_fn)(LiftoffRegister, LiftoffRegister),
ExternalReference (*ext_ref)()) {
- static constexpr RegClass rc = reg_class_for(kWasmS128);
+ static constexpr RegClass rc = reg_class_for(kS128);
LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = __ GetUnusedRegister(rc, {src}, {});
if (!(asm_.*emit_fn)(dst, src)) {
// Return v128 via stack for ARM.
- ValueType sig_v_s_reps[] = {kWasmS128};
- FunctionSig sig_v_s(0, 1, sig_v_s_reps);
- GenerateCCall(&dst, &sig_v_s, kWasmS128, &src, ext_ref());
+ ValueKind sig_v_s_reps[] = {kS128};
+ ValueKindSig sig_v_s(0, 1, sig_v_s_reps);
+ GenerateCCall(&dst, &sig_v_s, kS128, &src, ext_ref());
}
- __ PushRegister(kWasmS128, dst);
+ __ PushRegister(kS128, dst);
}
void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
@@ -2727,6 +2966,8 @@ class LiftoffCompiler {
switch (opcode) {
case wasm::kExprI8x16Swizzle:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_swizzle);
+ case wasm::kExprI8x16Popcnt:
+ return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_popcnt);
case wasm::kExprI8x16Splat:
return EmitUnOp<kI32, kS128>(&LiftoffAssembler::emit_i8x16_splat);
case wasm::kExprI16x8Splat:
@@ -2811,6 +3052,18 @@ class LiftoffCompiler {
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_ge_s);
case wasm::kExprI32x4GeU:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_ge_u);
+ case wasm::kExprI64x2Eq:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_eq);
+ case wasm::kExprI64x2LtS:
+ return EmitBinOp<kS128, kS128, true>(
+ &LiftoffAssembler::emit_i64x2_gt_s);
+ case wasm::kExprI64x2GtS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_gt_s);
+ case wasm::kExprI64x2LeS:
+ return EmitBinOp<kS128, kS128, true>(
+ &LiftoffAssembler::emit_i64x2_ge_s);
+ case wasm::kExprI64x2GeS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_ge_s);
case wasm::kExprF32x4Eq:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_eq);
case wasm::kExprF32x4Ne:
@@ -2847,8 +3100,8 @@ class LiftoffCompiler {
return EmitTerOp<kS128, kS128>(&LiftoffAssembler::emit_s128_select);
case wasm::kExprI8x16Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_neg);
- case wasm::kExprV8x16AnyTrue:
- return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v8x16_anytrue);
+ case wasm::kExprV128AnyTrue:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v128_anytrue);
case wasm::kExprV8x16AllTrue:
return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v8x16_alltrue);
case wasm::kExprI8x16BitMask:
@@ -2886,8 +3139,6 @@ class LiftoffCompiler {
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_max_u);
case wasm::kExprI16x8Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_neg);
- case wasm::kExprV16x8AnyTrue:
- return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v16x8_anytrue);
case wasm::kExprV16x8AllTrue:
return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v16x8_alltrue);
case wasm::kExprI16x8BitMask:
@@ -2923,6 +3174,12 @@ class LiftoffCompiler {
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_max_s);
case wasm::kExprI16x8MaxU:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_max_u);
+ case wasm::kExprI16x8ExtAddPairwiseI8x16S:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s);
+ case wasm::kExprI16x8ExtAddPairwiseI8x16U:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u);
case wasm::kExprI16x8ExtMulLowI8x16S:
return EmitBinOp<kS128, kS128>(
&LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s);
@@ -2935,10 +3192,11 @@ class LiftoffCompiler {
case wasm::kExprI16x8ExtMulHighI8x16U:
return EmitBinOp<kS128, kS128>(
&LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u);
+ case wasm::kExprI16x8Q15MulRSatS:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i16x8_q15mulr_sat_s);
case wasm::kExprI32x4Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_neg);
- case wasm::kExprV32x4AnyTrue:
- return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v32x4_anytrue);
case wasm::kExprV32x4AllTrue:
return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v32x4_alltrue);
case wasm::kExprI32x4BitMask:
@@ -2969,6 +3227,12 @@ class LiftoffCompiler {
case wasm::kExprI32x4DotI16x8S:
return EmitBinOp<kS128, kS128>(
&LiftoffAssembler::emit_i32x4_dot_i16x8_s);
+ case wasm::kExprI32x4ExtAddPairwiseI16x8S:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s);
+ case wasm::kExprI32x4ExtAddPairwiseI16x8U:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u);
case wasm::kExprI32x4ExtMulLowI16x8S:
return EmitBinOp<kS128, kS128>(
&LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s);
@@ -2983,6 +3247,8 @@ class LiftoffCompiler {
&LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u);
case wasm::kExprI64x2Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_neg);
+ case wasm::kExprV64x2AllTrue:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v64x2_alltrue);
case wasm::kExprI64x2Shl:
return EmitSimdShiftOp(&LiftoffAssembler::emit_i64x2_shl,
&LiftoffAssembler::emit_i64x2_shli);
@@ -3012,6 +3278,18 @@ class LiftoffCompiler {
&LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u);
case wasm::kExprI64x2BitMask:
return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i64x2_bitmask);
+ case wasm::kExprI64x2SConvertI32x4Low:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i64x2_sconvert_i32x4_low);
+ case wasm::kExprI64x2SConvertI32x4High:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i64x2_sconvert_i32x4_high);
+ case wasm::kExprI64x2UConvertI32x4Low:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i64x2_uconvert_i32x4_low);
+ case wasm::kExprI64x2UConvertI32x4High:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i64x2_uconvert_i32x4_high);
case wasm::kExprF32x4Abs:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_abs);
case wasm::kExprF32x4Neg:
@@ -3150,26 +3428,27 @@ class LiftoffCompiler {
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_abs);
case wasm::kExprI32x4Abs:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_abs);
+ case wasm::kExprI64x2Abs:
+ return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_abs);
default:
unsupported(decoder, kSimd, "simd");
}
}
- template <ValueType::Kind src_type, ValueType::Kind result_type,
- typename EmitFn>
+ template <ValueKind src_kind, ValueKind result_kind, typename EmitFn>
void EmitSimdExtractLaneOp(EmitFn fn,
const SimdLaneImmediate<validate>& imm) {
- static constexpr RegClass src_rc = reg_class_for(src_type);
- static constexpr RegClass result_rc = reg_class_for(result_type);
+ static constexpr RegClass src_rc = reg_class_for(src_kind);
+ static constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffRegister lhs = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
? __ GetUnusedRegister(result_rc, {lhs}, {})
: __ GetUnusedRegister(result_rc, {});
fn(dst, lhs, imm.lane);
- __ PushRegister(ValueType::Primitive(result_type), dst);
+ __ PushRegister(result_kind, dst);
}
- template <ValueType::Kind src2_type, typename EmitFn>
+ template <ValueKind src2_type, typename EmitFn>
void EmitSimdReplaceLaneOp(EmitFn fn,
const SimdLaneImmediate<validate>& imm) {
static constexpr RegClass src1_rc = reg_class_for(kS128);
@@ -3192,7 +3471,7 @@ class LiftoffCompiler {
LiftoffRegList::ForRegs(src2))
: __ GetUnusedRegister(result_rc, {src1}, {});
fn(dst, src1, src2, imm.lane);
- __ PushRegister(kWasmS128, dst);
+ __ PushRegister(kS128, dst);
}
void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
@@ -3202,9 +3481,9 @@ class LiftoffCompiler {
return unsupported(decoder, kSimd, "simd");
}
switch (opcode) {
-#define CASE_SIMD_EXTRACT_LANE_OP(opcode, type, fn) \
+#define CASE_SIMD_EXTRACT_LANE_OP(opcode, kind, fn) \
case wasm::kExpr##opcode: \
- EmitSimdExtractLaneOp<kS128, k##type>( \
+ EmitSimdExtractLaneOp<kS128, k##kind>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx) { \
__ emit_##fn(dst, lhs, imm_lane_idx); \
}, \
@@ -3219,9 +3498,9 @@ class LiftoffCompiler {
CASE_SIMD_EXTRACT_LANE_OP(F32x4ExtractLane, F32, f32x4_extract_lane)
CASE_SIMD_EXTRACT_LANE_OP(F64x2ExtractLane, F64, f64x2_extract_lane)
#undef CASE_SIMD_EXTRACT_LANE_OP
-#define CASE_SIMD_REPLACE_LANE_OP(opcode, type, fn) \
+#define CASE_SIMD_REPLACE_LANE_OP(opcode, kind, fn) \
case wasm::kExpr##opcode: \
- EmitSimdReplaceLaneOp<k##type>( \
+ EmitSimdReplaceLaneOp<k##kind>( \
[=](LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, \
uint8_t imm_lane_idx) { \
__ emit_##fn(dst, src1, src2, imm_lane_idx); \
@@ -3245,7 +3524,7 @@ class LiftoffCompiler {
if (!CpuFeatures::SupportsWasmSimd128()) {
return unsupported(decoder, kSimd, "simd");
}
- constexpr RegClass result_rc = reg_class_for(ValueType::kS128);
+ constexpr RegClass result_rc = reg_class_for(kS128);
LiftoffRegister dst = __ GetUnusedRegister(result_rc, {});
bool all_zeroes = std::all_of(std::begin(imm.value), std::end(imm.value),
[](uint8_t v) { return v == 0; });
@@ -3259,7 +3538,7 @@ class LiftoffCompiler {
} else {
__ LiftoffAssembler::emit_s128_const(dst, imm.value);
}
- __ PushRegister(kWasmS128, dst);
+ __ PushRegister(kS128, dst);
}
void Simd8x16ShuffleOp(FullDecoder* decoder,
@@ -3269,7 +3548,7 @@ class LiftoffCompiler {
if (!CpuFeatures::SupportsWasmSimd128()) {
return unsupported(decoder, kSimd, "simd");
}
- static constexpr RegClass result_rc = reg_class_for(ValueType::kS128);
+ static constexpr RegClass result_rc = reg_class_for(kS128);
LiftoffRegister rhs = __ PopToRegister();
LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
LiftoffRegister dst = __ GetUnusedRegister(result_rc, {lhs, rhs}, {});
@@ -3284,13 +3563,127 @@ class LiftoffCompiler {
std::swap(lhs, rhs);
}
__ LiftoffAssembler::emit_i8x16_shuffle(dst, lhs, rhs, shuffle, is_swizzle);
- __ PushRegister(kWasmS128, dst);
+ __ PushRegister(kS128, dst);
+ }
+
+ void ToSmi(Register reg) {
+ if (COMPRESS_POINTERS_BOOL || kSystemPointerSize == 4) {
+ __ emit_i32_shli(reg, reg, kSmiShiftSize + kSmiTagSize);
+ } else {
+ __ emit_i64_shli(LiftoffRegister{reg}, LiftoffRegister{reg},
+ kSmiShiftSize + kSmiTagSize);
+ }
}
- void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>&,
- const Vector<Value>& args) {
- unsupported(decoder, kExceptionHandling, "throw");
+ void Store32BitExceptionValue(Register values_array, int* index_in_array,
+ Register value, LiftoffRegList pinned) {
+ LiftoffRegister tmp_reg = __ GetUnusedRegister(kGpReg, pinned);
+ // Get the lower half word into tmp_reg and extend to a Smi.
+ --*index_in_array;
+ __ emit_i32_andi(tmp_reg.gp(), value, 0xffff);
+ ToSmi(tmp_reg.gp());
+ __ StoreTaggedPointer(
+ values_array, no_reg,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(*index_in_array),
+ tmp_reg, pinned, LiftoffAssembler::kSkipWriteBarrier);
+
+ // Get the upper half word into tmp_reg and extend to a Smi.
+ --*index_in_array;
+ __ emit_i32_shri(tmp_reg.gp(), value, 16);
+ ToSmi(tmp_reg.gp());
+ __ StoreTaggedPointer(
+ values_array, no_reg,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(*index_in_array),
+ tmp_reg, pinned, LiftoffAssembler::kSkipWriteBarrier);
+ }
+
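Store32BitExceptionValue above encodes each i32 exception value into two slots of the values FixedArray, holding the upper and lower 16 bits separately so each slot fits in a Smi even where Smis are only 31 bits wide. Filling walks the index backwards, which puts the upper half at the lower array index. The sketch below shows the split and the matching decode, leaving out the Smi tagging (ToSmi) that the real code applies to each half:

#include <cassert>
#include <cstdint>
#include <vector>

void Store32BitValue(std::vector<uint32_t>* values, int* index, uint32_t value) {
  (*values)[--*index] = value & 0xffff;  // lower half word
  (*values)[--*index] = value >> 16;     // upper half word
}

uint32_t Load32BitValue(const std::vector<uint32_t>& values, int index) {
  return (values[index] << 16) | values[index + 1];
}

int main() {
  std::vector<uint32_t> values(2);
  int index = 2;  // encoded size of a single i32 payload: two slots
  Store32BitValue(&values, &index, 0xdeadbeef);
  assert(index == 0);
  assert(Load32BitValue(values, 0) == 0xdeadbeef);
  return 0;
}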
+ void StoreExceptionValue(ValueType type, Register values_array,
+ int* index_in_array, LiftoffRegList pinned) {
+ // TODO(clemensb): Handle more types.
+ DCHECK_EQ(kWasmI32, type);
+ LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
+ Store32BitExceptionValue(values_array, index_in_array, value.gp(), pinned);
}
+
+ void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>& imm,
+ const Vector<Value>& /* args */) {
+ LiftoffRegList pinned;
+
+ // Load the encoded size in a register for the builtin call.
+ int encoded_size = WasmExceptionPackage::GetEncodedSize(imm.exception);
+ LiftoffRegister encoded_size_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(encoded_size_reg, WasmValue(encoded_size));
+
+ // Call the WasmAllocateFixedArray builtin to create the values array.
+ DEBUG_CODE_COMMENT("call WasmAllocateFixedArray builtin");
+ compiler::CallDescriptor* create_values_descriptor =
+ GetBuiltinCallDescriptor<WasmAllocateFixedArrayDescriptor>(
+ compilation_zone_);
+
+ ValueKind create_values_sig_reps[] = {kPointerValueType,
+ LiftoffAssembler::kIntPtr};
+ ValueKindSig create_values_sig(1, 1, create_values_sig_reps);
+
+ __ PrepareBuiltinCall(
+ &create_values_sig, create_values_descriptor,
+ {LiftoffAssembler::VarState{kSmiValueType,
+ LiftoffRegister{encoded_size_reg}, 0}});
+ __ CallRuntimeStub(WasmCode::kWasmAllocateFixedArray);
+ DefineSafepoint();
+
+ // The FixedArray for the exception values is now in the first gp return
+ // register.
+ DCHECK_EQ(kReturnRegister0.code(),
+ create_values_descriptor->GetReturnLocation(0).AsRegister());
+ LiftoffRegister values_array{kReturnRegister0};
+ pinned.set(values_array);
+
+ // Now store the exception values in the FixedArray. Do this from last to
+ // first value, such that we can just pop them from the value stack.
+ DEBUG_CODE_COMMENT("fill values array");
+ int index = encoded_size;
+ auto* sig = imm.exception->sig;
+ for (size_t param_idx = sig->parameter_count(); param_idx > 0;
+ --param_idx) {
+ ValueType type = sig->GetParam(param_idx - 1);
+ if (type != kWasmI32) {
+ unsupported(decoder, kExceptionHandling,
+ "unsupported type in exception payload");
+ return;
+ }
+ StoreExceptionValue(type, values_array.gp(), &index, pinned);
+ }
+ DCHECK_EQ(0, index);
+
+ // Load the exception tag.
+ DEBUG_CODE_COMMENT("load exception tag");
+ Register exception_tag =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(exception_tag, ExceptionsTable, pinned);
+ __ LoadTaggedPointer(
+ exception_tag, exception_tag, no_reg,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), {});
+
+ // Finally, call WasmThrow.
+ DEBUG_CODE_COMMENT("call WasmThrow builtin");
+ compiler::CallDescriptor* throw_descriptor =
+ GetBuiltinCallDescriptor<WasmThrowDescriptor>(compilation_zone_);
+
+ ValueKind throw_sig_reps[] = {kPointerValueType, kPointerValueType};
+ ValueKindSig throw_sig(0, 2, throw_sig_reps);
+
+ __ PrepareBuiltinCall(
+ &throw_sig, throw_descriptor,
+ {LiftoffAssembler::VarState{kPointerValueType,
+ LiftoffRegister{exception_tag}, 0},
+ LiftoffAssembler::VarState{kPointerValueType, values_array, 0}});
+ source_position_table_builder_.AddPosition(
+ __ pc_offset(), SourcePosition(decoder->position()), true);
+ __ CallRuntimeStub(WasmCode::kWasmThrow);
+ DefineSafepoint();
+ }
+
void Rethrow(FullDecoder* decoder, const Value& exception) {
unsupported(decoder, kExceptionHandling, "rethrow");
}
@@ -3309,7 +3702,7 @@ class LiftoffCompiler {
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("atomic store to memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
LiftoffRegList outer_pinned;
if (FLAG_trace_wasm_memory) outer_pinned.set(index);
__ AtomicStore(addr, index, offset, value, type, outer_pinned);
@@ -3321,7 +3714,7 @@ class LiftoffCompiler {
void AtomicLoadMem(FullDecoder* decoder, LoadType type,
const MemoryAccessImmediate<validate>& imm) {
- ValueType value_type = type.value_type();
+ ValueKind kind = type.value_type().kind();
LiftoffRegister full_index = __ PopToRegister();
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, {}, kDoForceCheck);
@@ -3333,11 +3726,11 @@ class LiftoffCompiler {
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("atomic load from memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
- RegClass rc = reg_class_for(value_type);
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
+ RegClass rc = reg_class_for(kind);
LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
__ AtomicLoad(value, addr, index, offset, type, pinned);
- __ PushRegister(value_type, value);
+ __ PushRegister(kind, value);
if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(false, type.mem_type().representation(), index,
@@ -3351,7 +3744,7 @@ class LiftoffCompiler {
uintptr_t, LiftoffRegister,
LiftoffRegister,
StoreType)) {
- ValueType result_type = type.value_type();
+ ValueKind result_kind = type.value_type().kind();
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
#ifdef V8_TARGET_ARCH_IA32
@@ -3362,7 +3755,7 @@ class LiftoffCompiler {
LiftoffRegister result = value;
if (__ cache_state()->is_used(value)) {
result = pinned.set(__ GetUnusedRegister(value.reg_class(), pinned));
- __ Move(result, value, result_type);
+ __ Move(result, value, result_kind);
pinned.clear(value);
value = result;
}
@@ -3381,10 +3774,10 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
(asm_.*emit_fn)(addr, index, offset, value, result, type);
- __ PushRegister(result_type, result);
+ __ PushRegister(result_kind, result);
}
void AtomicCompareExchange(FullDecoder* decoder, StoreType type,
@@ -3405,7 +3798,7 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
__ emit_i32_add(addr, addr, index);
pinned.clear(LiftoffRegister(index));
LiftoffRegister new_value = pinned.set(__ PopToRegister(pinned));
@@ -3420,10 +3813,10 @@ class LiftoffCompiler {
// assembler now.
__ AtomicCompareExchange(addr, no_reg, offset, expected, new_value, result,
type);
- __ PushRegister(type.value_type(), result);
+ __ PushRegister(type.value_type().kind(), result);
return;
#else
- ValueType result_type = type.value_type();
+ ValueKind result_kind = type.value_type().kind();
LiftoffRegList pinned;
LiftoffRegister new_value = pinned.set(__ PopToRegister());
LiftoffRegister expected = pinned.set(__ PopToRegister(pinned));
@@ -3437,13 +3830,13 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
LiftoffRegister result =
- pinned.set(__ GetUnusedRegister(reg_class_for(result_type), pinned));
+ pinned.set(__ GetUnusedRegister(reg_class_for(result_kind), pinned));
__ AtomicCompareExchange(addr, index, offset, expected, new_value, result,
type);
- __ PushRegister(result_type, result);
+ __ PushRegister(result_kind, result);
#endif
}
@@ -3459,15 +3852,15 @@ class LiftoffCompiler {
StubCallMode::kCallWasmRuntimeStub); // stub call mode
}
- void AtomicWait(FullDecoder* decoder, ValueType type,
+ void AtomicWait(FullDecoder* decoder, ValueKind kind,
const MemoryAccessImmediate<validate>& imm) {
LiftoffRegister full_index = __ PeekToRegister(2, {});
Register index_reg =
- BoundsCheckMem(decoder, type.element_size_bytes(), imm.offset,
+ BoundsCheckMem(decoder, element_size_bytes(kind), imm.offset,
full_index, {}, kDoForceCheck);
if (index_reg == no_reg) return;
LiftoffRegList pinned = LiftoffRegList::ForRegs(index_reg);
- AlignmentCheckMem(decoder, type.element_size_bytes(), imm.offset, index_reg,
+ AlignmentCheckMem(decoder, element_size_bytes(kind), imm.offset, index_reg,
pinned);
uintptr_t offset = imm.offset;
@@ -3494,7 +3887,7 @@ class LiftoffCompiler {
WasmCode::RuntimeStubId target;
compiler::CallDescriptor* call_descriptor;
- if (type == kWasmI32) {
+ if (kind == kI32) {
if (kNeedI64RegPair) {
target = WasmCode::kWasmI32AtomicWait32;
call_descriptor =
@@ -3520,8 +3913,8 @@ class LiftoffCompiler {
}
}
- ValueType sig_reps[] = {kPointerValueType, type, kWasmI64};
- FunctionSig sig(0, 3, sig_reps);
+ ValueKind sig_reps[] = {kPointerValueType, kind, kI64};
+ ValueKindSig sig(0, 3, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor,
{index, expected_value, timeout});
@@ -3532,19 +3925,17 @@ class LiftoffCompiler {
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
- __ PushRegister(kWasmI32, LiftoffRegister(kReturnRegister0));
+ __ PushRegister(kI32, LiftoffRegister(kReturnRegister0));
}
void AtomicNotify(FullDecoder* decoder,
const MemoryAccessImmediate<validate>& imm) {
LiftoffRegister full_index = __ PeekToRegister(1, {});
- Register index_reg =
- BoundsCheckMem(decoder, kWasmI32.element_size_bytes(), imm.offset,
- full_index, {}, kDoForceCheck);
+ Register index_reg = BoundsCheckMem(decoder, kInt32Size, imm.offset,
+ full_index, {}, kDoForceCheck);
if (index_reg == no_reg) return;
LiftoffRegList pinned = LiftoffRegList::ForRegs(index_reg);
- AlignmentCheckMem(decoder, kWasmI32.element_size_bytes(), imm.offset,
- index_reg, pinned);
+ AlignmentCheckMem(decoder, kInt32Size, imm.offset, index_reg, pinned);
uintptr_t offset = imm.offset;
index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
@@ -3558,8 +3949,8 @@ class LiftoffCompiler {
__ emit_ptrsize_addi(index_plus_offset, index_plus_offset, offset);
}
- ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32};
- FunctionSig sig(1, 2, sig_reps);
+ ValueKind sig_reps[] = {kI32, kPointerValueType, kI32};
+ ValueKindSig sig(1, 2, sig_reps);
auto call_descriptor =
GetBuiltinCallDescriptor<WasmAtomicNotifyDescriptor>(compilation_zone_);
@@ -3575,7 +3966,7 @@ class LiftoffCompiler {
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
- __ PushRegister(kWasmI32, LiftoffRegister(kReturnRegister0));
+ __ PushRegister(kI32, LiftoffRegister(kReturnRegister0));
}
#define ATOMIC_STORE_LIST(V) \
@@ -3685,10 +4076,10 @@ class LiftoffCompiler {
#undef ATOMIC_COMPARE_EXCHANGE_OP
case kExprI32AtomicWait:
- AtomicWait(decoder, kWasmI32, imm);
+ AtomicWait(decoder, kI32, imm);
break;
case kExprI64AtomicWait:
- AtomicWait(decoder, kWasmI64, imm);
+ AtomicWait(decoder, kI64, imm);
break;
case kExprAtomicNotify:
AtomicNotify(decoder, imm);
@@ -3721,18 +4112,17 @@ class LiftoffCompiler {
__ LoadConstant(segment_index, WasmValue(imm.data_segment_index));
ExternalReference ext_ref = ExternalReference::wasm_memory_init();
- ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32,
- kWasmI32, kWasmI32, kWasmI32};
- FunctionSig sig(1, 5, sig_reps);
+ ValueKind sig_reps[] = {kI32, kPointerValueType, kI32, kI32, kI32, kI32};
+ ValueKindSig sig(1, 5, sig_reps);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, src,
segment_index, size};
// We don't need the instance anymore after the call. We can use the
// register for the result.
LiftoffRegister result(instance);
- GenerateCCall(&result, &sig, kWasmStmt, args, ext_ref);
+ GenerateCCall(&result, &sig, kStmt, args, ext_ref);
Label* trap_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds);
- __ emit_cond_jump(kEqual, trap_label, kWasmI32, result.gp());
+ __ emit_cond_jump(kEqual, trap_label, kI32, result.gp());
}
void DataDrop(FullDecoder* decoder, const DataDropImmediate<validate>& imm) {
@@ -3740,13 +4130,13 @@ class LiftoffCompiler {
Register seg_size_array =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(seg_size_array, DataSegmentSizes, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(seg_size_array, DataSegmentSizes, kSystemPointerSize,
+ pinned);
LiftoffRegister seg_index =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
// Scale the seg_index for the array access.
- __ LoadConstant(seg_index,
- WasmValue(imm.index << kWasmI32.element_size_log2()));
+ __ LoadConstant(seg_index, WasmValue(imm.index << element_size_log2(kI32)));
// Set the length of the segment to '0' to drop it.
LiftoffRegister null_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
@@ -3765,17 +4155,16 @@ class LiftoffCompiler {
Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ FillInstanceInto(instance);
ExternalReference ext_ref = ExternalReference::wasm_memory_copy();
- ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32, kWasmI32,
- kWasmI32};
- FunctionSig sig(1, 4, sig_reps);
+ ValueKind sig_reps[] = {kI32, kPointerValueType, kI32, kI32, kI32};
+ ValueKindSig sig(1, 4, sig_reps);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, src, size};
// We don't need the instance anymore after the call. We can use the
// register for the result.
LiftoffRegister result(instance);
- GenerateCCall(&result, &sig, kWasmStmt, args, ext_ref);
+ GenerateCCall(&result, &sig, kStmt, args, ext_ref);
Label* trap_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds);
- __ emit_cond_jump(kEqual, trap_label, kWasmI32, result.gp());
+ __ emit_cond_jump(kEqual, trap_label, kI32, result.gp());
}
void MemoryFill(FullDecoder* decoder,
@@ -3788,17 +4177,23 @@ class LiftoffCompiler {
Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ FillInstanceInto(instance);
ExternalReference ext_ref = ExternalReference::wasm_memory_fill();
- ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32, kWasmI32,
- kWasmI32};
- FunctionSig sig(1, 4, sig_reps);
+ ValueKind sig_reps[] = {kI32, kPointerValueType, kI32, kI32, kI32};
+ ValueKindSig sig(1, 4, sig_reps);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, value, size};
// We don't need the instance anymore after the call. We can use the
// register for the result.
LiftoffRegister result(instance);
- GenerateCCall(&result, &sig, kWasmStmt, args, ext_ref);
+ GenerateCCall(&result, &sig, kStmt, args, ext_ref);
Label* trap_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds);
- __ emit_cond_jump(kEqual, trap_label, kWasmI32, result.gp());
+ __ emit_cond_jump(kEqual, trap_label, kI32, result.gp());
+ }
+
+ void LoadSmi(LiftoffRegister reg, int value) {
+ Address smi_value = Smi::FromInt(value).ptr();
+ using smi_type =
+ std::conditional_t<kSmiValueType == kI32, int32_t, int64_t>;
+ __ LoadConstant(reg, WasmValue{static_cast<smi_type>(smi_value)});
}
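LoadSmi above materializes Smi::FromInt(value).ptr() as a plain integer constant of the Smi's width, replacing the earlier per-architecture #if blocks in TableInit/TableCopy. A sketch of the two Smi layouts this covers; the shifts below are a simplification of what kSmiShiftSize + kSmiTagSize encode in the real code:

#include <cassert>
#include <cstdint>

// A Smi is an integer stored in a tagged pointer-sized word, shifted left so
// the low tag bit(s) are zero:
//   - 31-bit Smis (32-bit targets / pointer compression): value << 1
//   - 32-bit Smis (64-bit targets, no compression):       value << 32
uintptr_t SmiFromInt(int value, bool smi_values_are_32_bits) {
  int shift = smi_values_are_32_bits ? 32 : 1;
  return static_cast<uintptr_t>(static_cast<intptr_t>(value)) << shift;
}

int main() {
  assert(SmiFromInt(5, false) == 10);                     // 5 << 1
  if (sizeof(uintptr_t) == 8) {
    assert(SmiFromInt(5, true) == (uintptr_t{5} << 32));  // 5 << 32
  }
  return 0;
}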
void TableInit(FullDecoder* decoder, const TableInitImmediate<validate>& imm,
@@ -3807,24 +4202,13 @@ class LiftoffCompiler {
LiftoffRegister table_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
-#if V8_TARGET_ARCH_32_BIT || defined(V8_COMPRESS_POINTERS)
- WasmValue table_index_val(
- static_cast<uint32_t>(Smi::FromInt(imm.table.index).ptr()));
- WasmValue segment_index_val(
- static_cast<uint32_t>(Smi::FromInt(imm.elem_segment_index).ptr()));
-#else
- WasmValue table_index_val(
- static_cast<uint64_t>(Smi::FromInt(imm.table.index).ptr()));
- WasmValue segment_index_val(
- static_cast<uint64_t>(Smi::FromInt(imm.elem_segment_index).ptr()));
-#endif
- __ LoadConstant(table_index_reg, table_index_val);
+ LoadSmi(table_index_reg, imm.table.index);
LiftoffAssembler::VarState table_index(kPointerValueType, table_index_reg,
0);
LiftoffRegister segment_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- __ LoadConstant(segment_index_reg, segment_index_val);
+ LoadSmi(segment_index_reg, imm.elem_segment_index);
LiftoffAssembler::VarState segment_index(kPointerValueType,
segment_index_reg, 0);
@@ -3836,9 +4220,8 @@ class LiftoffCompiler {
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmTableInitDescriptor>(compilation_zone_);
- ValueType sig_reps[] = {kWasmI32, kWasmI32, kWasmI32,
- table_index_val.type(), segment_index_val.type()};
- FunctionSig sig(0, 5, sig_reps);
+ ValueKind sig_reps[] = {kI32, kI32, kI32, kSmiValueType, kSmiValueType};
+ ValueKindSig sig(0, 5, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor,
{dst, src, size, table_index, segment_index});
@@ -3856,7 +4239,7 @@ class LiftoffCompiler {
Register dropped_elem_segments =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(dropped_elem_segments, DroppedElemSegments,
- kSystemPointerSize);
+ kSystemPointerSize, pinned);
LiftoffRegister seg_index =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
@@ -3874,27 +4257,15 @@ class LiftoffCompiler {
Vector<Value> args) {
LiftoffRegList pinned;
-#if V8_TARGET_ARCH_32_BIT || defined(V8_COMPRESS_POINTERS)
- WasmValue table_dst_index_val(
- static_cast<uint32_t>(Smi::FromInt(imm.table_dst.index).ptr()));
- WasmValue table_src_index_val(
- static_cast<uint32_t>(Smi::FromInt(imm.table_src.index).ptr()));
-#else
- WasmValue table_dst_index_val(
- static_cast<uint64_t>(Smi::FromInt(imm.table_dst.index).ptr()));
- WasmValue table_src_index_val(
- static_cast<uint64_t>(Smi::FromInt(imm.table_src.index).ptr()));
-#endif
-
LiftoffRegister table_dst_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- __ LoadConstant(table_dst_index_reg, table_dst_index_val);
+ LoadSmi(table_dst_index_reg, imm.table_dst.index);
LiftoffAssembler::VarState table_dst_index(kPointerValueType,
table_dst_index_reg, 0);
LiftoffRegister table_src_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- __ LoadConstant(table_src_index_reg, table_src_index_val);
+ LoadSmi(table_src_index_reg, imm.table_src.index);
LiftoffAssembler::VarState table_src_index(kPointerValueType,
table_src_index_reg, 0);
@@ -3906,10 +4277,8 @@ class LiftoffCompiler {
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmTableCopyDescriptor>(compilation_zone_);
- ValueType sig_reps[] = {kWasmI32, kWasmI32, kWasmI32,
- table_dst_index_val.type(),
- table_src_index_val.type()};
- FunctionSig sig(0, 5, sig_reps);
+ ValueKind sig_reps[] = {kI32, kI32, kI32, kSmiValueType, kSmiValueType};
+ ValueKindSig sig(0, 5, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor,
{dst, src, size, table_dst_index, table_src_index});
@@ -3940,13 +4309,12 @@ class LiftoffCompiler {
void StructNew(FullDecoder* decoder,
const StructIndexImmediate<validate>& imm, const Value& rtt,
bool initial_values_on_stack) {
- ValueType struct_value_type = ValueType::Ref(imm.index, kNonNullable);
WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateStructWithRtt;
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmAllocateStructWithRttDescriptor>(
compilation_zone_);
- ValueType sig_reps[] = {struct_value_type, rtt.type};
- FunctionSig sig(1, 1, sig_reps);
+ ValueKind sig_reps[] = {kRef, rtt.type.kind()};
+ ValueKindSig sig(1, 1, sig_reps);
LiftoffAssembler::VarState rtt_value =
__ cache_state()->stack_state.end()[-1];
__ PrepareBuiltinCall(&sig, call_descriptor, {rtt_value});
@@ -3960,19 +4328,19 @@ class LiftoffCompiler {
for (uint32_t i = imm.struct_type->field_count(); i > 0;) {
i--;
int offset = StructFieldOffset(imm.struct_type, i);
- ValueType field_type = imm.struct_type->field(i);
+ ValueKind field_kind = imm.struct_type->field(i).kind();
LiftoffRegister value = initial_values_on_stack
? pinned.set(__ PopToRegister(pinned))
: pinned.set(__ GetUnusedRegister(
- reg_class_for(field_type), pinned));
+ reg_class_for(field_kind), pinned));
if (!initial_values_on_stack) {
- if (!CheckSupportedType(decoder, field_type, "default value")) return;
- SetDefaultValue(value, field_type, pinned);
+ if (!CheckSupportedType(decoder, field_kind, "default value")) return;
+ SetDefaultValue(value, field_kind, pinned);
}
- StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_type);
+ StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_kind);
pinned.clear(value);
}
- __ PushRegister(struct_value_type, obj);
+ __ PushRegister(kRef, obj);
}
void StructNewWithRtt(FullDecoder* decoder,
@@ -3991,34 +4359,34 @@ class LiftoffCompiler {
const FieldIndexImmediate<validate>& field, bool is_signed,
Value* result) {
const StructType* struct_type = field.struct_index.struct_type;
- ValueType field_type = struct_type->field(field.index);
- if (!CheckSupportedType(decoder, field_type, "field load")) return;
+ ValueKind field_kind = struct_type->field(field.index).kind();
+ if (!CheckSupportedType(decoder, field_kind, "field load")) return;
int offset = StructFieldOffset(struct_type, field.index);
LiftoffRegList pinned;
LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type);
LiftoffRegister value =
- pinned.set(__ GetUnusedRegister(reg_class_for(field_type), pinned));
- LoadObjectField(value, obj.gp(), no_reg, offset, field_type, is_signed,
+ __ GetUnusedRegister(reg_class_for(field_kind), pinned);
+ LoadObjectField(value, obj.gp(), no_reg, offset, field_kind, is_signed,
pinned);
- __ PushRegister(field_type.Unpacked(), value);
+ __ PushRegister(unpacked(field_kind), value);
}
void StructSet(FullDecoder* decoder, const Value& struct_obj,
const FieldIndexImmediate<validate>& field,
const Value& field_value) {
const StructType* struct_type = field.struct_index.struct_type;
- ValueType field_type = struct_type->field(field.index);
+ ValueKind field_kind = struct_type->field(field.index).kind();
int offset = StructFieldOffset(struct_type, field.index);
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type);
- StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_type);
+ StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_kind);
}
void ArrayNew(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
- ValueType rtt_type, bool initial_value_on_stack) {
+ ValueKind rtt_type, bool initial_value_on_stack) {
// Max length check.
{
LiftoffRegister length =
@@ -4028,24 +4396,23 @@ class LiftoffCompiler {
__ emit_i32_cond_jumpi(kUnsignedGreaterThan, trap_label, length.gp(),
static_cast<int>(wasm::kV8MaxWasmArrayLength));
}
- ValueType array_value_type = ValueType::Ref(imm.index, kNonNullable);
- ValueType elem_type = imm.array_type->element_type();
- int elem_size = elem_type.element_size_bytes();
+ ValueKind elem_kind = imm.array_type->element_type().kind();
+ int elem_size = element_size_bytes(elem_kind);
// Allocate the array.
{
WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateArrayWithRtt;
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmAllocateArrayWithRttDescriptor>(
compilation_zone_);
- ValueType sig_reps[] = {array_value_type, rtt_type, kWasmI32, kWasmI32};
- FunctionSig sig(1, 3, sig_reps);
+ ValueKind sig_reps[] = {kRef, rtt_type, kI32, kI32};
+ ValueKindSig sig(1, 3, sig_reps);
LiftoffAssembler::VarState rtt_var =
__ cache_state()->stack_state.end()[-1];
LiftoffAssembler::VarState length_var =
__ cache_state()->stack_state.end()[-2];
LiftoffRegister elem_size_reg = __ GetUnusedRegister(kGpReg, {});
__ LoadConstant(elem_size_reg, WasmValue(elem_size));
- LiftoffAssembler::VarState elem_size_var(kWasmI32, elem_size_reg, 0);
+ LiftoffAssembler::VarState elem_size_var(kI32, elem_size_reg, 0);
__ PrepareBuiltinCall(&sig, call_descriptor,
{rtt_var, length_var, elem_size_var});
__ CallRuntimeStub(target);
@@ -4060,10 +4427,10 @@ class LiftoffCompiler {
LiftoffRegister value = initial_value_on_stack
? pinned.set(__ PopToRegister(pinned))
: pinned.set(__ GetUnusedRegister(
- reg_class_for(elem_type), pinned));
+ reg_class_for(elem_kind), pinned));
if (!initial_value_on_stack) {
- if (!CheckSupportedType(decoder, elem_type, "default value")) return;
- SetDefaultValue(value, elem_type, pinned);
+ if (!CheckSupportedType(decoder, elem_kind, "default value")) return;
+ SetDefaultValue(value, elem_kind, pinned);
}
// Initialize the array's elements.
@@ -4072,34 +4439,34 @@ class LiftoffCompiler {
offset,
WasmValue(wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize)));
LiftoffRegister end_offset = length;
- if (elem_type.element_size_log2() != 0) {
+ if (element_size_log2(elem_kind) != 0) {
__ emit_i32_shli(end_offset.gp(), length.gp(),
- elem_type.element_size_log2());
+ element_size_log2(elem_kind));
}
__ emit_i32_add(end_offset.gp(), end_offset.gp(), offset.gp());
Label loop, done;
__ bind(&loop);
- __ emit_cond_jump(kUnsignedGreaterEqual, &done, kWasmI32, offset.gp(),
+ __ emit_cond_jump(kUnsignedGreaterEqual, &done, kI32, offset.gp(),
end_offset.gp());
- StoreObjectField(obj.gp(), offset.gp(), 0, value, pinned, elem_type);
+ StoreObjectField(obj.gp(), offset.gp(), 0, value, pinned, elem_kind);
__ emit_i32_addi(offset.gp(), offset.gp(), elem_size);
__ emit_jump(&loop);
__ bind(&done);
- __ PushRegister(array_value_type, obj);
+ __ PushRegister(kRef, obj);
}
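
The loop emitted above walks from the array header to header + length * element size, storing the (default or popped) value at each step. An illustrative plain-C++ equivalent of what the generated code does, not V8 code:

#include <cstdint>
#include <cstring>

// offset starts at the header size; end_offset = header + length * elem_size;
// one store per element, then advance by elem_size (mirrors the emitted
// cond_jump / StoreObjectField / addi / jump sequence).
void InitializeArrayElements(uint8_t* object, uint32_t header_size,
                             uint32_t length, uint32_t elem_size,
                             const void* initial_value) {
  uint32_t offset = header_size;
  uint32_t end_offset = header_size + length * elem_size;
  while (offset < end_offset) {
    std::memcpy(object + offset, initial_value, elem_size);
    offset += elem_size;
  }
}
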
void ArrayNewWithRtt(FullDecoder* decoder,
const ArrayIndexImmediate<validate>& imm,
const Value& length_value, const Value& initial_value,
const Value& rtt, Value* result) {
- ArrayNew(decoder, imm, rtt.type, true);
+ ArrayNew(decoder, imm, rtt.type.kind(), true);
}
void ArrayNewDefault(FullDecoder* decoder,
const ArrayIndexImmediate<validate>& imm,
const Value& length, const Value& rtt, Value* result) {
- ArrayNew(decoder, imm, rtt.type, false);
+ ArrayNew(decoder, imm, rtt.type.kind(), false);
}
void ArrayGet(FullDecoder* decoder, const Value& array_obj,
@@ -4110,17 +4477,17 @@ class LiftoffCompiler {
LiftoffRegister array = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, array.gp(), pinned, array_obj.type);
BoundsCheck(decoder, array, index, pinned);
- ValueType elem_type = imm.array_type->element_type();
- if (!CheckSupportedType(decoder, elem_type, "array load")) return;
- int elem_size_shift = elem_type.element_size_log2();
+ ValueKind elem_kind = imm.array_type->element_type().kind();
+ if (!CheckSupportedType(decoder, elem_kind, "array load")) return;
+ int elem_size_shift = element_size_log2(elem_kind);
if (elem_size_shift != 0) {
__ emit_i32_shli(index.gp(), index.gp(), elem_size_shift);
}
LiftoffRegister value = __ GetUnusedRegister(kGpReg, {array}, pinned);
LoadObjectField(value, array.gp(), index.gp(),
wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize),
- elem_type, is_signed, pinned);
- __ PushRegister(elem_type.Unpacked(), value);
+ elem_kind, is_signed, pinned);
+ __ PushRegister(unpacked(elem_kind), value);
}
void ArraySet(FullDecoder* decoder, const Value& array_obj,
@@ -4132,25 +4499,24 @@ class LiftoffCompiler {
LiftoffRegister array = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, array.gp(), pinned, array_obj.type);
BoundsCheck(decoder, array, index, pinned);
- ValueType elem_type = imm.array_type->element_type();
- int elem_size_shift = elem_type.element_size_log2();
+ ValueKind elem_kind = imm.array_type->element_type().kind();
+ int elem_size_shift = element_size_log2(elem_kind);
if (elem_size_shift != 0) {
__ emit_i32_shli(index.gp(), index.gp(), elem_size_shift);
}
StoreObjectField(array.gp(), index.gp(),
wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize),
- value, pinned, elem_type);
+ value, pinned, elem_kind);
}
void ArrayLen(FullDecoder* decoder, const Value& array_obj, Value* result) {
LiftoffRegList pinned;
LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, obj.gp(), pinned, array_obj.type);
- LiftoffRegister len = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister len = __ GetUnusedRegister(kGpReg, pinned);
int kLengthOffset = wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset);
- LoadObjectField(len, obj.gp(), no_reg, kLengthOffset, kWasmI32, false,
- pinned);
- __ PushRegister(kWasmI32, len);
+ LoadObjectField(len, obj.gp(), no_reg, kLengthOffset, kI32, false, pinned);
+ __ PushRegister(kI32, len);
}
// 1 bit Smi tag, 31 bits Smi shift, 1 bit i31ref high-bit truncation.
@@ -4166,7 +4532,7 @@ class LiftoffCompiler {
DCHECK(SmiValuesAre32Bits());
__ emit_i64_shli(dst, src, kI31To32BitSmiShift);
}
- __ PushRegister(kWasmI31Ref, dst);
+ __ PushRegister(kRef, dst);
}
void I31GetS(FullDecoder* decoder, const Value& input, Value* result) {
@@ -4178,7 +4544,7 @@ class LiftoffCompiler {
DCHECK(SmiValuesAre32Bits());
__ emit_i64_sari(dst, src, kI31To32BitSmiShift);
}
- __ PushRegister(kWasmI32, dst);
+ __ PushRegister(kI32, dst);
}
void I31GetU(FullDecoder* decoder, const Value& input, Value* result) {
@@ -4190,63 +4556,32 @@ class LiftoffCompiler {
DCHECK(SmiValuesAre32Bits());
__ emit_i64_shri(dst, src, kI31To32BitSmiShift);
}
- __ PushRegister(kWasmI32, dst);
+ __ PushRegister(kI32, dst);
}
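
The kI31To32BitSmiShift constant used above (64-bit targets with 32-bit Smi payloads) boxes a 31-bit value by shifting it past one tag bit and a 32-bit payload slot, truncating the input's high bit on the way. A self-contained arithmetic sketch, not V8 code:

#include <cassert>
#include <cstdint>

constexpr int kI31To32BitSmiShift = 33;  // 1 tag bit + 32-bit Smi payload

uint64_t I31New(int32_t value) {  // box
  return static_cast<uint64_t>(static_cast<int64_t>(value)) << kI31To32BitSmiShift;
}
int32_t I31GetS(uint64_t boxed) {  // sign-extending unbox (arithmetic shift)
  return static_cast<int32_t>(static_cast<int64_t>(boxed) >> kI31To32BitSmiShift);
}
uint32_t I31GetU(uint64_t boxed) {  // zero-extending unbox (logical shift)
  return static_cast<uint32_t>(boxed >> kI31To32BitSmiShift);
}

int main() {
  assert(I31GetS(I31New(123)) == 123);
  assert(I31GetS(I31New(-5)) == -5);
  assert(I31GetU(I31New(-5)) == 0x7ffffffbu);  // 2^31 - 5: high bit truncated
}
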
- void RttCanon(FullDecoder* decoder, const HeapTypeImmediate<validate>& imm,
- Value* result) {
+ void RttCanon(FullDecoder* decoder, uint32_t type_index, Value* result) {
LiftoffRegister rtt = __ GetUnusedRegister(kGpReg, {});
- RootIndex index;
- switch (imm.type.representation()) {
- case wasm::HeapType::kEq:
- index = RootIndex::kWasmRttEqrefMap;
- break;
- case wasm::HeapType::kExtern:
- index = RootIndex::kWasmRttExternrefMap;
- break;
- case wasm::HeapType::kFunc:
- index = RootIndex::kWasmRttFuncrefMap;
- break;
- case wasm::HeapType::kI31:
- index = RootIndex::kWasmRttI31refMap;
- break;
- case wasm::HeapType::kAny:
- index = RootIndex::kWasmRttAnyrefMap;
- break;
- case wasm::HeapType::kBottom:
- UNREACHABLE();
- default:
- // User-defined type.
- LOAD_TAGGED_PTR_INSTANCE_FIELD(rtt.gp(), ManagedObjectMaps);
- __ LoadTaggedPointer(
- rtt.gp(), rtt.gp(), no_reg,
- wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
- imm.type.ref_index()),
- {});
- __ PushRegister(ValueType::Rtt(imm.type, 1), rtt);
- return;
- }
- LOAD_INSTANCE_FIELD(rtt.gp(), IsolateRoot, kSystemPointerSize);
- __ LoadTaggedPointer(rtt.gp(), rtt.gp(), no_reg,
- IsolateData::root_slot_offset(index), {});
- __ PushRegister(ValueType::Rtt(imm.type, 1), rtt);
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(rtt.gp(), ManagedObjectMaps, {});
+ __ LoadTaggedPointer(
+ rtt.gp(), rtt.gp(), no_reg,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(type_index), {});
+ __ PushRegister(kRttWithDepth, rtt);
}
- void RttSub(FullDecoder* decoder, const HeapTypeImmediate<validate>& imm,
- const Value& parent, Value* result) {
- ValueType parent_value_type = parent.type;
- ValueType rtt_value_type =
- ValueType::Rtt(imm.type, parent_value_type.depth() + 1);
+ void RttSub(FullDecoder* decoder, uint32_t type_index, const Value& parent,
+ Value* result) {
+ ValueKind parent_value_kind = parent.type.kind();
+ ValueKind rtt_value_type = kRttWithDepth;
WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateRtt;
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmAllocateRttDescriptor>(compilation_zone_);
- ValueType sig_reps[] = {rtt_value_type, kWasmI32, parent_value_type};
- FunctionSig sig(1, 2, sig_reps);
+ ValueKind sig_reps[] = {rtt_value_type, kI32, parent_value_kind};
+ ValueKindSig sig(1, 2, sig_reps);
LiftoffAssembler::VarState parent_var =
__ cache_state()->stack_state.end()[-1];
LiftoffRegister type_reg = __ GetUnusedRegister(kGpReg, {});
- __ LoadConstant(type_reg, WasmValue(imm.type.representation()));
- LiftoffAssembler::VarState type_var(kWasmI32, type_reg, 0);
+ __ LoadConstant(type_reg, WasmValue(type_index));
+ LiftoffAssembler::VarState type_var(kI32, type_reg, 0);
__ PrepareBuiltinCall(&sig, call_descriptor, {type_var, parent_var});
__ CallRuntimeStub(target);
DefineSafepoint();
@@ -4255,67 +4590,69 @@ class LiftoffCompiler {
__ PushRegister(rtt_value_type, LiftoffRegister(kReturnRegister0));
}
+ enum NullSucceeds : bool { // --
+ kNullSucceeds = true,
+ kNullFails = false
+ };
+
// Falls through on match (=successful type check).
// Returns the register containing the object.
LiftoffRegister SubtypeCheck(FullDecoder* decoder, const Value& obj,
const Value& rtt, Label* no_match,
+ NullSucceeds null_succeeds,
LiftoffRegList pinned = {},
Register opt_scratch = no_reg) {
Label match;
LiftoffRegister rtt_reg = pinned.set(__ PopToRegister(pinned));
LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned));
- bool obj_can_be_i31 = IsSubtypeOf(kWasmI31Ref, obj.type, decoder->module_);
- bool rtt_is_i31 = rtt.type.heap_representation() == HeapType::kI31;
- bool i31_check_only = obj_can_be_i31 && rtt_is_i31;
- if (i31_check_only) {
- __ emit_smi_check(obj_reg.gp(), no_match,
- LiftoffAssembler::kJumpOnNotSmi);
- // Emit no further code, just fall through to {match}.
- } else {
- // Reserve all temporary registers up front, so that the cache state
- // tracking doesn't get confused by the following conditional jumps.
- LiftoffRegister tmp1 =
- opt_scratch != no_reg
- ? LiftoffRegister(opt_scratch)
- : pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LiftoffRegister tmp2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- if (obj_can_be_i31) {
- DCHECK(!rtt_is_i31);
- __ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnSmi);
- }
- if (obj.type.is_nullable()) {
- LoadNullValue(tmp1.gp(), pinned);
- __ emit_cond_jump(kEqual, no_match, obj.type, obj_reg.gp(), tmp1.gp());
- }
-
- // At this point, the object is neither null nor an i31ref. Perform
- // a regular type check. Check for exact match first.
- __ LoadMap(tmp1.gp(), obj_reg.gp());
- // {tmp1} now holds the object's map.
- __ emit_cond_jump(kEqual, &match, rtt.type, tmp1.gp(), rtt_reg.gp());
+ // Reserve all temporary registers up front, so that the cache state
+ // tracking doesn't get confused by the following conditional jumps.
+ LiftoffRegister tmp1 =
+ opt_scratch != no_reg
+ ? LiftoffRegister(opt_scratch)
+ : pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister tmp2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ if (obj.type.is_nullable()) {
+ LoadNullValue(tmp1.gp(), pinned);
+ __ emit_cond_jump(kEqual, null_succeeds ? &match : no_match,
+ obj.type.kind(), obj_reg.gp(), tmp1.gp());
+ }
- // If the object isn't guaranteed to be an array or struct, check that.
- // Subsequent code wouldn't handle e.g. funcrefs.
- if (!is_data_ref_type(obj.type, decoder->module_)) {
- EmitDataRefCheck(tmp1.gp(), no_match, tmp2, pinned);
- }
+ // Perform a regular type check. Check for exact match first.
+ __ LoadMap(tmp1.gp(), obj_reg.gp());
+ // {tmp1} now holds the object's map.
+
+ if (decoder->module_->has_signature(rtt.type.ref_index())) {
+ // Function case: currently, the only way for a function to match an rtt
+ // is if its map is equal to that rtt.
+ __ emit_cond_jump(kUnequal, no_match, rtt.type.kind(), tmp1.gp(),
+ rtt_reg.gp());
+ __ bind(&match);
+ return obj_reg;
+ }
- // Constant-time subtyping check: load exactly one candidate RTT from the
- // supertypes list.
- // Step 1: load the WasmTypeInfo into {tmp1}.
- constexpr int kTypeInfoOffset = wasm::ObjectAccess::ToTagged(
- Map::kConstructorOrBackPointerOrNativeContextOffset);
- __ LoadTaggedPointer(tmp1.gp(), tmp1.gp(), no_reg, kTypeInfoOffset,
- pinned);
- // Step 2: load the super types list into {tmp1}.
- constexpr int kSuperTypesOffset =
- wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset);
- __ LoadTaggedPointer(tmp1.gp(), tmp1.gp(), no_reg, kSuperTypesOffset,
- pinned);
- // Step 3: check the list's length.
- LiftoffRegister list_length = tmp2;
- __ LoadFixedArrayLengthAsInt32(list_length, tmp1.gp(), pinned);
+ // The rest of the function handles the array/struct case.
+
+ // Check for rtt equality, and if not, check if the rtt is a struct/array
+ // rtt.
+ __ emit_cond_jump(kEqual, &match, rtt.type.kind(), tmp1.gp(), rtt_reg.gp());
+
+ // Constant-time subtyping check: load exactly one candidate RTT from the
+ // supertypes list.
+ // Step 1: load the WasmTypeInfo into {tmp1}.
+ constexpr int kTypeInfoOffset = wasm::ObjectAccess::ToTagged(
+ Map::kConstructorOrBackPointerOrNativeContextOffset);
+ __ LoadTaggedPointer(tmp1.gp(), tmp1.gp(), no_reg, kTypeInfoOffset, pinned);
+ // Step 2: load the super types list into {tmp1}.
+ constexpr int kSuperTypesOffset =
+ wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset);
+ __ LoadTaggedPointer(tmp1.gp(), tmp1.gp(), no_reg, kSuperTypesOffset,
+ pinned);
+ // Step 3: check the list's length.
+ LiftoffRegister list_length = tmp2;
+ __ LoadFixedArrayLengthAsInt32(list_length, tmp1.gp(), pinned);
+ if (rtt.type.has_depth()) {
__ emit_i32_cond_jumpi(kUnsignedLessEqual, no_match, list_length.gp(),
rtt.type.depth());
// Step 4: load the candidate list slot into {tmp1}, and compare it.
@@ -4323,20 +4660,41 @@ class LiftoffCompiler {
tmp1.gp(), tmp1.gp(), no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(rtt.type.depth()),
pinned);
- __ emit_cond_jump(kUnequal, no_match, rtt.type, tmp1.gp(), rtt_reg.gp());
- // Fall through to {match}.
+ __ emit_cond_jump(kUnequal, no_match, rtt.type.kind(), tmp1.gp(),
+ rtt_reg.gp());
+ } else {
+ // Preserve {obj_reg} across the call.
+ LiftoffRegList saved_regs = LiftoffRegList::ForRegs(obj_reg);
+ __ PushRegisters(saved_regs);
+ WasmCode::RuntimeStubId target = WasmCode::kWasmSubtypeCheck;
+ compiler::CallDescriptor* call_descriptor =
+ GetBuiltinCallDescriptor<WasmSubtypeCheckDescriptor>(
+ compilation_zone_);
+ ValueKind sig_reps[] = {kI32, kOptRef, rtt.type.kind()};
+ ValueKindSig sig(1, 2, sig_reps);
+ LiftoffAssembler::VarState rtt_state(kPointerValueType, rtt_reg, 0);
+ LiftoffAssembler::VarState tmp1_state(kPointerValueType, tmp1, 0);
+ __ PrepareBuiltinCall(&sig, call_descriptor, {tmp1_state, rtt_state});
+ __ CallRuntimeStub(target);
+ DefineSafepoint();
+ __ PopRegisters(saved_regs);
+ __ Move(tmp1.gp(), kReturnRegister0, kI32);
+ __ emit_i32_cond_jumpi(kEqual, no_match, tmp1.gp(), 0);
}
+
+ // Fall through to {match}.
__ bind(&match);
return obj_reg;
}
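
The "constant-time subtyping check" above avoids walking a supertype chain: each RTT's type info stores the full list of its supertypes indexed by depth, so the check is one length comparison plus one indexed load. A standalone sketch with illustrative names, not V8's data structures:

#include <vector>

struct Rtt {
  unsigned depth = 0;                  // distance from the root type
  std::vector<const Rtt*> supertypes;  // supertypes[i] has depth i
};

// Exact match, or the candidate appears at its own depth in the object's
// supertype list; cost is independent of how deep the hierarchy is.
bool Matches(const Rtt* obj_rtt, const Rtt* candidate) {
  if (obj_rtt == candidate) return true;
  if (candidate->depth >= obj_rtt->supertypes.size()) return false;
  return obj_rtt->supertypes[candidate->depth] == candidate;
}
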
void RefTest(FullDecoder* decoder, const Value& obj, const Value& rtt,
- Value* result_val) {
+ Value* /* result_val */) {
Label return_false, done;
LiftoffRegList pinned;
LiftoffRegister result = pinned.set(__ GetUnusedRegister(kGpReg, {}));
- SubtypeCheck(decoder, obj, rtt, &return_false, pinned, result.gp());
+ SubtypeCheck(decoder, obj, rtt, &return_false, kNullFails, pinned,
+ result.gp());
__ LoadConstant(result, WasmValue(1));
// TODO(jkummerow): Emit near jumps on platforms where it's more efficient.
@@ -4345,16 +4703,16 @@ class LiftoffCompiler {
__ bind(&return_false);
__ LoadConstant(result, WasmValue(0));
__ bind(&done);
- __ PushRegister(kWasmI32, result);
+ __ PushRegister(kI32, result);
}
void RefCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
Value* result) {
Label* trap_label = AddOutOfLineTrap(decoder->position(),
WasmCode::kThrowWasmTrapIllegalCast);
- LiftoffRegister obj_reg = SubtypeCheck(decoder, obj, rtt, trap_label);
- __ PushRegister(ValueType::Ref(rtt.type.heap_type(), kNonNullable),
- obj_reg);
+ LiftoffRegister obj_reg =
+ SubtypeCheck(decoder, obj, rtt, trap_label, kNullSucceeds);
+ __ PushRegister(obj.type.kind(), obj_reg);
}
void BrOnCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
@@ -4367,18 +4725,188 @@ class LiftoffCompiler {
}
Label cont_false;
- LiftoffRegister obj_reg = SubtypeCheck(decoder, obj, rtt, &cont_false);
+ LiftoffRegister obj_reg =
+ SubtypeCheck(decoder, obj, rtt, &cont_false, kNullFails);
- __ PushRegister(rtt.type.is_bottom()
- ? kWasmBottom
- : ValueType::Ref(rtt.type.heap_type(), kNonNullable),
- obj_reg);
+ __ PushRegister(rtt.type.is_bottom() ? kBottom : obj.type.kind(), obj_reg);
BrOrRet(decoder, depth);
__ bind(&cont_false);
// Drop the branch's value, restore original value.
Drop(decoder);
- __ PushRegister(obj.type, obj_reg);
+ __ PushRegister(obj.type.kind(), obj_reg);
+ }
+
+ // Abstract type checkers. They all return the object register and fall
+ // through to match.
+ LiftoffRegister DataCheck(const Value& obj, Label* no_match,
+ LiftoffRegList pinned, Register opt_scratch) {
+ LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned));
+
+ // Reserve all temporary registers up front, so that the cache state
+ // tracking doesn't get confused by the following conditional jumps.
+ LiftoffRegister tmp1 =
+ opt_scratch != no_reg
+ ? LiftoffRegister(opt_scratch)
+ : pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister tmp2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+
+ if (obj.type.is_nullable()) {
+ LoadNullValue(tmp1.gp(), pinned);
+ __ emit_cond_jump(kEqual, no_match, kOptRef, obj_reg.gp(), tmp1.gp());
+ }
+
+ __ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnSmi);
+
+ // Load the object's map and check if it is a struct/array map.
+ __ LoadMap(tmp1.gp(), obj_reg.gp());
+ EmitDataRefCheck(tmp1.gp(), no_match, tmp2, pinned);
+
+ return obj_reg;
+ }
+
+ LiftoffRegister FuncCheck(const Value& obj, Label* no_match,
+ LiftoffRegList pinned, Register opt_scratch) {
+ LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned));
+
+ // Reserve all temporary registers up front, so that the cache state
+ // tracking doesn't get confused by the following conditional jumps.
+ LiftoffRegister tmp1 =
+ opt_scratch != no_reg
+ ? LiftoffRegister(opt_scratch)
+ : pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+
+ if (obj.type.is_nullable()) {
+ LoadNullValue(tmp1.gp(), pinned);
+ __ emit_cond_jump(kEqual, no_match, kOptRef, obj_reg.gp(), tmp1.gp());
+ }
+
+ __ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnSmi);
+
+ // Load the object's map and check if its InstanceType field is that of a
+ // function.
+ __ LoadMap(tmp1.gp(), obj_reg.gp());
+ __ Load(tmp1, tmp1.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset),
+ LoadType::kI32Load16U, pinned);
+ __ emit_i32_cond_jumpi(kUnequal, no_match, tmp1.gp(), JS_FUNCTION_TYPE);
+
+ return obj_reg;
+ }
+
+ LiftoffRegister I31Check(const Value& object, Label* no_match,
+ LiftoffRegList pinned, Register opt_scratch) {
+ LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned));
+
+ __ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnNotSmi);
+
+ return obj_reg;
+ }
+
+ using TypeChecker = LiftoffRegister (LiftoffCompiler::*)(
+ const Value& obj, Label* no_match, LiftoffRegList pinned,
+ Register opt_scratch);
+
+ template <TypeChecker type_checker>
+ void AbstractTypeCheck(const Value& object) {
+ Label match, no_match, done;
+ LiftoffRegList pinned;
+ LiftoffRegister result = pinned.set(__ GetUnusedRegister(kGpReg, {}));
+
+ (this->*type_checker)(object, &no_match, pinned, result.gp());
+
+ __ bind(&match);
+ __ LoadConstant(result, WasmValue(1));
+ // TODO(jkummerow): Emit near jumps on platforms where it's more efficient.
+ __ emit_jump(&done);
+
+ __ bind(&no_match);
+ __ LoadConstant(result, WasmValue(0));
+ __ bind(&done);
+ __ PushRegister(kI32, result);
+ }
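
AbstractTypeCheck (and AbstractTypeCast / BrOnAbstractType below) take the concrete checker as a pointer-to-member-function template parameter, so each RefIs*/RefAs*/BrOn* entry point is a one-line wrapper. A minimal standalone illustration of that C++ pattern, not V8 code:

#include <iostream>

class Compiler {
 public:
  using Checker = bool (Compiler::*)(int);

  bool DataCheck(int v) { return v % 2 == 0; }
  bool FuncCheck(int v) { return v > 0; }

  // One shared body; the checker is selected at compile time.
  template <Checker checker>
  void EmitCheck(int v) {
    std::cout << ((this->*checker)(v) ? "match" : "no match") << "\n";
  }

  void RefIsData(int v) { EmitCheck<&Compiler::DataCheck>(v); }
  void RefIsFunc(int v) { EmitCheck<&Compiler::FuncCheck>(v); }
};

int main() {
  Compiler c;
  c.RefIsData(4);   // match
  c.RefIsFunc(-1);  // no match
}
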
+
+ void RefIsData(FullDecoder* /* decoder */, const Value& object,
+ Value* /* result_val */) {
+ return AbstractTypeCheck<&LiftoffCompiler::DataCheck>(object);
+ }
+
+ void RefIsFunc(FullDecoder* /* decoder */, const Value& object,
+ Value* /* result_val */) {
+ return AbstractTypeCheck<&LiftoffCompiler::FuncCheck>(object);
+ }
+
+ void RefIsI31(FullDecoder* decoder, const Value& object,
+ Value* /* result */) {
+ return AbstractTypeCheck<&LiftoffCompiler::I31Check>(object);
+ }
+
+ template <TypeChecker type_checker>
+ void AbstractTypeCast(const Value& object, FullDecoder* decoder,
+ ValueKind result_kind) {
+ Label* trap_label = AddOutOfLineTrap(decoder->position(),
+ WasmCode::kThrowWasmTrapIllegalCast);
+ Label match;
+ LiftoffRegister obj_reg =
+ (this->*type_checker)(object, trap_label, {}, no_reg);
+ __ bind(&match);
+ __ PushRegister(result_kind, obj_reg);
+ }
+
+ void RefAsData(FullDecoder* decoder, const Value& object,
+ Value* /* result */) {
+ return AbstractTypeCast<&LiftoffCompiler::DataCheck>(object, decoder, kRef);
+ }
+
+ void RefAsFunc(FullDecoder* decoder, const Value& object,
+ Value* /* result */) {
+ return AbstractTypeCast<&LiftoffCompiler::FuncCheck>(object, decoder, kRef);
+ }
+
+ void RefAsI31(FullDecoder* decoder, const Value& object, Value* result) {
+ return AbstractTypeCast<&LiftoffCompiler::I31Check>(object, decoder, kRef);
+ }
+
+ template <TypeChecker type_checker>
+ void BrOnAbstractType(const Value& object, FullDecoder* decoder,
+ uint32_t br_depth, ValueKind result_kind) {
+ // Before branching, materialize all constants. This avoids repeatedly
+ // materializing them for each conditional branch.
+ if (br_depth != decoder->control_depth() - 1) {
+ __ MaterializeMergedConstants(
+ decoder->control_at(br_depth)->br_merge()->arity);
+ }
+
+ Label match, no_match;
+ LiftoffRegister obj_reg =
+ (this->*type_checker)(object, &no_match, {}, no_reg);
+
+ __ bind(&match);
+ __ PushRegister(result_kind, obj_reg);
+ BrOrRet(decoder, br_depth);
+
+ __ bind(&no_match);
+ // Drop the branch's value, restore original value.
+ Drop(decoder);
+ __ PushRegister(object.type.kind(), obj_reg);
+ }
+
+ void BrOnData(FullDecoder* decoder, const Value& object,
+ Value* /* value_on_branch */, uint32_t br_depth) {
+ return BrOnAbstractType<&LiftoffCompiler::DataCheck>(object, decoder,
+ br_depth, kRef);
+ }
+
+ void BrOnFunc(FullDecoder* decoder, const Value& object,
+ Value* /* value_on_branch */, uint32_t br_depth) {
+ return BrOnAbstractType<&LiftoffCompiler::FuncCheck>(object, decoder,
+ br_depth, kRef);
+ }
+
+ void BrOnI31(FullDecoder* decoder, const Value& object,
+ Value* /* value_on_branch */, uint32_t br_depth) {
+ return BrOnAbstractType<&LiftoffCompiler::I31Check>(object, decoder,
+ br_depth, kRef);
}
void Forward(FullDecoder* decoder, const Value& from, Value* to) {
@@ -4386,10 +4914,20 @@ class LiftoffCompiler {
}
private:
+ ValueKindSig* MakeKindSig(Zone* zone, const FunctionSig* sig) {
+ ValueKind* reps =
+ zone->NewArray<ValueKind>(sig->parameter_count() + sig->return_count());
+ ValueKind* ptr = reps;
+ for (ValueType type : sig->all()) *ptr++ = type.kind();
+ return zone->New<ValueKindSig>(sig->return_count(), sig->parameter_count(),
+ reps);
+ }
+
void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[], CallKind call_kind) {
- for (ValueType ret : imm.sig->returns()) {
+ ValueKindSig* sig = MakeKindSig(compilation_zone_, imm.sig);
+ for (ValueKind ret : sig->returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
}
@@ -4406,20 +4944,20 @@ class LiftoffCompiler {
Register imported_targets = tmp;
LOAD_INSTANCE_FIELD(imported_targets, ImportedFunctionTargets,
- kSystemPointerSize);
+ kSystemPointerSize, pinned);
__ Load(LiftoffRegister(target), imported_targets, no_reg,
imm.index * sizeof(Address), kPointerLoadType, pinned);
Register imported_function_refs = tmp;
LOAD_TAGGED_PTR_INSTANCE_FIELD(imported_function_refs,
- ImportedFunctionRefs);
+ ImportedFunctionRefs, pinned);
Register imported_function_ref = tmp;
__ LoadTaggedPointer(
imported_function_ref, imported_function_refs, no_reg,
ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), pinned);
Register* explicit_instance = &imported_function_ref;
- __ PrepareCall(imm.sig, call_descriptor, &target, explicit_instance);
+ __ PrepareCall(sig, call_descriptor, &target, explicit_instance);
if (call_kind == kReturnCall) {
__ PrepareTailCall(
static_cast<int>(call_descriptor->StackParameterCount()),
@@ -4429,11 +4967,11 @@ class LiftoffCompiler {
} else {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
- __ CallIndirect(imm.sig, call_descriptor, target);
+ __ CallIndirect(sig, call_descriptor, target);
}
} else {
// A direct call within this module just gets the current instance.
- __ PrepareCall(imm.sig, call_descriptor);
+ __ PrepareCall(sig, call_descriptor);
// Just encode the function index. This will be patched at instantiation.
Address addr = static_cast<Address>(imm.index);
if (call_kind == kReturnCall) {
@@ -4453,16 +4991,17 @@ class LiftoffCompiler {
DefineSafepoint();
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
- __ FinishCall(imm.sig, call_descriptor);
+ __ FinishCall(sig, call_descriptor);
}
void CallIndirect(FullDecoder* decoder, const Value& index_val,
const CallIndirectImmediate<validate>& imm,
CallKind call_kind) {
+ ValueKindSig* sig = MakeKindSig(compilation_zone_, imm.sig);
if (imm.table_index != 0) {
return unsupported(decoder, kRefTypes, "table index != 0");
}
- for (ValueType ret : imm.sig->returns()) {
+ for (ValueKind ret : sig->returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
}
@@ -4486,9 +5025,10 @@ class LiftoffCompiler {
// Compare against table size stored in
// {instance->indirect_function_table_size}.
- LOAD_INSTANCE_FIELD(tmp_const, IndirectFunctionTableSize, kUInt32Size);
- __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kWasmI32,
- index, tmp_const);
+ LOAD_INSTANCE_FIELD(tmp_const, IndirectFunctionTableSize, kUInt32Size,
+ pinned);
+ __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kI32, index,
+ tmp_const);
// Mask the index to prevent SSCA.
if (FLAG_untrusted_code_mitigations) {
@@ -4514,7 +5054,8 @@ class LiftoffCompiler {
DEBUG_CODE_COMMENT("Check indirect call signature");
// Load the signature from {instance->ift_sig_ids[key]}
- LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds, kSystemPointerSize,
+ pinned);
// Shift {index} by 2 (multiply by 4) to represent kInt32Size items.
STATIC_ASSERT((1 << 2) == kInt32Size);
__ emit_i32_shli(index, index, 2);
@@ -4526,8 +5067,8 @@ class LiftoffCompiler {
Label* sig_mismatch_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapFuncSigMismatch);
- __ emit_cond_jump(kUnequal, sig_mismatch_label,
- LiftoffAssembler::kWasmIntPtr, scratch, tmp_const);
+ __ emit_cond_jump(kUnequal, sig_mismatch_label, LiftoffAssembler::kIntPtr,
+ scratch, tmp_const);
// At this point {index} has already been multiplied by 4.
DEBUG_CODE_COMMENT("Execute indirect call");
@@ -4539,7 +5080,7 @@ class LiftoffCompiler {
// At this point {index} has already been multiplied by kTaggedSize.
// Load the instance from {instance->ift_instances[key]}
- LOAD_TAGGED_PTR_INSTANCE_FIELD(table, IndirectFunctionTableRefs);
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(table, IndirectFunctionTableRefs, pinned);
__ LoadTaggedPointer(tmp_const, table, index,
ObjectAccess::ElementOffsetInTaggedFixedArray(0),
pinned);
@@ -4554,8 +5095,8 @@ class LiftoffCompiler {
Register* explicit_instance = &tmp_const;
// Load the target from {instance->ift_targets[key]}
- LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets,
- kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets, kSystemPointerSize,
+ pinned);
__ Load(LiftoffRegister(scratch), table, index, 0, kPointerLoadType,
pinned);
@@ -4565,7 +5106,7 @@ class LiftoffCompiler {
GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
Register target = scratch;
- __ PrepareCall(imm.sig, call_descriptor, &target, explicit_instance);
+ __ PrepareCall(sig, call_descriptor, &target, explicit_instance);
if (call_kind == kReturnCall) {
__ PrepareTailCall(
static_cast<int>(call_descriptor->StackParameterCount()),
@@ -4575,17 +5116,225 @@ class LiftoffCompiler {
} else {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
- __ CallIndirect(imm.sig, call_descriptor, target);
+ __ CallIndirect(sig, call_descriptor, target);
}
DefineSafepoint();
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
- __ FinishCall(imm.sig, call_descriptor);
+ __ FinishCall(sig, call_descriptor);
+ }
+
+ void CallRef(FullDecoder* decoder, ValueType func_ref_type,
+ const FunctionSig* type_sig, CallKind call_kind) {
+ ValueKindSig* sig = MakeKindSig(compilation_zone_, type_sig);
+ for (ValueKind ret : sig->returns()) {
+ if (!CheckSupportedType(decoder, ret, "return")) return;
+ }
+ compiler::CallDescriptor* call_descriptor =
+ compiler::GetWasmCallDescriptor(compilation_zone_, type_sig);
+ call_descriptor =
+ GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
+
+ // Since this is a call instruction, we'll have to spill everything later
+ // anyway; do it right away so that the register state tracking doesn't
+ // get confused by the conditional builtin call below.
+ __ SpillAllRegisters();
+
+ // We limit ourselves to four registers:
+ // (1) func_data, initially reused for func_ref.
+ // (2) instance, initially used as temp.
+ // (3) target, initially used as temp.
+ // (4) temp.
+ LiftoffRegList pinned;
+ LiftoffRegister func_ref = pinned.set(__ PopToModifiableRegister(pinned));
+ MaybeEmitNullCheck(decoder, func_ref.gp(), pinned, func_ref_type);
+ LiftoffRegister instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister target = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister temp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+
+ LiftoffRegister func_data = func_ref;
+ __ LoadTaggedPointer(
+ func_data.gp(), func_ref.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(JSFunction::kSharedFunctionInfoOffset),
+ pinned);
+ __ LoadTaggedPointer(
+ func_data.gp(), func_data.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(SharedFunctionInfo::kFunctionDataOffset),
+ pinned);
+
+ LiftoffRegister data_type = instance;
+ __ LoadMap(data_type.gp(), func_data.gp());
+ __ Load(data_type, data_type.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset),
+ LoadType::kI32Load16U, pinned);
+
+ Label is_js_function, perform_call;
+ __ emit_i32_cond_jumpi(kEqual, &is_js_function, data_type.gp(),
+ WASM_JS_FUNCTION_DATA_TYPE);
+ // End of {data_type}'s live range.
+
+ {
+ // Call to a WasmExportedFunction.
+
+ LiftoffRegister callee_instance = instance;
+ __ LoadTaggedPointer(callee_instance.gp(), func_data.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(
+ WasmExportedFunctionData::kInstanceOffset),
+ pinned);
+ LiftoffRegister func_index = target;
+ __ LoadTaggedSignedAsInt32(
+ func_index, func_data.gp(),
+ wasm::ObjectAccess::ToTagged(
+ WasmExportedFunctionData::kFunctionIndexOffset),
+ pinned);
+ LiftoffRegister imported_function_refs = temp;
+ __ LoadTaggedPointer(imported_function_refs.gp(), callee_instance.gp(),
+ no_reg,
+ wasm::ObjectAccess::ToTagged(
+ WasmInstanceObject::kImportedFunctionRefsOffset),
+ pinned);
+ // We overwrite {imported_function_refs} here, at the cost of having
+ // to reload it later, because we don't have more registers on ia32.
+ LiftoffRegister imported_functions_num = imported_function_refs;
+ __ LoadFixedArrayLengthAsInt32(imported_functions_num,
+ imported_function_refs.gp(), pinned);
+
+ Label imported;
+ __ emit_cond_jump(kSignedLessThan, &imported, kI32, func_index.gp(),
+ imported_functions_num.gp());
+
+ {
+ // Function locally defined in module.
+
+ // {func_index} is invalid from here on.
+ LiftoffRegister jump_table_start = target;
+ __ Load(jump_table_start, callee_instance.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(
+ WasmInstanceObject::kJumpTableStartOffset),
+ kPointerLoadType, pinned);
+ LiftoffRegister jump_table_offset = temp;
+ __ LoadTaggedSignedAsInt32(
+ jump_table_offset, func_data.gp(),
+ wasm::ObjectAccess::ToTagged(
+ WasmExportedFunctionData::kJumpTableOffsetOffset),
+ pinned);
+ __ emit_ptrsize_add(target.gp(), jump_table_start.gp(),
+ jump_table_offset.gp());
+ __ emit_jump(&perform_call);
+ }
+
+ {
+ // Function imported to module.
+ __ bind(&imported);
+
+ LiftoffRegister imported_function_targets = temp;
+ __ Load(imported_function_targets, callee_instance.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(
+ WasmInstanceObject::kImportedFunctionTargetsOffset),
+ kPointerLoadType, pinned);
+ // {callee_instance} is invalid from here on.
+ LiftoffRegister imported_instance = instance;
+ // Scale {func_index} to kTaggedSize.
+ __ emit_i32_shli(func_index.gp(), func_index.gp(), kTaggedSizeLog2);
+ // {func_data} is invalid from here on.
+ imported_function_refs = func_data;
+ __ LoadTaggedPointer(
+ imported_function_refs.gp(), callee_instance.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(
+ WasmInstanceObject::kImportedFunctionRefsOffset),
+ pinned);
+ __ LoadTaggedPointer(
+ imported_instance.gp(), imported_function_refs.gp(),
+ func_index.gp(),
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0), pinned);
+ // Scale {func_index} to kSystemPointerSize.
+ if (kSystemPointerSize == kTaggedSize * 2) {
+ __ emit_i32_add(func_index.gp(), func_index.gp(), func_index.gp());
+ } else {
+ DCHECK_EQ(kSystemPointerSize, kTaggedSize);
+ }
+ // This overwrites the contents of {func_index}, which we don't need
+ // any more.
+ __ Load(target, imported_function_targets.gp(), func_index.gp(), 0,
+ kPointerLoadType, pinned);
+ __ emit_jump(&perform_call);
+ }
+ }
+
+ {
+ // Call to a WasmJSFunction. The call target is
+ // function_data->wasm_to_js_wrapper_code()->instruction_start().
+ // The instance_node is the pair
+ // (current WasmInstanceObject, function_data->callable()).
+ __ bind(&is_js_function);
+
+ LiftoffRegister callable = temp;
+ __ LoadTaggedPointer(
+ callable.gp(), func_data.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kCallableOffset),
+ pinned);
+
+ // Preserve {func_data} across the call.
+ LiftoffRegList saved_regs = LiftoffRegList::ForRegs(func_data);
+ __ PushRegisters(saved_regs);
+
+ WasmCode::RuntimeStubId builtin = WasmCode::kWasmAllocatePair;
+ compiler::CallDescriptor* builtin_call_descriptor =
+ GetBuiltinCallDescriptor<WasmAllocatePairDescriptor>(
+ compilation_zone_);
+ ValueKind sig_reps[] = {kOptRef, kOptRef, kOptRef};
+ ValueKindSig builtin_sig(1, 2, sig_reps);
+ LiftoffRegister current_instance = instance;
+ __ FillInstanceInto(current_instance.gp());
+ LiftoffAssembler::VarState instance_var(kOptRef, current_instance, 0);
+ LiftoffAssembler::VarState callable_var(kOptRef, callable, 0);
+ __ PrepareBuiltinCall(&builtin_sig, builtin_call_descriptor,
+ {instance_var, callable_var});
+
+ __ CallRuntimeStub(builtin);
+ DefineSafepoint();
+ if (instance.gp() != kReturnRegister0) {
+ __ Move(instance.gp(), kReturnRegister0, LiftoffAssembler::kIntPtr);
+ }
+
+ // Restore {func_data}, which we saved across the call.
+ __ PopRegisters(saved_regs);
+
+ LiftoffRegister wrapper_code = target;
+ __ LoadTaggedPointer(wrapper_code.gp(), func_data.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(
+ WasmJSFunctionData::kWasmToJsWrapperCodeOffset),
+ pinned);
+ __ emit_ptrsize_addi(target.gp(), wrapper_code.gp(),
+ wasm::ObjectAccess::ToTagged(Code::kHeaderSize));
+ // Fall through to {perform_call}.
+ }
+
+ __ bind(&perform_call);
+ // Now the call target is in {target}, and the right instance object
+ // is in {instance}.
+ Register target_reg = target.gp();
+ Register instance_reg = instance.gp();
+ __ PrepareCall(sig, call_descriptor, &target_reg, &instance_reg);
+ if (call_kind == kReturnCall) {
+ __ PrepareTailCall(
+ static_cast<int>(call_descriptor->StackParameterCount()),
+ static_cast<int>(
+ call_descriptor->GetStackParameterDelta(descriptor_)));
+ __ TailCallIndirect(target_reg);
+ } else {
+ source_position_table_builder_.AddPosition(
+ __ pc_offset(), SourcePosition(decoder->position()), true);
+ __ CallIndirect(sig, call_descriptor, target_reg);
+ }
+ DefineSafepoint();
+ RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
+ __ FinishCall(sig, call_descriptor);
}
void LoadNullValue(Register null, LiftoffRegList pinned) {
- LOAD_INSTANCE_FIELD(null, IsolateRoot, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(null, IsolateRoot, kSystemPointerSize, pinned);
__ LoadTaggedPointer(null, null, no_reg,
IsolateData::root_slot_offset(RootIndex::kNullValue),
pinned);
@@ -4598,7 +5347,7 @@ class LiftoffCompiler {
decoder->position(), WasmCode::kThrowWasmTrapNullDereference);
LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
LoadNullValue(null.gp(), pinned);
- __ emit_cond_jump(LiftoffCondition::kEqual, trap_label, type, object,
+ __ emit_cond_jump(LiftoffCondition::kEqual, trap_label, kOptRef, object,
null.gp());
}
@@ -4611,8 +5360,8 @@ class LiftoffCompiler {
wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset);
__ Load(length, array.gp(), no_reg, kLengthOffset, LoadType::kI32Load,
pinned);
- __ emit_cond_jump(LiftoffCondition::kUnsignedGreaterEqual, trap_label,
- kWasmI32, index.gp(), length.gp());
+ __ emit_cond_jump(LiftoffCondition::kUnsignedGreaterEqual, trap_label, kI32,
+ index.gp(), length.gp());
}
int StructFieldOffset(const StructType* struct_type, int field_index) {
@@ -4621,52 +5370,53 @@ class LiftoffCompiler {
}
void LoadObjectField(LiftoffRegister dst, Register src, Register offset_reg,
- int offset, ValueType type, bool is_signed,
+ int offset, ValueKind kind, bool is_signed,
LiftoffRegList pinned) {
- if (type.is_reference_type()) {
+ if (is_reference_type(kind)) {
__ LoadTaggedPointer(dst.gp(), src, offset_reg, offset, pinned);
} else {
- // Primitive type.
- LoadType load_type = LoadType::ForValueType(type, is_signed);
+ // Primitive kind.
+ LoadType load_type = LoadType::ForValueKind(kind, is_signed);
__ Load(dst, src, offset_reg, offset, load_type, pinned);
}
}
void StoreObjectField(Register obj, Register offset_reg, int offset,
LiftoffRegister value, LiftoffRegList pinned,
- ValueType type) {
- if (type.is_reference_type()) {
+ ValueKind kind) {
+ if (is_reference_type(kind)) {
__ StoreTaggedPointer(obj, offset_reg, offset, value, pinned);
} else {
- // Primitive type.
- StoreType store_type = StoreType::ForValueType(type);
+ // Primitive kind.
+ StoreType store_type = StoreType::ForValueKind(kind);
__ Store(obj, offset_reg, offset, value, store_type, pinned);
}
}
- void SetDefaultValue(LiftoffRegister reg, ValueType type,
+ void SetDefaultValue(LiftoffRegister reg, ValueKind kind,
LiftoffRegList pinned) {
- DCHECK(type.is_defaultable());
- switch (type.kind()) {
- case ValueType::kI8:
- case ValueType::kI16:
- case ValueType::kI32:
+ DCHECK(is_defaultable(kind));
+ switch (kind) {
+ case kI8:
+ case kI16:
+ case kI32:
return __ LoadConstant(reg, WasmValue(int32_t{0}));
- case ValueType::kI64:
+ case kI64:
return __ LoadConstant(reg, WasmValue(int64_t{0}));
- case ValueType::kF32:
+ case kF32:
return __ LoadConstant(reg, WasmValue(float{0.0}));
- case ValueType::kF64:
+ case kF64:
return __ LoadConstant(reg, WasmValue(double{0.0}));
- case ValueType::kS128:
+ case kS128:
DCHECK(CpuFeatures::SupportsWasmSimd128());
return __ emit_s128_xor(reg, reg, reg);
- case ValueType::kOptRef:
+ case kOptRef:
return LoadNullValue(reg.gp(), pinned);
- case ValueType::kRtt:
- case ValueType::kStmt:
- case ValueType::kBottom:
- case ValueType::kRef:
+ case kRtt:
+ case kRttWithDepth:
+ case kStmt:
+ case kBottom:
+ case kRef:
UNREACHABLE();
}
}
@@ -4726,17 +5476,17 @@ class LiftoffCompiler {
// breakpoint, and a pointer after the list of breakpoints as end marker.
// A single breakpoint at offset 0 indicates that we should prepare the
// function for stepping by flooding it with breakpoints.
- int* next_breakpoint_ptr_ = nullptr;
- int* next_breakpoint_end_ = nullptr;
+ const int* next_breakpoint_ptr_ = nullptr;
+ const int* next_breakpoint_end_ = nullptr;
// Introduce a dead breakpoint to ensure that the calculation of the return
// address in OSR is correct.
int dead_breakpoint_ = 0;
- // Remember whether the "hook on function call" has already been checked.
- // This happens at the first breakable opcode in the function (if compiling
- // for debugging).
- bool checked_hook_on_function_call_ = false;
+ // Remember whether we already did the function-entry break checks (for "hook
+ // on function call" and "break on entry", a.k.a. instrumentation breakpoint).
+ // This happens at the first breakable opcode in the function (if compiling
+ // for debugging).
+ bool did_function_entry_break_checks_ = false;
bool has_outstanding_op() const {
return outstanding_op_ != kNoOutstandingOp;
@@ -4758,11 +5508,21 @@ class LiftoffCompiler {
}
void DefineSafepoint() {
- Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(
- &asm_, Safepoint::kNoLazyDeopt);
+ Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
__ cache_state()->DefineSafepoint(safepoint);
}
+ Register LoadInstanceIntoRegister(LiftoffRegList pinned, Register fallback) {
+ Register instance = __ cache_state()->cached_instance;
+ if (instance == no_reg) {
+ instance = __ cache_state()->TrySetCachedInstanceRegister(
+ pinned | LiftoffRegList::ForRegs(fallback));
+ if (instance == no_reg) instance = fallback;
+ __ LoadInstanceFromFrame(instance);
+ }
+ return instance;
+ }
+
DISALLOW_IMPLICIT_CONSTRUCTORS(LiftoffCompiler);
};
@@ -4771,7 +5531,7 @@ class LiftoffCompiler {
WasmCompilationResult ExecuteLiftoffCompilation(
AccountingAllocator* allocator, CompilationEnv* env,
const FunctionBody& func_body, int func_index, ForDebugging for_debugging,
- Counters* counters, WasmFeatures* detected, Vector<int> breakpoints,
+ Counters* counters, WasmFeatures* detected, Vector<const int> breakpoints,
std::unique_ptr<DebugSideTable>* debug_sidetable, int dead_breakpoint) {
int func_body_size = static_cast<int>(func_body.end - func_body.start);
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
@@ -4787,8 +5547,6 @@ WasmCompilationResult ExecuteLiftoffCompilation(
std::unique_ptr<wasm::WasmInstructionBuffer> instruction_buffer =
wasm::WasmInstructionBuffer::New(128 + code_size_estimate * 4 / 3);
std::unique_ptr<DebugSideTableBuilder> debug_sidetable_builder;
- // If we are emitting breakpoints, we should also emit the debug side table.
- DCHECK_IMPLIES(!breakpoints.empty(), debug_sidetable != nullptr);
if (debug_sidetable) {
debug_sidetable_builder = std::make_unique<DebugSideTableBuilder>();
}
@@ -4811,11 +5569,6 @@ WasmCompilationResult ExecuteLiftoffCompilation(
// Register the bailout reason (can also be {kSuccess}).
counters->liftoff_bailout_reasons()->AddSample(
static_cast<int>(compiler->bailout_reason()));
- if (compiler->did_bailout()) {
- counters->liftoff_unsupported_functions()->Increment();
- } else {
- counters->liftoff_compiled_functions()->Increment();
- }
}
if (compiler->did_bailout()) return WasmCompilationResult{};
@@ -4839,17 +5592,32 @@ WasmCompilationResult ExecuteLiftoffCompilation(
}
std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
- AccountingAllocator* allocator, CompilationEnv* env,
- const FunctionBody& func_body, int func_index) {
+ const WasmCode* code) {
+ auto* native_module = code->native_module();
+ auto* function = &native_module->module()->functions[code->index()];
+ ModuleWireBytes wire_bytes{native_module->wire_bytes()};
+ Vector<const byte> function_bytes = wire_bytes.GetFunctionBytes(function);
+ CompilationEnv env = native_module->CreateCompilationEnv();
+ FunctionBody func_body{function->sig, 0, function_bytes.begin(),
+ function_bytes.end()};
+
+ AccountingAllocator* allocator = native_module->engine()->allocator();
Zone zone(allocator, "LiftoffDebugSideTableZone");
- auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body.sig);
+ auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, function->sig);
DebugSideTableBuilder debug_sidetable_builder;
WasmFeatures detected;
+ constexpr int kSteppingBreakpoints[] = {0};
+ DCHECK(code->for_debugging() == kForDebugging ||
+ code->for_debugging() == kForStepping);
+ Vector<const int> breakpoints = code->for_debugging() == kForStepping
+ ? ArrayVector(kSteppingBreakpoints)
+ : Vector<const int>{};
WasmFullDecoder<Decoder::kBooleanValidation, LiftoffCompiler> decoder(
- &zone, env->module, env->enabled_features, &detected, func_body,
- call_descriptor, env, &zone,
+ &zone, native_module->module(), env.enabled_features, &detected,
+ func_body, call_descriptor, &env, &zone,
NewAssemblerBuffer(AssemblerBase::kDefaultBufferSize),
- &debug_sidetable_builder, kForDebugging, func_index);
+ &debug_sidetable_builder, code->for_debugging(), code->index(),
+ breakpoints);
decoder.Decode();
DCHECK(decoder.ok());
DCHECK(!decoder.interface().did_bailout());
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.h b/deps/v8/src/wasm/baseline/liftoff-compiler.h
index 177ca7b78f..6987c2e779 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.h
@@ -56,11 +56,11 @@ enum LiftoffBailoutReason : int8_t {
V8_EXPORT_PRIVATE WasmCompilationResult ExecuteLiftoffCompilation(
AccountingAllocator*, CompilationEnv*, const FunctionBody&, int func_index,
ForDebugging, Counters*, WasmFeatures* detected_features,
- Vector<int> breakpoints = {}, std::unique_ptr<DebugSideTable>* = nullptr,
- int dead_breakpoint = 0);
+ Vector<const int> breakpoints = {},
+ std::unique_ptr<DebugSideTable>* = nullptr, int dead_breakpoint = 0);
V8_EXPORT_PRIVATE std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
- AccountingAllocator*, CompilationEnv*, const FunctionBody&, int func_index);
+ const WasmCode*);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/baseline/liftoff-register.h b/deps/v8/src/wasm/baseline/liftoff-register.h
index bd2e6ed4c2..bb27b99dc2 100644
--- a/deps/v8/src/wasm/baseline/liftoff-register.h
+++ b/deps/v8/src/wasm/baseline/liftoff-register.h
@@ -45,40 +45,37 @@ static_assert(kNeedS128RegPair == (kFpRegPair != kNoReg),
enum RegPairHalf : uint8_t { kLowWord = 0, kHighWord = 1 };
-static inline constexpr bool needs_gp_reg_pair(ValueType type) {
- return kNeedI64RegPair && type == kWasmI64;
+static inline constexpr bool needs_gp_reg_pair(ValueKind kind) {
+ return kNeedI64RegPair && kind == kI64;
}
-static inline constexpr bool needs_fp_reg_pair(ValueType type) {
- return kNeedS128RegPair && type == kWasmS128;
+static inline constexpr bool needs_fp_reg_pair(ValueKind kind) {
+ return kNeedS128RegPair && kind == kS128;
}
-static inline constexpr RegClass reg_class_for(ValueType::Kind kind) {
+static inline constexpr RegClass reg_class_for(ValueKind kind) {
switch (kind) {
- case ValueType::kF32:
- case ValueType::kF64:
+ case kF32:
+ case kF64:
return kFpReg;
- case ValueType::kI8:
- case ValueType::kI16:
- case ValueType::kI32:
+ case kI8:
+ case kI16:
+ case kI32:
return kGpReg;
- case ValueType::kI64:
+ case kI64:
return kNeedI64RegPair ? kGpRegPair : kGpReg;
- case ValueType::kS128:
+ case kS128:
return kNeedS128RegPair ? kFpRegPair : kFpReg;
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kRtt:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
return kGpReg;
default:
- return kNoReg; // unsupported type
+ return kNoReg; // unsupported kind
}
}
-static inline constexpr RegClass reg_class_for(ValueType type) {
- return reg_class_for(type.kind());
-}
-
// Description of LiftoffRegister code encoding.
// This example uses the ARM architecture, which as of writing has:
// - 9 GP registers, requiring 4 bits
@@ -191,9 +188,9 @@ class LiftoffRegister {
// Shifts the register code depending on the type before converting to a
// LiftoffRegister.
- static LiftoffRegister from_external_code(RegClass rc, ValueType type,
+ static LiftoffRegister from_external_code(RegClass rc, ValueKind kind,
int code) {
- if (!kSimpleFPAliasing && type == kWasmF32) {
+ if (!kSimpleFPAliasing && kind == kF32) {
// Liftoff assumes a one-to-one mapping between float registers and
// double registers, and so does not distinguish between f32 and f64
// registers. The f32 register code must therefore be halved in order
@@ -201,7 +198,7 @@ class LiftoffRegister {
DCHECK_EQ(0, code % 2);
return LiftoffRegister::from_code(rc, code >> 1);
}
- if (kNeedS128RegPair && type == kWasmS128) {
+ if (kNeedS128RegPair && kind == kS128) {
// Similarly for double registers and SIMD registers, the SIMD code
// needs to be doubled to pass the f64 code to Liftoff.
return LiftoffRegister::ForFpPair(DoubleRegister::from_code(code << 1));
@@ -376,6 +373,10 @@ class LiftoffRegList {
}
return reg;
}
+ Register clear(Register reg) { return clear(LiftoffRegister{reg}).gp(); }
+ DoubleRegister clear(DoubleRegister reg) {
+ return clear(LiftoffRegister{reg}).fp();
+ }
bool has(LiftoffRegister reg) const {
if (reg.is_pair()) {
@@ -384,8 +385,8 @@ class LiftoffRegList {
}
return (regs_ & (storage_t{1} << reg.liftoff_code())) != 0;
}
- bool has(Register reg) const { return has(LiftoffRegister(reg)); }
- bool has(DoubleRegister reg) const { return has(LiftoffRegister(reg)); }
+ bool has(Register reg) const { return has(LiftoffRegister{reg}); }
+ bool has(DoubleRegister reg) const { return has(LiftoffRegister{reg}); }
constexpr bool is_empty() const { return regs_ == 0; }
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index c12eae4c39..94ba6f783e 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -84,25 +84,26 @@ inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
- int32_t offset, ValueType type) {
+ int32_t offset, ValueKind kind) {
MemOperand src(base, offset);
- switch (type.kind()) {
- case ValueType::kI32:
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kRtt:
+ switch (kind) {
+ case kI32:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
assm->lw(dst.gp(), src);
break;
- case ValueType::kI64:
+ case kI64:
assm->lw(dst.low_gp(),
MemOperand(base, offset + liftoff::kLowWordOffset));
assm->lw(dst.high_gp(),
MemOperand(base, offset + liftoff::kHighWordOffset));
break;
- case ValueType::kF32:
+ case kF32:
assm->lwc1(dst.fp(), src);
break;
- case ValueType::kF64:
+ case kF64:
assm->Ldc1(dst.fp(), src);
break;
default:
@@ -111,25 +112,26 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
}
inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
- LiftoffRegister src, ValueType type) {
+ LiftoffRegister src, ValueKind kind) {
MemOperand dst(base, offset);
- switch (type.kind()) {
- case ValueType::kI32:
- case ValueType::kOptRef:
- case ValueType::kRef:
- case ValueType::kRtt:
+ switch (kind) {
+ case kI32:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
assm->Usw(src.gp(), dst);
break;
- case ValueType::kI64:
+ case kI64:
assm->Usw(src.low_gp(),
MemOperand(base, offset + liftoff::kLowWordOffset));
assm->Usw(src.high_gp(),
MemOperand(base, offset + liftoff::kHighWordOffset));
break;
- case ValueType::kF32:
+ case kF32:
assm->Uswc1(src.fp(), dst, t8);
break;
- case ValueType::kF64:
+ case kF64:
assm->Usdc1(src.fp(), dst, t8);
break;
default:
@@ -137,25 +139,25 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
}
}
-inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
+ switch (kind) {
+ case kI32:
+ case kOptRef:
+ case kRef:
+ case kRtt:
assm->push(reg.gp());
break;
- case ValueType::kI64:
+ case kI64:
assm->Push(reg.high_gp(), reg.low_gp());
break;
- case ValueType::kF32:
+ case kF32:
assm->addiu(sp, sp, -sizeof(float));
assm->swc1(reg.fp(), MemOperand(sp, 0));
break;
- case ValueType::kF64:
+ case kF64:
assm->addiu(sp, sp, -sizeof(double));
assm->Sdc1(reg.fp(), MemOperand(sp, 0));
break;
- case ValueType::kOptRef:
- assm->push(reg.gp());
- break;
default:
UNREACHABLE();
}
@@ -363,26 +365,26 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset;
}
-int LiftoffAssembler::SlotSizeForType(ValueType type) {
- switch (type.kind()) {
- case ValueType::kS128:
- return type.element_size_bytes();
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
+ switch (kind) {
+ case kS128:
+ return element_size_bytes(kind);
default:
return kStackSlotSize;
}
}
-bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- return type.kind() == ValueType::kS128 || type.is_reference_type();
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+ return kind == kS128 || is_reference_type(kind);
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
switch (value.type().kind()) {
- case ValueType::kI32:
+ case kI32:
TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
break;
- case ValueType::kI64: {
+ case kI64: {
DCHECK(RelocInfo::IsNone(rmode));
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
@@ -390,10 +392,10 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
TurboAssembler::li(reg.high_gp(), Operand(high_word));
break;
}
- case ValueType::kF32:
+ case kF32:
TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
break;
- case ValueType::kF64:
+ case kF64:
TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
break;
default:
@@ -401,17 +403,30 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
- int size) {
- DCHECK_LE(0, offset);
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
lw(dst, liftoff::GetInstanceOperand());
- DCHECK_EQ(4, size);
- lw(dst, MemOperand(dst, offset));
+}
+
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+ int32_t offset, int size) {
+ DCHECK_LE(0, offset);
+ switch (size) {
+ case 1:
+ lb(dst, MemOperand(instance, offset));
+ break;
+ case 4:
+ lw(dst, MemOperand(instance, offset));
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
}
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ Register instance,
int32_t offset) {
- LoadFromInstance(dst, offset, kTaggedSize);
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ lw(dst, MemOperand(instance, offset));
}
void LiftoffAssembler::SpillInstance(Register instance) {
@@ -435,7 +450,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
- LiftoffRegList pinned) {
+ LiftoffRegList pinned,
+ SkipWriteBarrier skip_write_barrier) {
STATIC_ASSERT(kTaggedSize == kInt32Size);
Register dst = no_reg;
if (offset_reg != no_reg) {
@@ -445,6 +461,9 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
MemOperand dst_op = (offset_reg != no_reg) ? MemOperand(dst, offset_imm)
: MemOperand(dst_addr, offset_imm);
Sw(src.gp(), dst_op);
+
+ if (skip_write_barrier) return;
+
// The write barrier.
Label write_barrier;
Label exit;
@@ -452,12 +471,12 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
CheckPageFlag(dst_addr, scratch,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
&write_barrier);
- Branch(USE_DELAY_SLOT, &exit);
+ Branch(&exit);
bind(&write_barrier);
JumpIfSmi(src.gp(), &exit);
CheckPageFlag(src.gp(), scratch,
MemoryChunk::kPointersToHereAreInterestingMask, eq, &exit);
- Addu(scratch, dst_addr, offset_imm);
+ Addu(scratch, dst_op.rm(), dst_op.offset());
CallRecordWriteStub(dst_addr, scratch, EMIT_REMEMBERED_SET, kSaveFPRegs,
wasm::WasmCode::kRecordWrite);
bind(&exit);
@@ -678,60 +697,61 @@ void LiftoffAssembler::AtomicFence() { sync(); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
- liftoff::Load(this, dst, fp, offset, type);
+ liftoff::Load(this, dst, fp, offset, kind);
}
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
- liftoff::Store(this, fp, offset, src, type);
+ liftoff::Store(this, fp, offset, src, kind);
}
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
- ValueType type) {
- liftoff::Load(this, dst, sp, offset, type);
+ ValueKind kind) {
+ liftoff::Load(this, dst, sp, offset, kind);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
- ValueType type) {
+ ValueKind kind) {
DCHECK_NE(dst_offset, src_offset);
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
- Fill(reg, src_offset, type);
- Spill(dst_offset, reg, type);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(kind), {});
+ Fill(reg, src_offset, kind);
+ Spill(dst_offset, reg, kind);
}
-void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src);
TurboAssembler::mov(dst, src);
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
- ValueType type) {
+ ValueKind kind) {
DCHECK_NE(dst, src);
TurboAssembler::Move(dst, src);
}
-void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
- switch (type.kind()) {
- case ValueType::kI32:
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kRtt:
+ switch (kind) {
+ case kI32:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
sw(reg.gp(), dst);
break;
- case ValueType::kI64:
+ case kI64:
sw(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
sw(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
break;
- case ValueType::kF32:
+ case kF32:
swc1(reg.fp(), dst);
break;
- case ValueType::kF64:
+ case kF64:
TurboAssembler::Sdc1(reg.fp(), dst);
break;
default:
@@ -743,13 +763,15 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type().kind()) {
- case ValueType::kI32: {
+ case kI32:
+ case kRef:
+ case kOptRef: {
LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
sw(tmp.gp(), dst);
break;
}
- case ValueType::kI64: {
+ case kI64: {
LiftoffRegister tmp = GetUnusedRegister(kGpRegPair, {});
int32_t low_word = value.to_i64();
@@ -768,22 +790,22 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
}
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
MemOperand src = liftoff::GetStackSlot(offset);
- switch (type.kind()) {
- case ValueType::kI32:
- case ValueType::kRef:
- case ValueType::kOptRef:
+ switch (kind) {
+ case kI32:
+ case kRef:
+ case kOptRef:
lw(reg.gp(), src);
break;
- case ValueType::kI64:
+ case kI64:
lw(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
lw(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
break;
- case ValueType::kF32:
+ case kF32:
lwc1(reg.fp(), src);
break;
- case ValueType::kF64:
+ case kF64:
TurboAssembler::Ldc1(reg.fp(), src);
break;
default:
@@ -1488,15 +1510,15 @@ void LiftoffAssembler::emit_jump(Register target) {
}
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueType type,
+ Label* label, ValueKind kind,
Register lhs, Register rhs) {
Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs == no_reg) {
- DCHECK_EQ(type, kWasmI32);
+ DCHECK_EQ(kind, kI32);
TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
} else {
- DCHECK(type == kWasmI32 ||
- (type.is_reference_type() &&
+ DCHECK(kind == kI32 ||
+ (is_reference_type(kind) &&
(liftoff_cond == kEqual || liftoff_cond == kUnequal)));
TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
}
@@ -1691,7 +1713,7 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
LiftoffRegister false_value,
- ValueType type) {
+ ValueKind kind) {
return false;
}
@@ -1712,6 +1734,13 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
bailout(kSimd, "load extend and load splat unimplemented");
}
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc) {
+ bailout(kSimd, "storelane");
+}
+
void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
Register addr, Register offset_reg,
uintptr_t offset_imm, LoadType type,
@@ -1792,6 +1821,12 @@ SIMD_BINOP(i64x2_extmul_high_i32x4_u, ilvl_w, dotp_u_d)
#undef SIMD_BINOP
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i16x8_q15mulr_sat_s");
+}
+
void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i8x16_eq");
@@ -1902,6 +1937,21 @@ void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_f32x4_le");
}
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_eq");
+}
+
+void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_ne");
+}
+
+void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_abs");
+}
+
void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_f64x2_eq");
@@ -1964,9 +2014,9 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
bailout(kSimd, "emit_i8x16_neg");
}
-void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "emit_v8x16_anytrue");
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v128_anytrue");
}
void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
@@ -2074,16 +2124,16 @@ void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
bailout(kSimd, "emit_i8x16_max_u");
}
+void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_popcnt");
+}
+
void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_i16x8_neg");
}
-void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "emit_v16x8_anytrue");
-}
-
void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_v16x8_alltrue");
@@ -2189,16 +2239,21 @@ void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
bailout(kSimd, "emit_i16x8_max_u");
}
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_extadd_pairwise_i8x16_s");
+}
+
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_extadd_pairwise_i8x16_u");
+}
+
void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_i32x4_neg");
}
-void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "emit_v32x4_anytrue");
-}
-
void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_v32x4_alltrue");
@@ -2286,11 +2341,26 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
bailout(kSimd, "emit_i32x4_dot_i16x8_s");
}
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_extadd_pairwise_i16x8_s");
+}
+
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_extadd_pairwise_i16x8_u");
+}
+
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_i64x2_neg");
}
+void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v64x2_alltrue");
+}
+
void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_i64x2_bitmask");
@@ -2343,6 +2413,16 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i64x2_mul");
}
+void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_gt_s");
+}
+
+void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_ge_s");
+}
+
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_f32x4_abs");
@@ -2493,6 +2573,21 @@ void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_f64x2_pmax");
}
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_convert_low_i32x4_s");
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_convert_low_i32x4_u");
+}
+
+void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_promote_low_f32x4");
+}
+
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_i32x4_sconvert_f32x4");
@@ -2503,6 +2598,16 @@ void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
bailout(kSimd, "emit_i32x4_uconvert_f32x4");
}
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_trunc_sat_f64x2_s_zero");
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_trunc_sat_f64x2_u_zero");
+}
+
void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_f32x4_sconvert_i32x4");
@@ -2513,6 +2618,11 @@ void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
bailout(kSimd, "emit_f32x4_uconvert_i32x4");
}
+void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_demote_f64x2_zero");
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2577,6 +2687,26 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
bailout(kSimd, "emit_i32x4_uconvert_i16x8_high");
}
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_sconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_sconvert_i32x4_high");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_uconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_uconvert_i32x4_high");
+}
+
void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2782,17 +2912,17 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
}
-void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
- ValueType out_argument_type, int stack_bytes,
+ ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
addiu(sp, sp, -stack_bytes);
int arg_bytes = 0;
- for (ValueType param_type : sig->parameters()) {
- liftoff::Store(this, sp, arg_bytes, *args++, param_type);
- arg_bytes += param_type.element_size_bytes();
+ for (ValueKind param_kind : sig->parameters()) {
+ liftoff::Store(this, sp, arg_bytes, *args++, param_kind);
+ arg_bytes += element_size_bytes(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
@@ -2818,8 +2948,8 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
}
// Load potential output value from the buffer on the stack.
- if (out_argument_type != kWasmStmt) {
- liftoff::Load(this, *next_result_reg, sp, 0, out_argument_type);
+ if (out_argument_kind != kStmt) {
+ liftoff::Load(this, *next_result_reg, sp, 0, out_argument_kind);
}
addiu(sp, sp, stack_bytes);
@@ -2833,7 +2963,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
Jump(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
if (target == no_reg) {
@@ -2873,7 +3003,7 @@ void LiftoffStackSlots::Construct() {
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack: {
- if (src.type().kind() == ValueType::kF64) {
+ if (src.kind() == kF64) {
DCHECK_EQ(kLowWord, slot.half_);
asm_->lw(kScratchReg,
liftoff::GetHalfStackSlot(slot.src_offset_, kHighWord));
@@ -2885,12 +3015,12 @@ void LiftoffStackSlots::Construct() {
break;
}
case LiftoffAssembler::VarState::kRegister:
- if (src.type().kind() == ValueType::kI64) {
+ if (src.kind() == kI64) {
liftoff::push(
asm_, slot.half_ == kLowWord ? src.reg().low() : src.reg().high(),
- kWasmI32);
+ kI32);
} else {
- liftoff::push(asm_, src.reg(), src.type());
+ liftoff::push(asm_, src.reg(), src.kind());
}
break;
case LiftoffAssembler::VarState::kIntConst: {
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index b97b423e20..deb54995b1 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -6,6 +6,7 @@
#define V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
#include "src/base/platform/wrappers.h"
+#include "src/codegen/machine-type.h"
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
@@ -92,24 +93,25 @@ inline MemOperand GetMemOp(LiftoffAssembler* assm, Register addr,
}
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
- ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
+ ValueKind kind) {
+ switch (kind) {
+ case kI32:
assm->Lw(dst.gp(), src);
break;
- case ValueType::kI64:
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kRtt:
+ case kI64:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
assm->Ld(dst.gp(), src);
break;
- case ValueType::kF32:
+ case kF32:
assm->Lwc1(dst.fp(), src);
break;
- case ValueType::kF64:
+ case kF64:
assm->Ldc1(dst.fp(), src);
break;
- case ValueType::kS128:
+ case kS128:
assm->ld_b(dst.fp().toW(), src);
break;
default:
@@ -118,25 +120,26 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
}
inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
- LiftoffRegister src, ValueType type) {
+ LiftoffRegister src, ValueKind kind) {
MemOperand dst(base, offset);
- switch (type.kind()) {
- case ValueType::kI32:
+ switch (kind) {
+ case kI32:
assm->Usw(src.gp(), dst);
break;
- case ValueType::kI64:
- case ValueType::kOptRef:
- case ValueType::kRef:
- case ValueType::kRtt:
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
assm->Usd(src.gp(), dst);
break;
- case ValueType::kF32:
+ case kF32:
assm->Uswc1(src.fp(), dst, t8);
break;
- case ValueType::kF64:
+ case kF64:
assm->Usdc1(src.fp(), dst, t8);
break;
- case ValueType::kS128:
+ case kS128:
assm->st_b(src.fp().toW(), dst);
break;
default:
@@ -144,24 +147,27 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
}
}
-inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
+ switch (kind) {
+ case kI32:
assm->daddiu(sp, sp, -kSystemPointerSize);
assm->sw(reg.gp(), MemOperand(sp, 0));
break;
- case ValueType::kI64:
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
assm->push(reg.gp());
break;
- case ValueType::kF32:
+ case kF32:
assm->daddiu(sp, sp, -kSystemPointerSize);
assm->swc1(reg.fp(), MemOperand(sp, 0));
break;
- case ValueType::kF64:
+ case kF64:
assm->daddiu(sp, sp, -kSystemPointerSize);
assm->Sdc1(reg.fp(), MemOperand(sp, 0));
break;
- case ValueType::kS128:
+ case kS128:
assm->daddiu(sp, sp, -kSystemPointerSize * 2);
assm->st_b(reg.fp().toW(), MemOperand(sp, 0));
break;
@@ -346,32 +352,32 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset;
}
-int LiftoffAssembler::SlotSizeForType(ValueType type) {
- switch (type.kind()) {
- case ValueType::kS128:
- return type.element_size_bytes();
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
+ switch (kind) {
+ case kS128:
+ return element_size_bytes(kind);
default:
return kStackSlotSize;
}
}
-bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- return type.kind() == ValueType::kS128 || type.is_reference_type();
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+ return kind == kS128 || is_reference_type(kind);
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
switch (value.type().kind()) {
- case ValueType::kI32:
+ case kI32:
TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
break;
- case ValueType::kI64:
+ case kI64:
TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
break;
- case ValueType::kF32:
+ case kF32:
TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
break;
- case ValueType::kF64:
+ case kF64:
TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
break;
default:
@@ -379,21 +385,33 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
- int size) {
- DCHECK_LE(0, offset);
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
Ld(dst, liftoff::GetInstanceOperand());
- DCHECK(size == 4 || size == 8);
- if (size == 4) {
- Lw(dst, MemOperand(dst, offset));
- } else {
- Ld(dst, MemOperand(dst, offset));
+}
+
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+ int offset, int size) {
+ DCHECK_LE(0, offset);
+ switch (size) {
+ case 1:
+ Lb(dst, MemOperand(instance, offset));
+ break;
+ case 4:
+ Lw(dst, MemOperand(instance, offset));
+ break;
+ case 8:
+ Ld(dst, MemOperand(instance, offset));
+ break;
+ default:
+ UNIMPLEMENTED();
}
}
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ Register instance,
int32_t offset) {
- LoadFromInstance(dst, offset, kTaggedSize);
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ Ld(dst, MemOperand(instance, offset));
}
void LiftoffAssembler::SpillInstance(Register instance) {
@@ -417,24 +435,27 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
- LiftoffRegList pinned) {
+ LiftoffRegList pinned,
+ SkipWriteBarrier skip_write_barrier) {
STATIC_ASSERT(kTaggedSize == kInt64Size);
Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
Sd(src.gp(), dst_op);
+ if (skip_write_barrier) return;
+
Label write_barrier;
Label exit;
CheckPageFlag(dst_addr, scratch,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
&write_barrier);
- Branch(USE_DELAY_SLOT, &exit);
+ Branch(&exit);
bind(&write_barrier);
JumpIfSmi(src.gp(), &exit);
CheckPageFlag(src.gp(), scratch,
MemoryChunk::kPointersToHereAreInterestingMask, eq,
&exit);
- Daddu(scratch, dst_addr, offset_imm);
+ Daddu(scratch, dst_op.rm(), dst_op.offset());
CallRecordWriteStub(dst_addr, scratch, EMIT_REMEMBERED_SET, kSaveFPRegs,
wasm::WasmCode::kRecordWrite);
bind(&exit);
@@ -605,67 +626,68 @@ void LiftoffAssembler::AtomicFence() { sync(); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
MemOperand src(fp, kSystemPointerSize * (caller_slot_idx + 1));
- liftoff::Load(this, dst, src, type);
+ liftoff::Load(this, dst, src, kind);
}
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
- liftoff::Store(this, fp, offset, src, type);
+ liftoff::Store(this, fp, offset, src, kind);
}
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
- ValueType type) {
- liftoff::Load(this, dst, MemOperand(sp, offset), type);
+ ValueKind kind) {
+ liftoff::Load(this, dst, MemOperand(sp, offset), kind);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
- ValueType type) {
+ ValueKind kind) {
DCHECK_NE(dst_offset, src_offset);
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
- Fill(reg, src_offset, type);
- Spill(dst_offset, reg, type);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(kind), {});
+ Fill(reg, src_offset, kind);
+ Spill(dst_offset, reg, kind);
}
-void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src);
// TODO(ksreten): Handle different sizes here.
TurboAssembler::Move(dst, src);
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
- ValueType type) {
+ ValueKind kind) {
DCHECK_NE(dst, src);
- if (type != kWasmS128) {
+ if (kind != kS128) {
TurboAssembler::Move(dst, src);
} else {
TurboAssembler::move_v(dst.toW(), src.toW());
}
}
-void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
- switch (type.kind()) {
- case ValueType::kI32:
+ switch (kind) {
+ case kI32:
Sw(reg.gp(), dst);
break;
- case ValueType::kI64:
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kRtt:
+ case kI64:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
Sd(reg.gp(), dst);
break;
- case ValueType::kF32:
+ case kF32:
Swc1(reg.fp(), dst);
break;
- case ValueType::kF64:
+ case kF64:
TurboAssembler::Sdc1(reg.fp(), dst);
break;
- case ValueType::kS128:
+ case kS128:
TurboAssembler::st_b(reg.fp().toW(), dst);
break;
default:
@@ -677,15 +699,15 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type().kind()) {
- case ValueType::kI32: {
+ case kI32: {
LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
Sw(tmp.gp(), dst);
break;
}
- case ValueType::kI64:
- case ValueType::kRef:
- case ValueType::kOptRef: {
+ case kI64:
+ case kRef:
+ case kOptRef: {
LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), value.to_i64());
Sd(tmp.gp(), dst);
@@ -698,24 +720,24 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
}
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
MemOperand src = liftoff::GetStackSlot(offset);
- switch (type.kind()) {
- case ValueType::kI32:
+ switch (kind) {
+ case kI32:
Lw(reg.gp(), src);
break;
- case ValueType::kI64:
- case ValueType::kRef:
- case ValueType::kOptRef:
+ case kI64:
+ case kRef:
+ case kOptRef:
Ld(reg.gp(), src);
break;
- case ValueType::kF32:
+ case kF32:
Lwc1(reg.fp(), src);
break;
- case ValueType::kF64:
+ case kF64:
TurboAssembler::Ldc1(reg.fp(), src);
break;
- case ValueType::kS128:
+ case kS128:
TurboAssembler::ld_b(reg.fp().toW(), src);
break;
default:
@@ -1342,15 +1364,15 @@ void LiftoffAssembler::emit_jump(Register target) {
}
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueType type,
+ Label* label, ValueKind kind,
Register lhs, Register rhs) {
Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs == no_reg) {
- DCHECK(type == kWasmI32 || type == kWasmI64);
+ DCHECK(kind == kI32 || kind == kI64);
TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
} else {
- DCHECK((type == kWasmI32 || type == kWasmI64) ||
- (type.is_reference_type() &&
+ DCHECK((kind == kI32 || kind == kI64) ||
+ (is_reference_type(kind) &&
(liftoff_cond == kEqual || liftoff_cond == kUnequal)));
TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
}
@@ -1527,7 +1549,7 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
LiftoffRegister false_value,
- ValueType type) {
+ ValueKind kind) {
return false;
}
@@ -1612,7 +1634,20 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
Register addr, Register offset_reg,
uintptr_t offset_imm, LoadType type,
uint8_t laneidx, uint32_t* protected_load_pc) {
- bailout(kSimd, "loadlane");
+ MemOperand src_op = liftoff::GetMemOp(this, addr, offset_reg, offset_imm);
+ *protected_load_pc = pc_offset();
+ LoadStoreLaneParams load_params(type.mem_type().representation(), laneidx);
+ TurboAssembler::LoadLane(load_params.sz, dst.fp().toW(), laneidx, src_op);
+}
+
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc) {
+ MemOperand dst_op = liftoff::GetMemOp(this, dst, offset, offset_imm);
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ LoadStoreLaneParams store_params(type.mem_rep(), lane);
+ TurboAssembler::StoreLane(store_params.sz, src.fp().toW(), lane, dst_op);
}
void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
@@ -1719,6 +1754,24 @@ SIMD_BINOP(i64x2, i32x4_u, MSAU32)
#undef SIMD_BINOP
+#define SIMD_BINOP(name1, name2, type) \
+ void LiftoffAssembler::emit_##name1##_extadd_pairwise_##name2( \
+ LiftoffRegister dst, LiftoffRegister src) { \
+ TurboAssembler::ExtAddPairwise(type, dst.fp().toW(), src.fp().toW()); \
+ }
+
+SIMD_BINOP(i16x8, i8x16_s, MSAS8)
+SIMD_BINOP(i16x8, i8x16_u, MSAU8)
+SIMD_BINOP(i32x4, i16x8_s, MSAS16)
+SIMD_BINOP(i32x4, i16x8_u, MSAU16)
+#undef SIMD_BINOP
+
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ mulr_q_h(dst.fp().toW(), src1.fp().toW(), src2.fp().toW());
+}
+
void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
ceq_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
@@ -1832,6 +1885,23 @@ void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
fcle_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ ceq_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ ceq_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+ nor_v(dst.fp().toW(), dst.fp().toW(), dst.fp().toW());
+}
+
+void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ add_a_d(dst.fp().toW(), src.fp().toW(), kSimd128RegZero);
+}
+
void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
fceq_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
@@ -1908,8 +1978,8 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
subv_b(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
}
-void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
liftoff::EmitAnyTrue(this, dst, src);
}
@@ -2032,17 +2102,17 @@ void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
max_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
+void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ pcnt_b(dst.fp().toW(), src.fp().toW());
+}
+
void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
LiftoffRegister src) {
xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
subv_h(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
}
-void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- liftoff::EmitAnyTrue(this, dst, src);
-}
-
void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_H);
@@ -2167,11 +2237,6 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
subv_w(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
}
-void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- liftoff::EmitAnyTrue(this, dst, src);
-}
-
void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_W);
@@ -2276,6 +2341,11 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
subv_d(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
}
+void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_D);
+}
+
void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
srli_d(kSimd128RegZero, src.fp().toW(), 63);
@@ -2335,6 +2405,16 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
mulv_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
+void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ clt_s_d(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ cle_s_d(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
+}
+
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
bclri_w(dst.fp().toW(), src.fp().toW(), 31);
@@ -2581,6 +2661,27 @@ void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
bsel_v(dst_msa, lhs_msa, rhs_msa);
}
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ ilvr_w(kSimd128RegZero, kSimd128RegZero, src.fp().toW());
+ slli_d(kSimd128RegZero, kSimd128RegZero, 32);
+ srai_d(kSimd128RegZero, kSimd128RegZero, 32);
+ ffint_s_d(dst.fp().toW(), kSimd128RegZero);
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ ilvr_w(kSimd128RegZero, kSimd128RegZero, src.fp().toW());
+ ffint_u_d(dst.fp().toW(), kSimd128RegZero);
+}
+
+void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ fexupr_d(dst.fp().toW(), src.fp().toW());
+}
+
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
ftrunc_s_w(dst.fp().toW(), src.fp().toW());
@@ -2591,6 +2692,22 @@ void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
ftrunc_u_w(dst.fp().toW(), src.fp().toW());
}
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ ftrunc_s_d(kSimd128ScratchReg, src.fp().toW());
+ sat_s_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
+ pckev_w(dst.fp().toW(), kSimd128RegZero, kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ ftrunc_u_d(kSimd128ScratchReg, src.fp().toW());
+ sat_u_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
+ pckev_w(dst.fp().toW(), kSimd128RegZero, kSimd128ScratchReg);
+}
+
void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
LiftoffRegister src) {
ffint_s_w(dst.fp().toW(), src.fp().toW());
@@ -2601,6 +2718,12 @@ void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
ffint_u_w(dst.fp().toW(), src.fp().toW());
}
+void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ fexdo_w(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2691,6 +2814,32 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
ilvl_h(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
}
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ ilvr_w(kSimd128ScratchReg, src.fp().toW(), src.fp().toW());
+ slli_d(dst.fp().toW(), kSimd128ScratchReg, 32);
+ srai_d(dst.fp().toW(), dst.fp().toW(), 32);
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ ilvl_w(kSimd128ScratchReg, src.fp().toW(), src.fp().toW());
+ slli_d(dst.fp().toW(), kSimd128ScratchReg, 32);
+ srai_d(dst.fp().toW(), dst.fp().toW(), 32);
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ ilvr_w(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ ilvl_w(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
+}
+
void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2930,17 +3079,17 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
}
-void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
- ValueType out_argument_type, int stack_bytes,
+ ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
Daddu(sp, sp, -stack_bytes);
int arg_bytes = 0;
- for (ValueType param_type : sig->parameters()) {
- liftoff::Store(this, sp, arg_bytes, *args++, param_type);
- arg_bytes += param_type.element_size_bytes();
+ for (ValueKind param_kind : sig->parameters()) {
+ liftoff::Store(this, sp, arg_bytes, *args++, param_kind);
+ arg_bytes += element_size_bytes(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
@@ -2966,8 +3115,8 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
}
// Load potential output value from the buffer on the stack.
- if (out_argument_type != kWasmStmt) {
- liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_type);
+ if (out_argument_kind != kStmt) {
+ liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_kind);
}
Daddu(sp, sp, stack_bytes);
@@ -2981,7 +3130,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
Jump(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
if (target == no_reg) {
@@ -3021,7 +3170,7 @@ void LiftoffStackSlots::Construct() {
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack:
- if (src.type() != kWasmS128) {
+ if (src.kind() != kS128) {
asm_->Ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
asm_->push(kScratchReg);
} else {
@@ -3032,7 +3181,7 @@ void LiftoffStackSlots::Construct() {
}
break;
case LiftoffAssembler::VarState::kRegister:
- liftoff::push(asm_, src.reg(), src.type());
+ liftoff::push(asm_, src.reg(), src.kind());
break;
case LiftoffAssembler::VarState::kIntConst: {
asm_->li(kScratchReg, Operand(src.i32_const()));
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 1a2e950615..644d392594 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -72,17 +72,17 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset;
}
-int LiftoffAssembler::SlotSizeForType(ValueType type) {
- switch (type.kind()) {
- case ValueType::kS128:
- return type.element_size_bytes();
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
+ switch (kind) {
+ case kS128:
+ return element_size_bytes(kind);
default:
return kStackSlotSize;
}
}
-bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- return (type.kind() == ValueType::kS128 || type.is_reference_type());
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+ return (kind == kS128 || is_reference_type(kind));
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
@@ -90,11 +90,18 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
bailout(kUnsupportedArchitecture, "LoadConstant");
}
-void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
+ bailout(kUnsupportedArchitecture, "LoadInstanceFromFrame");
+}
+
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+ int offset, int size) {
bailout(kUnsupportedArchitecture, "LoadFromInstance");
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ Register instance,
+ int offset) {
bailout(kUnsupportedArchitecture, "LoadTaggedPointerFromInstance");
}
@@ -117,7 +124,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
- LiftoffRegList pinned) {
+ LiftoffRegList pinned,
+ SkipWriteBarrier skip_write_barrier) {
bailout(kRefTypes, "GlobalSet");
}
@@ -195,36 +203,36 @@ void LiftoffAssembler::AtomicFence() { sync(); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
bailout(kUnsupportedArchitecture, "LoadCallerFrameSlot");
}
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
bailout(kUnsupportedArchitecture, "StoreCallerFrameSlot");
}
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
- ValueType type) {
+ ValueKind kind) {
bailout(kUnsupportedArchitecture, "LoadReturnStackSlot");
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
- ValueType type) {
+ ValueKind kind) {
bailout(kUnsupportedArchitecture, "MoveStackValue");
}
-void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
bailout(kUnsupportedArchitecture, "Move Register");
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
- ValueType type) {
+ ValueKind kind) {
bailout(kUnsupportedArchitecture, "Move DoubleRegister");
}
-void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
bailout(kUnsupportedArchitecture, "Spill register");
}
@@ -232,7 +240,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
bailout(kUnsupportedArchitecture, "Spill value");
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
bailout(kUnsupportedArchitecture, "Fill");
}
@@ -520,7 +528,7 @@ void LiftoffAssembler::emit_jump(Register target) {
}
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueType type,
+ Label* label, ValueKind kind,
Register lhs, Register rhs) {
bailout(kUnsupportedArchitecture, "emit_cond_jump");
}
@@ -566,7 +574,7 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
LiftoffRegister false_value,
- ValueType type) {
+ ValueKind kind) {
return false;
}
@@ -590,6 +598,13 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
bailout(kSimd, "loadlane");
}
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc) {
+ bailout(kSimd, "store lane");
+}
+
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -693,6 +708,21 @@ void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "pmax unimplemented");
}
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f64x2.convert_low_i32x4_s");
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f64x2.convert_low_i32x4_u");
+}
+
+void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f64x2.promote_low_f32x4");
+}
+
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_f32x4_splat");
@@ -813,6 +843,11 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i64x2neg");
}
+void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v64x2_alltrue");
+}
+
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i64x2_shl");
@@ -883,6 +918,26 @@ void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
bailout(kSimd, "i64x2_bitmask");
}
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_sconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_sconvert_i32x4_high");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_uconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_uconvert_i32x4_high");
+}
+
void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -912,11 +967,6 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4neg");
}
-void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "v32x4_anytrue");
-}
-
void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "v32x4_alltrue");
@@ -1004,6 +1054,16 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
bailout(kSimd, "i32x4_dot_i16x8_s");
}
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.extadd_pairwise_i16x8_s");
+}
+
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.extadd_pairwise_i16x8_u");
+}
+
void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -1038,11 +1098,6 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i16x8neg");
}
-void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "v16x8_anytrue");
-}
-
void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "v16x8_alltrue");
@@ -1161,6 +1216,16 @@ void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i16x8replacelane");
}
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i16x8.extadd_pairwise_i8x16_s");
+}
+
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i16x8.extadd_pairwise_i8x16_u");
+}
+
void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
@@ -1185,6 +1250,12 @@ void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_s(LiftoffRegister dst,
bailout(kSimd, "i16x8.extmul_high_i8x16_s unsupported");
}
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i16x8_q15mulr_sat_s");
+}
+
void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -1199,6 +1270,11 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
bailout(kSimd, "i8x16_shuffle");
}
+void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i8x16.popcnt");
+}
+
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i8x16splat");
@@ -1222,8 +1298,8 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i8x16neg");
}
-void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
bailout(kSimd, "v8x16_anytrue");
}
@@ -1400,6 +1476,26 @@ void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i32x4ge_u");
}
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2.eq");
+}
+
+void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2_ne");
+}
+
+void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2.gt_s");
+}
+
+void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2.ge_s");
+}
+
void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_f32x4_eq");
@@ -1491,6 +1587,11 @@ void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
bailout(kSimd, "f32x4_uconvert_i32x4");
}
+void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f32x4.demote_f64x2_zero");
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -1555,6 +1656,16 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4_uconvert_i16x8_high");
}
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.trunc_sat_f64x2_s_zero");
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.trunc_sat_f64x2_u_zero");
+}
+
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -1588,6 +1699,11 @@ void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4_abs");
}
+void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2.abs");
+}
+
void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16sub");
@@ -1647,10 +1763,10 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
bailout(kUnsupportedArchitecture, "DropStackSlotsAndRet");
}
-void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
- ValueType out_argument_type, int stack_bytes,
+ ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
bailout(kUnsupportedArchitecture, "CallC");
}
@@ -1663,7 +1779,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
bailout(kUnsupportedArchitecture, "TailCallNativeWasmCode");
}
-void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
bailout(kUnsupportedArchitecture, "CallIndirect");
diff --git a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
new file mode 100644
index 0000000000..2f624f79f5
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
@@ -0,0 +1,2516 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_RISCV64_LIFTOFF_ASSEMBLER_RISCV64_H_
+#define V8_WASM_BASELINE_RISCV64_LIFTOFF_ASSEMBLER_RISCV64_H_
+
+#include "src/base/platform/wrappers.h"
+#include "src/heap/memory-chunk.h"
+#include "src/wasm/baseline/liftoff-assembler.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace liftoff {
+
+inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
+ switch (liftoff_cond) {
+ case kEqual:
+ return eq;
+ case kUnequal:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedLessEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kSignedGreaterEqual:
+ return ge;
+ case kUnsignedLessThan:
+ return ult;
+ case kUnsignedLessEqual:
+ return ule;
+ case kUnsignedGreaterThan:
+ return ugt;
+ case kUnsignedGreaterEqual:
+ return uge;
+ }
+}
+
+// Liftoff Frames.
+//
+// slot Frame
+// +--------------------+---------------------------
+// n+4 | optional padding slot to keep the stack 16 byte aligned.
+// n+3 | parameter n |
+// ... | ... |
+// 4 | parameter 1 | or parameter 2
+// 3 | parameter 0 | or parameter 1
+// 2 | (result address) | or parameter 0
+// -----+--------------------+---------------------------
+// 1 | return addr (ra) |
+// 0 | previous frame (fp)|
+// -----+--------------------+ <-- frame ptr (fp)
+// -1 | 0xa: WASM |
+// -2 | instance |
+// -----+--------------------+---------------------------
+// -3 | slot 0 | ^
+// -4 | slot 1 | |
+// | | Frame slots
+// | | |
+// | | v
+// | optional padding slot to keep the stack 16 byte aligned.
+// -----+--------------------+ <-- stack ptr (sp)
+//
+
+// fp-8 holds the stack marker, fp-16 is the instance parameter.
+constexpr int kInstanceOffset = 16;
+
+inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
+
+inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
+
+inline MemOperand GetMemOp(LiftoffAssembler* assm, Register addr,
+ Register offset, uintptr_t offset_imm) {
+ if (is_uint31(offset_imm)) {
+ int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
+ if (offset == no_reg) return MemOperand(addr, offset_imm32);
+ assm->Add64(kScratchReg, addr, offset);
+ return MemOperand(kScratchReg, offset_imm32);
+ }
+ // Offset immediate does not fit in 31 bits.
+ assm->li(kScratchReg, offset_imm);
+ assm->Add64(kScratchReg, kScratchReg, addr);
+ if (offset != no_reg) {
+ assm->Add64(kScratchReg, kScratchReg, offset);
+ }
+ return MemOperand(kScratchReg, 0);
+}
+
+inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
+ ValueType type) {
+ switch (type.kind()) {
+ case ValueType::kI32:
+ assm->Lw(dst.gp(), src);
+ break;
+ case ValueType::kI64:
+ case ValueType::kRef:
+ case ValueType::kOptRef:
+ case ValueType::kRtt:
+ assm->Ld(dst.gp(), src);
+ break;
+ case ValueType::kF32:
+ assm->LoadFloat(dst.fp(), src);
+ break;
+ case ValueType::kF64:
+ assm->LoadDouble(dst.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
+ LiftoffRegister src, ValueType type) {
+ MemOperand dst(base, offset);
+ switch (type.kind()) {
+ case ValueType::kI32:
+ assm->Usw(src.gp(), dst);
+ break;
+ case ValueType::kI64:
+ case ValueType::kOptRef:
+ case ValueType::kRef:
+ case ValueType::kRtt:
+ assm->Usd(src.gp(), dst);
+ break;
+ case ValueType::kF32:
+ assm->UStoreFloat(src.fp(), dst);
+ break;
+ case ValueType::kF64:
+ assm->UStoreDouble(src.fp(), dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
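+// Pushes |reg| onto the stack; every value kind occupies a full
+// pointer-sized stack slot.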
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
+ switch (type.kind()) {
+ case ValueType::kI32:
+ assm->addi(sp, sp, -kSystemPointerSize);
+ assm->Sw(reg.gp(), MemOperand(sp, 0));
+ break;
+ case ValueType::kI64:
+ case ValueType::kOptRef:
+ case ValueType::kRef:
+ case ValueType::kRtt:
+ assm->push(reg.gp());
+ break;
+ case ValueType::kF32:
+ assm->addi(sp, sp, -kSystemPointerSize);
+ assm->StoreFloat(reg.fp(), MemOperand(sp, 0));
+ break;
+ case ValueType::kF64:
+ assm->addi(sp, sp, -kSystemPointerSize);
+ assm->StoreDouble(reg.fp(), MemOperand(sp, 0));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+#if defined(V8_TARGET_BIG_ENDIAN)
+inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
+ LoadType type, LiftoffRegList pinned) {
+ bool is_float = false;
+ LiftoffRegister tmp = dst;
+ switch (type.value()) {
+ case LoadType::kI64Load8U:
+ case LoadType::kI64Load8S:
+ case LoadType::kI32Load8U:
+ case LoadType::kI32Load8S:
+ // No need to change endianness for byte size.
+ return;
+ case LoadType::kF32Load:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpReg, pinned);
+ assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, dst);
+ V8_FALLTHROUGH;
+ case LoadType::kI64Load32U:
+ assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 4);
+ break;
+ case LoadType::kI32Load:
+ case LoadType::kI64Load32S:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
+ break;
+ case LoadType::kI32Load16S:
+ case LoadType::kI64Load16S:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
+ break;
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
+ assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
+ break;
+ case LoadType::kF64Load:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpReg, pinned);
+ assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, dst);
+ V8_FALLTHROUGH;
+ case LoadType::kI64Load:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (is_float) {
+ switch (type.value()) {
+ case LoadType::kF32Load:
+ assm->emit_type_conversion(kExprF32ReinterpretI32, dst, tmp);
+ break;
+ case LoadType::kF64Load:
+ assm->emit_type_conversion(kExprF64ReinterpretI64, dst, tmp);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned) {
+ bool is_float = false;
+ LiftoffRegister tmp = src;
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ // No need to change endianness for byte size.
+ return;
+ case StoreType::kF32Store:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpReg, pinned);
+ assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
+ break;
+ case StoreType::kI32Store16:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
+ break;
+ case StoreType::kF64Store:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpReg, pinned);
+ assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
+ V8_FALLTHROUGH;
+ case StoreType::kI64Store:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
+ break;
+ case StoreType::kI64Store32:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
+ break;
+ case StoreType::kI64Store16:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (is_float) {
+ switch (type.value()) {
+ case StoreType::kF32Store:
+ assm->emit_type_conversion(kExprF32ReinterpretI32, src, tmp);
+ break;
+ case StoreType::kF64Store:
+ assm->emit_type_conversion(kExprF64ReinterpretI64, src, tmp);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+#endif // V8_TARGET_BIG_ENDIAN
+
+} // namespace liftoff
+
+int LiftoffAssembler::PrepareStackFrame() {
+ int offset = pc_offset();
+ // When the constant representing the frame size cannot be encoded in a
+ // 12-bit immediate, adding it to sp takes three instructions, so reserve
+ // space for that case.
+ Add64(sp, sp, Operand(0L));
+ nop();
+ nop();
+ return offset;
+}
+
+void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
+ int stack_param_delta) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+
+ // Push the return address and frame pointer to complete the stack frame.
+ Ld(scratch, MemOperand(fp, 8));
+ Push(scratch);
+ Ld(scratch, MemOperand(fp, 0));
+ Push(scratch);
+
+ // Shift the whole frame upwards.
+ int slot_count = num_callee_stack_params + 2;
+ for (int i = slot_count - 1; i >= 0; --i) {
+ Ld(scratch, MemOperand(sp, i * 8));
+ Sd(scratch, MemOperand(fp, (i - stack_param_delta) * 8));
+ }
+
+ // Set the new stack and frame pointer.
+ Add64(sp, fp, -stack_param_delta * 8);
+ Pop(ra, fp);
+}
+
+void LiftoffAssembler::AlignFrameSize() {}
+
+void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
+ int frame_size = GetTotalFrameSize() - kSystemPointerSize;
+ // We can't run out of space; just pass anything big enough so the assembler
+ // does not try to grow the buffer.
+ constexpr int kAvailableSpace = 256;
+ TurboAssembler patching_assembler(
+ nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
+ // If the frame size fits in a 12-bit immediate, a single addi is generated
+ // and the two nops stay untouched. Otherwise li loads the constant into a
+ // scratch register and a third instruction adds it to sp.
+ patching_assembler.Add64(sp, sp, Operand(-frame_size));
+}
+
+void LiftoffAssembler::FinishCode() {}
+
+void LiftoffAssembler::AbortCompilation() {}
+
+// static
+constexpr int LiftoffAssembler::StaticStackFrameSize() {
+ return liftoff::kInstanceOffset;
+}
+
+int LiftoffAssembler::SlotSizeForType(ValueType type) {
+ switch (type.kind()) {
+ case ValueType::kS128:
+ return type.element_size_bytes();
+ default:
+ return kStackSlotSize;
+ }
+}
+
+bool LiftoffAssembler::NeedsAlignment(ValueType type) {
+ switch (type.kind()) {
+ case ValueType::kS128:
+ return true;
+ default:
+ // No alignment because all other types are kStackSlotSize.
+ return false;
+ }
+}
+
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ switch (value.type().kind()) {
+ case ValueType::kI32:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
+ break;
+ case ValueType::kI64:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
+ break;
+ case ValueType::kF32:
+ TurboAssembler::LoadFPRImmediate(reg.fp(),
+ value.to_f32_boxed().get_bits());
+ break;
+ case ValueType::kF64:
+ TurboAssembler::LoadFPRImmediate(reg.fp(),
+ value.to_f64_boxed().get_bits());
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
+ int size) {
+ DCHECK_LE(0, offset);
+ Ld(dst, liftoff::GetInstanceOperand());
+ DCHECK(size == 4 || size == 8);
+ if (size == 4) {
+ Lw(dst, MemOperand(dst, offset));
+ } else {
+ Ld(dst, MemOperand(dst, offset));
+ }
+}
+
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ int32_t offset) {
+ LoadFromInstance(dst, offset, kTaggedSize);
+}
+
+void LiftoffAssembler::SpillInstance(Register instance) {
+ Sd(instance, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::FillInstanceInto(Register dst) {
+ Ld(dst, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ int32_t offset_imm,
+ LiftoffRegList pinned) {
+ STATIC_ASSERT(kTaggedSize == kInt64Size);
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+ Ld(dst, src_op);
+}
+
+void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
+ Register offset_reg,
+ int32_t offset_imm,
+ LiftoffRegister src,
+ LiftoffRegList pinned) {
+ STATIC_ASSERT(kTaggedSize == kInt64Size);
+ Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+ Sd(src.gp(), dst_op);
+
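+ // Write barrier: skipped when the destination page does not track outgoing
+ // pointers, when the stored value is a Smi, or when the value's page does
+ // not track incoming pointers.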
+ Label write_barrier;
+ Label exit;
+ CheckPageFlag(dst_addr, scratch,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ &write_barrier);
+ Branch(&exit);
+ bind(&write_barrier);
+ JumpIfSmi(src.gp(), &exit);
+ CheckPageFlag(src.gp(), scratch,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &exit);
+ Add64(scratch, dst_addr, offset_imm);
+ CallRecordWriteStub(dst_addr, scratch, EMIT_REMEMBERED_SET, kSaveFPRegs,
+ wasm::WasmCode::kRecordWrite);
+ bind(&exit);
+}
+
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc, bool is_load_mem) {
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+
+ if (protected_load_pc) *protected_load_pc = pc_offset();
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ case LoadType::kI64Load8U:
+ Lbu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load8S:
+ case LoadType::kI64Load8S:
+ Lb(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
+ TurboAssembler::Ulhu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16S:
+ case LoadType::kI64Load16S:
+ TurboAssembler::Ulh(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load32U:
+ TurboAssembler::Ulwu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load:
+ case LoadType::kI64Load32S:
+ TurboAssembler::Ulw(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load:
+ TurboAssembler::Uld(dst.gp(), src_op);
+ break;
+ case LoadType::kF32Load:
+ TurboAssembler::ULoadFloat(dst.fp(), src_op);
+ break;
+ case LoadType::kF64Load:
+ TurboAssembler::ULoadDouble(dst.fp(), src_op);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+#if defined(V8_TARGET_BIG_ENDIAN)
+ if (is_load_mem) {
+ pinned.set(src_op.rm());
+ liftoff::ChangeEndiannessLoad(this, dst, type, pinned);
+ }
+#endif
+}
+
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc, bool is_store_mem) {
+ MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+
+#if defined(V8_TARGET_BIG_ENDIAN)
+ if (is_store_mem) {
+ pinned.set(dst_op.rm());
+ LiftoffRegister tmp = GetUnusedRegister(src.reg_class(), pinned);
+ // Save original value.
+ Move(tmp, src, type.value_type());
+
+ src = tmp;
+ pinned.set(tmp);
+ liftoff::ChangeEndiannessStore(this, src, type, pinned);
+ }
+#endif
+
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8:
+ Sb(src.gp(), dst_op);
+ break;
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16:
+ TurboAssembler::Ush(src.gp(), dst_op);
+ break;
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32:
+ TurboAssembler::Usw(src.gp(), dst_op);
+ break;
+ case StoreType::kI64Store:
+ TurboAssembler::Usd(src.gp(), dst_op);
+ break;
+ case StoreType::kF32Store:
+ TurboAssembler::UStoreFloat(src.fp(), dst_op);
+ break;
+ case StoreType::kF64Store:
+ TurboAssembler::UStoreDouble(src.fp(), dst_op);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LoadType type, LiftoffRegList pinned) {
+ bailout(kAtomics, "AtomicLoad");
+}
+
+void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned) {
+ bailout(kAtomics, "AtomicStore");
+}
+
+void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicAdd");
+}
+
+void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicSub");
+}
+
+void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicAnd");
+}
+
+void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicOr");
+}
+
+void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicXor");
+}
+
+void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm,
+ LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicExchange");
+}
+
+void LiftoffAssembler::AtomicCompareExchange(
+ Register dst_addr, Register offset_reg, uintptr_t offset_imm,
+ LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
+ StoreType type) {
+ bailout(kAtomics, "AtomicCompareExchange");
+}
+
+void LiftoffAssembler::AtomicFence() { sync(); }
+
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ MemOperand src(fp, kSystemPointerSize * (caller_slot_idx + 1));
+ liftoff::Load(this, dst, src, type);
+}
+
+void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
+ liftoff::Store(this, fp, offset, src, type);
+}
+
+void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
+ ValueType type) {
+ liftoff::Load(this, dst, MemOperand(sp, offset), type);
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
+ ValueType type) {
+ DCHECK_NE(dst_offset, src_offset);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
+ Fill(reg, src_offset, type);
+ Spill(dst_offset, reg, type);
+}
+
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+ DCHECK_NE(dst, src);
+ // TODO(ksreten): Handle different sizes here.
+ TurboAssembler::Move(dst, src);
+}
+
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ DCHECK_NE(dst, src);
+ TurboAssembler::Move(dst, src);
+}
+
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
+ RecordUsedSpillOffset(offset);
+ MemOperand dst = liftoff::GetStackSlot(offset);
+ switch (type.kind()) {
+ case ValueType::kI32:
+ Sw(reg.gp(), dst);
+ break;
+ case ValueType::kI64:
+ case ValueType::kRef:
+ case ValueType::kOptRef:
+ case ValueType::kRtt:
+ case ValueType::kRttWithDepth:
+ Sd(reg.gp(), dst);
+ break;
+ case ValueType::kF32:
+ StoreFloat(reg.fp(), dst);
+ break;
+ case ValueType::kF64:
+ TurboAssembler::StoreDouble(reg.fp(), dst);
+ break;
+ case ValueType::kS128:
+ bailout(kSimd, "Spill S128");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Spill(int offset, WasmValue value) {
+ RecordUsedSpillOffset(offset);
+ MemOperand dst = liftoff::GetStackSlot(offset);
+ switch (value.type().kind()) {
+ case ValueType::kI32: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
+ TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
+ Sw(tmp.gp(), dst);
+ break;
+ }
+ case ValueType::kI64:
+ case ValueType::kRef:
+ case ValueType::kOptRef: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
+ TurboAssembler::li(tmp.gp(), value.to_i64());
+ Sd(tmp.gp(), dst);
+ break;
+ }
+ default:
+ // kWasmF32 and kWasmF64 are unreachable, since those
+ // constants are not tracked.
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
+ MemOperand src = liftoff::GetStackSlot(offset);
+ switch (type.kind()) {
+ case ValueType::kI32:
+ Lw(reg.gp(), src);
+ break;
+ case ValueType::kI64:
+ case ValueType::kRef:
+ case ValueType::kOptRef:
+ Ld(reg.gp(), src);
+ break;
+ case ValueType::kF32:
+ LoadFloat(reg.fp(), src);
+ break;
+ case ValueType::kF64:
+ TurboAssembler::LoadDouble(reg.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
+ UNREACHABLE();
+}
+
+void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
+ DCHECK_LT(0, size);
+ RecordUsedSpillOffset(start + size);
+
+ if (size <= 12 * kStackSlotSize) {
+ // Special straight-line code for up to 12 slots. Generates one
+ // instruction per slot (<= 12 instructions total).
+ uint32_t remainder = size;
+ for (; remainder >= kStackSlotSize; remainder -= kStackSlotSize) {
+ Sd(zero_reg, liftoff::GetStackSlot(start + remainder));
+ }
+ DCHECK(remainder == 4 || remainder == 0);
+ if (remainder) {
+ Sw(zero_reg, liftoff::GetStackSlot(start + remainder));
+ }
+ } else {
+ // General case for bigger counts (12 instructions).
+ // Use a0 for start address (inclusive), a1 for end address (exclusive).
+ Push(a1, a0);
+ Add64(a0, fp, Operand(-start - size));
+ Add64(a1, fp, Operand(-start));
+
+ Label loop;
+ bind(&loop);
+ Sd(zero_reg, MemOperand(a0));
+ addi(a0, a0, kSystemPointerSize);
+ BranchShort(&loop, ne, a0, Operand(a1));
+
+ Pop(a1, a0);
+ }
+}
+
+void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) {
+ TurboAssembler::Clz64(dst.gp(), src.gp());
+}
+
+void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
+ TurboAssembler::Ctz64(dst.gp(), src.gp());
+}
+
+bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ TurboAssembler::Popcnt64(dst.gp(), src.gp());
+ return true;
+}
+
+void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
+ TurboAssembler::Mul32(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero,
+ Label* trap_div_unrepresentable) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+
+ // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
+ TurboAssembler::CompareI(kScratchReg, lhs, Operand(kMinInt), ne);
+ TurboAssembler::CompareI(kScratchReg2, rhs, Operand(-1), ne);
+ add(kScratchReg, kScratchReg, kScratchReg2);
+ TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+ Operand(zero_reg));
+
+ TurboAssembler::Div32(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ TurboAssembler::Divu32(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ TurboAssembler::Mod32(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ TurboAssembler::Modu32(dst, lhs, rhs);
+}
+
+#define I32_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ instruction(dst, lhs, rhs); \
+ }
+
+// clang-format off
+I32_BINOP(add, addw)
+I32_BINOP(sub, subw)
+I32_BINOP(and, and_)
+I32_BINOP(or, or_)
+I32_BINOP(xor, xor_)
+// clang-format on
+
+#undef I32_BINOP
+
+#define I32_BINOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register lhs, \
+ int32_t imm) { \
+ instruction(dst, lhs, Operand(imm)); \
+ }
+
+// clang-format off
+I32_BINOP_I(add, Add32)
+I32_BINOP_I(sub, Sub32)
+I32_BINOP_I(and, And)
+I32_BINOP_I(or, Or)
+I32_BINOP_I(xor, Xor)
+// clang-format on
+
+#undef I32_BINOP_I
+
+void LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
+ TurboAssembler::Clz32(dst, src);
+}
+
+void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
+ TurboAssembler::Ctz32(dst, src);
+}
+
+bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
+ TurboAssembler::Popcnt32(dst, src);
+ return true;
+}
+
+#define I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \
+ Register amount) { \
+ instruction(dst, src, amount); \
+ }
+#define I32_SHIFTOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register src, \
+ int amount) { \
+ instruction(dst, src, amount & 31); \
+ }
+
+I32_SHIFTOP(shl, sllw)
+I32_SHIFTOP(sar, sraw)
+I32_SHIFTOP(shr, srlw)
+
+I32_SHIFTOP_I(shl, slliw)
+I32_SHIFTOP_I(sar, sraiw)
+I32_SHIFTOP_I(shr, srliw)
+
+#undef I32_SHIFTOP
+#undef I32_SHIFTOP_I
+
+void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ TurboAssembler::Mul64(dst.gp(), lhs.gp(), rhs.gp());
+}
+
+bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero,
+ Label* trap_div_unrepresentable) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+
+ // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable.
+ TurboAssembler::CompareI(kScratchReg, lhs.gp(),
+ Operand(std::numeric_limits<int64_t>::min()), ne);
+ TurboAssembler::CompareI(kScratchReg2, rhs.gp(), Operand(-1), ne);
+ add(kScratchReg, kScratchReg, kScratchReg2);
+ TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+ Operand(zero_reg));
+
+ TurboAssembler::Div64(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ TurboAssembler::Divu64(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ TurboAssembler::Mod64(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ TurboAssembler::Modu64(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+#define I64_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name( \
+ LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ instruction(dst.gp(), lhs.gp(), rhs.gp()); \
+ }
+
+// clang-format off
+I64_BINOP(add, add)
+I64_BINOP(sub, sub)
+I64_BINOP(and, and_)
+I64_BINOP(or, or_)
+I64_BINOP(xor, xor_)
+// clang-format on
+
+#undef I64_BINOP
+
+#define I64_BINOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name##i( \
+ LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \
+ instruction(dst.gp(), lhs.gp(), Operand(imm)); \
+ }
+
+// clang-format off
+I64_BINOP_I(and, And)
+I64_BINOP_I(or, Or)
+I64_BINOP_I(xor, Xor)
+// clang-format on
+
+#undef I64_BINOP_I
+
+#define I64_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name( \
+ LiftoffRegister dst, LiftoffRegister src, Register amount) { \
+ instruction(dst.gp(), src.gp(), amount); \
+ }
+#define I64_SHIFTOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name##i(LiftoffRegister dst, \
+ LiftoffRegister src, int amount) { \
+ DCHECK(is_uint6(amount)); \
+ instruction(dst.gp(), src.gp(), amount); \
+ }
+
+I64_SHIFTOP(shl, sll)
+I64_SHIFTOP(sar, sra)
+I64_SHIFTOP(shr, srl)
+
+I64_SHIFTOP_I(shl, slli)
+I64_SHIFTOP_I(sar, srai)
+I64_SHIFTOP_I(shr, srli)
+
+#undef I64_SHIFTOP
+#undef I64_SHIFTOP_I
+
+void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
+ int64_t imm) {
+ TurboAssembler::Add64(dst.gp(), lhs.gp(), Operand(imm));
+}
+void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) {
+ addw(dst, src, zero_reg);
+}
+
+void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
+ TurboAssembler::Neg_s(dst, src);
+}
+
+void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
+ TurboAssembler::Neg_d(dst, src);
+}
+
+void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ TurboAssembler::Float32Min(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ TurboAssembler::Float32Max(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ bailout(kComplexOperation, "f32_copysign");
+}
+
+void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ TurboAssembler::Float64Min(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ TurboAssembler::Float64Max(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ bailout(kComplexOperation, "f64_copysign");
+}
+
+#define FP_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
+ DoubleRegister rhs) { \
+ instruction(dst, lhs, rhs); \
+ }
+#define FP_UNOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ instruction(dst, src); \
+ }
+#define FP_UNOP_RETURN_TRUE(name, instruction) \
+ bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ instruction(dst, src, kScratchDoubleReg); \
+ return true; \
+ }
+
+FP_BINOP(f32_add, fadd_s)
+FP_BINOP(f32_sub, fsub_s)
+FP_BINOP(f32_mul, fmul_s)
+FP_BINOP(f32_div, fdiv_s)
+FP_UNOP(f32_abs, fabs_s)
+FP_UNOP_RETURN_TRUE(f32_ceil, Ceil_s_s)
+FP_UNOP_RETURN_TRUE(f32_floor, Floor_s_s)
+FP_UNOP_RETURN_TRUE(f32_trunc, Trunc_s_s)
+FP_UNOP_RETURN_TRUE(f32_nearest_int, Round_s_s)
+FP_UNOP(f32_sqrt, fsqrt_s)
+FP_BINOP(f64_add, fadd_d)
+FP_BINOP(f64_sub, fsub_d)
+FP_BINOP(f64_mul, fmul_d)
+FP_BINOP(f64_div, fdiv_d)
+FP_UNOP(f64_abs, fabs_d)
+FP_UNOP_RETURN_TRUE(f64_ceil, Ceil_d_d)
+FP_UNOP_RETURN_TRUE(f64_floor, Floor_d_d)
+FP_UNOP_RETURN_TRUE(f64_trunc, Trunc_d_d)
+FP_UNOP_RETURN_TRUE(f64_nearest_int, Round_d_d)
+FP_UNOP(f64_sqrt, fsqrt_d)
+
+#undef FP_BINOP
+#undef FP_UNOP
+#undef FP_UNOP_RETURN_TRUE
+
+bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
+ LiftoffRegister dst,
+ LiftoffRegister src, Label* trap) {
+ switch (opcode) {
+ case kExprI32ConvertI64:
+ // According to the WebAssembly spec, i32.wrap_i64 keeps only the low 32
+ // bits of the value. Sign-extending the low word yields the canonical
+ // representation of an I32 in a 64-bit register.
+ TurboAssembler::SignExtendWord(dst.gp(), src.gp());
+ return true;
+ case kExprI32SConvertF32:
+ case kExprI32UConvertF32:
+ case kExprI32SConvertF64:
+ case kExprI32UConvertF64:
+ case kExprI64SConvertF32:
+ case kExprI64UConvertF32:
+ case kExprI64SConvertF64:
+ case kExprI64UConvertF64:
+ case kExprF32ConvertF64: {
+ // Real conversion; if src is out of bounds for the target integer type,
+ // kScratchReg is set to 0.
+ switch (opcode) {
+ case kExprI32SConvertF32:
+ Trunc_w_s(dst.gp(), src.fp(), kScratchReg);
+ break;
+ case kExprI32UConvertF32:
+ Trunc_uw_s(dst.gp(), src.fp(), kScratchReg);
+ break;
+ case kExprI32SConvertF64:
+ Trunc_w_d(dst.gp(), src.fp(), kScratchReg);
+ break;
+ case kExprI32UConvertF64:
+ Trunc_uw_d(dst.gp(), src.fp(), kScratchReg);
+ break;
+ case kExprI64SConvertF32:
+ Trunc_l_s(dst.gp(), src.fp(), kScratchReg);
+ break;
+ case kExprI64UConvertF32:
+ Trunc_ul_s(dst.gp(), src.fp(), kScratchReg);
+ break;
+ case kExprI64SConvertF64:
+ Trunc_l_d(dst.gp(), src.fp(), kScratchReg);
+ break;
+ case kExprI64UConvertF64:
+ Trunc_ul_d(dst.gp(), src.fp(), kScratchReg);
+ break;
+ case kExprF32ConvertF64:
+ fcvt_s_d(dst.fp(), src.fp());
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ // Trap if the conversion was out of bounds (kScratchReg == 0).
+ TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
+
+ return true;
+ }
+ case kExprI32ReinterpretF32:
+ TurboAssembler::ExtractLowWordFromF64(dst.gp(), src.fp());
+ return true;
+ case kExprI64SConvertI32:
+ TurboAssembler::SignExtendWord(dst.gp(), src.gp());
+ return true;
+ case kExprI64UConvertI32:
+ TurboAssembler::ZeroExtendWord(dst.gp(), src.gp());
+ return true;
+ case kExprI64ReinterpretF64:
+ fmv_x_d(dst.gp(), src.fp());
+ return true;
+ case kExprF32SConvertI32: {
+ TurboAssembler::Cvt_s_w(dst.fp(), src.gp());
+ return true;
+ }
+ case kExprF32UConvertI32:
+ TurboAssembler::Cvt_s_uw(dst.fp(), src.gp());
+ return true;
+ case kExprF32ReinterpretI32:
+ fmv_w_x(dst.fp(), src.gp());
+ return true;
+ case kExprF64SConvertI32: {
+ TurboAssembler::Cvt_d_w(dst.fp(), src.gp());
+ return true;
+ }
+ case kExprF64UConvertI32:
+ TurboAssembler::Cvt_d_uw(dst.fp(), src.gp());
+ return true;
+ case kExprF64ConvertF32:
+ fcvt_d_s(dst.fp(), src.fp());
+ return true;
+ case kExprF64ReinterpretI64:
+ fmv_d_x(dst.fp(), src.gp());
+ return true;
+ case kExprI32SConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF32");
+ return true;
+ case kExprI32UConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF32");
+ return true;
+ case kExprI32SConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF64");
+ return true;
+ case kExprI32UConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF64");
+ return true;
+ case kExprI64SConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF32");
+ return true;
+ case kExprI64UConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF32");
+ return true;
+ case kExprI64SConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF64");
+ return true;
+ case kExprI64UConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF64");
+ return true;
+ default:
+ return false;
+ }
+}
+
+void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
+ slliw(dst, src, 32 - 8);
+ sraiw(dst, dst, 32 - 8);
+}
+
+void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
+ slliw(dst, src, 32 - 16);
+ sraiw(dst, dst, 32 - 16);
+}
+
+void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
+ LiftoffRegister src) {
+ slli(dst.gp(), src.gp(), 64 - 8);
+ srai(dst.gp(), dst.gp(), 64 - 8);
+}
+
+void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
+ LiftoffRegister src) {
+ slli(dst.gp(), src.gp(), 64 - 16);
+ srai(dst.gp(), dst.gp(), 64 - 16);
+}
+
+void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
+ LiftoffRegister src) {
+ slli(dst.gp(), src.gp(), 64 - 32);
+ srai(dst.gp(), dst.gp(), 64 - 32);
+}
+
+void LiftoffAssembler::emit_jump(Label* label) {
+ TurboAssembler::Branch(label);
+}
+
+void LiftoffAssembler::emit_jump(Register target) {
+ TurboAssembler::Jump(target);
+}
+
+void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
+ Label* label, ValueType type,
+ Register lhs, Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ if (rhs == no_reg) {
+ DCHECK(type == kWasmI32 || type == kWasmI64);
+ TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
+ } else {
+ DCHECK((type == kWasmI32 || type == kWasmI64) ||
+ (type.is_reference_type() &&
+ (liftoff_cond == kEqual || liftoff_cond == kUnequal)));
+ TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
+ }
+}
+
+void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
+ Label* label, Register lhs,
+ int32_t imm) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ TurboAssembler::Branch(label, cond, lhs, Operand(imm));
+}
+
+void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
+ TurboAssembler::Sltu(dst, src, 1);
+}
+
+void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, Register lhs,
+ Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ TurboAssembler::CompareI(dst, lhs, Operand(rhs), cond);
+}
+
+void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
+ TurboAssembler::Sltu(dst, src.gp(), 1);
+}
+
+void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ TurboAssembler::CompareI(dst, lhs.gp(), Operand(rhs.gp()), cond);
+}
+
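+// Liftoff encodes floating-point comparisons with the equality and
+// "unsigned" condition codes, so only those are mapped to FPU conditions.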
+static FPUCondition ConditionToConditionCmpFPU(LiftoffCondition condition) {
+ switch (condition) {
+ case kEqual:
+ return EQ;
+ case kUnequal:
+ return NE;
+ case kUnsignedLessThan:
+ return LT;
+ case kUnsignedGreaterEqual:
+ return GE;
+ case kUnsignedLessEqual:
+ return LE;
+ case kUnsignedGreaterThan:
+ return GT;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
+void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ FPUCondition fcond = ConditionToConditionCmpFPU(liftoff_cond);
+ TurboAssembler::CompareF32(dst, fcond, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ FPUCondition fcond = ConditionToConditionCmpFPU(liftoff_cond);
+ TurboAssembler::CompareF64(dst, fcond, lhs, rhs);
+}
+
+bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
+ LiftoffRegister true_value,
+ LiftoffRegister false_value,
+ ValueType type) {
+ return false;
+}
+
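+// Tests the Smi tag of |obj| and branches to |target| on Smi or non-Smi,
+// depending on |mode|.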
+void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
+ SmiCheckMode mode) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ And(scratch, obj, Operand(kSmiTagMask));
+ Condition condition = mode == kJumpOnSmi ? eq : ne;
+ Branch(target, condition, scratch, Operand(zero_reg));
+}
+
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ bailout(kSimd, "load extend and load splat unimplemented");
+}
+
+void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
+ Register addr, Register offset_reg,
+ uintptr_t offset_imm, LoadType type,
+ uint8_t laneidx, uint32_t* protected_load_pc) {
+ bailout(kSimd, "loadlane");
+}
+
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc) {
+ bailout(kSimd, "StoreLane");
+}
+
+void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16],
+ bool is_swizzle) {
+ bailout(kSimd, "emit_i8x16_shuffle");
+}
+
+void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_swizzle");
+}
+
+void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_splat");
+}
+
+void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_splat");
+}
+
+void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_splat");
+}
+
+void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_splat");
+}
+
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_eq");
+}
+
+void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_splat");
+}
+
+void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_splat");
+}
+
+#define SIMD_BINOP(name1, name2) \
+ void LiftoffAssembler::emit_##name1##_extmul_low_##name2( \
+ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
+ bailout(kSimd, "emit_" #name1 "_extmul_low_" #name2); \
+ } \
+ void LiftoffAssembler::emit_##name1##_extmul_high_##name2( \
+ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
+ bailout(kSimd, "emit_" #name1 "_extmul_high_" #name2); \
+ }
+
+SIMD_BINOP(i16x8, i8x16_s)
+SIMD_BINOP(i16x8, i8x16_u)
+
+SIMD_BINOP(i32x4, i16x8_s)
+SIMD_BINOP(i32x4, i16x8_u)
+
+SIMD_BINOP(i64x2, i32x4_s)
+SIMD_BINOP(i64x2, i32x4_u)
+
+#undef SIMD_BINOP
+
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i16x8_q15mulr_sat_s");
+}
+
+void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_bitmask");
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_sconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_sconvert_i32x4_high");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_uconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_uconvert_i32x4_high");
+}
+
+void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_eq");
+}
+
+void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ne");
+}
+
+void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_gt_s");
+}
+
+void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_gt_u");
+}
+
+void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ge_s");
+}
+
+void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ge_u");
+}
+
+void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_eq");
+}
+
+void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ne");
+}
+
+void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_gt_s");
+}
+
+void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_gt_u");
+}
+
+void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ge_s");
+}
+
+void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ge_u");
+}
+
+void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_eq");
+}
+
+void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ne");
+}
+
+void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_gt_s");
+}
+
+void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_gt_u");
+}
+
+void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ge_s");
+}
+
+void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ge_u");
+}
+
+void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_eq");
+}
+
+void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_ne");
+}
+
+void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_lt");
+}
+
+void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_le");
+}
+
+void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_eq");
+}
+
+void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_ne");
+}
+
+void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_lt");
+}
+
+void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_le");
+}
+
+void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
+ const uint8_t imms[16]) {
+ bailout(kSimd, "emit_s128_const");
+}
+
+void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
+ bailout(kSimd, "emit_s128_not");
+}
+
+void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_and");
+}
+
+void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_or");
+}
+
+void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_xor");
+}
+
+void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_and_not");
+}
+
+void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ LiftoffRegister mask) {
+ bailout(kSimd, "emit_s128_select");
+}
+
+void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_neg");
+}
+
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v128_anytrue");
+}
+
+void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v8x16_alltrue");
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_bitmask");
+}
+
+void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shl");
+}
+
+void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shli");
+}
+
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shr_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shri_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shr_u");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shri_u");
+}
+
+void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add");
+}
+
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add_sat_s");
+}
+
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add_sat_u");
+}
+
+void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub");
+}
+
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub_sat_s");
+}
+
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub_sat_u");
+}
+
+void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_mul");
+}
+
+void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_min_s");
+}
+
+void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_min_u");
+}
+
+void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_max_s");
+}
+
+void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_max_u");
+}
+
+void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_neg");
+}
+
+void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v16x8_alltrue");
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_bitmask");
+}
+
+void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shl");
+}
+
+void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shli");
+}
+
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shr_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shri_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shr_u");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shri_u");
+}
+
+void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add");
+}
+
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add_sat_s");
+}
+
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add_sat_u");
+}
+
+void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub");
+}
+
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub_sat_s");
+}
+
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub_sat_u");
+}
+
+void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_mul");
+}
+
+void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_min_s");
+}
+
+void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_min_u");
+}
+
+void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_max_s");
+}
+
+void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_max_u");
+}
+
+void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_neg");
+}
+
+void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v32x4_alltrue");
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_bitmask");
+}
+
+void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shl");
+}
+
+void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shli");
+}
+
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shr_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shri_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shr_u");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shri_u");
+}
+
+void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_add");
+}
+
+void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_sub");
+}
+
+void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_mul");
+}
+
+void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_min_s");
+}
+
+void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_min_u");
+}
+
+void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_max_s");
+}
+
+void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_max_u");
+}
+
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_dot_i16x8_s");
+}
+
+void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_neg");
+}
+
+void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shl");
+}
+
+void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shli");
+}
+
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shr_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shri_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shr_u");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shri_u");
+}
+
+void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_add");
+}
+
+void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_sub");
+}
+
+void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_mul");
+}
+
+void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_abs");
+}
+
+void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_neg");
+}
+
+void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_sqrt");
+}
+
+bool LiftoffAssembler::emit_f32x4_ceil(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_ceil");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32x4_floor(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_floor");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32x4_trunc(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_trunc");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32x4_nearest_int(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_nearest_int");
+ return true;
+}
+
+void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_add");
+}
+
+void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_sub");
+}
+
+void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_mul");
+}
+
+void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_div");
+}
+
+void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_min");
+}
+
+void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_max");
+}
+
+void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_pmin");
+}
+
+void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_pmax");
+}
+
+void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_abs");
+}
+
+void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_neg");
+}
+
+void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_sqrt");
+}
+
+bool LiftoffAssembler::emit_f64x2_ceil(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_ceil");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64x2_floor(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_floor");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64x2_trunc(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_trunc");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_nearest_int");
+ return true;
+}
+
+void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_add");
+}
+
+void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_sub");
+}
+
+void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_mul");
+}
+
+void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_div");
+}
+
+void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_min");
+}
+
+void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_max");
+}
+
+void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_pmin");
+}
+
+void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_pmax");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_uconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sconvert_i16x8");
+}
+
+void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_uconvert_i16x8");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_uconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_sconvert_i8x16_low");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_sconvert_i8x16_high");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_uconvert_i8x16_low");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_uconvert_i8x16_high");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_i16x8_low");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_i16x8_high");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_i16x8_low");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_i16x8_high");
+}
+
+void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_rounding_average_u");
+}
+
+void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_rounding_average_u");
+}
+
+void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_abs");
+}
+
+void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_abs");
+}
+
+void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_abs");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i8x16_extract_lane_s");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i8x16_extract_lane_u");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_extract_lane_s");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_extract_lane_u");
+}
+
+void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i32x4_extract_lane");
+}
+
+void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i64x2_extract_lane");
+}
+
+void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f32x4_extract_lane");
+}
+
+void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f64x2_extract_lane");
+}
+
+void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i8x16_replace_lane");
+}
+
+void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_replace_lane");
+}
+
+void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i32x4_replace_lane");
+}
+
+void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i64x2_replace_lane");
+}
+
+void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f32x4_replace_lane");
+}
+
+void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f64x2_replace_lane");
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
+ TurboAssembler::Uld(limit_address, MemOperand(limit_address));
+ TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
+}
+
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
+ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
+}
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ if (emit_debug_code()) Abort(reason);
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ int32_t num_gp_regs = gp_regs.GetNumRegsSet();
+ if (num_gp_regs) {
+ int32_t offset = num_gp_regs * kSystemPointerSize;
+ Add64(sp, sp, Operand(-offset));
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetFirstRegSet();
+ offset -= kSystemPointerSize;
+ Sd(reg.gp(), MemOperand(sp, offset));
+ gp_regs.clear(reg);
+ }
+ DCHECK_EQ(offset, 0);
+ }
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ int32_t num_fp_regs = fp_regs.GetNumRegsSet();
+ if (num_fp_regs) {
+ Add64(sp, sp, Operand(-(num_fp_regs * kStackSlotSize)));
+ int32_t offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ TurboAssembler::StoreDouble(reg.fp(), MemOperand(sp, offset));
+ fp_regs.clear(reg);
+ offset += sizeof(double);
+ }
+ DCHECK_EQ(offset, num_fp_regs * sizeof(double));
+ }
+}
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ int32_t fp_offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ TurboAssembler::LoadDouble(reg.fp(), MemOperand(sp, fp_offset));
+ fp_regs.clear(reg);
+ fp_offset += sizeof(double);
+ }
+ if (fp_offset) Add64(sp, sp, Operand(fp_offset));
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ int32_t gp_offset = 0;
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetLastRegSet();
+ Ld(reg.gp(), MemOperand(sp, gp_offset));
+ gp_regs.clear(reg);
+ gp_offset += kSystemPointerSize;
+ }
+ Add64(sp, sp, Operand(gp_offset));
+}
+
+void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
+ LiftoffRegList all_spills,
+ LiftoffRegList ref_spills,
+ int spill_offset) {
+ int spill_space_size = 0;
+ while (!all_spills.is_empty()) {
+ LiftoffRegister reg = all_spills.GetFirstRegSet();
+ if (ref_spills.has(reg)) {
+ safepoint.DefinePointerSlot(spill_offset);
+ }
+ all_spills.clear(reg);
+ ++spill_offset;
+ spill_space_size += kSystemPointerSize;
+ }
+ // Record the number of additional spill slots.
+ RecordOolSpillSpaceSize(spill_space_size);
+}
+
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
+}
+
+void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
+ const LiftoffRegister* args,
+ const LiftoffRegister* rets,
+ ValueType out_argument_type, int stack_bytes,
+ ExternalReference ext_ref) {
+ Add64(sp, sp, Operand(-stack_bytes));
+
+ int arg_bytes = 0;
+ for (ValueType param_type : sig->parameters()) {
+ liftoff::Store(this, sp, arg_bytes, *args++, param_type);
+ arg_bytes += param_type.element_size_bytes();
+ }
+ DCHECK_LE(arg_bytes, stack_bytes);
+
+ // Pass a pointer to the buffer with the arguments to the C function.
+ // On RISC-V, the first argument is passed in {a0}.
+ constexpr Register kFirstArgReg = a0;
+ mv(kFirstArgReg, sp);
+
+ // Now call the C function.
+ constexpr int kNumCCallArgs = 1;
+ PrepareCallCFunction(kNumCCallArgs, kScratchReg);
+ CallCFunction(ext_ref, kNumCCallArgs);
+
+ // Move return value to the right register.
+ const LiftoffRegister* next_result_reg = rets;
+ if (sig->return_count() > 0) {
+ DCHECK_EQ(1, sig->return_count());
+ constexpr Register kReturnReg = a0;
+ if (kReturnReg != next_result_reg->gp()) {
+ Move(*next_result_reg, LiftoffRegister(kReturnReg), sig->GetReturn(0));
+ }
+ ++next_result_reg;
+ }
+
+ // Load potential output value from the buffer on the stack.
+ if (out_argument_type != kWasmStmt) {
+ liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_type);
+ }
+
+ Add64(sp, sp, Operand(stack_bytes));
+}
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ Call(addr, RelocInfo::WASM_CALL);
+}
+
+void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
+ Jump(addr, RelocInfo::WASM_CALL);
+}
+
+void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ if (target == no_reg) {
+ pop(kScratchReg);
+ Call(kScratchReg);
+ } else {
+ Call(target);
+ }
+}
+
+void LiftoffAssembler::TailCallIndirect(Register target) {
+ if (target == no_reg) {
+ Pop(kScratchReg);
+ Jump(kScratchReg);
+ } else {
+ Jump(target);
+ }
+}
+
+void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ Call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ Add64(sp, sp, Operand(-size));
+ TurboAssembler::Move(addr, sp);
+}
+
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ Add64(sp, sp, Operand(size));
+}
+
+void LiftoffStackSlots::Construct() {
+ for (auto& slot : slots_) {
+ const LiftoffAssembler::VarState& src = slot.src_;
+ switch (src.loc()) {
+ case LiftoffAssembler::VarState::kStack:
+ asm_->Ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
+ asm_->push(kScratchReg);
+ break;
+ case LiftoffAssembler::VarState::kRegister:
+ liftoff::push(asm_, src.reg(), src.type());
+ break;
+ case LiftoffAssembler::VarState::kIntConst: {
+ asm_->li(kScratchReg, Operand(src.i32_const()));
+ asm_->push(kScratchReg);
+ break;
+ }
+ }
+ }
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_RISCV64_LIFTOFF_ASSEMBLER_RISCV64_H_
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 1161595705..7bb58877dc 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -92,7 +92,6 @@ inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
} // namespace liftoff
int LiftoffAssembler::PrepareStackFrame() {
- bailout(kUnsupportedArchitecture, "PrepareStackFrame");
int offset = pc_offset();
lay(sp, MemOperand(sp));
return offset;
@@ -135,67 +134,279 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset;
}
-int LiftoffAssembler::SlotSizeForType(ValueType type) {
- switch (type.kind()) {
- case ValueType::kS128:
- return type.element_size_bytes();
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
+ switch (kind) {
+ case kS128:
+ return element_size_bytes(kind);
default:
return kStackSlotSize;
}
}
-bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- return (type.kind() == ValueType::kS128 || type.is_reference_type());
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+ return (kind == kS128 || is_reference_type(kind));
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
- bailout(kUnsupportedArchitecture, "LoadConstant");
+ switch (value.type().kind()) {
+ case kI32:
+ mov(reg.gp(), Operand(value.to_i32(), rmode));
+ break;
+ case kI64:
+ mov(reg.gp(), Operand(value.to_i64(), rmode));
+ break;
+ case kF32: {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadF32(reg.fp(), value.to_f32_boxed().get_scalar(), scratch);
+ break;
+ }
+ case kF64: {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadF64(reg.fp(), value.to_f64_boxed().get_bits(), scratch);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
-void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
- bailout(kUnsupportedArchitecture, "LoadFromInstance");
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
+ LoadU64(dst, liftoff::GetInstanceOperand());
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
- bailout(kUnsupportedArchitecture, "LoadTaggedPointerFromInstance");
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+ int offset, int size) {
+ DCHECK_LE(0, offset);
+ switch (size) {
+ case 1:
+ LoadU8(dst, MemOperand(instance, offset));
+ break;
+ case 4:
+ LoadU32(dst, MemOperand(instance, offset));
+ break;
+ case 8:
+ LoadU64(dst, MemOperand(instance, offset));
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ Register instance,
+ int offset) {
+ DCHECK_LE(0, offset);
+ LoadTaggedPointerField(dst, MemOperand(instance, offset));
}
void LiftoffAssembler::SpillInstance(Register instance) {
- bailout(kUnsupportedArchitecture, "SpillInstance");
+ StoreU64(instance, liftoff::GetInstanceOperand());
}
void LiftoffAssembler::FillInstanceInto(Register dst) {
- bailout(kUnsupportedArchitecture, "FillInstanceInto");
+ LoadU64(dst, liftoff::GetInstanceOperand());
}
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegList pinned) {
- bailout(kUnsupportedArchitecture, "LoadTaggedPointer");
+ CHECK(is_int20(offset_imm));
+ LoadTaggedPointerField(
+ dst,
+ MemOperand(src_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
}
void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
- LiftoffRegList pinned) {
- bailout(kRefTypes, "GlobalSet");
+ LiftoffRegList pinned,
+ SkipWriteBarrier skip_write_barrier) {
+ MemOperand dst_op =
+ MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm);
+ StoreTaggedField(src.gp(), dst_op);
+
+ if (skip_write_barrier) return;
+
+ Label write_barrier;
+ Label exit;
+ CheckPageFlag(dst_addr, r1, MemoryChunk::kPointersFromHereAreInterestingMask,
+ ne, &write_barrier);
+ b(&exit);
+ bind(&write_barrier);
+ JumpIfSmi(src.gp(), &exit);
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressTaggedPointer(src.gp(), src.gp());
+ }
+ CheckPageFlag(src.gp(), r1, MemoryChunk::kPointersToHereAreInterestingMask,
+ eq, &exit);
+ lay(r1, dst_op);
+ CallRecordWriteStub(dst_addr, r1, EMIT_REMEMBERED_SET, kSaveFPRegs,
+ wasm::WasmCode::kRecordWrite);
+ bind(&exit);
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem) {
- bailout(kUnsupportedArchitecture, "Load");
+ UseScratchRegisterScope temps(this);
+ if (!is_int20(offset_imm)) {
+ mov(ip, Operand(offset_imm));
+ if (offset_reg != no_reg) {
+ AddS64(ip, offset_reg);
+ }
+ offset_reg = ip;
+ offset_imm = 0;
+ }
+ MemOperand src_op =
+ MemOperand(src_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm);
+ if (protected_load_pc) *protected_load_pc = pc_offset();
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ case LoadType::kI64Load8U:
+ LoadU8(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load8S:
+ case LoadType::kI64Load8S:
+ LoadS8(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
+ if (is_load_mem) {
+ LoadU16LE(dst.gp(), src_op);
+ } else {
+ LoadU16(dst.gp(), src_op);
+ }
+ break;
+ case LoadType::kI32Load16S:
+ case LoadType::kI64Load16S:
+ if (is_load_mem) {
+ LoadS16LE(dst.gp(), src_op);
+ } else {
+ LoadS16(dst.gp(), src_op);
+ }
+ break;
+ case LoadType::kI64Load32U:
+ if (is_load_mem) {
+ LoadU32LE(dst.gp(), src_op);
+ } else {
+ LoadU32(dst.gp(), src_op);
+ }
+ break;
+ case LoadType::kI32Load:
+ case LoadType::kI64Load32S:
+ if (is_load_mem) {
+ LoadS32LE(dst.gp(), src_op);
+ } else {
+ LoadS32(dst.gp(), src_op);
+ }
+ break;
+ case LoadType::kI64Load:
+ if (is_load_mem) {
+ LoadU64LE(dst.gp(), src_op);
+ } else {
+ LoadU64(dst.gp(), src_op);
+ }
+ break;
+ case LoadType::kF32Load:
+ if (is_load_mem) {
+ LoadF32LE(dst.fp(), src_op, r0);
+ } else {
+ LoadF32(dst.fp(), src_op);
+ }
+ break;
+ case LoadType::kF64Load:
+ if (is_load_mem) {
+ LoadF64LE(dst.fp(), src_op, r0);
+ } else {
+ LoadF64(dst.fp(), src_op);
+ }
+ break;
+ case LoadType::kS128Load:
+ if (is_load_mem) {
+ LoadV128LE(dst.fp(), src_op, r0, r1);
+ } else {
+ LoadV128(dst.fp(), src_op, r0);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc, bool is_store_mem) {
- bailout(kUnsupportedArchitecture, "Store");
+ if (!is_int20(offset_imm)) {
+ mov(ip, Operand(offset_imm));
+ if (offset_reg != no_reg) {
+ AddS64(ip, offset_reg);
+ }
+ offset_reg = ip;
+ offset_imm = 0;
+ }
+ MemOperand dst_op =
+ MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm);
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8:
+ StoreU8(src.gp(), dst_op);
+ break;
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16:
+ if (is_store_mem) {
+ StoreU16LE(src.gp(), dst_op, r1);
+ } else {
+ StoreU16(src.gp(), dst_op, r1);
+ }
+ break;
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32:
+ if (is_store_mem) {
+ StoreU32LE(src.gp(), dst_op, r1);
+ } else {
+ StoreU32(src.gp(), dst_op, r1);
+ }
+ break;
+ case StoreType::kI64Store:
+ if (is_store_mem) {
+ StoreU64LE(src.gp(), dst_op, r1);
+ } else {
+ StoreU64(src.gp(), dst_op, r1);
+ }
+ break;
+ case StoreType::kF32Store:
+ if (is_store_mem) {
+ StoreF32LE(src.fp(), dst_op, r1);
+ } else {
+ StoreF32(src.fp(), dst_op);
+ }
+ break;
+ case StoreType::kF64Store:
+ if (is_store_mem) {
+ StoreF64LE(src.fp(), dst_op, r1);
+ } else {
+ StoreF64(src.fp(), dst_op);
+ }
+ break;
+ case StoreType::kS128Store: {
+ if (is_store_mem) {
+ StoreV128LE(src.fp(), dst_op, r0, r1);
+ } else {
+ StoreV128(src.fp(), dst_op, r1);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
@@ -258,53 +469,274 @@ void LiftoffAssembler::AtomicFence() { bailout(kAtomics, "AtomicFence"); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
- ValueType type) {
- bailout(kUnsupportedArchitecture, "LoadCallerFrameSlot");
+ ValueKind kind) {
+ int32_t offset = (caller_slot_idx + 1) * 8;
+ switch (kind) {
+ case kI32: {
+#if defined(V8_TARGET_BIG_ENDIAN)
+ LoadS32(dst.gp(), MemOperand(fp, offset + 4));
+ break;
+#else
+ LoadS32(dst.gp(), MemOperand(fp, offset));
+ break;
+#endif
+ }
+ case kRef:
+ case kRtt:
+ case kOptRef:
+ case kI64: {
+ LoadU64(dst.gp(), MemOperand(fp, offset));
+ break;
+ }
+ case kF32: {
+ LoadF32(dst.fp(), MemOperand(fp, offset));
+ break;
+ }
+ case kF64: {
+ LoadF64(dst.fp(), MemOperand(fp, offset));
+ break;
+ }
+ case kS128: {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadV128(dst.fp(), MemOperand(fp, offset), scratch);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx,
- ValueType type) {
- bailout(kUnsupportedArchitecture, "StoreCallerFrameSlot");
+ ValueKind kind) {
+ int32_t offset = (caller_slot_idx + 1) * 8;
+ switch (kind) {
+ case kI32: {
+#if defined(V8_TARGET_BIG_ENDIAN)
+ StoreU32(src.gp(), MemOperand(fp, offset + 4));
+ break;
+#else
+ StoreU32(src.gp(), MemOperand(fp, offset));
+ break;
+#endif
+ }
+ case kRef:
+ case kRtt:
+ case kOptRef:
+ case kI64: {
+ StoreU64(src.gp(), MemOperand(fp, offset));
+ break;
+ }
+ case kF32: {
+ StoreF32(src.fp(), MemOperand(fp, offset));
+ break;
+ }
+ case kF64: {
+ StoreF64(src.fp(), MemOperand(fp, offset));
+ break;
+ }
+ case kS128: {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ StoreV128(src.fp(), MemOperand(fp, offset), scratch);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
- ValueType type) {
- bailout(kUnsupportedArchitecture, "LoadReturnStackSlot");
+ ValueKind kind) {
+ switch (kind) {
+ case kI32: {
+#if defined(V8_TARGET_BIG_ENDIAN)
+ LoadS32(dst.gp(), MemOperand(sp, offset + 4));
+ break;
+#else
+ LoadS32(dst.gp(), MemOperand(sp, offset));
+ break;
+#endif
+ }
+ case kRef:
+ case kRtt:
+ case kOptRef:
+ case kI64: {
+ LoadU64(dst.gp(), MemOperand(sp, offset));
+ break;
+ }
+ case kF32: {
+ LoadF32(dst.fp(), MemOperand(sp, offset));
+ break;
+ }
+ case kF64: {
+ LoadF64(dst.fp(), MemOperand(sp, offset));
+ break;
+ }
+ case kS128: {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadV128(dst.fp(), MemOperand(sp, offset), scratch);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
- ValueType type) {
- bailout(kUnsupportedArchitecture, "MoveStackValue");
+ ValueKind kind) {
+ DCHECK_NE(dst_offset, src_offset);
+ int length = 0;
+ switch (kind) {
+ case kI32:
+ case kF32:
+ length = 4;
+ break;
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kF64:
+ length = 8;
+ break;
+ case kS128:
+ length = 16;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (is_int20(dst_offset)) {
+ lay(ip, liftoff::GetStackSlot(dst_offset));
+ } else {
+ mov(ip, Operand(-dst_offset));
+ lay(ip, MemOperand(fp, ip));
+ }
+
+ if (is_int20(src_offset)) {
+ lay(r1, liftoff::GetStackSlot(src_offset));
+ } else {
+ mov(r1, Operand(-src_offset));
+ lay(r1, MemOperand(fp, r1));
+ }
+
+ MoveChar(MemOperand(ip), MemOperand(r1), Operand(length));
}
-void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
- bailout(kUnsupportedArchitecture, "Move Register");
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
+ mov(dst, src);
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
- ValueType type) {
- bailout(kUnsupportedArchitecture, "Move DoubleRegister");
+ ValueKind kind) {
+ DCHECK_NE(dst, src);
+ if (kind == kF32) {
+ ler(dst, src);
+ } else if (kind == kF64) {
+ ldr(dst, src);
+ } else {
+ DCHECK_EQ(kS128, kind);
+ vlr(dst, src, Condition(0), Condition(0), Condition(0));
+ }
}
-void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
- bailout(kUnsupportedArchitecture, "Spill register");
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
+ DCHECK_LT(0, offset);
+ RecordUsedSpillOffset(offset);
+ MemOperand dst = liftoff::GetStackSlot(offset);
+ switch (kind) {
+ case kI32:
+ StoreU32(reg.gp(), dst);
+ break;
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
+ StoreU64(reg.gp(), dst);
+ break;
+ case kF32:
+ StoreF32(reg.fp(), dst);
+ break;
+ case kF64:
+ StoreF64(reg.fp(), dst);
+ break;
+ case kS128: {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ StoreV128(reg.fp(), dst, scratch);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::Spill(int offset, WasmValue value) {
- bailout(kUnsupportedArchitecture, "Spill value");
+ RecordUsedSpillOffset(offset);
+ MemOperand dst = liftoff::GetStackSlot(offset);
+ UseScratchRegisterScope temps(this);
+ Register src = no_reg;
+ if (!is_uint12(abs(dst.offset()))) {
+ src = GetUnusedRegister(kGpReg, {}).gp();
+ } else {
+ src = temps.Acquire();
+ }
+ switch (value.type().kind()) {
+ case kI32: {
+ mov(src, Operand(value.to_i32()));
+ StoreU32(src, dst);
+ break;
+ }
+ case kI64: {
+ mov(src, Operand(value.to_i64()));
+ StoreU64(src, dst);
+ break;
+ }
+ default:
+ // We do not track f32 and f64 constants, hence they are unreachable.
+ UNREACHABLE();
+ }
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
- bailout(kUnsupportedArchitecture, "Fill");
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
+ MemOperand src = liftoff::GetStackSlot(offset);
+ switch (kind) {
+ case kI32:
+ LoadS32(reg.gp(), src);
+ break;
+ case kI64:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ LoadU64(reg.gp(), src);
+ break;
+ case kF32:
+ LoadF32(reg.fp(), src);
+ break;
+ case kF64:
+ LoadF64(reg.fp(), src);
+ break;
+ case kS128: {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadV128(reg.fp(), src, scratch);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
- bailout(kUnsupportedArchitecture, "FillI64Half");
+ UNREACHABLE();
}
void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
DCHECK_LT(0, size);
+ DCHECK_EQ(0, size % 4);
RecordUsedSpillOffset(start + size);
// We need a zero reg. Always use r0 for that, and push it before to restore
@@ -328,16 +760,16 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
// Use r3 for start address (inclusive), r4 for end address (exclusive).
push(r3);
push(r4);
- SubS64(r3, fp, Operand(start + size));
- SubS64(r4, fp, Operand(start));
+
+ lay(r3, MemOperand(fp, -start - size));
+ lay(r4, MemOperand(fp, -start));
Label loop;
bind(&loop);
- StoreU64(r0, MemOperand(r0));
- la(r0, MemOperand(r0, kSystemPointerSize));
+ StoreU64(r0, MemOperand(r3));
+ lay(r3, MemOperand(r3, kSystemPointerSize));
CmpU64(r3, r4);
bne(&loop);
-
pop(r4);
pop(r3);
}
@@ -345,122 +777,133 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
pop(r0);
}
-#define UNIMPLEMENTED_I32_BINOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
- Register rhs) { \
- bailout(kUnsupportedArchitecture, "i32 binop: " #name); \
- }
-#define UNIMPLEMENTED_I32_BINOP_I(name) \
- UNIMPLEMENTED_I32_BINOP(name) \
- void LiftoffAssembler::emit_##name##i(Register dst, Register lhs, \
- int32_t imm) { \
- bailout(kUnsupportedArchitecture, "i32 binop_i: " #name); \
- }
-#define UNIMPLEMENTED_I64_BINOP(name) \
- void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
- LiftoffRegister rhs) { \
- bailout(kUnsupportedArchitecture, "i64 binop: " #name); \
- }
-#define UNIMPLEMENTED_I64_BINOP_I(name) \
- UNIMPLEMENTED_I64_BINOP(name) \
- void LiftoffAssembler::emit_##name##i(LiftoffRegister dst, \
- LiftoffRegister lhs, int32_t imm) { \
- bailout(kUnsupportedArchitecture, "i64 binop_i: " #name); \
- }
-#define UNIMPLEMENTED_GP_UNOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register src) { \
- bailout(kUnsupportedArchitecture, "gp unop: " #name); \
- }
-#define UNIMPLEMENTED_FP_BINOP(name) \
- void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
- DoubleRegister rhs) { \
- bailout(kUnsupportedArchitecture, "fp binop: " #name); \
- }
-#define UNIMPLEMENTED_FP_UNOP(name) \
- void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- bailout(kUnsupportedArchitecture, "fp unop: " #name); \
+#define SIGN_EXT(r) lgfr(r, r)
+#define INT32_AND_WITH_1F(x) Operand(x & 0x1f)
+#define REGISTER_AND_WITH_1F \
+ ([&](Register rhs) { \
+ AndP(r1, rhs, Operand(31)); \
+ return r1; \
+ })
+
+#define LFR_TO_REG(reg) reg.gp()
+
+// V(name, instr, dtype, stype, dcast, scast, rcast)
+#define UNOP_LIST(V) \
+ V(i32_clz, CountLeadingZerosU32, Register, Register, , , USE) \
+ V(i32_ctz, CountTrailingZerosU32, Register, Register, , , USE) \
+ V(i64_clz, CountLeadingZerosU64, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, USE) \
+ V(i64_ctz, CountTrailingZerosU64, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, USE) \
+ V(f32_abs, lpebr, DoubleRegister, DoubleRegister, , , USE) \
+ V(f32_neg, lcebr, DoubleRegister, DoubleRegister, , , USE) \
+ V(f32_sqrt, sqebr, DoubleRegister, DoubleRegister, , , USE) \
+ V(f64_abs, lpdbr, DoubleRegister, DoubleRegister, , , USE) \
+ V(f64_neg, lcdbr, DoubleRegister, DoubleRegister, , , USE) \
+ V(f64_sqrt, sqdbr, DoubleRegister, DoubleRegister, , , USE)
+
+#define EMIT_UNOP_FUNCTION(name, instr, dtype, stype, dcast, scast, rcast) \
+ void LiftoffAssembler::emit_##name(dtype dst, stype src) { \
+ auto _dst = dcast(dst); \
+ auto _src = scast(src); \
+ instr(_dst, _src); \
+ rcast(_dst); \
}
-#define UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(name) \
- bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- bailout(kUnsupportedArchitecture, "fp unop: " #name); \
- return true; \
- }
-#define UNIMPLEMENTED_I32_SHIFTOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register src, \
- Register amount) { \
- bailout(kUnsupportedArchitecture, "i32 shiftop: " #name); \
- } \
- void LiftoffAssembler::emit_##name##i(Register dst, Register src, \
- int32_t amount) { \
- bailout(kUnsupportedArchitecture, "i32 shiftop: " #name); \
- }
-#define UNIMPLEMENTED_I64_SHIFTOP(name) \
- void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
- Register amount) { \
- bailout(kUnsupportedArchitecture, "i64 shiftop: " #name); \
- } \
- void LiftoffAssembler::emit_##name##i(LiftoffRegister dst, \
- LiftoffRegister src, int32_t amount) { \
- bailout(kUnsupportedArchitecture, "i64 shiftop: " #name); \
+UNOP_LIST(EMIT_UNOP_FUNCTION)
+#undef EMIT_UNOP_FUNCTION
+#undef UNOP_LIST
+
+// V(name, instr, dtype, stype1, stype2, dcast, scast1, scast2, rcast)
+#define BINOP_LIST(V) \
+ V(f64_add, AddF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE) \
+ V(f64_sub, SubF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE) \
+ V(f64_mul, MulF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE) \
+ V(f64_div, DivF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE) \
+ V(f32_add, AddF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE) \
+ V(f32_sub, SubF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE) \
+ V(f32_mul, MulF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE) \
+ V(f32_div, DivF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE) \
+ V(i32_shli, ShiftLeftU32, Register, Register, int32_t, , , \
+ INT32_AND_WITH_1F, SIGN_EXT) \
+ V(i32_sari, ShiftRightS32, Register, Register, int32_t, , , \
+ INT32_AND_WITH_1F, SIGN_EXT) \
+ V(i32_shri, ShiftRightU32, Register, Register, int32_t, , , \
+ INT32_AND_WITH_1F, SIGN_EXT) \
+ V(i32_shl, ShiftLeftU32, Register, Register, Register, , , \
+ REGISTER_AND_WITH_1F, SIGN_EXT) \
+ V(i32_sar, ShiftRightS32, Register, Register, Register, , , \
+ REGISTER_AND_WITH_1F, SIGN_EXT) \
+ V(i32_shr, ShiftRightU32, Register, Register, Register, , , \
+ REGISTER_AND_WITH_1F, SIGN_EXT) \
+ V(i32_addi, AddS32, Register, Register, int32_t, , , Operand, SIGN_EXT) \
+ V(i32_subi, SubS32, Register, Register, int32_t, , , Operand, SIGN_EXT) \
+ V(i32_andi, And, Register, Register, int32_t, , , Operand, SIGN_EXT) \
+ V(i32_ori, Or, Register, Register, int32_t, , , Operand, SIGN_EXT) \
+ V(i32_xori, Xor, Register, Register, int32_t, , , Operand, SIGN_EXT) \
+ V(i32_add, AddS32, Register, Register, Register, , , , SIGN_EXT) \
+ V(i32_sub, SubS32, Register, Register, Register, , , , SIGN_EXT) \
+ V(i32_and, And, Register, Register, Register, , , , SIGN_EXT) \
+ V(i32_or, Or, Register, Register, Register, , , , SIGN_EXT) \
+ V(i32_xor, Xor, Register, Register, Register, , , , SIGN_EXT) \
+ V(i32_mul, MulS32, Register, Register, Register, , , , SIGN_EXT) \
+ V(i64_add, AddS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE) \
+ V(i64_sub, SubS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE) \
+ V(i64_mul, MulS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE) \
+ V(i64_and, AndP, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE) \
+ V(i64_or, OrP, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE) \
+ V(i64_xor, XorP, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE) \
+ V(i64_shl, ShiftLeftU64, LiftoffRegister, LiftoffRegister, Register, \
+ LFR_TO_REG, LFR_TO_REG, , USE) \
+ V(i64_sar, ShiftRightS64, LiftoffRegister, LiftoffRegister, Register, \
+ LFR_TO_REG, LFR_TO_REG, , USE) \
+ V(i64_shr, ShiftRightU64, LiftoffRegister, LiftoffRegister, Register, \
+ LFR_TO_REG, LFR_TO_REG, , USE) \
+ V(i64_addi, AddS64, LiftoffRegister, LiftoffRegister, int64_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE) \
+ V(i64_andi, AndP, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE) \
+ V(i64_ori, OrP, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE) \
+ V(i64_xori, XorP, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE) \
+ V(i64_shli, ShiftLeftU64, LiftoffRegister, LiftoffRegister, int32_t, \
+ LFR_TO_REG, LFR_TO_REG, Operand, USE) \
+ V(i64_sari, ShiftRightS64, LiftoffRegister, LiftoffRegister, int32_t, \
+ LFR_TO_REG, LFR_TO_REG, Operand, USE) \
+ V(i64_shri, ShiftRightU64, LiftoffRegister, LiftoffRegister, int32_t, \
+ LFR_TO_REG, LFR_TO_REG, Operand, USE)
+
+#define EMIT_BINOP_FUNCTION(name, instr, dtype, stype1, stype2, dcast, scast1, \
+ scast2, rcast) \
+ void LiftoffAssembler::emit_##name(dtype dst, stype1 lhs, stype2 rhs) { \
+ auto _dst = dcast(dst); \
+ auto _lhs = scast1(lhs); \
+ auto _rhs = scast2(rhs); \
+ instr(_dst, _lhs, _rhs); \
+ rcast(_dst); \
}
-UNIMPLEMENTED_I32_BINOP_I(i32_add)
-UNIMPLEMENTED_I32_BINOP_I(i32_sub)
-UNIMPLEMENTED_I32_BINOP(i32_mul)
-UNIMPLEMENTED_I32_BINOP_I(i32_and)
-UNIMPLEMENTED_I32_BINOP_I(i32_or)
-UNIMPLEMENTED_I32_BINOP_I(i32_xor)
-UNIMPLEMENTED_I32_SHIFTOP(i32_shl)
-UNIMPLEMENTED_I32_SHIFTOP(i32_sar)
-UNIMPLEMENTED_I32_SHIFTOP(i32_shr)
-UNIMPLEMENTED_I64_BINOP(i64_add)
-UNIMPLEMENTED_I64_BINOP(i64_sub)
-UNIMPLEMENTED_I64_BINOP(i64_mul)
-#ifdef V8_TARGET_ARCH_S390X
-UNIMPLEMENTED_I64_BINOP_I(i64_and)
-UNIMPLEMENTED_I64_BINOP_I(i64_or)
-UNIMPLEMENTED_I64_BINOP_I(i64_xor)
-#endif
-UNIMPLEMENTED_I64_SHIFTOP(i64_shl)
-UNIMPLEMENTED_I64_SHIFTOP(i64_sar)
-UNIMPLEMENTED_I64_SHIFTOP(i64_shr)
-UNIMPLEMENTED_GP_UNOP(i32_clz)
-UNIMPLEMENTED_GP_UNOP(i32_ctz)
-UNIMPLEMENTED_FP_BINOP(f32_add)
-UNIMPLEMENTED_FP_BINOP(f32_sub)
-UNIMPLEMENTED_FP_BINOP(f32_mul)
-UNIMPLEMENTED_FP_BINOP(f32_div)
-UNIMPLEMENTED_FP_BINOP(f32_copysign)
-UNIMPLEMENTED_FP_UNOP(f32_abs)
-UNIMPLEMENTED_FP_UNOP(f32_neg)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f32_ceil)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f32_floor)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f32_trunc)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f32_nearest_int)
-UNIMPLEMENTED_FP_UNOP(f32_sqrt)
-UNIMPLEMENTED_FP_BINOP(f64_add)
-UNIMPLEMENTED_FP_BINOP(f64_sub)
-UNIMPLEMENTED_FP_BINOP(f64_mul)
-UNIMPLEMENTED_FP_BINOP(f64_div)
-UNIMPLEMENTED_FP_BINOP(f64_copysign)
-UNIMPLEMENTED_FP_UNOP(f64_abs)
-UNIMPLEMENTED_FP_UNOP(f64_neg)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_ceil)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_floor)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_trunc)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_nearest_int)
-UNIMPLEMENTED_FP_UNOP(f64_sqrt)
-
-#undef UNIMPLEMENTED_I32_BINOP
-#undef UNIMPLEMENTED_I32_BINOP_I
-#undef UNIMPLEMENTED_I64_BINOP
-#undef UNIMPLEMENTED_I64_BINOP_I
-#undef UNIMPLEMENTED_GP_UNOP
-#undef UNIMPLEMENTED_FP_BINOP
-#undef UNIMPLEMENTED_FP_UNOP
-#undef UNIMPLEMENTED_FP_UNOP_RETURN_TRUE
-#undef UNIMPLEMENTED_I32_SHIFTOP
-#undef UNIMPLEMENTED_I64_SHIFTOP
+BINOP_LIST(EMIT_BINOP_FUNCTION)
+#undef BINOP_LIST
+#undef EMIT_BINOP_FUNCTION
+#undef SIGN_EXT
+#undef INT32_AND_WITH_1F
+#undef REGISTER_AND_WITH_1F
+#undef LFR_TO_REG
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
bailout(kUnsupportedArchitecture, "i32_popcnt");
@@ -469,13 +912,29 @@ bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "i64_popcnt");
+ Popcnt64(dst.gp(), src.gp());
return true;
}
-void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
- int64_t imm) {
- bailout(kUnsupportedArchitecture, "i64_addi");
+bool LiftoffAssembler::emit_f32_ceil(DoubleRegister dst, DoubleRegister src) {
+ fiebra(ROUND_TOWARD_POS_INF, dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32_floor(DoubleRegister dst, DoubleRegister src) {
+ fiebra(ROUND_TOWARD_NEG_INF, dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32_trunc(DoubleRegister dst, DoubleRegister src) {
+ fiebra(ROUND_TOWARD_0, dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32_nearest_int(DoubleRegister dst,
+ DoubleRegister src) {
+ fiebra(ROUND_TO_NEAREST_TO_EVEN, dst, src);
+ return true;
}
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
@@ -496,6 +955,27 @@ void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
FloatMin(dst, lhs, rhs);
}
+bool LiftoffAssembler::emit_f64_ceil(DoubleRegister dst, DoubleRegister src) {
+ fidbra(ROUND_TOWARD_POS_INF, dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64_floor(DoubleRegister dst, DoubleRegister src) {
+ fidbra(ROUND_TOWARD_NEG_INF, dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64_trunc(DoubleRegister dst, DoubleRegister src) {
+ fidbra(ROUND_TOWARD_0, dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
+ DoubleRegister src) {
+ fidbra(ROUND_TO_NEAREST_TO_EVEN, dst, src);
+ return true;
+}
+
void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
@@ -517,61 +997,137 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- bailout(kUnsupportedArchitecture, "i32_divs");
+ Label cont;
+
+ // Check for division by zero.
+ ltr(r0, rhs);
+ b(eq, trap_div_by_zero);
+
+ // Check for kMinInt / -1. This is unrepresentable.
+ CmpS32(rhs, Operand(-1));
+ bne(&cont);
+ CmpS32(lhs, Operand(kMinInt));
+ b(eq, trap_div_unrepresentable);
+
+ bind(&cont);
+ DivS32(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i32_divu");
+ // Check for division by zero.
+ ltr(r0, rhs);
+ beq(trap_div_by_zero);
+ DivU32(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i32_rems");
+ Label cont;
+ Label done;
+ Label trap_div_unrepresentable;
+ // Check for division by zero.
+ ltr(r0, rhs);
+ beq(trap_div_by_zero);
+
+ // Check kMinInt/-1 case.
+ CmpS32(rhs, Operand(-1));
+ bne(&cont);
+ CmpS32(lhs, Operand(kMinInt));
+ beq(&trap_div_unrepresentable);
+
+ // Continue normal calculation.
+ bind(&cont);
+ ModS32(dst, lhs, rhs);
+ bne(&done);
+
+ // Trap for the kMinInt / -1 case.
+ bind(&trap_div_unrepresentable);
+ mov(dst, Operand(0));
+ bind(&done);
}
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i32_remu");
+ // Check for division by zero.
+ ltr(r0, rhs);
+ beq(trap_div_by_zero);
+ ModU32(dst, lhs, rhs);
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- bailout(kUnsupportedArchitecture, "i64_divs");
+ // Use r0 to check for kMinInt / -1.
+ constexpr int64_t kMinInt64 = static_cast<int64_t>(1) << 63;
+ Label cont;
+ // Check for division by zero.
+ ltgr(r0, rhs.gp());
+ beq(trap_div_by_zero);
+
+ // Check for kMinInt / -1. This is unrepresentable.
+ CmpS64(rhs.gp(), Operand(-1));
+ bne(&cont);
+ mov(r0, Operand(kMinInt64));
+ CmpS64(lhs.gp(), r0);
+ b(eq, trap_div_unrepresentable);
+
+ bind(&cont);
+ DivS64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i64_divu");
+ ltgr(r0, rhs.gp());
+ b(eq, trap_div_by_zero);
+ // Perform the division.
+ DivU64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i64_rems");
+ constexpr int64_t kMinInt64 = static_cast<int64_t>(1) << 63;
+
+ Label trap_div_unrepresentable;
+ Label done;
+ Label cont;
+
+ // Check for division by zero.
+ ltgr(r0, rhs.gp());
+ beq(trap_div_by_zero);
+
+ // Check for kMinInt / -1. This is unrepresentable.
+ CmpS64(rhs.gp(), Operand(-1));
+ bne(&cont);
+ mov(r0, Operand(kMinInt64));
+ CmpS64(lhs.gp(), r0);
+ beq(&trap_div_unrepresentable);
+
+ bind(&cont);
+ ModS64(dst.gp(), lhs.gp(), rhs.gp());
+ bne(&done);
+
+ bind(&trap_div_unrepresentable);
+ mov(dst.gp(), Operand(0));
+ bind(&done);
return true;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i64_remu");
+ // Check for division by zero.
+ ltgr(r0, rhs.gp());
+ beq(trap_div_by_zero);
+ ModU64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
-void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "i64_clz");
-}
-
-void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "i64_ctz");
-}
-
void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) {
#ifdef V8_TARGET_ARCH_S390X
bailout(kUnsupportedArchitecture, "emit_u32_to_intptr");
@@ -615,41 +1171,42 @@ void LiftoffAssembler::emit_jump(Label* label) {
}
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueType type,
+ Label* label, ValueKind kind,
Register lhs, Register rhs) {
Condition cond = liftoff::ToCondition(liftoff_cond);
bool use_signed = liftoff::UseSignedOp(liftoff_cond);
- if (type.kind() == ValueType::kI32) {
- if (rhs == no_reg) {
- if (use_signed) {
- CmpS32(lhs, Operand::Zero());
- } else {
- CmpU32(lhs, Operand::Zero());
- }
- } else {
- if (use_signed) {
- CmpS32(lhs, rhs);
- } else {
- CmpU32(lhs, rhs);
- }
+ if (rhs != no_reg) {
+ switch (kind) {
+ case kI32:
+ if (use_signed) {
+ CmpS32(lhs, rhs);
+ } else {
+ CmpU32(lhs, rhs);
+ }
+ break;
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
+ V8_FALLTHROUGH;
+ case kI64:
+ if (use_signed) {
+ CmpS64(lhs, rhs);
+ } else {
+ CmpU64(lhs, rhs);
+ }
+ break;
+ default:
+ UNREACHABLE();
}
} else {
- CHECK_EQ(type.kind(), ValueType::kI64);
- if (rhs == no_reg) {
- if (use_signed) {
- CmpS64(lhs, Operand::Zero());
- } else {
- CmpU64(lhs, Operand::Zero());
- }
- } else {
- if (use_signed) {
- CmpS64(lhs, rhs);
- } else {
- CmpU64(lhs, rhs);
- }
- }
+ DCHECK_EQ(kind, kI32);
+ CHECK(use_signed);
+ CmpS32(lhs, Operand::Zero());
}
+
b(cond, label);
}
@@ -719,7 +1276,7 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
LiftoffRegister false_value,
- ValueType type) {
+ ValueKind kind) {
return false;
}
@@ -743,6 +1300,13 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
bailout(kSimd, "loadlane");
}
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc) {
+ bailout(kSimd, "store lane");
+}
+
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -846,6 +1410,21 @@ void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "pmax unimplemented");
}
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f64x2.convert_low_i32x4_s");
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f64x2.convert_low_i32x4_u");
+}
+
+void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f64x2.promote_low_f32x4");
+}
+
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_f32x4_splat");
@@ -966,6 +1545,11 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i64x2neg");
}
+void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v64x2_alltrue");
+}
+
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i64x2_shl");
@@ -1036,6 +1620,26 @@ void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
bailout(kSimd, "i64x2_bitmask");
}
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_sconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_sconvert_i32x4_high");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_uconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_uconvert_i32x4_high");
+}
+
void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -1065,11 +1669,6 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4neg");
}
-void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "v32x4_anytrue");
-}
-
void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "v32x4_alltrue");
@@ -1157,6 +1756,16 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
bailout(kSimd, "i32x4_dot_i16x8_s");
}
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.extadd_pairwise_i16x8_s");
+}
+
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.extadd_pairwise_i16x8_u");
+}
+
void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -1191,11 +1800,6 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i16x8neg");
}
-void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "v16x8_anytrue");
-}
-
void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "v16x8_alltrue");
@@ -1314,6 +1918,16 @@ void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i16x8replacelane");
}
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i16x8.extadd_pairwise_i8x16_s");
+}
+
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i16x8.extadd_pairwise_i8x16_u");
+}
+
void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
@@ -1338,6 +1952,12 @@ void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_s(LiftoffRegister dst,
bailout(kSimd, "i16x8.extmul_high_i8x16_s unsupported");
}
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i16x8_q15mulr_sat_s");
+}
+
void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -1352,6 +1972,11 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
bailout(kSimd, "i8x16_shuffle");
}
+void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i8x16.popcnt");
+}
+
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i8x16splat");
@@ -1381,8 +2006,8 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i8x16neg");
}
-void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
bailout(kSimd, "v8x16_anytrue");
}
@@ -1581,6 +2206,26 @@ void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i32x4ge_u");
}
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2.eq");
+}
+
+void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2_ne");
+}
+
+void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2.gt_s");
+}
+
+void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2.ge_s");
+}
+
void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_f32x4_eq");
@@ -1672,6 +2317,11 @@ void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
bailout(kSimd, "f32x4_uconvert_i32x4");
}
+void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f32x4.demote_f64x2_zero");
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -1736,6 +2386,16 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4_uconvert_i16x8_high");
}
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.trunc_sat_f64x2_s_zero");
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.trunc_sat_f64x2_u_zero");
+}
+
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -1769,6 +2429,11 @@ void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4_abs");
}
+void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2.abs");
+}
+
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
bailout(kUnsupportedArchitecture, "StackCheck");
}
@@ -1800,10 +2465,10 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
bailout(kUnsupportedArchitecture, "DropStackSlotsAndRet");
}
-void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
- ValueType out_argument_type, int stack_bytes,
+ ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
bailout(kUnsupportedArchitecture, "CallC");
}
@@ -1816,7 +2481,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
bailout(kUnsupportedArchitecture, "TailCallNativeWasmCode");
}
-void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
bailout(kUnsupportedArchitecture, "CallIndirect");
@@ -1842,6 +2507,16 @@ void LiftoffStackSlots::Construct() {
asm_->bailout(kUnsupportedArchitecture, "LiftoffStackSlots::Construct");
}
+void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f64_copysign");
+}
+
+void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f32_copysign");
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index a95ef95f26..92005bdb8f 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -8,6 +8,7 @@
#include "src/base/platform/wrappers.h"
#include "src/codegen/assembler.h"
#include "src/codegen/cpu-features.h"
+#include "src/codegen/machine-type.h"
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/simd-shuffle.h"
@@ -83,24 +84,25 @@ inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
}
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
- ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
+ ValueKind kind) {
+ switch (kind) {
+ case kI32:
assm->movl(dst.gp(), src);
break;
- case ValueType::kI64:
- case ValueType::kOptRef:
- case ValueType::kRef:
- case ValueType::kRtt:
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
assm->movq(dst.gp(), src);
break;
- case ValueType::kF32:
+ case kF32:
assm->Movss(dst.fp(), src);
break;
- case ValueType::kF64:
+ case kF64:
assm->Movsd(dst.fp(), src);
break;
- case ValueType::kS128:
+ case kS128:
assm->Movdqu(dst.fp(), src);
break;
default:
@@ -109,21 +111,21 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
}
inline void Store(LiftoffAssembler* assm, Operand dst, LiftoffRegister src,
- ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
+ ValueKind kind) {
+ switch (kind) {
+ case kI32:
assm->movl(dst, src.gp());
break;
- case ValueType::kI64:
+ case kI64:
assm->movq(dst, src.gp());
break;
- case ValueType::kF32:
+ case kF32:
assm->Movss(dst, src.fp());
break;
- case ValueType::kF64:
+ case kF64:
assm->Movsd(dst, src.fp());
break;
- case ValueType::kS128:
+ case kS128:
assm->Movdqu(dst, src.fp());
break;
default:
@@ -131,21 +133,23 @@ inline void Store(LiftoffAssembler* assm, Operand dst, LiftoffRegister src,
}
}
-inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
- case ValueType::kI64:
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
+ switch (kind) {
+ case kI32:
+ case kI64:
+ case kRef:
+ case kOptRef:
assm->pushq(reg.gp());
break;
- case ValueType::kF32:
+ case kF32:
assm->AllocateStackSpace(kSystemPointerSize);
assm->Movss(Operand(rsp, 0), reg.fp());
break;
- case ValueType::kF64:
+ case kF64:
assm->AllocateStackSpace(kSystemPointerSize);
assm->Movsd(Operand(rsp, 0), reg.fp());
break;
- case ValueType::kS128:
+ case kS128:
assm->AllocateStackSpace(kSystemPointerSize * 2);
assm->Movdqu(Operand(rsp, 0), reg.fp());
break;
@@ -187,7 +191,9 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
popq(rbp);
}
-void LiftoffAssembler::AlignFrameSize() {}
+void LiftoffAssembler::AlignFrameSize() {
+ max_used_spill_offset_ = RoundUp(max_used_spill_offset_, kSystemPointerSize);
+}
void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
// The frame_size includes the frame marker. The frame marker has already been
@@ -195,7 +201,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
// anymore.
int frame_size = GetTotalFrameSize() - kSystemPointerSize;
// Need to align sp to system pointer size.
- frame_size = RoundUp(frame_size, kSystemPointerSize);
+ DCHECK_EQ(frame_size, RoundUp(frame_size, kSystemPointerSize));
 // We can't run out of space; just pass anything big enough to not cause the
 // assembler to try to grow the buffer.
constexpr int kAvailableSpace = 64;
@@ -237,36 +243,36 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset;
}
-int LiftoffAssembler::SlotSizeForType(ValueType type) {
- return type.is_reference_type() ? kSystemPointerSize
- : type.element_size_bytes();
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
+ return is_reference_type(kind) ? kSystemPointerSize
+ : element_size_bytes(kind);
}
-bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- return type.is_reference_type();
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+ return is_reference_type(kind);
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
switch (value.type().kind()) {
- case ValueType::kI32:
+ case kI32:
if (value.to_i32() == 0 && RelocInfo::IsNone(rmode)) {
xorl(reg.gp(), reg.gp());
} else {
movl(reg.gp(), Immediate(value.to_i32(), rmode));
}
break;
- case ValueType::kI64:
+ case kI64:
if (RelocInfo::IsNone(rmode)) {
TurboAssembler::Set(reg.gp(), value.to_i64());
} else {
movq(reg.gp(), Immediate64(value.to_i64(), rmode));
}
break;
- case ValueType::kF32:
+ case kF32:
TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
break;
- case ValueType::kF64:
+ case kF64:
TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
break;
default:
@@ -274,21 +280,34 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
- DCHECK_LE(0, offset);
- DCHECK(size == 4 || size == 8);
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
movq(dst, liftoff::GetInstanceOperand());
- if (size == 4) {
- movl(dst, Operand(dst, offset));
- } else {
- movq(dst, Operand(dst, offset));
+}
+
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+ int offset, int size) {
+ DCHECK_LE(0, offset);
+ Operand src{instance, offset};
+ switch (size) {
+ case 1:
+ movzxbl(dst, src);
+ break;
+ case 4:
+ movl(dst, src);
+ break;
+ case 8:
+ movq(dst, src);
+ break;
+ default:
+ UNIMPLEMENTED();
}
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ Register instance,
+ int offset) {
DCHECK_LE(0, offset);
- movq(dst, liftoff::GetInstanceOperand());
- LoadTaggedPointerField(dst, Operand(dst, offset));
+ LoadTaggedPointerField(dst, Operand(instance, offset));
}
void LiftoffAssembler::SpillInstance(Register instance) {
@@ -316,13 +335,16 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
- LiftoffRegList pinned) {
+ LiftoffRegList pinned,
+ SkipWriteBarrier skip_write_barrier) {
DCHECK_GE(offset_imm, 0);
- Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Operand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg,
static_cast<uint32_t>(offset_imm));
StoreTaggedField(dst_op, src.gp());
+ if (skip_write_barrier) return;
+
+ Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Label write_barrier;
Label exit;
CheckPageFlag(dst_addr, scratch,
@@ -756,82 +778,83 @@ void LiftoffAssembler::AtomicFence() { mfence(); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
Operand src(rbp, kSystemPointerSize * (caller_slot_idx + 1));
- liftoff::Load(this, dst, src, type);
+ liftoff::Load(this, dst, src, kind);
}
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
Operand dst(rbp, kSystemPointerSize * (caller_slot_idx + 1));
- liftoff::Store(this, dst, src, type);
+ liftoff::Store(this, dst, src, kind);
}
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister reg, int offset,
- ValueType type) {
+ ValueKind kind) {
Operand src(rsp, offset);
- liftoff::Load(this, reg, src, type);
+ liftoff::Load(this, reg, src, kind);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
- ValueType type) {
+ ValueKind kind) {
DCHECK_NE(dst_offset, src_offset);
Operand dst = liftoff::GetStackSlot(dst_offset);
Operand src = liftoff::GetStackSlot(src_offset);
- if (type.element_size_log2() == 2) {
+ if (element_size_log2(kind) == 2) {
movl(kScratchRegister, src);
movl(dst, kScratchRegister);
} else {
- DCHECK_EQ(3, type.element_size_log2());
+ DCHECK_EQ(3, element_size_log2(kind));
movq(kScratchRegister, src);
movq(dst, kScratchRegister);
}
}
-void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src);
- if (type == kWasmI32) {
+ if (kind == kI32) {
movl(dst, src);
} else {
- DCHECK(kWasmI64 == type || type.is_reference_type());
+ DCHECK(kI64 == kind || is_reference_type(kind));
movq(dst, src);
}
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
- ValueType type) {
+ ValueKind kind) {
DCHECK_NE(dst, src);
- if (type == kWasmF32) {
+ if (kind == kF32) {
Movss(dst, src);
- } else if (type == kWasmF64) {
+ } else if (kind == kF64) {
Movsd(dst, src);
} else {
- DCHECK_EQ(kWasmS128, type);
+ DCHECK_EQ(kS128, kind);
Movapd(dst, src);
}
}
-void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
RecordUsedSpillOffset(offset);
Operand dst = liftoff::GetStackSlot(offset);
- switch (type.kind()) {
- case ValueType::kI32:
+ switch (kind) {
+ case kI32:
movl(dst, reg.gp());
break;
- case ValueType::kI64:
- case ValueType::kOptRef:
- case ValueType::kRef:
- case ValueType::kRtt:
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
movq(dst, reg.gp());
break;
- case ValueType::kF32:
+ case kF32:
Movss(dst, reg.fp());
break;
- case ValueType::kF64:
+ case kF64:
Movsd(dst, reg.fp());
break;
- case ValueType::kS128:
+ case kS128:
Movdqu(dst, reg.fp());
break;
default:
@@ -843,10 +866,10 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
RecordUsedSpillOffset(offset);
Operand dst = liftoff::GetStackSlot(offset);
switch (value.type().kind()) {
- case ValueType::kI32:
+ case kI32:
movl(dst, Immediate(value.to_i32()));
break;
- case ValueType::kI64: {
+ case kI64: {
if (is_int32(value.to_i64())) {
// Sign extend low word.
movq(dst, Immediate(static_cast<int32_t>(value.to_i64())));
@@ -866,8 +889,8 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
}
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
- liftoff::Load(this, reg, liftoff::GetStackSlot(offset), type);
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
+ liftoff::Load(this, reg, liftoff::GetStackSlot(offset), kind);
}
void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
@@ -1119,16 +1142,16 @@ void LiftoffAssembler::emit_i32_xori(Register dst, Register lhs, int32_t imm) {
}
namespace liftoff {
-template <ValueType::Kind type>
+template <ValueKind kind>
inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
Register src, Register amount,
void (Assembler::*emit_shift)(Register)) {
// If dst is rcx, compute into the scratch register first, then move to rcx.
if (dst == rcx) {
- assm->Move(kScratchRegister, src, ValueType::Primitive(type));
- if (amount != rcx) assm->Move(rcx, amount, ValueType::Primitive(type));
+ assm->Move(kScratchRegister, src, kind);
+ if (amount != rcx) assm->Move(rcx, amount, kind);
(assm->*emit_shift)(kScratchRegister);
- assm->Move(rcx, kScratchRegister, ValueType::Primitive(type));
+ assm->Move(rcx, kScratchRegister, kind);
return;
}
@@ -1140,11 +1163,11 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
src == rcx || assm->cache_state()->is_used(LiftoffRegister(rcx));
if (use_scratch) assm->movq(kScratchRegister, rcx);
if (src == rcx) src = kScratchRegister;
- assm->Move(rcx, amount, ValueType::Primitive(type));
+ assm->Move(rcx, amount, kind);
}
// Do the actual shift.
- if (dst != src) assm->Move(dst, src, ValueType::Primitive(type));
+ if (dst != src) assm->Move(dst, src, kind);
(assm->*emit_shift)(dst);
// Restore rcx if needed.
@@ -1154,8 +1177,8 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
void LiftoffAssembler::emit_i32_shl(Register dst, Register src,
Register amount) {
- liftoff::EmitShiftOperation<ValueType::kI32>(this, dst, src, amount,
- &Assembler::shll_cl);
+ liftoff::EmitShiftOperation<kI32>(this, dst, src, amount,
+ &Assembler::shll_cl);
}
void LiftoffAssembler::emit_i32_shli(Register dst, Register src,
@@ -1166,8 +1189,8 @@ void LiftoffAssembler::emit_i32_shli(Register dst, Register src,
void LiftoffAssembler::emit_i32_sar(Register dst, Register src,
Register amount) {
- liftoff::EmitShiftOperation<ValueType::kI32>(this, dst, src, amount,
- &Assembler::sarl_cl);
+ liftoff::EmitShiftOperation<kI32>(this, dst, src, amount,
+ &Assembler::sarl_cl);
}
void LiftoffAssembler::emit_i32_sari(Register dst, Register src,
@@ -1178,8 +1201,8 @@ void LiftoffAssembler::emit_i32_sari(Register dst, Register src,
void LiftoffAssembler::emit_i32_shr(Register dst, Register src,
Register amount) {
- liftoff::EmitShiftOperation<ValueType::kI32>(this, dst, src, amount,
- &Assembler::shrl_cl);
+ liftoff::EmitShiftOperation<kI32>(this, dst, src, amount,
+ &Assembler::shrl_cl);
}
void LiftoffAssembler::emit_i32_shri(Register dst, Register src,
@@ -1317,8 +1340,8 @@ void LiftoffAssembler::emit_i64_xori(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
Register amount) {
- liftoff::EmitShiftOperation<ValueType::kI64>(this, dst.gp(), src.gp(), amount,
- &Assembler::shlq_cl);
+ liftoff::EmitShiftOperation<kI64>(this, dst.gp(), src.gp(), amount,
+ &Assembler::shlq_cl);
}
void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
@@ -1329,8 +1352,8 @@ void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
Register amount) {
- liftoff::EmitShiftOperation<ValueType::kI64>(this, dst.gp(), src.gp(), amount,
- &Assembler::sarq_cl);
+ liftoff::EmitShiftOperation<kI64>(this, dst.gp(), src.gp(), amount,
+ &Assembler::sarq_cl);
}
void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
@@ -1341,8 +1364,8 @@ void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
Register amount) {
- liftoff::EmitShiftOperation<ValueType::kI64>(this, dst.gp(), src.gp(), amount,
- &Assembler::shrq_cl);
+ liftoff::EmitShiftOperation<kI64>(this, dst.gp(), src.gp(), amount,
+ &Assembler::shrq_cl);
}
void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
@@ -2027,27 +2050,28 @@ void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
void LiftoffAssembler::emit_jump(Register target) { jmp(target); }
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueType type,
+ Label* label, ValueKind kind,
Register lhs, Register rhs) {
Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs != no_reg) {
- switch (type.kind()) {
- case ValueType::kI32:
+ switch (kind) {
+ case kI32:
cmpl(lhs, rhs);
break;
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kRtt:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
V8_FALLTHROUGH;
- case ValueType::kI64:
+ case kI64:
cmpq(lhs, rhs);
break;
default:
UNREACHABLE();
}
} else {
- DCHECK_EQ(type, kWasmI32);
+ DCHECK_EQ(kind, kI32);
testl(lhs, lhs);
}
@@ -2136,12 +2160,12 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
LiftoffRegister false_value,
- ValueType type) {
- if (type != kWasmI32 && type != kWasmI64) return false;
+ ValueKind kind) {
+ if (kind != kI32 && kind != kI64) return false;
testl(condition, condition);
- if (type == kWasmI32) {
+ if (kind == kI32) {
if (dst == false_value) {
cmovl(not_zero, dst.gp(), true_value.gp());
} else {
@@ -2311,7 +2335,11 @@ inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
template <void (TurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
- LiftoffRegister src) {
+ LiftoffRegister src,
+ base::Optional<CpuFeature> feature = base::nullopt) {
+ base::Optional<CpuFeatureScope> sse_scope;
+ if (feature.has_value()) sse_scope.emplace(assm, *feature);
+
XMMRegister tmp = kScratchDoubleReg;
assm->xorq(dst.gp(), dst.gp());
assm->Pxor(tmp, tmp);
@@ -2395,6 +2423,25 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
}
}
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc) {
+ Operand dst_op = liftoff::GetMemOp(this, dst, offset, offset_imm);
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ MachineRepresentation rep = type.mem_rep();
+ if (rep == MachineRepresentation::kWord8) {
+ Pextrb(dst_op, src.fp(), lane);
+ } else if (rep == MachineRepresentation::kWord16) {
+ Pextrw(dst_op, src.fp(), lane);
+ } else if (rep == MachineRepresentation::kWord32) {
+ S128Store32Lane(dst_op, src.fp(), lane);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kWord64, rep);
+ S128Store64Lane(dst_op, src.fp(), lane);
+ }
+}
+
void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs,
@@ -2436,13 +2483,12 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- XMMRegister mask = kScratchDoubleReg;
- // Out-of-range indices should return 0, add 112 (0x70) so that any value > 15
- // saturates to 128 (top bit set), so pshufb will zero that lane.
- TurboAssembler::Move(mask, uint32_t{0x70707070});
- Pshufd(mask, mask, uint8_t{0x0});
- Paddusb(mask, rhs.fp());
- Pshufb(dst.fp(), lhs.fp(), mask);
+ I8x16Swizzle(dst.fp(), lhs.fp(), rhs.fp());
+}
+
+void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I8x16Popcnt(dst.fp(), src.fp(), liftoff::kScratchDoubleReg2);
}
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
@@ -2658,6 +2704,71 @@ void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
Pcmpeqd(dst.fp(), ref);
}
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpcmpeqq, &Assembler::pcmpeqq>(
+ this, dst, lhs, rhs, SSE4_1);
+}
+
+void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpcmpeqq, &Assembler::pcmpeqq>(
+ this, dst, lhs, rhs, SSE4_1);
+ Pcmpeqq(kScratchDoubleReg, kScratchDoubleReg);
+ Pxor(dst.fp(), kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // Different register alias requirements depending on CpuFeatures supported:
+ if (CpuFeatures::IsSupported(AVX)) {
+ // 1. AVX, no requirements.
+ I64x2GtS(dst.fp(), lhs.fp(), rhs.fp());
+ } else if (CpuFeatures::IsSupported(SSE4_2)) {
+ // 2. SSE4_2, dst == lhs.
+ if (dst != lhs) {
+ movdqa(dst.fp(), lhs.fp());
+ }
+ I64x2GtS(dst.fp(), dst.fp(), rhs.fp());
+ } else {
+ // 3. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
+ if (dst == lhs || dst == rhs) {
+ // macro-assembler uses kScratchDoubleReg, so don't use it.
+ I64x2GtS(liftoff::kScratchDoubleReg2, lhs.fp(), rhs.fp());
+ movaps(dst.fp(), liftoff::kScratchDoubleReg2);
+ } else {
+ I64x2GtS(dst.fp(), lhs.fp(), rhs.fp());
+ }
+ }
+}
+
+void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // Different register alias requirements depending on CpuFeatures supported:
+ if (CpuFeatures::IsSupported(AVX)) {
+ // 1. AVX, no requirements.
+ I64x2GeS(dst.fp(), lhs.fp(), rhs.fp());
+ } else if (CpuFeatures::IsSupported(SSE4_2)) {
+ // 2. SSE4_2, dst != lhs.
+ if (dst == lhs) {
+ // macro-assembler uses kScratchDoubleReg, so don't use it.
+ I64x2GeS(liftoff::kScratchDoubleReg2, lhs.fp(), rhs.fp());
+ movdqa(dst.fp(), liftoff::kScratchDoubleReg2);
+ } else {
+ I64x2GeS(dst.fp(), lhs.fp(), rhs.fp());
+ }
+ } else {
+ // 3. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
+ if (dst == lhs || dst == rhs) {
+ // macro-assembler uses kScratchDoubleReg, so don't use it.
+ I64x2GeS(liftoff::kScratchDoubleReg2, lhs.fp(), rhs.fp());
+ movaps(dst.fp(), liftoff::kScratchDoubleReg2);
+ } else {
+ I64x2GeS(dst.fp(), lhs.fp(), rhs.fp());
+ }
+ }
+}
+
void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vcmpeqps, &Assembler::cmpeqps>(
@@ -2773,8 +2884,8 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
}
}
-void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
liftoff::EmitAnyTrue(this, dst, src);
}
@@ -2790,7 +2901,7 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- static constexpr RegClass tmp_simd_rc = reg_class_for(ValueType::kS128);
+ static constexpr RegClass tmp_simd_rc = reg_class_for(kS128);
LiftoffRegister tmp_simd =
GetUnusedRegister(tmp_simd_rc, LiftoffRegList::ForRegs(dst, lhs));
// Mask off the unwanted bits before word-shifting.
@@ -2918,7 +3029,7 @@ void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(ValueType::kS128);
+ static constexpr RegClass tmp_rc = reg_class_for(kS128);
LiftoffRegister tmp =
GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
if (CpuFeatures::IsSupported(AVX)) {
@@ -3017,11 +3128,6 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
}
}
-void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- liftoff::EmitAnyTrue(this, dst, src);
-}
-
void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqw>(this, dst, src);
@@ -3148,6 +3254,18 @@ void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
}
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I16x8ExtAddPairwiseI8x16S(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x01());
+ Pmaddubsw(dst.fp(), src.fp(), op);
+}
+
void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -3175,6 +3293,12 @@ void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
/*is_signed=*/false);
}
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ I16x8Q15MulRSatS(dst.fp(), src1.fp(), src2.fp());
+}
+
void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
@@ -3186,11 +3310,6 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
}
}
-void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- liftoff::EmitAnyTrue(this, dst, src);
-}
-
void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqd>(this, dst, src);
@@ -3292,6 +3411,18 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i16x8_splat_0x0001());
+ Pmaddwd(dst.fp(), src.fp(), op);
+}
+
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I32x4ExtAddPairwiseI16x8U(dst.fp(), src.fp());
+}
+
namespace liftoff {
// Helper function to check for register aliasing and AVX support, and to move
// registers around before calling the actual macro-assembler function.
@@ -3357,6 +3488,11 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
}
}
+void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqq>(this, dst, src, SSE4_1);
+}
+
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdShiftOp<&Assembler::vpsllq, &Assembler::psllq, 6>(this, dst,
@@ -3408,7 +3544,7 @@ void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(ValueType::kS128);
+ static constexpr RegClass tmp_rc = reg_class_for(kS128);
LiftoffRegister tmp1 =
GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
LiftoffRegister tmp2 =
@@ -3465,6 +3601,26 @@ void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
Movmskpd(dst.gp(), src.fp());
}
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pmovsxdq(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I64x2SConvertI32x4High(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pmovzxdq(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I64x2UConvertI32x4High(dst.fp(), src.fp());
+}
+
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
@@ -3777,6 +3933,21 @@ void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, rhs, lhs);
}
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Cvtdq2pd(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ F64x2ConvertLowI32x4U(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Cvtps2pd(dst.fp(), src.fp());
+}
+
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
// NAN->0
@@ -3860,6 +4031,11 @@ void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
Addps(dst.fp(), kScratchDoubleReg); // Add hi and lo, may round.
}
+void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Cvtpd2ps(dst.fp(), src.fp());
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -3932,6 +4108,16 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
I32x4UConvertI16x8High(dst.fp(), src.fp());
}
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I32x4TruncSatF64x2SZero(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I32x4TruncSatF64x2UZero(dst.fp(), src.fp());
+}
+
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -3968,6 +4154,11 @@ void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
Pabsd(dst.fp(), src.fp());
}
+void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I64x2Abs(dst.fp(), src.fp());
+}
+
void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
@@ -4194,17 +4385,17 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
ret(static_cast<int>(num_stack_slots * kSystemPointerSize));
}
-void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
- ValueType out_argument_type, int stack_bytes,
+ ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
AllocateStackSpace(stack_bytes);
int arg_bytes = 0;
- for (ValueType param_type : sig->parameters()) {
- liftoff::Store(this, Operand(rsp, arg_bytes), *args++, param_type);
- arg_bytes += param_type.element_size_bytes();
+ for (ValueKind param_kind : sig->parameters()) {
+ liftoff::Store(this, Operand(rsp, arg_bytes), *args++, param_kind);
+ arg_bytes += element_size_bytes(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
@@ -4229,8 +4420,8 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
}
// Load potential output value from the buffer on the stack.
- if (out_argument_type != kWasmStmt) {
- liftoff::Load(this, *next_result_reg, Operand(rsp, 0), out_argument_type);
+ if (out_argument_kind != kStmt) {
+ liftoff::Load(this, *next_result_reg, Operand(rsp, 0), out_argument_kind);
}
addq(rsp, Immediate(stack_bytes));
@@ -4244,7 +4435,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
near_jmp(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
if (target == no_reg) {
@@ -4290,12 +4481,12 @@ void LiftoffStackSlots::Construct() {
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack:
- if (src.type() == kWasmI32) {
+ if (src.kind() == kI32) {
// Load i32 values to a register first to ensure they are zero
// extended.
asm_->movl(kScratchRegister, liftoff::GetStackSlot(slot.src_offset_));
asm_->pushq(kScratchRegister);
- } else if (src.type() == kWasmS128) {
+ } else if (src.kind() == kS128) {
// Since offsets are subtracted from sp, we need a smaller offset to
// push the top of a s128 value.
asm_->pushq(liftoff::GetStackSlot(slot.src_offset_ - 8));
@@ -4309,7 +4500,7 @@ void LiftoffStackSlots::Construct() {
}
break;
case LiftoffAssembler::VarState::kRegister:
- liftoff::push(asm_, src.reg(), src.type());
+ liftoff::push(asm_, src.reg(), src.kind());
break;
case LiftoffAssembler::VarState::kIntConst:
asm_->pushq(Immediate(src.i32_const()));
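
Most hunks in this file replace switches over ValueType::Kind with switches over the standalone ValueKind enum, so Liftoff can dispatch on a plain kind without carrying a full ValueType. A rough, trimmed-down sketch of that dispatch shape (the enum below is a hypothetical subset; the real definitions live in src/wasm/value-type.h, and the x64 slot sizes are assumed):

// Hypothetical, simplified mirror of the kind-based dispatch used above.
enum ValueKind { kI32, kI64, kF32, kF64, kS128, kRef, kOptRef };

int SlotSizeForKind(ValueKind kind) {
  switch (kind) {
    case kI32:
    case kF32:
      return 4;   // 32-bit scalars
    case kI64:
    case kF64:
    case kRef:
    case kOptRef:
      return 8;   // 64-bit scalars and pointer-sized references on x64
    case kS128:
      return 16;  // SIMD vectors
  }
  return 0;  // Unreachable for the kinds listed above.
}
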
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index be7c4cb54f..7d78e34a03 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -63,16 +63,16 @@ auto ReadLebU64(const byte_t** pos) -> uint64_t {
ValKind V8ValueTypeToWasm(i::wasm::ValueType v8_valtype) {
switch (v8_valtype.kind()) {
- case i::wasm::ValueType::kI32:
+ case i::wasm::kI32:
return I32;
- case i::wasm::ValueType::kI64:
+ case i::wasm::kI64:
return I64;
- case i::wasm::ValueType::kF32:
+ case i::wasm::kF32:
return F32;
- case i::wasm::ValueType::kF64:
+ case i::wasm::kF64:
return F64;
- case i::wasm::ValueType::kRef:
- case i::wasm::ValueType::kOptRef:
+ case i::wasm::kRef:
+ case i::wasm::kOptRef:
switch (v8_valtype.heap_representation()) {
case i::wasm::HeapType::kFunc:
return FUNCREF;
@@ -886,13 +886,13 @@ own<Instance> GetInstance(StoreImpl* store,
own<Frame> CreateFrameFromInternal(i::Handle<i::FixedArray> frames, int index,
i::Isolate* isolate, StoreImpl* store) {
- i::Handle<i::StackTraceFrame> frame(
- i::StackTraceFrame::cast(frames->get(index)), isolate);
- i::Handle<i::WasmInstanceObject> instance =
- i::StackTraceFrame::GetWasmInstance(frame);
- uint32_t func_index = i::StackTraceFrame::GetWasmFunctionIndex(frame);
- size_t func_offset = i::StackTraceFrame::GetFunctionOffset(frame);
- size_t module_offset = i::StackTraceFrame::GetColumnNumber(frame);
+ i::Handle<i::StackFrameInfo> frame(
+ i::StackFrameInfo::cast(frames->get(index)), isolate);
+ i::Handle<i::WasmInstanceObject> instance(frame->GetWasmInstance(), isolate);
+ uint32_t func_index = frame->GetWasmFunctionIndex();
+ size_t module_offset = i::StackFrameInfo::GetSourcePosition(frame);
+ size_t func_offset = module_offset - i::wasm::GetWasmFunctionOffset(
+ instance->module(), func_index);
return own<Frame>(seal<Frame>(new (std::nothrow) FrameImpl(
GetInstance(store, instance), func_index, func_offset, module_offset)));
}
@@ -1402,31 +1402,32 @@ void PushArgs(const i::wasm::FunctionSig* sig, const Val args[],
for (size_t i = 0; i < sig->parameter_count(); i++) {
i::wasm::ValueType type = sig->GetParam(i);
switch (type.kind()) {
- case i::wasm::ValueType::kI32:
+ case i::wasm::kI32:
packer->Push(args[i].i32());
break;
- case i::wasm::ValueType::kI64:
+ case i::wasm::kI64:
packer->Push(args[i].i64());
break;
- case i::wasm::ValueType::kF32:
+ case i::wasm::kF32:
packer->Push(args[i].f32());
break;
- case i::wasm::ValueType::kF64:
+ case i::wasm::kF64:
packer->Push(args[i].f64());
break;
- case i::wasm::ValueType::kRef:
- case i::wasm::ValueType::kOptRef:
+ case i::wasm::kRef:
+ case i::wasm::kOptRef:
// TODO(7748): Make sure this works for all heap types.
packer->Push(WasmRefToV8(store->i_isolate(), args[i].ref())->ptr());
break;
- case i::wasm::ValueType::kRtt:
- case i::wasm::ValueType::kS128:
+ case i::wasm::kRtt:
+ case i::wasm::kRttWithDepth:
+ case i::wasm::kS128:
// TODO(7748): Implement.
UNIMPLEMENTED();
- case i::wasm::ValueType::kI8:
- case i::wasm::ValueType::kI16:
- case i::wasm::ValueType::kStmt:
- case i::wasm::ValueType::kBottom:
+ case i::wasm::kI8:
+ case i::wasm::kI16:
+ case i::wasm::kStmt:
+ case i::wasm::kBottom:
UNREACHABLE();
break;
}
@@ -1439,34 +1440,35 @@ void PopArgs(const i::wasm::FunctionSig* sig, Val results[],
for (size_t i = 0; i < sig->return_count(); i++) {
i::wasm::ValueType type = sig->GetReturn(i);
switch (type.kind()) {
- case i::wasm::ValueType::kI32:
+ case i::wasm::kI32:
results[i] = Val(packer->Pop<int32_t>());
break;
- case i::wasm::ValueType::kI64:
+ case i::wasm::kI64:
results[i] = Val(packer->Pop<int64_t>());
break;
- case i::wasm::ValueType::kF32:
+ case i::wasm::kF32:
results[i] = Val(packer->Pop<float>());
break;
- case i::wasm::ValueType::kF64:
+ case i::wasm::kF64:
results[i] = Val(packer->Pop<double>());
break;
- case i::wasm::ValueType::kRef:
- case i::wasm::ValueType::kOptRef: {
+ case i::wasm::kRef:
+ case i::wasm::kOptRef: {
// TODO(7748): Make sure this works for all heap types.
i::Address raw = packer->Pop<i::Address>();
i::Handle<i::Object> obj(i::Object(raw), store->i_isolate());
results[i] = Val(V8RefValueToWasm(store, obj));
break;
}
- case i::wasm::ValueType::kRtt:
- case i::wasm::ValueType::kS128:
+ case i::wasm::kRtt:
+ case i::wasm::kRttWithDepth:
+ case i::wasm::kS128:
// TODO(7748): Implement.
UNIMPLEMENTED();
- case i::wasm::ValueType::kI8:
- case i::wasm::ValueType::kI16:
- case i::wasm::ValueType::kStmt:
- case i::wasm::ValueType::kBottom:
+ case i::wasm::kI8:
+ case i::wasm::kI16:
+ case i::wasm::kStmt:
+ case i::wasm::kBottom:
UNREACHABLE();
break;
}
@@ -1708,29 +1710,30 @@ auto Global::type() const -> own<GlobalType> {
auto Global::get() const -> Val {
i::Handle<i::WasmGlobalObject> v8_global = impl(this)->v8_object();
switch (v8_global->type().kind()) {
- case i::wasm::ValueType::kI32:
+ case i::wasm::kI32:
return Val(v8_global->GetI32());
- case i::wasm::ValueType::kI64:
+ case i::wasm::kI64:
return Val(v8_global->GetI64());
- case i::wasm::ValueType::kF32:
+ case i::wasm::kF32:
return Val(v8_global->GetF32());
- case i::wasm::ValueType::kF64:
+ case i::wasm::kF64:
return Val(v8_global->GetF64());
- case i::wasm::ValueType::kRef:
- case i::wasm::ValueType::kOptRef: {
+ case i::wasm::kRef:
+ case i::wasm::kOptRef: {
// TODO(7748): Make sure this works for all heap types.
StoreImpl* store = impl(this)->store();
i::HandleScope scope(store->i_isolate());
return Val(V8RefValueToWasm(store, v8_global->GetRef()));
}
- case i::wasm::ValueType::kRtt:
- case i::wasm::ValueType::kS128:
+ case i::wasm::kRtt:
+ case i::wasm::kRttWithDepth:
+ case i::wasm::kS128:
// TODO(7748): Implement these.
UNIMPLEMENTED();
- case i::wasm::ValueType::kI8:
- case i::wasm::ValueType::kI16:
- case i::wasm::ValueType::kStmt:
- case i::wasm::ValueType::kBottom:
+ case i::wasm::kI8:
+ case i::wasm::kI16:
+ case i::wasm::kStmt:
+ case i::wasm::kBottom:
UNREACHABLE();
}
}
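
The CreateFrameFromInternal change above now reads the module-relative source position from StackFrameInfo and derives the function-relative offset by subtracting the function's start offset within the module. A tiny sketch of that arithmetic, with hypothetical byte offsets:

#include <cstddef>

// Hypothetical numbers: the function body starts at byte 0x120 of the module
// and the frame's source position is byte 0x134.
constexpr size_t module_offset = 0x134;   // from GetSourcePosition(frame)
constexpr size_t function_start = 0x120;  // from GetWasmFunctionOffset(...)
constexpr size_t func_offset = module_offset - function_start;
static_assert(func_offset == 0x14, "offset is relative to the function body");
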
diff --git a/deps/v8/src/wasm/compilation-environment.h b/deps/v8/src/wasm/compilation-environment.h
index a9526f702b..49ab7c8fe7 100644
--- a/deps/v8/src/wasm/compilation-environment.h
+++ b/deps/v8/src/wasm/compilation-environment.h
@@ -142,6 +142,8 @@ class V8_EXPORT_PRIVATE CompilationState {
bool top_tier_compilation_finished() const;
bool recompilation_finished() const;
+ void set_compilation_id(int compilation_id);
+
// Override {operator delete} to avoid implicit instantiation of {operator
// delete} with {size_t} argument. The {size_t} argument would be incorrect.
void operator delete(void* ptr) { ::operator delete(ptr); }
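
The operator delete override shown in this hunk's context exists because the compiler-supplied size for an implicit sized delete reflects the static type at the delete-expression, which need not match the size of the object that was actually allocated. A standalone illustration of pinning down the unsized form (class name made up; this is not the V8 class):

#include <new>

struct PinsUnsizedDelete {
  // Declaring the unsized class-specific form ensures the size_t overload is
  // never implicitly chosen for this class.
  static void operator delete(void* ptr) { ::operator delete(ptr); }
};
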
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 458b564313..fbd3be5dcf 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -61,6 +61,7 @@ class Decoder {
virtual ~Decoder() = default;
+ // Ensures there are at least {length} bytes left to read, starting at {pc}.
bool validate_size(const byte* pc, uint32_t length, const char* msg) {
DCHECK_LE(start_, pc);
if (V8_UNLIKELY(pc > end_ || length > static_cast<uint32_t>(end_ - pc))) {
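
The new comment documents validate_size as a bounds pre-check before multi-byte reads. A standalone sketch of the same check, outside the Decoder class (helper name and signature are made up for illustration):

#include <cstdint>

// True iff there are at least {length} readable bytes starting at {pc}, given
// a buffer spanning [start, end). Mirrors the condition used in validate_size.
bool HasBytesLeft(const uint8_t* start, const uint8_t* end,
                  const uint8_t* pc, uint32_t length) {
  return pc >= start && pc <= end &&
         length <= static_cast<uint32_t>(end - pc);
}
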
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 3631309381..6c9700b100 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -35,7 +35,7 @@ struct WasmException;
if (FLAG_trace_wasm_decoder) PrintF(__VA_ARGS__); \
} while (false)
-#define TRACE_INST_FORMAT " @%-8d #%-20s|"
+#define TRACE_INST_FORMAT " @%-8d #%-30s|"
// If validate==true, return the evaluation of `condition`; otherwise DCHECK
// that it's true and always return true.
@@ -183,14 +183,12 @@ V8_INLINE WasmFeature feature_for_heap_type(HeapType heap_type) {
case HeapType::kFunc:
case HeapType::kExtern:
return WasmFeature::kFeature_reftypes;
- case HeapType::kExn:
- return WasmFeature::kFeature_eh;
case HeapType::kEq:
case HeapType::kI31:
+ case HeapType::kData:
case HeapType::kAny:
return WasmFeature::kFeature_gc;
case HeapType::kBottom:
- default:
UNREACHABLE();
}
}
@@ -213,10 +211,10 @@ HeapType read_heap_type(Decoder* decoder, const byte* pc,
uint8_t code = static_cast<ValueTypeCode>(heap_index) & uint_7_mask;
switch (code) {
case kFuncRefCode:
- case kExnRefCode:
case kEqRefCode:
case kExternRefCode:
case kI31RefCode:
+ case kDataRefCode:
case kAnyRefCode: {
HeapType result = HeapType::from_code(code);
if (!VALIDATE(enabled.contains(feature_for_heap_type(result)))) {
@@ -279,14 +277,16 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
ValueTypeCode code = static_cast<ValueTypeCode>(val);
switch (code) {
case kFuncRefCode:
- case kExnRefCode:
case kEqRefCode:
case kExternRefCode:
case kI31RefCode:
+ case kDataRefCode:
case kAnyRefCode: {
HeapType heap_type = HeapType::from_code(code);
- ValueType result = ValueType::Ref(
- heap_type, code == kI31RefCode ? kNonNullable : kNullable);
+ Nullability nullability = code == kI31RefCode || code == kDataRefCode
+ ? kNonNullable
+ : kNullable;
+ ValueType result = ValueType::Ref(heap_type, nullability);
if (!VALIDATE(enabled.contains(feature_for_heap_type(heap_type)))) {
DecodeError<validate>(
decoder, pc,
@@ -321,7 +321,7 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
return heap_type.is_bottom() ? kWasmBottom
: ValueType::Ref(heap_type, nullability);
}
- case kRttCode: {
+ case kRttWithDepthCode: {
if (!VALIDATE(enabled.has_gc())) {
DecodeError<validate>(
decoder, pc,
@@ -338,12 +338,52 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
depth, kV8MaxRttSubtypingDepth);
return kWasmBottom;
}
- uint32_t heap_type_length;
- HeapType heap_type = read_heap_type<validate>(
- decoder, pc + *length, &heap_type_length, module, enabled);
- *length += heap_type_length;
- return heap_type.is_bottom() ? kWasmBottom
- : ValueType::Rtt(heap_type, depth);
+ uint32_t type_index_length;
+ uint32_t type_index =
+ decoder->read_u32v<validate>(pc + *length, &type_index_length);
+ *length += type_index_length;
+ if (!VALIDATE(type_index < kV8MaxWasmTypes)) {
+ DecodeError<validate>(
+ decoder, pc,
+ "Type index %u is greater than the maximum number %zu "
+ "of type definitions supported by V8",
+ type_index, kV8MaxWasmTypes);
+ return kWasmBottom;
+ }
+ // We use capacity over size so this works mid-DecodeTypeSection.
+ if (!VALIDATE(module == nullptr ||
+ type_index < module->types.capacity())) {
+ DecodeError<validate>(decoder, pc, "Type index %u is out of bounds",
+ type_index);
+ return kWasmBottom;
+ }
+ return ValueType::Rtt(type_index, depth);
+ }
+ case kRttCode: {
+ if (!VALIDATE(enabled.has_gc())) {
+ DecodeError<validate>(
+ decoder, pc,
+ "invalid value type 'rtt', enable with --experimental-wasm-gc");
+ return kWasmBottom;
+ }
+ uint32_t type_index = decoder->read_u32v<validate>(pc + 1, length);
+ *length += 1;
+ if (!VALIDATE(type_index < kV8MaxWasmTypes)) {
+ DecodeError<validate>(
+ decoder, pc,
+ "Type index %u is greater than the maximum number %zu "
+ "of type definitions supported by V8",
+ type_index, kV8MaxWasmTypes);
+ return kWasmBottom;
+ }
+ // We use capacity over size so this works mid-DecodeTypeSection.
+ if (!VALIDATE(module == nullptr ||
+ type_index < module->types.capacity())) {
+ DecodeError<validate>(decoder, pc, "Type index %u is out of bounds",
+ type_index);
+ return kWasmBottom;
+ }
+ return ValueType::Rtt(type_index);
}
case kS128Code: {
if (!VALIDATE(enabled.has_simd())) {
@@ -582,6 +622,15 @@ struct TableIndexImmediate {
}
};
+template <Decoder::ValidateFlag validate>
+struct TypeIndexImmediate {
+ uint32_t index = 0;
+ uint32_t length = 1;
+ inline TypeIndexImmediate(Decoder* decoder, const byte* pc) {
+ index = decoder->read_u32v<validate>(pc, &length, "type index");
+ }
+};
+
// TODO(jkummerow): Introduce a common superclass for StructIndexImmediate and
// ArrayIndexImmediate? Maybe even FunctionIndexImmediate too?
template <Decoder::ValidateFlag validate>
@@ -997,7 +1046,7 @@ struct ControlBase : public PcForErrors<validate> {
F(RefFunc, uint32_t function_index, Value* result) \
F(RefAsNonNull, const Value& arg, Value* result) \
F(Drop) \
- F(DoReturn, Vector<Value> values) \
+ F(DoReturn) \
F(LocalGet, Value* result, const LocalIndexImmediate<validate>& imm) \
F(LocalSet, const Value& value, const LocalIndexImmediate<validate>& imm) \
F(LocalTee, const Value& value, Value* result, \
@@ -1011,6 +1060,7 @@ struct ControlBase : public PcForErrors<validate> {
F(TableSet, const Value& index, const Value& value, \
const TableIndexImmediate<validate>& imm) \
F(Unreachable) \
+ F(NopForTestingUnsupportedInLiftoff) \
F(Select, const Value& cond, const Value& fval, const Value& tval, \
Value* result) \
F(BrOrRet, uint32_t depth) \
@@ -1101,13 +1151,21 @@ struct ControlBase : public PcForErrors<validate> {
F(I31New, const Value& input, Value* result) \
F(I31GetS, const Value& input, Value* result) \
F(I31GetU, const Value& input, Value* result) \
- F(RttCanon, const HeapTypeImmediate<validate>& imm, Value* result) \
- F(RttSub, const HeapTypeImmediate<validate>& imm, const Value& parent, \
- Value* result) \
+ F(RttCanon, uint32_t type_index, Value* result) \
+ F(RttSub, uint32_t type_index, const Value& parent, Value* result) \
F(RefTest, const Value& obj, const Value& rtt, Value* result) \
F(RefCast, const Value& obj, const Value& rtt, Value* result) \
F(BrOnCast, const Value& obj, const Value& rtt, Value* result_on_branch, \
uint32_t depth) \
+ F(RefIsData, const Value& object, Value* result) \
+ F(RefAsData, const Value& object, Value* result) \
+ F(BrOnData, const Value& object, Value* value_on_branch, uint32_t br_depth) \
+ F(RefIsFunc, const Value& object, Value* result) \
+ F(RefAsFunc, const Value& object, Value* result) \
+ F(BrOnFunc, const Value& object, Value* value_on_branch, uint32_t br_depth) \
+ F(RefIsI31, const Value& object, Value* result) \
+ F(RefAsI31, const Value& object, Value* result) \
+ F(BrOnI31, const Value& object, Value* value_on_branch, uint32_t br_depth) \
F(Forward, const Value& from, Value* to)
// Generic Wasm bytecode decoder with utilities for decoding immediates,
@@ -1337,6 +1395,14 @@ class WasmDecoder : public Decoder {
return true;
}
+ inline bool Validate(const byte* pc, TypeIndexImmediate<validate>& imm) {
+ if (!VALIDATE(module_->has_type(imm.index))) {
+ DecodeError(pc, "invalid type index: %u", imm.index);
+ return false;
+ }
+ return true;
+ }
+
inline bool Complete(ArrayIndexImmediate<validate>& imm) {
if (!VALIDATE(module_->has_array(imm.index))) return false;
imm.array_type = module_->array_type(imm.index);
@@ -1623,6 +1689,7 @@ class WasmDecoder : public Decoder {
/********** Control opcodes **********/
case kExprUnreachable:
case kExprNop:
+ case kExprNopForTestingUnsupportedInLiftoff:
case kExprElse:
case kExprEnd:
case kExprReturn:
@@ -1813,7 +1880,6 @@ class WasmDecoder : public Decoder {
return length;
FOREACH_SIMD_1_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
return length + 1;
- // clang-format on
FOREACH_SIMD_MEM_OPCODE(DECLARE_OPCODE_CASE)
case kExprPrefetchT:
case kExprPrefetchNT: {
@@ -1822,20 +1888,14 @@ class WasmDecoder : public Decoder {
kConservativelyAssumeMemory64);
return length + imm.length;
}
- case kExprS128Load8Lane:
- case kExprS128Load16Lane:
- case kExprS128Load32Lane:
- case kExprS128Load64Lane:
- case kExprS128Store8Lane:
- case kExprS128Store16Lane:
- case kExprS128Store32Lane:
- case kExprS128Store64Lane: {
- MemoryAccessImmediate<validate> imm(decoder, pc + length,
- UINT32_MAX,
- kConservativelyAssumeMemory64);
+ FOREACH_SIMD_MEM_1_OPERAND_OPCODE(DECLARE_OPCODE_CASE) {
+ MemoryAccessImmediate<validate> imm(
+ decoder, pc + length, UINT32_MAX,
+ kConservativelyAssumeMemory64);
// 1 more byte for lane index immediate.
return length + imm.length + 1;
}
+ // clang-format on
// Shuffles require a byte per lane, or 16 immediate bytes.
case kExprS128Const:
case kExprI8x16Shuffle:
@@ -1901,10 +1961,7 @@ class WasmDecoder : public Decoder {
}
case kExprRttCanon:
case kExprRttSub: {
- // TODO(7748): Account for rtt.sub's additional immediates if
- // they stick.
- HeapTypeImmediate<validate> imm(WasmFeatures::All(), decoder,
- pc + length, nullptr);
+ TypeIndexImmediate<validate> imm(decoder, pc + length);
return length + imm.length;
}
case kExprI31New:
@@ -1913,11 +1970,8 @@ class WasmDecoder : public Decoder {
return length;
case kExprRefTest:
case kExprRefCast: {
- HeapTypeImmediate<validate> ht1(WasmFeatures::All(), decoder,
- pc + length, nullptr);
- HeapTypeImmediate<validate> ht2(WasmFeatures::All(), decoder,
- pc + length + ht1.length, nullptr);
- return length + ht1.length + ht2.length;
+ TypeIndexImmediate<validate> ht(decoder, pc + length);
+ return length + ht.length;
}
default:
// This is unreachable except for malformed modules.
@@ -2023,6 +2077,7 @@ class WasmDecoder : public Decoder {
case kExprUnwind:
case kExprRethrow:
case kExprNop:
+ case kExprNopForTestingUnsupportedInLiftoff:
case kExprReturn:
case kExprReturnCall:
case kExprReturnCallIndirect:
@@ -2392,6 +2447,15 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(Nop) { return 1; }
+ DECODE(NopForTestingUnsupportedInLiftoff) {
+ if (!VALIDATE(FLAG_enable_testing_opcode_in_wasm)) {
+ this->DecodeError("Invalid opcode 0x%x", opcode);
+ return 0;
+ }
+ CALL_INTERFACE_IF_REACHABLE(NopForTestingUnsupportedInLiftoff);
+ return 1;
+ }
+
#define BUILD_SIMPLE_OPCODE(op, _, sig) \
DECODE(op) { return BuildSimpleOperator_##sig(kExpr##op); }
FOREACH_SIMPLE_OPCODE(BUILD_SIMPLE_OPCODE)
@@ -2450,13 +2514,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CHECK_PROTOTYPE_OPCODE(eh);
ExceptionIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
- if (!VALIDATE(!control_.empty())) {
- this->DecodeError("catch does not match any try");
- return 0;
- }
+ DCHECK(!control_.empty());
Control* c = &control_.back();
if (!VALIDATE(c->is_try())) {
- this->DecodeError("catch does not match any try");
+ this->DecodeError("catch does not match a try");
return 0;
}
if (!VALIDATE(!c->is_try_catchall())) {
@@ -2484,6 +2545,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
DECODE(Delegate) {
+ CHECK_PROTOTYPE_OPCODE(eh);
BranchDepthImmediate<validate> imm(this, this->pc_ + 1);
// -1 because the current try block is not included in the count.
if (!this->Validate(this->pc_ + 1, imm, control_depth() - 1)) return 0;
@@ -2539,13 +2601,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(Unwind) {
CHECK_PROTOTYPE_OPCODE(eh);
- if (!VALIDATE(!control_.empty())) {
- this->DecodeError("unwind does not match any try");
- return 0;
- }
+ DCHECK(!control_.empty());
Control* c = &control_.back();
if (!VALIDATE(c->is_try())) {
- this->DecodeError("unwind does not match any try");
+ this->DecodeError("unwind does not match a try");
return 0;
}
if (!VALIDATE(!c->is_try_catch() && !c->is_try_catchall() &&
@@ -2570,12 +2629,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Control* c = control_at(imm.depth);
TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
switch (ref_object.type.kind()) {
- case ValueType::kBottom:
+ case kBottom:
// We are in a polymorphic stack. No need to push an additional bottom
// value.
DCHECK(check_result != kReachableBranch);
break;
- case ValueType::kRef: {
+ case kRef: {
// Simply forward the popped argument to the result.
Value* result = Push(ref_object.type);
if (V8_LIKELY(check_result == kReachableBranch)) {
@@ -2583,7 +2642,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
break;
}
- case ValueType::kOptRef: {
+ case kOptRef: {
if (V8_LIKELY(check_result == kReachableBranch)) {
CALL_INTERFACE_IF_REACHABLE(BrOnNull, ref_object, imm.depth);
Value* result =
@@ -2658,10 +2717,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
DECODE(Else) {
- if (!VALIDATE(!control_.empty())) {
- this->DecodeError("else does not match any if");
- return 0;
- }
+ DCHECK(!control_.empty());
Control* c = &control_.back();
if (!VALIDATE(c->is_if())) {
this->DecodeError("else does not match an if");
@@ -2682,10 +2738,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
DECODE(End) {
- if (!VALIDATE(!control_.empty())) {
- this->DecodeError("end does not match any if, try, or block");
- return 0;
- }
+ DCHECK(!control_.empty());
Control* c = &control_.back();
if (!VALIDATE(!c->is_incomplete_try())) {
this->DecodeError("missing catch or catch-all in try");
@@ -2913,13 +2966,14 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value value = Pop(0);
Value* result = Push(kWasmI32);
switch (value.type.kind()) {
- case ValueType::kOptRef:
+ case kOptRef:
CALL_INTERFACE_IF_REACHABLE(UnOp, kExprRefIsNull, value, result);
return 1;
- case ValueType::kBottom:
+ case kBottom:
// We are in unreachable code, the return value does not matter.
- case ValueType::kRef:
+ case kRef:
// For non-nullable references, the result is always false.
+ CALL_INTERFACE_IF_REACHABLE(Drop);
CALL_INTERFACE_IF_REACHABLE(I32Const, result, 0);
return 1;
default:
@@ -2947,14 +3001,14 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CHECK_PROTOTYPE_OPCODE(typed_funcref);
Value value = Pop(0);
switch (value.type.kind()) {
- case ValueType::kBottom:
+ case kBottom:
// We are in unreachable code. Forward the bottom value.
- case ValueType::kRef: {
+ case kRef: {
Value* result = Push(value.type);
CALL_INTERFACE_IF_REACHABLE(Forward, value, result);
return 1;
}
- case ValueType::kOptRef: {
+ case kOptRef: {
Value* result =
Push(ValueType::Ref(value.type.heap_type(), kNonNullable));
CALL_INTERFACE_IF_REACHABLE(RefAsNonNull, value, result);
@@ -3089,7 +3143,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(MemorySize) {
if (!CheckHasMemory()) return 0;
MemoryIndexImmediate<validate> imm(this, this->pc_ + 1);
- Value* result = Push(kWasmI32);
+ ValueType result_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
+ Value* result = Push(result_type);
CALL_INTERFACE_IF_REACHABLE(CurrentMemoryPages, result);
return 1 + imm.length;
}
@@ -3198,8 +3253,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (full_opcode == kExprTableGrow || full_opcode == kExprTableSize ||
full_opcode == kExprTableFill) {
CHECK_PROTOTYPE_OPCODE(reftypes);
- } else if (full_opcode >= kExprMemoryInit) {
- CHECK_PROTOTYPE_OPCODE(bulk_memory);
}
trace_msg->AppendOpcode(full_opcode);
return DecodeNumericOpcode(full_opcode, opcode_length);
@@ -3207,6 +3260,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(Simd) {
CHECK_PROTOTYPE_OPCODE(simd);
+ if (!CheckHardwareSupportsSimd()) {
+ if (FLAG_correctness_fuzzer_suppressions) {
+ FATAL("Aborting on missing Wasm SIMD support");
+ }
+ this->DecodeError("Wasm SIMD unsupported");
+ return 0;
+ }
uint32_t opcode_length = 0;
WasmOpcode full_opcode = this->template read_prefixed_opcode<validate>(
this->pc_, &opcode_length);
@@ -3292,6 +3352,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE_IMPL(BrTable);
DECODE_IMPL(Return);
DECODE_IMPL(Unreachable);
+ DECODE_IMPL(NopForTestingUnsupportedInLiftoff);
DECODE_IMPL(I32Const);
DECODE_IMPL(I64Const);
DECODE_IMPL(F32Const);
@@ -3348,7 +3409,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// Set up initial function block.
{
- Control* c = PushControl(kControlBlock);
+ DCHECK(control_.empty());
+ control_.emplace_back(kControlBlock, 0, 0, this->pc_, kReachable);
+ Control* c = &control_.back();
InitMerge(&c->start_merge, 0, [](uint32_t) -> Value { UNREACHABLE(); });
InitMerge(&c->end_merge,
static_cast<uint32_t>(this->sig_->return_count()),
@@ -3401,6 +3464,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
}
+ // Initializes start- and end-merges of {c} with values according to the
+ // in- and out-types of {c} respectively.
void SetBlockType(Control* c, BlockTypeImmediate<validate>& imm,
Value* args) {
const byte* pc = this->pc_;
@@ -3445,8 +3510,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
Control* PushControl(ControlKind kind, uint32_t locals_count = 0) {
- Reachability reachability =
- control_.empty() ? kReachable : control_.back().innerReachability();
+ DCHECK(!control_.empty());
+ Reachability reachability = control_.back().innerReachability();
control_.emplace_back(kind, locals_count, stack_size(), this->pc_,
reachability);
current_code_reachable_ = this->ok() && reachability == kReachable;
@@ -3454,6 +3519,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
void PopControl(Control* c) {
+ // This cannot be the outermost control block.
+ DCHECK_LT(1, control_.size());
+
DCHECK_EQ(c, &control_.back());
CALL_INTERFACE_IF_PARENT_REACHABLE(PopControl, c);
@@ -3816,8 +3884,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
// TODO(7748): Drop this check if {imm} is dropped from the proposal
// à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(rtt.type.is_bottom() ||
- rtt.type.heap_representation() == imm.index)) {
+ if (!VALIDATE(
+ rtt.type.is_bottom() ||
+ (rtt.type.ref_index() == imm.index && rtt.type.has_depth()))) {
PopTypeError(imm.struct_type->field_count(), rtt,
"rtt for type " + std::to_string(imm.index));
return 0;
@@ -3850,8 +3919,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
// TODO(7748): Drop this check if {imm} is dropped from the proposal
// à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(rtt.type.is_bottom() ||
- rtt.type.heap_representation() == imm.index)) {
+ if (!VALIDATE(
+ rtt.type.is_bottom() ||
+ (rtt.type.ref_index() == imm.index && rtt.type.has_depth()))) {
PopTypeError(0, rtt, "rtt for type " + std::to_string(imm.index));
return 0;
}
@@ -3923,8 +3993,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
// TODO(7748): Drop this check if {imm} is dropped from the proposal
// à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(rtt.type.is_bottom() ||
- rtt.type.heap_representation() == imm.index)) {
+ if (!VALIDATE(
+ rtt.type.is_bottom() ||
+ (rtt.type.ref_index() == imm.index && rtt.type.has_depth()))) {
PopTypeError(2, rtt, "rtt for type " + std::to_string(imm.index));
return 0;
}
@@ -3952,8 +4023,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
// TODO(7748): Drop this check if {imm} is dropped from the proposal
// à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(rtt.type.is_bottom() ||
- rtt.type.heap_representation() == imm.index)) {
+ if (!VALIDATE(
+ rtt.type.is_bottom() ||
+ (rtt.type.ref_index() == imm.index && rtt.type.has_depth()))) {
PopTypeError(1, rtt, "rtt for type " + std::to_string(imm.index));
return 0;
}
@@ -4039,109 +4111,93 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return opcode_length;
}
case kExprRttCanon: {
- HeapTypeImmediate<validate> imm(
- this->enabled_, this, this->pc_ + opcode_length, this->module_);
- if (!VALIDATE(this->ok())) return 0;
- Value* value =
- Push(ValueType::Rtt(imm.type, imm.type == HeapType::kAny ? 0 : 1));
- CALL_INTERFACE_IF_REACHABLE(RttCanon, imm, value);
+ TypeIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
+ Value* value = Push(ValueType::Rtt(imm.index, 0));
+ CALL_INTERFACE_IF_REACHABLE(RttCanon, imm.index, value);
return opcode_length + imm.length;
}
case kExprRttSub: {
- // TODO(7748): The proposal currently includes additional immediates
- // here: the subtyping depth <n> and the "parent type", see:
- // https://github.com/WebAssembly/gc/commit/20a80e34 .
- // If these immediates don't get dropped (in the spirit of
- // https://github.com/WebAssembly/function-references/pull/31 ),
- // implement them here.
- HeapTypeImmediate<validate> imm(
- this->enabled_, this, this->pc_ + opcode_length, this->module_);
- if (!VALIDATE(this->ok())) return 0;
+ TypeIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value parent = Pop(0);
if (parent.type.is_bottom()) {
Push(kWasmBottom);
} else {
if (!VALIDATE(parent.type.is_rtt() &&
- IsHeapSubtypeOf(imm.type, parent.type.heap_type(),
+ IsHeapSubtypeOf(imm.index, parent.type.ref_index(),
this->module_))) {
- PopTypeError(0, parent,
- "rtt for a supertype of type " + imm.type.name());
+ PopTypeError(
+ 0, parent,
+ "rtt for a supertype of type " + std::to_string(imm.index));
return 0;
}
Value* value =
- Push(ValueType::Rtt(imm.type, parent.type.depth() + 1));
- // (rtt.sub $t (rtt.canon any)) is reduced to (rtt.canon $t),
- // unless t == any.
- // This is important because other canonical rtts are not cached in
- // (rtt.canon any)'s subtype list.
- if (parent.type == ValueType::Rtt(HeapType::kAny, 0) &&
- imm.type != HeapType::kAny) {
- CALL_INTERFACE_IF_REACHABLE(RttCanon, imm, value);
- } else {
- CALL_INTERFACE_IF_REACHABLE(RttSub, imm, parent, value);
- }
+ Push(ValueType::Rtt(imm.index, parent.type.depth() + 1));
+
+ CALL_INTERFACE_IF_REACHABLE(RttSub, imm.index, parent, value);
}
return opcode_length + imm.length;
}
case kExprRefTest: {
// "Tests whether {obj}'s runtime type is a runtime subtype of {rtt}."
- HeapTypeImmediate<validate> obj_type(
- this->enabled_, this, this->pc_ + opcode_length, this->module_);
- int len = opcode_length + obj_type.length;
- HeapTypeImmediate<validate> rtt_type(this->enabled_, this,
- this->pc_ + len, this->module_);
- len += rtt_type.length;
- if (!VALIDATE(this->ok())) return 0;
-
- // The static type of {obj} must be a supertype of the {rtt}'s type.
- if (!VALIDATE(
- IsHeapSubtypeOf(rtt_type.type, obj_type.type, this->module_))) {
- this->DecodeError(
- "ref.test: immediate rtt type %s is not a subtype of immediate "
- "object type %s",
- rtt_type.type.name().c_str(), obj_type.type.name().c_str());
+ Value rtt = Pop(1);
+ Value obj = Pop(0);
+ Value* value = Push(kWasmI32);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(1, rtt, "rtt");
return 0;
}
- Value rtt = Pop(1);
- if (!VALIDATE(
- (rtt.type.is_rtt() && rtt.type.heap_type() == rtt_type.type) ||
- rtt.type == kWasmBottom)) {
- PopTypeError(1, rtt, "rtt for type " + rtt_type.type.name());
+ if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
+ IsSubtypeOf(obj.type,
+ ValueType::Ref(HeapType::kData, kNullable),
+ this->module_) ||
+ obj.type.is_bottom())) {
+ PopTypeError(0, obj, "subtype of (ref null func) or (ref null data)");
return 0;
}
- Value obj = Pop(0, ValueType::Ref(obj_type.type, kNullable));
- Value* value = Push(kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(RefTest, obj, rtt, value);
- return len;
+ if (!obj.type.is_bottom() && !rtt.type.is_bottom()) {
+ if (!VALIDATE(IsSubtypeOf(
+ ValueType::Ref(rtt.type.ref_index(), kNonNullable), obj.type,
+ this->module_))) {
+ PopTypeError(
+ 0, obj,
+ "supertype of type " + std::to_string(rtt.type.ref_index()));
+ return 0;
+ }
+ CALL_INTERFACE_IF_REACHABLE(RefTest, obj, rtt, value);
+ }
+ return opcode_length;
}
case kExprRefCast: {
- HeapTypeImmediate<validate> obj_type(
- this->enabled_, this, this->pc_ + opcode_length, this->module_);
- int len = opcode_length + obj_type.length;
- HeapTypeImmediate<validate> rtt_type(this->enabled_, this,
- this->pc_ + len, this->module_);
- len += rtt_type.length;
- if (!VALIDATE(this->ok())) return 0;
-
- if (!VALIDATE(
- IsHeapSubtypeOf(rtt_type.type, obj_type.type, this->module_))) {
- this->DecodeError(
- "ref.test: immediate rtt type %s is not a subtype of immediate "
- "object type %s",
- rtt_type.type.name().c_str(), obj_type.type.name().c_str());
+ Value rtt = Pop(1);
+ Value obj = Pop(0);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(1, rtt, "rtt");
return 0;
}
- Value rtt = Pop(1);
- if (!VALIDATE(
- (rtt.type.is_rtt() && rtt.type.heap_type() == rtt_type.type) ||
- rtt.type == kWasmBottom)) {
- PopTypeError(1, rtt, "rtt for type " + rtt_type.type.name());
+ if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
+ IsSubtypeOf(obj.type,
+ ValueType::Ref(HeapType::kData, kNullable),
+ this->module_) ||
+ obj.type.is_bottom())) {
+ PopTypeError(0, obj, "subtype of (ref null func) or (ref null data)");
return 0;
}
- Value obj = Pop(0, ValueType::Ref(obj_type.type, kNullable));
- Value* value = Push(ValueType::Ref(rtt_type.type, kNonNullable));
- CALL_INTERFACE_IF_REACHABLE(RefCast, obj, rtt, value);
- return len;
+ if (!obj.type.is_bottom() && !rtt.type.is_bottom()) {
+ if (!VALIDATE(IsSubtypeOf(
+ ValueType::Ref(rtt.type.ref_index(), kNonNullable), obj.type,
+ this->module_))) {
+ PopTypeError(
+ 0, obj,
+ "supertype of type " + std::to_string(rtt.type.ref_index()));
+ return 0;
+ }
+ Value* value = Push(
+ ValueType::Ref(rtt.type.ref_index(), obj.type.nullability()));
+ CALL_INTERFACE_IF_REACHABLE(RefCast, obj, rtt, value);
+ }
+ return opcode_length;
}
case kExprBrOnCast: {
BranchDepthImmediate<validate> branch_depth(this,
@@ -4150,31 +4206,40 @@ class WasmFullDecoder : public WasmDecoder<validate> {
control_.size())) {
return 0;
}
- // TODO(7748): If the heap type immediates remain in the spec, read
- // them here.
Value rtt = Pop(1);
if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
PopTypeError(1, rtt, "rtt");
return 0;
}
Value obj = Pop(0);
- if (!VALIDATE(obj.type.is_object_reference_type() ||
- rtt.type.is_bottom())) {
- PopTypeError(0, obj, "reference");
+ if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
+ IsSubtypeOf(obj.type,
+ ValueType::Ref(HeapType::kData, kNullable),
+ this->module_) ||
+ obj.type.is_bottom())) {
+ PopTypeError(0, obj, "subtype of (ref null func) or (ref null data)");
return 0;
}
// The static type of {obj} must be a supertype of {rtt}'s type.
if (!VALIDATE(rtt.type.is_bottom() || obj.type.is_bottom() ||
- IsHeapSubtypeOf(rtt.type.heap_type(),
- obj.type.heap_type(), this->module_))) {
+ IsHeapSubtypeOf(rtt.type.ref_index(),
+ obj.type.heap_representation(),
+ this->module_))) {
PopTypeError(1, rtt, obj.type);
return 0;
}
Control* c = control_at(branch_depth.depth);
+ if (c->br_merge()->arity == 0) {
+ this->DecodeError(
+ "br_on_cast must target a branch of arity at least 1");
+ return 0;
+ }
+ // We temporarily push this value to the stack for TypeCheckBranchResult
+ // and for MergeValuesInto in the interface.
Value* result_on_branch =
Push(rtt.type.is_bottom()
? kWasmBottom
- : ValueType::Ref(rtt.type.heap_type(), kNonNullable));
+ : ValueType::Ref(rtt.type.ref_index(), kNonNullable));
TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
if (V8_LIKELY(check_result == kReachableBranch)) {
CALL_INTERFACE(BrOnCast, obj, rtt, result_on_branch,
@@ -4188,6 +4253,77 @@ class WasmFullDecoder : public WasmDecoder<validate> {
*result_on_fallthrough = obj;
return opcode_length + branch_depth.length;
}
+#define ABSTRACT_TYPE_CHECK(heap_type) \
+ case kExprRefIs##heap_type: { \
+ Value arg = Pop(0, kWasmAnyRef); \
+ Value* result = Push(kWasmI32); \
+ CALL_INTERFACE_IF_REACHABLE(RefIs##heap_type, arg, result); \
+ return opcode_length; \
+ }
+
+ ABSTRACT_TYPE_CHECK(Data)
+ ABSTRACT_TYPE_CHECK(Func)
+ ABSTRACT_TYPE_CHECK(I31)
+#undef ABSTRACT_TYPE_CHECK
+
+#define ABSTRACT_TYPE_CAST(heap_type) \
+ case kExprRefAs##heap_type: { \
+ Value arg = Pop(0, kWasmAnyRef); \
+ if (!arg.type.is_bottom()) { \
+ Value* result = \
+ Push(ValueType::Ref(HeapType::k##heap_type, kNonNullable)); \
+ CALL_INTERFACE_IF_REACHABLE(RefAs##heap_type, arg, result); \
+ } \
+ return opcode_length; \
+ }
+
+ ABSTRACT_TYPE_CAST(Data)
+ ABSTRACT_TYPE_CAST(Func)
+ ABSTRACT_TYPE_CAST(I31)
+#undef ABSTRACT_TYPE_CAST
+
+ case kExprBrOnData:
+ case kExprBrOnFunc:
+ case kExprBrOnI31: {
+ BranchDepthImmediate<validate> branch_depth(this,
+ this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, branch_depth,
+ control_.size())) {
+ return 0;
+ }
+
+ Value obj = Pop(0, kWasmAnyRef);
+ Control* c = control_at(branch_depth.depth);
+ HeapType::Representation heap_type =
+ opcode == kExprBrOnFunc
+ ? HeapType::kFunc
+ : opcode == kExprBrOnData ? HeapType::kData : HeapType::kI31;
+ if (c->br_merge()->arity == 0) {
+ this->DecodeError("%s must target a branch of arity at least 1",
+ SafeOpcodeNameAt(this->pc_));
+ return 0;
+ }
+ // We temporarily push this value to the stack for TypeCheckBranchResult
+ // and for MergeValuesInto in the interface.
+ Value* result_on_branch = Push(ValueType::Ref(heap_type, kNonNullable));
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
+ if (V8_LIKELY(check_result == kReachableBranch)) {
+ if (opcode == kExprBrOnFunc) {
+ CALL_INTERFACE(BrOnFunc, obj, result_on_branch, branch_depth.depth);
+ } else if (opcode == kExprBrOnData) {
+ CALL_INTERFACE(BrOnData, obj, result_on_branch, branch_depth.depth);
+ } else {
+ CALL_INTERFACE(BrOnI31, obj, result_on_branch, branch_depth.depth);
+ }
+ c->br_merge()->reached = true;
+ } else if (check_result == kInvalidStack) {
+ return 0;
+ }
+ Pop(0); // Drop {result_on_branch}, restore original value.
+ Value* result_on_fallthrough = Push(obj.type);
+ *result_on_fallthrough = obj;
+ return opcode_length + branch_depth.length;
+ }
default:
this->DecodeError("invalid gc opcode");
return 0;
@@ -4350,12 +4486,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
void DoReturn() {
- size_t return_count = this->sig_->return_count();
- DCHECK_GE(stack_size(), return_count);
- Vector<Value> return_values =
- Vector<Value>{stack_end_ - return_count, return_count};
-
- CALL_INTERFACE_IF_REACHABLE(DoReturn, return_values);
+ DCHECK_GE(stack_size(), this->sig_->return_count());
+ CALL_INTERFACE_IF_REACHABLE(DoReturn);
}
V8_INLINE void EnsureStackSpace(int slots_needed) {
@@ -4483,10 +4615,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
void FallThruTo(Control* c) {
DCHECK_EQ(c, &control_.back());
+ DCHECK_NE(c->kind, kControlLoop);
if (!TypeCheckFallThru()) return;
if (!c->reachable()) return;
-
- if (!c->is_loop()) CALL_INTERFACE(FallThruTo, c);
+ CALL_INTERFACE(FallThruTo, c);
c->end_merge.reached = true;
}
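
For readers unfamiliar with the pattern used in the ABSTRACT_TYPE_CHECK / ABSTRACT_TYPE_CAST additions above, here is a minimal standalone sketch (not part of the patch; the enum and names are purely illustrative) of stamping out near-identical switch cases from a macro and undefining it afterwards:

    #include <cstdio>

    enum Opcode { kRefIsData, kRefIsFunc, kRefIsI31 };

    int Dispatch(Opcode opcode) {
      switch (opcode) {
    // Each expansion splices the heap-type name into one case label and body.
    #define ABSTRACT_TYPE_CHECK(heap_type)       \
      case kRefIs##heap_type:                    \
        std::printf("ref.is_%s\n", #heap_type);  \
        return 1;

        ABSTRACT_TYPE_CHECK(Data)
        ABSTRACT_TYPE_CHECK(Func)
        ABSTRACT_TYPE_CHECK(I31)
    #undef ABSTRACT_TYPE_CHECK
      }
      return 0;
    }

The #undef keeps the helper local to the switch, which is why the patch defines and undefines the macros back to back.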
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index aa327821fb..c5aab2b593 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -4,6 +4,7 @@
#include "src/wasm/function-body-decoder.h"
+#include "src/codegen/assembler-inl.h"
#include "src/flags/flags.h"
#include "src/handles/handles.h"
#include "src/objects/objects-inl.h"
@@ -71,6 +72,8 @@ unsigned OpcodeLength(const byte* pc, const byte* end) {
return WasmDecoder<Decoder::kNoValidation>::OpcodeLength(&decoder, pc);
}
+bool CheckHardwareSupportsSimd() { return CpuFeatures::SupportsWasmSimd128(); }
+
std::pair<uint32_t, uint32_t> StackEffect(const WasmModule* module,
const FunctionSig* sig,
const byte* pc, const byte* end) {
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index 241f4ead2d..4bc42eda26 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -86,6 +86,9 @@ V8_EXPORT_PRIVATE std::pair<uint32_t, uint32_t> StackEffect(
const WasmModule* module, const FunctionSig* sig, const byte* pc,
const byte* end);
+// Checks if the underlying hardware supports the Wasm SIMD proposal.
+V8_EXPORT_PRIVATE bool CheckHardwareSupportsSimd();
+
// A simple forward iterator for bytecodes.
class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
// Base class for both iterators defined below.
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index 0e4135f03a..0129d4d8e7 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -271,16 +271,14 @@ bool UseGenericWrapper(const FunctionSig* sig) {
if (sig->returns().size() > 1) {
return false;
}
- if (sig->returns().size() == 1 &&
- sig->GetReturn(0).kind() != ValueType::kI32 &&
- sig->GetReturn(0).kind() != ValueType::kI64 &&
- sig->GetReturn(0).kind() != ValueType::kF32 &&
- sig->GetReturn(0).kind() != ValueType::kF64) {
+ if (sig->returns().size() == 1 && sig->GetReturn(0).kind() != kI32 &&
+ sig->GetReturn(0).kind() != kI64 && sig->GetReturn(0).kind() != kF32 &&
+ sig->GetReturn(0).kind() != kF64) {
return false;
}
for (ValueType type : sig->parameters()) {
- if (type.kind() != ValueType::kI32 && type.kind() != ValueType::kI64 &&
- type.kind() != ValueType::kF32 && type.kind() != ValueType::kF64) {
+ if (type.kind() != kI32 && type.kind() != kI64 && type.kind() != kF32 &&
+ type.kind() != kF64) {
return false;
}
}
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index 1b8edd0c24..3893d2841d 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -88,6 +88,8 @@ class WasmGraphBuildingInterface {
explicit Value(Args&&... args) V8_NOEXCEPT
: ValueBase(std::forward<Args>(args)...) {}
};
+ using StackValueVector = base::SmallVector<Value, 8>;
+ using NodeVector = base::SmallVector<TFNode*, 8>;
struct TryInfo : public ZoneObject {
SsaEnv* catch_env;
@@ -105,7 +107,8 @@ class WasmGraphBuildingInterface {
SsaEnv* false_env = nullptr; // false environment (only for if).
TryInfo* try_info = nullptr; // information about try statements.
int32_t previous_catch = -1; // previous Control with a catch.
-
+ BitVector* loop_assignments = nullptr; // locals assigned in this loop.
+ TFNode* loop_node = nullptr; // loop header of this loop.
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(Control);
template <typename... Args>
@@ -171,7 +174,40 @@ class WasmGraphBuildingInterface {
block->end_env = finish_try_env;
SetEnv(finish_try_env);
// The continue environment is the inner environment.
- PrepareForLoop(decoder);
+
+ ssa_env_->state = SsaEnv::kMerged;
+
+ TFNode* loop_node = builder_->Loop(control());
+ builder_->SetControl(loop_node);
+ decoder->control_at(0)->loop_node = loop_node;
+
+ TFNode* effect_inputs[] = {effect(), control()};
+ builder_->SetEffect(builder_->EffectPhi(1, effect_inputs));
+ builder_->TerminateLoop(effect(), control());
+ // Doing a preprocessing pass to analyze loop assignments seems to pay off
+ // compared to reallocating Nodes when rearranging Phis in Goto.
+ BitVector* assigned = WasmDecoder<validate>::AnalyzeLoopAssignment(
+ decoder, decoder->pc(), decoder->num_locals(), decoder->zone());
+ if (decoder->failed()) return;
+ DCHECK_NOT_NULL(assigned);
+ decoder->control_at(0)->loop_assignments = assigned;
+
+ // Only introduce phis for variables assigned in this loop.
+ int instance_cache_index = decoder->num_locals();
+ for (int i = decoder->num_locals() - 1; i >= 0; i--) {
+ if (!assigned->Contains(i)) continue;
+ TFNode* inputs[] = {ssa_env_->locals[i], control()};
+ ssa_env_->locals[i] = builder_->Phi(decoder->local_type(i), 1, inputs);
+ }
+ // Introduce phis for instance cache pointers if necessary.
+ if (assigned->Contains(instance_cache_index)) {
+ builder_->PrepareInstanceCacheForLoop(&ssa_env_->instance_cache,
+ control());
+ }
+
+ SetEnv(Split(decoder->zone(), ssa_env_));
+ builder_->StackCheck(decoder->position());
+
ssa_env_->SetNotMerged();
if (!decoder->ok()) return;
// Wrap input merge into phis.
@@ -218,7 +254,24 @@ class WasmGraphBuildingInterface {
void PopControl(FullDecoder* decoder, Control* block) {
// A loop just continues with the end environment. There is no merge.
- if (block->is_loop()) return;
+ // However, if loop unrolling is enabled, we must create a loop exit and
+ // wrap the fallthru values on the stack.
+ if (block->is_loop()) {
+ if (FLAG_wasm_loop_unrolling && block->reachable()) {
+ BuildLoopExits(decoder, block);
+ WrapLocalsAtLoopExit(decoder, block);
+ uint32_t arity = block->end_merge.arity;
+ if (arity > 0) {
+ Value* stack_base = decoder->stack_value(arity);
+ for (uint32_t i = 0; i < arity; i++) {
+ Value* val = stack_base + i;
+ val->node = builder_->LoopExitValue(
+ val->node, val->type.machine_representation());
+ }
+ }
+ }
+ return;
+ }
// Any other block falls through to the parent block.
if (block->reachable()) FallThruTo(decoder, block);
if (block->is_onearmed_if()) {
@@ -282,15 +335,6 @@ class WasmGraphBuildingInterface {
void Drop(FullDecoder* decoder) {}
- void DoReturn(FullDecoder* decoder, Vector<Value> values) {
- base::SmallVector<TFNode*, 8> nodes(values.size());
- GetNodes(nodes.begin(), values);
- if (FLAG_trace_wasm) {
- BUILD(TraceFunctionExit, VectorOf(nodes), decoder->position());
- }
- BUILD(Return, VectorOf(nodes));
- }
-
void LocalGet(FullDecoder* decoder, Value* result,
const LocalIndexImmediate<validate>& imm) {
result->node = ssa_env_->locals[imm.index];
@@ -341,9 +385,16 @@ class WasmGraphBuildingInterface {
}
void Unreachable(FullDecoder* decoder) {
+ StackValueVector values;
+ if (FLAG_wasm_loop_unrolling) {
+ BuildNestedLoopExits(decoder, decoder->control_depth() - 1, false,
+ values);
+ }
BUILD(Trap, wasm::TrapReason::kTrapUnreachable, decoder->position());
}
+ void NopForTestingUnsupportedInLiftoff(FullDecoder* decoder) {}
+
void Select(FullDecoder* decoder, const Value& cond, const Value& fval,
const Value& tval, Value* result) {
TFNode* controls[2];
@@ -355,20 +406,56 @@ class WasmGraphBuildingInterface {
builder_->SetControl(merge);
}
+ StackValueVector CopyStackValues(FullDecoder* decoder, uint32_t count) {
+ Value* stack_base = count > 0 ? decoder->stack_value(count) : nullptr;
+ StackValueVector stack_values(count);
+ for (uint32_t i = 0; i < count; i++) {
+ stack_values[i] = stack_base[i];
+ }
+ return stack_values;
+ }
+
+ void DoReturn(FullDecoder* decoder) {
+ uint32_t ret_count = static_cast<uint32_t>(decoder->sig_->return_count());
+ NodeVector values(ret_count);
+ SsaEnv* internal_env = ssa_env_;
+ if (FLAG_wasm_loop_unrolling) {
+ SsaEnv* exit_env = Split(decoder->zone(), ssa_env_);
+ SetEnv(exit_env);
+ auto stack_values = CopyStackValues(decoder, ret_count);
+ BuildNestedLoopExits(decoder, decoder->control_depth() - 1, false,
+ stack_values);
+ GetNodes(values.begin(), VectorOf(stack_values));
+ } else {
+ Value* stack_base =
+ ret_count == 0 ? nullptr : decoder->stack_value(ret_count);
+ GetNodes(values.begin(), stack_base, ret_count);
+ }
+ if (FLAG_trace_wasm) {
+ BUILD(TraceFunctionExit, VectorOf(values), decoder->position());
+ }
+ BUILD(Return, VectorOf(values));
+ SetEnv(internal_env);
+ }
+
void BrOrRet(FullDecoder* decoder, uint32_t depth) {
if (depth == decoder->control_depth() - 1) {
- uint32_t ret_count = static_cast<uint32_t>(decoder->sig_->return_count());
- base::SmallVector<TFNode*, 8> values(ret_count);
- if (ret_count > 0) {
- GetNodes(values.begin(), decoder->stack_value(ret_count), ret_count);
- }
- if (FLAG_trace_wasm) {
- BUILD(TraceFunctionExit, VectorOf(values), decoder->position());
- }
- BUILD(Return, VectorOf(values));
+ DoReturn(decoder);
} else {
Control* target = decoder->control_at(depth);
- MergeValuesInto(decoder, target, target->br_merge());
+ if (FLAG_wasm_loop_unrolling) {
+ SsaEnv* internal_env = ssa_env_;
+ SsaEnv* exit_env = Split(decoder->zone(), ssa_env_);
+ SetEnv(exit_env);
+ uint32_t value_count = target->br_merge()->arity;
+ auto stack_values = CopyStackValues(decoder, value_count);
+ BuildNestedLoopExits(decoder, depth, true, stack_values);
+ MergeValuesInto(decoder, target, target->br_merge(),
+ stack_values.data());
+ SetEnv(internal_env);
+ } else {
+ MergeValuesInto(decoder, target, target->br_merge());
+ }
}
}
@@ -474,33 +561,34 @@ class WasmGraphBuildingInterface {
LoadContextIntoSsa(ssa_env_);
}
- enum CallMode { kDirect, kIndirect, kRef };
+ enum CallMode { kCallDirect, kCallIndirect, kCallRef };
void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[]) {
- DoCall(decoder, kDirect, 0, CheckForNull::kWithoutNullCheck, nullptr,
+ DoCall(decoder, kCallDirect, 0, CheckForNull::kWithoutNullCheck, nullptr,
imm.sig, imm.index, args, returns);
}
void ReturnCall(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[]) {
- DoReturnCall(decoder, kDirect, 0, CheckForNull::kWithoutNullCheck, nullptr,
- imm.sig, imm.index, args);
+ DoReturnCall(decoder, kCallDirect, 0, CheckForNull::kWithoutNullCheck,
+ nullptr, imm.sig, imm.index, args);
}
void CallIndirect(FullDecoder* decoder, const Value& index,
const CallIndirectImmediate<validate>& imm,
const Value args[], Value returns[]) {
- DoCall(decoder, kIndirect, imm.table_index, CheckForNull::kWithoutNullCheck,
- index.node, imm.sig, imm.sig_index, args, returns);
+ DoCall(decoder, kCallIndirect, imm.table_index,
+ CheckForNull::kWithoutNullCheck, index.node, imm.sig, imm.sig_index,
+ args, returns);
}
void ReturnCallIndirect(FullDecoder* decoder, const Value& index,
const CallIndirectImmediate<validate>& imm,
const Value args[]) {
- DoReturnCall(decoder, kIndirect, imm.table_index,
+ DoReturnCall(decoder, kCallIndirect, imm.table_index,
CheckForNull::kWithoutNullCheck, index.node, imm.sig,
imm.sig_index, args);
}
@@ -511,8 +599,8 @@ class WasmGraphBuildingInterface {
CheckForNull null_check = func_ref.type.is_nullable()
? CheckForNull::kWithNullCheck
: CheckForNull::kWithoutNullCheck;
- DoCall(decoder, kRef, 0, null_check, func_ref.node, sig, sig_index, args,
- returns);
+ DoCall(decoder, kCallRef, 0, null_check, func_ref.node, sig, sig_index,
+ args, returns);
}
void ReturnCallRef(FullDecoder* decoder, const Value& func_ref,
@@ -521,8 +609,8 @@ class WasmGraphBuildingInterface {
CheckForNull null_check = func_ref.type.is_nullable()
? CheckForNull::kWithNullCheck
: CheckForNull::kWithoutNullCheck;
- DoReturnCall(decoder, kRef, 0, null_check, func_ref.node, sig, sig_index,
- args);
+ DoReturnCall(decoder, kCallRef, 0, null_check, func_ref.node, sig,
+ sig_index, args);
}
void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth) {
@@ -539,7 +627,7 @@ class WasmGraphBuildingInterface {
void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
- base::SmallVector<TFNode*, 8> inputs(args.size());
+ NodeVector inputs(args.size());
GetNodes(inputs.begin(), args);
TFNode* node = BUILD(SimdOp, opcode, inputs.begin());
if (result) result->node = node;
@@ -548,7 +636,7 @@ class WasmGraphBuildingInterface {
void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdLaneImmediate<validate>& imm, Vector<Value> inputs,
Value* result) {
- base::SmallVector<TFNode*, 8> nodes(inputs.size());
+ NodeVector nodes(inputs.size());
GetNodes(nodes.begin(), inputs);
result->node = BUILD(SimdLaneOp, opcode, imm.lane, nodes.begin());
}
@@ -569,7 +657,7 @@ class WasmGraphBuildingInterface {
args[i] = value_args[i].node;
}
BUILD(Throw, imm.index, imm.exception, VectorOf(args), decoder->position());
- builder_->TerminateThrow(effect(), control());
+ TerminateThrow(decoder);
}
void Rethrow(FullDecoder* decoder, Control* block) {
@@ -578,7 +666,7 @@ class WasmGraphBuildingInterface {
TFNode* exception = block->try_info->exception;
DCHECK_NOT_NULL(exception);
BUILD(Rethrow, exception);
- builder_->TerminateThrow(effect(), control());
+ TerminateThrow(decoder);
}
void CatchException(FullDecoder* decoder,
@@ -619,7 +707,7 @@ class WasmGraphBuildingInterface {
// If the tags match we extract the values from the exception object and
// push them onto the operand stack using the passed {values} vector.
SetEnv(if_catch_env);
- base::SmallVector<TFNode*, 8> caught_values(values.size());
+ NodeVector caught_values(values.size());
Vector<TFNode*> caught_vector = VectorOf(caught_values);
BUILD(GetExceptionValues, exception, imm.exception, caught_vector);
for (size_t i = 0, e = values.size(); i < e; ++i) {
@@ -636,12 +724,16 @@ class WasmGraphBuildingInterface {
SetEnv(block->try_info->catch_env);
if (depth == decoder->control_depth() - 1) {
builder_->Rethrow(block->try_info->exception);
- builder_->TerminateThrow(effect(), control());
+ TerminateThrow(decoder);
current_catch_ = block->previous_catch;
return;
}
DCHECK(decoder->control_at(depth)->is_try());
TryInfo* target_try = decoder->control_at(depth)->try_info;
+ if (FLAG_wasm_loop_unrolling) {
+ StackValueVector stack_values;
+ BuildNestedLoopExits(decoder, depth, true, stack_values);
+ }
Goto(decoder, target_try->catch_env);
// Create or merge the exception.
@@ -677,7 +769,7 @@ class WasmGraphBuildingInterface {
void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
const MemoryAccessImmediate<validate>& imm, Value* result) {
- base::SmallVector<TFNode*, 8> inputs(args.size());
+ NodeVector inputs(args.size());
GetNodes(inputs.begin(), args);
TFNode* node = BUILD(AtomicOp, opcode, inputs.begin(), imm.alignment,
imm.offset, decoder->position());
@@ -744,7 +836,7 @@ class WasmGraphBuildingInterface {
const StructIndexImmediate<validate>& imm,
const Value& rtt, const Value args[], Value* result) {
uint32_t field_count = imm.struct_type->field_count();
- base::SmallVector<TFNode*, 16> arg_nodes(field_count);
+ NodeVector arg_nodes(field_count);
for (uint32_t i = 0; i < field_count; i++) {
arg_nodes[i] = args[i].node;
}
@@ -755,7 +847,7 @@ class WasmGraphBuildingInterface {
const StructIndexImmediate<validate>& imm,
const Value& rtt, Value* result) {
uint32_t field_count = imm.struct_type->field_count();
- base::SmallVector<TFNode*, 16> arg_nodes(field_count);
+ NodeVector arg_nodes(field_count);
for (uint32_t i = 0; i < field_count; i++) {
arg_nodes[i] = DefaultValue(imm.struct_type->field(i));
}
@@ -842,14 +934,13 @@ class WasmGraphBuildingInterface {
result->node = BUILD(I31GetU, input.node);
}
- void RttCanon(FullDecoder* decoder, const HeapTypeImmediate<validate>& imm,
- Value* result) {
- result->node = BUILD(RttCanon, imm.type);
+ void RttCanon(FullDecoder* decoder, uint32_t type_index, Value* result) {
+ result->node = BUILD(RttCanon, type_index);
}
- void RttSub(FullDecoder* decoder, const HeapTypeImmediate<validate>& imm,
- const Value& parent, Value* result) {
- result->node = BUILD(RttSub, imm.type, parent.node);
+ void RttSub(FullDecoder* decoder, uint32_t type_index, const Value& parent,
+ Value* result) {
+ result->node = BUILD(RttSub, type_index, parent.node);
}
using StaticKnowledge = compiler::WasmGraphBuilder::ObjectReferenceKnowledge;
@@ -860,10 +951,12 @@ class WasmGraphBuildingInterface {
StaticKnowledge result;
result.object_can_be_null = object_type.is_nullable();
DCHECK(object_type.is_object_reference_type()); // Checked by validation.
- result.object_must_be_data_ref = is_data_ref_type(object_type, module);
- result.object_can_be_i31 = IsSubtypeOf(kWasmI31Ref, object_type, module);
- result.rtt_is_i31 = rtt_type.heap_representation() == HeapType::kI31;
- result.rtt_depth = rtt_type.depth();
+ // In the bottom case, the result is irrelevant.
+ result.reference_kind =
+ rtt_type != kWasmBottom && module->has_signature(rtt_type.ref_index())
+ ? compiler::WasmGraphBuilder::kFunction
+ : compiler::WasmGraphBuilder::kArrayOrStruct;
+ result.rtt_depth = rtt_type.has_depth() ? rtt_type.depth() : -1;
return result;
}
@@ -882,15 +975,22 @@ class WasmGraphBuildingInterface {
BUILD(RefCast, object.node, rtt.node, config, decoder->position());
}
- void BrOnCast(FullDecoder* decoder, const Value& object, const Value& rtt,
- Value* value_on_branch, uint32_t br_depth) {
+ template <TFNode* (compiler::WasmGraphBuilder::*branch_function)(
+ TFNode*, TFNode*, StaticKnowledge, TFNode**, TFNode**, TFNode**,
+ TFNode**)>
+ void BrOnCastAbs(FullDecoder* decoder, const Value& object, const Value& rtt,
+ Value* value_on_branch, uint32_t br_depth) {
StaticKnowledge config =
ComputeStaticKnowledge(object.type, rtt.type, decoder->module_);
SsaEnv* match_env = Split(decoder->zone(), ssa_env_);
SsaEnv* no_match_env = Steal(decoder->zone(), ssa_env_);
no_match_env->SetNotMerged();
- BUILD(BrOnCast, object.node, rtt.node, config, &match_env->control,
- &match_env->effect, &no_match_env->control, &no_match_env->effect);
+ DCHECK(decoder->ok());
+ CheckForException(
+ decoder,
+ (builder_->*branch_function)(
+ object.node, rtt.node, config, &match_env->control,
+ &match_env->effect, &no_match_env->control, &no_match_env->effect));
builder_->SetControl(no_match_env->control);
SetEnv(match_env);
value_on_branch->node = object.node;
@@ -898,6 +998,59 @@ class WasmGraphBuildingInterface {
SetEnv(no_match_env);
}
+ void BrOnCast(FullDecoder* decoder, const Value& object, const Value& rtt,
+ Value* value_on_branch, uint32_t br_depth) {
+ BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnCast>(
+ decoder, object, rtt, value_on_branch, br_depth);
+ }
+
+ void RefIsData(FullDecoder* decoder, const Value& object, Value* result) {
+ result->node = BUILD(RefIsData, object.node, object.type.is_nullable());
+ }
+
+ void RefAsData(FullDecoder* decoder, const Value& object, Value* result) {
+ result->node = BUILD(RefAsData, object.node, object.type.is_nullable(),
+ decoder->position());
+ }
+
+ void BrOnData(FullDecoder* decoder, const Value& object,
+ Value* value_on_branch, uint32_t br_depth) {
+ BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnData>(
+ decoder, object, Value{nullptr, kWasmBottom}, value_on_branch,
+ br_depth);
+ }
+
+ void RefIsFunc(FullDecoder* decoder, const Value& object, Value* result) {
+ result->node = BUILD(RefIsFunc, object.node, object.type.is_nullable());
+ }
+
+ void RefAsFunc(FullDecoder* decoder, const Value& object, Value* result) {
+ result->node = BUILD(RefAsFunc, object.node, object.type.is_nullable(),
+ decoder->position());
+ }
+
+ void BrOnFunc(FullDecoder* decoder, const Value& object,
+ Value* value_on_branch, uint32_t br_depth) {
+ BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnFunc>(
+ decoder, object, Value{nullptr, kWasmBottom}, value_on_branch,
+ br_depth);
+ }
+
+ void RefIsI31(FullDecoder* decoder, const Value& object, Value* result) {
+ result->node = BUILD(RefIsI31, object.node);
+ }
+
+ void RefAsI31(FullDecoder* decoder, const Value& object, Value* result) {
+ result->node = BUILD(RefAsI31, object.node, decoder->position());
+ }
+
+ void BrOnI31(FullDecoder* decoder, const Value& object,
+ Value* value_on_branch, uint32_t br_depth) {
+ BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnI31>(
+ decoder, object, Value{nullptr, kWasmBottom}, value_on_branch,
+ br_depth);
+ }
+
void Forward(FullDecoder* decoder, const Value& from, Value* to) {
to->node = from.node;
}
@@ -911,9 +1064,13 @@ class WasmGraphBuildingInterface {
TFNode* control() { return builder_->control(); }
+ uint32_t control_depth_of_current_catch(FullDecoder* decoder) {
+ return decoder->control_depth() - 1 - current_catch_;
+ }
+
TryInfo* current_try_info(FullDecoder* decoder) {
DCHECK_LT(current_catch_, decoder->control_depth());
- return decoder->control_at(decoder->control_depth() - 1 - current_catch_)
+ return decoder->control_at(control_depth_of_current_catch(decoder))
->try_info;
}
@@ -983,6 +1140,11 @@ class WasmGraphBuildingInterface {
exception_env->effect = if_exception;
SetEnv(exception_env);
TryInfo* try_info = current_try_info(decoder);
+ if (FLAG_wasm_loop_unrolling) {
+ StackValueVector values;
+ BuildNestedLoopExits(decoder, control_depth_of_current_catch(decoder),
+ true, values);
+ }
Goto(decoder, try_info->catch_env);
if (try_info->exception == nullptr) {
DCHECK_EQ(SsaEnv::kReached, try_info->catch_env->state);
@@ -1001,24 +1163,25 @@ class WasmGraphBuildingInterface {
TFNode* DefaultValue(ValueType type) {
DCHECK(type.is_defaultable());
switch (type.kind()) {
- case ValueType::kI8:
- case ValueType::kI16:
- case ValueType::kI32:
+ case kI8:
+ case kI16:
+ case kI32:
return builder_->Int32Constant(0);
- case ValueType::kI64:
+ case kI64:
return builder_->Int64Constant(0);
- case ValueType::kF32:
+ case kF32:
return builder_->Float32Constant(0);
- case ValueType::kF64:
+ case kF64:
return builder_->Float64Constant(0);
- case ValueType::kS128:
+ case kS128:
return builder_->S128Zero();
- case ValueType::kOptRef:
+ case kOptRef:
return builder_->RefNull();
- case ValueType::kRtt:
- case ValueType::kStmt:
- case ValueType::kBottom:
- case ValueType::kRef:
+ case kRtt:
+ case kRttWithDepth:
+ case kStmt:
+ case kBottom:
+ case kRef:
UNREACHABLE();
}
}
@@ -1137,37 +1300,6 @@ class WasmGraphBuildingInterface {
return ssa_env_->Kill();
}
- void PrepareForLoop(FullDecoder* decoder) {
- ssa_env_->state = SsaEnv::kMerged;
-
- builder_->SetControl(builder_->Loop(control()));
- TFNode* effect_inputs[] = {effect(), control()};
- builder_->SetEffect(builder_->EffectPhi(1, effect_inputs));
- builder_->TerminateLoop(effect(), control());
- // Doing a preprocessing pass to analyze loop assignments seems to pay off
- // compared to reallocating Nodes when rearranging Phis in Goto.
- BitVector* assigned = WasmDecoder<validate>::AnalyzeLoopAssignment(
- decoder, decoder->pc(), decoder->num_locals(), decoder->zone());
- if (decoder->failed()) return;
- DCHECK_NOT_NULL(assigned);
-
- // Only introduce phis for variables assigned in this loop.
- int instance_cache_index = decoder->num_locals();
- for (int i = decoder->num_locals() - 1; i >= 0; i--) {
- if (!assigned->Contains(i)) continue;
- TFNode* inputs[] = {ssa_env_->locals[i], control()};
- ssa_env_->locals[i] = builder_->Phi(decoder->local_type(i), 1, inputs);
- }
- // Introduce phis for instance cache pointers if necessary.
- if (assigned->Contains(instance_cache_index)) {
- builder_->PrepareInstanceCacheForLoop(&ssa_env_->instance_cache,
- control());
- }
-
- SetEnv(Split(decoder->zone(), ssa_env_));
- builder_->StackCheck(decoder->position());
- }
-
// Create a complete copy of {from}.
SsaEnv* Split(Zone* zone, SsaEnv* from) {
DCHECK_NOT_NULL(from);
@@ -1206,22 +1338,22 @@ class WasmGraphBuildingInterface {
Value returns[]) {
size_t param_count = sig->parameter_count();
size_t return_count = sig->return_count();
- base::SmallVector<TFNode*, 16> arg_nodes(param_count + 1);
+ NodeVector arg_nodes(param_count + 1);
base::SmallVector<TFNode*, 1> return_nodes(return_count);
arg_nodes[0] = caller_node;
for (size_t i = 0; i < param_count; ++i) {
arg_nodes[i + 1] = args[i].node;
}
switch (call_mode) {
- case kIndirect:
+ case kCallIndirect:
BUILD(CallIndirect, table_index, sig_index, VectorOf(arg_nodes),
VectorOf(return_nodes), decoder->position());
break;
- case kDirect:
+ case kCallDirect:
BUILD(CallDirect, sig_index, VectorOf(arg_nodes),
VectorOf(return_nodes), decoder->position());
break;
- case kRef:
+ case kCallRef:
BUILD(CallRef, sig_index, VectorOf(arg_nodes), VectorOf(return_nodes),
null_check, decoder->position());
break;
@@ -1239,22 +1371,84 @@ class WasmGraphBuildingInterface {
TFNode* index_node, const FunctionSig* sig,
uint32_t sig_index, const Value args[]) {
size_t arg_count = sig->parameter_count();
- base::SmallVector<TFNode*, 16> arg_nodes(arg_count + 1);
+ NodeVector arg_nodes(arg_count + 1);
arg_nodes[0] = index_node;
for (size_t i = 0; i < arg_count; ++i) {
arg_nodes[i + 1] = args[i].node;
}
switch (call_mode) {
- case kIndirect:
+ case kCallIndirect:
BUILD(ReturnCallIndirect, table_index, sig_index, VectorOf(arg_nodes),
decoder->position());
break;
- case kDirect:
+ case kCallDirect:
BUILD(ReturnCall, sig_index, VectorOf(arg_nodes), decoder->position());
break;
- case kRef:
+ case kCallRef:
BUILD(ReturnCallRef, sig_index, VectorOf(arg_nodes), null_check,
decoder->position());
+ break;
+ }
+ }
+
+ void BuildLoopExits(FullDecoder* decoder, Control* loop) {
+ BUILD(LoopExit, loop->loop_node);
+ ssa_env_->control = control();
+ ssa_env_->effect = effect();
+ }
+
+ void WrapLocalsAtLoopExit(FullDecoder* decoder, Control* loop) {
+ for (uint32_t index = 0; index < decoder->num_locals(); index++) {
+ if (loop->loop_assignments->Contains(static_cast<int>(index))) {
+ ssa_env_->locals[index] = builder_->LoopExitValue(
+ ssa_env_->locals[index],
+ decoder->local_type(index).machine_representation());
+ }
+ }
+ if (loop->loop_assignments->Contains(decoder->num_locals())) {
+#define WRAP_CACHE_FIELD(field) \
+ if (ssa_env_->instance_cache.field != nullptr) { \
+ ssa_env_->instance_cache.field = builder_->LoopExitValue( \
+ ssa_env_->instance_cache.field, MachineType::PointerRepresentation()); \
+ }
+
+ WRAP_CACHE_FIELD(mem_start);
+ WRAP_CACHE_FIELD(mem_size);
+ WRAP_CACHE_FIELD(mem_mask);
+#undef WRAP_CACHE_FIELD
+ }
+ }
+
+ void BuildNestedLoopExits(FullDecoder* decoder, uint32_t depth_limit,
+ bool wrap_exit_values,
+ StackValueVector& stack_values) {
+ DCHECK(FLAG_wasm_loop_unrolling);
+ for (uint32_t i = 0; i < depth_limit; i++) {
+ Control* control = decoder->control_at(i);
+ if (!control->is_loop()) continue;
+ BuildLoopExits(decoder, control);
+ for (Value& value : stack_values) {
+ value.node = builder_->LoopExitValue(
+ value.node, value.type.machine_representation());
+ }
+ if (wrap_exit_values) {
+ WrapLocalsAtLoopExit(decoder, control);
+ }
+ }
+ }
+
+ void TerminateThrow(FullDecoder* decoder) {
+ if (FLAG_wasm_loop_unrolling) {
+ SsaEnv* internal_env = ssa_env_;
+ SsaEnv* exit_env = Split(decoder->zone(), ssa_env_);
+ SetEnv(exit_env);
+ StackValueVector stack_values;
+ BuildNestedLoopExits(decoder, decoder->control_depth(), false,
+ stack_values);
+ builder_->TerminateThrow(effect(), control());
+ SetEnv(internal_env);
+ } else {
+ builder_->TerminateThrow(effect(), control());
}
}
};
diff --git a/deps/v8/src/wasm/jump-table-assembler.cc b/deps/v8/src/wasm/jump-table-assembler.cc
index 65752df59f..db2514791b 100644
--- a/deps/v8/src/wasm/jump-table-assembler.cc
+++ b/deps/v8/src/wasm/jump-table-assembler.cc
@@ -307,6 +307,46 @@ void JumpTableAssembler::NopBytes(int bytes) {
}
}
+#elif V8_TARGET_ARCH_RISCV64
+void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
+ Address lazy_compile_target) {
+ int start = pc_offset();
+ li(kWasmCompileLazyFuncIndexRegister, func_index); // max. 2 instr
+ // Jump produces max. 8 instructions (include constant pool and j)
+ Jump(lazy_compile_target, RelocInfo::NONE);
+ int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset();
+ DCHECK_EQ(nop_bytes % kInstrSize, 0);
+ for (int i = 0; i < nop_bytes; i += kInstrSize) nop();
+}
+
+bool JumpTableAssembler::EmitJumpSlot(Address target) {
+ PatchAndJump(target);
+ return true;
+}
+
+void JumpTableAssembler::EmitFarJumpSlot(Address target) {
+ UseScratchRegisterScope temp(this);
+ Register rd = temp.Acquire();
+ auipc(rd, 0);
+ ld(rd, rd, 4 * kInstrSize);
+ Jump(rd);
+ nop();
+ dq(target);
+}
+
+// static
+void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
+ UNREACHABLE();
+}
+
+void JumpTableAssembler::NopBytes(int bytes) {
+ DCHECK_LE(0, bytes);
+ DCHECK_EQ(0, bytes % kInstrSize);
+ for (; bytes > 0; bytes -= kInstrSize) {
+ nop();
+ }
+}
+
#else
#error Unknown architecture.
#endif
diff --git a/deps/v8/src/wasm/jump-table-assembler.h b/deps/v8/src/wasm/jump-table-assembler.h
index 253f0bc018..b14d66eafe 100644
--- a/deps/v8/src/wasm/jump-table-assembler.h
+++ b/deps/v8/src/wasm/jump-table-assembler.h
@@ -215,6 +215,11 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
static constexpr int kJumpTableSlotSize = 8 * kInstrSize;
static constexpr int kFarJumpTableSlotSize = 6 * kInstrSize;
static constexpr int kLazyCompileTableSlotSize = 8 * kInstrSize;
+#elif V8_TARGET_ARCH_RISCV64
+ static constexpr int kJumpTableLineSize = 6 * kInstrSize;
+ static constexpr int kJumpTableSlotSize = 6 * kInstrSize;
+ static constexpr int kFarJumpTableSlotSize = 6 * kInstrSize;
+ static constexpr int kLazyCompileTableSlotSize = 10 * kInstrSize;
#else
#error Unknown architecture.
#endif
diff --git a/deps/v8/src/wasm/local-decl-encoder.cc b/deps/v8/src/wasm/local-decl-encoder.cc
index 8130cbb6a4..e7d1c5f21b 100644
--- a/deps/v8/src/wasm/local-decl-encoder.cc
+++ b/deps/v8/src/wasm/local-decl-encoder.cc
@@ -41,6 +41,9 @@ size_t LocalDeclEncoder::Emit(byte* buffer) const {
*pos = locals_type.depth();
++pos;
}
+ if (locals_type.is_rtt()) {
+ LEBHelper::write_u32v(&pos, locals_type.ref_index());
+ }
if (locals_type.encoding_needs_heap_type()) {
LEBHelper::write_i32v(&pos, locals_type.heap_type().code());
}
@@ -66,12 +69,14 @@ uint32_t LocalDeclEncoder::AddLocals(uint32_t count, ValueType type) {
size_t LocalDeclEncoder::Size() const {
size_t size = LEBHelper::sizeof_u32v(local_decls.size());
for (auto p : local_decls) {
- size += LEBHelper::sizeof_u32v(p.first) + // number of locals
- 1 + // Opcode
- (p.second.has_depth() ? 1 : 0) + // Inheritance depth
- (p.second.encoding_needs_heap_type()
- ? LEBHelper::sizeof_i32v(p.second.heap_type().code())
- : 0); // ref. index
+ size +=
+ LEBHelper::sizeof_u32v(p.first) + // number of locals
+ 1 + // Opcode
+ (p.second.has_depth() ? 1 : 0) + // Inheritance depth
+ (p.second.encoding_needs_heap_type()
+ ? LEBHelper::sizeof_i32v(p.second.heap_type().code())
+ : 0) +
+ (p.second.is_rtt() ? LEBHelper::sizeof_u32v(p.second.ref_index()) : 0);
}
return size;
}
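
The Size() arithmetic above sums, per local-declaration entry, the LEB128 byte length of the count, one opcode byte, and the optional depth / heap-type / rtt-index bytes. As a rough standalone illustration of the varint length it relies on (not the V8 LEBHelper itself): an unsigned LEB128 value takes one byte per 7 payload bits, with a minimum of one byte.

    #include <cstddef>
    #include <cstdint>

    // Bytes occupied by a uint32_t in unsigned LEB128 form:
    // e.g. 0 -> 1 byte, 127 -> 1 byte, 128 -> 2 bytes, 300 -> 2 bytes.
    size_t SizeofU32v(uint32_t value) {
      size_t size = 0;
      do {
        ++size;
        value >>= 7;
      } while (value != 0);
      return size;
    }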
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index ca34fa9c66..00d7dab43c 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -589,7 +589,7 @@ class CompilationStateImpl {
void OnFinishedUnits(Vector<WasmCode*>);
void OnFinishedJSToWasmWrapperUnits(int num);
- void OnCompilationStopped(const WasmFeatures& detected);
+ void OnCompilationStopped(WasmFeatures detected);
void PublishDetectedFeatures(Isolate*);
void SchedulePublishCompilationResults(
std::vector<std::unique_ptr<WasmCode>> unpublished_code);
@@ -627,7 +627,6 @@ class CompilationStateImpl {
CompileMode compile_mode() const { return compile_mode_; }
Counters* counters() const { return async_counters_.get(); }
- WasmFeatures* detected_features() { return &detected_features_; }
void SetWireBytesStorage(
std::shared_ptr<WireBytesStorage> wire_bytes_storage) {
@@ -641,6 +640,15 @@ class CompilationStateImpl {
return wire_bytes_storage_;
}
+ void set_compilation_id(int compilation_id) {
+ DCHECK_EQ(compilation_id_, kInvalidCompilationID);
+ compilation_id_ = compilation_id;
+ }
+
+ std::weak_ptr<NativeModule> const native_module_weak() const {
+ return native_module_weak_;
+ }
+
private:
// Trigger callbacks according to the internal counters below
// (outstanding_...), plus the given events.
@@ -682,6 +690,10 @@ class CompilationStateImpl {
// {CompilationStateImpl}.
std::unique_ptr<JobHandle> compile_job_;
+ // The compilation id to identify trace events linked to this compilation.
+ static constexpr int kInvalidCompilationID = -1;
+ int compilation_id_ = kInvalidCompilationID;
+
//////////////////////////////////////////////////////////////////////////////
// Protected by {mutex_}:
@@ -755,10 +767,10 @@ void UpdateFeatureUseCounts(Isolate* isolate, const WasmFeatures& detected) {
using Feature = v8::Isolate::UseCounterFeature;
constexpr static std::pair<WasmFeature, Feature> kUseCounters[] = {
{kFeature_reftypes, Feature::kWasmRefTypes},
- {kFeature_bulk_memory, Feature::kWasmBulkMemory},
{kFeature_mv, Feature::kWasmMultiValue},
{kFeature_simd, Feature::kWasmSimdOpcodes},
- {kFeature_threads, Feature::kWasmThreadOpcodes}};
+ {kFeature_threads, Feature::kWasmThreadOpcodes},
+ {kFeature_eh, Feature::kWasmExceptionHandling}};
for (auto& feature : kUseCounters) {
if (detected.contains(feature.first)) isolate->CountUsage(feature.second);
@@ -798,15 +810,8 @@ void CompilationState::AddCallback(CompilationState::callback_t callback) {
}
void CompilationState::WaitForTopTierFinished() {
- // TODO(clemensb): Contribute to compilation while waiting.
- auto top_tier_finished_semaphore = std::make_shared<base::Semaphore>(0);
- AddCallback([top_tier_finished_semaphore](CompilationEvent event) {
- if (event == CompilationEvent::kFailedCompilation ||
- event == CompilationEvent::kFinishedTopTierCompilation) {
- top_tier_finished_semaphore->Signal();
- }
- });
- top_tier_finished_semaphore->Wait();
+ Impl(this)->WaitForCompilationEvent(
+ CompilationEvent::kFinishedTopTierCompilation);
}
void CompilationState::SetHighPriority() { Impl(this)->SetHighPriority(); }
@@ -829,6 +834,10 @@ bool CompilationState::recompilation_finished() const {
return Impl(this)->recompilation_finished();
}
+void CompilationState::set_compilation_id(int compilation_id) {
+ Impl(this)->set_compilation_id(compilation_id);
+}
+
// static
std::unique_ptr<CompilationState> CompilationState::New(
const std::shared_ptr<NativeModule>& native_module,
@@ -1112,9 +1121,11 @@ bool CompileLazy(Isolate* isolate, Handle<WasmModuleObject> module_object,
WasmCompilationUnit baseline_unit{func_index, tiers.baseline_tier,
kNoDebugging};
CompilationEnv env = native_module->CreateCompilationEnv();
+ WasmFeatures detected_features;
WasmCompilationResult result = baseline_unit.ExecuteCompilation(
isolate->wasm_engine(), &env, compilation_state->GetWireBytesStorage(),
- counters, compilation_state->detected_features());
+ counters, &detected_features);
+ compilation_state->OnCompilationStopped(detected_features);
// During lazy compilation, we can only get compilation errors when
// {--wasm-lazy-validation} is enabled. Otherwise, the module was fully
@@ -1235,11 +1246,12 @@ const char* GetCompilationEventName(const WasmCompilationUnit& unit,
}
} // namespace
+constexpr uint8_t kMainTaskId = 0;
+
// Run by the {BackgroundCompileJob} (on any thread).
CompilationExecutionResult ExecuteCompilationUnits(
- std::weak_ptr<NativeModule> native_module, WasmEngine* wasm_engine,
- Counters* counters, JobDelegate* delegate,
- CompileBaselineOnly baseline_only) {
+ std::weak_ptr<NativeModule> native_module, Counters* counters,
+ JobDelegate* delegate, CompileBaselineOnly baseline_only) {
TRACE_EVENT0("v8.wasm", "wasm.ExecuteCompilationUnits");
// Execute JS to Wasm wrapper units first, so that they are ready to be
@@ -1252,12 +1264,14 @@ CompilationExecutionResult ExecuteCompilationUnits(
// These fields are initialized in a {BackgroundCompileScope} before
// starting compilation.
+ WasmEngine* engine;
base::Optional<CompilationEnv> env;
std::shared_ptr<WireBytesStorage> wire_bytes;
std::shared_ptr<const WasmModule> module;
// Task 0 is any main thread (there might be multiple from multiple isolates),
// worker threads start at 1 (thus the "+ 1").
- int task_id = delegate ? (int{delegate->GetTaskId()} + 1) : 0;
+ STATIC_ASSERT(kMainTaskId == 0);
+ int task_id = delegate ? (int{delegate->GetTaskId()} + 1) : kMainTaskId;
DCHECK_LE(0, task_id);
CompilationUnitQueues::Queue* queue;
base::Optional<WasmCompilationUnit> unit;
@@ -1269,6 +1283,7 @@ CompilationExecutionResult ExecuteCompilationUnits(
{
BackgroundCompileScope compile_scope(native_module);
if (compile_scope.cancelled()) return kYield;
+ engine = compile_scope.native_module()->engine();
env.emplace(compile_scope.native_module()->CreateCompilationEnv());
wire_bytes = compile_scope.compilation_state()->GetWireBytesStorage();
module = compile_scope.native_module()->shared_module();
@@ -1287,7 +1302,7 @@ CompilationExecutionResult ExecuteCompilationUnits(
while (unit->tier() == current_tier) {
// (asynchronous): Execute the compilation.
WasmCompilationResult result = unit->ExecuteCompilation(
- wasm_engine, &env.value(), wire_bytes, counters, &detected_features);
+ engine, &env.value(), wire_bytes, counters, &detected_features);
results_to_publish.emplace_back(std::move(result));
bool yield = delegate && delegate->ShouldYield();
@@ -1595,15 +1610,14 @@ class BackgroundCompileJob final : public JobTask {
WasmEngine* engine,
std::shared_ptr<Counters> async_counters)
: native_module_(std::move(native_module)),
- engine_(engine),
- engine_barrier_(engine_->GetBarrierForBackgroundCompile()),
+ engine_barrier_(engine->GetBarrierForBackgroundCompile()),
async_counters_(std::move(async_counters)) {}
void Run(JobDelegate* delegate) override {
auto engine_scope = engine_barrier_->TryLock();
if (!engine_scope) return;
- ExecuteCompilationUnits(native_module_, engine_, async_counters_.get(),
- delegate, kBaselineOrTopTier);
+ ExecuteCompilationUnits(native_module_, async_counters_.get(), delegate,
+ kBaselineOrTopTier);
}
size_t GetMaxConcurrency(size_t worker_count) const override {
@@ -1619,7 +1633,6 @@ class BackgroundCompileJob final : public JobTask {
private:
std::weak_ptr<NativeModule> native_module_;
- WasmEngine* engine_;
std::shared_ptr<OperationsBarrier> engine_barrier_;
const std::shared_ptr<Counters> async_counters_;
};
@@ -1629,7 +1642,7 @@ class BackgroundCompileJob final : public JobTask {
std::shared_ptr<NativeModule> CompileToNativeModule(
Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
- Handle<FixedArray>* export_wrappers_out) {
+ Handle<FixedArray>* export_wrappers_out, int compilation_id) {
const WasmModule* wasm_module = module.get();
OwnedVector<uint8_t> wire_bytes_copy =
OwnedVector<uint8_t>::Of(wire_bytes.module_bytes());
@@ -1662,6 +1675,7 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
native_module = isolate->wasm_engine()->NewNativeModule(
isolate, enabled, module, code_size_estimate);
native_module->SetWireBytes(std::move(wire_bytes_copy));
+ native_module->compilation_state()->set_compilation_id(compilation_id);
// Sync compilation is user blocking, so we increase the priority.
native_module->compilation_state()->SetHighPriority();
@@ -1695,13 +1709,16 @@ void RecompileNativeModule(NativeModule* native_module,
compilation_state->InitializeRecompilation(
tiering_state,
[recompilation_finished_semaphore](CompilationEvent event) {
+ DCHECK_NE(CompilationEvent::kFailedCompilation, event);
if (event == CompilationEvent::kFinishedRecompilation) {
recompilation_finished_semaphore->Signal();
}
});
- // Now wait until all compilation units finished.
- // TODO(clemensb): Contribute to compilation while waiting.
+ constexpr JobDelegate* kNoDelegate = nullptr;
+ ExecuteCompilationUnits(compilation_state->native_module_weak(),
+ compilation_state->counters(), kNoDelegate,
+ kBaselineOnly);
recompilation_finished_semaphore->Wait();
DCHECK(!compilation_state->failed());
}
@@ -1710,7 +1727,7 @@ AsyncCompileJob::AsyncCompileJob(
Isolate* isolate, const WasmFeatures& enabled,
std::unique_ptr<byte[]> bytes_copy, size_t length, Handle<Context> context,
Handle<Context> incumbent_context, const char* api_method_name,
- std::shared_ptr<CompilationResultResolver> resolver)
+ std::shared_ptr<CompilationResultResolver> resolver, int compilation_id)
: isolate_(isolate),
api_method_name_(api_method_name),
enabled_features_(enabled),
@@ -1718,7 +1735,8 @@ AsyncCompileJob::AsyncCompileJob(
start_time_(base::TimeTicks::Now()),
bytes_copy_(std::move(bytes_copy)),
wire_bytes_(bytes_copy_.get(), bytes_copy_.get() + length),
- resolver_(std::move(resolver)) {
+ resolver_(std::move(resolver)),
+ compilation_id_(compilation_id) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
"wasm.AsyncCompileJob");
CHECK(FLAG_wasm_async_compilation);
@@ -1846,6 +1864,7 @@ void AsyncCompileJob::CreateNativeModule(
native_module_ = isolate_->wasm_engine()->NewNativeModule(
isolate_, enabled_features_, std::move(module), code_size_estimate);
native_module_->SetWireBytes({std::move(bytes_copy_), wire_bytes_.length()});
+ native_module_->compilation_state()->set_compilation_id(compilation_id_);
}
bool AsyncCompileJob::GetOrCreateNativeModule(
@@ -1944,6 +1963,12 @@ void AsyncCompileJob::FinishCompile(bool is_after_cache_hit) {
// We can only update the feature counts once the entire compile is done.
compilation_state->PublishDetectedFeatures(isolate_);
+ // We might need to recompile the module for debugging, if the debugger was
+ // enabled while streaming compilation was running. Since handling this while
+ // compiling via streaming is tricky, we just tier down now, before publishing
+ // the module.
+ if (native_module_->IsTieredDown()) native_module_->RecompileForTiering();
+
// Finally, log all generated code (it does not matter if this happens
// repeatedly in case the script is shared).
native_module_->LogWasmCodes(isolate_, module_object_->script());
@@ -2693,13 +2718,6 @@ void AsyncStreamingProcessor::OnFinishedStream(OwnedVector<uint8_t> bytes) {
}
const bool needs_finish = job_->DecrementAndCheckFinisherCount();
DCHECK_IMPLIES(!has_code_section, needs_finish);
- // We might need to recompile the module for debugging, if the debugger was
- // enabled while streaming compilation was running. Since handling this while
- // compiling via streaming is tricky, we just tier down now, before publishing
- // the module.
- if (job_->native_module_->IsTieredDown()) {
- job_->native_module_->RecompileForTiering();
- }
if (needs_finish) {
const bool failed = job_->native_module_->compilation_state()->failed();
if (!cache_hit) {
@@ -2871,19 +2889,18 @@ void CompilationStateImpl::InitializeRecompilation(
// is disabled).
base::Optional<base::MutexGuard> guard(&callbacks_mutex_);
- // For now, we cannot contribute to compilation here, because this would bump
- // the number of workers above the expected maximum concurrency. This can be
- // fixed once we grow the number of compilation unit queues dynamically.
- // TODO(clemensb): Contribute to compilation once the queues grow dynamically.
- while (outstanding_recompilation_functions_ > 0) {
- auto semaphore = std::make_shared<base::Semaphore>(0);
- callbacks_.emplace_back([semaphore](CompilationEvent event) {
- if (event == CompilationEvent::kFinishedRecompilation) {
- semaphore->Signal();
- }
- });
+ // As long as there are outstanding recompilation functions, take part in
+ // compilation. This is to avoid recompiling for the same tier or for
+ // different tiers concurrently. Note that the compilation unit queues can run
+ // empty before {outstanding_recompilation_functions_} drops to zero. In this
+ // case, we do not wait for the last running compilation threads to finish
+ // their units, but just start our own recompilation already.
+ while (outstanding_recompilation_functions_ > 0 &&
+ compilation_unit_queues_.GetTotalSize() > 0) {
guard.reset();
- semaphore->Wait();
+ constexpr JobDelegate* kNoDelegate = nullptr;
+ ExecuteCompilationUnits(native_module_weak_, async_counters_.get(),
+ kNoDelegate, kBaselineOrTopTier);
guard.emplace(&callbacks_mutex_);
}
@@ -3186,7 +3203,8 @@ void CompilationStateImpl::TriggerCallbacks(
std::make_pair(CompilationEvent::kFinishedRecompilation,
"wasm.RecompilationFinished")}) {
if (!triggered_events.contains(event.first)) continue;
- TRACE_EVENT0("v8.wasm", event.second);
+ DCHECK_NE(compilation_id_, kInvalidCompilationID);
+ TRACE_EVENT1("v8.wasm", event.second, "id", compilation_id_);
for (auto& callback : callbacks_) {
callback(event.first);
}
@@ -3200,7 +3218,7 @@ void CompilationStateImpl::TriggerCallbacks(
}
}
-void CompilationStateImpl::OnCompilationStopped(const WasmFeatures& detected) {
+void CompilationStateImpl::OnCompilationStopped(WasmFeatures detected) {
base::MutexGuard guard(&mutex_);
detected_features_.Add(detected);
}
@@ -3302,22 +3320,48 @@ void CompilationStateImpl::SetError() {
void CompilationStateImpl::WaitForCompilationEvent(
CompilationEvent expect_event) {
- auto compilation_event_semaphore = std::make_shared<base::Semaphore>(0);
+ auto semaphore = std::make_shared<base::Semaphore>(0);
+ auto done = std::make_shared<std::atomic<bool>>(false);
base::EnumSet<CompilationEvent> events{expect_event,
CompilationEvent::kFailedCompilation};
{
base::MutexGuard callbacks_guard(&callbacks_mutex_);
if (finished_events_.contains_any(events)) return;
- callbacks_.emplace_back(
- [compilation_event_semaphore, events](CompilationEvent event) {
- if (events.contains(event)) compilation_event_semaphore->Signal();
- });
+ callbacks_.emplace_back([semaphore, events, done](CompilationEvent event) {
+ if (!events.contains(event)) return;
+ done->store(true, std::memory_order_relaxed);
+ semaphore->Signal();
+ });
}
- constexpr JobDelegate* kNoDelegate = nullptr;
- ExecuteCompilationUnits(native_module_weak_, native_module_->engine(),
- async_counters_.get(), kNoDelegate, kBaselineOnly);
- compilation_event_semaphore->Wait();
+ class WaitForEventDelegate final : public JobDelegate {
+ public:
+ explicit WaitForEventDelegate(std::shared_ptr<std::atomic<bool>> done)
+ : done_(std::move(done)) {}
+
+ bool ShouldYield() override {
+ return done_->load(std::memory_order_relaxed);
+ }
+
+ void NotifyConcurrencyIncrease() override { UNIMPLEMENTED(); }
+
+ uint8_t GetTaskId() override { return kMainTaskId; }
+
+ private:
+ std::shared_ptr<std::atomic<bool>> done_;
+ };
+
+ WaitForEventDelegate delegate{done};
+ // Everything except for top-tier units will be processed with kBaselineOnly
+ // (including wrappers). Hence we choose this for any event except
+ // {kFinishedTopTierCompilation}.
+ auto compile_tiers =
+ expect_event == CompilationEvent::kFinishedTopTierCompilation
+ ? kBaselineOrTopTier
+ : kBaselineOnly;
+ ExecuteCompilationUnits(native_module_weak_, async_counters_.get(), &delegate,
+ compile_tiers);
+ semaphore->Wait();
}
namespace {
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index 239c39c526..9c2b7556cb 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -43,7 +43,7 @@ struct WasmModule;
std::shared_ptr<NativeModule> CompileToNativeModule(
Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
- Handle<FixedArray>* export_wrappers_out);
+ Handle<FixedArray>* export_wrappers_out, int compilation_id);
void RecompileNativeModule(NativeModule* native_module,
TieringState new_tiering_state);
@@ -113,7 +113,8 @@ class AsyncCompileJob {
std::unique_ptr<byte[]> bytes_copy, size_t length,
Handle<Context> context, Handle<Context> incumbent_context,
const char* api_method_name,
- std::shared_ptr<CompilationResultResolver> resolver);
+ std::shared_ptr<CompilationResultResolver> resolver,
+ int compilation_id);
~AsyncCompileJob();
void Start();
@@ -236,6 +237,9 @@ class AsyncCompileJob {
// compilation. The AsyncCompileJob does not actively use the
// StreamingDecoder.
std::shared_ptr<StreamingDecoder> stream_;
+
+ // The compilation id to identify trace events linked to this compilation.
+ const int compilation_id_;
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 221afbfb62..61d0f691d1 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -499,17 +499,15 @@ class ModuleDecoderImpl : public Decoder {
}
break;
case kDataCountSectionCode:
- if (enabled_features_.has_bulk_memory()) {
- DecodeDataCountSection();
- } else {
- errorf(pc(), "unexpected section <%s>", SectionName(section_code));
- }
+ DecodeDataCountSection();
break;
case kExceptionSectionCode:
if (enabled_features_.has_eh()) {
DecodeExceptionSection();
} else {
- errorf(pc(), "unexpected section <%s>", SectionName(section_code));
+ errorf(pc(),
+ "unexpected section <%s> (enable with --experimental-wasm-eh)",
+ SectionName(section_code));
}
break;
default:
@@ -619,9 +617,10 @@ class ModuleDecoderImpl : public Decoder {
const byte* type_position = pc();
ValueType type = consume_reference_type();
if (!WasmTable::IsValidTableType(type, module_.get())) {
- error(type_position,
- "Currently, only nullable exnref, externref, and "
- "function references are allowed as table types");
+ error(
+ type_position,
+ "Currently, only externref and function references are allowed "
+ "as table types");
break;
}
table->type = type;
@@ -719,8 +718,8 @@ class ModuleDecoderImpl : public Decoder {
ValueType table_type = consume_reference_type();
if (!WasmTable::IsValidTableType(table_type, module_.get())) {
error(type_position,
- "Currently, only nullable exnref, externref, and "
- "function references are allowed as table types");
+ "Currently, only externref and function references are allowed "
+ "as table types");
continue;
}
table->type = table_type;
@@ -1250,7 +1249,7 @@ class ModuleDecoderImpl : public Decoder {
WasmSectionIterator section_iter(&decoder);
- while (ok() && section_iter.more()) {
+ while (ok()) {
// Shift the offset by the section header length
offset += section_iter.payload_start() - section_iter.section_start();
if (section_iter.section_code() != SectionCode::kUnknownSectionCode) {
@@ -1259,6 +1258,7 @@ class ModuleDecoderImpl : public Decoder {
}
// Shift the offset by the remaining section payload
offset += section_iter.payload_length();
+ if (!section_iter.more()) break;
section_iter.advance(true);
}
@@ -1371,8 +1371,7 @@ class ModuleDecoderImpl : public Decoder {
case WasmInitExpr::kRefNullConst:
return ValueType::Ref(expr.immediate().heap_type, kNullable);
case WasmInitExpr::kRttCanon: {
- uint8_t depth = expr.immediate().heap_type == HeapType::kAny ? 0 : 1;
- return ValueType::Rtt(expr.immediate().heap_type, depth);
+ return ValueType::Rtt(expr.immediate().heap_type, 0);
}
case WasmInitExpr::kRttSub: {
ValueType operand_type = TypeOf(*expr.operand());
@@ -1770,18 +1769,21 @@ class ModuleDecoderImpl : public Decoder {
opcode = read_prefixed_opcode<validate>(pc(), &len);
switch (opcode) {
case kExprRttCanon: {
- HeapTypeImmediate<validate> imm(enabled_features_, this, pc() + 2,
- module_.get());
- if (V8_UNLIKELY(failed())) return {};
+ TypeIndexImmediate<validate> imm(this, pc() + 2);
+ if (V8_UNLIKELY(imm.index >= module_->types.capacity())) {
+ errorf(pc() + 2, "type index %u is out of bounds", imm.index);
+ return {};
+ }
len += imm.length;
- stack.push_back(
- WasmInitExpr::RttCanon(imm.type.representation()));
+ stack.push_back(WasmInitExpr::RttCanon(imm.index));
break;
}
case kExprRttSub: {
- HeapTypeImmediate<validate> imm(enabled_features_, this, pc() + 2,
- module_.get());
- if (V8_UNLIKELY(failed())) return {};
+ TypeIndexImmediate<validate> imm(this, pc() + 2);
+ if (V8_UNLIKELY(imm.index >= module_->types.capacity())) {
+ errorf(pc() + 2, "type index %u is out of bounds", imm.index);
+ return {};
+ }
len += imm.length;
if (stack.empty()) {
error(pc(), "calling rtt.sub without arguments");
@@ -1790,17 +1792,15 @@ class ModuleDecoderImpl : public Decoder {
WasmInitExpr parent = std::move(stack.back());
stack.pop_back();
ValueType parent_type = TypeOf(parent);
- if (V8_UNLIKELY(
- parent_type.kind() != ValueType::kRtt ||
- !IsSubtypeOf(
- ValueType::Ref(imm.type, kNonNullable),
- ValueType::Ref(parent_type.heap_type(), kNonNullable),
- module_.get()))) {
+ if (V8_UNLIKELY(!parent_type.is_rtt() ||
+ !IsHeapSubtypeOf(imm.index,
+ parent_type.ref_index(),
+ module_.get()))) {
error(pc(), "rtt.sub requires a supertype rtt on stack");
return {};
}
- stack.push_back(WasmInitExpr::RttSub(imm.type.representation(),
- std::move(parent)));
+ stack.push_back(
+ WasmInitExpr::RttSub(imm.index, std::move(parent)));
break;
}
default: {
@@ -1972,21 +1972,7 @@ class ModuleDecoderImpl : public Decoder {
ValueType* type, uint32_t* table_index,
WasmInitExpr* offset) {
const byte* pos = pc();
- uint32_t flag;
- if (enabled_features_.has_bulk_memory() ||
- enabled_features_.has_reftypes()) {
- flag = consume_u32v("flag");
- } else {
- uint32_t table_index = consume_u32v("table index");
- // The only valid flag value without bulk_memory or externref is '0'.
- if (table_index != 0) {
- error(
- "Element segments with table indices require "
- "--experimental-wasm-bulk-memory or --experimental-wasm-reftypes");
- return;
- }
- flag = 0;
- }
+ uint32_t flag = consume_u32v("flag");
// The mask for the bit in the flag which indicates if the segment is
// active or not.
@@ -2022,24 +2008,6 @@ class ModuleDecoderImpl : public Decoder {
"Declarative element segments require --experimental-wasm-reftypes");
return;
}
- if (*status == WasmElemSegment::kStatusPassive &&
- !enabled_features_.has_bulk_memory()) {
- error("Passive element segments require --experimental-wasm-bulk-memory");
- return;
- }
- if (*functions_as_elements && !enabled_features_.has_bulk_memory()) {
- error(
- "Illegal segment flag. Did you forget "
- "--experimental-wasm-bulk-memory?");
- return;
- }
- if (flag != 0 && !enabled_features_.has_bulk_memory() &&
- !enabled_features_.has_reftypes()) {
- error(
- "Invalid segment flag. Enable with --experimental-wasm-bulk-memory "
- "or --experimental-wasm-reftypes");
- return;
- }
if ((flag & kFullMask) != flag) {
errorf(pos, "illegal flag value %u. Must be between 0 and 7", flag);
}
@@ -2088,21 +2056,9 @@ class ModuleDecoderImpl : public Decoder {
uint32_t flag = consume_u32v("flag");
// Some flag values are only valid for specific proposals.
- if (flag == SegmentFlags::kPassive) {
- if (!enabled_features_.has_bulk_memory()) {
- error(
- "Passive element segments require --experimental-wasm-bulk-memory");
- return;
- }
- } else if (flag == SegmentFlags::kActiveWithIndex) {
- if (!(enabled_features_.has_bulk_memory() ||
- enabled_features_.has_reftypes())) {
- error(
- "Element segments with table indices require "
- "--experimental-wasm-bulk-memory or --experimental-wasm-reftypes");
- return;
- }
- } else if (flag != SegmentFlags::kActiveNoIndex) {
+ if (flag != SegmentFlags::kActiveNoIndex &&
+ flag != SegmentFlags::kPassive &&
+ flag != SegmentFlags::kActiveWithIndex) {
errorf(pos, "illegal flag value %u. Must be 0, 1, or 2", flag);
return;
}
@@ -2460,37 +2416,6 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
}
}
-void GenerateNamesFromImportsAndExports(
- ImportExportKindCode kind, const Vector<const WasmImport> import_table,
- const Vector<const WasmExport> export_table,
- std::unordered_map<uint32_t, std::pair<WireBytesRef, WireBytesRef>>*
- names) {
- DCHECK_NOT_NULL(names);
- DCHECK(names->empty());
- DCHECK(kind == kExternalGlobal || kind == kExternalMemory ||
- kind == kExternalTable);
-
- // Extract from import table.
- for (const WasmImport& imp : import_table) {
- if (imp.kind != kind) continue;
- if (!imp.module_name.is_set() || !imp.field_name.is_set()) continue;
- if (names->count(imp.index) == 0) {
- names->insert(std::make_pair(
- imp.index, std::make_pair(imp.module_name, imp.field_name)));
- }
- }
-
- // Extract from export table.
- for (const WasmExport& exp : export_table) {
- if (exp.kind != kind) continue;
- if (!exp.name.is_set()) continue;
- if (names->count(exp.index) == 0) {
- names->insert(
- std::make_pair(exp.index, std::make_pair(WireBytesRef(), exp.name)));
- }
- }
-}
-
LocalNames DecodeLocalNames(Vector<const uint8_t> module_bytes) {
Decoder decoder(module_bytes);
if (!FindNameSection(&decoder)) return LocalNames{{}};
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 331ec33c1f..767f4fd088 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -179,13 +179,6 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
std::unordered_map<uint32_t, WireBytesRef>* names,
const Vector<const WasmExport> export_table);
-// Decode the global or memory names from import table and export table. Returns
-// the result as an unordered map.
-void GenerateNamesFromImportsAndExports(
- ImportExportKindCode kind, const Vector<const WasmImport> import_table,
- const Vector<const WasmExport> export_table,
- std::unordered_map<uint32_t, std::pair<WireBytesRef, WireBytesRef>>* names);
-
// Decode the local names assignment from the name section.
// The result will be empty if no name section is present. On encountering an
// error in the name section, returns all information decoded up to the first
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index 63469b94c4..5e7637de54 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -39,11 +39,11 @@ uint32_t EvalUint32InitExpr(Handle<WasmInstanceObject> instance,
case WasmInitExpr::kI32Const:
return expr.immediate().i32_const;
case WasmInitExpr::kGlobalGet: {
- uint32_t offset =
- instance->module()->globals[expr.immediate().index].offset;
+ const auto& global = instance->module()->globals[expr.immediate().index];
+ DCHECK_EQ(kWasmI32, global.type);
auto raw_addr = reinterpret_cast<Address>(
instance->untagged_globals_buffer().backing_store()) +
- offset;
+ global.offset;
return ReadLittleEndianValue<uint32_t>(raw_addr);
}
default:
@@ -51,6 +51,24 @@ uint32_t EvalUint32InitExpr(Handle<WasmInstanceObject> instance,
}
}
+uint64_t EvalUint64InitExpr(Handle<WasmInstanceObject> instance,
+ const WasmInitExpr& expr) {
+ switch (expr.kind()) {
+ case WasmInitExpr::kI64Const:
+ return expr.immediate().i64_const;
+ case WasmInitExpr::kGlobalGet: {
+ const auto& global = instance->module()->globals[expr.immediate().index];
+ DCHECK_EQ(kWasmI64, global.type);
+ auto raw_addr = reinterpret_cast<Address>(
+ instance->untagged_globals_buffer().backing_store()) +
+ global.offset;
+ return ReadLittleEndianValue<uint64_t>(raw_addr);
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
namespace {
byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
@@ -101,7 +119,7 @@ class CompileImportWrapperJob final : public JobTask {
// TODO(jkummerow): Move these elsewhere.
Handle<Map> CreateStructMap(Isolate* isolate, const WasmModule* module,
- int struct_index, Handle<Map> rtt_parent) {
+ int struct_index, Handle<Map> opt_rtt_parent) {
const wasm::StructType* type = module->struct_type(struct_index);
const int inobject_properties = 0;
DCHECK_LE(type->total_fields_size(), kMaxInt - WasmStruct::kHeaderSize);
@@ -111,7 +129,7 @@ Handle<Map> CreateStructMap(Isolate* isolate, const WasmModule* module,
// TODO(jkummerow): If NO_ELEMENTS were supported, we could use that here.
const ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND;
Handle<WasmTypeInfo> type_info = isolate->factory()->NewWasmTypeInfo(
- reinterpret_cast<Address>(type), rtt_parent);
+ reinterpret_cast<Address>(type), opt_rtt_parent);
Handle<Map> map = isolate->factory()->NewMap(
instance_type, instance_size, elements_kind, inobject_properties);
map->set_wasm_type_info(*type_info);
@@ -119,28 +137,14 @@ Handle<Map> CreateStructMap(Isolate* isolate, const WasmModule* module,
}
Handle<Map> CreateArrayMap(Isolate* isolate, const WasmModule* module,
- int array_index, Handle<Map> rtt_parent) {
+ int array_index, Handle<Map> opt_rtt_parent) {
const wasm::ArrayType* type = module->array_type(array_index);
const int inobject_properties = 0;
const int instance_size = kVariableSizeSentinel;
const InstanceType instance_type = WASM_ARRAY_TYPE;
const ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND;
Handle<WasmTypeInfo> type_info = isolate->factory()->NewWasmTypeInfo(
- reinterpret_cast<Address>(type), rtt_parent);
- Handle<Map> map = isolate->factory()->NewMap(
- instance_type, instance_size, elements_kind, inobject_properties);
- map->set_wasm_type_info(*type_info);
- return map;
-}
-
-Handle<Map> CreateGenericRtt(Isolate* isolate, const WasmModule* module,
- Handle<Map> rtt_parent) {
- const int inobject_properties = 0;
- const int instance_size = 0;
- const InstanceType instance_type = WASM_STRUCT_TYPE; // Fake; good enough.
- const ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND;
- Handle<WasmTypeInfo> type_info =
- isolate->factory()->NewWasmTypeInfo(0, rtt_parent);
+ reinterpret_cast<Address>(type), opt_rtt_parent);
Handle<Map> map = isolate->factory()->NewMap(
instance_type, instance_size, elements_kind, inobject_properties);
map->set_wasm_type_info(*type_info);
@@ -180,31 +184,34 @@ class RttSubtypes : public ArrayList {
Handle<Map> AllocateSubRtt(Isolate* isolate,
Handle<WasmInstanceObject> instance, uint32_t type,
Handle<Map> parent) {
+ DCHECK(parent->IsWasmStructMap() || parent->IsWasmArrayMap() ||
+ parent->IsJSFunctionMap());
+
+ const wasm::WasmModule* module = instance->module();
+ if (module->has_signature(type)) {
+ // Currently, parent rtts for functions are meaningless,
+ // since (rtt.test func rtt) iff (func.map == rtt).
+ // Therefore, we simply create a fresh function map here.
+ // TODO(7748): Canonicalize rtts to make them work for identical function
+ // types.
+ return Map::Copy(isolate, isolate->wasm_exported_function_map(),
+ "fresh function map for AllocateSubRtt");
+ }
+
// Check for an existing RTT first.
- DCHECK(parent->IsWasmStructMap() || parent->IsWasmArrayMap());
Handle<ArrayList> cache(parent->wasm_type_info().subtypes(), isolate);
Map maybe_cached = RttSubtypes::SearchSubtype(cache, type);
if (!maybe_cached.is_null()) return handle(maybe_cached, isolate);
// Allocate a fresh RTT otherwise.
- const wasm::WasmModule* module = instance->module();
Handle<Map> rtt;
- if (wasm::HeapType(type).is_generic()) {
- rtt = wasm::CreateGenericRtt(isolate, module, parent);
- } else if (module->has_struct(type)) {
+ if (module->has_struct(type)) {
rtt = wasm::CreateStructMap(isolate, module, type, parent);
- } else if (module->has_array(type)) {
- rtt = wasm::CreateArrayMap(isolate, module, type, parent);
} else {
- DCHECK(module->has_signature(type));
- // Currently, parent rtts for functions are meaningless,
- // since (rtt.test func rtt) iff (func.map == rtt).
- // Therefore, we simply create a fresh function map here.
- // TODO(7748): Canonicalize rtts to make them work for identical function
- // types.
- rtt = Map::Copy(isolate, isolate->wasm_exported_function_map(),
- "fresh function map for AllocateSubRtt");
+ DCHECK(module->has_array(type));
+ rtt = wasm::CreateArrayMap(isolate, module, type, parent);
}
+
cache = RttSubtypes::Insert(isolate, cache, type, rtt);
parent->wasm_type_info().set_subtypes(*cache);
return rtt;
@@ -458,9 +465,9 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// The maximum number of pages isn't strictly necessary for memory
// objects used for asm.js, as they are never visible, but we might
// as well make it accurate.
- auto maximum_pages = static_cast<uint32_t>(
- RoundUp(buffer->byte_length(), wasm::kWasmPageSize) /
- wasm::kWasmPageSize);
+ auto maximum_pages =
+ static_cast<int>(RoundUp(buffer->byte_length(), wasm::kWasmPageSize) /
+ wasm::kWasmPageSize);
memory_object_ =
WasmMemoryObject::New(isolate_, memory_buffer_, maximum_pages);
} else {
@@ -610,18 +617,16 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
if (enabled_.has_gc()) {
Handle<FixedArray> maps = isolate_->factory()->NewUninitializedFixedArray(
static_cast<int>(module_->type_kinds.size()));
- Handle<Map> anyref_map =
- Handle<Map>::cast(isolate_->root_handle(RootIndex::kWasmRttAnyrefMap));
for (int map_index = 0;
map_index < static_cast<int>(module_->type_kinds.size());
map_index++) {
Handle<Map> map;
switch (module_->type_kinds[map_index]) {
case kWasmStructTypeCode:
- map = CreateStructMap(isolate_, module_, map_index, anyref_map);
+ map = CreateStructMap(isolate_, module_, map_index, Handle<Map>());
break;
case kWasmArrayTypeCode:
- map = CreateArrayMap(isolate_, module_, map_index, anyref_map);
+ map = CreateArrayMap(isolate_, module_, map_index, Handle<Map>());
break;
case kWasmFunctionTypeCode:
// TODO(7748): Think about canonicalizing rtts to make them work for
@@ -655,45 +660,6 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
InitializeExceptions(instance);
}
- // The bulk memory proposal changes the MVP behavior here; the segments are
- // written as if `memory.init` and `table.init` are executed directly, and
- // not bounds checked ahead of time.
- if (!enabled_.has_bulk_memory()) {
- //--------------------------------------------------------------------------
- // Check that indirect function table segments are within bounds.
- //--------------------------------------------------------------------------
- for (const WasmElemSegment& elem_segment : module_->elem_segments) {
- if (elem_segment.status != WasmElemSegment::kStatusActive) continue;
- DCHECK_LT(elem_segment.table_index, table_count);
- uint32_t base = EvalUint32InitExpr(instance, elem_segment.offset);
- // Because of imported tables, {table_size} has to come from the table
- // object itself.
- auto table_object = handle(WasmTableObject::cast(instance->tables().get(
- elem_segment.table_index)),
- isolate_);
- uint32_t table_size = table_object->current_length();
- if (!base::IsInBounds<uint32_t>(
- base, static_cast<uint32_t>(elem_segment.entries.size()),
- table_size)) {
- thrower_->LinkError("table initializer is out of bounds");
- return {};
- }
- }
-
- //--------------------------------------------------------------------------
- // Check that memory segments are within bounds.
- //--------------------------------------------------------------------------
- for (const WasmDataSegment& seg : module_->data_segments) {
- if (!seg.active) continue;
- uint32_t base = EvalUint32InitExpr(instance, seg.dest_addr);
- if (!base::IsInBounds<uint64_t>(base, seg.source.length(),
- instance->memory_size())) {
- thrower_->LinkError("data segment is out of bounds");
- return {};
- }
- }
- }
-
//--------------------------------------------------------------------------
// Set up the exports object for the new instance.
//--------------------------------------------------------------------------
@@ -862,33 +828,28 @@ void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
for (const WasmDataSegment& segment : module_->data_segments) {
uint32_t size = segment.source.length();
- if (enabled_.has_bulk_memory()) {
- // Passive segments are not copied during instantiation.
- if (!segment.active) continue;
-
- uint32_t dest_offset = EvalUint32InitExpr(instance, segment.dest_addr);
- bool ok = base::ClampToBounds(
- dest_offset, &size, static_cast<uint32_t>(instance->memory_size()));
- if (!ok) {
- thrower_->RuntimeError("data segment is out of bounds");
- return;
- }
- // No need to copy empty segments.
- if (size == 0) continue;
- std::memcpy(instance->memory_start() + dest_offset,
- wire_bytes.begin() + segment.source.offset(), size);
+ // Passive segments are not copied during instantiation.
+ if (!segment.active) continue;
+
+ size_t dest_offset;
+ if (module_->is_memory64) {
+ uint64_t dest_offset_64 = EvalUint64InitExpr(instance, segment.dest_addr);
+ // Clamp to {std::numeric_limits<size_t>::max()}, which is always an
+ // invalid offset.
+ DCHECK_GT(std::numeric_limits<size_t>::max(), instance->memory_size());
+ dest_offset = static_cast<size_t>(std::min(
+ dest_offset_64, uint64_t{std::numeric_limits<size_t>::max()}));
} else {
- DCHECK(segment.active);
- // Segments of size == 0 are just nops.
- if (size == 0) continue;
-
- uint32_t dest_offset = EvalUint32InitExpr(instance, segment.dest_addr);
- DCHECK(base::IsInBounds<uint64_t>(dest_offset, size,
- instance->memory_size()));
- byte* dest = instance->memory_start() + dest_offset;
- const byte* src = wire_bytes.begin() + segment.source.offset();
- base::Memcpy(dest, src, size);
+ dest_offset = EvalUint32InitExpr(instance, segment.dest_addr);
+ }
+
+ if (!base::IsInBounds<size_t>(dest_offset, size, instance->memory_size())) {
+ thrower_->RuntimeError("data segment is out of bounds");
+ return;
}
+
+ std::memcpy(instance->memory_start() + dest_offset,
+ wire_bytes.begin() + segment.source.offset(), size);
}
}
@@ -897,20 +858,20 @@ void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, double num) {
raw_buffer_ptr(untagged_globals_, 0), global.offset, num,
global.type.name().c_str());
switch (global.type.kind()) {
- case ValueType::kI32:
+ case kI32:
WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global),
DoubleToInt32(num));
break;
- case ValueType::kI64:
+ case kI64:
// The Wasm-BigInt proposal currently says that i64 globals may
// only be initialized with BigInts. See:
// https://github.com/WebAssembly/JS-BigInt-integration/issues/12
UNREACHABLE();
- case ValueType::kF32:
+ case kF32:
WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global),
DoubleToFloat32(num));
break;
- case ValueType::kF64:
+ case kF64:
WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global), num);
break;
default:
@@ -931,41 +892,42 @@ void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global,
TRACE("init [globals_start=%p + %u] = ", raw_buffer_ptr(untagged_globals_, 0),
global.offset);
switch (global.type.kind()) {
- case ValueType::kI32: {
+ case kI32: {
int32_t num = value->GetI32();
WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global), num);
TRACE("%d", num);
break;
}
- case ValueType::kI64: {
+ case kI64: {
int64_t num = value->GetI64();
WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global), num);
TRACE("%" PRId64, num);
break;
}
- case ValueType::kF32: {
+ case kF32: {
float num = value->GetF32();
WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global), num);
TRACE("%f", num);
break;
}
- case ValueType::kF64: {
+ case kF64: {
double num = value->GetF64();
WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global), num);
TRACE("%lf", num);
break;
}
- case ValueType::kRtt:
- case ValueType::kRef:
- case ValueType::kOptRef: {
+ case kRtt:
+ case kRttWithDepth:
+ case kRef:
+ case kOptRef: {
tagged_globals_->set(global.offset, *value->GetRef());
break;
}
- case ValueType::kStmt:
- case ValueType::kS128:
- case ValueType::kBottom:
- case ValueType::kI8:
- case ValueType::kI16:
+ case kStmt:
+ case kS128:
+ case kBottom:
+ case kI8:
+ case kI16:
UNREACHABLE();
}
TRACE(", type = %s (from WebAssembly.Global)\n", global.type.name().c_str());
@@ -1579,28 +1541,11 @@ Handle<Object> InstanceBuilder::RecursivelyEvaluateGlobalInitializer(
return handle(tagged_globals_->get(old_offset), isolate_);
}
case WasmInitExpr::kRttCanon: {
- switch (init.immediate().heap_type) {
- case wasm::HeapType::kEq:
- return isolate_->root_handle(RootIndex::kWasmRttEqrefMap);
- case wasm::HeapType::kExtern:
- return isolate_->root_handle(RootIndex::kWasmRttExternrefMap);
- case wasm::HeapType::kFunc:
- return isolate_->root_handle(RootIndex::kWasmRttFuncrefMap);
- case wasm::HeapType::kI31:
- return isolate_->root_handle(RootIndex::kWasmRttI31refMap);
- case wasm::HeapType::kAny:
- return isolate_->root_handle(RootIndex::kWasmRttAnyrefMap);
- case wasm::HeapType::kExn:
- UNIMPLEMENTED(); // TODO(jkummerow): This is going away?
- case wasm::HeapType::kBottom:
- UNREACHABLE();
- }
- // Non-generic types fall through.
- int map_index = init.immediate().heap_type;
+ int map_index = init.immediate().index;
return handle(instance->managed_object_maps().get(map_index), isolate_);
}
case WasmInitExpr::kRttSub: {
- uint32_t type = static_cast<uint32_t>(init.immediate().heap_type);
+ uint32_t type = init.immediate().index;
Handle<Object> parent =
RecursivelyEvaluateGlobalInitializer(*init.operand(), instance);
return AllocateSubRtt(isolate_, instance, type,
@@ -1661,7 +1606,7 @@ void InstanceBuilder::InitGlobals(Handle<WasmInstanceObject> instance) {
module_->globals[global.init.immediate().index].offset;
TRACE("init [globals+%u] = [globals+%d]\n", global.offset, old_offset);
if (global.type.is_reference_type()) {
- DCHECK(enabled_.has_reftypes() || enabled_.has_eh());
+ DCHECK(enabled_.has_reftypes());
tagged_globals_->set(new_offset, tagged_globals_->get(old_offset));
} else {
size_t size = (global.type == kWasmI64 || global.type == kWasmF64)
@@ -1687,10 +1632,11 @@ void InstanceBuilder::InitGlobals(Handle<WasmInstanceObject> instance) {
// Allocate memory for a module instance as a new JSArrayBuffer.
bool InstanceBuilder::AllocateMemory() {
- uint32_t initial_pages = module_->initial_pages;
- uint32_t maximum_pages =
- module_->has_maximum_pages ? module_->maximum_pages : max_mem_pages();
- if (initial_pages > max_mem_pages()) {
+ int initial_pages = static_cast<int>(module_->initial_pages);
+ int maximum_pages = module_->has_maximum_pages
+ ? static_cast<int>(module_->maximum_pages)
+ : WasmMemoryObject::kNoMaximum;
+ if (initial_pages > static_cast<int>(max_mem_pages())) {
thrower_->RangeError("Out of memory: wasm memory too large");
return false;
}
@@ -1740,12 +1686,18 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
Handle<JSObject> exports_object;
MaybeHandle<String> single_function_name;
bool is_asm_js = is_asmjs_module(module_);
+ // TODO(clemensb): Remove this #if once this compilation unit is fully
+ // excluded from non-wasm builds.
if (is_asm_js) {
+#if V8_ENABLE_WEBASSEMBLY
Handle<JSFunction> object_function = Handle<JSFunction>(
isolate_->native_context()->object_function(), isolate_);
exports_object = isolate_->factory()->NewJSObject(object_function);
single_function_name =
isolate_->factory()->InternalizeUtf8String(AsmJs::kSingleFunctionName);
+#else
+ UNREACHABLE();
+#endif
} else {
exports_object = isolate_->factory()->NewJSObjectWithNullProto();
}
@@ -1994,16 +1946,12 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
// a dropped passive segment and an active segment have the same
// behavior.
instance->dropped_elem_segments()[segment_index] = 1;
- if (enabled_.has_bulk_memory()) {
- if (!success) {
- thrower_->RuntimeError("table initializer is out of bounds");
- // Break out instead of returning; we don't want to continue to
- // initialize any further element segments, but still need to add
- // dispatch tables below.
- break;
- }
- } else {
- CHECK(success);
+ if (!success) {
+ thrower_->RuntimeError("table initializer is out of bounds");
+ // Break out instead of returning; we don't want to continue to
+ // initialize any further element segments, but still need to add
+ // dispatch tables below.
+ break;
}
}
diff --git a/deps/v8/src/wasm/value-type.cc b/deps/v8/src/wasm/value-type.cc
new file mode 100644
index 0000000000..609d5c19f3
--- /dev/null
+++ b/deps/v8/src/wasm/value-type.cc
@@ -0,0 +1,34 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/value-type.h"
+
+#include "src/codegen/signature.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+base::Optional<wasm::ValueKind> WasmReturnTypeFromSignature(
+ const FunctionSig* wasm_signature) {
+ if (wasm_signature->return_count() == 0) {
+ return {};
+ } else {
+ DCHECK_EQ(wasm_signature->return_count(), 1);
+ ValueType return_type = wasm_signature->GetReturn(0);
+ switch (return_type.kind()) {
+ case kI32:
+ case kI64:
+ case kF32:
+ case kF64:
+ return {return_type.kind()};
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index 95990a14e7..2dbd337b0a 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -6,6 +6,7 @@
#define V8_WASM_VALUE_TYPE_H_
#include "src/base/bit-field.h"
+#include "src/base/optional.h"
#include "src/codegen/machine-type.h"
#include "src/wasm/wasm-constants.h"
@@ -35,12 +36,13 @@ class Simd128;
V(I8, 0, I8, Int8, 'b', "i8") \
V(I16, 1, I16, Int16, 'h', "i16")
-#define FOREACH_VALUE_TYPE(V) \
- V(Stmt, -1, Void, None, 'v', "<stmt>") \
- FOREACH_NUMERIC_VALUE_TYPE(V) \
- V(Rtt, kTaggedSizeLog2, Rtt, TaggedPointer, 't', "rtt") \
- V(Ref, kTaggedSizeLog2, Ref, AnyTagged, 'r', "ref") \
- V(OptRef, kTaggedSizeLog2, OptRef, AnyTagged, 'n', "ref null") \
+#define FOREACH_VALUE_TYPE(V) \
+ V(Stmt, -1, Void, None, 'v', "<stmt>") \
+ FOREACH_NUMERIC_VALUE_TYPE(V) \
+ V(Rtt, kTaggedSizeLog2, Rtt, TaggedPointer, 't', "rtt") \
+ V(RttWithDepth, kTaggedSizeLog2, RttWithDepth, TaggedPointer, 'k', "rtt") \
+ V(Ref, kTaggedSizeLog2, Ref, AnyTagged, 'r', "ref") \
+ V(OptRef, kTaggedSizeLog2, OptRef, AnyTagged, 'n', "ref null") \
V(Bottom, -1, Void, None, '*', "<bot>")
// Represents a WebAssembly heap type, as per the typed-funcref and gc
@@ -55,8 +57,8 @@ class HeapType {
kFunc = kV8MaxWasmTypes, // shorthand: c
kExtern, // shorthand: e
kEq, // shorthand: q
- kExn, // shorthand: x
kI31, // shorthand: j
+ kData, // shorthand: o
kAny, // shorthand: a
// This value is used to represent failures in the parsing of heap types and
// does not correspond to a wasm heap type.
@@ -75,12 +77,12 @@ class HeapType {
return HeapType(kExtern);
case ValueTypeCode::kEqRefCode:
return HeapType(kEq);
- case ValueTypeCode::kExnRefCode:
- return HeapType(kExn);
case ValueTypeCode::kI31RefCode:
return HeapType(kI31);
case ValueTypeCode::kAnyRefCode:
return HeapType(kAny);
+ case ValueTypeCode::kDataRefCode:
+ return HeapType(kData);
default:
return HeapType(kBottom);
}
@@ -129,10 +131,10 @@ class HeapType {
return std::string("extern");
case kEq:
return std::string("eq");
- case kExn:
- return std::string("exn");
case kI31:
return std::string("i31");
+ case kData:
+ return std::string("data");
case kAny:
return std::string("any");
default:
@@ -148,14 +150,14 @@ class HeapType {
switch (representation_) {
case kFunc:
return mask | kFuncRefCode;
- case kExn:
- return mask | kExnRefCode;
case kExtern:
return mask | kExternRefCode;
case kEq:
return mask | kEqRefCode;
case kI31:
return mask | kI31RefCode;
+ case kData:
+ return mask | kDataRefCode;
case kAny:
return mask | kAnyRefCode;
default:
@@ -171,23 +173,105 @@ class HeapType {
enum Nullability : bool { kNonNullable, kNullable };
-// A ValueType is encoded by three components: A Kind, a heap representation
-// (for reference types), and an inheritance depth (for rtts only). Those are
-// encoded into 32 bits using base::BitField. The underlying Kind enumeration
-// includes four elements which do not strictly correspond to value types: the
-// two packed types i8 and i16, the type of void blocks (stmt), and a bottom
-// value (for internal use).
-class ValueType {
- public:
- enum Kind : uint8_t {
+enum ValueKind : uint8_t {
#define DEF_ENUM(kind, ...) k##kind,
- FOREACH_VALUE_TYPE(DEF_ENUM)
+ FOREACH_VALUE_TYPE(DEF_ENUM)
#undef DEF_ENUM
+};
+
+constexpr bool is_reference_type(ValueKind kind) {
+ return kind == kRef || kind == kOptRef || kind == kRtt ||
+ kind == kRttWithDepth;
+}
+
+constexpr bool is_object_reference_type(ValueKind kind) {
+ return kind == kRef || kind == kOptRef;
+}
+
+constexpr int element_size_log2(ValueKind kind) {
+ constexpr int8_t kElementSizeLog2[] = {
+#define ELEM_SIZE_LOG2(kind, log2Size, ...) log2Size,
+ FOREACH_VALUE_TYPE(ELEM_SIZE_LOG2)
+#undef ELEM_SIZE_LOG2
};
+ int size_log_2 = kElementSizeLog2[kind];
+ CONSTEXPR_DCHECK(size_log_2 >= 0);
+ return size_log_2;
+}
+
+constexpr int element_size_bytes(ValueKind kind) {
+ constexpr int8_t kElementSize[] = {
+#define ELEM_SIZE_LOG2(kind, log2Size, ...) \
+ log2Size == -1 ? -1 : (1 << std::max(0, log2Size)),
+ FOREACH_VALUE_TYPE(ELEM_SIZE_LOG2)
+#undef ELEM_SIZE_LOG2
+ };
+
+ int size = kElementSize[kind];
+ CONSTEXPR_DCHECK(size > 0);
+ return size;
+}
+
+constexpr char short_name(ValueKind kind) {
+ constexpr char kShortName[] = {
+#define SHORT_NAME(kind, log2Size, code, machineType, shortName, ...) shortName,
+ FOREACH_VALUE_TYPE(SHORT_NAME)
+#undef SHORT_NAME
+ };
+
+ return kShortName[kind];
+}
+
+constexpr const char* name(ValueKind kind) {
+ constexpr const char* kKindName[] = {
+#define KIND_NAME(kind, log2Size, code, machineType, shortName, kindName, ...) \
+ kindName,
+ FOREACH_VALUE_TYPE(KIND_NAME)
+#undef KIND_NAME
+ };
+
+ return kKindName[kind];
+}
+
+constexpr MachineType machine_type(ValueKind kind) {
+ CONSTEXPR_DCHECK(kBottom != kind);
+
+ constexpr MachineType kMachineType[] = {
+#define MACH_TYPE(kind, log2Size, code, machineType, ...) \
+ MachineType::machineType(),
+ FOREACH_VALUE_TYPE(MACH_TYPE)
+#undef MACH_TYPE
+ };
+
+ return kMachineType[kind];
+}
+
+constexpr bool is_packed(ValueKind kind) { return kind == kI8 || kind == kI16; }
+constexpr ValueKind unpacked(ValueKind kind) {
+ return is_packed(kind) ? kI32 : kind;
+}
+
+constexpr bool is_rtt(ValueKind kind) {
+ return kind == kRtt || kind == kRttWithDepth;
+}
+
+constexpr bool is_defaultable(ValueKind kind) {
+ CONSTEXPR_DCHECK(kind != kBottom && kind != kStmt);
+ return kind != kRef && !is_rtt(kind);
+}
+
+// A ValueType is encoded by three components: A ValueKind, a heap
+// representation (for reference types), and an inheritance depth (for rtts
+// only). Those are encoded into 32 bits using base::BitField. The underlying
+// ValueKind enumeration includes four elements which do not strictly correspond
+// to value types: the two packed types i8 and i16, the type of void blocks
+// (stmt), and a bottom value (for internal use).
+class ValueType {
+ public:
/******************************* Constructors *******************************/
constexpr ValueType() : bit_field_(KindField::encode(kStmt)) {}
- static constexpr ValueType Primitive(Kind kind) {
+ static constexpr ValueType Primitive(ValueKind kind) {
CONSTEXPR_DCHECK(kind == kBottom || kind <= kI16);
return ValueType(KindField::encode(kind));
}
@@ -201,16 +285,18 @@ class ValueType {
return Ref(heap_type.representation(), nullability);
}
- static constexpr ValueType Rtt(uint32_t heap_type,
- uint8_t inheritance_depth) {
- CONSTEXPR_DCHECK(HeapType(heap_type).is_valid());
+ static constexpr ValueType Rtt(uint32_t type_index) {
+ CONSTEXPR_DCHECK(HeapType(type_index).is_index());
return ValueType(KindField::encode(kRtt) |
- HeapTypeField::encode(heap_type) |
- DepthField::encode(inheritance_depth));
+ HeapTypeField::encode(type_index));
}
- static constexpr ValueType Rtt(HeapType heap_type,
+
+ static constexpr ValueType Rtt(uint32_t type_index,
uint8_t inheritance_depth) {
- return Rtt(heap_type.representation(), inheritance_depth);
+ CONSTEXPR_DCHECK(HeapType(type_index).is_index());
+ return ValueType(KindField::encode(kRttWithDepth) |
+ HeapTypeField::encode(type_index) |
+ DepthField::encode(inheritance_depth));
}
// Useful when deserializing a type stored in a runtime object.
@@ -220,11 +306,11 @@ class ValueType {
/******************************** Type checks *******************************/
constexpr bool is_reference_type() const {
- return kind() == kRef || kind() == kOptRef || kind() == kRtt;
+ return wasm::is_reference_type(kind());
}
constexpr bool is_object_reference_type() const {
- return kind() == kRef || kind() == kOptRef;
+ return wasm::is_object_reference_type(kind());
}
constexpr bool is_nullable() const { return kind() == kOptRef; }
@@ -234,34 +320,32 @@ class ValueType {
heap_representation() == htype;
}
- constexpr bool is_rtt() const { return kind() == kRtt; }
- constexpr bool has_depth() const { return is_rtt(); }
+ constexpr bool is_rtt() const { return wasm::is_rtt(kind()); }
+ constexpr bool has_depth() const { return kind() == kRttWithDepth; }
constexpr bool has_index() const {
- return is_reference_type() && heap_type().is_index();
+ return is_rtt() || (is_object_reference_type() && heap_type().is_index());
}
- constexpr bool is_defaultable() const {
- CONSTEXPR_DCHECK(kind() != kBottom && kind() != kStmt);
- return kind() != kRef && kind() != kRtt;
- }
+ constexpr bool is_defaultable() const { return wasm::is_defaultable(kind()); }
constexpr bool is_bottom() const { return kind() == kBottom; }
- constexpr bool is_packed() const { return kind() == kI8 || kind() == kI16; }
+ constexpr bool is_packed() const { return wasm::is_packed(kind()); }
constexpr ValueType Unpacked() const {
return is_packed() ? Primitive(kI32) : *this;
}
/***************************** Field Accessors ******************************/
- constexpr Kind kind() const { return KindField::decode(bit_field_); }
+ constexpr ValueKind kind() const { return KindField::decode(bit_field_); }
constexpr HeapType::Representation heap_representation() const {
- CONSTEXPR_DCHECK(is_reference_type());
+ CONSTEXPR_DCHECK(is_object_reference_type());
return static_cast<HeapType::Representation>(
HeapTypeField::decode(bit_field_));
}
constexpr HeapType heap_type() const {
+ CONSTEXPR_DCHECK(is_object_reference_type());
return HeapType(heap_representation());
}
constexpr uint8_t depth() const {
@@ -270,7 +354,11 @@ class ValueType {
}
constexpr uint32_t ref_index() const {
CONSTEXPR_DCHECK(has_index());
- return heap_type().ref_index();
+ return HeapTypeField::decode(bit_field_);
+ }
+ constexpr Nullability nullability() const {
+ CONSTEXPR_DCHECK(is_object_reference_type());
+ return kind() == kOptRef ? kNullable : kNonNullable;
}
// Useful when serializing this type to store it into a runtime object.
@@ -289,42 +377,16 @@ class ValueType {
}
constexpr int element_size_log2() const {
- constexpr int8_t kElementSizeLog2[] = {
-#define ELEM_SIZE_LOG2(kind, log2Size, ...) log2Size,
- FOREACH_VALUE_TYPE(ELEM_SIZE_LOG2)
-#undef ELEM_SIZE_LOG2
- };
-
- int size_log_2 = kElementSizeLog2[kind()];
- CONSTEXPR_DCHECK(size_log_2 >= 0);
- return size_log_2;
+ return wasm::element_size_log2(kind());
}
constexpr int element_size_bytes() const {
- constexpr int8_t kElementSize[] = {
-#define ELEM_SIZE_LOG2(kind, log2Size, ...) \
- log2Size == -1 ? -1 : (1 << std::max(0, log2Size)),
- FOREACH_VALUE_TYPE(ELEM_SIZE_LOG2)
-#undef ELEM_SIZE_LOG2
- };
-
- int size = kElementSize[kind()];
- CONSTEXPR_DCHECK(size > 0);
- return size;
+ return wasm::element_size_bytes(kind());
}
/*************************** Machine-type related ***************************/
constexpr MachineType machine_type() const {
- CONSTEXPR_DCHECK(kBottom != kind());
-
- constexpr MachineType kMachineType[] = {
-#define MACH_TYPE(kind, log2Size, code, machineType, ...) \
- MachineType::machineType(),
- FOREACH_VALUE_TYPE(MACH_TYPE)
-#undef MACH_TYPE
- };
-
- return kMachineType[kind()];
+ return wasm::machine_type(kind());
}
constexpr MachineRepresentation machine_representation() const {
@@ -371,20 +433,26 @@ class ValueType {
return kExternRefCode;
case HeapType::kEq:
return kEqRefCode;
- case HeapType::kExn:
- return kExnRefCode;
case HeapType::kAny:
return kAnyRefCode;
default:
return kOptRefCode;
}
case kRef:
- if (heap_representation() == HeapType::kI31) return kI31RefCode;
- return kRefCode;
+ switch (heap_representation()) {
+ case HeapType::kI31:
+ return kI31RefCode;
+ case HeapType::kData:
+ return kDataRefCode;
+ default:
+ return kRefCode;
+ }
case kStmt:
return kVoidCode;
case kRtt:
return kRttCode;
+ case kRttWithDepth:
+ return kRttWithDepthCode;
#define NUMERIC_TYPE_CASE(kind, ...) \
case k##kind: \
return k##kind##Code;
@@ -399,47 +467,36 @@ class ValueType {
// Returns true iff the heap type is needed to encode this type in the wasm
// binary format, taking into account available type shorthands.
constexpr bool encoding_needs_heap_type() const {
- return (kind() == kRef && heap_representation() != HeapType::kI31) ||
- kind() == kRtt ||
- (kind() == kOptRef && (!heap_type().is_generic() ||
- heap_representation() == HeapType::kI31));
+ return (kind() == kRef && heap_representation() != HeapType::kI31 &&
+ heap_representation() != HeapType::kData) ||
+ (kind() == kOptRef && (heap_type().is_index() ||
+ heap_representation() == HeapType::kI31 ||
+ heap_representation() == HeapType::kData));
}
static constexpr int kLastUsedBit = 30;
/****************************** Pretty-printing *****************************/
- constexpr char short_name() const {
- constexpr char kShortName[] = {
-#define SHORT_NAME(kind, log2Size, code, machineType, shortName, ...) shortName,
- FOREACH_VALUE_TYPE(SHORT_NAME)
-#undef SHORT_NAME
- };
-
- return kShortName[kind()];
- }
+ constexpr char short_name() const { return wasm::short_name(kind()); }
std::string name() const {
std::ostringstream buf;
switch (kind()) {
case kRef:
- if (heap_representation() == HeapType::kI31) {
- buf << "i31ref";
- } else {
- buf << "(ref " << heap_type().name() << ")";
- }
- break;
case kOptRef:
- if (heap_type().is_generic() &&
- heap_representation() != HeapType::kI31) {
- // We use shorthands to be compatible with the 'reftypes' proposal.
- buf << heap_type().name() << "ref";
+ if (encoding_needs_heap_type()) {
+ buf << "(ref " << (kind() == kOptRef ? "null " : "")
+ << heap_type().name() << ")";
} else {
- buf << "(ref null " << heap_type().name() << ")";
+ buf << heap_type().name() << "ref";
}
break;
+ case kRttWithDepth:
+ buf << "(rtt " << static_cast<uint32_t>(depth()) << " " << ref_index()
+ << ")";
+ break;
case kRtt:
- buf << "(rtt " << static_cast<uint32_t>(depth()) << " "
- << heap_type().name() << ")";
+ buf << "(rtt " << ref_index() << ")";
break;
default:
buf << kind_name();
@@ -457,7 +514,7 @@ class ValueType {
// Note: we currently conservatively allow only 5 bits, but have room to
// store 6, so we can raise the limit if needed.
STATIC_ASSERT(kV8MaxRttSubtypingDepth < (1u << kDepthBits));
- using KindField = base::BitField<Kind, 0, kKindBits>;
+ using KindField = base::BitField<ValueKind, 0, kKindBits>;
using HeapTypeField = KindField::Next<uint32_t, kHeapTypeBits>;
using DepthField = HeapTypeField::Next<uint8_t, kDepthBits>;
@@ -468,17 +525,7 @@ class ValueType {
constexpr explicit ValueType(uint32_t bit_field) : bit_field_(bit_field) {}
- constexpr const char* kind_name() const {
- constexpr const char* kTypeName[] = {
-#define KIND_NAME(kind, log2Size, code, machineType, shortName, typeName, ...) \
- typeName,
- FOREACH_VALUE_TYPE(KIND_NAME)
-#undef TYPE_NAME
- };
-
- CONSTEXPR_DCHECK(kind() < arraysize(kTypeName));
- return kTypeName[kind()];
- }
+ constexpr const char* kind_name() const { return wasm::name(kind()); }
uint32_t bit_field_;
};
@@ -496,22 +543,23 @@ inline std::ostream& operator<<(std::ostream& oss, ValueType type) {
}
// Precomputed primitive types.
-constexpr ValueType kWasmI32 = ValueType::Primitive(ValueType::kI32);
-constexpr ValueType kWasmI64 = ValueType::Primitive(ValueType::kI64);
-constexpr ValueType kWasmF32 = ValueType::Primitive(ValueType::kF32);
-constexpr ValueType kWasmF64 = ValueType::Primitive(ValueType::kF64);
-constexpr ValueType kWasmS128 = ValueType::Primitive(ValueType::kS128);
-constexpr ValueType kWasmI8 = ValueType::Primitive(ValueType::kI8);
-constexpr ValueType kWasmI16 = ValueType::Primitive(ValueType::kI16);
-constexpr ValueType kWasmStmt = ValueType::Primitive(ValueType::kStmt);
-constexpr ValueType kWasmBottom = ValueType::Primitive(ValueType::kBottom);
-// Established wasm shorthands:
+constexpr ValueType kWasmI32 = ValueType::Primitive(kI32);
+constexpr ValueType kWasmI64 = ValueType::Primitive(kI64);
+constexpr ValueType kWasmF32 = ValueType::Primitive(kF32);
+constexpr ValueType kWasmF64 = ValueType::Primitive(kF64);
+constexpr ValueType kWasmS128 = ValueType::Primitive(kS128);
+constexpr ValueType kWasmI8 = ValueType::Primitive(kI8);
+constexpr ValueType kWasmI16 = ValueType::Primitive(kI16);
+constexpr ValueType kWasmStmt = ValueType::Primitive(kStmt);
+constexpr ValueType kWasmBottom = ValueType::Primitive(kBottom);
+// Established reference-type proposal shorthands.
constexpr ValueType kWasmFuncRef = ValueType::Ref(HeapType::kFunc, kNullable);
-constexpr ValueType kWasmExnRef = ValueType::Ref(HeapType::kExn, kNullable);
constexpr ValueType kWasmExternRef =
ValueType::Ref(HeapType::kExtern, kNullable);
constexpr ValueType kWasmEqRef = ValueType::Ref(HeapType::kEq, kNullable);
constexpr ValueType kWasmI31Ref = ValueType::Ref(HeapType::kI31, kNonNullable);
+constexpr ValueType kWasmDataRef =
+ ValueType::Ref(HeapType::kData, kNonNullable);
constexpr ValueType kWasmAnyRef = ValueType::Ref(HeapType::kAny, kNullable);
#define FOREACH_WASMVALUE_CTYPES(V) \
@@ -558,21 +606,21 @@ class LoadType {
constexpr ValueType value_type() const { return kValueType[val_]; }
constexpr MachineType mem_type() const { return kMemType[val_]; }
- static LoadType ForValueType(ValueType type, bool is_signed = false) {
- switch (type.kind()) {
- case ValueType::kI32:
+ static LoadType ForValueKind(ValueKind kind, bool is_signed = false) {
+ switch (kind) {
+ case kI32:
return kI32Load;
- case ValueType::kI64:
+ case kI64:
return kI64Load;
- case ValueType::kF32:
+ case kF32:
return kF32Load;
- case ValueType::kF64:
+ case kF64:
return kF64Load;
- case ValueType::kS128:
+ case kS128:
return kS128Load;
- case ValueType::kI8:
+ case kI8:
return is_signed ? kI32Load8S : kI32Load8U;
- case ValueType::kI16:
+ case kI16:
return is_signed ? kI32Load16S : kI32Load16U;
default:
UNREACHABLE();
@@ -592,7 +640,7 @@ class LoadType {
};
static constexpr ValueType kValueType[] = {
-#define VALUE_TYPE(type, ...) ValueType::Primitive(ValueType::k##type),
+#define VALUE_TYPE(type, ...) ValueType::Primitive(k##type),
FOREACH_LOAD_TYPE(VALUE_TYPE)
#undef VALUE_TYPE
};
@@ -634,21 +682,21 @@ class StoreType {
constexpr ValueType value_type() const { return kValueType[val_]; }
constexpr MachineRepresentation mem_rep() const { return kMemRep[val_]; }
- static StoreType ForValueType(ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
+ static StoreType ForValueKind(ValueKind kind) {
+ switch (kind) {
+ case kI32:
return kI32Store;
- case ValueType::kI64:
+ case kI64:
return kI64Store;
- case ValueType::kF32:
+ case kF32:
return kF32Store;
- case ValueType::kF64:
+ case kF64:
return kF64Store;
- case ValueType::kS128:
+ case kS128:
return kS128Store;
- case ValueType::kI8:
+ case kI8:
return kI32Store8;
- case ValueType::kI16:
+ case kI16:
return kI32Store16;
default:
UNREACHABLE();
@@ -667,7 +715,7 @@ class StoreType {
};
static constexpr ValueType kValueType[] = {
-#define VALUE_TYPE(type, ...) ValueType::Primitive(ValueType::k##type),
+#define VALUE_TYPE(type, ...) ValueType::Primitive(k##type),
FOREACH_STORE_TYPE(VALUE_TYPE)
#undef VALUE_TYPE
};
@@ -679,6 +727,9 @@ class StoreType {
};
};
+base::Optional<wasm::ValueKind> WasmReturnTypeFromSignature(
+ const FunctionSig* wasm_signature);
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 9cd1e68f24..86726dcaf0 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -430,10 +430,6 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
it.rinfo()->Print(nullptr, os);
}
os << "\n";
-
- if (code_comments_size() > 0) {
- PrintCodeCommentsSection(os, code_comments(), code_comments_size());
- }
#endif // ENABLE_DISASSEMBLER
}
@@ -874,11 +870,13 @@ void NativeModule::LogWasmCodes(Isolate* isolate, Script script) {
// Log all owned code, not just the current entries in the code table. This
// will also include import wrappers.
- WasmCodeRefScope code_ref_scope;
base::MutexGuard lock(&allocation_mutex_);
for (auto& owned_entry : owned_code_) {
owned_entry.second->LogCode(isolate, source_url.get(), script.id());
}
+ for (auto& owned_entry : new_owned_code_) {
+ owned_entry->LogCode(isolate, source_url.get(), script.id());
+ }
}
CompilationEnv NativeModule::CreateCompilationEnv() const {
@@ -1133,6 +1131,10 @@ WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
// The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
DCHECK(!allocation_mutex_.TryLock());
+ // Add the code to the surrounding code ref scope, so the returned pointer is
+ // guaranteed to be valid.
+ WasmCodeRefScope::AddRef(code.get());
+
if (!code->IsAnonymous() &&
code->index() >= module_->num_imported_functions) {
DCHECK_LT(code->index(), num_functions());
@@ -1169,36 +1171,43 @@ WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
WasmCodeRefScope::AddRef(prior_code);
// The code is added to the current {WasmCodeRefScope}, hence the ref
// count cannot drop to zero here.
- CHECK(!prior_code->DecRef());
+ prior_code->DecRefOnLiveCode();
}
PatchJumpTablesLocked(slot_idx, code->instruction_start());
+ } else {
+ // The code tables does not hold a reference to the code, hence decrement
+ // the initial ref count of 1. The code was added to the
+ // {WasmCodeRefScope} though, so it cannot die here.
+ code->DecRefOnLiveCode();
}
if (!code->for_debugging() && tiering_state_ == kTieredDown &&
code->tier() == ExecutionTier::kTurbofan) {
liftoff_bailout_count_.fetch_add(1);
}
}
- WasmCodeRefScope::AddRef(code.get());
WasmCode* result = code.get();
- owned_code_.emplace(result->instruction_start(), std::move(code));
+ new_owned_code_.emplace_back(std::move(code));
return result;
}
-std::unique_ptr<WasmCode> NativeModule::AllocateDeserializedCode(
- int index, Vector<const byte> instructions, int stack_slots,
+Vector<uint8_t> NativeModule::AllocateForDeserializedCode(
+ size_t total_code_size) {
+ return code_allocator_.AllocateForCode(this, total_code_size);
+}
+
+std::unique_ptr<WasmCode> NativeModule::AddDeserializedCode(
+ int index, Vector<byte> instructions, int stack_slots,
int tagged_parameter_slots, int safepoint_table_offset,
int handler_table_offset, int constant_pool_offset,
int code_comments_offset, int unpadded_binary_size,
Vector<const byte> protected_instructions_data,
Vector<const byte> reloc_info, Vector<const byte> source_position_table,
WasmCode::Kind kind, ExecutionTier tier) {
- Vector<uint8_t> dst_code_bytes =
- code_allocator_.AllocateForCode(this, instructions.size());
- UpdateCodeSize(dst_code_bytes.size(), tier, kNoDebugging);
+ UpdateCodeSize(instructions.size(), tier, kNoDebugging);
return std::unique_ptr<WasmCode>{new WasmCode{
- this, index, dst_code_bytes, stack_slots, tagged_parameter_slots,
+ this, index, instructions, stack_slots, tagged_parameter_slots,
safepoint_table_offset, handler_table_offset, constant_pool_offset,
code_comments_offset, unpadded_binary_size, protected_instructions_data,
reloc_info, source_position_table, kind, tier, kNoDebugging}};
@@ -1208,6 +1217,9 @@ std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
base::MutexGuard lock(&allocation_mutex_);
WasmCode** start = code_table_.get();
WasmCode** end = start + module_->num_declared_functions;
+ for (WasmCode* code : VectorOf(start, end - start)) {
+ if (code) WasmCodeRefScope::AddRef(code);
+ }
return std::vector<WasmCode*>{start, end};
}
@@ -1447,8 +1459,34 @@ void NativeModule::SetWireBytes(OwnedVector<const uint8_t> wire_bytes) {
}
}
+void NativeModule::TransferNewOwnedCodeLocked() const {
+ // The caller holds the allocation mutex.
+ DCHECK(!allocation_mutex_.TryLock());
+ DCHECK(!new_owned_code_.empty());
+ // Sort the {new_owned_code_} vector in reverse order, such that the position of the
+ // previously inserted element can be used as a hint for the next element. If
+ // elements in {new_owned_code_} are adjacent, this will guarantee
+ // constant-time insertion into the map.
+ std::sort(new_owned_code_.begin(), new_owned_code_.end(),
+ [](const std::unique_ptr<WasmCode>& a,
+ const std::unique_ptr<WasmCode>& b) {
+ return a->instruction_start() > b->instruction_start();
+ });
+ auto insertion_hint = owned_code_.end();
+ for (auto& code : new_owned_code_) {
+ DCHECK_EQ(0, owned_code_.count(code->instruction_start()));
+ // Check plausibility of the insertion hint.
+ DCHECK(insertion_hint == owned_code_.end() ||
+ insertion_hint->first > code->instruction_start());
+ insertion_hint = owned_code_.emplace_hint(
+ insertion_hint, code->instruction_start(), std::move(code));
+ }
+ new_owned_code_.clear();
+}
+
WasmCode* NativeModule::Lookup(Address pc) const {
base::MutexGuard lock(&allocation_mutex_);
+ if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();
auto iter = owned_code_.upper_bound(pc);
if (iter == owned_code_.begin()) return nullptr;
--iter;
@@ -1912,6 +1950,11 @@ std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
code_allocator_.AllocateForCode(this, total_code_space);
// Lookup the jump tables to use once, then use for all code objects.
auto jump_tables = FindJumpTablesForRegion(base::AddressRegionOf(code_space));
+ // If we happen to have a {total_code_space} which is bigger than
+ // {kMaxCodeSpaceSize}, we would not find valid jump tables for the whole
+ // region. If this ever happens, we need to handle this case (by splitting the
+ // {results} vector into smaller chunks).
+ CHECK(jump_tables.is_valid());
std::vector<std::unique_ptr<WasmCode>> generated_code;
generated_code.reserve(results.size());
@@ -1985,6 +2028,7 @@ void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
DebugInfo* debug_info = nullptr;
{
base::MutexGuard guard(&allocation_mutex_);
+ if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();
debug_info = debug_info_.get();
// Free the {WasmCode} objects. This will also unregister trap handler data.
for (WasmCode* code : codes) {
@@ -2095,10 +2139,7 @@ WasmCodeRefScope::WasmCodeRefScope()
WasmCodeRefScope::~WasmCodeRefScope() {
DCHECK_EQ(this, current_code_refs_scope);
current_code_refs_scope = previous_scope_;
- std::vector<WasmCode*> code_ptrs;
- code_ptrs.reserve(code_ptrs_.size());
- code_ptrs.assign(code_ptrs_.begin(), code_ptrs_.end());
- WasmCode::DecrementRefCount(VectorOf(code_ptrs));
+ WasmCode::DecrementRefCount(VectorOf(code_ptrs_));
}
// static
@@ -2106,9 +2147,8 @@ void WasmCodeRefScope::AddRef(WasmCode* code) {
DCHECK_NOT_NULL(code);
WasmCodeRefScope* current_scope = current_code_refs_scope;
DCHECK_NOT_NULL(current_scope);
- auto entry = current_scope->code_ptrs_.insert(code);
- // If we added a new entry, increment the ref counter.
- if (entry.second) code->IncRef();
+ current_scope->code_ptrs_.push_back(code);
+ code->IncRef();
}
const char* GetRuntimeStubName(WasmCode::RuntimeStubId stub_id) {
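
The {WasmCodeRefScope} change above drops the de-duplicating set: every AddRef now increments the count and records the pointer, and the destructor decrements once per recorded pointer, so duplicates stay balanced without a hash lookup per AddRef. A minimal standalone analog of that scheme (illustrative names, not the V8 types):

#include <atomic>
#include <vector>

// Simplified ref-counted object, standing in for WasmCode.
struct Obj {
  std::atomic<int> ref_count{1};
  void IncRef() { ref_count.fetch_add(1, std::memory_order_relaxed); }
  void DecRef() { ref_count.fetch_sub(1, std::memory_order_acq_rel); }
};

// Scope that records every acquired reference. Duplicates are allowed: each
// AddRef() increments exactly once, and the destructor decrements exactly once
// per entry, so the net effect on the ref count is zero.
class RefScope {
 public:
  ~RefScope() {
    for (Obj* obj : refs_) obj->DecRef();
  }
  void AddRef(Obj* obj) {
    obj->IncRef();
    refs_.push_back(obj);
  }

 private:
  std::vector<Obj*> refs_;
};
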
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 994365a961..0924fd17b5 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -9,7 +9,6 @@
#include <map>
#include <memory>
#include <set>
-#include <unordered_set>
#include <utility>
#include <vector>
@@ -72,12 +71,12 @@ struct WasmModule;
V(WasmTableSet) \
V(WasmStackGuard) \
V(WasmStackOverflow) \
+ V(WasmAllocateFixedArray) \
V(WasmThrow) \
V(WasmRethrow) \
V(WasmTraceEnter) \
V(WasmTraceExit) \
V(WasmTraceMemory) \
- V(ArgumentsAdaptorTrampoline) \
V(BigIntToI32Pair) \
V(BigIntToI64) \
V(DoubleToI) \
@@ -87,7 +86,8 @@ struct WasmModule;
V(ToNumber) \
V(WasmAllocateArrayWithRtt) \
V(WasmAllocateRtt) \
- V(WasmAllocateStructWithRtt)
+ V(WasmAllocateStructWithRtt) \
+ V(WasmSubtypeCheck)
// Sorted, disjoint and non-overlapping memory regions. A region is of the
// form [start, end). So there's no [start, end), [end, other_end),
@@ -230,6 +230,14 @@ class V8_EXPORT_PRIVATE WasmCode final {
}
}
+ // Decrement the ref count on code that is known to be in use (i.e. the ref
+ // count cannot drop to zero here).
+ void DecRefOnLiveCode() {
+ int old_count = ref_count_.fetch_sub(1, std::memory_order_acq_rel);
+ DCHECK_LE(2, old_count);
+ USE(old_count);
+ }
+
// Decrement the ref count on code that is known to be dead, even though there
// might still be C++ references. Returns whether this drops the last
// reference and the code needs to be freed.
@@ -501,8 +509,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
WasmCode* PublishCode(std::unique_ptr<WasmCode>);
std::vector<WasmCode*> PublishCode(Vector<std::unique_ptr<WasmCode>>);
- std::unique_ptr<WasmCode> AllocateDeserializedCode(
- int index, Vector<const byte> instructions, int stack_slots,
+ Vector<uint8_t> AllocateForDeserializedCode(size_t total_code_size);
+
+ std::unique_ptr<WasmCode> AddDeserializedCode(
+ int index, Vector<byte> instructions, int stack_slots,
int tagged_parameter_slots, int safepoint_table_offset,
int handler_table_offset, int constant_pool_offset,
int code_comments_offset, int unpadded_binary_size,
@@ -541,8 +551,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
Address GetCallTargetForFunction(uint32_t func_index) const;
struct JumpTablesRef {
- const Address jump_table_start = kNullAddress;
- const Address far_jump_table_start = kNullAddress;
+ Address jump_table_start = kNullAddress;
+ Address far_jump_table_start = kNullAddress;
bool is_valid() const { return far_jump_table_start != kNullAddress; }
};
@@ -722,6 +732,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Hold the {allocation_mutex_} when calling {PublishCodeLocked}.
WasmCode* PublishCodeLocked(std::unique_ptr<WasmCode>);
+ // Transfer owned code from {new_owned_code_} to {owned_code_}.
+ void TransferNewOwnedCodeLocked() const;
+
// -- Fields of {NativeModule} start here.
WasmEngine* const engine_;
@@ -780,9 +793,15 @@ class V8_EXPORT_PRIVATE NativeModule final {
//////////////////////////////////////////////////////////////////////////////
// Protected by {allocation_mutex_}:
- // Holds all allocated code objects. For lookup based on pc, the key is the
- // instruction start address of the value.
- std::map<Address, std::unique_ptr<WasmCode>> owned_code_;
+ // Holds allocated code objects for fast lookup and deletion. For lookup based
+ // on pc, the key is the instruction start address of the value. Filled lazily
+ // from {new_owned_code_} (below).
+ mutable std::map<Address, std::unique_ptr<WasmCode>> owned_code_;
+
+ // Holds owned code which is not inserted into {owned_code_} yet. It will be
+ // inserted on demand. This has much better performance than inserting
+ // individual code objects.
+ mutable std::vector<std::unique_ptr<WasmCode>> new_owned_code_;
// Table of the latest code object per function, updated on initial
// compilation and tier up. The number of entries is
@@ -925,7 +944,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD WasmCodeRefScope {
private:
WasmCodeRefScope* const previous_scope_;
- std::unordered_set<WasmCode*> code_ptrs_;
+ std::vector<WasmCode*> code_ptrs_;
};
// Similarly to a global handle, a {GlobalWasmCodeRef} stores a single
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index 2c47e994f2..79356a203d 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -38,9 +38,9 @@ enum ValueTypeCode : uint8_t {
kOptRefCode = 0x6c,
kRefCode = 0x6b,
kI31RefCode = 0x6a,
- kRttCode = 0x69,
- // Exception handling proposal
- kExnRefCode = 0x68,
+ kRttWithDepthCode = 0x69,
+ kRttCode = 0x68,
+ kDataRefCode = 0x67,
};
// Binary encoding of other types.
constexpr uint8_t kWasmFunctionTypeCode = 0x60;
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 0851bdcfed..6b67f6029d 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -12,6 +12,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/common/assert-scope.h"
#include "src/compiler/wasm-compiler.h"
+#include "src/debug/debug-evaluate.h"
#include "src/execution/frames-inl.h"
#include "src/heap/factory.h"
#include "src/wasm/baseline/liftoff-compiler.h"
@@ -33,6 +34,8 @@ namespace wasm {
namespace {
+using ImportExportKey = std::pair<ImportExportKindCode, uint32_t>;
+
enum ReturnLocation { kAfterBreakpoint, kAfterWasmCall };
Address FindNewPC(WasmFrame* frame, WasmCode* wasm_code, int byte_offset,
@@ -89,10 +92,11 @@ void DebugSideTable::Print(std::ostream& os) const {
}
void DebugSideTable::Entry::Print(std::ostream& os) const {
- os << std::setw(6) << std::hex << pc_offset_ << std::dec << " [";
- for (auto& value : values_) {
- os << " " << value.type.name() << ":";
- switch (value.kind) {
+ os << std::setw(6) << std::hex << pc_offset_ << std::dec << " stack height "
+ << stack_height_ << " [";
+ for (auto& value : changed_values_) {
+ os << " " << name(value.kind) << ":";
+ switch (value.storage) {
case kConstant:
os << "const#" << value.i32_const;
break;
@@ -124,25 +128,26 @@ class DebugInfoImpl {
WasmValue GetLocalValue(int local, Address pc, Address fp,
Address debug_break_fp) {
FrameInspectionScope scope(this, pc);
- return GetValue(scope.debug_side_table_entry, local, fp, debug_break_fp);
+ return GetValue(scope.debug_side_table, scope.debug_side_table_entry, local,
+ fp, debug_break_fp);
}
int GetStackDepth(Address pc) {
FrameInspectionScope scope(this, pc);
if (!scope.is_inspectable()) return 0;
- int num_locals = static_cast<int>(scope.debug_side_table->num_locals());
- int value_count = scope.debug_side_table_entry->num_values();
- return value_count - num_locals;
+ int num_locals = scope.debug_side_table->num_locals();
+ int stack_height = scope.debug_side_table_entry->stack_height();
+ return stack_height - num_locals;
}
WasmValue GetStackValue(int index, Address pc, Address fp,
Address debug_break_fp) {
FrameInspectionScope scope(this, pc);
- int num_locals = static_cast<int>(scope.debug_side_table->num_locals());
- int value_count = scope.debug_side_table_entry->num_values();
+ int num_locals = scope.debug_side_table->num_locals();
+ int value_count = scope.debug_side_table_entry->stack_height();
if (num_locals + index >= value_count) return {};
- return GetValue(scope.debug_side_table_entry, num_locals + index, fp,
- debug_break_fp);
+ return GetValue(scope.debug_side_table, scope.debug_side_table_entry,
+ num_locals + index, fp, debug_break_fp);
}
const WasmFunction& GetFunctionAtAddress(Address pc) {
@@ -151,6 +156,39 @@ class DebugInfoImpl {
return module->functions[scope.code->index()];
}
+ WireBytesRef GetExportName(ImportExportKindCode kind, uint32_t index) {
+ base::MutexGuard guard(&mutex_);
+ if (!export_names_) {
+ export_names_ =
+ std::make_unique<std::map<ImportExportKey, WireBytesRef>>();
+ for (auto exp : native_module_->module()->export_table) {
+ auto exp_key = std::make_pair(exp.kind, exp.index);
+ if (export_names_->find(exp_key) != export_names_->end()) continue;
+ export_names_->insert(std::make_pair(exp_key, exp.name));
+ }
+ }
+ auto it = export_names_->find(std::make_pair(kind, index));
+ if (it != export_names_->end()) return it->second;
+ return {};
+ }
+
+ std::pair<WireBytesRef, WireBytesRef> GetImportName(ImportExportKindCode kind,
+ uint32_t index) {
+ base::MutexGuard guard(&mutex_);
+ if (!import_names_) {
+ import_names_ = std::make_unique<
+ std::map<ImportExportKey, std::pair<WireBytesRef, WireBytesRef>>>();
+ for (auto imp : native_module_->module()->import_table) {
+ import_names_->insert(
+ std::make_pair(std::make_pair(imp.kind, imp.index),
+ std::make_pair(imp.module_name, imp.field_name)));
+ }
+ }
+ auto it = import_names_->find(std::make_pair(kind, index));
+ if (it != import_names_->end()) return it->second;
+ return {};
+ }
+
WireBytesRef GetLocalName(int func_index, int local_index) {
base::MutexGuard guard(&mutex_);
if (!local_names_) {
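
The new {GetExportName}/{GetImportName} accessors build their lookup maps lazily under {mutex_}, keeping only the first export seen per (kind, index) key. Below is a simplified, self-contained version of that pattern; {NameCache} and {ExportEntry} are hypothetical types used only for illustration.

#include <cstdint>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <utility>
#include <vector>

// Hypothetical export record, standing in for an entry of the export table.
struct ExportEntry {
  int kind;
  uint32_t index;
  std::string name;
};

class NameCache {
 public:
  explicit NameCache(std::vector<ExportEntry> exports)
      : exports_(std::move(exports)) {}

  // Lazily build the (kind, index) -> name map on first use. std::map::emplace
  // keeps the first value per key, matching the "skip if already present" loop.
  std::string GetExportName(int kind, uint32_t index) {
    std::lock_guard<std::mutex> guard(mutex_);
    if (!export_names_) {
      export_names_ = std::make_unique<std::map<Key, std::string>>();
      for (const ExportEntry& exp : exports_) {
        export_names_->emplace(Key{exp.kind, exp.index}, exp.name);
      }
    }
    auto it = export_names_->find(Key{kind, index});
    return it != export_names_->end() ? it->second : std::string{};
  }

 private:
  using Key = std::pair<int, uint32_t>;
  std::mutex mutex_;
  std::vector<ExportEntry> exports_;
  std::unique_ptr<std::map<Key, std::string>> export_names_;
};
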
@@ -177,7 +215,8 @@ class DebugInfoImpl {
return offset;
}
- WasmCode* RecompileLiftoffWithBreakpoints(int func_index, Vector<int> offsets,
+ WasmCode* RecompileLiftoffWithBreakpoints(int func_index,
+ Vector<const int> offsets,
int dead_breakpoint) {
DCHECK(!mutex_.TryLock()); // Mutex is held externally.
// Recompile the function with Liftoff, setting the new breakpoints.
@@ -193,22 +232,24 @@ class DebugInfoImpl {
ForDebugging for_debugging = offsets.size() == 1 && offsets[0] == 0
? kForStepping
: kWithBreakpoints;
+ // Debug side tables for stepping are generated lazily.
+ bool generate_debug_sidetable = for_debugging == kWithBreakpoints;
Counters* counters = nullptr;
WasmFeatures unused_detected;
WasmCompilationResult result = ExecuteLiftoffCompilation(
native_module_->engine()->allocator(), &env, body, func_index,
- for_debugging, counters, &unused_detected, offsets, &debug_sidetable,
- dead_breakpoint);
+ for_debugging, counters, &unused_detected, offsets,
+ generate_debug_sidetable ? &debug_sidetable : nullptr, dead_breakpoint);
// Liftoff compilation failure is a FATAL error. We rely on complete Liftoff
// support for debugging.
if (!result.succeeded()) FATAL("Liftoff compilation failed");
- DCHECK_NOT_NULL(debug_sidetable);
+ DCHECK_EQ(generate_debug_sidetable, debug_sidetable != nullptr);
WasmCode* new_code = native_module_->PublishCode(
native_module_->AddCompiledCode(std::move(result)));
DCHECK(new_code->is_inspectable());
- {
+ if (generate_debug_sidetable) {
base::MutexGuard lock(&debug_side_tables_mutex_);
DCHECK_EQ(0, debug_side_tables_.count(new_code));
debug_side_tables_.emplace(new_code, std::move(debug_sidetable));
@@ -288,12 +329,12 @@ class DebugInfoImpl {
void FloodWithBreakpoints(WasmFrame* frame, ReturnLocation return_location) {
// 0 is an invalid offset used to indicate flooding.
- int offset = 0;
+ constexpr int kFloodingBreakpoints[] = {0};
DCHECK(frame->wasm_code()->is_liftoff());
// Generate an additional source position for the current byte offset.
base::MutexGuard guard(&mutex_);
WasmCode* new_code = RecompileLiftoffWithBreakpoints(
- frame->function_index(), VectorOf(&offset, 1), 0);
+ frame->function_index(), ArrayVector(kFloodingBreakpoints), 0);
UpdateReturnAddress(frame, new_code, return_location);
per_isolate_data_[frame->isolate()].stepping_frame = frame->id();
@@ -413,11 +454,9 @@ class DebugInfoImpl {
: code(debug_info->native_module_->engine()->code_manager()->LookupCode(
pc)),
pc_offset(static_cast<int>(pc - code->instruction_start())),
- debug_side_table(
- code->is_inspectable()
- ? debug_info->GetDebugSideTable(
- code, debug_info->native_module_->engine()->allocator())
- : nullptr),
+ debug_side_table(code->is_inspectable()
+ ? debug_info->GetDebugSideTable(code)
+ : nullptr),
debug_side_table_entry(debug_side_table
? debug_side_table->GetEntry(pc_offset)
: nullptr) {
@@ -433,8 +472,7 @@ class DebugInfoImpl {
const DebugSideTable::Entry* debug_side_table_entry;
};
- const DebugSideTable* GetDebugSideTable(WasmCode* code,
- AccountingAllocator* allocator) {
+ const DebugSideTable* GetDebugSideTable(WasmCode* code) {
DCHECK(code->is_inspectable());
{
// Only hold the mutex temporarily. We can't hold it while generating the
@@ -445,16 +483,8 @@ class DebugInfoImpl {
}
// Otherwise create the debug side table now.
- auto* module = native_module_->module();
- auto* function = &module->functions[code->index()];
- ModuleWireBytes wire_bytes{native_module_->wire_bytes()};
- Vector<const byte> function_bytes = wire_bytes.GetFunctionBytes(function);
- CompilationEnv env = native_module_->CreateCompilationEnv();
- FunctionBody func_body{function->sig, 0, function_bytes.begin(),
- function_bytes.end()};
std::unique_ptr<DebugSideTable> debug_side_table =
- GenerateLiftoffDebugSideTable(allocator, &env, func_body,
- code->index());
+ GenerateLiftoffDebugSideTable(code);
DebugSideTable* ret = debug_side_table.get();
// Check cache again, maybe another thread concurrently generated a debug
@@ -473,35 +503,34 @@ class DebugInfoImpl {
// Get the value of a local (including parameters) or stack value. Stack
// values follow the locals in the same index space.
- WasmValue GetValue(const DebugSideTable::Entry* debug_side_table_entry,
+ WasmValue GetValue(const DebugSideTable* debug_side_table,
+ const DebugSideTable::Entry* debug_side_table_entry,
int index, Address stack_frame_base,
Address debug_break_fp) const {
- ValueType type = debug_side_table_entry->value_type(index);
- if (debug_side_table_entry->is_constant(index)) {
- DCHECK(type == kWasmI32 || type == kWasmI64);
- return type == kWasmI32
- ? WasmValue(debug_side_table_entry->i32_constant(index))
- : WasmValue(
- int64_t{debug_side_table_entry->i32_constant(index)});
+ const auto* value =
+ debug_side_table->FindValue(debug_side_table_entry, index);
+ if (value->is_constant()) {
+ DCHECK(value->kind == kI32 || value->kind == kI64);
+ return value->kind == kI32 ? WasmValue(value->i32_const)
+ : WasmValue(int64_t{value->i32_const});
}
- if (debug_side_table_entry->is_register(index)) {
- LiftoffRegister reg = LiftoffRegister::from_liftoff_code(
- debug_side_table_entry->register_code(index));
+ if (value->is_register()) {
+ auto reg = LiftoffRegister::from_liftoff_code(value->reg_code);
auto gp_addr = [debug_break_fp](Register reg) {
return debug_break_fp +
WasmDebugBreakFrameConstants::GetPushedGpRegisterOffset(
reg.code());
};
if (reg.is_gp_pair()) {
- DCHECK_EQ(kWasmI64, type);
+ DCHECK_EQ(kI64, value->kind);
uint32_t low_word = ReadUnalignedValue<uint32_t>(gp_addr(reg.low_gp()));
uint32_t high_word =
ReadUnalignedValue<uint32_t>(gp_addr(reg.high_gp()));
return WasmValue((uint64_t{high_word} << 32) | low_word);
}
if (reg.is_gp()) {
- return type == kWasmI32
+ return value->kind == kI32
? WasmValue(ReadUnalignedValue<uint32_t>(gp_addr(reg.gp())))
: WasmValue(ReadUnalignedValue<uint64_t>(gp_addr(reg.gp())));
}
@@ -515,11 +544,11 @@ class DebugInfoImpl {
Address spilled_addr =
debug_break_fp +
WasmDebugBreakFrameConstants::GetPushedFpRegisterOffset(code);
- if (type == kWasmF32) {
+ if (value->kind == kF32) {
return WasmValue(ReadUnalignedValue<float>(spilled_addr));
- } else if (type == kWasmF64) {
+ } else if (value->kind == kF64) {
return WasmValue(ReadUnalignedValue<double>(spilled_addr));
- } else if (type == kWasmS128) {
+ } else if (value->kind == kS128) {
return WasmValue(Simd128(ReadUnalignedValue<int16>(spilled_addr)));
} else {
// All other cases should have been handled above.
@@ -528,18 +557,17 @@ class DebugInfoImpl {
}
// Otherwise load the value from the stack.
- Address stack_address =
- stack_frame_base - debug_side_table_entry->stack_offset(index);
- switch (type.kind()) {
- case ValueType::kI32:
+ Address stack_address = stack_frame_base - value->stack_offset;
+ switch (value->kind) {
+ case kI32:
return WasmValue(ReadUnalignedValue<int32_t>(stack_address));
- case ValueType::kI64:
+ case kI64:
return WasmValue(ReadUnalignedValue<int64_t>(stack_address));
- case ValueType::kF32:
+ case kF32:
return WasmValue(ReadUnalignedValue<float>(stack_address));
- case ValueType::kF64:
+ case kF64:
return WasmValue(ReadUnalignedValue<double>(stack_address));
- case ValueType::kS128: {
+ case kS128: {
return WasmValue(Simd128(ReadUnalignedValue<int16>(stack_address)));
}
default:
@@ -621,6 +649,14 @@ class DebugInfoImpl {
// {mutex_} protects all fields below.
mutable base::Mutex mutex_;
+ // Names of exports, lazily derived from the exports table.
+ std::unique_ptr<std::map<ImportExportKey, wasm::WireBytesRef>> export_names_;
+
+ // Names of imports, lazily derived from the imports table.
+ std::unique_ptr<std::map<ImportExportKey,
+ std::pair<wasm::WireBytesRef, wasm::WireBytesRef>>>
+ import_names_;
+
// Names of locals, lazily decoded from the wire bytes.
std::unique_ptr<LocalNames> local_names_;
@@ -651,6 +687,16 @@ const wasm::WasmFunction& DebugInfo::GetFunctionAtAddress(Address pc) {
return impl_->GetFunctionAtAddress(pc);
}
+WireBytesRef DebugInfo::GetExportName(ImportExportKindCode code,
+ uint32_t index) {
+ return impl_->GetExportName(code, index);
+}
+
+std::pair<WireBytesRef, WireBytesRef> DebugInfo::GetImportName(
+ ImportExportKindCode code, uint32_t index) {
+ return impl_->GetImportName(code, index);
+}
+
WireBytesRef DebugInfo::GetLocalName(int func_index, int local_index) {
return impl_->GetLocalName(func_index, local_index);
}
@@ -728,6 +774,22 @@ int FindNextBreakablePosition(wasm::NativeModule* native_module, int func_index,
// static
bool WasmScript::SetBreakPoint(Handle<Script> script, int* position,
Handle<BreakPoint> break_point) {
+ // Special handling for on-entry breakpoints.
+ if (*position == kOnEntryBreakpointPosition) {
+ AddBreakpointToInfo(script, *position, break_point);
+ script->set_break_on_entry(true);
+
+ // Update the "break_on_entry" flag on all live instances.
+ i::WeakArrayList weak_instance_list = script->wasm_weak_instance_list();
+ for (int i = 0; i < weak_instance_list.length(); ++i) {
+ if (weak_instance_list.Get(i)->IsCleared()) continue;
+ i::WasmInstanceObject instance = i::WasmInstanceObject::cast(
+ weak_instance_list.Get(i)->GetHeapObject());
+ instance.set_break_on_entry(true);
+ }
+ return true;
+ }
+
// Find the function for this breakpoint.
const wasm::WasmModule* module = script->wasm_native_module()->module();
int func_index = GetContainingWasmFunction(module, *position);
@@ -772,8 +834,7 @@ bool WasmScript::SetBreakPointForFunction(Handle<Script> script, int func_index,
const wasm::WasmFunction& func = module->functions[func_index];
// Insert new break point into {wasm_breakpoint_infos} of the script.
- WasmScript::AddBreakpointToInfo(script, func.code.offset() + offset,
- break_point);
+ AddBreakpointToInfo(script, func.code.offset() + offset, break_point);
native_module->GetDebugInfo()->SetBreakpoint(func_index, offset, isolate);
@@ -791,8 +852,9 @@ int FindBreakpointInfoInsertPos(Isolate* isolate,
Handle<FixedArray> breakpoint_infos,
int position) {
// Find insert location via binary search, taking care of undefined values on
- // the right. Position is always greater than zero.
- DCHECK_LT(0, position);
+ // the right. {position} is either {kOnEntryBreakpointPosition} (which is -1),
+ // or positive.
+ DCHECK(position == WasmScript::kOnEntryBreakpointPosition || position > 0);
int left = 0; // inclusive
int right = breakpoint_infos->length(); // exclusive
@@ -1010,10 +1072,34 @@ bool WasmScript::GetPossibleBreakpoints(
return true;
}
+namespace {
+
+bool CheckBreakPoint(Isolate* isolate, Handle<BreakPoint> break_point,
+ StackFrameId frame_id) {
+ if (break_point->condition().length() == 0) return true;
+
+ HandleScope scope(isolate);
+ Handle<String> condition(break_point->condition(), isolate);
+ Handle<Object> result;
+ // The Wasm engine doesn't perform any sort of inlining.
+ const int inlined_jsframe_index = 0;
+ const bool throw_on_side_effect = false;
+ if (!DebugEvaluate::Local(isolate, frame_id, inlined_jsframe_index, condition,
+ throw_on_side_effect)
+ .ToHandle(&result)) {
+ isolate->clear_pending_exception();
+ return false;
+ }
+ return result->BooleanValue(isolate);
+}
+
+} // namespace
+
// static
MaybeHandle<FixedArray> WasmScript::CheckBreakPoints(Isolate* isolate,
Handle<Script> script,
- int position) {
+ int position,
+ StackFrameId frame_id) {
if (!script->has_wasm_breakpoint_infos()) return {};
Handle<FixedArray> breakpoint_infos(script->wasm_breakpoint_infos(), isolate);
@@ -1028,14 +1114,29 @@ MaybeHandle<FixedArray> WasmScript::CheckBreakPoints(Isolate* isolate,
Handle<BreakPointInfo>::cast(maybe_breakpoint_info);
if (breakpoint_info->source_position() != position) return {};
- // There is no support for conditional break points. Just assume that every
- // break point always hits.
Handle<Object> break_points(breakpoint_info->break_points(), isolate);
- if (break_points->IsFixedArray()) {
- return Handle<FixedArray>::cast(break_points);
+ if (!break_points->IsFixedArray()) {
+ if (!CheckBreakPoint(isolate, Handle<BreakPoint>::cast(break_points),
+ frame_id)) {
+ return {};
+ }
+ Handle<FixedArray> break_points_hit = isolate->factory()->NewFixedArray(1);
+ break_points_hit->set(0, *break_points);
+ return break_points_hit;
+ }
+
+ Handle<FixedArray> array = Handle<FixedArray>::cast(break_points);
+ Handle<FixedArray> break_points_hit =
+ isolate->factory()->NewFixedArray(array->length());
+ int break_points_hit_count = 0;
+ for (int i = 0; i < array->length(); ++i) {
+ Handle<BreakPoint> break_point(BreakPoint::cast(array->get(i)), isolate);
+ if (CheckBreakPoint(isolate, break_point, frame_id)) {
+ break_points_hit->set(break_points_hit_count++, *break_point);
+ }
}
- Handle<FixedArray> break_points_hit = isolate->factory()->NewFixedArray(1);
- break_points_hit->set(0, *break_points);
+ if (break_points_hit_count == 0) return {};
+ break_points_hit->Shrink(isolate, break_points_hit_count);
return break_points_hit;
}
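
With the change above, Wasm breakpoints can carry conditions: a breakpoint at the hit position is reported only if it is unconditional or its condition evaluates to true in the paused frame (evaluation failures clear the exception and count as a miss). A reduced sketch of that filtering step, with the evaluator abstracted behind a callback; all names here are illustrative, not the V8 API.

#include <functional>
#include <string>
#include <vector>

// Hypothetical breakpoint with an optional condition expression.
struct BreakPoint {
  int id;
  std::string condition;  // empty means unconditional
};

// Collect the breakpoints that are actually hit: unconditional ones always
// hit, conditional ones only when the evaluator returns true. {evaluate}
// stands in for evaluating the condition in the paused frame.
std::vector<int> CollectHitBreakPoints(
    const std::vector<BreakPoint>& at_position,
    const std::function<bool(const std::string&)>& evaluate) {
  std::vector<int> hits;
  for (const BreakPoint& bp : at_position) {
    if (bp.condition.empty() || evaluate(bp.condition)) hits.push_back(bp.id);
  }
  return hits;  // empty result means "no breakpoint hit"
}
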
diff --git a/deps/v8/src/wasm/wasm-debug.h b/deps/v8/src/wasm/wasm-debug.h
index 337578fe06..837692644c 100644
--- a/deps/v8/src/wasm/wasm-debug.h
+++ b/deps/v8/src/wasm/wasm-debug.h
@@ -13,6 +13,7 @@
#include "src/base/iterator.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
+#include "src/utils/vector.h"
#include "src/wasm/value-type.h"
namespace v8 {
@@ -20,8 +21,6 @@ namespace internal {
template <typename T>
class Handle;
-template <typename T>
-class Vector;
class WasmFrame;
namespace wasm {
@@ -41,60 +40,71 @@ class DebugSideTable {
public:
class Entry {
public:
- enum ValueKind : int8_t { kConstant, kRegister, kStack };
+ enum Storage : int8_t { kConstant, kRegister, kStack };
struct Value {
- ValueType type;
+ int index;
ValueKind kind;
+ Storage storage;
union {
int32_t i32_const; // if kind == kConstant
int reg_code; // if kind == kRegister
int stack_offset; // if kind == kStack
};
+
+ bool operator==(const Value& other) const {
+ if (index != other.index) return false;
+ if (kind != other.kind) return false;
+ if (storage != other.storage) return false;
+ switch (storage) {
+ case kConstant:
+ return i32_const == other.i32_const;
+ case kRegister:
+ return reg_code == other.reg_code;
+ case kStack:
+ return stack_offset == other.stack_offset;
+ }
+ }
+ bool operator!=(const Value& other) const { return !(*this == other); }
+
+ bool is_constant() const { return storage == kConstant; }
+ bool is_register() const { return storage == kRegister; }
};
- Entry(int pc_offset, std::vector<Value> values)
- : pc_offset_(pc_offset), values_(std::move(values)) {}
+ Entry(int pc_offset, int stack_height, std::vector<Value> changed_values)
+ : pc_offset_(pc_offset),
+ stack_height_(stack_height),
+ changed_values_(std::move(changed_values)) {}
// Constructor for map lookups (only initializes the {pc_offset_}).
explicit Entry(int pc_offset) : pc_offset_(pc_offset) {}
int pc_offset() const { return pc_offset_; }
- int num_values() const { return static_cast<int>(values_.size()); }
- ValueType value_type(int index) const { return values_[index].type; }
-
- auto values() const {
- return base::make_iterator_range(values_.begin(), values_.end());
- }
-
- int stack_offset(int index) const {
- DCHECK_EQ(kStack, values_[index].kind);
- return values_[index].stack_offset;
- }
-
- bool is_constant(int index) const {
- return values_[index].kind == kConstant;
- }
+ // Stack height, including locals.
+ int stack_height() const { return stack_height_; }
- bool is_register(int index) const {
- return values_[index].kind == kRegister;
+ Vector<const Value> changed_values() const {
+ return VectorOf(changed_values_);
}
- int32_t i32_constant(int index) const {
- DCHECK_EQ(kConstant, values_[index].kind);
- return values_[index].i32_const;
- }
-
- int32_t register_code(int index) const {
- DCHECK_EQ(kRegister, values_[index].kind);
- return values_[index].reg_code;
+ const Value* FindChangedValue(int stack_index) const {
+ DCHECK_GT(stack_height_, stack_index);
+ auto it = std::lower_bound(
+ changed_values_.begin(), changed_values_.end(), stack_index,
+ [](const Value& changed_value, int stack_index) {
+ return changed_value.index < stack_index;
+ });
+ return it != changed_values_.end() && it->index == stack_index ? &*it
+ : nullptr;
}
void Print(std::ostream&) const;
private:
int pc_offset_;
- std::vector<Value> values_;
+ int stack_height_;
+ // Only store differences from the last entry, to keep the table small.
+ std::vector<Value> changed_values_;
};
// Technically it would be fine to copy this class, but there should not be a
@@ -111,10 +121,25 @@ class DebugSideTable {
auto it = std::lower_bound(entries_.begin(), entries_.end(),
Entry{pc_offset}, EntryPositionLess{});
if (it == entries_.end() || it->pc_offset() != pc_offset) return nullptr;
- DCHECK_LE(num_locals_, it->num_values());
+ DCHECK_LE(num_locals_, it->stack_height());
return &*it;
}
+ const Entry::Value* FindValue(const Entry* entry, int stack_index) const {
+ while (true) {
+ if (auto* value = entry->FindChangedValue(stack_index)) {
+ // Check that the table was correctly minimized: If the previous entry
+ // also had a value for {stack_index}, it must be different.
+ DCHECK(entry == &entries_.front() ||
+ (entry - 1)->stack_height() <= stack_index ||
+ *FindValue(entry - 1, stack_index) != *value);
+ return value;
+ }
+ DCHECK_NE(&entries_.front(), entry);
+ --entry;
+ }
+ }
+
auto entries() const {
return base::make_iterator_range(entries_.begin(), entries_.end());
}
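
The debug side table is now delta-encoded: each entry stores only the stack slots whose description changed since the previous entry, and {FindValue} walks backwards through older entries until it finds the requested slot. A standalone sketch of that lookup with simplified value payloads; the names are illustrative, not the V8 types.

#include <algorithm>
#include <vector>

// One stack slot description; {payload} stands in for the constant/register/
// stack-offset union of the real table.
struct SlotValue {
  int index;    // stack slot index
  int payload;  // simplified description of where the value lives
};

// Delta-encoded entry: only slots that changed since the previous entry,
// sorted by slot index.
struct SideTableEntry {
  std::vector<SlotValue> changed;

  const SlotValue* FindChanged(int slot) const {
    auto it = std::lower_bound(
        changed.begin(), changed.end(), slot,
        [](const SlotValue& v, int slot) { return v.index < slot; });
    return it != changed.end() && it->index == slot ? &*it : nullptr;
  }
};

// Resolve a slot by scanning backwards to the most recent entry that changed
// it, mirroring the loop in DebugSideTable::FindValue.
const SlotValue* FindValue(const std::vector<SideTableEntry>& entries,
                           size_t entry_idx, int slot) {
  for (size_t i = entry_idx + 1; i-- > 0;) {
    if (const SlotValue* v = entries[i].FindChanged(slot)) return v;
  }
  return nullptr;  // slot not described by any entry up to {entry_idx}
}
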
@@ -154,6 +179,17 @@ class V8_EXPORT_PRIVATE DebugInfo {
WasmValue GetStackValue(int index, Address pc, Address fp,
Address debug_break_fp);
+ // Returns the name of the entity (with the given |index| and |kind|) derived
+ // from the exports table. If the entity is not exported, an empty reference
+ // will be returned instead.
+ WireBytesRef GetExportName(ImportExportKindCode kind, uint32_t index);
+
+ // Returns the module and field name of the entity (with the given |index|
+ // and |kind|) derived from the imports table. If the entity is not imported,
+ // a pair of empty references will be returned instead.
+ std::pair<WireBytesRef, WireBytesRef> GetImportName(ImportExportKindCode kind,
+ uint32_t index);
+
WireBytesRef GetLocalName(int func_index, int local_index);
void SetBreakpoint(int func_index, int offset, Isolate* current_isolate);
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 3dd8fa0ce7..339e6c9775 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -389,6 +389,16 @@ struct WasmEngine::IsolateInfo {
// Keep new modules in tiered down state.
bool keep_tiered_down = false;
+
+ // Elapsed time since last throw/rethrow/catch event.
+ base::ElapsedTimer throw_timer;
+ base::ElapsedTimer rethrow_timer;
+ base::ElapsedTimer catch_timer;
+
+ // Total number of exception events in this isolate.
+ int throw_count = 0;
+ int rethrow_count = 0;
+ int catch_count = 0;
};
struct WasmEngine::NativeModuleInfo {
@@ -453,7 +463,9 @@ MaybeHandle<AsmWasmData> WasmEngine::SyncCompileTranslatedAsmJs(
Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
Vector<const byte> asm_js_offset_table_bytes,
Handle<HeapNumber> uses_bitset, LanguageMode language_mode) {
- TRACE_EVENT0("v8.wasm", "wasm.SyncCompileTranslatedAsmJs");
+ int compilation_id = next_compilation_id_.fetch_add(1);
+ TRACE_EVENT1("v8.wasm", "wasm.SyncCompileTranslatedAsmJs", "id",
+ compilation_id);
ModuleOrigin origin = language_mode == LanguageMode::kSloppy
? kAsmJsSloppyOrigin
: kAsmJsStrictOrigin;
@@ -475,9 +487,9 @@ MaybeHandle<AsmWasmData> WasmEngine::SyncCompileTranslatedAsmJs(
// Transfer ownership of the WasmModule to the {Managed<WasmModule>} generated
// in {CompileToNativeModule}.
Handle<FixedArray> export_wrappers;
- std::shared_ptr<NativeModule> native_module =
- CompileToNativeModule(isolate, WasmFeatures::ForAsmjs(), thrower,
- std::move(result).value(), bytes, &export_wrappers);
+ std::shared_ptr<NativeModule> native_module = CompileToNativeModule(
+ isolate, WasmFeatures::ForAsmjs(), thrower, std::move(result).value(),
+ bytes, &export_wrappers, compilation_id);
if (!native_module) return {};
return AsmWasmData::New(isolate, std::move(native_module), export_wrappers,
@@ -499,7 +511,8 @@ Handle<WasmModuleObject> WasmEngine::FinalizeTranslatedAsmJs(
MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
const ModuleWireBytes& bytes) {
- TRACE_EVENT0("v8.wasm", "wasm.SyncCompile");
+ int compilation_id = next_compilation_id_.fetch_add(1);
+ TRACE_EVENT1("v8.wasm", "wasm.SyncCompile", "id", compilation_id);
ModuleResult result = DecodeWasmModule(
enabled, bytes.start(), bytes.end(), false, kWasmOrigin,
isolate->counters(), isolate->metrics_recorder(),
@@ -513,9 +526,9 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
// Transfer ownership of the WasmModule to the {Managed<WasmModule>} generated
// in {CompileToNativeModule}.
Handle<FixedArray> export_wrappers;
- std::shared_ptr<NativeModule> native_module =
- CompileToNativeModule(isolate, enabled, thrower,
- std::move(result).value(), bytes, &export_wrappers);
+ std::shared_ptr<NativeModule> native_module = CompileToNativeModule(
+ isolate, enabled, thrower, std::move(result).value(), bytes,
+ &export_wrappers, compilation_id);
if (!native_module) return {};
#ifdef DEBUG
@@ -596,7 +609,8 @@ void WasmEngine::AsyncCompile(
std::shared_ptr<CompilationResultResolver> resolver,
const ModuleWireBytes& bytes, bool is_shared,
const char* api_method_name_for_errors) {
- TRACE_EVENT0("v8.wasm", "wasm.AsyncCompile");
+ int compilation_id = next_compilation_id_.fetch_add(1);
+ TRACE_EVENT1("v8.wasm", "wasm.AsyncCompile", "id", compilation_id);
if (!FLAG_wasm_async_compilation) {
// Asynchronous compilation disabled; fall back on synchronous compilation.
ErrorThrower thrower(isolate, api_method_name_for_errors);
@@ -634,10 +648,10 @@ void WasmEngine::AsyncCompile(
std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
base::Memcpy(copy.get(), bytes.start(), bytes.length());
- AsyncCompileJob* job =
- CreateAsyncCompileJob(isolate, enabled, std::move(copy), bytes.length(),
- handle(isolate->context(), isolate),
- api_method_name_for_errors, std::move(resolver));
+ AsyncCompileJob* job = CreateAsyncCompileJob(
+ isolate, enabled, std::move(copy), bytes.length(),
+ handle(isolate->context(), isolate), api_method_name_for_errors,
+ std::move(resolver), compilation_id);
job->Start();
}
@@ -645,11 +659,13 @@ std::shared_ptr<StreamingDecoder> WasmEngine::StartStreamingCompilation(
Isolate* isolate, const WasmFeatures& enabled, Handle<Context> context,
const char* api_method_name,
std::shared_ptr<CompilationResultResolver> resolver) {
- TRACE_EVENT0("v8.wasm", "wasm.StartStreamingCompilation");
+ int compilation_id = next_compilation_id_.fetch_add(1);
+ TRACE_EVENT1("v8.wasm", "wasm.StartStreamingCompilation", "id",
+ compilation_id);
if (FLAG_wasm_async_compilation) {
AsyncCompileJob* job = CreateAsyncCompileJob(
isolate, enabled, std::unique_ptr<byte[]>(nullptr), 0, context,
- api_method_name, std::move(resolver));
+ api_method_name, std::move(resolver), compilation_id);
return job->CreateStreamingDecoder();
}
return StreamingDecoder::CreateSyncStreamingDecoder(
@@ -867,11 +883,11 @@ AsyncCompileJob* WasmEngine::CreateAsyncCompileJob(
Isolate* isolate, const WasmFeatures& enabled,
std::unique_ptr<byte[]> bytes_copy, size_t length, Handle<Context> context,
const char* api_method_name,
- std::shared_ptr<CompilationResultResolver> resolver) {
+ std::shared_ptr<CompilationResultResolver> resolver, int compilation_id) {
Handle<Context> incumbent_context = isolate->GetIncumbentContext();
AsyncCompileJob* job = new AsyncCompileJob(
isolate, enabled, std::move(bytes_copy), length, context,
- incumbent_context, api_method_name, std::move(resolver));
+ incumbent_context, api_method_name, std::move(resolver), compilation_id);
// Pass ownership to the unique_ptr in {async_compile_jobs_}.
base::MutexGuard guard(&mutex_);
async_compile_jobs_[job] = std::unique_ptr<AsyncCompileJob>(job);
@@ -992,10 +1008,14 @@ void WasmEngine::RemoveIsolate(Isolate* isolate) {
if (current_gc_info_) {
if (RemoveIsolateFromCurrentGC(isolate)) PotentiallyFinishCurrentGC();
}
- if (auto* task = info->log_codes_task) task->Cancel();
- for (auto& log_entry : info->code_to_log) {
- WasmCode::DecrementRefCount(VectorOf(log_entry.second.code));
+ if (auto* task = info->log_codes_task) {
+ task->Cancel();
+ for (auto& log_entry : info->code_to_log) {
+ WasmCode::DecrementRefCount(VectorOf(log_entry.second.code));
+ }
+ info->code_to_log.clear();
}
+ DCHECK(info->code_to_log.empty());
}
void WasmEngine::LogCode(Vector<WasmCode*> code_vec) {
@@ -1365,6 +1385,53 @@ WasmEngine::GetBarrierForBackgroundCompile() {
return operations_barrier_;
}
+namespace {
+void SampleExceptionEvent(base::ElapsedTimer* timer, TimedHistogram* counter) {
+ if (!timer->IsStarted()) {
+ timer->Start();
+ return;
+ }
+ counter->AddSample(static_cast<int>(timer->Elapsed().InMilliseconds()));
+ timer->Restart();
+}
+} // namespace
+
+void WasmEngine::SampleThrowEvent(Isolate* isolate) {
+ base::MutexGuard guard(&mutex_);
+ IsolateInfo* isolate_info = isolates_[isolate].get();
+ int& throw_count = isolate_info->throw_count;
+ // To avoid an int overflow, clip the count to the histogram's max value.
+ throw_count =
+ std::min(throw_count + 1, isolate->counters()->wasm_throw_count()->max());
+ isolate->counters()->wasm_throw_count()->AddSample(throw_count);
+ SampleExceptionEvent(&isolate_info->throw_timer,
+ isolate->counters()->wasm_time_between_throws());
+}
+
+void WasmEngine::SampleRethrowEvent(Isolate* isolate) {
+ base::MutexGuard guard(&mutex_);
+ IsolateInfo* isolate_info = isolates_[isolate].get();
+ int& rethrow_count = isolate_info->rethrow_count;
+ // To avoid an int overflow, clip the count to the histogram's max value.
+ rethrow_count = std::min(rethrow_count + 1,
+ isolate->counters()->wasm_rethrow_count()->max());
+ isolate->counters()->wasm_rethrow_count()->AddSample(rethrow_count);
+ SampleExceptionEvent(&isolate_info->rethrow_timer,
+ isolate->counters()->wasm_time_between_rethrows());
+}
+
+void WasmEngine::SampleCatchEvent(Isolate* isolate) {
+ base::MutexGuard guard(&mutex_);
+ IsolateInfo* isolate_info = isolates_[isolate].get();
+ int& catch_count = isolate_info->catch_count;
+ // To avoid an int overflow, clip the count to the histogram's max value.
+ catch_count =
+ std::min(catch_count + 1, isolate->counters()->wasm_catch_count()->max());
+ isolate->counters()->wasm_catch_count()->AddSample(catch_count);
+ SampleExceptionEvent(&isolate_info->catch_timer,
+ isolate->counters()->wasm_time_between_catch());
+}
+
void WasmEngine::TriggerGC(int8_t gc_sequence_index) {
DCHECK(!mutex_.TryLock());
DCHECK_NULL(current_gc_info_);
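
The sampling functions added above report, per isolate, the elapsed time between consecutive throw/rethrow/catch events and clip the running event counters at the histogram maximum to avoid overflow. A minimal stand-in for that timer-and-clip logic, using std::chrono instead of base::ElapsedTimer; this is an illustrative sketch, not the V8 API.

#include <algorithm>
#include <chrono>
#include <cstdint>

// On every event, report the time since the previous event of the same kind,
// then restart the timer. The first event only starts the timer.
class EventIntervalSampler {
 public:
  // Returns milliseconds since the last event, or -1 for the first event.
  int64_t Sample() {
    auto now = std::chrono::steady_clock::now();
    if (!started_) {
      started_ = true;
      last_ = now;
      return -1;
    }
    auto elapsed =
        std::chrono::duration_cast<std::chrono::milliseconds>(now - last_);
    last_ = now;
    return elapsed.count();
  }

 private:
  bool started_ = false;
  std::chrono::steady_clock::time_point last_;
};

// Counter update with clipping, as in SampleThrowEvent and friends.
int ClippedIncrement(int count, int histogram_max) {
  return std::min(count + 1, histogram_max);
}
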
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index e3013013e9..d04578e557 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -339,6 +339,10 @@ class V8_EXPORT_PRIVATE WasmEngine {
// preventing this object from being destroyed.
std::shared_ptr<OperationsBarrier> GetBarrierForBackgroundCompile();
+ void SampleThrowEvent(Isolate*);
+ void SampleRethrowEvent(Isolate*);
+ void SampleCatchEvent(Isolate*);
+
// Call on process start and exit.
static void InitializeOncePerProcess();
static void GlobalTearDown();
@@ -357,7 +361,7 @@ class V8_EXPORT_PRIVATE WasmEngine {
Isolate* isolate, const WasmFeatures& enabled,
std::unique_ptr<byte[]> bytes_copy, size_t length,
Handle<Context> context, const char* api_method_name,
- std::shared_ptr<CompilationResultResolver> resolver);
+ std::shared_ptr<CompilationResultResolver> resolver, int compilation_id);
void TriggerGC(int8_t gc_sequence_index);
@@ -378,6 +382,8 @@ class V8_EXPORT_PRIVATE WasmEngine {
std::unique_ptr<gdb_server::GdbServer> gdb_server_;
#endif // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
+ std::atomic<int> next_compilation_id_{0};
+
// This mutex protects all information which is mutated concurrently or
// fields that are initialized lazily on the first access.
base::Mutex mutex_;
diff --git a/deps/v8/src/wasm/wasm-feature-flags.h b/deps/v8/src/wasm/wasm-feature-flags.h
index a8b17bb554..37e985ddff 100644
--- a/deps/v8/src/wasm/wasm-feature-flags.h
+++ b/deps/v8/src/wasm/wasm-feature-flags.h
@@ -42,6 +42,7 @@
/* Exception handling proposal. */ \
/* https://github.com/WebAssembly/exception-handling */ \
/* V8 side owner: thibaudm */ \
+ /* Staged in v8.9 */ \
V(eh, "exception handling opcodes", false) \
\
/* Reference Types, a.k.a. reftypes proposal. */ \
@@ -80,13 +81,6 @@
// Shipped features (enabled by default). Remove the feature flag once they hit
// stable and are expected to stay enabled.
#define FOREACH_WASM_SHIPPED_FEATURE_FLAG(V) /* (force 80 columns) */ \
- /* Bulk memory operations. */ \
- /* https://github.com/webassembly/bulk-memory-operations */ \
- /* V8 side owner: binji */ \
- /* Shipped in v7.5. */ \
- /* ITS: https://groups.google.com/forum/#!topic/v8-users/zM05lYEBVog */ \
- V(bulk_memory, "bulk memory opcodes", true) \
- \
/* Multi-value proposal. */ \
/* https://github.com/WebAssembly/multi-value */ \
/* V8 side owner: thibaudm */ \
diff --git a/deps/v8/src/wasm/wasm-features.cc b/deps/v8/src/wasm/wasm-features.cc
index 42ae237ed2..c236df670c 100644
--- a/deps/v8/src/wasm/wasm-features.cc
+++ b/deps/v8/src/wasm/wasm-features.cc
@@ -24,12 +24,12 @@ WasmFeatures WasmFeatures::FromFlags() {
// static
WasmFeatures WasmFeatures::FromIsolate(Isolate* isolate) {
WasmFeatures features = WasmFeatures::FromFlags();
- if (isolate->AreWasmThreadsEnabled(handle(isolate->context(), isolate))) {
- features.Add(kFeature_threads);
- }
if (isolate->IsWasmSimdEnabled(handle(isolate->context(), isolate))) {
features.Add(kFeature_simd);
}
+ if (isolate->AreWasmExceptionsEnabled(handle(isolate->context(), isolate))) {
+ features.Add(kFeature_eh);
+ }
return features;
}
diff --git a/deps/v8/src/wasm/wasm-features.h b/deps/v8/src/wasm/wasm-features.h
index 593ed70cb9..92dbc4a543 100644
--- a/deps/v8/src/wasm/wasm-features.h
+++ b/deps/v8/src/wasm/wasm-features.h
@@ -35,7 +35,7 @@ class WasmFeatures : public base::EnumSet<WasmFeature> {
// Simplified getters. Use {has_foo()} instead of {contains(kFeature_foo)}.
#define DECL_FEATURE_GETTER(feat, ...) \
- bool has_##feat() const { return contains(kFeature_##feat); }
+ constexpr bool has_##feat() const { return contains(kFeature_##feat); }
FOREACH_WASM_FEATURE(DECL_FEATURE_GETTER)
#undef DECL_FEATURE_GETTER
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index db9f34528c..163b89bc73 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -1063,7 +1063,7 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table()");
if (!args.IsConstructCall()) {
thrower.TypeError("WebAssembly.Table must be invoked with 'new'");
return;
@@ -1144,7 +1144,7 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
// The descriptor's 'maximum'.
- int64_t maximum = -1;
+ int64_t maximum = i::WasmMemoryObject::kNoMaximum;
if (!GetOptionalIntegerProperty(isolate, &thrower, context, descriptor,
v8_str(isolate, "maximum"), nullptr, &maximum,
initial, i::wasm::max_mem_pages())) {
@@ -1176,8 +1176,8 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
i::Handle<i::JSObject> memory_obj;
- if (!i::WasmMemoryObject::New(i_isolate, static_cast<uint32_t>(initial),
- static_cast<uint32_t>(maximum), shared)
+ if (!i::WasmMemoryObject::New(i_isolate, static_cast<int>(initial),
+ static_cast<int>(maximum), shared)
.ToHandle(&memory_obj)) {
thrower.RangeError("could not allocate memory");
return;
@@ -1223,9 +1223,6 @@ bool GetValueType(Isolate* isolate, MaybeLocal<Value> maybe,
string->StringEquals(v8_str(isolate, "anyfunc"))) {
// The JS api spec uses 'anyfunc' instead of 'funcref'.
*type = i::wasm::kWasmFuncRef;
- } else if (enabled_features.has_eh() &&
- string->StringEquals(v8_str(isolate, "exnref"))) {
- *type = i::wasm::kWasmExnRef;
} else if (enabled_features.has_gc() &&
string->StringEquals(v8_str(isolate, "eqref"))) {
*type = i::wasm::kWasmEqRef;
@@ -1299,7 +1296,7 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Convert value to a WebAssembly value, the default value is 0.
Local<v8::Value> value = Local<Value>::Cast(args[1]);
switch (type.kind()) {
- case i::wasm::ValueType::kI32: {
+ case i::wasm::kI32: {
int32_t i32_value = 0;
if (!value->IsUndefined()) {
v8::Local<v8::Int32> int32_value;
@@ -1309,7 +1306,7 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
global_obj->SetI32(i32_value);
break;
}
- case i::wasm::ValueType::kI64: {
+ case i::wasm::kI64: {
int64_t i64_value = 0;
if (!value->IsUndefined()) {
v8::Local<v8::BigInt> bigint_value;
@@ -1319,7 +1316,7 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
global_obj->SetI64(i64_value);
break;
}
- case i::wasm::ValueType::kF32: {
+ case i::wasm::kF32: {
float f32_value = 0;
if (!value->IsUndefined()) {
double f64_value = 0;
@@ -1331,7 +1328,7 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
global_obj->SetF32(f32_value);
break;
}
- case i::wasm::ValueType::kF64: {
+ case i::wasm::kF64: {
double f64_value = 0;
if (!value->IsUndefined()) {
v8::Local<v8::Number> number_value;
@@ -1341,11 +1338,10 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
global_obj->SetF64(f64_value);
break;
}
- case i::wasm::ValueType::kRef:
- case i::wasm::ValueType::kOptRef: {
+ case i::wasm::kRef:
+ case i::wasm::kOptRef: {
switch (type.heap_representation()) {
case i::wasm::HeapType::kExtern:
- case i::wasm::HeapType::kExn:
case i::wasm::HeapType::kAny: {
if (args.Length() < 2) {
// When no initial value is provided, we have to use the WebAssembly
@@ -1372,21 +1368,27 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
break;
}
+ case internal::wasm::HeapType::kBottom:
+ UNREACHABLE();
case i::wasm::HeapType::kEq:
+ case internal::wasm::HeapType::kI31:
+ case internal::wasm::HeapType::kData:
default:
// TODO(7748): Implement these.
UNIMPLEMENTED();
+ break;
}
break;
}
- case i::wasm::ValueType::kRtt:
+ case i::wasm::kRtt:
+ case i::wasm::kRttWithDepth:
// TODO(7748): Implement.
UNIMPLEMENTED();
- case i::wasm::ValueType::kI8:
- case i::wasm::ValueType::kI16:
- case i::wasm::ValueType::kStmt:
- case i::wasm::ValueType::kS128:
- case i::wasm::ValueType::kBottom:
+ case i::wasm::kI8:
+ case i::wasm::kI16:
+ case i::wasm::kStmt:
+ case i::wasm::kS128:
+ case i::wasm::kBottom:
UNREACHABLE();
}
@@ -1818,32 +1820,35 @@ void WebAssemblyGlobalGetValueCommon(
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
switch (receiver->type().kind()) {
- case i::wasm::ValueType::kI32:
+ case i::wasm::kI32:
return_value.Set(receiver->GetI32());
break;
- case i::wasm::ValueType::kI64: {
+ case i::wasm::kI64: {
Local<BigInt> value = BigInt::New(isolate, receiver->GetI64());
return_value.Set(value);
break;
}
- case i::wasm::ValueType::kF32:
+ case i::wasm::kF32:
return_value.Set(receiver->GetF32());
break;
- case i::wasm::ValueType::kF64:
+ case i::wasm::kF64:
return_value.Set(receiver->GetF64());
break;
- case i::wasm::ValueType::kS128:
+ case i::wasm::kS128:
thrower.TypeError("Can't get the value of s128 WebAssembly.Global");
break;
- case i::wasm::ValueType::kRef:
- case i::wasm::ValueType::kOptRef:
+ case i::wasm::kRef:
+ case i::wasm::kOptRef:
switch (receiver->type().heap_representation()) {
case i::wasm::HeapType::kExtern:
case i::wasm::HeapType::kFunc:
- case i::wasm::HeapType::kExn:
case i::wasm::HeapType::kAny:
return_value.Set(Utils::ToLocal(receiver->GetRef()));
break;
+ case internal::wasm::HeapType::kBottom:
+ UNREACHABLE();
+ case internal::wasm::HeapType::kI31:
+ case internal::wasm::HeapType::kData:
case i::wasm::HeapType::kEq:
default:
// TODO(7748): Implement these.
@@ -1851,13 +1856,14 @@ void WebAssemblyGlobalGetValueCommon(
break;
}
break;
- case i::wasm::ValueType::kRtt:
+ case i::wasm::kRtt:
+ case i::wasm::kRttWithDepth:
UNIMPLEMENTED(); // TODO(7748): Implement.
break;
- case i::wasm::ValueType::kI8:
- case i::wasm::ValueType::kI16:
- case i::wasm::ValueType::kBottom:
- case i::wasm::ValueType::kStmt:
+ case i::wasm::kI8:
+ case i::wasm::kI16:
+ case i::wasm::kBottom:
+ case i::wasm::kStmt:
UNREACHABLE();
}
}
@@ -1893,38 +1899,37 @@ void WebAssemblyGlobalSetValue(
}
switch (receiver->type().kind()) {
- case i::wasm::ValueType::kI32: {
+ case i::wasm::kI32: {
int32_t i32_value = 0;
if (!args[0]->Int32Value(context).To(&i32_value)) return;
receiver->SetI32(i32_value);
break;
}
- case i::wasm::ValueType::kI64: {
+ case i::wasm::kI64: {
v8::Local<v8::BigInt> bigint_value;
if (!args[0]->ToBigInt(context).ToLocal(&bigint_value)) return;
receiver->SetI64(bigint_value->Int64Value());
break;
}
- case i::wasm::ValueType::kF32: {
+ case i::wasm::kF32: {
double f64_value = 0;
if (!args[0]->NumberValue(context).To(&f64_value)) return;
receiver->SetF32(i::DoubleToFloat32(f64_value));
break;
}
- case i::wasm::ValueType::kF64: {
+ case i::wasm::kF64: {
double f64_value = 0;
if (!args[0]->NumberValue(context).To(&f64_value)) return;
receiver->SetF64(f64_value);
break;
}
- case i::wasm::ValueType::kS128:
+ case i::wasm::kS128:
thrower.TypeError("Can't set the value of s128 WebAssembly.Global");
break;
- case i::wasm::ValueType::kRef:
- case i::wasm::ValueType::kOptRef:
+ case i::wasm::kRef:
+ case i::wasm::kOptRef:
switch (receiver->type().heap_representation()) {
case i::wasm::HeapType::kExtern:
- case i::wasm::HeapType::kExn:
case i::wasm::HeapType::kAny:
receiver->SetExternRef(Utils::OpenHandle(*args[0]));
break;
@@ -1936,7 +1941,10 @@ void WebAssemblyGlobalSetValue(
}
break;
}
-
+ case internal::wasm::HeapType::kBottom:
+ UNREACHABLE();
+ case internal::wasm::HeapType::kI31:
+ case internal::wasm::HeapType::kData:
case i::wasm::HeapType::kEq:
default:
// TODO(7748): Implement these.
@@ -1944,14 +1952,15 @@ void WebAssemblyGlobalSetValue(
break;
}
break;
- case i::wasm::ValueType::kRtt:
+ case i::wasm::kRtt:
+ case i::wasm::kRttWithDepth:
// TODO(7748): Implement.
UNIMPLEMENTED();
break;
- case i::wasm::ValueType::kI8:
- case i::wasm::ValueType::kI16:
- case i::wasm::ValueType::kBottom:
- case i::wasm::ValueType::kStmt:
+ case i::wasm::kI8:
+ case i::wasm::kI16:
+ case i::wasm::kBottom:
+ case i::wasm::kStmt:
UNREACHABLE();
}
}
@@ -2229,19 +2238,26 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
v8_str(isolate, "WebAssembly.Global"), ro_attributes);
// Setup Exception
+ Handle<String> exception_name = v8_str(isolate, "Exception");
+ Handle<JSFunction> exception_constructor =
+ CreateFunc(isolate, exception_name, WebAssemblyException, true,
+ SideEffectType::kHasSideEffect);
+ exception_constructor->shared().set_length(1);
if (enabled_features.has_eh()) {
- Handle<JSFunction> exception_constructor = InstallConstructorFunc(
- isolate, webassembly, "Exception", WebAssemblyException);
- context->set_wasm_exception_constructor(*exception_constructor);
- SetDummyInstanceTemplate(isolate, exception_constructor);
- JSFunction::EnsureHasInitialMap(exception_constructor);
- Handle<JSObject> exception_proto(
- JSObject::cast(exception_constructor->instance_prototype()), isolate);
- Handle<Map> exception_map = isolate->factory()->NewMap(
- i::WASM_EXCEPTION_OBJECT_TYPE, WasmExceptionObject::kHeaderSize);
- JSFunction::SetInitialMap(exception_constructor, exception_map,
- exception_proto);
- }
+ JSObject::AddProperty(isolate, webassembly, exception_name,
+ exception_constructor, DONT_ENUM);
+ }
+ // Install the constructor on the context unconditionally so that it is also
+ // available when the feature is enabled via the origin trial.
+ context->set_wasm_exception_constructor(*exception_constructor);
+ SetDummyInstanceTemplate(isolate, exception_constructor);
+ JSFunction::EnsureHasInitialMap(exception_constructor);
+ Handle<JSObject> exception_proto(
+ JSObject::cast(exception_constructor->instance_prototype()), isolate);
+ Handle<Map> exception_map = isolate->factory()->NewMap(
+ i::WASM_EXCEPTION_OBJECT_TYPE, WasmExceptionObject::kHeaderSize);
+ JSFunction::SetInitialMap(exception_constructor, exception_map,
+ exception_proto);
// Setup Function
if (enabled_features.has_type_reflection()) {
diff --git a/deps/v8/src/wasm/wasm-linkage.h b/deps/v8/src/wasm/wasm-linkage.h
index 7e56ea6eae..fd27d7108d 100644
--- a/deps/v8/src/wasm/wasm-linkage.h
+++ b/deps/v8/src/wasm/wasm-linkage.h
@@ -102,6 +102,18 @@ constexpr Register kGpReturnRegisters[] = {r2, r3};
constexpr DoubleRegister kFpParamRegisters[] = {d0, d2};
constexpr DoubleRegister kFpReturnRegisters[] = {d0, d2};
+#elif V8_TARGET_ARCH_RISCV64
+// ===========================================================================
+// == riscv64 =================================================================
+// ===========================================================================
+// Note that kGpParamRegisters and kFpParamRegisters are used in
+// Builtins::Generate_WasmCompileLazy (builtins-riscv64.cc)
+constexpr Register kGpParamRegisters[] = {a0, a2, a3, a4, a5, a6};
+constexpr Register kGpReturnRegisters[] = {a0, a1};
+constexpr DoubleRegister kFpParamRegisters[] = {fa0, fa1, fa2, fa3,
+ fa4, fa5, fa6};
+constexpr DoubleRegister kFpReturnRegisters[] = {fa0, fa1};
+
#else
// ===========================================================================
// == unknown ================================================================
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index f6a9bfb76b..a3bd33c1d6 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -248,6 +248,7 @@ WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
data_segments_(zone),
indirect_functions_(zone),
globals_(zone),
+ exceptions_(zone),
signature_map_(zone),
start_function_index_(-1),
min_memory_size_(16),
@@ -280,6 +281,14 @@ uint32_t WasmModuleBuilder::AddSignature(FunctionSig* sig) {
return index;
}
+uint32_t WasmModuleBuilder::AddException(FunctionSig* type) {
+ DCHECK_EQ(0, type->return_count());
+ int type_index = AddSignature(type);
+ uint32_t except_index = static_cast<uint32_t>(exceptions_.size());
+ exceptions_.push_back(type_index);
+ return except_index;
+}
+
uint32_t WasmModuleBuilder::AddStructType(StructType* type) {
uint32_t index = static_cast<uint32_t>(types_.size());
types_.push_back(Type(type));
@@ -414,12 +423,13 @@ void WasmModuleBuilder::SetHasSharedMemory() { has_shared_memory_ = true; }
namespace {
void WriteValueType(ZoneBuffer* buffer, const ValueType& type) {
buffer->write_u8(type.value_type_code());
- if (type.has_depth()) {
- buffer->write_u32v(type.depth());
- }
if (type.encoding_needs_heap_type()) {
buffer->write_i32v(type.heap_type().code());
}
+ if (type.is_rtt()) {
+ if (type.has_depth()) buffer->write_u32v(type.depth());
+ buffer->write_u32v(type.ref_index());
+ }
}
void WriteGlobalInitializer(ZoneBuffer* buffer, const WasmInitExpr& init,
@@ -461,34 +471,35 @@ void WriteGlobalInitializer(ZoneBuffer* buffer, const WasmInitExpr& init,
case WasmInitExpr::kNone: {
// No initializer, emit a default value.
switch (type.kind()) {
- case ValueType::kI32:
+ case kI32:
buffer->write_u8(kExprI32Const);
// LEB encoding of 0.
buffer->write_u8(0);
break;
- case ValueType::kI64:
+ case kI64:
buffer->write_u8(kExprI64Const);
// LEB encoding of 0.
buffer->write_u8(0);
break;
- case ValueType::kF32:
+ case kF32:
buffer->write_u8(kExprF32Const);
buffer->write_f32(0.f);
break;
- case ValueType::kF64:
+ case kF64:
buffer->write_u8(kExprF64Const);
buffer->write_f64(0.);
break;
- case ValueType::kOptRef:
+ case kOptRef:
buffer->write_u8(kExprRefNull);
break;
- case ValueType::kI8:
- case ValueType::kI16:
- case ValueType::kStmt:
- case ValueType::kS128:
- case ValueType::kBottom:
- case ValueType::kRef:
- case ValueType::kRtt:
+ case kI8:
+ case kI16:
+ case kStmt:
+ case kS128:
+ case kBottom:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
UNREACHABLE();
}
break;
@@ -497,17 +508,15 @@ void WriteGlobalInitializer(ZoneBuffer* buffer, const WasmInitExpr& init,
STATIC_ASSERT((kExprRttCanon >> 8) == kGCPrefix);
buffer->write_u8(kGCPrefix);
buffer->write_u8(static_cast<uint8_t>(kExprRttCanon));
- buffer->write_i32v(HeapType(init.immediate().heap_type).code());
+ buffer->write_i32v(static_cast<int32_t>(init.immediate().index));
break;
case WasmInitExpr::kRttSub:
// The operand to rtt.sub must be emitted first.
WriteGlobalInitializer(buffer, *init.operand(), kWasmBottom);
- // TODO(7748): If immediates for rtts remain in the standard, adapt this
- // to emit them.
STATIC_ASSERT((kExprRttSub >> 8) == kGCPrefix);
buffer->write_u8(kGCPrefix);
buffer->write_u8(static_cast<uint8_t>(kExprRttSub));
- buffer->write_i32v(HeapType(init.immediate().heap_type).code());
+ buffer->write_i32v(static_cast<int32_t>(init.immediate().index));
break;
}
}
@@ -623,6 +632,17 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
FixupSection(buffer, start);
}
+ // Emit event section.
+ if (exceptions_.size() > 0) {
+ size_t start = EmitSection(kExceptionSectionCode, buffer);
+ buffer->write_size(exceptions_.size());
+ for (int type : exceptions_) {
+ buffer->write_u32v(kExceptionAttribute);
+ buffer->write_u32v(type);
+ }
+ FixupSection(buffer, start);
+ }
+
// == Emit globals ===========================================================
if (globals_.size() > 0) {
size_t start = EmitSection(kGlobalSectionCode, buffer);
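
The event (exception) section emitted above contains the exception count followed by, per declared exception, an attribute and the signature index, all LEB128-encoded; the section code and size prefix are handled by EmitSection/FixupSection. Below is a small standalone encoder for just the section body. It assumes the exception attribute value is 0, as in the exception-handling proposal; names are illustrative.

#include <cstdint>
#include <vector>

// Unsigned LEB128, as used throughout the wasm binary format.
void WriteU32LEB(std::vector<uint8_t>* out, uint32_t value) {
  do {
    uint8_t byte = value & 0x7f;
    value >>= 7;
    if (value != 0) byte |= 0x80;
    out->push_back(byte);
  } while (value != 0);
}

// Encode the body of the exception/event section: a count, then one
// (attribute, signature index) pair per declared exception.
std::vector<uint8_t> EncodeExceptionSectionBody(
    const std::vector<uint32_t>& exception_sig_indices) {
  std::vector<uint8_t> body;
  WriteU32LEB(&body, static_cast<uint32_t>(exception_sig_indices.size()));
  for (uint32_t sig_index : exception_sig_indices) {
    WriteU32LEB(&body, 0);  // assumed exception attribute
    WriteU32LEB(&body, sig_index);
  }
  return body;
}
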
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index 76bf1894cd..f93b981d7c 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -250,6 +250,7 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
bool mutability, Vector<const char> module = {});
void AddDataSegment(const byte* data, uint32_t size, uint32_t dest);
uint32_t AddSignature(FunctionSig* sig);
+ uint32_t AddException(FunctionSig* type);
uint32_t AddStructType(StructType* type);
uint32_t AddArrayType(ArrayType* type);
// In the current implementation, it's supported to have uninitialized slots
@@ -284,6 +285,12 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
return types_[index].sig;
}
+ int NumExceptions() { return static_cast<int>(exceptions_.size()); }
+
+ FunctionSig* GetExceptionType(int index) {
+ return types_[exceptions_[index]].sig;
+ }
+
private:
struct Type {
enum Kind { kFunctionSig, kStructType, kArrayType };
@@ -351,6 +358,7 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
ZoneVector<WasmDataSegment> data_segments_;
ZoneVector<uint32_t> indirect_functions_;
ZoneVector<WasmGlobal> globals_;
+ ZoneVector<int> exceptions_;
ZoneUnorderedMap<FunctionSig, uint32_t> signature_map_;
int start_function_index_;
uint32_t max_table_size_ = 0;
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index ddd89616f8..c336dc5f7d 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -46,29 +46,6 @@ WireBytesRef LazilyGeneratedNames::LookupFunctionName(
return it->second;
}
-std::pair<WireBytesRef, WireBytesRef>
-LazilyGeneratedNames::LookupNameFromImportsAndExports(
- ImportExportKindCode kind, uint32_t index,
- Vector<const WasmImport> import_table,
- Vector<const WasmExport> export_table) const {
- base::MutexGuard lock(&mutex_);
- DCHECK(kind == kExternalGlobal || kind == kExternalMemory ||
- kind == kExternalTable);
- auto& names = kind == kExternalGlobal
- ? global_names_
- : kind == kExternalMemory ? memory_names_ : table_names_;
- if (!names) {
- names.reset(
- new std::unordered_map<uint32_t,
- std::pair<WireBytesRef, WireBytesRef>>());
- GenerateNamesFromImportsAndExports(kind, import_table, export_table,
- names.get());
- }
- auto it = names->find(index);
- if (it == names->end()) return {};
- return it->second;
-}
-
// static
int MaxNumExportWrappers(const WasmModule* module) {
// For each signature there may exist a wrapper, both for imported and
@@ -139,7 +116,7 @@ void LazilyGeneratedNames::AddForTesting(int function_index,
AsmJsOffsetInformation::AsmJsOffsetInformation(
Vector<const byte> encoded_offsets)
- : encoded_offsets_(OwnedVector<uint8_t>::Of(encoded_offsets)) {}
+ : encoded_offsets_(OwnedVector<const uint8_t>::Of(encoded_offsets)) {}
AsmJsOffsetInformation::~AsmJsOffsetInformation() = default;
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index ac3fe401cb..439be1d2c7 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -187,30 +187,15 @@ class V8_EXPORT_PRIVATE LazilyGeneratedNames {
uint32_t function_index,
Vector<const WasmExport> export_table) const;
- // For memory and global.
- std::pair<WireBytesRef, WireBytesRef> LookupNameFromImportsAndExports(
- ImportExportKindCode kind, uint32_t index,
- const Vector<const WasmImport> import_table,
- const Vector<const WasmExport> export_table) const;
-
void AddForTesting(int function_index, WireBytesRef name);
private:
- // {function_names_}, {global_names_}, {memory_names_} and {table_names_} are
- // populated lazily after decoding, and therefore need a mutex to protect
- // concurrent modifications from multiple {WasmModuleObject}.
+  // {function_names_} is populated lazily after decoding, and therefore
+  // needs a mutex to protect concurrent modifications from multiple
+  // {WasmModuleObject}s.
mutable base::Mutex mutex_;
mutable std::unique_ptr<std::unordered_map<uint32_t, WireBytesRef>>
function_names_;
- mutable std::unique_ptr<
- std::unordered_map<uint32_t, std::pair<WireBytesRef, WireBytesRef>>>
- global_names_;
- mutable std::unique_ptr<
- std::unordered_map<uint32_t, std::pair<WireBytesRef, WireBytesRef>>>
- memory_names_;
- mutable std::unique_ptr<
- std::unordered_map<uint32_t, std::pair<WireBytesRef, WireBytesRef>>>
- table_names_;
};
class V8_EXPORT_PRIVATE AsmJsOffsetInformation {
@@ -372,7 +357,6 @@ struct WasmTable {
if (!type.is_nullable()) return false;
HeapType heap_type = type.heap_type();
return heap_type == HeapType::kFunc || heap_type == HeapType::kExtern ||
- heap_type == HeapType::kExn ||
(module != nullptr && heap_type.is_index() &&
module->has_signature(heap_type.ref_index()));
}
@@ -495,14 +479,6 @@ inline int declared_function_index(const WasmModule* module, int func_index) {
return declared_idx;
}
-inline bool is_data_ref_type(ValueType type, const WasmModule* module) {
- // TODO(7748): When we implement dataref (=any struct or array), support
- // that here.
- if (!type.has_index()) return false;
- uint32_t index = type.ref_index();
- return module->has_struct(index) || module->has_array(index);
-}
-
// TruncatedUserString makes it easy to output names up to a certain length, and
// output a truncation followed by '...' if they exceed a limit.
// Use like this:
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 4e6375ad13..2c76a4ec18 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -130,8 +130,7 @@ ACCESSORS(WasmGlobalObject, untagged_buffer, JSArrayBuffer,
kUntaggedBufferOffset)
ACCESSORS(WasmGlobalObject, tagged_buffer, FixedArray, kTaggedBufferOffset)
SMI_ACCESSORS(WasmGlobalObject, offset, kOffsetOffset)
-// TODO(7748): This will not suffice to hold the 32-bit encoding of a ValueType.
-// We need to devise and encoding that does, and also encodes is_mutable.
+// TODO(7748): Try to come up with some encoding that includes is_mutable?
SMI_ACCESSORS(WasmGlobalObject, raw_type, kRawTypeOffset)
SMI_ACCESSORS(WasmGlobalObject, is_mutable, kIsMutableOffset)
@@ -167,7 +166,7 @@ double WasmGlobalObject::GetF64() {
}
Handle<Object> WasmGlobalObject::GetRef() {
- // We use this getter for externref, funcref, and exnref.
+ // We use this getter for externref and funcref.
DCHECK(type().is_reference_type());
return handle(tagged_buffer().get(offset()), GetIsolate());
}
@@ -190,7 +189,6 @@ void WasmGlobalObject::SetF64(double value) {
void WasmGlobalObject::SetExternRef(Handle<Object> value) {
DCHECK(type().is_reference_to(wasm::HeapType::kExtern) ||
- type().is_reference_to(wasm::HeapType::kExn) ||
type().is_reference_to(wasm::HeapType::kAny));
tagged_buffer().set(offset(), *value);
}
@@ -240,6 +238,8 @@ PRIMITIVE_ACCESSORS(WasmInstanceObject, hook_on_function_call_address, Address,
kHookOnFunctionCallAddressOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, num_liftoff_function_calls_array,
uint32_t*, kNumLiftoffFunctionCallsArrayOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, break_on_entry, uint8_t,
+ kBreakOnEntryOffset)
ACCESSORS(WasmInstanceObject, module_object, WasmModuleObject,
kModuleObjectOffset)
@@ -412,7 +412,7 @@ wasm::StructType* WasmStruct::type(Map map) {
wasm::StructType* WasmStruct::GcSafeType(Map map) {
DCHECK_EQ(WASM_STRUCT_TYPE, map.instance_type());
- HeapObject raw = HeapObject::cast(map.constructor_or_backpointer());
+ HeapObject raw = HeapObject::cast(map.constructor_or_back_pointer());
MapWord map_word = raw.map_word();
HeapObject forwarded =
map_word.IsForwardingAddress() ? map_word.ToForwardingAddress() : raw;
@@ -435,7 +435,7 @@ wasm::ArrayType* WasmArray::type(Map map) {
wasm::ArrayType* WasmArray::GcSafeType(Map map) {
DCHECK_EQ(WASM_ARRAY_TYPE, map.instance_type());
- HeapObject raw = HeapObject::cast(map.constructor_or_backpointer());
+ HeapObject raw = HeapObject::cast(map.constructor_or_back_pointer());
MapWord map_word = raw.map_word();
HeapObject forwarded =
map_word.IsForwardingAddress() ? map_word.ToForwardingAddress() : raw;
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 3c9298cd09..ce74e73207 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -445,7 +445,6 @@ void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
switch (table->type().heap_representation()) {
case wasm::HeapType::kExtern:
- case wasm::HeapType::kExn:
case wasm::HeapType::kAny:
entries->set(entry_index, *entry);
return;
@@ -453,6 +452,7 @@ void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
SetFunctionTableEntry(isolate, table, entries, entry_index, entry);
return;
case wasm::HeapType::kEq:
+ case wasm::HeapType::kData:
case wasm::HeapType::kI31:
// TODO(7748): Implement once we have a story for struct/arrays/i31ref in
// JS.
@@ -490,7 +490,6 @@ Handle<Object> WasmTableObject::Get(Isolate* isolate,
switch (table->type().heap_representation()) {
case wasm::HeapType::kExtern:
- case wasm::HeapType::kExn:
return entry;
case wasm::HeapType::kFunc:
if (WasmExportedFunction::IsWasmExportedFunction(*entry) ||
@@ -501,6 +500,7 @@ Handle<Object> WasmTableObject::Get(Isolate* isolate,
break;
case wasm::HeapType::kEq:
case wasm::HeapType::kI31:
+ case wasm::HeapType::kData:
case wasm::HeapType::kAny:
// TODO(7748): Implement once we have a story for struct/arrays/i31ref in
// JS.
@@ -816,8 +816,7 @@ void SetInstanceMemory(Handle<WasmInstanceObject> instance,
} // namespace
Handle<WasmMemoryObject> WasmMemoryObject::New(
- Isolate* isolate, MaybeHandle<JSArrayBuffer> maybe_buffer,
- uint32_t maximum) {
+ Isolate* isolate, MaybeHandle<JSArrayBuffer> maybe_buffer, int maximum) {
Handle<JSArrayBuffer> buffer;
if (!maybe_buffer.ToHandle(&buffer)) {
// If no buffer was provided, create a zero-length one.
@@ -839,22 +838,38 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(
backing_store->AttachSharedWasmMemoryObject(isolate, memory_object);
}
+ // For debugging purposes we memorize a link from the JSArrayBuffer
+  // to its owning WasmMemoryObject instance.
+ Handle<Symbol> symbol = isolate->factory()->array_buffer_wasm_memory_symbol();
+ JSObject::SetProperty(isolate, buffer, symbol, memory_object).Check();
+
return memory_object;
}
MaybeHandle<WasmMemoryObject> WasmMemoryObject::New(Isolate* isolate,
- uint32_t initial,
- uint32_t maximum,
+ int initial, int maximum,
SharedFlag shared) {
- auto heuristic_maximum = maximum;
+ bool has_maximum = maximum != kNoMaximum;
+ int heuristic_maximum = maximum;
+ if (!has_maximum) {
+ heuristic_maximum = static_cast<int>(wasm::max_mem_pages());
+ }
+
#ifdef V8_TARGET_ARCH_32_BIT
- // TODO(wasm): use a better heuristic for reserving more than the initial
- // number of pages on 32-bit systems. Being too greedy in reserving capacity
- // limits the number of memories that can be allocated, causing OOMs in many
- // tests. For now, on 32-bit we never reserve more than initial, unless the
- // memory is shared.
- if (shared == SharedFlag::kNotShared || !FLAG_wasm_grow_shared_memory) {
- heuristic_maximum = initial;
+ if (shared == SharedFlag::kNotShared) {
+ // On 32-bit platforms we need a heuristic here to balance overall memory
+ // and address space consumption. If a maximum memory size is defined, then
+ // we reserve that maximum size up to 1GB. If no maximum memory size is
+ // defined, we just allocate the initial size and grow with a realloc.
+ constexpr int kGBPages = 1024 * 1024 * 1024 / wasm::kWasmPageSize;
+ if (initial > kGBPages || !has_maximum) {
+ // We allocate at least the initial size. If no maximum is specified we
+ // also start with the initial size.
+ heuristic_maximum = initial;
+ } else {
+ // We reserve the maximum size, but at most 1GB.
+ heuristic_maximum = std::min(maximum, kGBPages);
+ }
}
#endif
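The reservation logic above can be summarized as a small helper; this is an editorial sketch of the 32-bit heuristic, not code from the patch:

    // For non-shared memories on 32-bit targets: reserve the declared maximum
    // up to 1GB of pages, otherwise start at the initial size and grow later
    // via realloc.
    int Reserved32BitPages(int initial, int maximum, bool has_maximum) {
      constexpr int kGBPages = 1024 * 1024 * 1024 / wasm::kWasmPageSize;
      if (initial > kGBPages || !has_maximum) return initial;
      return std::min(maximum, kGBPages);
    }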
@@ -931,30 +946,27 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
// Try to handle shared memory first.
if (old_buffer->is_shared()) {
- if (FLAG_wasm_grow_shared_memory) {
- base::Optional<size_t> result =
- backing_store->GrowWasmMemoryInPlace(isolate, pages, maximum_pages);
- // Shared memories can only be grown in place; no copying.
- if (result.has_value()) {
- BackingStore::BroadcastSharedWasmMemoryGrow(isolate, backing_store);
- // Broadcasting the update should update this memory object too.
- CHECK_NE(*old_buffer, memory_object->array_buffer());
- size_t new_pages = result.value() + pages;
- // If the allocation succeeded, then this can't possibly overflow:
- size_t new_byte_length = new_pages * wasm::kWasmPageSize;
- // This is a less than check, as it is not guaranteed that the SAB
- // length here will be equal to the stashed length above as calls to
- // grow the same memory object can come in from different workers.
- // It is also possible that a call to Grow was in progress when
- // handling this call.
- CHECK_LE(new_byte_length, memory_object->array_buffer().byte_length());
- // As {old_pages} was read racefully, we return here the synchronized
- // value provided by {GrowWasmMemoryInPlace}, to provide the atomic
- // read-modify-write behavior required by the spec.
- return static_cast<int32_t>(result.value()); // success
- }
- }
- return -1;
+ base::Optional<size_t> result =
+ backing_store->GrowWasmMemoryInPlace(isolate, pages, maximum_pages);
+ // Shared memories can only be grown in place; no copying.
+ if (!result.has_value()) return -1;
+
+ BackingStore::BroadcastSharedWasmMemoryGrow(isolate, backing_store);
+ // Broadcasting the update should update this memory object too.
+ CHECK_NE(*old_buffer, memory_object->array_buffer());
+ size_t new_pages = result.value() + pages;
+ // If the allocation succeeded, then this can't possibly overflow:
+ size_t new_byte_length = new_pages * wasm::kWasmPageSize;
+ // This is a less than check, as it is not guaranteed that the SAB
+ // length here will be equal to the stashed length above as calls to
+ // grow the same memory object can come in from different workers.
+ // It is also possible that a call to Grow was in progress when
+ // handling this call.
+ CHECK_LE(new_byte_length, memory_object->array_buffer().byte_length());
+ // As {old_pages} was read racefully, we return here the synchronized
+ // value provided by {GrowWasmMemoryInPlace}, to provide the atomic
+ // read-modify-write behavior required by the spec.
+ return static_cast<int32_t>(result.value()); // success
}
base::Optional<size_t> result =
@@ -966,6 +978,11 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
Handle<JSArrayBuffer> new_buffer =
isolate->factory()->NewJSArrayBuffer(std::move(backing_store));
memory_object->update_instances(isolate, new_buffer);
+ // For debugging purposes we memorize a link from the JSArrayBuffer
+  // to its owning WasmMemoryObject instance.
+ Handle<Symbol> symbol =
+ isolate->factory()->array_buffer_wasm_memory_symbol();
+ JSObject::SetProperty(isolate, new_buffer, symbol, memory_object).Check();
DCHECK_EQ(result.value(), old_pages);
return static_cast<int32_t>(result.value()); // success
}
@@ -987,6 +1004,10 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
Handle<JSArrayBuffer> new_buffer =
isolate->factory()->NewJSArrayBuffer(std::move(new_backing_store));
memory_object->update_instances(isolate, new_buffer);
+ // For debugging purposes we memorize a link from the JSArrayBuffer
+  // to its owning WasmMemoryObject instance.
+ Handle<Symbol> symbol = isolate->factory()->array_buffer_wasm_memory_symbol();
+ JSObject::SetProperty(isolate, new_buffer, symbol, memory_object).Check();
return static_cast<int32_t>(old_pages); // success
}
@@ -1284,6 +1305,7 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
instance->set_managed_object_maps(*isolate->factory()->empty_fixed_array());
instance->set_num_liftoff_function_calls_array(
module_object->native_module()->num_liftoff_function_calls_array());
+ instance->set_break_on_entry(module_object->script().break_on_entry());
// Insert the new instance into the scripts weak list of instances. This list
// is used for breakpoints affecting all instances belonging to the script.
@@ -1572,64 +1594,6 @@ WasmInstanceObject::GetGlobalBufferAndIndex(Handle<WasmInstanceObject> instance,
}
// static
-MaybeHandle<String> WasmInstanceObject::GetGlobalNameOrNull(
- Isolate* isolate, Handle<WasmInstanceObject> instance,
- uint32_t global_index) {
- return WasmInstanceObject::GetNameFromImportsAndExportsOrNull(
- isolate, instance, wasm::ImportExportKindCode::kExternalGlobal,
- global_index);
-}
-
-// static
-MaybeHandle<String> WasmInstanceObject::GetMemoryNameOrNull(
- Isolate* isolate, Handle<WasmInstanceObject> instance,
- uint32_t memory_index) {
- return WasmInstanceObject::GetNameFromImportsAndExportsOrNull(
- isolate, instance, wasm::ImportExportKindCode::kExternalMemory,
- memory_index);
-}
-
-// static
-MaybeHandle<String> WasmInstanceObject::GetTableNameOrNull(
- Isolate* isolate, Handle<WasmInstanceObject> instance,
- uint32_t table_index) {
- return WasmInstanceObject::GetNameFromImportsAndExportsOrNull(
- isolate, instance, wasm::ImportExportKindCode::kExternalTable,
- table_index);
-}
-
-// static
-MaybeHandle<String> WasmInstanceObject::GetNameFromImportsAndExportsOrNull(
- Isolate* isolate, Handle<WasmInstanceObject> instance,
- wasm::ImportExportKindCode kind, uint32_t index) {
- DCHECK(kind == wasm::ImportExportKindCode::kExternalGlobal ||
- kind == wasm::ImportExportKindCode::kExternalMemory ||
- kind == wasm::ImportExportKindCode::kExternalTable);
- wasm::ModuleWireBytes wire_bytes(
- instance->module_object().native_module()->wire_bytes());
-
- // This is pair of <module_name, field_name>.
- // If field_name is not set then we don't generate a name. Else if module_name
- // is set then it is an imported one. Otherwise it is an exported one.
- std::pair<wasm::WireBytesRef, wasm::WireBytesRef> name_ref =
- instance->module()
- ->lazily_generated_names.LookupNameFromImportsAndExports(
- kind, index, VectorOf(instance->module()->import_table),
- VectorOf(instance->module()->export_table));
- if (!name_ref.second.is_set()) return {};
- Vector<const char> field_name = wire_bytes.GetNameOrNull(name_ref.second);
- if (!name_ref.first.is_set()) {
- return isolate->factory()->NewStringFromUtf8(VectorOf(field_name));
- }
- Vector<const char> module_name = wire_bytes.GetNameOrNull(name_ref.first);
- std::string full_name;
- full_name.append(module_name.begin(), module_name.end());
- full_name.append(".");
- full_name.append(field_name.begin(), field_name.end());
- return isolate->factory()->NewStringFromUtf8(VectorOf(full_name));
-}
-
-// static
wasm::WasmValue WasmInstanceObject::GetGlobalValue(
Handle<WasmInstanceObject> instance, const wasm::WasmGlobal& global) {
Isolate* isolate = instance->GetIsolate();
@@ -1644,7 +1608,7 @@ wasm::WasmValue WasmInstanceObject::GetGlobalValue(
using wasm::Simd128;
switch (global.type.kind()) {
#define CASE_TYPE(valuetype, ctype) \
- case wasm::ValueType::valuetype: \
+ case wasm::valuetype: \
return wasm::WasmValue(base::ReadLittleEndianValue<ctype>(ptr));
FOREACH_WASMVALUE_CTYPES(CASE_TYPE)
#undef CASE_TYPE
@@ -1786,29 +1750,30 @@ uint32_t WasmExceptionPackage::GetEncodedSize(
uint32_t encoded_size = 0;
for (size_t i = 0; i < sig->parameter_count(); ++i) {
switch (sig->GetParam(i).kind()) {
- case wasm::ValueType::kI32:
- case wasm::ValueType::kF32:
+ case wasm::kI32:
+ case wasm::kF32:
DCHECK_EQ(2, ComputeEncodedElementSize(sig->GetParam(i)));
encoded_size += 2;
break;
- case wasm::ValueType::kI64:
- case wasm::ValueType::kF64:
+ case wasm::kI64:
+ case wasm::kF64:
DCHECK_EQ(4, ComputeEncodedElementSize(sig->GetParam(i)));
encoded_size += 4;
break;
- case wasm::ValueType::kS128:
+ case wasm::kS128:
DCHECK_EQ(8, ComputeEncodedElementSize(sig->GetParam(i)));
encoded_size += 8;
break;
- case wasm::ValueType::kRef:
- case wasm::ValueType::kOptRef:
+ case wasm::kRef:
+ case wasm::kOptRef:
encoded_size += 1;
break;
- case wasm::ValueType::kRtt:
- case wasm::ValueType::kStmt:
- case wasm::ValueType::kBottom:
- case wasm::ValueType::kI8:
- case wasm::ValueType::kI16:
+ case wasm::kRtt:
+ case wasm::kRttWithDepth:
+ case wasm::kStmt:
+ case wasm::kBottom:
+ case wasm::kI8:
+ case wasm::kI16:
UNREACHABLE();
}
}
@@ -2051,7 +2016,7 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
Handle<String> name = factory->Function_string();
if (callable->IsJSFunction()) {
- name = JSFunction::GetName(Handle<JSFunction>::cast(callable));
+ name = JSFunction::GetDebugName(Handle<JSFunction>::cast(callable));
name = String::Flatten(isolate, name);
}
Handle<Map> function_map =
@@ -2152,10 +2117,10 @@ bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
const char** error_message) {
DCHECK(expected.is_reference_type());
switch (expected.kind()) {
- case ValueType::kOptRef:
+ case kOptRef:
if (value->IsNull(isolate)) return true;
V8_FALLTHROUGH;
- case ValueType::kRef:
+ case kRef:
switch (expected.heap_representation()) {
case HeapType::kFunc: {
if (!(WasmExternalFunction::IsWasmExternalFunction(*value) ||
@@ -2168,10 +2133,9 @@ bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
return true;
}
case HeapType::kExtern:
- case HeapType::kExn:
case HeapType::kAny:
return true;
- case HeapType::kEq: {
+ case HeapType::kData: {
// TODO(7748): Change this when we have a decision on the JS API for
// structs/arrays.
Handle<Name> key = isolate->factory()->wasm_wrapped_object_symbol();
@@ -2179,13 +2143,15 @@ bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
LookupIterator::OWN_SKIP_INTERCEPTOR);
if (it.state() == LookupIterator::DATA) return true;
*error_message =
- "eqref object must be null (if nullable) or wrapped with wasm "
- "object wrapper";
+ "dataref object must be null (if nullable) or wrapped with the "
+ "wasm object wrapper";
return false;
}
+ case HeapType::kEq:
case HeapType::kI31:
// TODO(7748): Implement when the JS API for i31ref is decided on.
- *error_message = "Assigning JS objects to i31ref not supported yet.";
+ *error_message =
+ "Assigning JS objects to eqref/i31ref not supported yet.";
return false;
default:
// Tables defined outside a module can't refer to user-defined types.
@@ -2248,7 +2214,7 @@ bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
"Assigning to struct/array globals not supported yet.";
return false;
}
- case ValueType::kRtt:
+ case kRtt:
// TODO(7748): Implement when the JS API for rtts is decided on.
*error_message = "Assigning to rtt globals not supported yet.";
return false;
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index 9d7bdd1c4c..21156adab9 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -309,13 +309,15 @@ class WasmMemoryObject : public JSObject {
inline bool has_maximum_pages();
V8_EXPORT_PRIVATE static Handle<WasmMemoryObject> New(
- Isolate* isolate, MaybeHandle<JSArrayBuffer> buffer, uint32_t maximum);
+ Isolate* isolate, MaybeHandle<JSArrayBuffer> buffer, int maximum);
V8_EXPORT_PRIVATE static MaybeHandle<WasmMemoryObject> New(Isolate* isolate,
- uint32_t initial,
- uint32_t maximum,
+ int initial,
+ int maximum,
SharedFlag shared);
+ static constexpr int kNoMaximum = -1;
+
void update_instances(Isolate* isolate, Handle<JSArrayBuffer> buffer);
V8_EXPORT_PRIVATE static int32_t Grow(Isolate*, Handle<WasmMemoryObject>,
@@ -339,8 +341,8 @@ class WasmGlobalObject : public JSObject {
DECL_INT32_ACCESSORS(offset)
DECL_INT_ACCESSORS(raw_type)
DECL_PRIMITIVE_ACCESSORS(type, wasm::ValueType)
- // TODO(7748): Once we improve the encoding of mutability/type, turn this back
- // into a boolean accessor.
+ // TODO(7748): If we encode mutability in raw_type, turn this into a boolean
+ // accessor.
DECL_INT_ACCESSORS(is_mutable)
// Dispatched behavior.
@@ -418,6 +420,7 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
DECL_PRIMITIVE_ACCESSORS(dropped_elem_segments, byte*)
DECL_PRIMITIVE_ACCESSORS(hook_on_function_call_address, Address)
DECL_PRIMITIVE_ACCESSORS(num_liftoff_function_calls_array, uint32_t*)
+ DECL_PRIMITIVE_ACCESSORS(break_on_entry, uint8_t)
// Clear uninitialized padding space. This ensures that the snapshot content
// is deterministic. Depending on the V8 build mode there could be no padding.
@@ -466,6 +469,9 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
V(kDroppedElemSegmentsOffset, kSystemPointerSize) \
V(kHookOnFunctionCallAddressOffset, kSystemPointerSize) \
V(kNumLiftoffFunctionCallsArrayOffset, kSystemPointerSize) \
+ V(kBreakOnEntryOffset, kUInt8Size) \
+ /* More padding to make the header pointer-size aligned */ \
+ V(kHeaderPaddingOffset, POINTER_SIZE_PADDING(kHeaderPaddingOffset)) \
V(kHeaderSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
@@ -573,29 +579,9 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
static wasm::WasmValue GetGlobalValue(Handle<WasmInstanceObject>,
const wasm::WasmGlobal&);
- // Get the name of a global in the given instance by index.
- static MaybeHandle<String> GetGlobalNameOrNull(Isolate*,
- Handle<WasmInstanceObject>,
- uint32_t global_index);
-
- // Get the name of a memory in the given instance by index.
- static MaybeHandle<String> GetMemoryNameOrNull(Isolate*,
- Handle<WasmInstanceObject>,
- uint32_t memory_index);
-
- // Get the name of a table in the given instance by index.
- static MaybeHandle<String> GetTableNameOrNull(Isolate*,
- Handle<WasmInstanceObject>,
- uint32_t table_index);
-
OBJECT_CONSTRUCTORS(WasmInstanceObject, JSObject);
private:
- // Get the name in the given instance by index and kind.
- static MaybeHandle<String> GetNameFromImportsAndExportsOrNull(
- Isolate*, Handle<WasmInstanceObject>, wasm::ImportExportKindCode kind,
- uint32_t index);
-
static void InitDataSegmentArrays(Handle<WasmInstanceObject>,
Handle<WasmModuleObject>);
static void InitElemSegmentArrays(Handle<WasmInstanceObject>,
@@ -815,6 +801,10 @@ class WasmJSFunctionData : public Struct {
class WasmScript : public AllStatic {
public:
+ // Position used for storing "on entry" breakpoints (a.k.a. instrumentation
+ // breakpoints). This would be an illegal position for any other breakpoint.
+ static constexpr int kOnEntryBreakpointPosition = -1;
+
// Set a breakpoint on the given byte position inside the given module.
// This will affect all live and future instances of the module.
// The passed position might be modified to point to the next breakable
@@ -858,7 +848,8 @@ class WasmScript : public AllStatic {
// Return an empty handle if no breakpoint is hit at that location, or a
// FixedArray with all hit breakpoint objects.
static MaybeHandle<FixedArray> CheckBreakPoints(Isolate*, Handle<Script>,
- int position);
+ int position,
+ StackFrameId stack_frame_id);
private:
// Helper functions that update the breakpoint info list.
@@ -953,11 +944,9 @@ class WasmArray : public TorqueGeneratedWasmArray<WasmArray, HeapObject> {
namespace wasm {
Handle<Map> CreateStructMap(Isolate* isolate, const WasmModule* module,
- int struct_index, Handle<Map> rtt_parent);
+ int struct_index, MaybeHandle<Map> rtt_parent);
Handle<Map> CreateArrayMap(Isolate* isolate, const WasmModule* module,
- int array_index, Handle<Map> rtt_parent);
-Handle<Map> CreateGenericRtt(Isolate* isolate, const WasmModule* module,
- Handle<Map> rtt_parent);
+ int array_index, MaybeHandle<Map> rtt_parent);
Handle<Map> AllocateSubRtt(Isolate* isolate,
Handle<WasmInstanceObject> instance, uint32_t type,
Handle<Map> parent);
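With the switch from uint32_t to int, callers signal an absent maximum via the new sentinel; a hypothetical call site (values illustrative, not from the patch):

    // Create a memory with 16 initial pages and no declared maximum, letting
    // the reservation heuristic decide how much address space to reserve.
    MaybeHandle<WasmMemoryObject> maybe_memory = WasmMemoryObject::New(
        isolate, /*initial=*/16, /*maximum=*/WasmMemoryObject::kNoMaximum,
        SharedFlag::kNotShared);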
diff --git a/deps/v8/src/wasm/wasm-objects.tq b/deps/v8/src/wasm/wasm-objects.tq
index 7821458386..adcf63ba87 100644
--- a/deps/v8/src/wasm/wasm-objects.tq
+++ b/deps/v8/src/wasm/wasm-objects.tq
@@ -8,7 +8,6 @@ type PodArrayOfWasmValueType extends ByteArray
@useParentTypeChecker
type ManagedWasmNativeModule extends Foreign
constexpr 'Managed<wasm::NativeModule>';
-type WasmValueType extends uint8 constexpr 'wasm::ValueType::Kind';
extern class WasmInstanceObject extends JSObject;
@@ -106,7 +105,6 @@ extern class AsmWasmData extends Struct {
@generateCppClass
extern class WasmTypeInfo extends Foreign {
- parent: Map;
supertypes: FixedArray;
subtypes: ArrayList;
}
diff --git a/deps/v8/src/wasm/wasm-opcodes-inl.h b/deps/v8/src/wasm/wasm-opcodes-inl.h
index 2d0ef375dc..0d2d774895 100644
--- a/deps/v8/src/wasm/wasm-opcodes-inl.h
+++ b/deps/v8/src/wasm/wasm-opcodes-inl.h
@@ -32,9 +32,11 @@ namespace wasm {
#define CASE_I16x8_OP(name, str) CASE_OP(I16x8##name, "i16x8." str)
#define CASE_I8x16_OP(name, str) CASE_OP(I8x16##name, "i8x16." str)
#define CASE_S128_OP(name, str) CASE_OP(S128##name, "s128." str)
+#define CASE_V128_OP(name, str) CASE_OP(V128##name, "v128." str)
#define CASE_S64x2_OP(name, str) CASE_OP(S64x2##name, "s64x2." str)
#define CASE_S32x4_OP(name, str) CASE_OP(S32x4##name, "s32x4." str)
#define CASE_S16x8_OP(name, str) CASE_OP(S16x8##name, "s16x8." str)
+#define CASE_V64x2_OP(name, str) CASE_OP(V64x2##name, "v64x2." str)
#define CASE_V32x4_OP(name, str) CASE_OP(V32x4##name, "v32x4." str)
#define CASE_V16x8_OP(name, str) CASE_OP(V16x8##name, "v16x8." str)
#define CASE_V8x16_OP(name, str) CASE_OP(V8x16##name, "v8x16." str)
@@ -47,10 +49,14 @@ namespace wasm {
CASE_I8x16_OP(name, str)
#define CASE_SIMDF_OP(name, str) \
CASE_F32x4_OP(name, str) CASE_F64x2_OP(name, str)
-#define CASE_SIMDI_OP(name, str) \
+#define CASE_SIMDI_OP(name, str) \
+ CASE_I64x2_OP(name, str) CASE_I32x4_OP(name, str) CASE_I16x8_OP(name, str) \
+ CASE_I8x16_OP(name, str)
+#define CASE_SIMDI_NO64X2_OP(name, str) \
CASE_I32x4_OP(name, str) CASE_I16x8_OP(name, str) CASE_I8x16_OP(name, str)
-#define CASE_SIMDV_OP(name, str) \
- CASE_V32x4_OP(name, str) CASE_V16x8_OP(name, str) CASE_V8x16_OP(name, str)
+#define CASE_SIMDV_OP(name, str) \
+ CASE_V64x2_OP(name, str) CASE_V32x4_OP(name, str) CASE_V16x8_OP(name, str) \
+ CASE_V8x16_OP(name, str)
#define CASE_SIGN_OP(TYPE, name, str) \
CASE_##TYPE##_OP(name##S, str "_s") CASE_##TYPE##_OP(name##U, str "_u")
#define CASE_UNSIGNED_OP(TYPE, name, str) CASE_##TYPE##_OP(name##U, str "_u")
@@ -138,6 +144,7 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I64_OP(SExtendI32, "extend32_s")
CASE_OP(Unreachable, "unreachable")
CASE_OP(Nop, "nop")
+ CASE_OP(NopForTestingUnsupportedInLiftoff, "nop_for_testing")
CASE_OP(Block, "block")
CASE_OP(Loop, "loop")
CASE_OP(If, "if")
@@ -236,8 +243,7 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_SIMD_OP(Neg, "neg")
CASE_SIMDF_OP(Sqrt, "sqrt")
CASE_SIMD_OP(Eq, "eq")
- CASE_SIMDF_OP(Ne, "ne")
- CASE_SIMDI_OP(Ne, "ne")
+ CASE_SIMD_OP(Ne, "ne")
CASE_SIMD_OP(Add, "add")
CASE_SIMD_OP(Sub, "sub")
CASE_SIMD_OP(Mul, "mul")
@@ -252,34 +258,35 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_F32x4_OP(RecipSqrtApprox, "recip_sqrt_approx")
CASE_SIMDF_OP(Min, "min")
CASE_SIMDF_OP(Max, "max")
- CASE_CONVERT_OP(Convert, F32x4, I32x4, "i32", "convert")
- CASE_CONVERT_OP(Convert, I32x4, F32x4, "f32", "convert")
- CASE_CONVERT_OP(Convert, I32x4, I16x8Low, "i32", "convert")
- CASE_CONVERT_OP(Convert, I32x4, I16x8High, "i32", "convert")
- CASE_CONVERT_OP(Convert, I16x8, I32x4, "i32", "convert")
- CASE_CONVERT_OP(Convert, I16x8, I8x16Low, "i32", "convert")
- CASE_CONVERT_OP(Convert, I16x8, I8x16High, "i32", "convert")
- CASE_CONVERT_OP(Convert, I8x16, I16x8, "i32", "convert")
+ CASE_CONVERT_OP(Convert, F32x4, I32x4, "i32x4", "convert")
+ CASE_CONVERT_OP(Convert, I32x4, F32x4, "f32x4", "convert")
+ CASE_CONVERT_OP(Convert, I32x4, I16x8Low, "i16x8_low", "convert")
+ CASE_CONVERT_OP(Convert, I32x4, I16x8High, "i16x8_high", "convert")
+ CASE_CONVERT_OP(Convert, I16x8, I32x4, "i32x4", "convert")
+ CASE_CONVERT_OP(Convert, I16x8, I8x16Low, "i8x16_low", "convert")
+ CASE_CONVERT_OP(Convert, I16x8, I8x16High, "i8x16_high", "convert")
+ CASE_CONVERT_OP(Convert, I8x16, I16x8, "i16x8", "convert")
CASE_SIMDF_OP(ExtractLane, "extract_lane")
CASE_SIMDF_OP(ReplaceLane, "replace_lane")
CASE_I64x2_OP(ExtractLane, "extract_lane")
- CASE_I64x2_OP(ReplaceLane, "replace_lane")
CASE_I32x4_OP(ExtractLane, "extract_lane")
CASE_SIGN_OP(I16x8, ExtractLane, "extract_lane")
CASE_SIGN_OP(I8x16, ExtractLane, "extract_lane")
CASE_SIMDI_OP(ReplaceLane, "replace_lane")
- CASE_SIGN_OP(SIMDI, Min, "min")
- CASE_SIGN_OP(SIMDI, Max, "max")
- CASE_SIGN_OP(SIMDI, Lt, "lt")
- CASE_SIGN_OP(SIMDI, Le, "le")
- CASE_SIGN_OP(SIMDI, Gt, "gt")
- CASE_SIGN_OP(SIMDI, Ge, "ge")
- CASE_CONVERT_OP(Convert, I64x2, I32x4Low, "i32", "convert")
- CASE_CONVERT_OP(Convert, I64x2, I32x4High, "i32", "convert")
+ CASE_SIGN_OP(SIMDI_NO64X2, Min, "min")
+ CASE_SIGN_OP(SIMDI_NO64X2, Max, "max")
+ CASE_SIGN_OP(SIMDI_NO64X2, Lt, "lt")
+ CASE_I64x2_OP(LtS, "lt_s")
+ CASE_I64x2_OP(GtS, "gt_s")
+ CASE_I64x2_OP(LeS, "le_s")
+ CASE_I64x2_OP(GeS, "ge_s")
+ CASE_SIGN_OP(SIMDI_NO64X2, Le, "le")
+ CASE_SIGN_OP(SIMDI_NO64X2, Gt, "gt")
+ CASE_SIGN_OP(SIMDI_NO64X2, Ge, "ge")
+ CASE_CONVERT_OP(Convert, I64x2, I32x4Low, "i32x4_low", "convert")
+ CASE_CONVERT_OP(Convert, I64x2, I32x4High, "i32x4_high", "convert")
CASE_SIGN_OP(SIMDI, Shr, "shr")
- CASE_SIGN_OP(I64x2, Shr, "shr")
CASE_SIMDI_OP(Shl, "shl")
- CASE_I64x2_OP(Shl, "shl")
CASE_I32x4_OP(AddHoriz, "add_horizontal")
CASE_I16x8_OP(AddHoriz, "add_horizontal")
CASE_SIGN_OP(I16x8, AddSat, "add_sat")
@@ -294,7 +301,7 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_S128_OP(AndNot, "andnot")
CASE_I8x16_OP(Swizzle, "swizzle")
CASE_I8x16_OP(Shuffle, "shuffle")
- CASE_SIMDV_OP(AnyTrue, "any_true")
+ CASE_V128_OP(AnyTrue, "any_true")
CASE_SIMDV_OP(AllTrue, "all_true")
CASE_SIMDF_OP(Qfma, "qfma")
CASE_SIMDF_OP(Qfms, "qfms")
@@ -324,29 +331,18 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I16x8_OP(RoundingAverageU, "avgr_u")
CASE_I16x8_OP(Q15MulRSatS, "q15mulr_sat_s")
- CASE_I8x16_OP(Abs, "abs")
+ CASE_SIMDI_OP(Abs, "abs")
+ CASE_SIMDI_OP(BitMask, "bitmask")
CASE_I8x16_OP(Popcnt, "popcnt")
- CASE_I16x8_OP(Abs, "abs")
- CASE_I32x4_OP(Abs, "abs")
-
- CASE_I8x16_OP(BitMask, "bitmask")
- CASE_I16x8_OP(BitMask, "bitmask")
- CASE_I32x4_OP(BitMask, "bitmask")
- CASE_I64x2_OP(BitMask, "bitmask")
-
- CASE_F32x4_OP(Pmin, "pmin")
- CASE_F32x4_OP(Pmax, "pmax")
- CASE_F64x2_OP(Pmin, "pmin")
- CASE_F64x2_OP(Pmax, "pmax")
-
- CASE_F32x4_OP(Ceil, "ceil")
- CASE_F32x4_OP(Floor, "floor")
- CASE_F32x4_OP(Trunc, "trunc")
- CASE_F32x4_OP(NearestInt, "nearest")
- CASE_F64x2_OP(Ceil, "ceil")
- CASE_F64x2_OP(Floor, "floor")
- CASE_F64x2_OP(Trunc, "trunc")
- CASE_F64x2_OP(NearestInt, "nearest")
+
+
+ CASE_SIMDF_OP(Pmin, "pmin")
+ CASE_SIMDF_OP(Pmax, "pmax")
+
+ CASE_SIMDF_OP(Ceil, "ceil")
+ CASE_SIMDF_OP(Floor, "floor")
+ CASE_SIMDF_OP(Trunc, "trunc")
+ CASE_SIMDF_OP(NearestInt, "nearest")
CASE_I32x4_OP(DotI16x8S, "dot_i16x8_s")
@@ -356,7 +352,7 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_SIGN_OP(I32x4, ExtMulHighI16x8, "extmul_high_i16x8")
CASE_SIGN_OP(I64x2, ExtMulLowI32x4, "extmul_low_i32x4")
CASE_SIGN_OP(I64x2, ExtMulHighI32x4, "extmul_high_i32x4")
- CASE_SIMDI_OP(SignSelect, "signselect")
+ CASE_SIMDI_NO64X2_OP(SignSelect, "signselect")
CASE_I64x2_OP(SignSelect, "signselect")
CASE_SIGN_OP(I32x4, ExtAddPairwiseI16x8, "extadd_pairwise_i16x8")
@@ -365,6 +361,13 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(PrefetchT, "prefetch_t")
CASE_OP(PrefetchNT, "prefetch_nt")
+ CASE_F64x2_OP(ConvertLowI32x4S, "convert_low_i32x4_s")
+ CASE_F64x2_OP(ConvertLowI32x4U, "convert_low_i32x4_u")
+ CASE_I32x4_OP(TruncSatF64x2SZero, "trunc_sat_f64x2_s_zero")
+ CASE_I32x4_OP(TruncSatF64x2UZero, "trunc_sat_f64x2_u_zero")
+ CASE_F32x4_OP(DemoteF64x2Zero, "demote_f64x2_zero")
+ CASE_F64x2_OP(PromoteLowF32x4, "promote_low_f32x4")
+
// Atomic operations.
CASE_OP(AtomicNotify, "atomic.notify")
CASE_INT_OP(AtomicWait, "atomic.wait")
@@ -401,10 +404,18 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(RefTest, "ref.test")
CASE_OP(RefCast, "ref.cast")
CASE_OP(BrOnCast, "br_on_cast")
+ CASE_OP(RefIsFunc, "ref.is_func")
+ CASE_OP(RefIsData, "ref.is_data")
+ CASE_OP(RefIsI31, "ref.is_i31")
+ CASE_OP(RefAsFunc, "ref.as_func")
+ CASE_OP(RefAsData, "ref.as_data")
+ CASE_OP(RefAsI31, "ref.as_i31")
+ CASE_OP(BrOnFunc, "br_on_func")
+ CASE_OP(BrOnData, "br_on_data")
+ CASE_OP(BrOnI31, "br_on_i31")
CASE_OP(RefEq, "ref.eq")
CASE_OP(Let, "let")
-
case kNumericPrefix:
case kSimdPrefix:
case kAtomicPrefix:
@@ -439,6 +450,7 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
#undef CASE_ALL_OP
#undef CASE_SIMD_OP
#undef CASE_SIMDI_OP
+#undef CASE_SIMDI_NO64X2_OP
#undef CASE_SIGN_OP
#undef CASE_UNSIGNED_OP
#undef CASE_UNSIGNED_ALL_OP
@@ -581,7 +593,8 @@ constexpr WasmOpcodeSig GetAsmJsOpcodeSigIndex(byte opcode) {
constexpr WasmOpcodeSig GetSimdOpcodeSigIndex(byte opcode) {
#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
return FOREACH_SIMD_0_OPERAND_OPCODE(CASE) FOREACH_SIMD_MEM_OPCODE(CASE)
- FOREACH_SIMD_POST_MVP_MEM_OPCODE(CASE) kSigEnum_None;
+ FOREACH_SIMD_MEM_1_OPERAND_OPCODE(CASE)
+ FOREACH_SIMD_POST_MVP_MEM_OPCODE(CASE) kSigEnum_None;
#undef CASE
}
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 7b97d6a8a0..5d1c89342e 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -41,7 +41,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
for (auto type : sig->all()) {
// TODO(7748): Allow structs, arrays, rtts and i31s when their
// JS-interaction is decided on.
- if (type == kWasmS128 || type.is_reference_to(HeapType::kI31) ||
+ if (type == kWasmS128 || type.is_reference_to(HeapType::kEq) ||
+ type.is_reference_to(HeapType::kI31) ||
(type.has_index() && !module->has_signature(type.ref_index())) ||
type.is_rtt()) {
return false;
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 4d69c939a9..d033fb08d8 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -47,7 +47,8 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(Let, 0x17, _ /* typed_funcref prototype */) \
V(Delegate, 0x18, _ /* eh_prototype */) \
V(CatchAll, 0x19, _ /* eh_prototype */) \
- V(BrOnNull, 0xd4, _ /* gc prototype */)
+ V(BrOnNull, 0xd4, _ /* gc prototype */) \
+ V(NopForTestingUnsupportedInLiftoff, 0x16, _)
// Constants, locals, globals, and calls.
#define FOREACH_MISC_OPCODE(V) \
@@ -295,6 +296,16 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(S128Load32Zero, 0xfdfc, s_i) \
V(S128Load64Zero, 0xfdfd, s_i)
+#define FOREACH_SIMD_MEM_1_OPERAND_OPCODE(V) \
+ V(S128Load8Lane, 0xfd58, s_is) \
+ V(S128Load16Lane, 0xfd59, s_is) \
+ V(S128Load32Lane, 0xfd5a, s_is) \
+ V(S128Load64Lane, 0xfd5b, s_is) \
+ V(S128Store8Lane, 0xfd5c, v_is) \
+ V(S128Store16Lane, 0xfd5d, v_is) \
+ V(S128Store32Lane, 0xfd5e, v_is) \
+ V(S128Store64Lane, 0xfd5f, v_is)
+
#define FOREACH_SIMD_CONST_OPCODE(V) V(S128Const, 0xfd0c, _)
#define FOREACH_SIMD_MASK_OPERAND_OPCODE(V) V(I8x16Shuffle, 0xfd0d, s_ss)
@@ -337,6 +348,12 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(I32x4LeU, 0xfd3e, s_ss) \
V(I32x4GeS, 0xfd3f, s_ss) \
V(I32x4GeU, 0xfd40, s_ss) \
+ V(I64x2Eq, 0xfdc0, s_ss) \
+ V(I64x2LtS, 0xfd74, s_ss) \
+ V(I64x2GtS, 0xfd7a, s_ss) \
+ V(I64x2LeS, 0xfdee, s_ss) \
+ V(I64x2GeS, 0xfde2, s_ss) \
+ V(I64x2Ne, 0xfdd0, s_ss) \
V(F32x4Eq, 0xfd41, s_ss) \
V(F32x4Ne, 0xfd42, s_ss) \
V(F32x4Lt, 0xfd43, s_ss) \
@@ -357,7 +374,7 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(S128Select, 0xfd52, s_sss) \
V(I8x16Abs, 0xfd60, s_s) \
V(I8x16Neg, 0xfd61, s_s) \
- V(V8x16AnyTrue, 0xfd62, i_s) \
+ V(V128AnyTrue, 0xfd62, i_s) \
V(V8x16AllTrue, 0xfd63, i_s) \
V(I8x16BitMask, 0xfd64, i_s) \
V(I8x16SConvertI16x8, 0xfd65, s_ss) \
@@ -376,9 +393,9 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(I8x16MaxS, 0xfd78, s_ss) \
V(I8x16MaxU, 0xfd79, s_ss) \
V(I8x16RoundingAverageU, 0xfd7b, s_ss) \
+ V(I8x16Popcnt, 0xfd7c, s_s) \
V(I16x8Abs, 0xfd80, s_s) \
V(I16x8Neg, 0xfd81, s_s) \
- V(V16x8AnyTrue, 0xfd82, i_s) \
V(V16x8AllTrue, 0xfd83, i_s) \
V(I16x8BitMask, 0xfd84, i_s) \
V(I16x8SConvertI32x4, 0xfd85, s_ss) \
@@ -406,9 +423,11 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(I16x8ExtMulHighI8x16S, 0xfd9d, s_ss) \
V(I16x8ExtMulLowI8x16U, 0xfd9e, s_ss) \
V(I16x8ExtMulHighI8x16U, 0xfd9f, s_ss) \
+ V(I16x8Q15MulRSatS, 0xfd9c, s_ss) \
+ V(I16x8ExtAddPairwiseI8x16S, 0xfdc2, s_s) \
+ V(I16x8ExtAddPairwiseI8x16U, 0xfdc3, s_s) \
V(I32x4Abs, 0xfda0, s_s) \
V(I32x4Neg, 0xfda1, s_s) \
- V(V32x4AnyTrue, 0xfda2, i_s) \
V(V32x4AllTrue, 0xfda3, i_s) \
V(I32x4BitMask, 0xfda4, i_s) \
V(I32x4SConvertI16x8Low, 0xfda7, s_s) \
@@ -430,7 +449,13 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(I32x4ExtMulHighI16x8S, 0xfdbd, s_ss) \
V(I32x4ExtMulLowI16x8U, 0xfdbe, s_ss) \
V(I32x4ExtMulHighI16x8U, 0xfdbf, s_ss) \
+ V(I32x4TruncSatF64x2SZero, 0xfd55, s_s) \
+ V(I32x4TruncSatF64x2UZero, 0xfd56, s_s) \
+ V(I32x4ExtAddPairwiseI16x8S, 0xfda5, s_s) \
+ V(I32x4ExtAddPairwiseI16x8U, 0xfda6, s_s) \
+ V(I64x2Abs, 0xfda2, s_s) \
V(I64x2Neg, 0xfdc1, s_s) \
+ V(V64x2AllTrue, 0xfdcf, i_s) \
V(I64x2BitMask, 0xfdc4, i_s) \
V(I64x2Shl, 0xfdcb, s_si) \
V(I64x2ShrS, 0xfdcc, s_si) \
@@ -442,6 +467,10 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(I64x2ExtMulHighI32x4S, 0xfdd3, s_ss) \
V(I64x2ExtMulLowI32x4U, 0xfdd6, s_ss) \
V(I64x2ExtMulHighI32x4U, 0xfdd7, s_ss) \
+ V(I64x2SConvertI32x4Low, 0xfdc7, s_s) \
+ V(I64x2SConvertI32x4High, 0xfdc8, s_s) \
+ V(I64x2UConvertI32x4Low, 0xfdc9, s_s) \
+ V(I64x2UConvertI32x4High, 0xfdca, s_s) \
V(F32x4Abs, 0xfde0, s_s) \
V(F32x4Neg, 0xfde1, s_s) \
V(F32x4Sqrt, 0xfde3, s_s) \
@@ -453,6 +482,7 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(F32x4Max, 0xfde9, s_ss) \
V(F32x4Pmin, 0xfdea, s_ss) \
V(F32x4Pmax, 0xfdeb, s_ss) \
+ V(F32x4DemoteF64x2Zero, 0xfd57, s_s) \
V(F64x2Abs, 0xfdec, s_s) \
V(F64x2Neg, 0xfded, s_s) \
V(F64x2Sqrt, 0xfdef, s_s) \
@@ -475,38 +505,22 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(F64x2Ceil, 0xfddc, s_s) \
V(F64x2Floor, 0xfddd, s_s) \
V(F64x2Trunc, 0xfdde, s_s) \
- V(F64x2NearestInt, 0xfddf, s_s)
+ V(F64x2NearestInt, 0xfddf, s_s) \
+ V(F64x2ConvertLowI32x4S, 0xfd53, s_s) \
+ V(F64x2ConvertLowI32x4U, 0xfd54, s_s) \
+ V(F64x2PromoteLowF32x4, 0xfd69, s_s)
#define FOREACH_SIMD_POST_MVP_MEM_OPCODE(V) \
- V(S128Load8Lane, 0xfd58, s_is) \
- V(S128Load16Lane, 0xfd59, s_is) \
- V(S128Load32Lane, 0xfd5a, s_is) \
- V(S128Load64Lane, 0xfd5b, s_is) \
- V(S128Store8Lane, 0xfd5c, v_is) \
- V(S128Store16Lane, 0xfd5d, v_is) \
- V(S128Store32Lane, 0xfd5e, v_is) \
- V(S128Store64Lane, 0xfd5f, v_is) \
V(PrefetchT, 0xfdc5, v_i) \
V(PrefetchNT, 0xfdc6, v_i)
#define FOREACH_SIMD_POST_MVP_OPCODE(V) \
V(I8x16Mul, 0xfd75, s_ss) \
- V(I8x16Popcnt, 0xfd7c, s_s) \
V(I8x16SignSelect, 0xfd7d, s_sss) \
V(I16x8SignSelect, 0xfd7e, s_sss) \
V(I32x4SignSelect, 0xfd7f, s_sss) \
V(I64x2SignSelect, 0xfd94, s_sss) \
- V(I16x8Q15MulRSatS, 0xfd9c, s_ss) \
- V(I32x4ExtAddPairwiseI16x8S, 0xfda5, s_s) \
- V(I32x4ExtAddPairwiseI16x8U, 0xfda6, s_s) \
- V(I16x8ExtAddPairwiseI8x16S, 0xfdc2, s_s) \
- V(I16x8ExtAddPairwiseI8x16U, 0xfdc3, s_s) \
- V(I64x2Eq, 0xfdc0, s_ss) \
V(F32x4Qfma, 0xfdb4, s_sss) \
- V(I64x2SConvertI32x4Low, 0xfdc7, s_s) \
- V(I64x2SConvertI32x4High, 0xfdc8, s_s) \
- V(I64x2UConvertI32x4Low, 0xfdc9, s_s) \
- V(I64x2UConvertI32x4High, 0xfdca, s_s) \
V(F32x4Qfms, 0xfdd4, s_sss) \
V(F64x2Qfma, 0xfdfe, s_sss) \
V(F64x2Qfms, 0xfdff, s_sss) \
@@ -542,12 +556,13 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V)
-#define FOREACH_SIMD_OPCODE(V) \
- FOREACH_SIMD_0_OPERAND_OPCODE(V) \
- FOREACH_SIMD_1_OPERAND_OPCODE(V) \
- FOREACH_SIMD_MASK_OPERAND_OPCODE(V) \
- FOREACH_SIMD_MEM_OPCODE(V) \
- FOREACH_SIMD_POST_MVP_MEM_OPCODE(V) \
+#define FOREACH_SIMD_OPCODE(V) \
+ FOREACH_SIMD_0_OPERAND_OPCODE(V) \
+ FOREACH_SIMD_1_OPERAND_OPCODE(V) \
+ FOREACH_SIMD_MASK_OPERAND_OPCODE(V) \
+ FOREACH_SIMD_MEM_OPCODE(V) \
+ FOREACH_SIMD_MEM_1_OPERAND_OPCODE(V) \
+ FOREACH_SIMD_POST_MVP_MEM_OPCODE(V) \
FOREACH_SIMD_CONST_OPCODE(V)
#define FOREACH_NUMERIC_OPCODE(V) \
@@ -663,7 +678,16 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(RttSub, 0xfb31, _) \
V(RefTest, 0xfb40, _) \
V(RefCast, 0xfb41, _) \
- V(BrOnCast, 0xfb42, _)
+ V(BrOnCast, 0xfb42, _) \
+ V(RefIsFunc, 0xfb50, _) \
+ V(RefIsData, 0xfb51, _) \
+ V(RefIsI31, 0xfb52, _) \
+ V(RefAsFunc, 0xfb58, _) \
+ V(RefAsData, 0xfb59, _) \
+ V(RefAsI31, 0xfb5a, _) \
+ V(BrOnFunc, 0xfb60, _) \
+ V(BrOnData, 0xfb61, _) \
+ V(BrOnI31, 0xfb62, _)
#define FOREACH_ATOMIC_0_OPERAND_OPCODE(V) \
/* AtomicFence does not target a particular linear memory. */ \
@@ -853,18 +877,17 @@ class WasmInitExpr {
return expr;
}
- static WasmInitExpr RttCanon(HeapType::Representation heap_type) {
+ static WasmInitExpr RttCanon(uint32_t index) {
WasmInitExpr expr;
expr.kind_ = kRttCanon;
- expr.immediate_.heap_type = heap_type;
+ expr.immediate_.index = index;
return expr;
}
- static WasmInitExpr RttSub(HeapType::Representation heap_type,
- WasmInitExpr supertype) {
+ static WasmInitExpr RttSub(uint32_t index, WasmInitExpr supertype) {
WasmInitExpr expr;
expr.kind_ = kRttSub;
- expr.immediate_.heap_type = heap_type;
+ expr.immediate_.index = index;
expr.operand_ = std::make_unique<WasmInitExpr>(std::move(supertype));
return expr;
}
@@ -880,6 +903,7 @@ class WasmInitExpr {
return true;
case kGlobalGet:
case kRefFuncConst:
+ case kRttCanon:
return immediate().index == other.immediate().index;
case kI32Const:
return immediate().i32_const == other.immediate().i32_const;
@@ -892,10 +916,9 @@ class WasmInitExpr {
case kS128Const:
return immediate().s128_const == other.immediate().s128_const;
case kRefNullConst:
- case kRttCanon:
return immediate().heap_type == other.immediate().heap_type;
case kRttSub:
- return immediate().heap_type == other.immediate().heap_type &&
+ return immediate().index == other.immediate().index &&
*operand() == *other.operand();
}
}
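The rtt initializer expressions are now keyed by a module type index rather than a heap type; a minimal construction sketch (index values hypothetical, not from the patch):

    // rtt.canon for type 0, and rtt.sub for type 1 built on top of it.
    WasmInitExpr canon = WasmInitExpr::RttCanon(0);
    WasmInitExpr sub = WasmInitExpr::RttSub(1, std::move(canon));
    // Two RttCanon expressions now compare equal iff their type indices match.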
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index 900851cfed..447e814040 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -186,9 +186,7 @@ uint32_t GetWasmCalleeTag(RelocInfo* rinfo) {
#endif
}
-constexpr size_t kHeaderSize =
- sizeof(uint32_t) + // total wasm function count
- sizeof(uint32_t); // imported functions (index of first wasm function)
+constexpr size_t kHeaderSize = sizeof(size_t); // total code size
constexpr size_t kCodeHeaderSize = sizeof(bool) + // whether code is present
sizeof(int) + // offset of constant pool
@@ -285,17 +283,18 @@ class V8_EXPORT_PRIVATE NativeModuleSerializer {
private:
size_t MeasureCode(const WasmCode*) const;
- void WriteHeader(Writer*);
+ void WriteHeader(Writer*, size_t total_code_size);
bool WriteCode(const WasmCode*, Writer*);
const NativeModule* const native_module_;
- Vector<WasmCode* const> code_table_;
- bool write_called_;
+ const Vector<WasmCode* const> code_table_;
+ bool write_called_ = false;
+ size_t total_written_code_ = 0;
};
NativeModuleSerializer::NativeModuleSerializer(
const NativeModule* module, Vector<WasmCode* const> code_table)
- : native_module_(module), code_table_(code_table), write_called_(false) {
+ : native_module_(module), code_table_(code_table) {
DCHECK_NOT_NULL(native_module_);
// TODO(mtrofin): persist the export wrappers. Ideally, we'd only persist
// the unique ones, i.e. the cache.
@@ -320,12 +319,12 @@ size_t NativeModuleSerializer::Measure() const {
return size;
}
-void NativeModuleSerializer::WriteHeader(Writer* writer) {
+void NativeModuleSerializer::WriteHeader(Writer* writer,
+ size_t total_code_size) {
// TODO(eholk): We need to properly preserve the flag whether the trap
// handler was used or not when serializing.
- writer->Write(native_module_->num_functions());
- writer->Write(native_module_->num_imported_functions());
+ writer->Write(total_code_size);
}
bool NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
@@ -370,7 +369,8 @@ bool NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
writer->WriteVector(code->source_positions());
writer->WriteVector(code->protected_instructions_data());
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM || \
- V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390X
+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390X || \
+ V8_TARGET_ARCH_RISCV64
// On platforms that don't support misaligned word stores, copy to an aligned
// buffer if necessary so we can relocate the serialized code.
std::unique_ptr<byte[]> aligned_buffer;
@@ -431,6 +431,7 @@ bool NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
if (code_start != serialized_code_start) {
base::Memcpy(serialized_code_start, code_start, code_size);
}
+ total_written_code_ += code_size;
return true;
}
@@ -438,11 +439,22 @@ bool NativeModuleSerializer::Write(Writer* writer) {
DCHECK(!write_called_);
write_called_ = true;
- WriteHeader(writer);
+ size_t total_code_size = 0;
+ for (WasmCode* code : code_table_) {
+ if (code && code->tier() == ExecutionTier::kTurbofan) {
+ DCHECK(IsAligned(code->instructions().size(), kCodeAlignment));
+ total_code_size += code->instructions().size();
+ }
+ }
+ WriteHeader(writer, total_code_size);
for (WasmCode* code : code_table_) {
if (!WriteCode(code, writer)) return false;
}
+
+ // Make sure that the serialized total code size was correct.
+ CHECK_EQ(total_written_code_, total_code_size);
+
return true;
}
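After this change the serialized stream starts with the total TurboFan code size instead of the function counts; roughly (schematic, not taken from the patch):

    // Stream layout written by NativeModuleSerializer::Write:
    //   size_t total_code_size        // header: sum of all TurboFan code sizes
    //   per declared function:
    //     bool has_code               // false for lazily compiled functions
    //     <code header fields>        // offsets, sizes, kind, tier, ...
    //     <instructions, reloc info, source positions, protected instructions>
    // The deserializer reads total_code_size up front and uses it to
    // pre-allocate code space in batches (see ReadHeader/ReadCode below).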
@@ -471,37 +483,46 @@ bool WasmSerializer::SerializeNativeModule(Vector<byte> buffer) const {
struct DeserializationUnit {
Vector<const byte> src_code_buffer;
std::unique_ptr<WasmCode> code;
+ NativeModule::JumpTablesRef jump_tables;
};
class DeserializationQueue {
public:
- void Add(std::unique_ptr<std::vector<DeserializationUnit>> batch) {
+ void Add(std::vector<DeserializationUnit> batch) {
+ DCHECK(!batch.empty());
base::MutexGuard guard(&mutex_);
- queue_.push(std::move(batch));
- cv_.NotifyOne();
+ queue_.emplace(std::move(batch));
}
- std::unique_ptr<std::vector<DeserializationUnit>> Pop() {
+ std::vector<DeserializationUnit> Pop() {
base::MutexGuard guard(&mutex_);
- while (queue_.empty()) {
- cv_.Wait(&mutex_);
- }
+ if (queue_.empty()) return {};
auto batch = std::move(queue_.front());
- if (batch) queue_.pop();
+ queue_.pop();
return batch;
}
- std::unique_ptr<std::vector<DeserializationUnit>> UnlockedPop() {
- DCHECK(!queue_.empty());
- auto batch = std::move(queue_.front());
+ std::vector<DeserializationUnit> PopAll() {
+ base::MutexGuard guard(&mutex_);
+ if (queue_.empty()) return {};
+ auto units = std::move(queue_.front());
queue_.pop();
- return batch;
+ while (!queue_.empty()) {
+ units.insert(units.end(), std::make_move_iterator(queue_.front().begin()),
+ std::make_move_iterator(queue_.front().end()));
+ queue_.pop();
+ }
+ return units;
+ }
+
+ size_t NumBatches() {
+ base::MutexGuard guard(&mutex_);
+ return queue_.size();
}
private:
base::Mutex mutex_;
- base::ConditionVariable cv_;
- std::queue<std::unique_ptr<std::vector<DeserializationUnit>>> queue_;
+ std::queue<std::vector<DeserializationUnit>> queue_;
};
class V8_EXPORT_PRIVATE NativeModuleDeserializer {
@@ -516,169 +537,157 @@ class V8_EXPORT_PRIVATE NativeModuleDeserializer {
friend class CopyAndRelocTask;
friend class PublishTask;
- bool ReadHeader(Reader* reader);
- DeserializationUnit ReadCodeAndAlloc(int fn_index, Reader* reader);
+ void ReadHeader(Reader* reader);
+ DeserializationUnit ReadCode(int fn_index, Reader* reader);
void CopyAndRelocate(const DeserializationUnit& unit);
- void Publish(std::unique_ptr<std::vector<DeserializationUnit>> batch);
+ void Publish(std::vector<DeserializationUnit> batch);
NativeModule* const native_module_;
- bool read_called_;
#ifdef DEBUG
- std::atomic<int> total_published_{0};
+ bool read_called_ = false;
#endif
+
+ // Updated in {ReadCode}.
+ size_t remaining_code_size_ = 0;
+ Vector<byte> current_code_space_;
+ NativeModule::JumpTablesRef current_jump_tables_;
};
-class CopyAndRelocTask : public CancelableTask {
+class CopyAndRelocTask : public JobTask {
public:
CopyAndRelocTask(NativeModuleDeserializer* deserializer,
- DeserializationQueue& from_queue,
- DeserializationQueue& to_queue,
- CancelableTaskManager* task_manager)
- : CancelableTask(task_manager),
- deserializer_(deserializer),
+ DeserializationQueue* from_queue,
+ DeserializationQueue* to_queue,
+ std::shared_ptr<JobHandle> publish_handle)
+ : deserializer_(deserializer),
from_queue_(from_queue),
- to_queue_(to_queue) {}
+ to_queue_(to_queue),
+ publish_handle_(std::move(publish_handle)) {}
- void RunInternal() override {
+ void Run(JobDelegate* delegate) override {
CODE_SPACE_WRITE_SCOPE
- for (;;) {
- auto batch = from_queue_.Pop();
- if (!batch) break;
- for (auto& unit : *batch) {
+ do {
+ auto batch = from_queue_->Pop();
+ if (batch.empty()) break;
+ for (const auto& unit : batch) {
deserializer_->CopyAndRelocate(unit);
}
- to_queue_.Add(std::move(batch));
- }
- to_queue_.Add(nullptr);
+ to_queue_->Add(std::move(batch));
+ publish_handle_->NotifyConcurrencyIncrease();
+ } while (!delegate->ShouldYield());
+ }
+
+ size_t GetMaxConcurrency(size_t /* worker_count */) const override {
+ return from_queue_->NumBatches();
}
private:
- NativeModuleDeserializer* deserializer_;
- DeserializationQueue& from_queue_;
- DeserializationQueue& to_queue_;
+ NativeModuleDeserializer* const deserializer_;
+ DeserializationQueue* const from_queue_;
+ DeserializationQueue* const to_queue_;
+ std::shared_ptr<JobHandle> const publish_handle_;
};
-class PublishTask : public CancelableTask {
+class PublishTask : public JobTask {
public:
PublishTask(NativeModuleDeserializer* deserializer,
- DeserializationQueue& from_queue,
- CancelableTaskManager* task_manager)
- : CancelableTask(task_manager),
- deserializer_(deserializer),
- from_queue_(from_queue) {}
+ DeserializationQueue* from_queue)
+ : deserializer_(deserializer), from_queue_(from_queue) {}
- void RunInternal() override {
+ void Run(JobDelegate* delegate) override {
WasmCodeRefScope code_scope;
- for (;;) {
- auto batch = from_queue_.Pop();
- if (!batch) break;
- deserializer_->Publish(std::move(batch));
- }
+ do {
+ auto to_publish = from_queue_->PopAll();
+ if (to_publish.empty()) break;
+ deserializer_->Publish(std::move(to_publish));
+ } while (!delegate->ShouldYield());
+ }
+
+ size_t GetMaxConcurrency(size_t worker_count) const override {
+ // Publishing is sequential anyway, so never return more than 1. If a
+ // worker is already running, don't spawn a second one.
+ if (worker_count > 0) return 0;
+ return std::min(size_t{1}, from_queue_->NumBatches());
}
private:
- NativeModuleDeserializer* deserializer_;
- DeserializationQueue& from_queue_;
+ NativeModuleDeserializer* const deserializer_;
+ DeserializationQueue* const from_queue_;
};
NativeModuleDeserializer::NativeModuleDeserializer(NativeModule* native_module)
- : native_module_(native_module), read_called_(false) {}
+ : native_module_(native_module) {}
bool NativeModuleDeserializer::Read(Reader* reader) {
DCHECK(!read_called_);
+#ifdef DEBUG
read_called_ = true;
+#endif
- if (!ReadHeader(reader)) return false;
+ ReadHeader(reader);
uint32_t total_fns = native_module_->num_functions();
uint32_t first_wasm_fn = native_module_->num_imported_functions();
+
WasmCodeRefScope wasm_code_ref_scope;
DeserializationQueue reloc_queue;
DeserializationQueue publish_queue;
- CancelableTaskManager cancelable_task_manager;
+ std::shared_ptr<JobHandle> publish_handle = V8::GetCurrentPlatform()->PostJob(
+ TaskPriority::kUserVisible,
+ std::make_unique<PublishTask>(this, &publish_queue));
- auto copy_task = std::make_unique<CopyAndRelocTask>(
- this, reloc_queue, publish_queue, &cancelable_task_manager);
- V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(copy_task));
+ std::unique_ptr<JobHandle> copy_and_reloc_handle =
+ V8::GetCurrentPlatform()->PostJob(
+ TaskPriority::kUserVisible,
+ std::make_unique<CopyAndRelocTask>(this, &reloc_queue, &publish_queue,
+ publish_handle));
- auto publish_task = std::make_unique<PublishTask>(this, publish_queue,
- &cancelable_task_manager);
- V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(publish_task));
-
- auto batch = std::make_unique<std::vector<DeserializationUnit>>();
- int num_batches = 0;
+ std::vector<DeserializationUnit> batch;
const byte* batch_start = reader->current_location();
for (uint32_t i = first_wasm_fn; i < total_fns; ++i) {
- DeserializationUnit unit = ReadCodeAndAlloc(i, reader);
- if (unit.code) {
- batch->push_back(std::move(unit));
- }
+ DeserializationUnit unit = ReadCode(i, reader);
+ if (!unit.code) continue;
+ batch.emplace_back(std::move(unit));
uint64_t batch_size_in_bytes = reader->current_location() - batch_start;
constexpr int kMinBatchSizeInBytes = 100000;
if (batch_size_in_bytes >= kMinBatchSizeInBytes) {
reloc_queue.Add(std::move(batch));
- num_batches++;
- batch = std::make_unique<std::vector<DeserializationUnit>>();
+ DCHECK(batch.empty());
batch_start = reader->current_location();
+ copy_and_reloc_handle->NotifyConcurrencyIncrease();
}
}
- if (!batch->empty()) {
- reloc_queue.Add(std::move(batch));
- num_batches++;
- }
- reloc_queue.Add(nullptr);
+ // We should have read the expected amount of code now, and should have fully
+ // utilized the allocated code space.
+ DCHECK_EQ(0, remaining_code_size_);
+ DCHECK_EQ(0, current_code_space_.size());
- // Participate to deserialization in the main thread to ensure progress even
- // if background tasks are not scheduled.
- int published = 0;
- {
- CODE_SPACE_WRITE_SCOPE
- for (;;) {
- auto batch = reloc_queue.Pop();
- if (!batch) break;
- for (auto& unit : *batch) {
- CopyAndRelocate(unit);
- }
- Publish(std::move(batch));
- ++published;
- }
+ if (!batch.empty()) {
+ reloc_queue.Add(std::move(batch));
+ copy_and_reloc_handle->NotifyConcurrencyIncrease();
}
- if (published == num_batches) {
- // {CopyAndRelocTask} did not take any work from the reloc queue, probably
- // because it was not scheduled yet. Ensure that the end marker gets added
- // to the queue in this case.
- publish_queue.Add(nullptr);
- }
- cancelable_task_manager.CancelAndWait();
+ // Wait for all tasks to finish, while participating in their work.
+ copy_and_reloc_handle->Join();
+ publish_handle->Join();
- // Process the publish queue now in case {PublishTask} was canceled.
- for (;;) {
- auto batch = publish_queue.UnlockedPop();
- if (!batch) break;
- Publish(std::move(batch));
- }
- DCHECK_EQ(total_published_.load(), num_batches);
return reader->current_size() == 0;
}
-bool NativeModuleDeserializer::ReadHeader(Reader* reader) {
- size_t functions = reader->Read<uint32_t>();
- size_t imports = reader->Read<uint32_t>();
- return functions == native_module_->num_functions() &&
- imports == native_module_->num_imported_functions();
+void NativeModuleDeserializer::ReadHeader(Reader* reader) {
+ remaining_code_size_ = reader->Read<size_t>();
}
-DeserializationUnit NativeModuleDeserializer::ReadCodeAndAlloc(int fn_index,
- Reader* reader) {
+DeserializationUnit NativeModuleDeserializer::ReadCode(int fn_index,
+ Reader* reader) {
bool has_code = reader->Read<bool>();
if (!has_code) {
DCHECK(FLAG_wasm_lazy_compilation ||
native_module_->enabled_features().has_compilation_hints());
native_module_->UseLazyStub(fn_index);
- return {{}, nullptr};
+ return {};
}
int constant_pool_offset = reader->Read<int>();
int safepoint_table_offset = reader->Read<int>();
@@ -694,17 +703,39 @@ DeserializationUnit NativeModuleDeserializer::ReadCodeAndAlloc(int fn_index,
WasmCode::Kind kind = reader->Read<WasmCode::Kind>();
ExecutionTier tier = reader->Read<ExecutionTier>();
+ DCHECK(IsAligned(code_size, kCodeAlignment));
+ DCHECK_GE(remaining_code_size_, code_size);
+ if (current_code_space_.size() < static_cast<size_t>(code_size)) {
+ // Allocate the next code space. Don't allocate more than 90% of
+ // {kMaxCodeSpaceSize}, to leave some space for jump tables.
+ constexpr size_t kMaxReservation =
+ RoundUp<kCodeAlignment>(WasmCodeAllocator::kMaxCodeSpaceSize * 9 / 10);
+ size_t code_space_size = std::min(kMaxReservation, remaining_code_size_);
+ current_code_space_ =
+ native_module_->AllocateForDeserializedCode(code_space_size);
+ DCHECK_EQ(current_code_space_.size(), code_space_size);
+ current_jump_tables_ = native_module_->FindJumpTablesForRegion(
+ base::AddressRegionOf(current_code_space_));
+ DCHECK(current_jump_tables_.is_valid());
+ }
+
DeserializationUnit unit;
unit.src_code_buffer = reader->ReadVector<byte>(code_size);
auto reloc_info = reader->ReadVector<byte>(reloc_size);
auto source_pos = reader->ReadVector<byte>(source_position_size);
auto protected_instructions =
reader->ReadVector<byte>(protected_instructions_size);
- unit.code = native_module_->AllocateDeserializedCode(
- fn_index, unit.src_code_buffer, stack_slot_count, tagged_parameter_slots,
+
+ Vector<uint8_t> instructions = current_code_space_.SubVector(0, code_size);
+ current_code_space_ += code_size;
+ remaining_code_size_ -= code_size;
+
+ unit.code = native_module_->AddDeserializedCode(
+ fn_index, instructions, stack_slot_count, tagged_parameter_slots,
safepoint_table_offset, handler_table_offset, constant_pool_offset,
code_comment_offset, unpadded_binary_size, protected_instructions,
reloc_info, source_pos, kind, tier);
+ unit.jump_tables = current_jump_tables_;
return unit;
}
@@ -719,8 +750,6 @@ void NativeModuleDeserializer::CopyAndRelocate(
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- auto jump_tables_ref = native_module_->FindJumpTablesForRegion(
- base::AddressRegionOf(unit.code->instructions()));
for (RelocIterator iter(unit.code->instructions(), unit.code->reloc_info(),
unit.code->constant_pool(), mask);
!iter.done(); iter.next()) {
@@ -729,7 +758,7 @@ void NativeModuleDeserializer::CopyAndRelocate(
case RelocInfo::WASM_CALL: {
uint32_t tag = GetWasmCalleeTag(iter.rinfo());
Address target =
- native_module_->GetNearCallTargetForFunction(tag, jump_tables_ref);
+ native_module_->GetNearCallTargetForFunction(tag, unit.jump_tables);
iter.rinfo()->set_wasm_call_address(target, SKIP_ICACHE_FLUSH);
break;
}
@@ -737,7 +766,7 @@ void NativeModuleDeserializer::CopyAndRelocate(
uint32_t tag = GetWasmCalleeTag(iter.rinfo());
DCHECK_LT(tag, WasmCode::kRuntimeStubCount);
Address target = native_module_->GetNearRuntimeStubEntry(
- static_cast<WasmCode::RuntimeStubId>(tag), jump_tables_ref);
+ static_cast<WasmCode::RuntimeStubId>(tag), unit.jump_tables);
iter.rinfo()->set_wasm_stub_call_address(target, SKIP_ICACHE_FLUSH);
break;
}
@@ -765,21 +794,18 @@ void NativeModuleDeserializer::CopyAndRelocate(
unit.code->instructions().size());
}
-void NativeModuleDeserializer::Publish(
- std::unique_ptr<std::vector<DeserializationUnit>> batch) {
- DCHECK_NOT_NULL(batch);
+void NativeModuleDeserializer::Publish(std::vector<DeserializationUnit> batch) {
+ DCHECK(!batch.empty());
std::vector<std::unique_ptr<WasmCode>> codes;
- for (auto& unit : *batch) {
- codes.push_back(std::move(unit).code);
+ codes.reserve(batch.size());
+ for (auto& unit : batch) {
+ codes.emplace_back(std::move(unit).code);
}
auto published_codes = native_module_->PublishCode(VectorOf(codes));
for (auto* wasm_code : published_codes) {
wasm_code->MaybePrint();
wasm_code->Validate();
}
-#ifdef DEBUG
- total_published_.fetch_add(1);
-#endif
}
bool IsSupportedVersion(Vector<const byte> header) {
@@ -797,12 +823,15 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
if (!IsWasmCodegenAllowed(isolate, isolate->native_context())) return {};
if (!IsSupportedVersion(data)) return {};
- ModuleWireBytes wire_bytes(wire_bytes_vec);
+ // Make the copy of the wire bytes early, so we use the same memory for
+ // decoding, lookup in the native module cache, and insertion into the cache.
+ auto owned_wire_bytes = OwnedVector<uint8_t>::Of(wire_bytes_vec);
+
// TODO(titzer): module features should be part of the serialization format.
WasmEngine* wasm_engine = isolate->wasm_engine();
WasmFeatures enabled_features = WasmFeatures::FromIsolate(isolate);
ModuleResult decode_result = DecodeWasmModule(
- enabled_features, wire_bytes.start(), wire_bytes.end(), false,
+ enabled_features, owned_wire_bytes.start(), owned_wire_bytes.end(), false,
i::wasm::kWasmOrigin, isolate->counters(), isolate->metrics_recorder(),
isolate->GetOrRegisterRecorderContextId(isolate->native_context()),
DecodingMethod::kDeserialize, wasm_engine->allocator());
@@ -811,7 +840,7 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
CHECK_NOT_NULL(module);
auto shared_native_module = wasm_engine->MaybeGetNativeModule(
- module->origin, wire_bytes_vec, isolate);
+ module->origin, owned_wire_bytes.as_vector(), isolate);
if (shared_native_module == nullptr) {
const bool kIncludeLiftoff = false;
size_t code_size_estimate =
@@ -819,8 +848,13 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
kIncludeLiftoff);
shared_native_module = wasm_engine->NewNativeModule(
isolate, enabled_features, std::move(module), code_size_estimate);
- shared_native_module->SetWireBytes(
- OwnedVector<uint8_t>::Of(wire_bytes_vec));
+ // We have to assign a compilation ID here, as it is required for a
+ // potential re-compilation, e.g. triggered by
+ // {TierDownAllModulesPerIsolate}. The value is -2 so that it is different
+ // than the compilation ID of actual compilations, and also different than
+ // the sentinel value of the CompilationState.
+ shared_native_module->compilation_state()->set_compilation_id(-2);
+ shared_native_module->SetWireBytes(std::move(owned_wire_bytes));
NativeModuleDeserializer deserializer(shared_native_module.get());
Reader reader(data + WasmSerializer::kHeaderSize);
diff --git a/deps/v8/src/wasm/wasm-serialization.h b/deps/v8/src/wasm/wasm-serialization.h
index 1e86a7b063..9e303ce65c 100644
--- a/deps/v8/src/wasm/wasm-serialization.h
+++ b/deps/v8/src/wasm/wasm-serialization.h
@@ -5,6 +5,7 @@
#ifndef V8_WASM_WASM_SERIALIZATION_H_
#define V8_WASM_WASM_SERIALIZATION_H_
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -42,6 +43,8 @@ class V8_EXPORT_PRIVATE WasmSerializer {
private:
NativeModule* native_module_;
+ // The {WasmCodeRefScope} keeps the pointers in {code_table_} alive.
+ WasmCodeRefScope code_ref_scope_;
std::vector<WasmCode*> code_table_;
};
diff --git a/deps/v8/src/wasm/wasm-subtyping.cc b/deps/v8/src/wasm/wasm-subtyping.cc
index 861114b54c..614d95f3a4 100644
--- a/deps/v8/src/wasm/wasm-subtyping.cc
+++ b/deps/v8/src/wasm/wasm-subtyping.cc
@@ -258,8 +258,7 @@ bool ArrayIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
}
}
-// TODO(7748): Expand this with function subtyping once the hiccups
-// with 'exact types' have been cleared.
+// TODO(7748): Expand this with function subtyping when it is introduced.
bool FunctionIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
const WasmModule* sub_module,
const WasmModule* super_module) {
@@ -274,15 +273,34 @@ V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
const WasmModule* super_module) {
DCHECK(subtype != supertype || sub_module != super_module);
- if (!subtype.is_reference_type()) return subtype == supertype;
-
- if (subtype.is_rtt()) {
- return subtype.heap_type().is_generic()
- ? subtype == supertype
- : (supertype.is_rtt() && subtype.depth() == supertype.depth() &&
- supertype.has_index() &&
- EquivalentIndices(subtype.ref_index(), supertype.ref_index(),
- sub_module, super_module));
+ switch (subtype.kind()) {
+ case kI32:
+ case kI64:
+ case kF32:
+ case kF64:
+ case kS128:
+ case kI8:
+ case kI16:
+ case kStmt:
+ case kBottom:
+ return subtype == supertype;
+ case kRtt:
+ return supertype.kind() == kRtt &&
+ EquivalentIndices(subtype.ref_index(), supertype.ref_index(),
+ sub_module, super_module);
+ case kRttWithDepth:
+ return (supertype.kind() == kRtt &&
+ ((sub_module == super_module &&
+ subtype.ref_index() == supertype.ref_index()) ||
+ EquivalentIndices(subtype.ref_index(), supertype.ref_index(),
+ sub_module, super_module))) ||
+ (supertype.kind() == kRttWithDepth &&
+ supertype.depth() == subtype.depth() &&
+ EquivalentIndices(subtype.ref_index(), supertype.ref_index(),
+ sub_module, super_module));
+ case kRef:
+ case kOptRef:
+ break;
}
DCHECK(subtype.is_object_reference_type());
@@ -303,12 +321,12 @@ V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
case HeapType::kFunc:
case HeapType::kExtern:
case HeapType::kEq:
- case HeapType::kExn:
return sub_heap == super_heap || super_heap == HeapType::kAny;
case HeapType::kAny:
return super_heap == HeapType::kAny;
case HeapType::kI31:
- return super_heap == HeapType::kI31 || super_heap == HeapType::kEq ||
+ case HeapType::kData:
+ return super_heap == sub_heap || super_heap == HeapType::kEq ||
super_heap == HeapType::kAny;
case HeapType::kBottom:
UNREACHABLE();
@@ -324,9 +342,9 @@ V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
case HeapType::kFunc:
return sub_module->has_signature(sub_index);
case HeapType::kEq:
+ case HeapType::kData:
return !sub_module->has_signature(sub_index);
case HeapType::kExtern:
- case HeapType::kExn:
case HeapType::kI31:
return false;
case HeapType::kAny:
diff --git a/deps/v8/src/wasm/wasm-subtyping.h b/deps/v8/src/wasm/wasm-subtyping.h
index 3842d94b14..7386baf10f 100644
--- a/deps/v8/src/wasm/wasm-subtyping.h
+++ b/deps/v8/src/wasm/wasm-subtyping.h
@@ -45,10 +45,14 @@ V8_NOINLINE bool EquivalentTypes(ValueType type1, ValueType type2,
// - ref(ht1) <: ref/optref(ht2) iff ht1 <: ht2.
// - rtt1 <: rtt2 iff rtt1 ~ rtt2.
// For heap types, the following subtyping rules hold:
-// - Each generic heap type is a subtype of itself.
-// - All heap types are subtypes of any.
+// - The abstract heap types form the following type hierarchy:
+// any
+// / | \
+// eq func extern
+// / \
+// i31 data
+// - All structs and arrays are subtypes of data.
// - All functions are subtypes of func.
-// - i31, structs and arrays are subtypes of eq.
// - Struct subtyping: Subtype must have at least as many fields as supertype,
// covariance for immutable fields, equivalence for mutable fields.
// - Array subtyping (mutable only) is the equivalence relation.
@@ -70,12 +74,18 @@ V8_INLINE bool IsSubtypeOf(ValueType subtype, ValueType supertype,
}
// We have this function call IsSubtypeOf instead of the opposite because type
-// checks are much more common than heap type checks.
-V8_INLINE bool IsHeapSubtypeOf(HeapType subtype, HeapType supertype,
+// checks are much more common than heap type checks.
+V8_INLINE bool IsHeapSubtypeOf(uint32_t subtype_index,
+ HeapType::Representation supertype,
const WasmModule* module) {
- return IsSubtypeOf(ValueType::Ref(subtype, kNonNullable),
+ return IsSubtypeOf(ValueType::Ref(subtype_index, kNonNullable),
ValueType::Ref(supertype, kNonNullable), module);
}
+V8_INLINE bool IsHeapSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
+ const WasmModule* module) {
+ return IsSubtypeOf(ValueType::Ref(subtype_index, kNonNullable),
+ ValueType::Ref(supertype_index, kNonNullable), module);
+}
// Returns the weakest type that is a subtype of both a and b
// (which is currently always one of a, b, or kWasmBottom).
diff --git a/deps/v8/test/benchmarks/cpp/cppgc/allocation_perf.cc b/deps/v8/test/benchmarks/cpp/cppgc/allocation_perf.cc
index c5015ac9a2..513aaa2e8e 100644
--- a/deps/v8/test/benchmarks/cpp/cppgc/allocation_perf.cc
+++ b/deps/v8/test/benchmarks/cpp/cppgc/allocation_perf.cc
@@ -4,6 +4,7 @@
#include "include/cppgc/allocation.h"
#include "include/cppgc/garbage-collected.h"
+#include "include/cppgc/heap-consistency.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap.h"
#include "test/benchmarks/cpp/cppgc/utils.h"
@@ -21,7 +22,7 @@ class TinyObject final : public cppgc::GarbageCollected<TinyObject> {
};
BENCHMARK_F(Allocate, Tiny)(benchmark::State& st) {
- Heap::NoGCScope no_gc(*Heap::From(&heap()));
+ subtle::NoGarbageCollectionScope no_gc(*Heap::From(&heap()));
for (auto _ : st) {
benchmark::DoNotOptimize(
cppgc::MakeGarbageCollected<TinyObject>(heap().GetAllocationHandle()));
@@ -36,7 +37,7 @@ class LargeObject final : public GarbageCollected<LargeObject> {
};
BENCHMARK_F(Allocate, Large)(benchmark::State& st) {
- Heap::NoGCScope no_gc(*Heap::From(&heap()));
+ subtle::NoGarbageCollectionScope no_gc(*Heap::From(&heap()));
for (auto _ : st) {
benchmark::DoNotOptimize(
cppgc::MakeGarbageCollected<LargeObject>(heap().GetAllocationHandle()));
diff --git a/deps/v8/test/benchmarks/cpp/cppgc/trace_perf.cc b/deps/v8/test/benchmarks/cpp/cppgc/trace_perf.cc
index 8cc2bf7631..9ae26b8d0e 100644
--- a/deps/v8/test/benchmarks/cpp/cppgc/trace_perf.cc
+++ b/deps/v8/test/benchmarks/cpp/cppgc/trace_perf.cc
@@ -4,6 +4,7 @@
#include "include/cppgc/allocation.h"
#include "include/cppgc/garbage-collected.h"
+#include "include/cppgc/persistent.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap.h"
#include "test/benchmarks/cpp/cppgc/utils.h"
diff --git a/deps/v8/test/benchmarks/csuite/csuite.py b/deps/v8/test/benchmarks/csuite/csuite.py
index 9b0a0e6436..fd931a4a09 100755
--- a/deps/v8/test/benchmarks/csuite/csuite.py
+++ b/deps/v8/test/benchmarks/csuite/csuite.py
@@ -144,14 +144,23 @@ if __name__ == '__main__':
if mode == "baseline":
cmdline = "%s > %s" % (cmdline_base, output_file)
else:
- cmdline = "%s | %s %s" \
- % (cmdline_base, compare_baseline_py_path, output_file)
+ output_file_compare = output_file + "_compare"
+ cmdline = "%s > %s" % (cmdline_base, output_file_compare)
if opts.verbose:
print("Spawning subprocess: %s." % cmdline)
return_code = subprocess.call(cmdline, shell=True, cwd=suite_path)
if return_code < 0:
print("Error return code: %d." % return_code)
+
if mode == "baseline":
print("Wrote %s." % output_file)
print("Run %s again with compare mode to see results." % suite)
+ else:
+ print("Wrote %s." % output_file_compare)
+ cmdline = "python %s %s -f %s" % (compare_baseline_py_path, output_file, output_file_compare)
+ if opts.verbose:
+ print("Spawning subprocess: %s." % cmdline)
+ return_code = subprocess.call(cmdline, shell=True, cwd=suite_path)
+ if return_code < 0:
+ print("Error return code: %d." % return_code)
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index f4da906fb7..e63fe5ed35 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -60,6 +60,8 @@ v8_header_set("cctest_headers") {
"../..:internal_config_base",
]
+ deps = [ "../..:v8_config_headers" ]
+
sources = [ "cctest.h" ]
}
@@ -88,7 +90,7 @@ v8_source_set("cctest_sources") {
"compiler/codegen-tester.h",
"compiler/function-tester.cc",
"compiler/function-tester.h",
- "compiler/graph-builder-tester.h",
+ "compiler/node-observer-tester.h",
"compiler/serializer-tester.cc",
"compiler/serializer-tester.h",
"compiler/test-basic-block-profiler.cc",
@@ -128,6 +130,7 @@ v8_source_set("cctest_sources") {
"compiler/test-run-tail-calls.cc",
"compiler/test-run-unwinding-info.cc",
"compiler/test-run-variables.cc",
+ "compiler/test-sloppy-equality.cc",
"compiler/value-helper.cc",
"compiler/value-helper.h",
"disasm-regex-helper.cc",
@@ -211,6 +214,7 @@ v8_source_set("cctest_sources") {
"test-compiler.cc",
"test-concurrent-descriptor-array.cc",
"test-concurrent-feedback-vector.cc",
+ "test-concurrent-js-array.cc",
"test-concurrent-prototype.cc",
"test-concurrent-script-context-table.cc",
"test-concurrent-string.cc",
@@ -247,6 +251,7 @@ v8_source_set("cctest_sources") {
"test-inobject-slack-tracking.cc",
"test-inspector.cc",
"test-intl.cc",
+ "test-js-to-wasm.cc",
"test-js-weak-refs.cc",
"test-liveedit.cc",
"test-local-handles.cc",
@@ -261,6 +266,7 @@ v8_source_set("cctest_sources") {
"test-persistent-handles.cc",
"test-platform.cc",
"test-profile-generator.cc",
+ "test-property-details.cc",
"test-random-number-generator.cc",
"test-regexp.cc",
"test-representation.cc",
@@ -270,6 +276,7 @@ v8_source_set("cctest_sources") {
"test-smi-lexicographic-compare.cc",
"test-strings.cc",
"test-strtod.cc",
+ "test-swiss-name-dictionary.cc",
"test-symbols.cc",
"test-thread-termination.cc",
"test-threads.cc",
@@ -279,7 +286,6 @@ v8_source_set("cctest_sources") {
"test-transitions.h",
"test-typedarrays.cc",
"test-types.cc",
- "test-unboxed-doubles.cc",
"test-unscopables-hidden-prototype.cc",
"test-unwinder-code-pages.cc",
"test-usecounters.cc",
@@ -406,6 +412,14 @@ v8_source_set("cctest_sources") {
"test-assembler-s390.cc",
"test-disasm-s390.cc",
]
+ } else if (v8_current_cpu == "riscv64") {
+ sources += [ ### gcmole(arch:riscv64) ###
+ "test-assembler-riscv64.cc",
+ "test-disasm-riscv64.cc",
+ "test-helper-riscv64.cc",
+ "test-macro-assembler-riscv64.cc",
+ "test-simple-riscv64.cc",
+ ]
}
if (v8_use_perfetto) {
@@ -434,7 +448,10 @@ v8_source_set("cctest_sources") {
]
defines = []
- deps = [ "../..:run_torque" ]
+ deps = [
+ "../..:run_torque",
+ "../..:v8_shared_internal_headers",
+ ]
if (v8_enable_i18n_support) {
defines += [ "V8_INTL_SUPPORT" ]
@@ -448,7 +465,8 @@ v8_source_set("cctest_sources") {
v8_current_cpu == "arm" || v8_current_cpu == "arm64" ||
v8_current_cpu == "s390" || v8_current_cpu == "s390x" ||
v8_current_cpu == "mips" || v8_current_cpu == "mips64" ||
- v8_current_cpu == "mipsel" || v8_current_cpu == "mipsel64") {
+ v8_current_cpu == "mipsel" || v8_current_cpu == "mipsel64" ||
+ v8_current_cpu == "riscv64") {
# Disable fmadd/fmsub so that expected results match generated code in
# RunFloat64MulAndFloat64Add1 and friends.
if (!is_win) {
diff --git a/deps/v8/test/cctest/cctest-utils.h b/deps/v8/test/cctest/cctest-utils.h
index 804d458b0c..c27506178b 100644
--- a/deps/v8/test/cctest/cctest-utils.h
+++ b/deps/v8/test/cctest/cctest-utils.h
@@ -42,6 +42,9 @@ namespace internal {
#elif defined(__PPC__) || defined(_ARCH_PPC)
#define GET_STACK_POINTER_TO(sp_addr) \
__asm__ __volatile__("stw 1, %0" : "=m"(sp_addr))
+#elif V8_TARGET_ARCH_RISCV64
+#define GET_STACK_POINTER_TO(sp_addr) \
+ __asm__ __volatile__("add %0, sp, x0" : "=r"(sp_addr))
#else
#error Host architecture was not detected as supported by v8
#endif
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 0ac0432f13..49969a0508 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -27,6 +27,7 @@
#include "test/cctest/cctest.h"
+#include "include/cppgc/platform.h"
#include "include/libplatform/libplatform.h"
#include "include/v8.h"
#include "src/codegen/compiler.h"
@@ -263,7 +264,7 @@ i::Handle<i::JSFunction> Optimize(
i::Handle<i::SharedFunctionInfo> shared(function->shared(), isolate);
i::IsCompiledScope is_compiled_scope(shared->is_compiled_scope(isolate));
CHECK(is_compiled_scope.is_compiled() ||
- i::Compiler::Compile(function, i::Compiler::CLEAR_EXCEPTION,
+ i::Compiler::Compile(isolate, function, i::Compiler::CLEAR_EXCEPTION,
&is_compiled_scope));
CHECK_NOT_NULL(zone);
@@ -282,7 +283,7 @@ i::Handle<i::JSFunction> Optimize(
i::compiler::Pipeline::GenerateCodeForTesting(&info, isolate, out_broker)
.ToHandleChecked();
info.native_context().AddOptimizedCode(*code);
- function->set_code(*code);
+ function->set_code(*code, v8::kReleaseStore);
return function;
}
@@ -333,6 +334,7 @@ int main(int argc, char* argv[]) {
v8::V8::InitializeICUDefaultLocation(argv[0]);
std::unique_ptr<v8::Platform> platform(v8::platform::NewDefaultPlatform());
v8::V8::InitializePlatform(platform.get());
+ cppgc::InitializeProcess(platform->GetPageAllocator());
using HelpOptions = v8::internal::FlagList::HelpOptions;
v8::internal::FlagList::SetFlagsFromCommandLine(
&argc, argv, true, HelpOptions(HelpOptions::kExit, usage.c_str()));
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index 6332cce936..e2fe217fd1 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -365,6 +365,10 @@ static inline v8::Local<v8::Integer> v8_int(int32_t x) {
return v8::Integer::New(v8::Isolate::GetCurrent(), x);
}
+static inline v8::Local<v8::BigInt> v8_bigint(int64_t x) {
+ return v8::BigInt::New(v8::Isolate::GetCurrent(), x);
+}
+
static inline v8::Local<v8::String> v8_str(const char* x) {
return v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), x).ToLocalChecked();
}
@@ -409,10 +413,11 @@ static inline int32_t v8_run_int32value(v8::Local<v8::Script> script) {
static inline v8::Local<v8::Script> CompileWithOrigin(
v8::Local<v8::String> source, v8::Local<v8::String> origin_url,
bool is_shared_cross_origin) {
- v8::ScriptOrigin origin(origin_url, 0, 0, is_shared_cross_origin);
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::ScriptOrigin origin(isolate, origin_url, 0, 0, is_shared_cross_origin);
v8::ScriptCompiler::Source script_source(source, origin);
- return v8::ScriptCompiler::Compile(
- v8::Isolate::GetCurrent()->GetCurrentContext(), &script_source)
+ return v8::ScriptCompiler::Compile(isolate->GetCurrentContext(),
+ &script_source)
.ToLocalChecked();
}
@@ -486,7 +491,8 @@ static inline v8::Local<v8::Value> CompileRunWithOrigin(const char* source,
int column_number) {
v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::Local<v8::Context> context = isolate->GetCurrentContext();
- v8::ScriptOrigin origin(v8_str(origin_url), line_number, column_number);
+ v8::ScriptOrigin origin(isolate, v8_str(origin_url), line_number,
+ column_number);
v8::ScriptCompiler::Source script_source(v8_str(source), origin);
return CompileRun(context, &script_source,
v8::ScriptCompiler::CompileOptions());
@@ -498,7 +504,7 @@ static inline v8::Local<v8::Value> CompileRunWithOrigin(
v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::Local<v8::Context> context = isolate->GetCurrentContext();
v8::ScriptCompiler::Source script_source(
- source, v8::ScriptOrigin(v8_str(origin_url)));
+ source, v8::ScriptOrigin(isolate, v8_str(origin_url)));
return CompileRun(context, &script_source,
v8::ScriptCompiler::CompileOptions());
}
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index be4041d59b..3e16d6ee6a 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -196,6 +196,10 @@
'test-streaming-compilation/SingleThreadedTestDeserializationFails': [SKIP],
'test-streaming-compilation/AsyncTestDeserializationFails': [SKIP],
'test-streaming-compilation/AsyncTestDeserializationBypassesCompilation': [SKIP],
+
+ # %ObserveNode tests rely on TurboFan.
+ 'test-sloppy-equality/*' : [SKIP],
+ 'test-js-to-wasm/*': [SKIP],
}], # variant == nooptimization
##############################################################################
@@ -293,6 +297,9 @@
['arch == arm and not simulator_run', {
# crbug.com/v8/7605
'test-heap/OutOfMemorySmallObjects': [SKIP],
+
+ # crbug.com/v8/11134
+ 'test-cpu-profiler/MultipleThreadsSingleIsolate': [PASS, ['system != android', SKIP]],
}], # 'arch == arm and not simulator_run'
##############################################################################
@@ -366,6 +373,39 @@
}], # 'mips_arch_variant == r6'
##############################################################################
+['arch == riscv64', {
+ # This test is unstable; it sometimes fails when running with other tests.
+ 'test-cpu-profiler/CrossScriptInliningCallerLineNumbers2': [SKIP],
+
+ # https://github.com/v8-riscv/v8/issues/297
+ 'test-run-wasm-simd/RunWasm_F64x2ExtractLaneWithI64x2_liftoff': [SKIP],
+ 'test-run-wasm-simd/RunWasm_I64x2ExtractWithF64x2_liftoff': [SKIP],
+
+ # https://github.com/v8-riscv/v8/issues/290
+ 'test-orderedhashtable/SmallOrderedNameDictionaryInsertionMax': [SKIP],
+ 'test-orderedhashtable/SmallOrderedNameDictionarySetAndMigrateHash': [SKIP],
+
+ # SIMD not fully implemented yet
+ 'test-run-wasm-simd-liftoff/*': [SKIP],
+
+ # Some wasm functionality is not implemented yet
+ 'test-run-wasm-atomics64/*': [SKIP],
+ 'test-run-wasm-atomics/*': [SKIP],
+ 'test-run-wasm-64/*': [SKIP],
+ 'test-run-wasm/*': [SKIP],
+}],
+
+##############################################################################
+['arch == riscv64 and simulator_run', {
+
+ # Pass but take too long with the simulator.
+ 'test-api/Threading*': [PASS, SLOW],
+ 'test-api/ExternalArrays': [PASS, SLOW],
+ 'test-heap-profiler/ManyLocalsInSharedContext': [PASS, SLOW],
+
+}], # 'arch == riscv64 and simulator_run'
+
+##############################################################################
['system == android', {
# Uses too much memory.
'test-api/NewStringRangeError': [SKIP],
@@ -375,9 +415,6 @@
['system != android and arch in [arm, arm64] and not simulator_run', {
# Consumes too much memory on ODROIDs in debug mode and optimize_for_size.
'test-code-generator/FuzzAssemble*': [PASS, ['(mode == debug) and optimize_for_size', SKIP]],
-
- # BUG(chromium:1163847): Seems to break on ODROIDs with recent Clang rolls.
- 'test-loop-analysis/LaEdgeMatrix2_0': [PASS, FAIL]
}], # 'system != android and arch in [arm, arm64] and not simulator_run'
##############################################################################
@@ -475,12 +512,9 @@
}],
##############################################################################
-['lite_mode or variant == jitless', {
-
- # Slow tests
- 'test-heap/IncrementalMarkingStepMakesBigProgressWithLargeObjects': [SKIP],
-
- # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+# TODO(v8:7777): Change this once wasm is supported in jitless mode.
+['not has_webassembly or variant == jitless', {
+ 'test-api/TurboAsmDisablesDetach': [SKIP],
'test-api/WasmI32AtomicWaitCallback': [SKIP],
'test-api/WasmI64AtomicWaitCallback': [SKIP],
'test-api-wasm/WasmStreaming*': [SKIP],
@@ -490,6 +524,7 @@
'test-jump-table-assembler/*': [SKIP],
'test-gc/*': [SKIP],
'test-grow-memory/*': [SKIP],
+ 'test-js-to-wasm/*': [SKIP],
'test-liftoff-inspection/*': [SKIP],
'test-run-wasm*': [SKIP],
'test-streaming-compilation/*': [SKIP],
@@ -502,6 +537,13 @@
'test-wasm-stack/*': [SKIP],
'test-wasm-trap-position/*': [SKIP],
'wasm-run-utils/*': [SKIP],
+}], # not has_webassembly or variant == jitless
+
+##############################################################################
+['lite_mode or variant == jitless', {
+
+ # Slow tests
+ 'test-heap/IncrementalMarkingStepMakesBigProgressWithLargeObjects': [SKIP],
# Tests that generate code at runtime.
'codegen-tester/*': [SKIP],
@@ -549,6 +591,7 @@
'test-run-unwinding-info/*': [SKIP],
'test-run-variables/*': [SKIP],
'test-serialize/*': [SKIP],
+ 'test-sloppy-equality/*' : [SKIP],
'test-torque/*': [SKIP],
'test-unwinder-code-pages/PCIsInV8_LargeCodeObject_CodePagesAPI': [SKIP],
@@ -587,9 +630,9 @@
'serializer-tester/SerializeConstructWithSpread': [SKIP],
'serializer-tester/SerializeInlinedClosure': [SKIP],
'serializer-tester/SerializeInlinedFunction': [SKIP],
- 'test-api/TurboAsmDisablesDetach': [SKIP],
'test-cpu-profiler/TickLinesOptimized': [SKIP],
'test-heap/TestOptimizeAfterBytecodeFlushingCandidate': [SKIP],
+ 'test-js-to-wasm/*': [SKIP],
'test-run-wasm-exceptions/RunWasmInterpreter_TryCatchCallDirect': [SKIP],
'test-run-wasm-exceptions/RunWasmInterpreter_TryCatchCallExternal': [SKIP],
'test-run-wasm-exceptions/RunWasmInterpreter_TryCatchCallIndirect': [SKIP],
@@ -623,7 +666,7 @@
}], # variant == jitless
##############################################################################
-['variant == turboprop', {
+['variant == turboprop or variant == turboprop_as_toptier', {
# Require inlining.
'test-cpu-profiler/DeoptAtFirstLevelInlinedSource': [SKIP],
'test-cpu-profiler/DeoptAtSecondLevelInlinedSource': [SKIP],
@@ -633,7 +676,8 @@
'test-cpu-profiler/DetailedSourcePositionAPI_Inlining': [SKIP],
'serializer-tester/BoundFunctionArguments': [SKIP],
'serializer-tester/BoundFunctionTarget': [SKIP],
-}], # variant == turboprop
+ 'test-js-to-wasm/*': [SKIP],
+}], # variant == turboprop or variant == turboprop_as_toptier
##############################################################################
['no_i18n == True', {
@@ -645,24 +689,11 @@
'*': [SKIP], # only relevant for mjsunit tests.
}],
-################################################################################
-['variant == nci or variant == nci_as_midtier', {
- # Optimizes and deopts differently than TurboFan.
- 'test-api/FastApiCalls': [SKIP],
- 'test-cpu-profiler/Deopt*': [SKIP],
- 'test-cpu-profiler/DetailedSourcePositionAPI_Inlining': [SKIP],
- 'test-cpu-profiler/DetailedSourcePositionAPI': [SKIP],
- 'test-heap/CellsInOptimizedCodeAreWeak': [SKIP],
- 'test-heap/EnsureAllocationSiteDependentCodesProcessed': [SKIP],
- 'test-heap/NewSpaceObjectsInOptimizedCode': [SKIP],
- 'test-heap/ObjectsInEagerlyDeoptimizedCodeAreWeak': [SKIP],
- 'test-heap/ObjectsInOptimizedCodeAreWeak': [SKIP],
- 'test-heap/OptimizedPretenuring*': [SKIP],
- 'test-heap-profiler/SamplingHeapProfilerPretenuredInlineAllocations': [SKIP],
- 'test-log/LogAll': [SKIP],
-
- # NCI code currently does not use the feedback vector's optimized code cache.
- 'test-compiler/OptimizedCodeSharing1': [SKIP],
-}], # variant == nci or variant == nci_as_midtier
+##############################################################################
+['no_simd_sse == True', {
+ 'test-run-wasm-simd/*': [SKIP],
+ 'test-run-wasm-simd-liftoff/*': [SKIP],
+ 'test-run-wasm-simd-scalar-lowering/*': [SKIP],
+}], # no_simd_sse == True
]
diff --git a/deps/v8/test/cctest/compiler/c-signature.h b/deps/v8/test/cctest/compiler/c-signature.h
index 5e16a610ff..33afeac1cc 100644
--- a/deps/v8/test/cctest/compiler/c-signature.h
+++ b/deps/v8/test/cctest/compiler/c-signature.h
@@ -100,8 +100,10 @@ class CSignatureOf : public CSignature {
static_assert(
std::is_same<decltype(*reps_), decltype(*param_types.data())>::value,
"type mismatch, cannot memcpy");
- memcpy(storage_ + kReturnCount, param_types.data(),
- sizeof(*storage_) * kParamCount);
+ if (kParamCount > 0) {
+ memcpy(storage_ + kReturnCount, param_types.data(),
+ sizeof(*storage_) * kParamCount);
+ }
}
private:
diff --git a/deps/v8/test/cctest/compiler/function-tester.cc b/deps/v8/test/cctest/compiler/function-tester.cc
index 1bc31c34a0..13141bbd60 100644
--- a/deps/v8/test/cctest/compiler/function-tester.cc
+++ b/deps/v8/test/cctest/compiler/function-tester.cc
@@ -45,7 +45,7 @@ FunctionTester::FunctionTester(Handle<Code> code, int param_count)
flags_(0) {
CHECK(!code.is_null());
Compile(function);
- function->set_code(*code);
+ function->set_code(*code, kReleaseStore);
}
FunctionTester::FunctionTester(Handle<Code> code) : FunctionTester(code, 0) {}
@@ -158,7 +158,7 @@ Handle<JSFunction> FunctionTester::CompileGraph(Graph* graph) {
Pipeline::GenerateCodeForTesting(&info, isolate, call_descriptor, graph,
AssemblerOptions::Default(isolate))
.ToHandleChecked();
- function->set_code(*code);
+ function->set_code(*code, kReleaseStore);
return function;
}
diff --git a/deps/v8/test/cctest/compiler/node-observer-tester.h b/deps/v8/test/cctest/compiler/node-observer-tester.h
new file mode 100644
index 0000000000..253eba230e
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/node-observer-tester.h
@@ -0,0 +1,92 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_NODEOBSERVER_TESTER_H_
+#define V8_CCTEST_COMPILER_NODEOBSERVER_TESTER_H_
+
+#include "src/compiler/node-observer.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/objects/type-hints.h"
+#include "test/cctest/cctest.h"
+#include "test/common/wasm/flag-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Helpers to test TurboFan compilation using the %ObserveNode intrinsic.
+struct ObserveNodeScope {
+ public:
+ ObserveNodeScope(Isolate* isolate, NodeObserver* node_observer)
+ : isolate_(isolate) {
+ DCHECK_NOT_NULL(isolate_);
+ DCHECK_NULL(isolate_->node_observer());
+ isolate_->set_node_observer(node_observer);
+ }
+
+ ~ObserveNodeScope() {
+ DCHECK_NOT_NULL(isolate_->node_observer());
+
+ // Checks that the code wrapped by %ObserveNode() was actually compiled in
+ // the test.
+ CHECK(isolate_->node_observer()->has_observed_changes());
+
+ isolate_->set_node_observer(nullptr);
+ }
+
+ private:
+ Isolate* isolate_;
+};
+
+class CreationObserver : public NodeObserver {
+ public:
+ explicit CreationObserver(std::function<void(const Node*)> handler)
+ : handler_(handler) {
+ DCHECK(handler_);
+ }
+
+ Observation OnNodeCreated(const Node* node) override {
+ handler_(node);
+ return Observation::kStop;
+ }
+
+ private:
+ std::function<void(const Node*)> handler_;
+};
+
+class ModificationObserver : public NodeObserver {
+ public:
+ explicit ModificationObserver(
+ std::function<void(const Node*)> on_created_handler,
+ std::function<NodeObserver::Observation(
+ const Node*, const ObservableNodeState& old_state)>
+ on_changed_handler)
+ : on_created_handler_(on_created_handler),
+ on_changed_handler_(on_changed_handler) {
+ DCHECK(on_created_handler_);
+ DCHECK(on_changed_handler_);
+ }
+
+ Observation OnNodeCreated(const Node* node) override {
+ on_created_handler_(node);
+ return Observation::kContinue;
+ }
+
+ Observation OnNodeChanged(const char* reducer_name, const Node* node,
+ const ObservableNodeState& old_state) override {
+ return on_changed_handler_(node, old_state);
+ }
+
+ private:
+ std::function<void(const Node*)> on_created_handler_;
+ std::function<NodeObserver::Observation(const Node*,
+ const ObservableNodeState& old_state)>
+ on_changed_handler_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CCTEST_COMPILER_NODEOBSERVER_TESTER_H_
diff --git a/deps/v8/test/cctest/compiler/serializer-tester.cc b/deps/v8/test/cctest/compiler/serializer-tester.cc
index d870c4f074..88e0becf23 100644
--- a/deps/v8/test/cctest/compiler/serializer-tester.cc
+++ b/deps/v8/test/cctest/compiler/serializer-tester.cc
@@ -22,10 +22,6 @@ SerializerTester::SerializerTester(const char* source)
: canonical_(main_isolate()) {
// The tests only make sense in the context of concurrent compilation.
FLAG_concurrent_inlining = true;
- // --local-heaps is enabled by default, but some bots disable it.
- // Ensure that it is enabled here because we have reverse implication
- // from --no-local-heaps to --no-concurrent-inlining.
- if (!FLAG_local_heaps) FLAG_local_heaps = true;
// The tests don't make sense when optimizations are turned off.
FLAG_opt = true;
// We need the IC to feed it to the serializer.
diff --git a/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc b/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
index 795c1f1bd2..561f9a68b7 100644
--- a/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
+++ b/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
@@ -28,9 +28,21 @@ class BasicBlockProfilerTest : public RawMachineAssemblerTester<int32_t> {
CHECK_NE(0, static_cast<int>(l->size()));
const BasicBlockProfilerData* data = l->back().get();
CHECK_EQ(static_cast<int>(size), static_cast<int>(data->n_blocks()));
- const double* counts = data->counts();
+ const uint32_t* counts = data->counts();
for (size_t i = 0; i < size; ++i) {
- CHECK_EQ(static_cast<double>(expected[i]), counts[i]);
+ CHECK_EQ(expected[i], counts[i]);
+ }
+ }
+
+ void SetCounts(size_t size, uint32_t* new_counts) {
+ const BasicBlockProfiler::DataList* l =
+ BasicBlockProfiler::Get()->data_list();
+ CHECK_NE(0, static_cast<int>(l->size()));
+ BasicBlockProfilerData* data = l->back().get();
+ CHECK_EQ(static_cast<int>(size), static_cast<int>(data->n_blocks()));
+ uint32_t* counts = const_cast<uint32_t*>(data->counts());
+ for (size_t i = 0; i < size; ++i) {
+ counts[i] = new_counts[i];
}
}
};
@@ -73,6 +85,21 @@ TEST(ProfileDiamond) {
uint32_t expected[] = {2, 1, 1, 1, 1, 2};
m.Expect(arraysize(expected), expected);
}
+
+ // Set the counters very high, to verify that they saturate rather than
+ // overflow.
+ uint32_t near_overflow[] = {UINT32_MAX - 1, UINT32_MAX - 1, UINT32_MAX - 1,
+ UINT32_MAX - 1, UINT32_MAX - 1, UINT32_MAX - 1};
+ m.SetCounts(arraysize(near_overflow), near_overflow);
+ m.Expect(arraysize(near_overflow), near_overflow);
+
+ m.Call(0);
+ m.Call(0);
+ {
+ uint32_t expected[] = {UINT32_MAX, UINT32_MAX, UINT32_MAX,
+ UINT32_MAX - 1, UINT32_MAX - 1, UINT32_MAX};
+ m.Expect(arraysize(expected), expected);
+ }
}
diff --git a/deps/v8/test/cctest/compiler/test-code-generator.cc b/deps/v8/test/cctest/compiler/test-code-generator.cc
index 105fa630ce..5bbfb1492b 100644
--- a/deps/v8/test/cctest/compiler/test-code-generator.cc
+++ b/deps/v8/test/cctest/compiler/test-code-generator.cc
@@ -10,13 +10,14 @@
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/linkage.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/execution/isolate.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
-
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/code-assembler-tester.h"
+#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
@@ -210,7 +211,7 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate,
Node* param = __ UntypedParameter(i + 2);
switch (parameters[i].representation()) {
case MachineRepresentation::kTagged:
- __ StoreFixedArrayElement(result_array, i, param,
+ __ StoreFixedArrayElement(result_array, i, __ Cast(param),
UNSAFE_SKIP_WRITE_BARRIER);
break;
// Box FP values into HeapNumbers.
@@ -221,7 +222,8 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate,
case MachineRepresentation::kFloat64: {
__ StoreObjectFieldNoWriteBarrier(
__ Cast(__ LoadFixedArrayElement(result_array, i)),
- HeapNumber::kValueOffset, __ UncheckedCast<Float64T>(param));
+ __ IntPtrConstant(HeapNumber::kValueOffset),
+ __ UncheckedCast<Float64T>(param));
} break;
case MachineRepresentation::kSimd128: {
TNode<FixedArray> vector =
@@ -1430,6 +1432,106 @@ TEST(AssembleTailCallGap) {
}
}
+namespace {
+
+std::shared_ptr<wasm::NativeModule> AllocateNativeModule(Isolate* isolate,
+ size_t code_size) {
+ std::shared_ptr<wasm::WasmModule> module(new wasm::WasmModule());
+ module->num_declared_functions = 1;
+ // We have to add the code object to a NativeModule, because the
+ // WasmCallDescriptor assumes that code is on the native heap and not
+ // within a code object.
+ auto native_module = isolate->wasm_engine()->NewNativeModule(
+ isolate, wasm::WasmFeatures::All(), std::move(module), code_size);
+ native_module->SetWireBytes({});
+ return native_module;
+}
+
+} // namespace
+
+// Test stack argument pushing with some gaps that require stack pointer
+// adjustment.
+TEST(Regress_1171759) {
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ // Create a minimal callee with enough parameters to exhaust parameter
+ // registers and force some stack parameters.
+ constexpr int kDoubleParams = 16;
+ // These are followed by a single float, and another double to create a gap.
+ constexpr int kTotalParams = kDoubleParams + 2;
+
+ wasm::FunctionSig::Builder builder(&zone, 1, kTotalParams);
+ // Make the first parameter slots double width.
+ for (int i = 0; i < kDoubleParams; i++) {
+ builder.AddParam(wasm::ValueType::For(MachineType::Float64()));
+ }
+ // Allocate a single parameter.
+ builder.AddParam(wasm::ValueType::For(MachineType::Float32()));
+ // Allocate a double parameter which should create a stack gap.
+ builder.AddParam(wasm::ValueType::For(MachineType::Float64()));
+
+ builder.AddReturn(wasm::ValueType::For(MachineType::Int32()));
+
+ CallDescriptor* desc =
+ compiler::GetWasmCallDescriptor(&zone, builder.Build());
+
+ HandleAndZoneScope handles(kCompressGraphZone);
+ RawMachineAssembler m(handles.main_isolate(),
+ handles.main_zone()->New<Graph>(handles.main_zone()),
+ desc, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags());
+
+ m.Return(m.Int32Constant(0));
+
+ OptimizedCompilationInfo info(ArrayVector("testing"), handles.main_zone(),
+ CodeKind::WASM_FUNCTION);
+ Handle<Code> code =
+ Pipeline::GenerateCodeForTesting(
+ &info, handles.main_isolate(), desc, m.graph(),
+ AssemblerOptions::Default(handles.main_isolate()), m.ExportForTest())
+ .ToHandleChecked();
+
+ std::shared_ptr<wasm::NativeModule> module = AllocateNativeModule(
+ handles.main_isolate(), code->raw_instruction_size());
+ wasm::WasmCodeRefScope wasm_code_ref_scope;
+ byte* code_start = module->AddCodeForTesting(code)->instructions().begin();
+
+ // Generate a minimal calling function, to push stack arguments.
+ RawMachineAssemblerTester<int32_t> mt;
+ Node* function = mt.PointerConstant(code_start);
+ Node* dummy_context = mt.PointerConstant(nullptr);
+ Node* double_slot = mt.Float64Constant(0);
+ Node* single_slot_that_creates_gap = mt.Float32Constant(0);
+ Node* call_inputs[] = {function,
+ dummy_context,
+ double_slot,
+ double_slot,
+ double_slot,
+ double_slot,
+ double_slot,
+ double_slot,
+ double_slot,
+ double_slot,
+ double_slot,
+ double_slot,
+ double_slot,
+ double_slot,
+ double_slot,
+ double_slot,
+ double_slot,
+ double_slot,
+ single_slot_that_creates_gap,
+ double_slot};
+
+ Node* call =
+ mt.AddNode(mt.common()->Call(desc), 2 + kTotalParams, call_inputs);
+
+ mt.Return(call);
+
+ CHECK_EQ(0, mt.Call());
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc b/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc
index 0d91d11322..7a9460a688 100644
--- a/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc
+++ b/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc
@@ -130,7 +130,7 @@ TEST(TestConcurrentSharedFunctionInfo) {
OptimizedCompilationInfo f_info(&zone, isolate, f_sfi, f, CodeKind::TURBOFAN);
Handle<Code> f_code =
Pipeline::GenerateCodeForTesting(&f_info, isolate).ToHandleChecked();
- f->set_code(*f_code);
+ f->set_code(*f_code, kReleaseStore);
IsCompiledScope compiled_scope_f(*f_sfi, isolate);
JSFunction::EnsureFeedbackVector(f, &compiled_scope_f);
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index 65725cca37..a2db7d62bd 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -81,8 +81,8 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
graph.NewNode(common.StateValues(0, SparseInputMask::Dense()));
Node* state_node = graph.NewNode(
- common.FrameState(BailoutId::None(), OutputFrameStateCombine::Ignore(),
- nullptr),
+ common.FrameState(BytecodeOffset::None(),
+ OutputFrameStateCombine::Ignore(), nullptr),
parameters, locals, stack, context, UndefinedConstant(), graph.start());
return state_node;
diff --git a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
index eac874480c..912664117e 100644
--- a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
+++ b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
@@ -135,7 +135,7 @@ class BytecodeGraphTester {
Handle<Code> code =
Pipeline::GenerateCodeForTesting(&compilation_info, isolate_)
.ToHandleChecked();
- function->set_code(*code);
+ function->set_code(*code, kReleaseStore);
return function;
}
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index 7ef45f8574..5f7b6eed88 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -4208,6 +4208,8 @@ TEST(RunTruncateFloat32ToInt32) {
CHECK_FLOAT_EQ(std::numeric_limits<int32_t>::min(), m.Call(i));
#elif V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
CHECK_FLOAT_EQ(0, m.Call(i));
+#elif V8_TARGET_ARCH_RISCV64
+ CHECK_FLOAT_EQ(std::numeric_limits<int32_t>::max(), m.Call(i));
#endif
}
}
diff --git a/deps/v8/test/cctest/compiler/test-run-retpoline.cc b/deps/v8/test/cctest/compiler/test-run-retpoline.cc
index ccdc4821e0..c0a8324286 100644
--- a/deps/v8/test/cctest/compiler/test-run-retpoline.cc
+++ b/deps/v8/test/cctest/compiler/test-run-retpoline.cc
@@ -92,9 +92,10 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
params.push_back(__ IntPtrConstant(i + 42));
}
DCHECK_EQ(param_count + 1, params.size());
- Node* raw_result = tester.raw_assembler_for_testing()->CallN(
- caller_descriptor, param_count + 1, params.data());
- __ Return(__ SmiTag(raw_result));
+ TNode<IntPtrT> intptr_result =
+ __ UncheckedCast<IntPtrT>(tester.raw_assembler_for_testing()->CallN(
+ caller_descriptor, param_count + 1, params.data()));
+ __ Return(__ SmiTag(intptr_result));
return tester.GenerateCodeCloseAndEscape();
}
diff --git a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
index 2b1ee39f6f..0601c161c1 100644
--- a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
@@ -73,9 +73,10 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
params.push_back(__ IntPtrConstant(i + 42));
}
DCHECK_EQ(param_count + 1, params.size());
- Node* raw_result = tester.raw_assembler_for_testing()->CallN(
- caller_descriptor, param_count + 1, params.data());
- __ Return(__ SmiTag(raw_result));
+ TNode<IntPtrT> intptr_result =
+ __ UncheckedCast<IntPtrT>(tester.raw_assembler_for_testing()->CallN(
+ caller_descriptor, param_count + 1, params.data()));
+ __ Return(__ SmiTag(intptr_result));
return tester.GenerateCodeCloseAndEscape();
}
diff --git a/deps/v8/test/cctest/compiler/test-sloppy-equality.cc b/deps/v8/test/cctest/compiler/test-sloppy-equality.cc
new file mode 100644
index 0000000000..82450abb7c
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/test-sloppy-equality.cc
@@ -0,0 +1,141 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/compiler/node-observer-tester.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct TestCase {
+ TestCase(const char* l, const char* r, NodeObserver* observer)
+ : warmup{std::make_pair(l, r)}, observer(observer) {
+ DCHECK_NOT_NULL(observer);
+ }
+ std::vector<std::pair<const char*, const char*>> warmup;
+ NodeObserver* observer;
+};
+
+class TestSloppyEqualityFactory {
+ public:
+ explicit TestSloppyEqualityFactory(Zone* zone) : zone_(zone) {}
+
+ NodeObserver* SpeculativeNumberEqual(NumberOperationHint hint) {
+ return zone_->New<CreationObserver>([hint](const Node* node) {
+ CHECK_EQ(IrOpcode::kSpeculativeNumberEqual, node->opcode());
+ CHECK_EQ(hint, NumberOperationHintOf(node->op()));
+ });
+ }
+
+ NodeObserver* JSEqual(CompareOperationHint /*hint*/) {
+ return zone_->New<CreationObserver>([](const Node* node) {
+ CHECK_EQ(IrOpcode::kJSEqual, node->opcode());
+ // TODO(paolosev): compare hint
+ });
+ }
+
+ NodeObserver* OperatorChange(IrOpcode::Value created_op,
+ IrOpcode::Value modified_op) {
+ return zone_->New<ModificationObserver>(
+ [created_op](const Node* node) {
+ CHECK_EQ(created_op, node->opcode());
+ },
+ [modified_op](const Node* node, const ObservableNodeState& old_state)
+ -> NodeObserver::Observation {
+ if (old_state.opcode() != node->opcode()) {
+ CHECK_EQ(modified_op, node->opcode());
+ return NodeObserver::Observation::kStop;
+ }
+ return NodeObserver::Observation::kContinue;
+ });
+ }
+
+ private:
+ Zone* zone_;
+};
+
+TEST(TestSloppyEquality) {
+ FlagScope<bool> allow_natives_syntax(&i::FLAG_allow_natives_syntax, true);
+ FlagScope<bool> always_opt(&i::FLAG_always_opt, false);
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ TestSloppyEqualityFactory f(&zone);
+ // TODO(nicohartmann@, v8:5660): Collect more precise feedback for some useful
+ // cases.
+ TestCase cases[] = {
+ {"3", "8", f.SpeculativeNumberEqual(NumberOperationHint::kSignedSmall)},
+ //{"3", "null",
+ // f.SpeculativeNumberEqual(NumberOperationHint::kNumberOrOddball)},
+ //{"3", "undefined",
+ // f.SpeculativeNumberEqual(NumberOperationHint::kNumberOrOddball)},
+ //{"3", "true",
+ // f.SpeculativeNumberEqual(NumberOperationHint::kNumberOrOddball)},
+ {"3", "\"abc\"", f.JSEqual(CompareOperationHint::kAny)},
+ {"3.14", "3", f.SpeculativeNumberEqual(NumberOperationHint::kNumber)},
+ //{"3.14", "null",
+ // f.SpeculativeNumberEqual(NumberOperationHint::kNumberOrOddball)},
+ //{"3.14", "undefined",
+ // f.SpeculativeNumberEqual(NumberOperationHint::kNumberOrOddball)},
+ //{"3.14", "true",
+ // f.SpeculativeNumberEqual(NumberOperationHint::kNumberOrOddball)},
+ {"3.14", "\"abc\"", f.JSEqual(CompareOperationHint::kAny)},
+ {"\"abc\"", "3", f.JSEqual(CompareOperationHint::kAny)},
+ {"\"abc\"", "null", f.JSEqual(CompareOperationHint::kAny)},
+ {"\"abc\"", "undefined", f.JSEqual(CompareOperationHint::kAny)},
+ {"\"abc\"", "true", f.JSEqual(CompareOperationHint::kAny)},
+ {"\"abc\"", "\"xy\"",
+ f.JSEqual(CompareOperationHint::kInternalizedString)},
+ //{"true", "3",
+ // f.SpeculativeNumberEqual(NumberOperationHint::kNumberOrOddball)},
+ //{"true", "null",
+ // f.SpeculativeNumberEqual(NumberOperationHint::kNumberOrOddball)},
+ //{"true", "undefined",
+ // f.SpeculativeNumberEqual(NumberOperationHint::kNumberOrOddball)},
+ //{"true", "true",
+ // f.SpeculativeNumberEqual(NumberOperationHint::kNumberOrOddball)},
+ {"true", "\"abc\"", f.JSEqual(CompareOperationHint::kAny)},
+ //{"undefined", "3",
+ // f.SpeculativeNumberEqual(NumberOperationHint::kNumberOrOddball)},
+ {"undefined", "null",
+ f.JSEqual(CompareOperationHint::kReceiverOrNullOrUndefined)},
+ {"undefined", "undefined",
+ f.JSEqual(CompareOperationHint::kReceiverOrNullOrUndefined)},
+ //{"undefined", "true",
+ // f.SpeculativeNumberEqual(NumberOperationHint::kNumberOrOddball)},
+ {"undefined", "\"abc\"", f.JSEqual(CompareOperationHint::kAny)},
+ {"{}", "3", f.JSEqual(CompareOperationHint::kAny)},
+ {"{}", "null",
+ f.JSEqual(CompareOperationHint::kReceiverOrNullOrUndefined)},
+ {"{}", "undefined",
+ f.JSEqual(CompareOperationHint::kReceiverOrNullOrUndefined)},
+ {"{}", "true", f.JSEqual(CompareOperationHint::kAny)},
+ {"{}", "\"abc\"", f.JSEqual(CompareOperationHint::kAny)},
+
+ {"3.14", "3",
+ f.OperatorChange(IrOpcode::kSpeculativeNumberEqual,
+ IrOpcode::kFloat64Equal)}};
+
+ for (const auto& c : cases) {
+ std::ostringstream src;
+ src << "function test(a, b) {\n"
+ << " return %ObserveNode(a == b);\n"
+ << "}\n"
+ << "%PrepareFunctionForOptimization(test);\n";
+ for (const auto& args : c.warmup) {
+ src << "test(" << args.first << ", " << args.second << ");\n"
+ << "%OptimizeFunctionOnNextCall(test);"
+ << "test(" << args.first << ", " << args.second << ");\n";
+ }
+
+ {
+ compiler::ObserveNodeScope scope(isolate, c.observer);
+ CompileRun(src.str().c_str());
+ }
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/heap/heap-utils.cc b/deps/v8/test/cctest/heap/heap-utils.cc
index 171beb5f82..24f04e9eed 100644
--- a/deps/v8/test/cctest/heap/heap-utils.cc
+++ b/deps/v8/test/cctest/heap/heap-utils.cc
@@ -243,12 +243,6 @@ bool InCorrectGeneration(HeapObject object) {
: i::Heap::InYoungGeneration(object);
}
-void EnsureFlagLocalHeapsEnabled() {
- // Avoid data race in concurrent thread by only setting the flag to true if
- // not already enabled.
- if (!FLAG_local_heaps) FLAG_local_heaps = true;
-}
-
void GrowNewSpace(Heap* heap) {
SafepointScope scope(heap);
if (!heap->new_space()->IsAtMaximumCapacity()) {
diff --git a/deps/v8/test/cctest/heap/heap-utils.h b/deps/v8/test/cctest/heap/heap-utils.h
index 33974294df..ab06763872 100644
--- a/deps/v8/test/cctest/heap/heap-utils.h
+++ b/deps/v8/test/cctest/heap/heap-utils.h
@@ -70,8 +70,6 @@ void InvokeScavenge(Isolate* isolate = nullptr);
void InvokeMarkSweep(Isolate* isolate = nullptr);
-void EnsureFlagLocalHeapsEnabled();
-
void GrowNewSpace(Heap* heap);
void GrowNewSpaceToMaximumCapacity(Heap* heap);
diff --git a/deps/v8/test/cctest/heap/test-concurrent-allocation.cc b/deps/v8/test/cctest/heap/test-concurrent-allocation.cc
index edc04065fb..5450e0358d 100644
--- a/deps/v8/test/cctest/heap/test-concurrent-allocation.cc
+++ b/deps/v8/test/cctest/heap/test-concurrent-allocation.cc
@@ -83,8 +83,6 @@ class ConcurrentAllocationThread final : public v8::base::Thread {
UNINITIALIZED_TEST(ConcurrentAllocationInOldSpace) {
FLAG_max_old_space_size = 32;
- FLAG_concurrent_allocation = true;
- FLAG_local_heaps = true;
FLAG_stress_concurrent_allocation = false;
v8::Isolate::CreateParams create_params;
@@ -118,8 +116,6 @@ UNINITIALIZED_TEST(ConcurrentAllocationInOldSpace) {
UNINITIALIZED_TEST(ConcurrentAllocationInOldSpaceFromMainThread) {
FLAG_max_old_space_size = 4;
- FLAG_concurrent_allocation = true;
- FLAG_local_heaps = true;
FLAG_stress_concurrent_allocation = false;
v8::Isolate::CreateParams create_params;
@@ -167,8 +163,6 @@ class LargeObjectConcurrentAllocationThread final : public v8::base::Thread {
UNINITIALIZED_TEST(ConcurrentAllocationInLargeSpace) {
FLAG_max_old_space_size = 32;
- FLAG_concurrent_allocation = true;
- FLAG_local_heaps = true;
FLAG_stress_concurrent_allocation = false;
v8::Isolate::CreateParams create_params;
@@ -243,9 +237,6 @@ class ConcurrentBlackAllocationThread final : public v8::base::Thread {
};
UNINITIALIZED_TEST(ConcurrentBlackAllocation) {
- FLAG_concurrent_allocation = true;
- FLAG_local_heaps = true;
-
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
@@ -310,8 +301,6 @@ UNINITIALIZED_TEST(ConcurrentWriteBarrier) {
return;
}
ManualGCScope manual_gc_scope;
- FLAG_concurrent_allocation = true;
- FLAG_local_heaps = true;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -377,8 +366,6 @@ UNINITIALIZED_TEST(ConcurrentRecordRelocSlot) {
}
FLAG_manual_evacuation_candidates_selection = true;
ManualGCScope manual_gc_scope;
- FLAG_concurrent_allocation = true;
- FLAG_local_heaps = true;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -393,7 +380,15 @@ UNINITIALIZED_TEST(ConcurrentRecordRelocSlot) {
i::byte buffer[i::Assembler::kDefaultBufferSize];
MacroAssembler masm(i_isolate, v8::internal::CodeObjectRequired::kYes,
ExternalAssemblerBuffer(buffer, sizeof(buffer)));
+#if V8_TARGET_ARCH_ARM64
+ // Arm64 requires stack alignment.
+ UseScratchRegisterScope temps(&masm);
+ Register tmp = temps.AcquireX();
+ masm.Mov(tmp, Operand(ReadOnlyRoots(heap).undefined_value_handle()));
+ masm.Push(tmp, padreg);
+#else
masm.Push(ReadOnlyRoots(heap).undefined_value_handle());
+#endif
CodeDesc desc;
masm.GetCode(i_isolate, &desc);
Handle<Code> code_handle =
diff --git a/deps/v8/test/cctest/heap/test-embedder-tracing.cc b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
index 2e92805edb..6b5ebb4bc8 100644
--- a/deps/v8/test/cctest/heap/test-embedder-tracing.cc
+++ b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
@@ -82,7 +82,9 @@ class TestEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
void TracePrologue(EmbedderHeapTracer::TraceFlags) final {
if (prologue_behavior_ == TracePrologueBehavior::kCallV8WriteBarrier) {
auto local = array_.Get(isolate());
- local->Set(local->CreationContext(), 0, v8::Object::New(isolate()))
+ local
+ ->Set(local->GetCreationContext().ToLocalChecked(), 0,
+ v8::Object::New(isolate()))
.Check();
}
}
@@ -128,12 +130,12 @@ TEST(V8RegisteringEmbedderReference) {
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- void* first_field = reinterpret_cast<void*>(0x2);
- v8::Local<v8::Object> api_object =
- ConstructTraceableJSApiObject(context, first_field, nullptr);
+ void* first_and_second_field = reinterpret_cast<void*>(0x2);
+ v8::Local<v8::Object> api_object = ConstructTraceableJSApiObject(
+ context, first_and_second_field, first_and_second_field);
CHECK(!api_object.IsEmpty());
CcTest::CollectGarbage(i::OLD_SPACE);
- CHECK(tracer.IsRegisteredFromV8(first_field));
+ CHECK(tracer.IsRegisteredFromV8(first_and_second_field));
}
TEST(EmbedderRegisteringV8Reference) {
@@ -182,11 +184,11 @@ TEST(TracingInRevivedSubgraph) {
v8::Context::Scope context_scope(context);
v8::Global<v8::Object> g;
- void* first_field = reinterpret_cast<void*>(0x4);
+ void* first_and_second_field = reinterpret_cast<void*>(0x4);
{
v8::HandleScope inner_scope(isolate);
- v8::Local<v8::Object> api_object =
- ConstructTraceableJSApiObject(context, first_field, nullptr);
+ v8::Local<v8::Object> api_object = ConstructTraceableJSApiObject(
+ context, first_and_second_field, first_and_second_field);
CHECK(!api_object.IsEmpty());
v8::Local<v8::Object> o =
v8::Local<v8::Object>::New(isolate, v8::Object::New(isolate));
@@ -195,7 +197,7 @@ TEST(TracingInRevivedSubgraph) {
g.SetWeak(&g, ResurrectingFinalizer, v8::WeakCallbackType::kFinalizer);
}
CcTest::CollectGarbage(i::OLD_SPACE);
- CHECK(tracer.IsRegisteredFromV8(first_field));
+ CHECK(tracer.IsRegisteredFromV8(first_and_second_field));
}
TEST(TracingInEphemerons) {
@@ -211,13 +213,13 @@ TEST(TracingInEphemerons) {
v8::Local<v8::Object> key =
v8::Local<v8::Object>::New(isolate, v8::Object::New(isolate));
- void* first_field = reinterpret_cast<void*>(0x8);
+ void* first_and_second_field = reinterpret_cast<void*>(0x8);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
Handle<JSWeakMap> weak_map = i_isolate->factory()->NewJSWeakMap();
{
v8::HandleScope inner_scope(isolate);
- v8::Local<v8::Object> api_object =
- ConstructTraceableJSApiObject(context, first_field, nullptr);
+ v8::Local<v8::Object> api_object = ConstructTraceableJSApiObject(
+ context, first_and_second_field, first_and_second_field);
CHECK(!api_object.IsEmpty());
Handle<JSObject> js_key =
handle(JSObject::cast(*v8::Utils::OpenHandle(*key)), i_isolate);
@@ -226,7 +228,7 @@ TEST(TracingInEphemerons) {
JSWeakCollection::Set(weak_map, js_key, js_api_object, hash);
}
CcTest::CollectGarbage(i::OLD_SPACE);
- CHECK(tracer.IsRegisteredFromV8(first_field));
+ CHECK(tracer.IsRegisteredFromV8(first_and_second_field));
}
TEST(FinalizeTracingIsNoopWhenNotMarking) {
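The tracer hunks above also adapt to v8::Object::GetCreationContext(), which returns a MaybeLocal and replaces the removed CreationContext() accessor. A minimal embedder-side sketch of that pattern (the helper name is illustrative; it assumes an entered isolate as in the test harness):

// Sketch of the GetCreationContext() pattern adopted above: the accessor now
// returns a MaybeLocal<Context>, so callers unwrap it explicitly.
#include <v8.h>

v8::Local<v8::Context> CreationContextOf(v8::Local<v8::Object> object) {
  return object->GetCreationContext().ToLocalChecked();
}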
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index 6ee5e17945..96cb22827a 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -54,13 +54,13 @@
#include "src/numbers/hash-seed-inl.h"
#include "src/objects/elements.h"
#include "src/objects/field-type.h"
-#include "src/objects/frame-array-inl.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/managed.h"
#include "src/objects/objects-inl.h"
#include "src/objects/slots.h"
+#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/transitions.h"
#include "src/regexp/regexp.h"
#include "src/snapshot/snapshot.h"
@@ -203,7 +203,15 @@ HEAP_TEST(TestNewSpaceRefsInCopiedCode) {
MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
ExternalAssemblerBuffer(buffer, sizeof(buffer)));
// Add a new-space reference to the code.
+#if V8_TARGET_ARCH_ARM64
+ // Arm64 requires stack alignment.
+ UseScratchRegisterScope temps(&masm);
+ Register tmp = temps.AcquireX();
+ masm.Mov(tmp, Operand(value));
+ masm.Push(tmp, padreg);
+#else
masm.Push(value);
+#endif
CodeDesc desc;
masm.GetCode(isolate, &desc);
@@ -2639,19 +2647,11 @@ TEST(OptimizedPretenuringMixedInObjectProperties) {
FieldIndex idx1 = FieldIndex::ForPropertyIndex(o->map(), 0);
FieldIndex idx2 = FieldIndex::ForPropertyIndex(o->map(), 1);
CHECK(CcTest::heap()->InOldSpace(o->RawFastPropertyAt(idx1)));
- if (!o->IsUnboxedDoubleField(idx2)) {
- CHECK(CcTest::heap()->InOldSpace(o->RawFastPropertyAt(idx2)));
- } else {
- CHECK_EQ(1.1, o->RawFastDoublePropertyAt(idx2));
- }
+ CHECK(CcTest::heap()->InOldSpace(o->RawFastPropertyAt(idx2)));
JSObject inner_object = JSObject::cast(o->RawFastPropertyAt(idx1));
CHECK(CcTest::heap()->InOldSpace(inner_object));
- if (!inner_object.IsUnboxedDoubleField(idx1)) {
- CHECK(CcTest::heap()->InOldSpace(inner_object.RawFastPropertyAt(idx1)));
- } else {
- CHECK_EQ(2.2, inner_object.RawFastDoublePropertyAt(idx1));
- }
+ CHECK(CcTest::heap()->InOldSpace(inner_object.RawFastPropertyAt(idx1)));
CHECK(CcTest::heap()->InOldSpace(inner_object.RawFastPropertyAt(idx2)));
}
@@ -3530,7 +3530,7 @@ UNINITIALIZED_TEST(ReleaseStackTraceData) {
// TODO(mmarchini) also write tests for async/await and Promise.all
void DetailedErrorStackTraceTest(const char* src,
- std::function<void(Handle<FrameArray>)> test) {
+ std::function<void(Handle<FixedArray>)> test) {
FLAG_detailed_error_stack_trace = true;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -3546,8 +3546,11 @@ void DetailedErrorStackTraceTest(const char* src,
Handle<FixedArray> stack_trace(Handle<FixedArray>::cast(
Object::GetProperty(isolate, exception, key).ToHandleChecked()));
+ test(stack_trace);
+}
- test(GetFrameArrayFromStackTrace(isolate, stack_trace));
+FixedArray ParametersOf(Handle<FixedArray> stack_trace, int frame_index) {
+ return StackFrameInfo::cast(stack_trace->get(frame_index)).parameters();
}
// * Test interpreted function error
@@ -3567,13 +3570,13 @@ TEST(DetailedErrorStackTrace) {
"var foo = new Foo(); "
"main(foo); ";
- DetailedErrorStackTraceTest(source, [](Handle<FrameArray> stack_trace) {
- FixedArray foo_parameters = stack_trace->Parameters(0);
+ DetailedErrorStackTraceTest(source, [](Handle<FixedArray> stack_trace) {
+ FixedArray foo_parameters = ParametersOf(stack_trace, 0);
CHECK_EQ(foo_parameters.length(), 1);
CHECK(foo_parameters.get(0).IsSmi());
CHECK_EQ(Smi::ToInt(foo_parameters.get(0)), 42);
- FixedArray bar_parameters = stack_trace->Parameters(1);
+ FixedArray bar_parameters = ParametersOf(stack_trace, 1);
CHECK_EQ(bar_parameters.length(), 2);
CHECK(bar_parameters.get(0).IsJSObject());
CHECK(bar_parameters.get(1).IsBoolean());
@@ -3581,7 +3584,7 @@ TEST(DetailedErrorStackTrace) {
CHECK_EQ(bar_parameters.get(0), *foo);
CHECK(!bar_parameters.get(1).BooleanValue(CcTest::i_isolate()));
- FixedArray main_parameters = stack_trace->Parameters(2);
+ FixedArray main_parameters = ParametersOf(stack_trace, 2);
CHECK_EQ(main_parameters.length(), 2);
CHECK(main_parameters.get(0).IsJSObject());
CHECK(main_parameters.get(1).IsUndefined());
@@ -3608,13 +3611,13 @@ TEST(DetailedErrorStackTraceInline) {
"%OptimizeFunctionOnNextCall(foo); "
"foo(41); ";
- DetailedErrorStackTraceTest(source, [](Handle<FrameArray> stack_trace) {
- FixedArray parameters_add = stack_trace->Parameters(0);
+ DetailedErrorStackTraceTest(source, [](Handle<FixedArray> stack_trace) {
+ FixedArray parameters_add = ParametersOf(stack_trace, 0);
CHECK_EQ(parameters_add.length(), 1);
CHECK(parameters_add.get(0).IsSmi());
CHECK_EQ(Smi::ToInt(parameters_add.get(0)), 42);
- FixedArray parameters_foo = stack_trace->Parameters(1);
+ FixedArray parameters_foo = ParametersOf(stack_trace, 1);
CHECK_EQ(parameters_foo.length(), 1);
CHECK(parameters_foo.get(0).IsSmi());
CHECK_EQ(Smi::ToInt(parameters_foo.get(0)), 41);
@@ -3629,8 +3632,8 @@ TEST(DetailedErrorStackTraceBuiltinExit) {
"} "
"test(9999); ";
- DetailedErrorStackTraceTest(source, [](Handle<FrameArray> stack_trace) {
- FixedArray parameters = stack_trace->Parameters(0);
+ DetailedErrorStackTraceTest(source, [](Handle<FixedArray> stack_trace) {
+ FixedArray parameters = ParametersOf(stack_trace, 0);
CHECK_EQ(parameters.length(), 2);
CHECK(parameters.get(1).IsSmi());
@@ -4475,8 +4478,15 @@ static Handle<Code> DummyOptimizedCode(Isolate* isolate) {
MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
ExternalAssemblerBuffer(buffer, sizeof(buffer)));
CodeDesc desc;
+#if V8_TARGET_ARCH_ARM64
+ UseScratchRegisterScope temps(&masm);
+ Register tmp = temps.AcquireX();
+ masm.Mov(tmp, Operand(isolate->factory()->undefined_value()));
+ masm.Push(tmp, tmp);
+#else
masm.Push(isolate->factory()->undefined_value());
masm.Push(isolate->factory()->undefined_value());
+#endif
masm.Drop(2);
masm.GetCode(isolate, &desc);
Handle<Code> code = Factory::CodeBuilder(isolate, desc, CodeKind::TURBOFAN)
@@ -5277,8 +5287,8 @@ TEST(PreprocessStackTrace) {
Object::GetElement(isolate, stack_trace, 3).ToHandleChecked();
CHECK(pos->IsSmi());
- Handle<FrameArray> frame_array = Handle<FrameArray>::cast(stack_trace);
- int array_length = frame_array->FrameCount();
+ Handle<FixedArray> frame_array = Handle<FixedArray>::cast(stack_trace);
+ int array_length = frame_array->length();
for (int i = 0; i < array_length; i++) {
Handle<Object> element =
Object::GetElement(isolate, stack_trace, i).ToHandleChecked();
@@ -5915,7 +5925,7 @@ TEST(Regress631969) {
{
StaticOneByteResource external_string("12345678901234");
- CHECK(s3->MakeExternal(&external_string));
+ s3->MakeExternal(&external_string);
CcTest::CollectGarbage(OLD_SPACE);
// This avoids the GC from trying to free stack allocated resources.
i::Handle<i::ExternalOneByteString>::cast(s3)->SetResource(isolate,
@@ -7116,7 +7126,6 @@ TEST(Regress978156) {
}
TEST(GarbageCollectionWithLocalHeap) {
- EnsureFlagLocalHeapsEnabled();
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
@@ -7308,7 +7317,15 @@ TEST(Regress10900) {
i::byte buffer[i::Assembler::kDefaultBufferSize];
MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
ExternalAssemblerBuffer(buffer, sizeof(buffer)));
+#if V8_TARGET_ARCH_ARM64
+ UseScratchRegisterScope temps(&masm);
+ Register tmp = temps.AcquireX();
+ masm.Mov(tmp, Operand(static_cast<int32_t>(
+ ReadOnlyRoots(heap).undefined_value_handle()->ptr())));
+ masm.Push(tmp, tmp);
+#else
masm.Push(ReadOnlyRoots(heap).undefined_value_handle());
+#endif
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
@@ -7325,6 +7342,23 @@ TEST(Regress10900) {
CcTest::CollectAllAvailableGarbage();
}
+TEST(Regress11181) {
+ FLAG_always_compact = true;
+ CcTest::InitializeVM();
+ TracingFlags::runtime_stats.store(
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE,
+ std::memory_order_relaxed);
+ v8::HandleScope scope(CcTest::isolate());
+ const char* source =
+ "let roots = [];"
+ "for (let i = 0; i < 100; i++) roots.push(new Array(1000).fill(0));"
+ "roots.push(new Array(1000000).fill(0));"
+ "roots;";
+ CompileRun(source);
+ CcTest::CollectAllAvailableGarbage();
+ TracingFlags::runtime_stats.store(0, std::memory_order_relaxed);
+}
+
} // namespace heap
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index 0850c13d6b..5326c53644 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -328,6 +328,16 @@ TEST(OldLargeObjectSpace) {
CHECK(lo->Contains(ho));
+ CHECK_EQ(0, Heap::GetFillToAlign(ho.address(), kWordAligned));
+ // All large objects have the same alignment because they start at the
+ // same offset within a page. Fixed double arrays have the most strict
+ // alignment requirements.
+ CHECK_EQ(
+ 0, Heap::GetFillToAlign(
+ ho.address(),
+ HeapObject::RequiredAlignment(
+ ReadOnlyRoots(CcTest::i_isolate()).fixed_double_array_map())));
+
while (true) {
{
AllocationResult allocation = lo->AllocateRaw(lo_size);
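The new CHECKs above assert that large objects need no alignment fill, even for the double-aligned fixed double array map. A standalone sketch of the fill-to-align arithmetic they exercise (not the V8 implementation; sizes assume 4-byte tagged slots and 8-byte doubles as on a pointer-compressed 64-bit build):

// Standalone sketch of the fill-to-align check used above: how many filler
// bytes must precede an object so it starts double-aligned. Constants are
// assumptions mirroring a pointer-compressed 64-bit configuration.
#include <cassert>
#include <cstdint>
#include <iostream>

enum AllocationAlignment { kWordAligned, kDoubleAligned };

constexpr uintptr_t kTaggedSize = 4;
constexpr uintptr_t kDoubleAlignment = 8;

uintptr_t GetFillToAlign(uintptr_t address, AllocationAlignment alignment) {
  if (alignment == kDoubleAligned && (address & (kDoubleAlignment - 1)) != 0) {
    return kTaggedSize;  // one filler slot moves the object onto an 8-byte boundary
  }
  return 0;  // word alignment, or an already double-aligned address, needs no fill
}

int main() {
  assert(GetFillToAlign(0x10000, kWordAligned) == 0);
  assert(GetFillToAlign(0x10000, kDoubleAligned) == 0);
  assert(GetFillToAlign(0x10004, kDoubleAligned) == kTaggedSize);
  std::cout << "fill for 0x10004, double-aligned: "
            << GetFillToAlign(0x10004, kDoubleAligned) << " bytes\n";
  return 0;
}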
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
index 3bf8dbbdbb..2856782081 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
@@ -73,8 +73,8 @@ v8::Local<v8::Script> BytecodeExpectationsPrinter::CompileScript(
v8::Local<v8::Module> BytecodeExpectationsPrinter::CompileModule(
const char* program) const {
- ScriptOrigin origin(Local<v8::Value>(), 0, 0, false, -1, Local<v8::Value>(),
- false, false, true);
+ ScriptOrigin origin(isolate_, Local<v8::Value>(), 0, 0, false, -1,
+ Local<v8::Value>(), false, false, true);
v8::ScriptCompiler::Source source(V8StringFromUTF8(program), origin);
return v8::ScriptCompiler::CompileModule(isolate_, &source).ToLocalChecked();
}
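The hunk above switches to the v8::ScriptOrigin constructor that takes the isolate as its first argument. A short embedder-side sketch using that signature to compile a module (the helper name and empty resource name are illustrative; it assumes an entered isolate and context like the printer's harness):

// Sketch of the isolate-first ScriptOrigin constructor adopted above; the
// trailing 'true' marks the source as a module, mirroring the hunk.
#include <v8.h>

v8::MaybeLocal<v8::Module> CompileBareModule(v8::Isolate* isolate,
                                             v8::Local<v8::String> source_text) {
  v8::ScriptOrigin origin(isolate, v8::Local<v8::Value>(), 0, 0, false, -1,
                          v8::Local<v8::Value>(), false, false, true);
  v8::ScriptCompiler::Source source(source_text, origin);
  return v8::ScriptCompiler::CompileModule(isolate, &source);
}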
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
index ddf531a5de..5c217356c5 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
@@ -28,18 +28,18 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 35
+bytecode array length: 31
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
- B(Star), R(2),
+ B(Star2),
B(LdaZero),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
/* 54 E> */ B(StaInArrayLiteral), R(2), R(1), U8(1),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
/* 59 E> */ B(AddSmi), I8(1), U8(3),
B(StaInArrayLiteral), R(2), R(1), U8(1),
@@ -75,28 +75,28 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 65
+bytecode array length: 57
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(4),
- B(Star), R(2),
+ B(Star2),
B(LdaZero),
- B(Star), R(1),
+ B(Star1),
B(CreateArrayLiteral), U8(1), U8(1), U8(37),
- B(Star), R(4),
+ B(Star4),
B(LdaZero),
- B(Star), R(3),
+ B(Star3),
B(Ldar), R(0),
/* 56 E> */ B(StaInArrayLiteral), R(4), R(3), U8(2),
B(Ldar), R(4),
B(StaInArrayLiteral), R(2), R(1), U8(4),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(CreateArrayLiteral), U8(2), U8(6), U8(37),
- B(Star), R(4),
+ B(Star4),
B(LdaZero),
- B(Star), R(3),
+ B(Star3),
B(Ldar), R(0),
/* 68 E> */ B(AddSmi), I8(2), U8(7),
B(StaInArrayLiteral), R(4), R(3), U8(8),
@@ -119,10 +119,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
/* 42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
- B(Star), R(0),
+ B(Star0),
/* 64 S> */ B(CreateArrayFromIterable),
/* 68 S> */ B(Return),
]
@@ -138,32 +138,32 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 74
+bytecode array length: 67
bytecodes: [
/* 42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
- B(Star), R(0),
+ B(Star0),
/* 52 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- /* 67 S> */ B(Star), R(1),
+ /* 67 S> */ B(Star1),
/* 67 E> */ B(GetIterator), R(0), U8(2), U8(4),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(4),
+ B(Star4),
B(LdaNamedProperty), R(4), U8(2), U8(6),
- B(Star), R(3),
+ B(Star3),
B(CallProperty0), R(3), R(4), U8(15),
- B(Star), R(5),
+ B(Star5),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
B(LdaNamedProperty), R(5), U8(3), U8(17),
- B(JumpIfToBooleanTrue), U8(19),
+ B(JumpIfToBooleanTrue), U8(18),
B(LdaNamedProperty), R(5), U8(4), U8(8),
B(StaInArrayLiteral), R(2), R(1), U8(13),
B(Ldar), R(1),
B(Inc), U8(12),
- B(Star), R(1),
- B(JumpLoop), U8(33), I8(0),
+ B(Star1),
+ B(JumpLoop), U8(31), I8(0),
B(Ldar), R(2),
/* 71 S> */ B(Return),
]
@@ -183,14 +183,14 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 24
+bytecode array length: 21
bytecodes: [
/* 42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
- B(Star), R(0),
+ B(Star0),
/* 64 S> */ B(CreateArrayFromIterable),
- B(Star), R(2),
+ B(Star2),
B(LdaNamedProperty), R(2), U8(1), U8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(3),
B(StaInArrayLiteral), R(2), R(1), U8(3),
B(Ldar), R(2),
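The golden updates above reflect the new short register-store bytecodes: "B(Star), R(n)" (an opcode byte plus a one-byte register operand) becomes the single-byte "B(Star_n)", which is why the bytecode array lengths and the jump offsets spanning those stores shrink. A standalone sketch of the size difference (plain C++, not V8's encoder; it only assumes the 1-byte opcode and U8/I8 operand widths shown in the goldens):

// Standalone sketch: each Star -> Star_n rewrite saves one operand byte.
#include <cstddef>
#include <iostream>
#include <vector>

struct Bytecode {
  const char* name;
  std::size_t operand_bytes;
};

std::size_t SizeOf(const std::vector<Bytecode>& stream) {
  std::size_t size = 0;
  for (const Bytecode& b : stream) size += 1 /* opcode */ + b.operand_bytes;
  return size;
}

int main() {
  // "LdaSmi 1; Star r0" with the generic store vs. the short form.
  std::vector<Bytecode> generic = {{"LdaSmi", 1}, {"Star", 1}};
  std::vector<Bytecode> short_form = {{"LdaSmi", 1}, {"Star0", 0}};
  std::cout << "generic: " << SizeOf(generic) << " bytes, short form: "
            << SizeOf(short_form) << " bytes\n";  // 4 vs. 3
  return 0;
}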
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
index 826018e952..6f2b1bf616 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
@@ -12,20 +12,20 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 24
+bytecode array length: 18
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 49 S> */ B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
/* 52 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
B(LdaSmi), I8(3),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(4),
- B(Star), R(0),
+ B(Star0),
B(LdaSmi), I8(5),
- B(Star), R(1),
+ B(Star1),
/* 88 S> */ B(Return),
]
constant pool: [
@@ -41,13 +41,13 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 11
+bytecode array length: 8
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(55),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(LdaSmi), I8(100),
- B(Star), R(0),
- B(Star), R(1),
+ B(Star0),
+ B(Star1),
/* 74 S> */ B(Return),
]
constant pool: [
@@ -63,19 +63,19 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 26
+bytecode array length: 21
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(55),
- B(Star), R(0),
+ B(Star0),
/* 46 S> */ B(LdaSmi), I8(100),
B(Mov), R(0), R(1),
- B(Star), R(0),
+ B(Star0),
/* 52 E> */ B(Add), R(1), U8(0),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(101),
- B(Star), R(0),
+ B(Star0),
/* 64 E> */ B(Add), R(1), U8(1),
- B(Star), R(0),
+ B(Star0),
/* 86 S> */ B(Return),
]
constant pool: [
@@ -92,20 +92,20 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 27
+bytecode array length: 21
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(55),
- B(Star), R(0),
+ B(Star0),
/* 46 S> */ B(LdaSmi), I8(56),
- B(Star), R(0),
+ B(Star0),
/* 59 E> */ B(Sub), R(0), U8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(57),
- B(Star), R(0),
+ B(Star0),
/* 63 E> */ B(Add), R(1), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 75 S> */ B(Inc), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 89 S> */ B(Return),
]
constant pool: [
@@ -121,23 +121,23 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 35
+bytecode array length: 28
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(55),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(LdaSmi), I8(1),
B(Mov), R(0), R(2),
- B(Star), R(0),
+ B(Star0),
/* 56 E> */ B(Add), R(2), U8(0),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 66 E> */ B(Add), R(2), U8(1),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(3),
- B(Star), R(0),
+ B(Star0),
/* 76 E> */ B(Add), R(2), U8(2),
- B(Star), R(1),
+ B(Star1),
/* 96 S> */ B(Return),
]
constant pool: [
@@ -153,23 +153,23 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 35
+bytecode array length: 28
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(55),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(LdaSmi), I8(1),
B(Mov), R(0), R(1),
- B(Star), R(0),
+ B(Star0),
/* 56 E> */ B(Add), R(1), U8(0),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 66 E> */ B(Add), R(1), U8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(3),
- B(Star), R(0),
+ B(Star0),
/* 76 E> */ B(Add), R(1), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 96 S> */ B(Return),
]
constant pool: [
@@ -184,37 +184,37 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 72
+bytecode array length: 59
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(10),
- B(Star), R(0),
+ B(Star0),
/* 50 S> */ B(LdaSmi), I8(20),
- B(Star), R(1),
+ B(Star1),
/* 54 S> */ B(LdaSmi), I8(1),
B(Mov), R(0), R(2),
- B(Star), R(0),
+ B(Star0),
/* 63 E> */ B(Add), R(2), U8(0),
- B(Star), R(2),
+ B(Star2),
B(Ldar), R(0),
/* 78 E> */ B(AddSmi), I8(1), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(2),
- B(Star), R(1),
+ B(Star1),
/* 83 E> */ B(Mul), R(3), U8(1),
/* 73 E> */ B(Add), R(2), U8(3),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(3),
- B(Star), R(1),
+ B(Star1),
/* 93 E> */ B(Add), R(2), U8(4),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(4),
- B(Star), R(0),
+ B(Star0),
/* 103 E> */ B(Add), R(2), U8(5),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(5),
- B(Star), R(1),
+ B(Star1),
/* 113 E> */ B(Add), R(2), U8(6),
- B(Star), R(2),
+ B(Star2),
B(Ldar), R(1),
/* 123 E> */ B(Add), R(2), U8(7),
/* 127 S> */ B(Return),
@@ -231,26 +231,26 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 42
+bytecode array length: 35
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(17),
- B(Star), R(0),
+ B(Star0),
/* 46 S> */ B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
/* 55 E> */ B(Add), R(1), U8(0),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
B(ToNumeric), U8(1),
- B(Star), R(2),
+ B(Star2),
B(Inc), U8(1),
- B(Star), R(0),
+ B(Star0),
B(Ldar), R(2),
/* 59 E> */ B(Add), R(1), U8(2),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
B(Inc), U8(3),
- B(Star), R(0),
+ B(Star0),
/* 67 E> */ B(Add), R(1), U8(4),
/* 75 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
index a055e87822..a127da4b7f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
@@ -14,64 +14,64 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 144
+bytecode array length: 130
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(1),
B(Mov), R(this), R(2),
/* 17 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
- B(Star), R(0),
+ B(Star0),
B(Mov), R(context), R(3),
B(Mov), R(context), R(4),
B(Ldar), R(0),
/* 17 E> */ B(SuspendGenerator), R(0), R(0), U8(5), U8(0),
B(ResumeGenerator), R(0), R(0), U8(5),
- B(Star), R(5),
+ B(Star5),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
B(Ldar), R(5),
/* 17 E> */ B(Throw),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(Mov), R(5), R(2),
- B(Jump), U8(50),
+ B(Jump), U8(41),
B(LdaUndefined),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
- B(Jump), U8(41),
- B(Star), R(5),
+ B(Star1),
+ B(Jump), U8(34),
+ B(Star5),
B(CreateCatchContext), R(5), U8(3),
- B(Star), R(4),
+ B(Star4),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(4),
B(PushContext), R(5),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(0), R(6),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorReject), R(6), U8(2),
B(PopContext), R(5),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(2),
- B(Star), R(1),
- B(Jump), U8(7),
- B(Star), R(2),
+ B(Star1),
+ B(Jump), U8(5),
+ B(Star2),
B(LdaZero),
- B(Star), R(1),
+ B(Star1),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(3),
+ B(Star3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorClose), R(0), U8(1),
B(Ldar), R(3),
B(SetPendingMessage),
B(Ldar), R(1),
B(SwitchOnSmiNoFeedback), U8(4), U8(3), I8(0),
- B(Jump), U8(22),
+ B(Jump), U8(21),
B(Ldar), R(2),
B(ReThrow),
B(LdaTrue),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(0), R(4),
B(Mov), R(2), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(4), U8(3),
@@ -82,17 +82,17 @@ bytecodes: [
/* 22 S> */ B(Return),
]
constant pool: [
- Smi [29],
- Smi [16],
+ Smi [28],
+ Smi [15],
Smi [7],
SCOPE_INFO_TYPE,
Smi [6],
Smi [9],
- Smi [23],
+ Smi [22],
]
handlers: [
- [19, 98, 98],
- [22, 64, 64],
+ [18, 88, 88],
+ [21, 59, 59],
]
---
@@ -102,81 +102,81 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 189
+bytecode array length: 171
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(1),
B(Mov), R(this), R(2),
/* 17 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
- B(Star), R(0),
+ B(Star0),
B(Mov), R(context), R(3),
B(Mov), R(context), R(4),
B(Ldar), R(0),
/* 17 E> */ B(SuspendGenerator), R(0), R(0), U8(5), U8(0),
B(ResumeGenerator), R(0), R(0), U8(5),
- B(Star), R(5),
+ B(Star5),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(5),
/* 17 E> */ B(Throw),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(Mov), R(5), R(2),
- B(Jump), U8(95),
+ B(Jump), U8(82),
/* 22 S> */ B(LdaSmi), I8(42),
- B(Star), R(6),
+ B(Star6),
B(LdaFalse),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(0), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(5), U8(3),
/* 22 E> */ B(SuspendGenerator), R(0), R(0), U8(5), U8(1),
B(ResumeGenerator), R(0), R(0), U8(5),
- B(Star), R(5),
+ B(Star5),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(4), U8(2), I8(0),
B(Ldar), R(5),
/* 22 E> */ B(Throw),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(Mov), R(5), R(2),
- B(Jump), U8(50),
+ B(Jump), U8(41),
B(LdaUndefined),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
- B(Jump), U8(41),
- B(Star), R(5),
+ B(Star1),
+ B(Jump), U8(34),
+ B(Star5),
B(CreateCatchContext), R(5), U8(6),
- B(Star), R(4),
+ B(Star4),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(4),
B(PushContext), R(5),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(0), R(6),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorReject), R(6), U8(2),
B(PopContext), R(5),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(2),
- B(Star), R(1),
- B(Jump), U8(7),
- B(Star), R(2),
+ B(Star1),
+ B(Jump), U8(5),
+ B(Star2),
B(LdaZero),
- B(Star), R(1),
+ B(Star1),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(3),
+ B(Star3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorClose), R(0), U8(1),
B(Ldar), R(3),
B(SetPendingMessage),
B(Ldar), R(1),
B(SwitchOnSmiNoFeedback), U8(7), U8(3), I8(0),
- B(Jump), U8(22),
+ B(Jump), U8(21),
B(Ldar), R(2),
B(ReThrow),
B(LdaTrue),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(0), R(4),
B(Mov), R(2), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(4), U8(3),
@@ -187,20 +187,20 @@ bytecodes: [
/* 31 S> */ B(Return),
]
constant pool: [
- Smi [29],
- Smi [74],
- Smi [16],
+ Smi [28],
+ Smi [69],
+ Smi [15],
Smi [7],
- Smi [16],
+ Smi [15],
Smi [7],
SCOPE_INFO_TYPE,
Smi [6],
Smi [9],
- Smi [23],
+ Smi [22],
]
handlers: [
- [19, 143, 143],
- [22, 109, 109],
+ [18, 129, 129],
+ [21, 100, 100],
]
---
@@ -210,50 +210,50 @@ snippet: "
"
frame size: 18
parameter count: 1
-bytecode array length: 341
+bytecode array length: 310
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
/* 17 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
- B(Star), R(0),
+ B(Star0),
B(Mov), R(context), R(6),
B(Mov), R(context), R(7),
B(Ldar), R(0),
/* 17 E> */ B(SuspendGenerator), R(0), R(0), U8(8), U8(0),
B(ResumeGenerator), R(0), R(0), U8(8),
- B(Star), R(8),
+ B(Star8),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(8),
/* 17 E> */ B(Throw),
B(LdaSmi), I8(1),
- B(Star), R(4),
+ B(Star4),
B(Mov), R(8), R(5),
- B(Jump), U8(247),
+ B(Jump), U8(221),
/* 36 S> */ B(CreateArrayLiteral), U8(4), U8(0), U8(37),
- B(Star), R(10),
+ B(Star10),
B(GetIterator), R(10), U8(1), U8(3),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(9),
+ B(Star9),
B(LdaNamedProperty), R(9), U8(5), U8(5),
- B(Star), R(8),
+ B(Star8),
B(LdaFalse),
- B(Star), R(10),
+ B(Star10),
B(Mov), R(context), R(13),
B(LdaTrue),
- B(Star), R(10),
+ B(Star10),
/* 31 S> */ B(CallProperty0), R(8), R(9), U8(7),
- B(Star), R(14),
+ B(Star14),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
B(LdaNamedProperty), R(14), U8(6), U8(9),
- B(JumpIfToBooleanTrue), U8(66),
+ B(JumpIfToBooleanTrue), U8(62),
B(LdaNamedProperty), R(14), U8(7), U8(11),
- B(Star), R(14),
+ B(Star14),
B(LdaFalse),
- B(Star), R(10),
+ B(Star10),
B(Mov), R(14), R(1),
/* 31 S> */ B(Mov), R(1), R(3),
/* 42 S> */ B(LdaFalse),
@@ -263,39 +263,39 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(15), U8(3),
/* 42 E> */ B(SuspendGenerator), R(0), R(0), U8(15), U8(1),
B(ResumeGenerator), R(0), R(0), U8(15),
- B(Star), R(15),
+ B(Star15),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(8), U8(2), I8(0),
B(Ldar), R(15),
/* 42 E> */ B(Throw),
B(LdaSmi), I8(1),
- B(Star), R(11),
+ B(Star11),
B(Mov), R(15), R(12),
- B(Jump), U8(20),
+ B(Jump), U8(16),
B(Ldar), R(15),
- /* 22 E> */ B(JumpLoop), U8(83), I8(0),
+ /* 22 E> */ B(JumpLoop), U8(77), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(12),
- B(Star), R(11),
- B(Jump), U8(7),
- B(Star), R(12),
+ B(Star12),
+ B(Star11),
+ B(Jump), U8(5),
+ B(Star12),
B(LdaZero),
- B(Star), R(11),
+ B(Star11),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(13),
+ B(Star13),
B(Ldar), R(10),
- B(JumpIfToBooleanTrue), U8(38),
+ B(JumpIfToBooleanTrue), U8(37),
B(Mov), R(context), R(15),
B(LdaNamedProperty), R(9), U8(10), U8(13),
- B(JumpIfUndefinedOrNull), U8(29),
+ B(JumpIfUndefinedOrNull), U8(28),
B(Star), R(16),
B(CallProperty0), R(16), R(9), U8(15),
- B(JumpIfJSReceiver), U8(21),
+ B(JumpIfJSReceiver), U8(20),
B(Star), R(17),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(17), U8(1),
- B(Jump), U8(12),
- B(Star), R(15),
+ B(Jump), U8(11),
+ B(Star15),
B(LdaZero),
B(TestReferenceEqual), R(11),
B(JumpIfTrue), U8(5),
@@ -305,50 +305,50 @@ bytecodes: [
B(SetPendingMessage),
B(Ldar), R(11),
B(SwitchOnSmiNoFeedback), U8(11), U8(2), I8(0),
- B(Jump), U8(14),
+ B(Jump), U8(13),
B(Ldar), R(12),
B(ReThrow),
B(LdaSmi), I8(1),
- B(Star), R(4),
+ B(Star4),
B(Mov), R(12), R(5),
- B(Jump), U8(50),
+ B(Jump), U8(41),
B(LdaUndefined),
- B(Star), R(5),
+ B(Star5),
B(LdaSmi), I8(1),
- B(Star), R(4),
- B(Jump), U8(41),
- B(Star), R(8),
+ B(Star4),
+ B(Jump), U8(34),
+ B(Star8),
B(CreateCatchContext), R(8), U8(13),
- B(Star), R(7),
+ B(Star7),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(7),
B(PushContext), R(8),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(10),
+ B(Star10),
B(Mov), R(0), R(9),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorReject), R(9), U8(2),
B(PopContext), R(8),
- B(Star), R(5),
+ B(Star5),
B(LdaSmi), I8(2),
- B(Star), R(4),
- B(Jump), U8(7),
- B(Star), R(5),
+ B(Star4),
+ B(Jump), U8(5),
+ B(Star5),
B(LdaZero),
- B(Star), R(4),
+ B(Star4),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(6),
+ B(Star6),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorClose), R(0), U8(1),
B(Ldar), R(6),
B(SetPendingMessage),
B(Ldar), R(4),
B(SwitchOnSmiNoFeedback), U8(14), U8(3), I8(0),
- B(Jump), U8(22),
+ B(Jump), U8(21),
B(Ldar), R(5),
B(ReThrow),
B(LdaTrue),
- B(Star), R(9),
+ B(Star9),
B(Mov), R(0), R(7),
B(Mov), R(5), R(8),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(7), U8(3),
@@ -359,15 +359,15 @@ bytecodes: [
/* 50 S> */ B(Return),
]
constant pool: [
- Smi [29],
- Smi [141],
- Smi [16],
+ Smi [28],
+ Smi [130],
+ Smi [15],
Smi [7],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
- Smi [16],
+ Smi [15],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
Smi [6],
@@ -375,13 +375,13 @@ constant pool: [
SCOPE_INFO_TYPE,
Smi [6],
Smi [9],
- Smi [23],
+ Smi [22],
]
handlers: [
- [19, 295, 295],
- [22, 261, 261],
- [86, 172, 180],
- [196, 217, 219],
+ [18, 268, 268],
+ [21, 239, 239],
+ [79, 159, 165],
+ [178, 199, 201],
]
---
@@ -392,111 +392,111 @@ snippet: "
"
frame size: 17
parameter count: 1
-bytecode array length: 463
+bytecode array length: 422
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(5),
B(Mov), R(closure), R(1),
B(Mov), R(this), R(2),
/* 44 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
- B(Star), R(0),
+ B(Star0),
B(Mov), R(context), R(3),
B(Mov), R(context), R(4),
B(Ldar), R(0),
/* 44 E> */ B(SuspendGenerator), R(0), R(0), U8(5), U8(0),
B(ResumeGenerator), R(0), R(0), U8(5),
- B(Star), R(5),
+ B(Star5),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(5), U8(2), I8(0),
B(Ldar), R(5),
/* 44 E> */ B(Throw),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(Mov), R(5), R(2),
B(JumpConstant), U8(18),
/* 49 S> */ B(LdaGlobal), U8(7), U8(0),
- B(Star), R(9),
+ B(Star9),
/* 56 E> */ B(CallUndefinedReceiver0), R(9), U8(2),
- B(Star), R(10),
+ B(Star10),
B(LdaNamedProperty), R(10), U8(8), U8(4),
- B(JumpIfUndefinedOrNull), U8(15),
- B(Star), R(11),
+ B(JumpIfUndefinedOrNull), U8(14),
+ B(Star11),
B(CallProperty0), R(11), R(10), U8(6),
- B(JumpIfJSReceiver), U8(23),
+ B(JumpIfJSReceiver), U8(21),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
B(LdaNamedProperty), R(10), U8(9), U8(8),
- B(Star), R(11),
+ B(Star11),
B(CallProperty0), R(11), R(10), U8(10),
- B(Star), R(11),
+ B(Star11),
B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(11), U8(1),
- B(Star), R(7),
+ B(Star7),
B(LdaNamedProperty), R(7), U8(10), U8(12),
- B(Star), R(9),
+ B(Star9),
B(LdaUndefined),
- B(Star), R(8),
+ B(Star8),
B(LdaZero),
- B(Star), R(6),
+ B(Star6),
B(Ldar), R(6),
B(SwitchOnSmiNoFeedback), U8(11), U8(2), I8(1),
B(CallProperty1), R(9), R(7), R(8), U8(14),
- B(Jump), U8(140),
+ B(Jump), U8(130),
B(LdaNamedProperty), R(7), U8(13), U8(16),
- B(JumpIfUndefinedOrNull), U8(11),
- B(Star), R(10),
+ B(JumpIfUndefinedOrNull), U8(10),
+ B(Star10),
B(CallProperty1), R(10), R(7), R(8), U8(18),
- B(Jump), U8(125),
+ B(Jump), U8(116),
B(Mov), R(0), R(10),
B(Mov), R(8), R(11),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(10), U8(2),
/* 49 E> */ B(SuspendGenerator), R(0), R(0), U8(10), U8(1),
B(ResumeGenerator), R(0), R(0), U8(10),
- B(Star), R(10),
+ B(Star10),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(11),
+ B(Star11),
B(LdaZero),
B(TestReferenceEqual), R(11),
B(JumpIfTrue), U8(5),
B(Ldar), R(10),
B(ReThrow),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(Mov), R(10), R(2),
- B(Jump), U8(238),
+ B(Jump), U8(215),
B(LdaNamedProperty), R(7), U8(14), U8(20),
- B(JumpIfUndefinedOrNull), U8(11),
- B(Star), R(12),
+ B(JumpIfUndefinedOrNull), U8(10),
+ B(Star12),
B(CallProperty1), R(12), R(7), R(8), U8(22),
- B(Jump), U8(66),
+ B(Jump), U8(61),
B(LdaNamedProperty), R(7), U8(13), U8(24),
- B(JumpIfUndefinedOrNull), U8(55),
- B(Star), R(12),
+ B(JumpIfUndefinedOrNull), U8(50),
+ B(Star12),
B(CallProperty0), R(12), R(7), U8(26),
B(Jump), U8(2),
- B(Star), R(13),
+ B(Star13),
B(Mov), R(0), R(12),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(12), U8(2),
/* 49 E> */ B(SuspendGenerator), R(0), R(0), U8(12), U8(2),
B(ResumeGenerator), R(0), R(0), U8(12),
- B(Star), R(12),
+ B(Star12),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(13),
+ B(Star13),
B(LdaZero),
B(TestReferenceEqual), R(13),
B(JumpIfTrue), U8(5),
B(Ldar), R(12),
B(ReThrow),
B(Ldar), R(12),
- B(JumpIfJSReceiver), U8(9),
- B(Star), R(14),
+ B(JumpIfJSReceiver), U8(8),
+ B(Star14),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
B(CallRuntime), U16(Runtime::kThrowThrowMethodMissing), R(0), U8(0),
- B(Star), R(13),
+ B(Star13),
B(Mov), R(0), R(12),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(12), U8(2),
/* 49 E> */ B(SuspendGenerator), R(0), R(0), U8(12), U8(3),
B(ResumeGenerator), R(0), R(0), U8(12),
- B(Star), R(12),
+ B(Star12),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(13),
+ B(Star13),
B(LdaZero),
B(TestReferenceEqual), R(13),
B(JumpIfTrue), U8(5),
@@ -507,65 +507,65 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
B(LdaNamedProperty), R(5), U8(15), U8(28),
- B(JumpIfToBooleanTrue), U8(38),
+ B(JumpIfToBooleanTrue), U8(35),
B(LdaNamedProperty), R(5), U8(16), U8(30),
- B(Star), R(15),
+ B(Star15),
B(LdaFalse),
B(Star), R(16),
B(Mov), R(0), R(14),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(14), U8(3),
/* 49 E> */ B(SuspendGenerator), R(0), R(0), U8(14), U8(4),
B(ResumeGenerator), R(0), R(0), U8(14),
- B(Star), R(8),
+ B(Star8),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(6),
- B(JumpLoop), U8(236), I8(0),
+ B(Star6),
+ B(JumpLoop), U8(220), I8(0),
B(LdaNamedProperty), R(5), U8(16), U8(32),
- B(Star), R(7),
+ B(Star7),
B(LdaSmi), I8(1),
B(TestReferenceEqual), R(6),
- B(JumpIfFalse), U8(11),
+ B(JumpIfFalse), U8(10),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(Mov), R(7), R(2),
- B(Jump), U8(50),
+ B(Jump), U8(41),
B(LdaUndefined),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
- B(Jump), U8(41),
- B(Star), R(5),
+ B(Star1),
+ B(Jump), U8(34),
+ B(Star5),
B(CreateCatchContext), R(5), U8(17),
- B(Star), R(4),
+ B(Star4),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(4),
B(PushContext), R(5),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(0), R(6),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorReject), R(6), U8(2),
B(PopContext), R(5),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(2),
- B(Star), R(1),
- B(Jump), U8(7),
- B(Star), R(2),
+ B(Star1),
+ B(Jump), U8(5),
+ B(Star2),
B(LdaZero),
- B(Star), R(1),
+ B(Star1),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(3),
+ B(Star3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorClose), R(0), U8(1),
B(Ldar), R(3),
B(SetPendingMessage),
B(Ldar), R(1),
B(SwitchOnSmiNoFeedback), U8(19), U8(3), I8(0),
- B(Jump), U8(22),
+ B(Jump), U8(21),
B(Ldar), R(2),
B(ReThrow),
B(LdaTrue),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(0), R(4),
B(Mov), R(2), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(4), U8(3),
@@ -576,31 +576,31 @@ bytecodes: [
/* 60 S> */ B(Return),
]
constant pool: [
- Smi [29],
- Smi [157],
- Smi [229],
- Smi [279],
- Smi [338],
- Smi [16],
+ Smi [28],
+ Smi [144],
+ Smi [210],
+ Smi [256],
+ Smi [312],
+ Smi [15],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["g"],
SYMBOL_TYPE,
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
Smi [11],
- Smi [70],
+ Smi [66],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["throw"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
SCOPE_INFO_TYPE,
- Smi [369],
+ Smi [333],
Smi [6],
Smi [9],
- Smi [23],
+ Smi [22],
]
handlers: [
- [19, 417, 417],
- [22, 383, 383],
+ [18, 380, 380],
+ [21, 351, 351],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden
index 94d285aa15..a1cfdc3bc4 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden
@@ -14,16 +14,16 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 127
+bytecode array length: 116
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(2),
B(Mov), R(this), R(3),
/* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(2), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
B(ResumeGenerator), R(0), R(0), U8(2),
- B(Star), R(2),
+ B(Star2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(2),
@@ -34,50 +34,50 @@ bytecodes: [
B(Ldar), R(1),
B(Mov), R(context), R(2),
/* 0 S> */ B(LdaSmi), I8(42),
- B(Star), R(4),
+ B(Star4),
B(Mov), R(0), R(3),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(3), U8(2),
/* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(1),
B(ResumeGenerator), R(0), R(0), U8(3),
- B(Star), R(3),
+ B(Star3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(4),
+ B(Star4),
B(LdaZero),
B(TestReferenceEqual), R(4),
B(JumpIfTrue), U8(5),
B(Ldar), R(3),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(4),
+ B(Star4),
B(LdaTrue),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(0), R(3),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(3), U8(3),
/* 10 S> */ B(Return),
- B(Star), R(3),
+ B(Star3),
B(CreateCatchContext), R(3), U8(4),
- B(Star), R(2),
+ B(Star2),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(2),
B(PushContext), R(3),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(5),
+ B(Star5),
B(LdaTrue),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(0), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(3),
/* 10 S> */ B(Return),
]
constant pool: [
- Smi [21],
- Smi [65],
+ Smi [20],
+ Smi [62],
Smi [10],
Smi [7],
SCOPE_INFO_TYPE,
]
handlers: [
- [49, 99, 99],
+ [47, 92, 92],
]
---
@@ -86,16 +86,16 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 137
+bytecode array length: 125
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(2),
B(Mov), R(this), R(3),
/* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(2), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
B(ResumeGenerator), R(0), R(0), U8(2),
- B(Star), R(2),
+ B(Star2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(2),
@@ -106,54 +106,54 @@ bytecodes: [
B(Ldar), R(1),
B(Mov), R(context), R(2),
/* 0 S> */ B(LdaConstant), U8(4),
- B(Star), R(4),
+ B(Star4),
B(Mov), R(closure), R(3),
B(CallRuntime), U16(Runtime::kDynamicImportCall), R(3), U8(2),
- B(Star), R(4),
+ B(Star4),
B(Mov), R(0), R(3),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(3), U8(2),
/* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(1),
B(ResumeGenerator), R(0), R(0), U8(3),
- B(Star), R(3),
+ B(Star3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(4),
+ B(Star4),
B(LdaZero),
B(TestReferenceEqual), R(4),
B(JumpIfTrue), U8(5),
B(Ldar), R(3),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(4),
+ B(Star4),
B(LdaTrue),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(0), R(3),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(3), U8(3),
/* 21 S> */ B(Return),
- B(Star), R(3),
+ B(Star3),
B(CreateCatchContext), R(3), U8(5),
- B(Star), R(2),
+ B(Star2),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(2),
B(PushContext), R(3),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(5),
+ B(Star5),
B(LdaTrue),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(0), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(3),
/* 21 S> */ B(Return),
]
constant pool: [
- Smi [21],
- Smi [75],
+ Smi [20],
+ Smi [71],
Smi [10],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["foo"],
SCOPE_INFO_TYPE,
]
handlers: [
- [49, 109, 109],
+ [47, 101, 101],
]
---
@@ -166,19 +166,19 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 138
+bytecode array length: 126
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(3),
B(Mov), R(this), R(4),
/* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(3), U8(2),
- B(Star), R(0),
+ B(Star0),
B(CreateClosure), U8(2), U8(0), U8(0),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
/* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
B(ResumeGenerator), R(0), R(0), U8(3),
- B(Star), R(3),
+ B(Star3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(3), U8(2), I8(0),
B(Ldar), R(3),
@@ -189,14 +189,14 @@ bytecodes: [
B(Ldar), R(2),
B(Mov), R(context), R(3),
/* 0 S> */ B(LdaSmi), I8(42),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(0), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(4), U8(2),
/* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(1),
B(ResumeGenerator), R(0), R(0), U8(4),
- B(Star), R(4),
+ B(Star4),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(5),
+ B(Star5),
B(LdaZero),
B(TestReferenceEqual), R(5),
B(JumpIfTrue), U8(5),
@@ -204,37 +204,37 @@ bytecodes: [
B(ReThrow),
/* 47 S> */ B(CallUndefinedReceiver0), R(1), U8(0),
B(LdaUndefined),
- B(Star), R(5),
+ B(Star5),
B(LdaTrue),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(0), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(4), U8(3),
/* 54 S> */ B(Return),
- B(Star), R(4),
+ B(Star4),
B(CreateCatchContext), R(4), U8(5),
- B(Star), R(3),
+ B(Star3),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(3),
B(PushContext), R(4),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(6),
+ B(Star6),
B(LdaTrue),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(0), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(3),
/* 54 S> */ B(Return),
]
constant pool: [
- Smi [29],
- Smi [73],
+ Smi [27],
+ Smi [69],
SHARED_FUNCTION_INFO_TYPE,
Smi [10],
Smi [7],
SCOPE_INFO_TYPE,
]
handlers: [
- [57, 110, 110],
+ [54, 102, 102],
]
---
@@ -244,21 +244,21 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 149
+bytecode array length: 135
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(3),
B(Mov), R(this), R(4),
/* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(3), U8(2),
- B(Star), R(0),
+ B(Star0),
B(LdaZero),
- B(Star), R(3),
+ B(Star3),
B(CallRuntime), U16(Runtime::kGetModuleNamespace), R(3), U8(1),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
/* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
B(ResumeGenerator), R(0), R(0), U8(3),
- B(Star), R(3),
+ B(Star3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(3),
@@ -269,53 +269,53 @@ bytecodes: [
B(Ldar), R(2),
B(Mov), R(context), R(3),
/* 28 S> */ B(LdaConstant), U8(4),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(closure), R(4),
B(CallRuntime), U16(Runtime::kDynamicImportCall), R(4), U8(2),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(0), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(4), U8(2),
/* 28 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(1),
B(ResumeGenerator), R(0), R(0), U8(4),
- B(Star), R(4),
+ B(Star4),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(5),
+ B(Star5),
B(LdaZero),
B(TestReferenceEqual), R(5),
B(JumpIfTrue), U8(5),
B(Ldar), R(4),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(5),
+ B(Star5),
B(LdaTrue),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(0), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(4), U8(3),
/* 49 S> */ B(Return),
- B(Star), R(4),
+ B(Star4),
B(CreateCatchContext), R(4), U8(5),
- B(Star), R(3),
+ B(Star3),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(3),
B(PushContext), R(4),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(6),
+ B(Star6),
B(LdaTrue),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(0), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(3),
/* 49 S> */ B(Return),
]
constant pool: [
- Smi [33],
- Smi [87],
+ Smi [30],
+ Smi [81],
Smi [10],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["goo"],
SCOPE_INFO_TYPE,
]
handlers: [
- [61, 121, 121],
+ [57, 111, 111],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden
index 61e3ff9e4f..bb428c9c74 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden
@@ -11,10 +11,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 17
+bytecode array length: 16
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(JumpIfToBooleanTrue), U8(8),
B(LdaZero),
/* 56 E> */ B(TestLessThan), R(0), U8(0),
@@ -35,10 +35,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 17
+bytecode array length: 16
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(JumpIfToBooleanFalse), U8(11),
B(LdaZero),
/* 56 E> */ B(TestLessThan), R(0), U8(0),
@@ -59,10 +59,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 22
+bytecode array length: 20
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(JumpIfToBooleanTrue), U8(8),
B(LdaZero),
/* 57 E> */ B(TestLessThan), R(0), U8(0),
@@ -70,7 +70,7 @@ bytecodes: [
B(LdaSmi), I8(2),
B(Jump), U8(4),
B(LdaSmi), I8(3),
- B(Star), R(0),
+ B(Star0),
B(LdaUndefined),
/* 71 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
index 4d53c3afa5..c8fa67e343 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
@@ -13,10 +13,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 97 S> */ B(Return),
]
constant pool: [
@@ -34,10 +34,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 86 S> */ B(Return),
]
constant pool: [
@@ -59,21 +59,21 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 52
+bytecode array length: 48
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
/* 65 S> */ B(LdaSmi), I8(10),
/* 65 E> */ B(TestLessThan), R(0), U8(0),
- B(JumpIfFalse), U8(37),
+ B(JumpIfFalse), U8(35),
/* 75 S> */ B(Ldar), R(1),
/* 81 E> */ B(MulSmi), I8(12), U8(1),
- B(Star), R(1),
+ B(Star1),
/* 89 S> */ B(Ldar), R(0),
/* 95 E> */ B(AddSmi), I8(1), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 102 S> */ B(LdaSmi), I8(3),
/* 108 E> */ B(TestEqual), R(0), U8(3),
B(JumpIfFalse), U8(4),
@@ -82,7 +82,7 @@ bytecodes: [
/* 132 E> */ B(TestEqual), R(0), U8(4),
B(JumpIfFalse), U8(4),
/* 138 S> */ B(Jump), U8(5),
- /* 56 E> */ B(JumpLoop), U8(39), I8(0),
+ /* 56 E> */ B(JumpLoop), U8(37), I8(0),
/* 147 S> */ B(Ldar), R(1),
/* 156 S> */ B(Return),
]
@@ -106,34 +106,34 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 60
+bytecode array length: 58
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 62 S> */ B(LdaZero),
/* 68 E> */ B(TestLessThan), R(0), U8(0),
B(JumpIfFalse), U8(4),
- /* 73 S> */ B(Jump), U8(45),
+ /* 73 S> */ B(Jump), U8(44),
/* 85 S> */ B(LdaSmi), I8(3),
/* 91 E> */ B(TestEqual), R(0), U8(1),
B(JumpIfFalse), U8(4),
- /* 97 S> */ B(Jump), U8(39),
+ /* 97 S> */ B(Jump), U8(38),
/* 106 S> */ B(LdaSmi), I8(4),
/* 112 E> */ B(TestEqual), R(0), U8(2),
B(JumpIfFalse), U8(4),
- /* 118 S> */ B(Jump), U8(30),
+ /* 118 S> */ B(Jump), U8(29),
/* 127 S> */ B(LdaSmi), I8(10),
/* 133 E> */ B(TestEqual), R(0), U8(3),
B(JumpIfFalse), U8(4),
- /* 140 S> */ B(Jump), U8(18),
+ /* 140 S> */ B(Jump), U8(17),
/* 152 S> */ B(LdaSmi), I8(5),
/* 158 E> */ B(TestEqual), R(0), U8(4),
B(JumpIfFalse), U8(4),
- /* 164 S> */ B(Jump), U8(12),
+ /* 164 S> */ B(Jump), U8(11),
/* 173 S> */ B(Ldar), R(0),
/* 179 E> */ B(AddSmi), I8(1), U8(5),
- B(Star), R(0),
- /* 45 E> */ B(JumpLoop), U8(51), I8(0),
+ B(Star0),
+ /* 45 E> */ B(JumpLoop), U8(50), I8(0),
/* 186 S> */ B(Ldar), R(0),
/* 195 S> */ B(Return),
]
@@ -157,26 +157,26 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 43
+bytecode array length: 40
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 71 S> */ B(LdaSmi), I8(3),
/* 71 E> */ B(TestLessThan), R(0), U8(0),
- B(JumpIfFalse), U8(20),
+ B(JumpIfFalse), U8(19),
/* 82 S> */ B(LdaSmi), I8(2),
/* 88 E> */ B(TestEqual), R(0), U8(1),
B(JumpIfFalse), U8(4),
- /* 94 S> */ B(Jump), U8(11),
+ /* 94 S> */ B(Jump), U8(10),
/* 105 S> */ B(Ldar), R(0),
/* 111 E> */ B(AddSmi), I8(1), U8(2),
- B(Star), R(0),
- B(Jump), U8(11),
+ B(Star0),
+ B(Jump), U8(10),
/* 122 S> */ B(Ldar), R(0),
/* 128 E> */ B(AddSmi), I8(1), U8(3),
- B(Star), R(0),
+ B(Star0),
/* 135 S> */ B(Jump), U8(5),
- /* 45 E> */ B(JumpLoop), U8(34), I8(0),
+ /* 45 E> */ B(JumpLoop), U8(32), I8(0),
/* 144 S> */ B(Ldar), R(0),
/* 153 S> */ B(Return),
]
@@ -197,21 +197,21 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 32
+bytecode array length: 28
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(10),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
/* 64 S> */ B(Ldar), R(0),
- B(JumpIfToBooleanFalse), U8(19),
+ B(JumpIfToBooleanFalse), U8(17),
/* 71 S> */ B(Ldar), R(1),
/* 77 E> */ B(MulSmi), I8(12), U8(0),
- B(Star), R(1),
+ B(Star1),
/* 85 S> */ B(Ldar), R(0),
/* 91 E> */ B(SubSmi), I8(1), U8(1),
- B(Star), R(0),
- /* 57 E> */ B(JumpLoop), U8(18), I8(0),
+ B(Star0),
+ /* 57 E> */ B(JumpLoop), U8(16), I8(0),
/* 98 S> */ B(Ldar), R(1),
/* 107 S> */ B(Return),
]
@@ -233,30 +233,30 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 52
+bytecode array length: 48
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
/* 63 S> */ B(Ldar), R(1),
/* 69 E> */ B(MulSmi), I8(10), U8(0),
- B(Star), R(1),
+ B(Star1),
/* 77 S> */ B(LdaSmi), I8(5),
/* 83 E> */ B(TestEqual), R(0), U8(1),
B(JumpIfFalse), U8(4),
- /* 89 S> */ B(Jump), U8(28),
+ /* 89 S> */ B(Jump), U8(27),
/* 98 S> */ B(LdaSmi), I8(6),
/* 104 E> */ B(TestEqual), R(0), U8(2),
B(JumpIfFalse), U8(4),
- /* 110 S> */ B(Jump), U8(9),
+ /* 110 S> */ B(Jump), U8(8),
/* 122 S> */ B(Ldar), R(0),
/* 128 E> */ B(AddSmi), I8(1), U8(3),
- B(Star), R(0),
+ B(Star0),
/* 144 S> */ B(LdaSmi), I8(10),
/* 144 E> */ B(TestLessThan), R(0), U8(4),
B(JumpIfFalse), U8(5),
- /* 56 E> */ B(JumpLoop), U8(39), I8(0),
+ /* 56 E> */ B(JumpLoop), U8(37), I8(0),
/* 151 S> */ B(Ldar), R(1),
/* 160 S> */ B(Return),
]
@@ -277,20 +277,20 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 30
+bytecode array length: 26
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(10),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
/* 64 S> */ B(Ldar), R(1),
/* 70 E> */ B(MulSmi), I8(12), U8(0),
- B(Star), R(1),
+ B(Star1),
/* 78 S> */ B(Ldar), R(0),
/* 84 E> */ B(SubSmi), I8(1), U8(1),
- B(Star), R(0),
+ B(Star0),
/* 98 S> */ B(JumpIfToBooleanFalse), U8(5),
- /* 57 E> */ B(JumpLoop), U8(16), I8(0),
+ /* 57 E> */ B(JumpLoop), U8(14), I8(0),
/* 102 S> */ B(Ldar), R(1),
/* 111 S> */ B(Return),
]
@@ -312,21 +312,21 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 40
+bytecode array length: 36
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
/* 69 S> */ B(MulSmi), I8(10), U8(0),
- B(Star), R(1),
+ B(Star1),
/* 77 S> */ B(LdaSmi), I8(5),
/* 83 E> */ B(TestEqual), R(0), U8(1),
B(JumpIfFalse), U8(4),
- /* 89 S> */ B(Jump), U8(18),
+ /* 89 S> */ B(Jump), U8(17),
/* 98 S> */ B(Ldar), R(0),
/* 104 E> */ B(AddSmi), I8(1), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 111 S> */ B(LdaSmi), I8(6),
/* 117 E> */ B(TestEqual), R(0), U8(3),
B(JumpIfFalse), U8(4),
@@ -352,27 +352,27 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 45
+bytecode array length: 41
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
/* 63 S> */ B(Ldar), R(1),
/* 69 E> */ B(MulSmi), I8(10), U8(0),
- B(Star), R(1),
+ B(Star1),
/* 77 S> */ B(LdaSmi), I8(5),
/* 83 E> */ B(TestEqual), R(0), U8(1),
B(JumpIfFalse), U8(4),
- /* 89 S> */ B(Jump), U8(21),
+ /* 89 S> */ B(Jump), U8(20),
/* 98 S> */ B(Ldar), R(0),
/* 104 E> */ B(AddSmi), I8(1), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 111 S> */ B(LdaSmi), I8(6),
/* 117 E> */ B(TestEqual), R(0), U8(3),
B(JumpIfFalse), U8(4),
/* 123 S> */ B(Jump), U8(2),
- /* 56 E> */ B(JumpLoop), U8(32), I8(0),
+ /* 56 E> */ B(JumpLoop), U8(30), I8(0),
/* 149 S> */ B(Ldar), R(1),
/* 158 S> */ B(Return),
]
@@ -392,22 +392,22 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 33
+bytecode array length: 31
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 58 S> */ B(LdaSmi), I8(1),
/* 64 E> */ B(TestEqual), R(0), U8(0),
B(JumpIfFalse), U8(4),
- /* 70 S> */ B(Jump), U8(21),
+ /* 70 S> */ B(Jump), U8(20),
/* 79 S> */ B(LdaSmi), I8(2),
/* 85 E> */ B(TestEqual), R(0), U8(1),
B(JumpIfFalse), U8(4),
- /* 91 S> */ B(Jump), U8(9),
+ /* 91 S> */ B(Jump), U8(8),
/* 103 S> */ B(Ldar), R(0),
/* 109 E> */ B(AddSmi), I8(1), U8(2),
- B(Star), R(0),
- /* 45 E> */ B(JumpLoop), U8(25), I8(0),
+ B(Star0),
+ /* 45 E> */ B(JumpLoop), U8(24), I8(0),
B(LdaUndefined),
/* 116 S> */ B(Return),
]
@@ -426,22 +426,22 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 33
+bytecode array length: 31
bytecodes: [
/* 47 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 56 S> */ B(LdaSmi), I8(1),
/* 62 E> */ B(TestEqual), R(0), U8(0),
B(JumpIfFalse), U8(4),
- /* 68 S> */ B(Jump), U8(21),
+ /* 68 S> */ B(Jump), U8(20),
/* 77 S> */ B(LdaSmi), I8(2),
/* 83 E> */ B(TestEqual), R(0), U8(1),
B(JumpIfFalse), U8(4),
- /* 89 S> */ B(Jump), U8(9),
+ /* 89 S> */ B(Jump), U8(8),
/* 101 S> */ B(Ldar), R(0),
/* 107 E> */ B(AddSmi), I8(1), U8(2),
- B(Star), R(0),
- /* 34 E> */ B(JumpLoop), U8(25), I8(0),
+ B(Star0),
+ /* 34 E> */ B(JumpLoop), U8(24), I8(0),
B(LdaUndefined),
/* 114 S> */ B(Return),
]
@@ -460,22 +460,22 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 33
+bytecode array length: 31
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 68 S> */ B(LdaSmi), I8(1),
/* 74 E> */ B(TestEqual), R(0), U8(0),
B(JumpIfFalse), U8(4),
- /* 80 S> */ B(Jump), U8(21),
+ /* 80 S> */ B(Jump), U8(20),
/* 89 S> */ B(LdaSmi), I8(2),
/* 95 E> */ B(TestEqual), R(0), U8(1),
B(JumpIfFalse), U8(4),
/* 101 S> */ B(Jump), U8(2),
/* 55 S> */ B(Ldar), R(0),
/* 59 E> */ B(AddSmi), I8(1), U8(2),
- B(Star), R(0),
- /* 45 E> */ B(JumpLoop), U8(25), I8(0),
+ B(Star0),
+ /* 45 E> */ B(JumpLoop), U8(24), I8(0),
B(LdaUndefined),
/* 113 S> */ B(Return),
]
@@ -493,22 +493,22 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 33
+bytecode array length: 31
bytecodes: [
/* 47 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 66 S> */ B(LdaSmi), I8(1),
/* 72 E> */ B(TestEqual), R(0), U8(0),
B(JumpIfFalse), U8(4),
- /* 78 S> */ B(Jump), U8(21),
+ /* 78 S> */ B(Jump), U8(20),
/* 87 S> */ B(LdaSmi), I8(2),
/* 93 E> */ B(TestEqual), R(0), U8(1),
B(JumpIfFalse), U8(4),
/* 99 S> */ B(Jump), U8(2),
/* 53 S> */ B(Ldar), R(0),
/* 57 E> */ B(AddSmi), I8(1), U8(2),
- B(Star), R(0),
- /* 34 E> */ B(JumpLoop), U8(25), I8(0),
+ B(Star0),
+ /* 34 E> */ B(JumpLoop), U8(24), I8(0),
B(LdaUndefined),
/* 111 S> */ B(Return),
]
@@ -527,23 +527,23 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 34
+bytecode array length: 30
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 58 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star1),
/* 63 S> */ B(LdaSmi), I8(100),
/* 63 E> */ B(TestLessThan), R(1), U8(0),
- B(JumpIfFalse), U8(21),
+ B(JumpIfFalse), U8(19),
/* 85 S> */ B(Ldar), R(0),
/* 91 E> */ B(AddSmi), I8(1), U8(1),
- B(Star), R(0),
+ B(Star0),
/* 98 S> */ B(Jump), U8(2),
/* 72 S> */ B(Ldar), R(1),
/* 76 E> */ B(AddSmi), I8(1), U8(2),
- B(Star), R(1),
- /* 45 E> */ B(JumpLoop), U8(23), I8(0),
+ B(Star1),
+ /* 45 E> */ B(JumpLoop), U8(21), I8(0),
B(LdaUndefined),
/* 110 S> */ B(Return),
]
@@ -562,21 +562,21 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 31
+bytecode array length: 27
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 58 S> */ B(LdaSmi), I8(10),
- B(Star), R(1),
+ B(Star1),
/* 62 S> */ B(Ldar), R(1),
- B(JumpIfToBooleanFalse), U8(18),
+ B(JumpIfToBooleanFalse), U8(16),
/* 74 S> */ B(Ldar), R(0),
/* 80 E> */ B(MulSmi), I8(12), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 67 S> */ B(Ldar), R(1),
B(Dec), U8(1),
- B(Star), R(1),
- /* 45 E> */ B(JumpLoop), U8(17), I8(0),
+ B(Star1),
+ /* 45 E> */ B(JumpLoop), U8(15), I8(0),
/* 88 S> */ B(Ldar), R(0),
/* 97 S> */ B(Return),
]
@@ -595,12 +595,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 9
+bytecode array length: 7
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 58 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star1),
/* 91 S> */ B(Ldar), R(0),
/* 100 S> */ B(Return),
]
@@ -620,23 +620,23 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 34
+bytecode array length: 30
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 58 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star1),
/* 76 S> */ B(Ldar), R(0),
/* 82 E> */ B(AddSmi), I8(1), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 89 S> */ B(LdaSmi), I8(20),
/* 95 E> */ B(TestEqual), R(0), U8(1),
B(JumpIfFalse), U8(4),
- /* 102 S> */ B(Jump), U8(11),
+ /* 102 S> */ B(Jump), U8(10),
/* 69 S> */ B(Ldar), R(1),
B(Inc), U8(2),
- B(Star), R(1),
- /* 45 E> */ B(JumpLoop), U8(22), I8(0),
+ B(Star1),
+ /* 45 E> */ B(JumpLoop), U8(20), I8(0),
/* 112 S> */ B(Ldar), R(0),
/* 121 S> */ B(Return),
]
@@ -659,18 +659,18 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 48
+bytecode array length: 46
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 52 S> */ B(Ldar), R(0),
- B(JumpIfToBooleanFalse), U8(41),
+ B(JumpIfToBooleanFalse), U8(40),
B(CreateBlockContext), U8(0),
B(PushContext), R(3),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(2),
+ B(Star2),
/* 73 S> */ B(LdaSmi), I8(1),
/* 73 E> */ B(StaCurrentContextSlot), U8(2),
/* 102 S> */ B(Mov), R(2), R(1),
@@ -682,7 +682,7 @@ bytecodes: [
B(Inc), U8(0),
/* 127 E> */ B(StaCurrentContextSlot), U8(2),
B(PopContext), R(3),
- /* 45 E> */ B(JumpLoop), U8(40), I8(0),
+ /* 45 E> */ B(JumpLoop), U8(39), I8(0),
B(LdaUndefined),
/* 137 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
index e37a3ca0cf..a985e65b23 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
@@ -17,12 +17,12 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 13
+bytecode array length: 11
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 62 S> */ B(AddSmi), I8(1), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 69 S> */ B(Jump), U8(2),
/* 97 S> */ B(Ldar), R(0),
/* 106 S> */ B(Return),
@@ -47,38 +47,38 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 66
+bytecode array length: 59
bytecodes: [
/* 44 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 71 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star1),
/* 76 S> */ B(LdaSmi), I8(10),
/* 76 E> */ B(TestLessThan), R(1), U8(0),
- B(JumpIfFalse), U8(52),
+ B(JumpIfFalse), U8(47),
/* 106 S> */ B(LdaZero),
- B(Star), R(2),
+ B(Star2),
/* 111 S> */ B(LdaSmi), I8(3),
/* 111 E> */ B(TestLessThan), R(2), U8(1),
- B(JumpIfFalse), U8(33),
+ B(JumpIfFalse), U8(30),
/* 129 S> */ B(Ldar), R(0),
B(Inc), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 142 S> */ B(Ldar), R(2),
/* 148 E> */ B(Add), R(1), U8(3),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(12),
/* 152 E> */ B(TestEqual), R(3), U8(4),
B(JumpIfFalse), U8(4),
- /* 161 S> */ B(Jump), U8(20),
+ /* 161 S> */ B(Jump), U8(18),
/* 118 S> */ B(Ldar), R(2),
B(Inc), U8(5),
- B(Star), R(2),
- /* 93 E> */ B(JumpLoop), U8(35), I8(1),
+ B(Star2),
+ /* 93 E> */ B(JumpLoop), U8(32), I8(1),
/* 84 S> */ B(Ldar), R(1),
B(Inc), U8(6),
- B(Star), R(1),
- /* 58 E> */ B(JumpLoop), U8(54), I8(0),
+ B(Star1),
+ /* 58 E> */ B(JumpLoop), U8(49), I8(0),
/* 188 S> */ B(Ldar), R(0),
/* 199 S> */ B(Return),
]
@@ -97,14 +97,14 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 28
+bytecode array length: 27
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(1),
+ B(Star1),
/* 53 S> */ B(LdaSmi), I8(10),
/* 53 E> */ B(StaCurrentContextSlot), U8(2),
/* 85 S> */ B(Mov), R(1), R(0),
@@ -136,7 +136,7 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 52
+bytecode array length: 51
bytecodes: [
/* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(2),
@@ -149,7 +149,7 @@ bytecodes: [
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(1),
+ B(Star1),
/* 76 S> */ B(LdaSmi), I8(2),
/* 76 E> */ B(StaCurrentContextSlot), U8(2),
/* 113 S> */ B(Mov), R(1), R(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
index a4a5428d7d..cec066fdb7 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
@@ -11,14 +11,14 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 24
+bytecode array length: 21
bytecodes: [
/* 34 S> */ B(LdaGlobal), U8(0), U8(0),
- B(Star), R(1),
+ B(Star1),
/* 39 E> */ B(LdaNamedProperty), R(1), U8(1), U8(2),
- B(Star), R(0),
+ B(Star0),
B(CreateArrayLiteral), U8(2), U8(4), U8(37),
- B(Star), R(2),
+ B(Star2),
/* 39 E> */ B(CallWithSpread), R(0), R(1), U8(2), U8(5),
B(LdaUndefined),
/* 58 S> */ B(Return),
@@ -37,16 +37,16 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 27
+bytecode array length: 23
bytecodes: [
/* 34 S> */ B(LdaGlobal), U8(0), U8(0),
- B(Star), R(1),
+ B(Star1),
/* 39 E> */ B(LdaNamedProperty), R(1), U8(1), U8(2),
- B(Star), R(0),
+ B(Star0),
B(LdaZero),
- B(Star), R(2),
+ B(Star2),
B(CreateArrayLiteral), U8(2), U8(4), U8(37),
- B(Star), R(3),
+ B(Star3),
/* 39 E> */ B(CallWithSpread), R(0), R(1), U8(3), U8(5),
B(LdaUndefined),
/* 61 S> */ B(Return),
@@ -65,37 +65,37 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 100
+bytecode array length: 91
bytecodes: [
/* 34 S> */ B(LdaGlobal), U8(0), U8(0),
- B(Star), R(0),
+ B(Star0),
B(LdaNamedProperty), R(0), U8(1), U8(2),
- B(Star), R(1),
+ B(Star1),
B(CreateArrayLiteral), U8(2), U8(4), U8(37),
- B(Star), R(4),
+ B(Star4),
B(LdaSmi), I8(1),
- B(Star), R(3),
+ B(Star3),
/* 49 S> */ B(CreateArrayLiteral), U8(3), U8(5), U8(37),
- B(Star), R(7),
+ B(Star7),
/* 49 E> */ B(GetIterator), R(7), U8(6), U8(8),
B(Mov), R(0), R(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(6),
+ B(Star6),
B(LdaNamedProperty), R(6), U8(4), U8(10),
- B(Star), R(5),
+ B(Star5),
B(CallProperty0), R(5), R(6), U8(19),
- B(Star), R(7),
+ B(Star7),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
B(LdaNamedProperty), R(7), U8(5), U8(21),
- B(JumpIfToBooleanTrue), U8(19),
+ B(JumpIfToBooleanTrue), U8(18),
B(LdaNamedProperty), R(7), U8(6), U8(12),
B(StaInArrayLiteral), R(4), R(3), U8(17),
B(Ldar), R(3),
B(Inc), U8(16),
- B(Star), R(3),
- B(JumpLoop), U8(33), I8(0),
+ B(Star3),
+ B(JumpLoop), U8(31), I8(0),
B(LdaSmi), I8(4),
B(StaInArrayLiteral), R(4), R(3), U8(17),
B(Mov), R(4), R(3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
index 2dfa6dc4c0..853c191caf 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
@@ -14,10 +14,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
/* 32 S> */ B(LdaGlobal), U8(0), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 39 E> */ B(CallUndefinedReceiver0), R(0), U8(2),
/* 43 S> */ B(Return),
]
@@ -35,16 +35,16 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 23
+bytecode array length: 19
bytecodes: [
/* 39 S> */ B(LdaGlobal), U8(0), U8(0),
- B(Star), R(0),
+ B(Star0),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(3),
- B(Star), R(3),
+ B(Star3),
/* 46 E> */ B(CallUndefinedReceiver), R(0), R(1), U8(3), U8(2),
/* 57 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
index 1e5e0a2c8e..4fd62de6bc 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
@@ -11,7 +11,7 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 74
+bytecode array length: 67
bytecodes: [
/* 30 E> */ B(CreateFunctionContext), U8(0), U8(4),
B(PushContext), R(1),
@@ -24,23 +24,23 @@ bytecodes: [
/* 34 S> */ B(CreateClosure), U8(1), U8(0), U8(2),
/* 36 E> */ B(StaLookupSlot), U8(2), U8(0),
/* 52 S> */ B(LdaLookupGlobalSlot), U8(3), U8(0), U8(1),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(4),
- B(Star), R(3),
+ B(Star3),
B(LdaZero),
- B(Star), R(7),
+ B(Star7),
B(LdaSmi), I8(30),
- B(Star), R(8),
+ B(Star8),
B(LdaSmi), I8(52),
- B(Star), R(9),
+ B(Star9),
B(Mov), R(2), R(4),
B(Mov), R(3), R(5),
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
- B(Star), R(2),
+ B(Star2),
/* 52 E> */ B(CallUndefinedReceiver1), R(2), R(3), U8(2),
/* 62 S> */ B(LdaLookupGlobalSlot), U8(2), U8(4), U8(1),
- B(Star), R(2),
+ B(Star2),
/* 69 E> */ B(CallUndefinedReceiver0), R(2), U8(6),
/* 73 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
index 8674673d3d..2c67df8c01 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
@@ -14,10 +14,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
/* 50 S> */ B(LdaGlobal), U8(0), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 57 E> */ B(Construct), R(0), R(0), U8(0), U8(2),
/* 67 S> */ B(Return),
]
@@ -35,12 +35,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 17
+bytecode array length: 15
bytecodes: [
/* 63 S> */ B(LdaGlobal), U8(0), U8(0),
- B(Star), R(0),
+ B(Star0),
B(LdaSmi), I8(3),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
/* 70 E> */ B(Construct), R(0), R(1), U8(1), U8(2),
/* 81 S> */ B(Return),
@@ -64,16 +64,16 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 25
+bytecode array length: 21
bytecodes: [
/* 105 S> */ B(LdaGlobal), U8(0), U8(0),
- B(Star), R(0),
+ B(Star0),
B(LdaSmi), I8(3),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(4),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(5),
- B(Star), R(3),
+ B(Star3),
B(Ldar), R(0),
/* 112 E> */ B(Construct), R(0), R(1), U8(3), U8(2),
/* 129 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
index 32a9a902ae..5f15f3460c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
@@ -48,12 +48,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 14
+bytecode array length: 12
bytecodes: [
/* 15 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
B(LdaSmi), I8(2),
- B(Star), R(1),
+ B(Star1),
B(CallRuntime), U16(Runtime::kAdd), R(0), U8(2),
/* 32 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
index 880cba2b23..82b6e16be9 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
@@ -20,25 +20,18 @@ snippet: "
test();
})();
"
-frame size: 6
+frame size: 1
parameter count: 1
-bytecode array length: 33
+bytecode array length: 16
bytecodes: [
- B(Mov), R(closure), R(0),
- /* 104 S> */ B(LdaConstant), U8(0),
- /* 111 E> */ B(LdaKeyedProperty), R(closure), U8(1),
- B(Star), R(4),
- B(LdaConstant), U8(1),
- B(Star), R(5),
- B(Mov), R(this), R(3),
- /* 117 E> */ B(CallRuntime), U16(Runtime::kLoadFromSuper), R(3), U8(3),
- B(Star), R(1),
- /* 117 E> */ B(CallAnyReceiver), R(1), R(this), U8(1), U8(3),
+ /* 104 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
+ /* 117 E> */ B(LdaNamedPropertyFromSuper), R(this), U8(0), U8(1),
+ B(Star0),
+ /* 117 E> */ B(CallAnyReceiver), R(0), R(this), U8(1), U8(3),
/* 126 E> */ B(AddSmi), I8(1), U8(0),
/* 130 S> */ B(Return),
]
constant pool: [
- SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["method"],
]
handlers: [
@@ -59,31 +52,23 @@ snippet: "
test();
})();
"
-frame size: 5
+frame size: 4
parameter count: 1
-bytecode array length: 46
+bytecode array length: 24
bytecodes: [
- B(Mov), R(closure), R(0),
- /* 130 S> */ B(LdaConstant), U8(0),
- /* 130 E> */ B(LdaKeyedProperty), R(closure), U8(0),
- B(Star), R(2),
- B(LdaConstant), U8(1),
- B(Star), R(3),
+ /* 130 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
+ B(Star1),
+ B(LdaConstant), U8(0),
+ B(Star2),
B(LdaSmi), I8(2),
- B(Star), R(4),
- B(Mov), R(this), R(1),
- /* 138 E> */ B(CallRuntime), U16(Runtime::kStoreToSuper), R(1), U8(4),
- /* 143 S> */ B(LdaConstant), U8(0),
- /* 150 E> */ B(LdaKeyedProperty), R(closure), U8(2),
- B(Star), R(2),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- B(Mov), R(this), R(1),
- /* 156 E> */ B(CallRuntime), U16(Runtime::kLoadFromSuper), R(1), U8(3),
+ B(Star3),
+ B(Mov), R(this), R(0),
+ /* 138 E> */ B(CallRuntime), U16(Runtime::kStoreToSuper), R(0), U8(4),
+ /* 143 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
+ /* 156 E> */ B(LdaNamedPropertyFromSuper), R(this), U8(0), U8(0),
/* 158 S> */ B(Return),
]
constant pool: [
- SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
@@ -104,17 +89,17 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 41
+bytecode array length: 39
bytecodes: [
B(Mov), R(closure), R(1),
/* 118 S> */ B(Ldar), R(1),
B(GetSuperConstructor), R(3),
B(LdaSmi), I8(1),
B(ThrowIfNotSuperConstructor), R(3),
- B(Star), R(4),
+ B(Star4),
B(Ldar), R(0),
/* 118 E> */ B(Construct), R(3), R(4), U8(1), U8(0),
- B(Star), R(5),
+ B(Star5),
B(Ldar), R(this),
B(ThrowSuperAlreadyCalledIfNotHole),
B(Mov), R(5), R(this),
@@ -147,7 +132,7 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 37
+bytecode array length: 36
bytecodes: [
B(Mov), R(closure), R(1),
/* 117 S> */ B(Ldar), R(1),
@@ -155,7 +140,7 @@ bytecodes: [
B(ThrowIfNotSuperConstructor), R(3),
B(Ldar), R(0),
/* 117 E> */ B(Construct), R(3), R(0), U8(0), U8(0),
- B(Star), R(4),
+ B(Star4),
B(Ldar), R(this),
B(ThrowSuperAlreadyCalledIfNotHole),
B(Mov), R(4), R(this),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
index b16056a344..4fa55082d5 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
@@ -14,21 +14,21 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 40
+bytecode array length: 35
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(1),
- B(Star), R(3),
+ B(Star3),
B(CreateClosure), U8(3), U8(1), U8(2),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(2), R(4),
B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
- B(Star), R(3),
+ B(Star3),
B(PopContext), R(1),
B(Mov), R(4), R(0),
B(LdaUndefined),
@@ -52,21 +52,21 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 40
+bytecode array length: 35
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(1),
- B(Star), R(3),
+ B(Star3),
B(CreateClosure), U8(3), U8(1), U8(2),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(2), R(4),
B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
- B(Star), R(3),
+ B(Star3),
B(PopContext), R(1),
B(Mov), R(4), R(0),
B(LdaUndefined),
@@ -92,7 +92,7 @@ snippet: "
"
frame size: 11
parameter count: 1
-bytecode array length: 83
+bytecode array length: 77
bytecodes: [
/* 30 E> */ B(CreateFunctionContext), U8(0), U8(2),
B(PushContext), R(1),
@@ -103,15 +103,15 @@ bytecodes: [
B(CreateBlockContext), U8(3),
B(PushContext), R(2),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star6),
B(CreateClosure), U8(5), U8(0), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaConstant), U8(4),
- B(Star), R(4),
+ B(Star4),
/* 75 S> */ B(LdaImmutableContextSlot), R(2), U8(2), U8(0),
B(ToName), R(7),
B(CreateClosure), U8(6), U8(1), U8(2),
- B(Star), R(8),
+ B(Star8),
/* 106 S> */ B(LdaImmutableContextSlot), R(2), U8(3), U8(0),
B(ToName), R(9),
B(LdaConstant), U8(7),
@@ -120,9 +120,9 @@ bytecodes: [
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
B(CreateClosure), U8(8), U8(2), U8(2),
- B(Star), R(10),
+ B(Star10),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(7),
- B(Star), R(4),
+ B(Star4),
B(PopContext), R(2),
B(Mov), R(3), R(0),
B(LdaUndefined),
@@ -150,7 +150,7 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 48
+bytecode array length: 44
bytecodes: [
/* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(1),
@@ -159,14 +159,14 @@ bytecodes: [
B(CreateBlockContext), U8(1),
B(PushContext), R(2),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star6),
B(CreateClosure), U8(3), U8(0), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaConstant), U8(2),
- B(Star), R(4),
+ B(Star4),
B(Mov), R(3), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
- B(Star), R(4),
+ B(Star4),
B(PopContext), R(2),
B(Mov), R(5), R(0),
/* 87 S> */ B(Ldar), R(0),
@@ -189,33 +189,33 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 69
+bytecode array length: 60
bytecodes: [
/* 34 S> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(1),
- B(Star), R(3),
+ B(Star3),
B(Mov), R(2), R(4),
B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
- B(Star), R(3),
+ B(Star3),
B(PopContext), R(1),
B(CreateBlockContext), U8(3),
B(PushContext), R(1),
B(LdaTheHole),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(5), U8(1), U8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(4),
- B(Star), R(3),
+ B(Star3),
B(CreateClosure), U8(6), U8(2), U8(2),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(2), R(4),
B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
- B(Star), R(3),
+ B(Star3),
B(PopContext), R(1),
B(Mov), R(4), R(0),
B(LdaUndefined),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden
index 848c420967..5e4b75e738 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden
@@ -12,10 +12,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(TestNull),
/* 63 S> */ B(Return),
]
@@ -31,10 +31,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 5
+bytecode array length: 4
bytecodes: [
/* 42 S> */ B(LdaUndefined),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(TestUndefined),
/* 76 S> */ B(Return),
]
@@ -50,10 +50,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
/* 42 S> */ B(LdaUndefined),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(TestUndefined),
/* 70 E> */ B(LogicalNot),
/* 76 S> */ B(Return),
@@ -70,10 +70,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(TestUndetectable),
/* 54 E> */ B(LogicalNot),
/* 62 S> */ B(Return),
@@ -90,10 +90,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 5
+bytecode array length: 4
bytecodes: [
/* 42 S> */ B(LdaUndefined),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(TestUndetectable),
/* 75 S> */ B(Return),
]
@@ -109,10 +109,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
/* 42 S> */ B(LdaUndefined),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(JumpIfNotUndefined), U8(6),
B(LdaSmi), I8(1),
B(Jump), U8(4),
@@ -131,10 +131,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 13
+bytecode array length: 12
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(TestUndetectable),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
@@ -154,10 +154,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(JumpIfUndefined), U8(6),
B(LdaSmi), I8(1),
B(Jump), U8(4),
@@ -176,10 +176,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(JumpIfNotNull), U8(6),
B(LdaSmi), I8(1),
B(Jump), U8(4),
@@ -202,10 +202,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(JumpIfNotNull), U8(5),
/* 65 S> */ B(LdaSmi), I8(1),
/* 74 S> */ B(Return),
@@ -226,10 +226,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(TestUndetectable),
B(JumpIfTrue), U8(5),
/* 69 S> */ B(LdaSmi), I8(1),
@@ -252,18 +252,18 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 21
+bytecode array length: 18
bytecodes: [
/* 42 S> */ B(LdaUndefined),
- B(Star), R(0),
+ B(Star0),
/* 61 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star1),
/* 73 S> */ B(Ldar), R(0),
- B(JumpIfUndefined), U8(11),
+ B(JumpIfUndefined), U8(10),
/* 92 S> */ B(Ldar), R(1),
B(Inc), U8(0),
- B(Star), R(1),
- /* 64 E> */ B(JumpLoop), U8(10), I8(0),
+ B(Star1),
+ /* 64 E> */ B(JumpLoop), U8(9), I8(0),
B(LdaUndefined),
/* 99 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
index a040decf23..6a00a5445b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
@@ -11,12 +11,12 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 9
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(AddSmi), I8(2), U8(0),
- B(Star), R(0),
+ B(Star0),
B(LdaUndefined),
/* 53 S> */ B(Return),
]
@@ -31,12 +31,12 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 9
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(DivSmi), I8(2), U8(0),
- B(Star), R(0),
+ B(Star0),
B(LdaUndefined),
/* 53 S> */ B(Return),
]
@@ -51,10 +51,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 19
+bytecode array length: 18
bytecodes: [
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(1),
B(MulSmi), I8(2), U8(3),
/* 61 E> */ B(StaNamedProperty), R(0), U8(1), U8(4),
@@ -74,12 +74,12 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 22
+bytecode array length: 20
bytecodes: [
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(0),
+ B(Star0),
/* 52 S> */ B(LdaSmi), I8(1),
- B(Star), R(2),
+ B(Star2),
B(LdaKeyedProperty), R(0), U8(1),
B(BitwiseXorSmi), I8(2), U8(3),
/* 57 E> */ B(StaKeyedProperty), R(0), R(2), U8(4),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden
index cd8f5de491..7ff49c3180 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden
@@ -43,10 +43,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 17
+bytecode array length: 16
bytecodes: [
/* 34 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
B(LdaSmi), I8(1),
/* 43 E> */ B(TestLessThan), R(0), U8(0),
B(JumpIfFalse), U8(6),
@@ -67,10 +67,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(JumpIfToBooleanFalse), U8(6),
B(LdaSmi), I8(2),
B(Jump), U8(4),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden
index d5240602e1..832cf1b5a1 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden
@@ -11,10 +11,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
/* 44 S> */ B(LdaSmi), I8(10),
- B(Star), R(0),
+ B(Star0),
B(LdaUndefined),
/* 48 S> */ B(Return),
]
@@ -29,10 +29,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 5
+bytecode array length: 4
bytecodes: [
/* 44 S> */ B(LdaSmi), I8(10),
- B(Star), R(0),
+ B(Star0),
/* 57 S> */ B(Return),
]
constant pool: [
@@ -46,16 +46,16 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 20
+bytecode array length: 17
bytecodes: [
B(LdaTheHole),
- B(Star), R(0),
+ B(Star0),
/* 44 S> */ B(LdaSmi), I8(20),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
/* 48 E> */ B(ThrowReferenceErrorIfHole), U8(0),
B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
- B(Star), R(0),
+ B(Star0),
B(LdaUndefined),
/* 55 S> */ B(Return),
]
@@ -71,10 +71,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 13
+bytecode array length: 12
bytecodes: [
/* 44 S> */ B(LdaSmi), I8(10),
- B(Star), R(0),
+ B(Star0),
/* 48 S> */ B(LdaSmi), I8(20),
/* 50 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
B(LdaUndefined),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
index 8250d98b0f..44500fba90 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
@@ -57,14 +57,14 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 25
+bytecode array length: 24
bytecodes: [
/* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
/* 44 S> */ B(LdaSmi), I8(20),
- B(Star), R(1),
+ B(Star1),
B(LdaCurrentContextSlot), U8(2),
/* 47 E> */ B(ThrowReferenceErrorIfHole), U8(1),
B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden
index d6a5199e52..368f865334 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden
@@ -36,14 +36,14 @@ snippet: "
"
frame size: 2
parameter count: 2
-bytecode array length: 18
+bytecode array length: 17
bytecodes: [
/* 10 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(1),
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(2),
/* 27 S> */ B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(LdaCurrentContextSlot), U8(2),
/* 65 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
index 73d710b2fa..043f005946 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
@@ -77,12 +77,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 17
+bytecode array length: 16
bytecodes: [
/* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
/* 41 S> */ B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(1),
+ B(Star1),
/* 64 E> */ B(CallUndefinedReceiver0), R(1), U8(0),
/* 68 S> */ B(LdaCurrentContextSlot), U8(2),
/* 77 S> */ B(Return),
@@ -388,7 +388,7 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 796
+bytecode array length: 795
bytecodes: [
/* 30 E> */ B(Wide), B(CreateFunctionContext), U16(0), U16(256),
B(PushContext), R(1),
@@ -903,7 +903,7 @@ bytecodes: [
/* 3721 S> */ B(LdaZero),
/* 3721 E> */ B(StaCurrentContextSlot), U8(255),
/* 3724 S> */ B(LdaGlobal), U8(1), U8(0),
- B(Star), R(2),
+ B(Star2),
/* 3724 E> */ B(CallUndefinedReceiver0), R(2), U8(2),
/* 3740 S> */ B(LdaSmi), I8(100),
/* 3740 E> */ B(Wide), B(StaCurrentContextSlot), U16(256),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
index 0edc219769..91d5d07356 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
@@ -11,12 +11,12 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 7
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(Inc), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 56 S> */ B(Return),
]
constant pool: [
@@ -30,14 +30,14 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 15
+bytecode array length: 12
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(ToNumeric), U8(0),
- B(Star), R(1),
+ B(Star1),
B(Inc), U8(0),
- B(Star), R(0),
+ B(Star0),
B(Ldar), R(1),
/* 56 S> */ B(Return),
]
@@ -52,12 +52,12 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 7
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(Dec), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 56 S> */ B(Return),
]
constant pool: [
@@ -71,14 +71,14 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 15
+bytecode array length: 12
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(ToNumeric), U8(0),
- B(Star), R(1),
+ B(Star1),
B(Dec), U8(0),
- B(Star), R(0),
+ B(Star0),
B(Ldar), R(1),
/* 56 S> */ B(Return),
]
@@ -93,15 +93,15 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 25
+bytecode array length: 22
bytecodes: [
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(1),
B(ToNumeric), U8(3),
- B(Star), R(2),
+ B(Star2),
B(Inc), U8(3),
- B(Star), R(3),
+ B(Star3),
/* 66 E> */ B(StaNamedProperty), R(0), U8(1), U8(4),
B(Ldar), R(2),
/* 69 S> */ B(Return),
@@ -119,13 +119,13 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 21
+bytecode array length: 19
bytecodes: [
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(1),
B(Dec), U8(3),
- B(Star), R(2),
+ B(Star2),
/* 65 E> */ B(StaNamedProperty), R(0), U8(1), U8(4),
B(Ldar), R(2),
/* 69 S> */ B(Return),
@@ -143,18 +143,18 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 30
+bytecode array length: 26
bytecodes: [
/* 45 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 60 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
- B(Star), R(1),
+ B(Star1),
/* 72 S> */ B(Ldar), R(0),
/* 81 E> */ B(LdaKeyedProperty), R(1), U8(1),
B(ToNumeric), U8(3),
- B(Star), R(4),
+ B(Star4),
B(Dec), U8(3),
- B(Star), R(5),
+ B(Star5),
/* 86 E> */ B(StaKeyedProperty), R(1), R(0), U8(4),
B(Ldar), R(4),
/* 89 S> */ B(Return),
@@ -172,16 +172,16 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 26
+bytecode array length: 23
bytecodes: [
/* 45 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 60 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
- B(Star), R(1),
+ B(Star1),
/* 72 S> */ B(Ldar), R(0),
/* 83 E> */ B(LdaKeyedProperty), R(1), U8(1),
B(Inc), U8(3),
- B(Star), R(4),
+ B(Star4),
/* 87 E> */ B(StaKeyedProperty), R(1), R(0), U8(4),
B(Ldar), R(4),
/* 89 S> */ B(Return),
@@ -199,14 +199,14 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 22
+bytecode array length: 21
bytecodes: [
/* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(1),
/* 42 S> */ B(LdaSmi), I8(1),
/* 42 E> */ B(StaCurrentContextSlot), U8(2),
/* 53 S> */ B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 78 S> */ B(LdaCurrentContextSlot), U8(2),
B(Inc), U8(0),
/* 87 E> */ B(StaCurrentContextSlot), U8(2),
@@ -225,17 +225,17 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 28
+bytecode array length: 26
bytecodes: [
/* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(1),
/* 42 S> */ B(LdaSmi), I8(1),
/* 42 E> */ B(StaCurrentContextSlot), U8(2),
/* 53 S> */ B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 78 S> */ B(LdaCurrentContextSlot), U8(2),
B(ToNumeric), U8(0),
- B(Star), R(2),
+ B(Star2),
B(Dec), U8(0),
/* 86 E> */ B(StaCurrentContextSlot), U8(2),
B(Ldar), R(2),
@@ -254,19 +254,19 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 31
+bytecode array length: 26
bytecodes: [
/* 44 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 55 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
- B(Star), R(1),
+ B(Star1),
/* 63 S> */ B(Ldar), R(0),
B(ToNumeric), U8(1),
- B(Star), R(3),
+ B(Star3),
B(Inc), U8(1),
- B(Star), R(0),
+ B(Star0),
B(LdaSmi), I8(2),
- B(Star), R(4),
+ B(Star4),
/* 79 E> */ B(StaKeyedProperty), R(1), R(3), U8(2),
B(Ldar), R(4),
/* 83 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
index 3bb8f8ac2c..489ac63a92 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
@@ -13,10 +13,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
/* 10 E> */ B(CreateMappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 32 S> */ B(Return),
]
constant pool: [
@@ -31,10 +31,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
/* 10 E> */ B(CreateMappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 15 S> */ B(LdaZero),
/* 31 E> */ B(LdaKeyedProperty), R(0), U8(0),
/* 35 S> */ B(Return),
@@ -51,10 +51,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
/* 10 E> */ B(CreateUnmappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 46 S> */ B(Return),
]
constant pool: [
@@ -69,14 +69,14 @@ snippet: "
"
frame size: 2
parameter count: 2
-bytecode array length: 17
+bytecode array length: 16
bytecodes: [
/* 10 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(1),
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(2),
B(CreateMappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 16 S> */ B(LdaZero),
/* 32 E> */ B(LdaKeyedProperty), R(0), U8(0),
/* 36 S> */ B(Return),
@@ -94,7 +94,7 @@ snippet: "
"
frame size: 2
parameter count: 4
-bytecode array length: 21
+bytecode array length: 20
bytecodes: [
/* 10 E> */ B(CreateFunctionContext), U8(0), U8(3),
B(PushContext), R(1),
@@ -105,7 +105,7 @@ bytecodes: [
B(Ldar), R(arg2),
B(StaCurrentContextSlot), U8(2),
B(CreateMappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 39 S> */ B(Return),
]
constant pool: [
@@ -121,10 +121,10 @@ snippet: "
"
frame size: 1
parameter count: 4
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
/* 10 E> */ B(CreateUnmappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
index a0063ba5c4..022e676013 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
@@ -13,11 +13,11 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 6
+bytecode array length: 4
bytecodes: [
/* 10 E> */ B(CreateRestParameter),
- B(Star), R(1),
- B(Star), R(0),
+ B(Star1),
+ B(Star0),
/* 42 S> */ B(Return),
]
constant pool: [
@@ -32,10 +32,10 @@ snippet: "
"
frame size: 3
parameter count: 2
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
/* 10 E> */ B(CreateRestParameter),
- B(Star), R(2),
+ B(Star2),
B(Mov), R(arg0), R(0),
B(Mov), R(2), R(1),
/* 29 S> */ B(Ldar), R(1),
@@ -53,10 +53,10 @@ snippet: "
"
frame size: 3
parameter count: 2
-bytecode array length: 14
+bytecode array length: 13
bytecodes: [
/* 10 E> */ B(CreateRestParameter),
- B(Star), R(2),
+ B(Star2),
B(Mov), R(arg0), R(0),
B(Mov), R(2), R(1),
/* 29 S> */ B(LdaZero),
@@ -75,17 +75,17 @@ snippet: "
"
frame size: 5
parameter count: 2
-bytecode array length: 26
+bytecode array length: 23
bytecodes: [
/* 10 E> */ B(CreateUnmappedArguments),
- B(Star), R(3),
+ B(Star3),
B(CreateRestParameter),
- B(Star), R(2),
+ B(Star2),
B(Mov), R(arg0), R(0),
B(Mov), R(2), R(1),
/* 29 S> */ B(LdaZero),
/* 44 E> */ B(LdaKeyedProperty), R(1), U8(1),
- B(Star), R(4),
+ B(Star4),
B(LdaZero),
/* 59 E> */ B(LdaKeyedProperty), R(3), U8(3),
/* 48 E> */ B(Add), R(4), U8(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden
index 3d95c9c4f4..d2d3caba17 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden
@@ -27,10 +27,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
/* 66 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
B(LdaUndefined),
/* 69 S> */ B(Return),
]
@@ -61,10 +61,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(JumpIfToBooleanFalse), U8(5),
/* 54 S> */ B(LdaSmi), I8(1),
/* 63 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
index 27be2b47bd..bea6428fad 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
@@ -12,10 +12,10 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 19
+bytecode array length: 18
bytecodes: [
B(LdaConstant), U8(0),
- B(Star), R(1),
+ B(Star1),
B(Mov), R(closure), R(2),
/* 0 E> */ B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2),
/* 8 S> */ B(LdaSmi), I8(1),
@@ -36,10 +36,10 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 14
+bytecode array length: 13
bytecodes: [
B(LdaConstant), U8(0),
- B(Star), R(0),
+ B(Star0),
B(Mov), R(closure), R(1),
/* 0 E> */ B(CallRuntime), U16(Runtime::kDeclareGlobals), R(0), U8(2),
B(LdaUndefined),
@@ -58,17 +58,17 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 25
+bytecode array length: 23
bytecodes: [
B(LdaConstant), U8(0),
- B(Star), R(1),
+ B(Star1),
B(Mov), R(closure), R(2),
/* 0 E> */ B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2),
/* 8 S> */ B(LdaSmi), I8(1),
/* 8 E> */ B(StaGlobal), U8(1), U8(0),
/* 11 S> */ B(LdaSmi), I8(2),
/* 12 E> */ B(StaGlobal), U8(1), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 16 S> */ B(Return),
]
constant pool: [
@@ -85,16 +85,16 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 23
+bytecode array length: 20
bytecodes: [
B(LdaConstant), U8(0),
- B(Star), R(1),
+ B(Star1),
B(Mov), R(closure), R(2),
/* 0 E> */ B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2),
/* 16 S> */ B(LdaGlobal), U8(1), U8(0),
- B(Star), R(1),
+ B(Star1),
/* 16 E> */ B(CallUndefinedReceiver0), R(1), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 21 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
index 3af61115bd..183adb06d4 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
@@ -11,10 +11,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(0),
+ B(Star0),
/* 56 S> */ B(LdaConstant), U8(1),
B(DeletePropertySloppy), R(0),
/* 74 S> */ B(Return),
@@ -32,10 +32,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
/* 56 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(0),
+ B(Star0),
/* 70 S> */ B(LdaConstant), U8(1),
B(DeletePropertyStrict), R(0),
/* 88 S> */ B(Return),
@@ -53,10 +53,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(0),
+ B(Star0),
/* 56 S> */ B(LdaSmi), I8(2),
B(DeletePropertySloppy), R(0),
/* 75 S> */ B(Return),
@@ -73,10 +73,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(10),
- B(Star), R(0),
+ B(Star0),
/* 46 S> */ B(LdaFalse),
/* 62 S> */ B(Return),
]
@@ -94,7 +94,7 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 24
+bytecode array length: 23
bytecodes: [
/* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
@@ -102,7 +102,7 @@ bytecodes: [
/* 56 E> */ B(StaCurrentContextSlot), U8(2),
/* 64 S> */ B(CreateClosure), U8(2), U8(0), U8(2),
/* 93 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(1),
B(DeletePropertyStrict), R(1),
/* 112 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden
index bff4c7a5da..af1c1806bd 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden
@@ -19,10 +19,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
/* 15 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
+ B(Star0),
B(CallRuntime), U16(Runtime::kDeleteLookupSlot), R(0), U8(1),
B(LdaUndefined),
/* 25 S> */ B(Return),
@@ -69,10 +69,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 10
+bytecode array length: 9
bytecodes: [
/* 15 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
+ B(Star0),
B(CallRuntime), U16(Runtime::kDeleteLookupSlot), R(0), U8(1),
/* 31 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
index 36915d6c5e..6ad4a1fd3f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
@@ -12,60 +12,60 @@ snippet: "
"
frame size: 13
parameter count: 1
-bytecode array length: 146
+bytecode array length: 129
bytecodes: [
/* 45 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
- B(Star), R(1),
+ B(Star1),
/* 60 S> */ B(GetIterator), R(1), U8(1), U8(3),
B(Mov), R(1), R(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(4),
+ B(Star4),
B(LdaNamedProperty), R(4), U8(1), U8(5),
- B(Star), R(3),
+ B(Star3),
B(LdaFalse),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(context), R(8),
/* 57 S> */ B(Ldar), R(5),
- B(JumpIfToBooleanTrue), U8(37),
+ B(JumpIfToBooleanTrue), U8(33),
B(LdaTrue),
- B(Star), R(5),
+ B(Star5),
B(CallProperty0), R(3), R(4), U8(11),
- B(Star), R(9),
+ B(Star9),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
B(LdaNamedProperty), R(9), U8(2), U8(9),
- B(JumpIfToBooleanTrue), U8(15),
+ B(JumpIfToBooleanTrue), U8(13),
B(LdaNamedProperty), R(9), U8(3), U8(7),
- B(Star), R(9),
+ B(Star9),
B(LdaFalse),
- B(Star), R(5),
+ B(Star5),
B(Ldar), R(9),
B(Jump), U8(3),
B(LdaUndefined),
- B(Star), R(0),
+ B(Star0),
B(LdaSmi), I8(-1),
- B(Star), R(7),
- B(Star), R(6),
- B(Jump), U8(7),
- B(Star), R(7),
+ B(Star7),
+ B(Star6),
+ B(Jump), U8(5),
+ B(Star7),
B(LdaZero),
- B(Star), R(6),
+ B(Star6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(8),
+ B(Star8),
B(Ldar), R(5),
- B(JumpIfToBooleanTrue), U8(38),
+ B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(10),
B(LdaNamedProperty), R(4), U8(4), U8(13),
- B(JumpIfUndefinedOrNull), U8(29),
- B(Star), R(11),
+ B(JumpIfUndefinedOrNull), U8(26),
+ B(Star11),
B(CallProperty0), R(11), R(4), U8(15),
- B(JumpIfJSReceiver), U8(21),
- B(Star), R(12),
+ B(JumpIfJSReceiver), U8(19),
+ B(Star12),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
- B(Jump), U8(12),
- B(Star), R(10),
+ B(Jump), U8(11),
+ B(Star10),
B(LdaZero),
B(TestReferenceEqual), R(6),
B(JumpIfTrue), U8(5),
@@ -89,8 +89,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
]
handlers: [
- [34, 76, 84],
- [100, 121, 123],
+ [30, 67, 73],
+ [86, 105, 107],
]
---
@@ -100,96 +100,96 @@ snippet: "
"
frame size: 14
parameter count: 1
-bytecode array length: 236
+bytecode array length: 210
bytecodes: [
/* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
- B(Star), R(2),
+ B(Star2),
/* 69 S> */ B(GetIterator), R(2), U8(1), U8(3),
B(Mov), R(2), R(3),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(5),
+ B(Star5),
B(LdaNamedProperty), R(5), U8(1), U8(5),
- B(Star), R(4),
+ B(Star4),
B(LdaFalse),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(context), R(9),
B(Ldar), R(6),
- B(JumpIfToBooleanTrue), U8(35),
+ B(JumpIfToBooleanTrue), U8(31),
B(LdaTrue),
- B(Star), R(6),
+ B(Star6),
B(CallProperty0), R(4), R(5), U8(11),
- B(Star), R(10),
+ B(Star10),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
B(LdaNamedProperty), R(10), U8(2), U8(9),
- B(JumpIfToBooleanTrue), U8(13),
+ B(JumpIfToBooleanTrue), U8(11),
B(LdaNamedProperty), R(10), U8(3), U8(7),
- B(Star), R(10),
+ B(Star10),
B(LdaFalse),
- B(Star), R(6),
+ B(Star6),
B(Ldar), R(10),
/* 61 S> */ B(Ldar), R(6),
- B(JumpIfToBooleanTrue), U8(37),
+ B(JumpIfToBooleanTrue), U8(33),
B(LdaTrue),
- B(Star), R(6),
+ B(Star6),
B(CallProperty0), R(4), R(5), U8(13),
- B(Star), R(10),
+ B(Star10),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
B(LdaNamedProperty), R(10), U8(2), U8(9),
- B(JumpIfToBooleanTrue), U8(15),
+ B(JumpIfToBooleanTrue), U8(13),
B(LdaNamedProperty), R(10), U8(3), U8(7),
- B(Star), R(10),
+ B(Star10),
B(LdaFalse),
- B(Star), R(6),
+ B(Star6),
B(Ldar), R(10),
B(Jump), U8(3),
B(LdaUndefined),
- B(Star), R(0),
+ B(Star0),
/* 63 S> */ B(CreateEmptyArrayLiteral), U8(15),
- B(Star), R(11),
+ B(Star11),
B(Ldar), R(6),
- B(JumpIfToBooleanTrue), U8(44),
+ B(JumpIfToBooleanTrue), U8(40),
B(LdaZero),
- B(Star), R(12),
+ B(Star12),
B(LdaTrue),
- B(Star), R(6),
+ B(Star6),
B(CallProperty0), R(4), R(5), U8(19),
- B(Star), R(10),
+ B(Star10),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
B(LdaNamedProperty), R(10), U8(2), U8(21),
- B(JumpIfToBooleanTrue), U8(19),
+ B(JumpIfToBooleanTrue), U8(18),
B(LdaNamedProperty), R(10), U8(3), U8(7),
B(StaInArrayLiteral), R(11), R(12), U8(16),
B(Ldar), R(12),
B(Inc), U8(18),
- B(Star), R(12),
- B(JumpLoop), U8(33), I8(0),
+ B(Star12),
+ B(JumpLoop), U8(31), I8(0),
B(Mov), R(11), R(1),
B(LdaSmi), I8(-1),
- B(Star), R(8),
- B(Star), R(7),
- B(Jump), U8(7),
- B(Star), R(8),
+ B(Star8),
+ B(Star7),
+ B(Jump), U8(5),
+ B(Star8),
B(LdaZero),
- B(Star), R(7),
+ B(Star7),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(9),
+ B(Star9),
B(Ldar), R(6),
- B(JumpIfToBooleanTrue), U8(38),
+ B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(11),
B(LdaNamedProperty), R(5), U8(4), U8(23),
- B(JumpIfUndefinedOrNull), U8(29),
- B(Star), R(12),
+ B(JumpIfUndefinedOrNull), U8(26),
+ B(Star12),
B(CallProperty0), R(12), R(5), U8(25),
- B(JumpIfJSReceiver), U8(21),
- B(Star), R(13),
+ B(JumpIfJSReceiver), U8(19),
+ B(Star13),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
- B(Jump), U8(12),
- B(Star), R(11),
+ B(Jump), U8(11),
+ B(Star11),
B(LdaZero),
B(TestReferenceEqual), R(7),
B(JumpIfTrue), U8(5),
@@ -213,8 +213,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
]
handlers: [
- [34, 166, 174],
- [190, 211, 213],
+ [30, 148, 154],
+ [167, 186, 188],
]
---
@@ -224,81 +224,81 @@ snippet: "
"
frame size: 15
parameter count: 1
-bytecode array length: 197
+bytecode array length: 175
bytecodes: [
/* 40 S> */ B(CreateEmptyObjectLiteral),
- B(Star), R(0),
+ B(Star0),
/* 51 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
- B(Star), R(2),
+ B(Star2),
/* 68 S> */ B(GetIterator), R(2), U8(1), U8(3),
B(Mov), R(2), R(3),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(5),
+ B(Star5),
B(LdaNamedProperty), R(5), U8(1), U8(5),
- B(Star), R(4),
+ B(Star4),
B(LdaFalse),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(context), R(9),
/* 59 S> */ B(Ldar), R(6),
B(Mov), R(0), R(11),
- B(JumpIfToBooleanTrue), U8(37),
+ B(JumpIfToBooleanTrue), U8(33),
B(LdaTrue),
- B(Star), R(6),
+ B(Star6),
B(CallProperty0), R(4), R(5), U8(11),
- B(Star), R(10),
+ B(Star10),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
B(LdaNamedProperty), R(10), U8(2), U8(9),
- B(JumpIfToBooleanTrue), U8(15),
+ B(JumpIfToBooleanTrue), U8(13),
B(LdaNamedProperty), R(10), U8(3), U8(7),
- B(Star), R(10),
+ B(Star10),
B(LdaFalse),
- B(Star), R(6),
+ B(Star6),
B(Ldar), R(10),
B(Jump), U8(3),
B(LdaUndefined),
B(StaNamedProperty), R(11), U8(4), U8(13),
/* 63 S> */ B(Ldar), R(6),
- B(JumpIfToBooleanTrue), U8(37),
+ B(JumpIfToBooleanTrue), U8(33),
B(LdaTrue),
- B(Star), R(6),
+ B(Star6),
B(CallProperty0), R(4), R(5), U8(15),
- B(Star), R(10),
+ B(Star10),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
B(LdaNamedProperty), R(10), U8(2), U8(9),
- B(JumpIfToBooleanTrue), U8(15),
+ B(JumpIfToBooleanTrue), U8(13),
B(LdaNamedProperty), R(10), U8(3), U8(7),
- B(Star), R(10),
+ B(Star10),
B(LdaFalse),
- B(Star), R(6),
+ B(Star6),
B(Ldar), R(10),
B(JumpIfNotUndefined), U8(4),
B(LdaSmi), I8(4),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(-1),
- B(Star), R(8),
- B(Star), R(7),
- B(Jump), U8(7),
- B(Star), R(8),
+ B(Star8),
+ B(Star7),
+ B(Jump), U8(5),
+ B(Star8),
B(LdaZero),
- B(Star), R(7),
+ B(Star7),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(9),
+ B(Star9),
B(Ldar), R(6),
- B(JumpIfToBooleanTrue), U8(38),
+ B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(12),
B(LdaNamedProperty), R(5), U8(5), U8(17),
- B(JumpIfUndefinedOrNull), U8(29),
- B(Star), R(13),
+ B(JumpIfUndefinedOrNull), U8(26),
+ B(Star13),
B(CallProperty0), R(13), R(5), U8(19),
- B(JumpIfJSReceiver), U8(21),
- B(Star), R(14),
+ B(JumpIfJSReceiver), U8(19),
+ B(Star14),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
- B(Jump), U8(12),
- B(Star), R(12),
+ B(Jump), U8(11),
+ B(Star12),
B(LdaZero),
B(TestReferenceEqual), R(7),
B(JumpIfTrue), U8(5),
@@ -323,8 +323,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
]
handlers: [
- [37, 127, 135],
- [151, 172, 174],
+ [32, 113, 119],
+ [132, 151, 153],
]
---
@@ -334,12 +334,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 14
+bytecode array length: 12
bytecodes: [
/* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(1),
+ B(Star1),
/* 54 S> */ B(LdaNamedProperty), R(1), U8(1), U8(1),
- B(Star), R(0),
+ B(Star0),
B(LdaUndefined),
/* 63 S> */ B(Return),
]
@@ -357,12 +357,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 19
+bytecode array length: 17
bytecodes: [
/* 40 S> */ B(CreateEmptyObjectLiteral),
- B(Star), R(0),
+ B(Star0),
/* 48 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(1),
+ B(Star1),
/* 61 S> */ B(LdaNamedProperty), R(1), U8(1), U8(1),
B(StaNamedProperty), R(0), U8(2), U8(3),
B(LdaUndefined),
@@ -383,17 +383,17 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 32
+bytecode array length: 29
bytecodes: [
/* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(1),
+ B(Star1),
/* 64 S> */ B(LdaConstant), U8(1),
- B(Star), R(3),
+ B(Star3),
B(LdaNamedProperty), R(1), U8(1), U8(1),
B(Mov), R(1), R(2),
B(JumpIfNotUndefined), U8(3),
B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 71 S> */ B(CallRuntime), U16(Runtime::kCopyDataPropertiesWithExcludedProperties), R(2), U8(2),
B(StaGlobal), U8(2), U8(3),
B(LdaUndefined),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
index bb695e0c35..2071139593 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
@@ -11,7 +11,7 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 58
+bytecode array length: 52
bytecodes: [
/* 30 E> */ B(CreateFunctionContext), U8(0), U8(4),
B(PushContext), R(1),
@@ -22,20 +22,20 @@ bytecodes: [
B(Ldar), R(0),
B(StaCurrentContextSlot), U8(4),
/* 34 S> */ B(LdaLookupGlobalSlot), U8(1), U8(0), U8(1),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaZero),
- B(Star), R(7),
+ B(Star7),
B(LdaSmi), I8(30),
- B(Star), R(8),
+ B(Star8),
B(LdaSmi), I8(41),
- B(Star), R(9),
+ B(Star9),
B(Mov), R(2), R(4),
B(Mov), R(3), R(5),
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
- B(Star), R(2),
+ B(Star2),
/* 41 E> */ B(CallUndefinedReceiver1), R(2), R(3), U8(2),
/* 52 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
index 3a4c3f50cf..12a5b13aa0 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
@@ -16,44 +16,44 @@ snippet: "
"
frame size: 19
parameter count: 1
-bytecode array length: 299
+bytecode array length: 272
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
/* 16 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
- B(Star), R(0),
+ B(Star0),
B(Mov), R(context), R(4),
/* 43 S> */ B(CreateArrayLiteral), U8(2), U8(0), U8(37),
- B(Star), R(7),
+ B(Star7),
B(LdaNamedProperty), R(7), U8(3), U8(1),
- B(JumpIfUndefinedOrNull), U8(15),
- B(Star), R(8),
+ B(JumpIfUndefinedOrNull), U8(14),
+ B(Star8),
B(CallProperty0), R(8), R(7), U8(3),
- B(JumpIfJSReceiver), U8(23),
+ B(JumpIfJSReceiver), U8(21),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
B(LdaNamedProperty), R(7), U8(4), U8(5),
- B(Star), R(8),
+ B(Star8),
B(CallProperty0), R(8), R(7), U8(7),
- B(Star), R(8),
+ B(Star8),
B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(8), U8(1),
- B(Star), R(6),
+ B(Star6),
B(LdaNamedProperty), R(6), U8(5), U8(9),
- B(Star), R(5),
+ B(Star5),
B(LdaFalse),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(context), R(10),
B(LdaTrue),
- B(Star), R(7),
+ B(Star7),
/* 38 S> */ B(CallProperty0), R(5), R(6), U8(11),
- B(Star), R(13),
+ B(Star13),
B(Mov), R(0), R(12),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(12), U8(2),
B(SuspendGenerator), R(0), R(0), U8(12), U8(0),
B(ResumeGenerator), R(0), R(0), U8(12),
- B(Star), R(12),
+ B(Star12),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(13),
+ B(Star13),
B(LdaZero),
B(TestReferenceEqual), R(13),
B(JumpIfTrue), U8(5),
@@ -64,31 +64,31 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
B(LdaNamedProperty), R(11), U8(6), U8(13),
- B(JumpIfToBooleanTrue), U8(22),
+ B(JumpIfToBooleanTrue), U8(20),
B(LdaNamedProperty), R(11), U8(7), U8(15),
- B(Star), R(11),
+ B(Star11),
B(LdaFalse),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(11), R(1),
/* 38 S> */ B(Mov), R(1), R(3),
B(Ldar), R(11),
- /* 23 E> */ B(JumpLoop), U8(76), I8(0),
+ /* 23 E> */ B(JumpLoop), U8(70), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(9),
- B(Star), R(8),
- B(Jump), U8(7),
- B(Star), R(9),
+ B(Star9),
+ B(Star8),
+ B(Jump), U8(5),
+ B(Star9),
B(LdaZero),
- B(Star), R(8),
+ B(Star8),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(10),
+ B(Star10),
B(Ldar), R(7),
- B(JumpIfToBooleanTrue), U8(74),
+ B(JumpIfToBooleanTrue), U8(72),
B(Mov), R(context), R(14),
B(LdaNamedProperty), R(6), U8(8), U8(17),
- B(JumpIfUndefinedOrNull), U8(65),
- B(Star), R(15),
+ B(JumpIfUndefinedOrNull), U8(63),
+ B(Star15),
B(CallProperty0), R(15), R(6), U8(19),
B(Star), R(17),
B(Mov), R(0), R(16),
@@ -104,11 +104,11 @@ bytecodes: [
B(Ldar), R(16),
B(ReThrow),
B(Ldar), R(16),
- B(JumpIfJSReceiver), U8(21),
+ B(JumpIfJSReceiver), U8(20),
B(Star), R(18),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(18), U8(1),
- B(Jump), U8(12),
- B(Star), R(14),
+ B(Jump), U8(11),
+ B(Star14),
B(LdaZero),
B(TestReferenceEqual), R(8),
B(JumpIfTrue), U8(5),
@@ -122,30 +122,30 @@ bytecodes: [
B(Ldar), R(9),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(6),
+ B(Star6),
B(LdaTrue),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(0), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
/* 57 S> */ B(Return),
- B(Star), R(5),
+ B(Star5),
B(CreateCatchContext), R(5), U8(9),
- B(Star), R(4),
+ B(Star4),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(4),
B(PushContext), R(5),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(7),
+ B(Star7),
B(LdaTrue),
- B(Star), R(8),
+ B(Star8),
B(Mov), R(0), R(6),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
/* 57 S> */ B(Return),
]
constant pool: [
- Smi [95],
- Smi [203],
+ Smi [85],
+ Smi [183],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
@@ -156,9 +156,9 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [19, 271, 271],
- [74, 153, 161],
- [177, 234, 236],
+ [18, 248, 248],
+ [66, 139, 145],
+ [158, 214, 216],
]
---
@@ -170,44 +170,44 @@ snippet: "
"
frame size: 19
parameter count: 1
-bytecode array length: 320
+bytecode array length: 292
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
/* 16 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
- B(Star), R(0),
+ B(Star0),
B(Mov), R(context), R(4),
/* 43 S> */ B(CreateArrayLiteral), U8(2), U8(0), U8(37),
- B(Star), R(7),
+ B(Star7),
B(LdaNamedProperty), R(7), U8(3), U8(1),
- B(JumpIfUndefinedOrNull), U8(15),
- B(Star), R(8),
+ B(JumpIfUndefinedOrNull), U8(14),
+ B(Star8),
B(CallProperty0), R(8), R(7), U8(3),
- B(JumpIfJSReceiver), U8(23),
+ B(JumpIfJSReceiver), U8(21),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
B(LdaNamedProperty), R(7), U8(4), U8(5),
- B(Star), R(8),
+ B(Star8),
B(CallProperty0), R(8), R(7), U8(7),
- B(Star), R(8),
+ B(Star8),
B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(8), U8(1),
- B(Star), R(6),
+ B(Star6),
B(LdaNamedProperty), R(6), U8(5), U8(9),
- B(Star), R(5),
+ B(Star5),
B(LdaFalse),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(context), R(10),
B(LdaTrue),
- B(Star), R(7),
+ B(Star7),
/* 38 S> */ B(CallProperty0), R(5), R(6), U8(11),
- B(Star), R(13),
+ B(Star13),
B(Mov), R(0), R(12),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(12), U8(2),
B(SuspendGenerator), R(0), R(0), U8(12), U8(0),
B(ResumeGenerator), R(0), R(0), U8(12),
- B(Star), R(12),
+ B(Star12),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(13),
+ B(Star13),
B(LdaZero),
B(TestReferenceEqual), R(13),
B(JumpIfTrue), U8(5),
@@ -218,33 +218,33 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
B(LdaNamedProperty), R(11), U8(6), U8(13),
- B(JumpIfToBooleanTrue), U8(26),
+ B(JumpIfToBooleanTrue), U8(23),
B(LdaNamedProperty), R(11), U8(7), U8(15),
- B(Star), R(11),
+ B(Star11),
B(LdaFalse),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(11), R(1),
/* 38 S> */ B(Mov), R(1), R(3),
/* 56 S> */ B(LdaSmi), I8(1),
B(Mov), R(11), R(9),
- B(Star), R(8),
- B(Jump), U8(15),
+ B(Star8),
+ B(Jump), U8(11),
B(LdaSmi), I8(-1),
- B(Star), R(9),
- B(Star), R(8),
- B(Jump), U8(7),
- B(Star), R(9),
+ B(Star9),
+ B(Star8),
+ B(Jump), U8(5),
+ B(Star9),
B(LdaZero),
- B(Star), R(8),
+ B(Star8),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(10),
+ B(Star10),
B(Ldar), R(7),
- B(JumpIfToBooleanTrue), U8(74),
+ B(JumpIfToBooleanTrue), U8(72),
B(Mov), R(context), R(14),
B(LdaNamedProperty), R(6), U8(8), U8(17),
- B(JumpIfUndefinedOrNull), U8(65),
- B(Star), R(15),
+ B(JumpIfUndefinedOrNull), U8(63),
+ B(Star15),
B(CallProperty0), R(15), R(6), U8(19),
B(Star), R(17),
B(Mov), R(0), R(16),
@@ -260,11 +260,11 @@ bytecodes: [
B(Ldar), R(16),
B(ReThrow),
B(Ldar), R(16),
- B(JumpIfJSReceiver), U8(21),
+ B(JumpIfJSReceiver), U8(20),
B(Star), R(18),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(18), U8(1),
- B(Jump), U8(12),
- B(Star), R(14),
+ B(Jump), U8(11),
+ B(Star14),
B(LdaZero),
B(TestReferenceEqual), R(8),
B(JumpIfTrue), U8(5),
@@ -284,30 +284,30 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(14), U8(3),
/* 68 S> */ B(Return),
B(LdaUndefined),
- B(Star), R(6),
+ B(Star6),
B(LdaTrue),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(0), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
/* 68 S> */ B(Return),
- B(Star), R(5),
+ B(Star5),
B(CreateCatchContext), R(5), U8(11),
- B(Star), R(4),
+ B(Star4),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(4),
B(PushContext), R(5),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(7),
+ B(Star7),
B(LdaTrue),
- B(Star), R(8),
+ B(Star8),
B(Mov), R(0), R(6),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
/* 68 S> */ B(Return),
]
constant pool: [
- Smi [95],
- Smi [207],
+ Smi [85],
+ Smi [186],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
@@ -320,9 +320,9 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [19, 292, 292],
- [74, 157, 165],
- [181, 238, 240],
+ [18, 268, 268],
+ [66, 142, 148],
+ [161, 217, 219],
]
---
@@ -337,44 +337,44 @@ snippet: "
"
frame size: 19
parameter count: 1
-bytecode array length: 315
+bytecode array length: 288
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
/* 16 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
- B(Star), R(0),
+ B(Star0),
B(Mov), R(context), R(4),
/* 43 S> */ B(CreateArrayLiteral), U8(2), U8(0), U8(37),
- B(Star), R(7),
+ B(Star7),
B(LdaNamedProperty), R(7), U8(3), U8(1),
- B(JumpIfUndefinedOrNull), U8(15),
- B(Star), R(8),
+ B(JumpIfUndefinedOrNull), U8(14),
+ B(Star8),
B(CallProperty0), R(8), R(7), U8(3),
- B(JumpIfJSReceiver), U8(23),
+ B(JumpIfJSReceiver), U8(21),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
B(LdaNamedProperty), R(7), U8(4), U8(5),
- B(Star), R(8),
+ B(Star8),
B(CallProperty0), R(8), R(7), U8(7),
- B(Star), R(8),
+ B(Star8),
B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(8), U8(1),
- B(Star), R(6),
+ B(Star6),
B(LdaNamedProperty), R(6), U8(5), U8(9),
- B(Star), R(5),
+ B(Star5),
B(LdaFalse),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(context), R(10),
B(LdaTrue),
- B(Star), R(7),
+ B(Star7),
/* 38 S> */ B(CallProperty0), R(5), R(6), U8(11),
- B(Star), R(13),
+ B(Star13),
B(Mov), R(0), R(12),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(12), U8(2),
B(SuspendGenerator), R(0), R(0), U8(12), U8(0),
B(ResumeGenerator), R(0), R(0), U8(12),
- B(Star), R(12),
+ B(Star12),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(13),
+ B(Star13),
B(LdaZero),
B(TestReferenceEqual), R(13),
B(JumpIfTrue), U8(5),
@@ -385,11 +385,11 @@ bytecodes: [
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
B(LdaNamedProperty), R(11), U8(6), U8(13),
- B(JumpIfToBooleanTrue), U8(38),
+ B(JumpIfToBooleanTrue), U8(36),
B(LdaNamedProperty), R(11), U8(7), U8(15),
- B(Star), R(11),
+ B(Star11),
B(LdaFalse),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(11), R(1),
/* 38 S> */ B(Mov), R(1), R(3),
/* 63 S> */ B(LdaSmi), I8(10),
@@ -400,23 +400,23 @@ bytecodes: [
/* 96 E> */ B(TestEqual), R(3), U8(18),
B(JumpIfFalse), U8(4),
/* 103 S> */ B(Jump), U8(5),
- /* 23 E> */ B(JumpLoop), U8(92), I8(0),
+ /* 23 E> */ B(JumpLoop), U8(86), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(9),
- B(Star), R(8),
- B(Jump), U8(7),
- B(Star), R(9),
+ B(Star9),
+ B(Star8),
+ B(Jump), U8(5),
+ B(Star9),
B(LdaZero),
- B(Star), R(8),
+ B(Star8),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(10),
+ B(Star10),
B(Ldar), R(7),
- B(JumpIfToBooleanTrue), U8(74),
+ B(JumpIfToBooleanTrue), U8(72),
B(Mov), R(context), R(14),
B(LdaNamedProperty), R(6), U8(8), U8(19),
- B(JumpIfUndefinedOrNull), U8(65),
- B(Star), R(15),
+ B(JumpIfUndefinedOrNull), U8(63),
+ B(Star15),
B(CallProperty0), R(15), R(6), U8(21),
B(Star), R(17),
B(Mov), R(0), R(16),
@@ -432,11 +432,11 @@ bytecodes: [
B(Ldar), R(16),
B(ReThrow),
B(Ldar), R(16),
- B(JumpIfJSReceiver), U8(21),
+ B(JumpIfJSReceiver), U8(20),
B(Star), R(18),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(18), U8(1),
- B(Jump), U8(12),
- B(Star), R(14),
+ B(Jump), U8(11),
+ B(Star14),
B(LdaZero),
B(TestReferenceEqual), R(8),
B(JumpIfTrue), U8(5),
@@ -450,30 +450,30 @@ bytecodes: [
B(Ldar), R(9),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(6),
+ B(Star6),
B(LdaTrue),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(0), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
/* 114 S> */ B(Return),
- B(Star), R(5),
+ B(Star5),
B(CreateCatchContext), R(5), U8(9),
- B(Star), R(4),
+ B(Star4),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(4),
B(PushContext), R(5),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(7),
+ B(Star7),
B(LdaTrue),
- B(Star), R(8),
+ B(Star8),
B(Mov), R(0), R(6),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
/* 114 S> */ B(Return),
]
constant pool: [
- Smi [95],
- Smi [219],
+ Smi [85],
+ Smi [199],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
@@ -484,9 +484,9 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [19, 287, 287],
- [74, 169, 177],
- [193, 250, 252],
+ [18, 264, 264],
+ [66, 155, 161],
+ [174, 230, 232],
]
---
@@ -499,68 +499,68 @@ snippet: "
"
frame size: 14
parameter count: 1
-bytecode array length: 231
+bytecode array length: 204
bytecodes: [
B(Mov), R(closure), R(2),
B(Mov), R(this), R(3),
/* 16 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(2), U8(2),
- B(Star), R(0),
+ B(Star0),
B(Mov), R(context), R(2),
/* 31 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(1),
+ B(Star1),
/* 68 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
- B(Star), R(5),
+ B(Star5),
B(GetIterator), R(5), U8(2), U8(4),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(4),
+ B(Star4),
B(LdaNamedProperty), R(4), U8(2), U8(6),
- B(Star), R(3),
+ B(Star3),
B(LdaFalse),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(context), R(8),
B(LdaTrue),
- B(Star), R(5),
+ B(Star5),
/* 59 S> */ B(CallProperty0), R(3), R(4), U8(8),
- B(Star), R(9),
+ B(Star9),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
B(LdaNamedProperty), R(9), U8(3), U8(10),
- B(JumpIfToBooleanTrue), U8(32),
+ B(JumpIfToBooleanTrue), U8(28),
B(LdaNamedProperty), R(9), U8(4), U8(12),
- B(Star), R(9),
+ B(Star9),
B(LdaFalse),
- B(Star), R(5),
+ B(Star5),
B(Ldar), R(9),
/* 58 E> */ B(StaNamedProperty), R(1), U8(5), U8(14),
/* 87 S> */ B(LdaNamedProperty), R(1), U8(5), U8(16),
- B(Star), R(7),
+ B(Star7),
B(LdaSmi), I8(1),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(1), R(10),
- B(Jump), U8(15),
+ B(Jump), U8(11),
B(LdaSmi), I8(-1),
- B(Star), R(7),
- B(Star), R(6),
- B(Jump), U8(7),
- B(Star), R(7),
+ B(Star7),
+ B(Star6),
+ B(Jump), U8(5),
+ B(Star7),
B(LdaZero),
- B(Star), R(6),
+ B(Star6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(8),
+ B(Star8),
B(Ldar), R(5),
- B(JumpIfToBooleanTrue), U8(38),
+ B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(11),
B(LdaNamedProperty), R(4), U8(6), U8(18),
- B(JumpIfUndefinedOrNull), U8(29),
- B(Star), R(12),
+ B(JumpIfUndefinedOrNull), U8(26),
+ B(Star12),
B(CallProperty0), R(12), R(4), U8(20),
- B(JumpIfJSReceiver), U8(21),
- B(Star), R(13),
+ B(JumpIfJSReceiver), U8(19),
+ B(Star13),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
- B(Jump), U8(12),
- B(Star), R(11),
+ B(Jump), U8(11),
+ B(Star11),
B(LdaZero),
B(TestReferenceEqual), R(6),
B(JumpIfTrue), U8(5),
@@ -570,33 +570,33 @@ bytecodes: [
B(SetPendingMessage),
B(Ldar), R(6),
B(SwitchOnSmiNoFeedback), U8(7), U8(2), I8(0),
- B(Jump), U8(19),
+ B(Jump), U8(18),
B(Ldar), R(7),
B(ReThrow),
B(LdaFalse),
- B(Star), R(13),
+ B(Star13),
B(Mov), R(0), R(11),
B(Mov), R(7), R(12),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(11), U8(3),
/* 96 S> */ B(Return),
B(LdaUndefined),
- B(Star), R(4),
+ B(Star4),
B(LdaFalse),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(0), R(3),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(3), U8(3),
/* 96 S> */ B(Return),
- B(Star), R(3),
+ B(Star3),
B(CreateCatchContext), R(3), U8(9),
- B(Star), R(2),
+ B(Star2),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(2),
B(PushContext), R(3),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(5),
+ B(Star5),
B(LdaFalse),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(0), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(3),
/* 96 S> */ B(Return),
@@ -614,8 +614,8 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [15, 203, 203],
- [52, 104, 112],
- [128, 149, 151],
+ [14, 180, 180],
+ [46, 92, 98],
+ [111, 130, 132],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
index 680e5ee5cf..2aae4ae80e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
@@ -60,26 +60,26 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 42
+bytecode array length: 37
bytecodes: [
/* 42 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
- /* 68 S> */ B(JumpIfUndefinedOrNull), U8(36),
+ B(Star0),
+ /* 68 S> */ B(JumpIfUndefinedOrNull), U8(32),
B(ToObject), R(3),
B(ForInEnumerate), R(3),
B(ForInPrepare), R(4), U8(0),
B(LdaZero),
- B(Star), R(7),
+ B(Star7),
/* 63 S> */ B(ForInContinue), R(7), R(6),
- B(JumpIfFalse), U8(21),
+ B(JumpIfFalse), U8(18),
B(ForInNext), R(3), R(7), R(4), U8(0),
- B(JumpIfUndefined), U8(7),
- B(Star), R(2),
- /* 63 S> */ B(Star), R(1),
+ B(JumpIfUndefined), U8(5),
+ B(Star2),
+ /* 63 S> */ B(Star1),
/* 82 S> */ B(Return),
B(ForInStep), R(7),
- B(Star), R(7),
- /* 54 E> */ B(JumpLoop), U8(21), I8(0),
+ B(Star7),
+ /* 54 E> */ B(JumpLoop), U8(18), I8(0),
B(LdaUndefined),
/* 85 S> */ B(Return),
]
@@ -96,30 +96,30 @@ snippet: "
"
frame size: 9
parameter count: 1
-bytecode array length: 54
+bytecode array length: 48
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 59 S> */ B(CreateArrayLiteral), U8(0), U8(1), U8(37),
- B(JumpIfUndefinedOrNull), U8(45),
+ B(JumpIfUndefinedOrNull), U8(40),
B(ToObject), R(3),
B(ForInEnumerate), R(3),
B(ForInPrepare), R(4), U8(0),
B(LdaZero),
- B(Star), R(7),
+ B(Star7),
/* 54 S> */ B(ForInContinue), R(7), R(6),
- B(JumpIfFalse), U8(30),
+ B(JumpIfFalse), U8(26),
B(ForInNext), R(3), R(7), R(4), U8(0),
- B(JumpIfUndefined), U8(16),
- B(Star), R(2),
- /* 54 S> */ B(Star), R(1),
+ B(JumpIfUndefined), U8(13),
+ B(Star2),
+ /* 54 S> */ B(Star1),
/* 70 S> */ B(Ldar), R(2),
/* 75 E> */ B(Add), R(0), U8(2),
B(Mov), R(0), R(8),
- B(Star), R(0),
+ B(Star0),
/* 72 E> */ B(ForInStep), R(7),
- B(Star), R(7),
- /* 45 E> */ B(JumpLoop), U8(30), I8(0),
+ B(Star7),
+ /* 45 E> */ B(JumpLoop), U8(26), I8(0),
B(LdaUndefined),
/* 80 S> */ B(Return),
]
@@ -139,39 +139,39 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 81
+bytecode array length: 75
bytecodes: [
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(0),
+ B(Star0),
/* 77 S> */ B(CreateArrayLiteral), U8(1), U8(2), U8(37),
- B(JumpIfUndefinedOrNull), U8(69),
+ B(JumpIfUndefinedOrNull), U8(64),
B(ToObject), R(1),
B(ForInEnumerate), R(1),
B(ForInPrepare), R(2), U8(1),
B(LdaZero),
- B(Star), R(5),
+ B(Star5),
/* 68 S> */ B(ForInContinue), R(5), R(4),
- B(JumpIfFalse), U8(54),
+ B(JumpIfFalse), U8(50),
B(ForInNext), R(1), R(5), R(2), U8(1),
- B(JumpIfUndefined), U8(40),
- B(Star), R(6),
+ B(JumpIfUndefined), U8(37),
+ B(Star6),
B(Ldar), R(6),
/* 68 E> */ B(StaNamedProperty), R(0), U8(2), U8(3),
/* 100 S> */ B(LdaNamedProperty), R(0), U8(2), U8(5),
- B(Star), R(6),
+ B(Star6),
B(LdaSmi), I8(10),
/* 106 E> */ B(TestEqual), R(6), U8(7),
B(JumpIfFalse), U8(4),
- /* 113 S> */ B(Jump), U8(17),
+ /* 113 S> */ B(Jump), U8(16),
/* 130 S> */ B(LdaNamedProperty), R(0), U8(2), U8(5),
- B(Star), R(6),
+ B(Star6),
B(LdaSmi), I8(20),
/* 136 E> */ B(TestEqual), R(6), U8(8),
B(JumpIfFalse), U8(4),
- /* 143 S> */ B(Jump), U8(9),
+ /* 143 S> */ B(Jump), U8(8),
B(ForInStep), R(5),
- B(Star), R(5),
- /* 62 E> */ B(JumpLoop), U8(54), I8(0),
+ B(Star5),
+ /* 62 E> */ B(JumpLoop), U8(50), I8(0),
B(LdaUndefined),
/* 152 S> */ B(Return),
]
@@ -190,32 +190,32 @@ snippet: "
"
frame size: 9
parameter count: 1
-bytecode array length: 60
+bytecode array length: 55
bytecodes: [
/* 42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
- B(Star), R(0),
+ B(Star0),
/* 72 S> */ B(CreateArrayLiteral), U8(1), U8(2), U8(37),
- B(JumpIfUndefinedOrNull), U8(48),
+ B(JumpIfUndefinedOrNull), U8(44),
B(ToObject), R(1),
B(ForInEnumerate), R(1),
B(ForInPrepare), R(2), U8(1),
B(LdaZero),
- B(Star), R(5),
+ B(Star5),
/* 65 S> */ B(ForInContinue), R(5), R(4),
- B(JumpIfFalse), U8(33),
+ B(JumpIfFalse), U8(30),
B(ForInNext), R(1), R(5), R(2), U8(1),
- B(JumpIfUndefined), U8(19),
- B(Star), R(6),
+ B(JumpIfUndefined), U8(17),
+ B(Star6),
B(LdaZero),
- B(Star), R(8),
+ B(Star8),
B(Ldar), R(6),
/* 65 E> */ B(StaKeyedProperty), R(0), R(8), U8(3),
/* 83 S> */ B(LdaSmi), I8(3),
/* 91 E> */ B(LdaKeyedProperty), R(0), U8(5),
/* 95 S> */ B(Return),
B(ForInStep), R(5),
- B(Star), R(5),
- /* 59 E> */ B(JumpLoop), U8(33), I8(0),
+ B(Star5),
+ /* 59 E> */ B(JumpLoop), U8(30), I8(0),
B(LdaUndefined),
/* 98 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
index a3b9a1a86c..8794a29087 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -11,57 +11,57 @@ snippet: "
"
frame size: 12
parameter count: 1
-bytecode array length: 143
+bytecode array length: 127
bytecodes: [
/* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
- B(Star), R(4),
+ B(Star4),
B(GetIterator), R(4), U8(1), U8(3),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(3),
+ B(Star3),
B(LdaNamedProperty), R(3), U8(1), U8(5),
- B(Star), R(2),
+ B(Star2),
B(LdaFalse),
- B(Star), R(4),
+ B(Star4),
B(Mov), R(context), R(7),
B(LdaTrue),
- B(Star), R(4),
+ B(Star4),
/* 43 S> */ B(CallProperty0), R(2), R(3), U8(7),
- B(Star), R(8),
+ B(Star8),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
B(LdaNamedProperty), R(8), U8(2), U8(9),
- B(JumpIfToBooleanTrue), U8(22),
+ B(JumpIfToBooleanTrue), U8(20),
B(LdaNamedProperty), R(8), U8(3), U8(11),
- B(Star), R(8),
+ B(Star8),
B(LdaFalse),
- B(Star), R(4),
+ B(Star4),
B(Mov), R(8), R(1),
/* 43 S> */ B(Mov), R(1), R(0),
B(Ldar), R(8),
- /* 34 E> */ B(JumpLoop), U8(39), I8(0),
+ /* 34 E> */ B(JumpLoop), U8(35), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(6),
- B(Star), R(5),
- B(Jump), U8(7),
- B(Star), R(6),
+ B(Star6),
+ B(Star5),
+ B(Jump), U8(5),
+ B(Star6),
B(LdaZero),
- B(Star), R(5),
+ B(Star5),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(7),
+ B(Star7),
B(Ldar), R(4),
- B(JumpIfToBooleanTrue), U8(38),
+ B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(9),
B(LdaNamedProperty), R(3), U8(4), U8(13),
- B(JumpIfUndefinedOrNull), U8(29),
- B(Star), R(10),
+ B(JumpIfUndefinedOrNull), U8(26),
+ B(Star10),
B(CallProperty0), R(10), R(3), U8(15),
- B(JumpIfJSReceiver), U8(21),
- B(Star), R(11),
+ B(JumpIfJSReceiver), U8(19),
+ B(Star11),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
- B(Jump), U8(12),
- B(Star), R(9),
+ B(Jump), U8(11),
+ B(Star9),
B(LdaZero),
B(TestReferenceEqual), R(5),
B(JumpIfTrue), U8(5),
@@ -85,8 +85,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
]
handlers: [
- [31, 73, 81],
- [97, 118, 120],
+ [27, 65, 71],
+ [84, 103, 105],
]
---
@@ -96,59 +96,59 @@ snippet: "
"
frame size: 13
parameter count: 1
-bytecode array length: 151
+bytecode array length: 134
bytecodes: [
/* 42 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 68 S> */ B(GetIterator), R(0), U8(0), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(4),
+ B(Star4),
B(LdaNamedProperty), R(4), U8(1), U8(4),
- B(Star), R(3),
+ B(Star3),
B(LdaFalse),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(context), R(8),
B(LdaTrue),
- B(Star), R(5),
+ B(Star5),
/* 63 S> */ B(CallProperty0), R(3), R(4), U8(6),
- B(Star), R(9),
+ B(Star9),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
B(LdaNamedProperty), R(9), U8(2), U8(8),
- B(JumpIfToBooleanTrue), U8(26),
+ B(JumpIfToBooleanTrue), U8(23),
B(LdaNamedProperty), R(9), U8(3), U8(10),
- B(Star), R(9),
+ B(Star9),
B(LdaFalse),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(9), R(2),
/* 63 S> */ B(Mov), R(2), R(1),
/* 73 S> */ B(LdaSmi), I8(1),
B(Mov), R(9), R(7),
- B(Star), R(6),
- B(Jump), U8(15),
+ B(Star6),
+ B(Jump), U8(11),
B(LdaSmi), I8(-1),
- B(Star), R(7),
- B(Star), R(6),
- B(Jump), U8(7),
- B(Star), R(7),
+ B(Star7),
+ B(Star6),
+ B(Jump), U8(5),
+ B(Star7),
B(LdaZero),
- B(Star), R(6),
+ B(Star6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(8),
+ B(Star8),
B(Ldar), R(5),
- B(JumpIfToBooleanTrue), U8(38),
+ B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(10),
B(LdaNamedProperty), R(4), U8(4), U8(12),
- B(JumpIfUndefinedOrNull), U8(29),
- B(Star), R(11),
+ B(JumpIfUndefinedOrNull), U8(26),
+ B(Star11),
B(CallProperty0), R(11), R(4), U8(14),
- B(JumpIfJSReceiver), U8(21),
- B(Star), R(12),
+ B(JumpIfJSReceiver), U8(19),
+ B(Star12),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
- B(Jump), U8(12),
- B(Star), R(10),
+ B(Jump), U8(11),
+ B(Star10),
B(LdaZero),
B(TestReferenceEqual), R(6),
B(JumpIfTrue), U8(5),
@@ -176,8 +176,8 @@ constant pool: [
Smi [9],
]
handlers: [
- [29, 75, 83],
- [99, 120, 122],
+ [25, 66, 72],
+ [85, 104, 106],
]
---
@@ -189,31 +189,31 @@ snippet: "
"
frame size: 12
parameter count: 1
-bytecode array length: 159
+bytecode array length: 143
bytecodes: [
/* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
- B(Star), R(4),
+ B(Star4),
B(GetIterator), R(4), U8(1), U8(3),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(3),
+ B(Star3),
B(LdaNamedProperty), R(3), U8(1), U8(5),
- B(Star), R(2),
+ B(Star2),
B(LdaFalse),
- B(Star), R(4),
+ B(Star4),
B(Mov), R(context), R(7),
B(LdaTrue),
- B(Star), R(4),
+ B(Star4),
/* 43 S> */ B(CallProperty0), R(2), R(3), U8(7),
- B(Star), R(8),
+ B(Star8),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
B(LdaNamedProperty), R(8), U8(2), U8(9),
- B(JumpIfToBooleanTrue), U8(38),
+ B(JumpIfToBooleanTrue), U8(36),
B(LdaNamedProperty), R(8), U8(3), U8(11),
- B(Star), R(8),
+ B(Star8),
B(LdaFalse),
- B(Star), R(4),
+ B(Star4),
B(Mov), R(8), R(1),
/* 43 S> */ B(Mov), R(1), R(0),
/* 66 S> */ B(LdaSmi), I8(10),
@@ -224,29 +224,29 @@ bytecodes: [
/* 97 E> */ B(TestEqual), R(0), U8(14),
B(JumpIfFalse), U8(4),
/* 104 S> */ B(Jump), U8(5),
- /* 34 E> */ B(JumpLoop), U8(55), I8(0),
+ /* 34 E> */ B(JumpLoop), U8(51), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(6),
- B(Star), R(5),
- B(Jump), U8(7),
- B(Star), R(6),
+ B(Star6),
+ B(Star5),
+ B(Jump), U8(5),
+ B(Star6),
B(LdaZero),
- B(Star), R(5),
+ B(Star5),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(7),
+ B(Star7),
B(Ldar), R(4),
- B(JumpIfToBooleanTrue), U8(38),
+ B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(9),
B(LdaNamedProperty), R(3), U8(4), U8(15),
- B(JumpIfUndefinedOrNull), U8(29),
- B(Star), R(10),
+ B(JumpIfUndefinedOrNull), U8(26),
+ B(Star10),
B(CallProperty0), R(10), R(3), U8(17),
- B(JumpIfJSReceiver), U8(21),
- B(Star), R(11),
+ B(JumpIfJSReceiver), U8(19),
+ B(Star11),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
- B(Jump), U8(12),
- B(Star), R(9),
+ B(Jump), U8(11),
+ B(Star9),
B(LdaZero),
B(TestReferenceEqual), R(5),
B(JumpIfTrue), U8(5),
@@ -270,8 +270,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
]
handlers: [
- [31, 89, 97],
- [113, 134, 136],
+ [27, 81, 87],
+ [100, 119, 121],
]
---
@@ -281,63 +281,63 @@ snippet: "
"
frame size: 12
parameter count: 1
-bytecode array length: 165
+bytecode array length: 146
bytecodes: [
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(0),
+ B(Star0),
/* 77 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
- B(Star), R(3),
+ B(Star3),
B(GetIterator), R(3), U8(2), U8(4),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(2),
+ B(Star2),
B(LdaNamedProperty), R(2), U8(2), U8(6),
- B(Star), R(1),
+ B(Star1),
B(LdaFalse),
- B(Star), R(3),
+ B(Star3),
B(Mov), R(context), R(6),
B(LdaTrue),
- B(Star), R(3),
+ B(Star3),
/* 68 S> */ B(CallProperty0), R(1), R(2), U8(8),
- B(Star), R(7),
+ B(Star7),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
B(LdaNamedProperty), R(7), U8(3), U8(10),
- B(JumpIfToBooleanTrue), U8(32),
+ B(JumpIfToBooleanTrue), U8(28),
B(LdaNamedProperty), R(7), U8(4), U8(12),
- B(Star), R(7),
+ B(Star7),
B(LdaFalse),
- B(Star), R(3),
+ B(Star3),
B(Ldar), R(7),
/* 67 E> */ B(StaNamedProperty), R(0), U8(5), U8(14),
/* 96 S> */ B(LdaNamedProperty), R(0), U8(5), U8(16),
- B(Star), R(5),
+ B(Star5),
B(LdaSmi), I8(1),
- B(Star), R(4),
+ B(Star4),
B(Mov), R(0), R(8),
- B(Jump), U8(15),
+ B(Jump), U8(11),
B(LdaSmi), I8(-1),
- B(Star), R(5),
- B(Star), R(4),
- B(Jump), U8(7),
- B(Star), R(5),
+ B(Star5),
+ B(Star4),
+ B(Jump), U8(5),
+ B(Star5),
B(LdaZero),
- B(Star), R(4),
+ B(Star4),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(6),
+ B(Star6),
B(Ldar), R(3),
- B(JumpIfToBooleanTrue), U8(38),
+ B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(9),
B(LdaNamedProperty), R(2), U8(6), U8(18),
- B(JumpIfUndefinedOrNull), U8(29),
- B(Star), R(10),
+ B(JumpIfUndefinedOrNull), U8(26),
+ B(Star10),
B(CallProperty0), R(10), R(2), U8(20),
- B(JumpIfJSReceiver), U8(21),
- B(Star), R(11),
+ B(JumpIfJSReceiver), U8(19),
+ B(Star11),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
- B(Jump), U8(12),
- B(Star), R(9),
+ B(Jump), U8(11),
+ B(Star9),
B(LdaZero),
B(TestReferenceEqual), R(4),
B(JumpIfTrue), U8(5),
@@ -367,7 +367,7 @@ constant pool: [
Smi [9],
]
handlers: [
- [37, 89, 97],
- [113, 134, 136],
+ [32, 78, 84],
+ [97, 116, 118],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
index 42f4b336ca..7ccbd17f62 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
@@ -15,56 +15,56 @@ snippet: "
"
frame size: 14
parameter count: 2
-bytecode array length: 140
+bytecode array length: 125
bytecodes: [
/* 34 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(5),
+ B(Star5),
B(LdaNamedProperty), R(5), U8(0), U8(4),
- B(Star), R(4),
+ B(Star4),
B(LdaFalse),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(context), R(9),
B(LdaTrue),
- B(Star), R(6),
+ B(Star6),
/* 29 S> */ B(CallProperty0), R(4), R(5), U8(6),
- B(Star), R(10),
+ B(Star10),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
B(LdaNamedProperty), R(10), U8(1), U8(8),
- B(JumpIfToBooleanTrue), U8(25),
+ B(JumpIfToBooleanTrue), U8(23),
B(LdaNamedProperty), R(10), U8(2), U8(10),
- B(Star), R(10),
+ B(Star10),
B(LdaFalse),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(10), R(0),
/* 29 S> */ B(Mov), R(0), R(2),
/* 49 S> */ B(Mov), R(2), R(3),
B(Ldar), R(10),
- /* 20 E> */ B(JumpLoop), U8(42), I8(0),
+ /* 20 E> */ B(JumpLoop), U8(38), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(8),
- B(Star), R(7),
- B(Jump), U8(7),
- B(Star), R(8),
+ B(Star8),
+ B(Star7),
+ B(Jump), U8(5),
+ B(Star8),
B(LdaZero),
- B(Star), R(7),
+ B(Star7),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(9),
+ B(Star9),
B(Ldar), R(6),
- B(JumpIfToBooleanTrue), U8(38),
+ B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(11),
B(LdaNamedProperty), R(5), U8(3), U8(12),
- B(JumpIfUndefinedOrNull), U8(29),
- B(Star), R(12),
+ B(JumpIfUndefinedOrNull), U8(26),
+ B(Star12),
B(CallProperty0), R(12), R(5), U8(14),
- B(JumpIfJSReceiver), U8(21),
- B(Star), R(13),
+ B(JumpIfJSReceiver), U8(19),
+ B(Star13),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
- B(Jump), U8(12),
- B(Star), R(11),
+ B(Jump), U8(11),
+ B(Star11),
B(LdaZero),
B(TestReferenceEqual), R(7),
B(JumpIfTrue), U8(5),
@@ -87,8 +87,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
]
handlers: [
- [25, 70, 78],
- [94, 115, 117],
+ [22, 63, 69],
+ [82, 101, 103],
]
---
@@ -100,7 +100,7 @@ snippet: "
"
frame size: 20
parameter count: 2
-bytecode array length: 224
+bytecode array length: 205
bytecodes: [
/* 10 E> */ B(CreateFunctionContext), U8(0), U8(5),
B(PushContext), R(2),
@@ -117,28 +117,28 @@ bytecodes: [
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
/* 34 S> */ B(LdaContextSlot), R(3), U8(3), U8(0),
- B(Star), R(6),
+ B(Star6),
B(GetIterator), R(6), U8(0), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(5),
+ B(Star5),
B(LdaNamedProperty), R(5), U8(2), U8(4),
- B(Star), R(4),
+ B(Star4),
B(LdaFalse),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(context), R(9),
B(LdaTrue),
- B(Star), R(6),
+ B(Star6),
/* 29 S> */ B(CallProperty0), R(4), R(5), U8(6),
- B(Star), R(10),
+ B(Star10),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
B(LdaNamedProperty), R(10), U8(3), U8(8),
- B(JumpIfToBooleanTrue), U8(74),
+ B(JumpIfToBooleanTrue), U8(69),
B(LdaNamedProperty), R(10), U8(4), U8(10),
- B(Star), R(10),
+ B(Star10),
B(LdaFalse),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(10), R(0),
B(CreateBlockContext), U8(5),
B(PushContext), R(11),
@@ -147,9 +147,9 @@ bytecodes: [
/* 29 S> */ B(Ldar), R(0),
/* 29 E> */ B(StaCurrentContextSlot), U8(2),
/* 41 S> */ B(LdaLookupGlobalSlot), U8(6), U8(12), U8(3),
- B(Star), R(12),
+ B(Star12),
B(LdaConstant), U8(7),
- B(Star), R(13),
+ B(Star13),
B(LdaZero),
B(Star), R(17),
B(LdaSmi), I8(37),
@@ -160,33 +160,33 @@ bytecodes: [
B(Mov), R(13), R(15),
B(Mov), R(closure), R(16),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(14), U8(6),
- B(Star), R(12),
+ B(Star12),
/* 41 E> */ B(CallUndefinedReceiver1), R(12), R(13), U8(14),
B(PopContext), R(11),
B(Mov), R(0), R(10),
- /* 20 E> */ B(JumpLoop), U8(91), I8(0),
+ /* 20 E> */ B(JumpLoop), U8(84), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(8),
- B(Star), R(7),
- B(Jump), U8(7),
- B(Star), R(8),
+ B(Star8),
+ B(Star7),
+ B(Jump), U8(5),
+ B(Star8),
B(LdaZero),
- B(Star), R(7),
+ B(Star7),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(9),
+ B(Star9),
B(Ldar), R(6),
- B(JumpIfToBooleanTrue), U8(38),
+ B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(12),
B(LdaNamedProperty), R(5), U8(8), U8(16),
- B(JumpIfUndefinedOrNull), U8(29),
- B(Star), R(13),
+ B(JumpIfUndefinedOrNull), U8(26),
+ B(Star13),
B(CallProperty0), R(13), R(5), U8(18),
- B(JumpIfJSReceiver), U8(21),
- B(Star), R(14),
+ B(JumpIfJSReceiver), U8(19),
+ B(Star14),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
- B(Jump), U8(12),
- B(Star), R(12),
+ B(Jump), U8(11),
+ B(Star12),
B(LdaZero),
B(TestReferenceEqual), R(7),
B(JumpIfTrue), U8(5),
@@ -215,8 +215,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
]
handlers: [
- [58, 152, 160],
- [176, 197, 199],
+ [54, 141, 147],
+ [160, 179, 181],
]
---
@@ -228,29 +228,29 @@ snippet: "
"
frame size: 13
parameter count: 2
-bytecode array length: 157
+bytecode array length: 141
bytecodes: [
/* 34 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(3),
+ B(Star3),
B(LdaNamedProperty), R(3), U8(0), U8(4),
- B(Star), R(2),
+ B(Star2),
B(LdaFalse),
- B(Star), R(4),
+ B(Star4),
B(Mov), R(context), R(7),
B(LdaTrue),
- B(Star), R(4),
+ B(Star4),
/* 29 S> */ B(CallProperty0), R(2), R(3), U8(6),
- B(Star), R(8),
+ B(Star8),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
B(LdaNamedProperty), R(8), U8(1), U8(8),
- B(JumpIfToBooleanTrue), U8(42),
+ B(JumpIfToBooleanTrue), U8(39),
B(LdaNamedProperty), R(8), U8(2), U8(10),
- B(Star), R(8),
+ B(Star8),
B(LdaFalse),
- B(Star), R(4),
+ B(Star4),
B(Mov), R(8), R(0),
B(CreateBlockContext), U8(3),
B(PushContext), R(9),
@@ -259,33 +259,33 @@ bytecodes: [
/* 29 S> */ B(Ldar), R(0),
/* 29 E> */ B(StaCurrentContextSlot), U8(2),
/* 41 S> */ B(CreateClosure), U8(4), U8(0), U8(2),
- B(Star), R(10),
+ B(Star10),
/* 67 E> */ B(CallUndefinedReceiver0), R(10), U8(12),
B(PopContext), R(9),
B(Mov), R(0), R(8),
- /* 20 E> */ B(JumpLoop), U8(59), I8(0),
+ /* 20 E> */ B(JumpLoop), U8(54), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(6),
- B(Star), R(5),
- B(Jump), U8(7),
- B(Star), R(6),
+ B(Star6),
+ B(Star5),
+ B(Jump), U8(5),
+ B(Star6),
B(LdaZero),
- B(Star), R(5),
+ B(Star5),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(7),
+ B(Star7),
B(Ldar), R(4),
- B(JumpIfToBooleanTrue), U8(38),
+ B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(10),
B(LdaNamedProperty), R(3), U8(5), U8(14),
- B(JumpIfUndefinedOrNull), U8(29),
- B(Star), R(11),
+ B(JumpIfUndefinedOrNull), U8(26),
+ B(Star11),
B(CallProperty0), R(11), R(3), U8(16),
- B(JumpIfJSReceiver), U8(21),
- B(Star), R(12),
+ B(JumpIfJSReceiver), U8(19),
+ B(Star12),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
- B(Jump), U8(12),
- B(Star), R(10),
+ B(Jump), U8(11),
+ B(Star10),
B(LdaZero),
B(TestReferenceEqual), R(5),
B(JumpIfTrue), U8(5),
@@ -310,8 +310,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
]
handlers: [
- [25, 87, 95],
- [111, 132, 134],
+ [22, 79, 85],
+ [98, 117, 119],
]
---
@@ -323,60 +323,60 @@ snippet: "
"
frame size: 16
parameter count: 2
-bytecode array length: 151
+bytecode array length: 133
bytecodes: [
/* 41 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(7),
+ B(Star7),
B(LdaNamedProperty), R(7), U8(0), U8(4),
- B(Star), R(6),
+ B(Star6),
B(LdaFalse),
- B(Star), R(8),
+ B(Star8),
B(Mov), R(context), R(11),
B(LdaTrue),
- B(Star), R(8),
+ B(Star8),
/* 36 S> */ B(CallProperty0), R(6), R(7), U8(6),
- B(Star), R(12),
+ B(Star12),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
B(LdaNamedProperty), R(12), U8(1), U8(8),
- B(JumpIfToBooleanTrue), U8(36),
+ B(JumpIfToBooleanTrue), U8(31),
B(LdaNamedProperty), R(12), U8(2), U8(10),
- B(Star), R(12),
+ B(Star12),
B(LdaFalse),
- B(Star), R(8),
+ B(Star8),
B(Mov), R(12), R(0),
/* 31 S> */ B(LdaNamedProperty), R(0), U8(3), U8(12),
- B(Star), R(3),
+ B(Star3),
/* 34 S> */ B(LdaNamedProperty), R(0), U8(4), U8(14),
- B(Star), R(4),
+ B(Star4),
/* 56 S> */ B(Ldar), R(4),
/* 58 E> */ B(Add), R(3), U8(16),
- B(Star), R(5),
- /* 20 E> */ B(JumpLoop), U8(53), I8(0),
+ B(Star5),
+ /* 20 E> */ B(JumpLoop), U8(46), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(10),
- B(Star), R(9),
- B(Jump), U8(7),
- B(Star), R(10),
+ B(Star10),
+ B(Star9),
+ B(Jump), U8(5),
+ B(Star10),
B(LdaZero),
- B(Star), R(9),
+ B(Star9),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(11),
+ B(Star11),
B(Ldar), R(8),
- B(JumpIfToBooleanTrue), U8(38),
+ B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(13),
B(LdaNamedProperty), R(7), U8(5), U8(17),
- B(JumpIfUndefinedOrNull), U8(29),
- B(Star), R(14),
+ B(JumpIfUndefinedOrNull), U8(26),
+ B(Star14),
B(CallProperty0), R(14), R(7), U8(19),
- B(JumpIfJSReceiver), U8(21),
- B(Star), R(15),
+ B(JumpIfJSReceiver), U8(19),
+ B(Star15),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(15), U8(1),
- B(Jump), U8(12),
- B(Star), R(13),
+ B(Jump), U8(11),
+ B(Star13),
B(LdaZero),
B(TestReferenceEqual), R(9),
B(JumpIfTrue), U8(5),
@@ -401,8 +401,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
]
handlers: [
- [25, 81, 89],
- [105, 126, 128],
+ [22, 71, 77],
+ [90, 109, 111],
]
---
@@ -414,16 +414,16 @@ snippet: "
"
frame size: 15
parameter count: 2
-bytecode array length: 181
+bytecode array length: 164
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(5),
B(Mov), R(this), R(6),
/* 11 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(5), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(5), U8(0),
B(ResumeGenerator), R(0), R(0), U8(5),
- B(Star), R(5),
+ B(Star5),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
B(Ldar), R(5),
@@ -433,51 +433,51 @@ bytecodes: [
/* 35 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(6),
+ B(Star6),
B(LdaNamedProperty), R(6), U8(3), U8(4),
- B(Star), R(5),
+ B(Star5),
B(LdaFalse),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(context), R(10),
B(LdaTrue),
- B(Star), R(7),
+ B(Star7),
/* 30 S> */ B(CallProperty0), R(5), R(6), U8(6),
- B(Star), R(11),
+ B(Star11),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
B(LdaNamedProperty), R(11), U8(4), U8(8),
- B(JumpIfToBooleanTrue), U8(25),
+ B(JumpIfToBooleanTrue), U8(23),
B(LdaNamedProperty), R(11), U8(5), U8(10),
- B(Star), R(11),
+ B(Star11),
B(LdaFalse),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(11), R(1),
/* 30 S> */ B(Mov), R(1), R(3),
/* 50 S> */ B(Mov), R(3), R(4),
B(Ldar), R(11),
- /* 21 E> */ B(JumpLoop), U8(42), I8(0),
+ /* 21 E> */ B(JumpLoop), U8(38), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(9),
- B(Star), R(8),
- B(Jump), U8(7),
- B(Star), R(9),
+ B(Star9),
+ B(Star8),
+ B(Jump), U8(5),
+ B(Star9),
B(LdaZero),
- B(Star), R(8),
+ B(Star8),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(10),
+ B(Star10),
B(Ldar), R(7),
- B(JumpIfToBooleanTrue), U8(38),
+ B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(12),
B(LdaNamedProperty), R(6), U8(6), U8(12),
- B(JumpIfUndefinedOrNull), U8(29),
- B(Star), R(13),
+ B(JumpIfUndefinedOrNull), U8(26),
+ B(Star13),
B(CallProperty0), R(13), R(6), U8(14),
- B(JumpIfJSReceiver), U8(21),
- B(Star), R(14),
+ B(JumpIfJSReceiver), U8(19),
+ B(Star14),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
- B(Jump), U8(12),
- B(Star), R(12),
+ B(Jump), U8(11),
+ B(Star12),
B(LdaZero),
B(TestReferenceEqual), R(8),
B(JumpIfTrue), U8(5),
@@ -494,7 +494,7 @@ bytecodes: [
/* 55 S> */ B(Return),
]
constant pool: [
- Smi [21],
+ Smi [20],
Smi [10],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
@@ -503,8 +503,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
]
handlers: [
- [66, 111, 119],
- [135, 156, 158],
+ [61, 102, 108],
+ [121, 140, 142],
]
---
@@ -516,16 +516,16 @@ snippet: "
"
frame size: 14
parameter count: 2
-bytecode array length: 225
+bytecode array length: 205
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
/* 11 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
B(ResumeGenerator), R(0), R(0), U8(4),
- B(Star), R(4),
+ B(Star4),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(4),
@@ -535,65 +535,65 @@ bytecodes: [
/* 35 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(5),
+ B(Star5),
B(LdaNamedProperty), R(5), U8(4), U8(4),
- B(Star), R(4),
+ B(Star4),
B(LdaFalse),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(context), R(9),
B(LdaTrue),
- B(Star), R(6),
+ B(Star6),
/* 30 S> */ B(CallProperty0), R(4), R(5), U8(6),
- B(Star), R(10),
+ B(Star10),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
B(LdaNamedProperty), R(10), U8(5), U8(8),
- B(JumpIfToBooleanTrue), U8(63),
+ B(JumpIfToBooleanTrue), U8(58),
B(LdaNamedProperty), R(10), U8(6), U8(10),
- B(Star), R(10),
+ B(Star10),
B(LdaFalse),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(10), R(1),
/* 30 S> */ B(Mov), R(1), R(3),
/* 40 S> */ B(LdaFalse),
- B(Star), R(12),
+ B(Star12),
B(Mov), R(3), R(11),
B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(11), U8(2),
/* 40 E> */ B(SuspendGenerator), R(0), R(0), U8(11), U8(1),
B(ResumeGenerator), R(0), R(0), U8(11),
- B(Star), R(11),
+ B(Star11),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(7), U8(2), I8(0),
B(Ldar), R(11),
/* 40 E> */ B(Throw),
B(LdaSmi), I8(1),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(11), R(8),
- B(Jump), U8(20),
+ B(Jump), U8(16),
B(Ldar), R(11),
- /* 21 E> */ B(JumpLoop), U8(80), I8(0),
+ /* 21 E> */ B(JumpLoop), U8(73), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(8),
- B(Star), R(7),
- B(Jump), U8(7),
- B(Star), R(8),
+ B(Star8),
+ B(Star7),
+ B(Jump), U8(5),
+ B(Star8),
B(LdaZero),
- B(Star), R(7),
+ B(Star7),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(9),
+ B(Star9),
B(Ldar), R(6),
- B(JumpIfToBooleanTrue), U8(38),
+ B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(11),
B(LdaNamedProperty), R(5), U8(9), U8(12),
- B(JumpIfUndefinedOrNull), U8(29),
- B(Star), R(12),
+ B(JumpIfUndefinedOrNull), U8(26),
+ B(Star12),
B(CallProperty0), R(12), R(5), U8(14),
- B(JumpIfJSReceiver), U8(21),
- B(Star), R(13),
+ B(JumpIfJSReceiver), U8(19),
+ B(Star13),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
- B(Jump), U8(12),
- B(Star), R(11),
+ B(Jump), U8(11),
+ B(Star11),
B(LdaZero),
B(TestReferenceEqual), R(7),
B(JumpIfTrue), U8(5),
@@ -612,22 +612,22 @@ bytecodes: [
/* 49 S> */ B(Return),
]
constant pool: [
- Smi [21],
- Smi [118],
+ Smi [20],
+ Smi [108],
Smi [10],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
- Smi [16],
+ Smi [15],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
Smi [6],
Smi [9],
]
handlers: [
- [66, 149, 157],
- [173, 194, 196],
+ [61, 137, 143],
+ [156, 175, 177],
]
---
@@ -639,61 +639,61 @@ snippet: "
"
frame size: 16
parameter count: 2
-bytecode array length: 195
+bytecode array length: 173
bytecodes: [
B(Mov), R(closure), R(5),
B(Mov), R(this), R(6),
/* 16 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(5), U8(2),
- B(Star), R(0),
+ B(Star0),
B(Mov), R(context), R(5),
/* 40 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(7),
+ B(Star7),
B(LdaNamedProperty), R(7), U8(0), U8(4),
- B(Star), R(6),
+ B(Star6),
B(LdaFalse),
- B(Star), R(8),
+ B(Star8),
B(Mov), R(context), R(11),
B(LdaTrue),
- B(Star), R(8),
+ B(Star8),
/* 35 S> */ B(CallProperty0), R(6), R(7), U8(6),
- B(Star), R(12),
+ B(Star12),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
B(LdaNamedProperty), R(12), U8(1), U8(8),
- B(JumpIfToBooleanTrue), U8(25),
+ B(JumpIfToBooleanTrue), U8(23),
B(LdaNamedProperty), R(12), U8(2), U8(10),
- B(Star), R(12),
+ B(Star12),
B(LdaFalse),
- B(Star), R(8),
+ B(Star8),
B(Mov), R(12), R(1),
/* 35 S> */ B(Mov), R(1), R(3),
/* 55 S> */ B(Mov), R(3), R(4),
B(Ldar), R(12),
- /* 26 E> */ B(JumpLoop), U8(42), I8(0),
+ /* 26 E> */ B(JumpLoop), U8(38), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(10),
- B(Star), R(9),
- B(Jump), U8(7),
- B(Star), R(10),
+ B(Star10),
+ B(Star9),
+ B(Jump), U8(5),
+ B(Star10),
B(LdaZero),
- B(Star), R(9),
+ B(Star9),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(11),
+ B(Star11),
B(Ldar), R(8),
- B(JumpIfToBooleanTrue), U8(38),
+ B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(13),
B(LdaNamedProperty), R(7), U8(3), U8(12),
- B(JumpIfUndefinedOrNull), U8(29),
- B(Star), R(14),
+ B(JumpIfUndefinedOrNull), U8(26),
+ B(Star14),
B(CallProperty0), R(14), R(7), U8(14),
- B(JumpIfJSReceiver), U8(21),
- B(Star), R(15),
+ B(JumpIfJSReceiver), U8(19),
+ B(Star15),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(15), U8(1),
- B(Jump), U8(12),
- B(Star), R(13),
+ B(Jump), U8(11),
+ B(Star13),
B(LdaZero),
B(TestReferenceEqual), R(9),
B(JumpIfTrue), U8(5),
@@ -707,23 +707,23 @@ bytecodes: [
B(Ldar), R(10),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(7),
+ B(Star7),
B(LdaFalse),
- B(Star), R(8),
+ B(Star8),
B(Mov), R(0), R(6),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(6), U8(3),
/* 60 S> */ B(Return),
- B(Star), R(6),
+ B(Star6),
B(CreateCatchContext), R(6), U8(4),
- B(Star), R(5),
+ B(Star5),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(5),
B(PushContext), R(6),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(8),
+ B(Star8),
B(LdaFalse),
- B(Star), R(9),
+ B(Star9),
B(Mov), R(0), R(7),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(7), U8(3),
/* 60 S> */ B(Return),
@@ -736,9 +736,9 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [15, 167, 167],
- [40, 85, 93],
- [109, 130, 132],
+ [14, 149, 149],
+ [36, 77, 83],
+ [96, 115, 117],
]
---
@@ -750,35 +750,35 @@ snippet: "
"
frame size: 15
parameter count: 2
-bytecode array length: 231
+bytecode array length: 207
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
/* 16 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(4), U8(2),
- B(Star), R(0),
+ B(Star0),
B(Mov), R(context), R(4),
/* 40 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(6),
+ B(Star6),
B(LdaNamedProperty), R(6), U8(1), U8(4),
- B(Star), R(5),
+ B(Star5),
B(LdaFalse),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(context), R(10),
B(LdaTrue),
- B(Star), R(7),
+ B(Star7),
/* 35 S> */ B(CallProperty0), R(5), R(6), U8(6),
- B(Star), R(11),
+ B(Star11),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
B(LdaNamedProperty), R(11), U8(2), U8(8),
- B(JumpIfToBooleanTrue), U8(57),
+ B(JumpIfToBooleanTrue), U8(53),
B(LdaNamedProperty), R(11), U8(3), U8(10),
- B(Star), R(11),
+ B(Star11),
B(LdaFalse),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(11), R(1),
/* 35 S> */ B(Mov), R(1), R(3),
/* 45 S> */ B(Mov), R(0), R(12),
@@ -786,38 +786,38 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(12), U8(2),
/* 45 E> */ B(SuspendGenerator), R(0), R(0), U8(12), U8(0),
B(ResumeGenerator), R(0), R(0), U8(12),
- B(Star), R(12),
+ B(Star12),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(13),
+ B(Star13),
B(LdaZero),
B(TestReferenceEqual), R(13),
B(JumpIfTrue), U8(5),
B(Ldar), R(12),
B(ReThrow),
B(Ldar), R(12),
- /* 26 E> */ B(JumpLoop), U8(74), I8(0),
+ /* 26 E> */ B(JumpLoop), U8(68), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(9),
- B(Star), R(8),
- B(Jump), U8(7),
- B(Star), R(9),
+ B(Star9),
+ B(Star8),
+ B(Jump), U8(5),
+ B(Star9),
B(LdaZero),
- B(Star), R(8),
+ B(Star8),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(10),
+ B(Star10),
B(Ldar), R(7),
- B(JumpIfToBooleanTrue), U8(38),
+ B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(12),
B(LdaNamedProperty), R(6), U8(4), U8(12),
- B(JumpIfUndefinedOrNull), U8(29),
- B(Star), R(13),
+ B(JumpIfUndefinedOrNull), U8(26),
+ B(Star13),
B(CallProperty0), R(13), R(6), U8(14),
- B(JumpIfJSReceiver), U8(21),
- B(Star), R(14),
+ B(JumpIfJSReceiver), U8(19),
+ B(Star14),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
- B(Jump), U8(12),
- B(Star), R(12),
+ B(Jump), U8(11),
+ B(Star12),
B(LdaZero),
B(TestReferenceEqual), R(8),
B(JumpIfTrue), U8(5),
@@ -831,29 +831,29 @@ bytecodes: [
B(Ldar), R(9),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(6),
+ B(Star6),
B(LdaTrue),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(0), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
/* 54 S> */ B(Return),
- B(Star), R(5),
+ B(Star5),
B(CreateCatchContext), R(5), U8(5),
- B(Star), R(4),
+ B(Star4),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(4),
B(PushContext), R(5),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(7),
+ B(Star7),
B(LdaTrue),
- B(Star), R(8),
+ B(Star8),
B(Mov), R(0), R(6),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
/* 54 S> */ B(Return),
]
constant pool: [
- Smi [96],
+ Smi [88],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -861,8 +861,8 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [19, 203, 203],
- [44, 121, 129],
- [145, 166, 168],
+ [18, 183, 183],
+ [40, 111, 117],
+ [130, 149, 151],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
index d73ca7d69c..f1d58a83da 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
@@ -28,10 +28,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 10
+bytecode array length: 9
bytecodes: [
/* 34 S> */ B(CreateClosure), U8(0), U8(0), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 56 E> */ B(CallUndefinedReceiver0), R(0), U8(0),
/* 58 S> */ B(Return),
]
@@ -47,12 +47,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 15
+bytecode array length: 13
bytecodes: [
/* 34 S> */ B(CreateClosure), U8(0), U8(0), U8(2),
- B(Star), R(0),
+ B(Star0),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
/* 67 E> */ B(CallUndefinedReceiver1), R(0), R(1), U8(0),
/* 70 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GenerateTestUndetectable.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GenerateTestUndetectable.golden
index 071b6ae8ff..b07b1bbf34 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GenerateTestUndetectable.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GenerateTestUndetectable.golden
@@ -14,17 +14,17 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 22
+bytecode array length: 19
bytecodes: [
/* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(0),
+ B(Star0),
/* 63 S> */ B(LdaSmi), I8(10),
- B(Star), R(1),
+ B(Star1),
/* 67 S> */ B(Ldar), R(0),
B(TestUndetectable),
- B(JumpIfFalse), U8(6),
+ B(JumpIfFalse), U8(5),
/* 88 S> */ B(LdaSmi), I8(20),
- B(Star), R(1),
+ B(Star1),
/* 97 S> */ B(Ldar), R(1),
/* 106 S> */ B(Return),
]
@@ -43,17 +43,17 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 22
+bytecode array length: 19
bytecodes: [
/* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(0),
+ B(Star0),
/* 63 S> */ B(LdaSmi), I8(10),
- B(Star), R(1),
+ B(Star1),
/* 67 S> */ B(Ldar), R(0),
B(TestUndetectable),
- B(JumpIfFalse), U8(6),
+ B(JumpIfFalse), U8(5),
/* 93 S> */ B(LdaSmi), I8(20),
- B(Star), R(1),
+ B(Star1),
/* 102 S> */ B(Ldar), R(1),
/* 111 S> */ B(Return),
]
@@ -72,17 +72,17 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 22
+bytecode array length: 19
bytecodes: [
/* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(0),
+ B(Star0),
/* 63 S> */ B(LdaSmi), I8(10),
- B(Star), R(1),
+ B(Star1),
/* 67 S> */ B(Ldar), R(0),
B(TestUndetectable),
- B(JumpIfTrue), U8(6),
+ B(JumpIfTrue), U8(5),
/* 88 S> */ B(LdaSmi), I8(20),
- B(Star), R(1),
+ B(Star1),
/* 97 S> */ B(Ldar), R(1),
/* 106 S> */ B(Return),
]
@@ -101,17 +101,17 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 22
+bytecode array length: 19
bytecodes: [
/* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(0),
+ B(Star0),
/* 63 S> */ B(LdaSmi), I8(10),
- B(Star), R(1),
+ B(Star1),
/* 67 S> */ B(Ldar), R(0),
B(TestUndetectable),
- B(JumpIfTrue), U8(6),
+ B(JumpIfTrue), U8(5),
/* 93 S> */ B(LdaSmi), I8(20),
- B(Star), R(1),
+ B(Star1),
/* 102 S> */ B(Ldar), R(1),
/* 111 S> */ B(Return),
]
@@ -130,16 +130,16 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 21
+bytecode array length: 18
bytecodes: [
/* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(0),
+ B(Star0),
/* 63 S> */ B(LdaSmi), I8(10),
- B(Star), R(1),
+ B(Star1),
/* 67 S> */ B(Ldar), R(0),
- B(JumpIfNotNull), U8(6),
+ B(JumpIfNotNull), U8(5),
/* 89 S> */ B(LdaSmi), I8(20),
- B(Star), R(1),
+ B(Star1),
/* 98 S> */ B(Ldar), R(1),
/* 107 S> */ B(Return),
]
@@ -158,16 +158,16 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 21
+bytecode array length: 18
bytecodes: [
/* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(0),
+ B(Star0),
/* 63 S> */ B(LdaSmi), I8(10),
- B(Star), R(1),
+ B(Star1),
/* 67 S> */ B(Ldar), R(0),
- B(JumpIfNotUndefined), U8(6),
+ B(JumpIfNotUndefined), U8(5),
/* 94 S> */ B(LdaSmi), I8(20),
- B(Star), R(1),
+ B(Star1),
/* 103 S> */ B(Ldar), R(1),
/* 112 S> */ B(Return),
]
@@ -186,16 +186,16 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 21
+bytecode array length: 18
bytecodes: [
/* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(0),
+ B(Star0),
/* 63 S> */ B(LdaSmi), I8(10),
- B(Star), R(1),
+ B(Star1),
/* 67 S> */ B(Ldar), R(0),
- B(JumpIfNull), U8(6),
+ B(JumpIfNull), U8(5),
/* 89 S> */ B(LdaSmi), I8(20),
- B(Star), R(1),
+ B(Star1),
/* 98 S> */ B(Ldar), R(1),
/* 107 S> */ B(Return),
]
@@ -214,16 +214,16 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 21
+bytecode array length: 18
bytecodes: [
/* 46 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(0),
+ B(Star0),
/* 63 S> */ B(LdaSmi), I8(10),
- B(Star), R(1),
+ B(Star1),
/* 67 S> */ B(Ldar), R(0),
- B(JumpIfUndefined), U8(6),
+ B(JumpIfUndefined), U8(5),
/* 94 S> */ B(LdaSmi), I8(20),
- B(Star), R(1),
+ B(Star1),
/* 103 S> */ B(Ldar), R(1),
/* 112 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
index acd0a0e7f8..f28a4e70e0 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
@@ -13,16 +13,16 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 43
+bytecode array length: 41
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(1),
B(Mov), R(this), R(2),
/* 11 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(1), U8(0),
B(ResumeGenerator), R(0), R(0), U8(1),
- B(Star), R(1),
+ B(Star1),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
B(Ldar), R(1),
@@ -33,7 +33,7 @@ bytecodes: [
/* 16 S> */ B(Return),
]
constant pool: [
- Smi [21],
+ Smi [20],
Smi [10],
Smi [7],
]
@@ -47,16 +47,16 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 79
+bytecode array length: 74
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(1),
B(Mov), R(this), R(2),
/* 11 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(1), U8(0),
B(ResumeGenerator), R(0), R(0), U8(1),
- B(Star), R(1),
+ B(Star1),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(1),
@@ -64,13 +64,13 @@ bytecodes: [
B(Ldar), R(1),
/* 25 S> */ B(Return),
/* 16 S> */ B(LdaSmi), I8(42),
- B(Star), R(1),
+ B(Star1),
B(LdaFalse),
- B(Star), R(2),
+ B(Star2),
B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(1), U8(2),
/* 16 E> */ B(SuspendGenerator), R(0), R(0), U8(1), U8(1),
B(ResumeGenerator), R(0), R(0), U8(1),
- B(Star), R(1),
+ B(Star1),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(4), U8(2), I8(0),
B(Ldar), R(1),
@@ -81,8 +81,8 @@ bytecodes: [
/* 25 S> */ B(Return),
]
constant pool: [
- Smi [21],
- Smi [57],
+ Smi [20],
+ Smi [53],
Smi [10],
Smi [7],
Smi [10],
@@ -98,16 +98,16 @@ snippet: "
"
frame size: 14
parameter count: 1
-bytecode array length: 231
+bytecode array length: 210
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
/* 11 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
B(ResumeGenerator), R(0), R(0), U8(4),
- B(Star), R(4),
+ B(Star4),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(4),
@@ -115,69 +115,69 @@ bytecodes: [
B(Ldar), R(4),
/* 44 S> */ B(Return),
/* 30 S> */ B(CreateArrayLiteral), U8(4), U8(0), U8(37),
- B(Star), R(6),
+ B(Star6),
B(GetIterator), R(6), U8(1), U8(3),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(5),
+ B(Star5),
B(LdaNamedProperty), R(5), U8(5), U8(5),
- B(Star), R(4),
+ B(Star4),
B(LdaFalse),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(context), R(9),
B(LdaTrue),
- B(Star), R(6),
+ B(Star6),
/* 25 S> */ B(CallProperty0), R(4), R(5), U8(7),
- B(Star), R(10),
+ B(Star10),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
B(LdaNamedProperty), R(10), U8(6), U8(9),
- B(JumpIfToBooleanTrue), U8(63),
+ B(JumpIfToBooleanTrue), U8(58),
B(LdaNamedProperty), R(10), U8(7), U8(11),
- B(Star), R(10),
+ B(Star10),
B(LdaFalse),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(10), R(1),
/* 25 S> */ B(Mov), R(1), R(3),
/* 36 S> */ B(LdaFalse),
- B(Star), R(12),
+ B(Star12),
B(Mov), R(3), R(11),
B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(11), U8(2),
/* 36 E> */ B(SuspendGenerator), R(0), R(0), U8(11), U8(1),
B(ResumeGenerator), R(0), R(0), U8(11),
- B(Star), R(11),
+ B(Star11),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(8), U8(2), I8(0),
B(Ldar), R(11),
/* 36 E> */ B(Throw),
B(LdaSmi), I8(1),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(11), R(8),
- B(Jump), U8(20),
+ B(Jump), U8(16),
B(Ldar), R(11),
- /* 16 E> */ B(JumpLoop), U8(80), I8(0),
+ /* 16 E> */ B(JumpLoop), U8(73), I8(0),
B(LdaSmi), I8(-1),
- B(Star), R(8),
- B(Star), R(7),
- B(Jump), U8(7),
- B(Star), R(8),
+ B(Star8),
+ B(Star7),
+ B(Jump), U8(5),
+ B(Star8),
B(LdaZero),
- B(Star), R(7),
+ B(Star7),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(9),
+ B(Star9),
B(Ldar), R(6),
- B(JumpIfToBooleanTrue), U8(38),
+ B(JumpIfToBooleanTrue), U8(35),
B(Mov), R(context), R(11),
B(LdaNamedProperty), R(5), U8(10), U8(13),
- B(JumpIfUndefinedOrNull), U8(29),
- B(Star), R(12),
+ B(JumpIfUndefinedOrNull), U8(26),
+ B(Star12),
B(CallProperty0), R(12), R(5), U8(15),
- B(JumpIfJSReceiver), U8(21),
- B(Star), R(13),
+ B(JumpIfJSReceiver), U8(19),
+ B(Star13),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
- B(Jump), U8(12),
- B(Star), R(11),
+ B(Jump), U8(11),
+ B(Star11),
B(LdaZero),
B(TestReferenceEqual), R(7),
B(JumpIfTrue), U8(5),
@@ -196,23 +196,23 @@ bytecodes: [
/* 44 S> */ B(Return),
]
constant pool: [
- Smi [21],
- Smi [124],
+ Smi [20],
+ Smi [113],
Smi [10],
Smi [7],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
- Smi [16],
+ Smi [15],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
Smi [6],
Smi [9],
]
handlers: [
- [72, 155, 163],
- [179, 200, 202],
+ [66, 142, 148],
+ [161, 180, 182],
]
---
@@ -223,16 +223,16 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 204
+bytecode array length: 188
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(1),
B(Mov), R(this), R(2),
/* 38 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 38 E> */ B(SuspendGenerator), R(0), R(0), U8(1), U8(0),
B(ResumeGenerator), R(0), R(0), U8(1),
- B(Star), R(1),
+ B(Star1),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(1),
@@ -240,58 +240,58 @@ bytecodes: [
B(Ldar), R(1),
/* 54 S> */ B(Return),
/* 43 S> */ B(LdaGlobal), U8(4), U8(0),
- B(Star), R(5),
+ B(Star5),
/* 50 E> */ B(CallUndefinedReceiver0), R(5), U8(2),
- B(Star), R(6),
+ B(Star6),
B(GetIterator), R(6), U8(4), U8(6),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(3),
+ B(Star3),
B(LdaNamedProperty), R(3), U8(5), U8(8),
- B(Star), R(5),
+ B(Star5),
B(LdaUndefined),
- B(Star), R(4),
+ B(Star4),
B(LdaZero),
- B(Star), R(2),
+ B(Star2),
B(Ldar), R(2),
B(SwitchOnSmiNoFeedback), U8(6), U8(2), I8(1),
B(CallProperty1), R(5), R(3), R(4), U8(10),
- B(Jump), U8(63),
+ B(Jump), U8(59),
B(LdaNamedProperty), R(3), U8(8), U8(12),
- B(JumpIfUndefinedOrNull), U8(11),
- B(Star), R(6),
+ B(JumpIfUndefinedOrNull), U8(10),
+ B(Star6),
B(CallProperty1), R(6), R(3), R(4), U8(14),
- B(Jump), U8(48),
+ B(Jump), U8(45),
B(Ldar), R(4),
/* 54 S> */ B(Return),
B(LdaNamedProperty), R(3), U8(9), U8(16),
- B(JumpIfUndefinedOrNull), U8(11),
- B(Star), R(6),
+ B(JumpIfUndefinedOrNull), U8(10),
+ B(Star6),
B(CallProperty1), R(6), R(3), R(4), U8(18),
- B(Jump), U8(30),
+ B(Jump), U8(28),
B(LdaNamedProperty), R(3), U8(8), U8(20),
- B(JumpIfUndefinedOrNull), U8(19),
- B(Star), R(6),
+ B(JumpIfUndefinedOrNull), U8(17),
+ B(Star6),
B(CallProperty0), R(6), R(3), U8(22),
B(Jump), U8(2),
- B(JumpIfJSReceiver), U8(9),
- B(Star), R(6),
+ B(JumpIfJSReceiver), U8(8),
+ B(Star6),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
B(CallRuntime), U16(Runtime::kThrowThrowMethodMissing), R(0), U8(0),
- B(Star), R(1),
+ B(Star1),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(1), U8(1),
B(LdaNamedProperty), R(1), U8(10), U8(24),
- B(JumpIfToBooleanTrue), U8(24),
+ B(JumpIfToBooleanTrue), U8(22),
B(Ldar), R(1),
/* 43 E> */ B(SuspendGenerator), R(0), R(0), U8(6), U8(1),
B(ResumeGenerator), R(0), R(0), U8(6),
- B(Star), R(4),
+ B(Star4),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(2),
- B(JumpLoop), U8(108), I8(0),
+ B(Star2),
+ B(JumpLoop), U8(101), I8(0),
B(LdaNamedProperty), R(1), U8(11), U8(26),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(1),
B(TestReferenceEqual), R(2),
B(JumpIfFalse), U8(5),
@@ -301,14 +301,14 @@ bytecodes: [
/* 54 S> */ B(Return),
]
constant pool: [
- Smi [21],
- Smi [172],
+ Smi [20],
+ Smi [159],
Smi [10],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["g"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
Smi [11],
- Smi [29],
+ Smi [28],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["throw"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
index dd6f8ea9e7..fa86273ad6 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
@@ -35,11 +35,11 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 15
+bytecode array length: 14
bytecodes: [
/* 31 S> */ B(LdaGlobal), U8(0), U8(0),
B(ToNumeric), U8(2),
- B(Star), R(0),
+ B(Star0),
B(Dec), U8(2),
/* 44 E> */ B(StaGlobal), U8(0), U8(3),
B(Ldar), R(0),
@@ -80,11 +80,11 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 15
+bytecode array length: 14
bytecodes: [
/* 32 S> */ B(LdaGlobal), U8(0), U8(0),
B(ToNumeric), U8(2),
- B(Star), R(0),
+ B(Star0),
B(Inc), U8(2),
/* 50 E> */ B(StaGlobal), U8(0), U8(3),
B(Ldar), R(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden
index 4167138f1f..21dbc7e740 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden
@@ -16,10 +16,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 10
+bytecode array length: 9
bytecodes: [
/* 39 S> */ B(LdaGlobal), U8(0), U8(0),
- B(Star), R(0),
+ B(Star0),
B(LdaConstant), U8(1),
B(DeletePropertySloppy), R(0),
/* 57 S> */ B(Return),
@@ -42,10 +42,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 10
+bytecode array length: 9
bytecodes: [
/* 51 S> */ B(LdaGlobal), U8(0), U8(0),
- B(Star), R(0),
+ B(Star0),
B(LdaSmi), I8(1),
B(DeletePropertyStrict), R(0),
/* 70 S> */ B(Return),
@@ -66,10 +66,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 10
+bytecode array length: 9
bytecodes: [
/* 39 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
+ B(Star0),
B(CallRuntime), U16(Runtime::kDeleteLookupSlot), R(0), U8(1),
/* 55 S> */ B(Return),
]
@@ -89,10 +89,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 10
+bytecode array length: 9
bytecodes: [
/* 25 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
+ B(Star0),
B(CallRuntime), U16(Runtime::kDeleteLookupSlot), R(0), U8(1),
/* 41 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden
index 49a80d13c9..81e8c5fad2 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden
@@ -28,10 +28,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
/* 42 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 47 S> */ B(LdaConstant), U8(1),
/* 58 S> */ B(Return),
]
@@ -48,10 +48,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
/* 42 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 48 S> */ B(LdaConstant), U8(0),
/* 60 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden
index 8f45fff2cb..2a459ac5a1 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden
@@ -21,20 +21,20 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 39
+bytecode array length: 35
bytecodes: [
/* 16 E> */ B(CreateMappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 29 S> */ B(CreateEmptyObjectLiteral),
/* 31 E> */ B(StaGlobal), U8(0), U8(0),
/* 45 S> */ B(LdaGlobal), U8(0), U8(2),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
/* 50 E> */ B(StaNamedPropertyNoFeedback), R(1), U8(1), U8(0),
/* 63 S> */ B(LdaGlobal), U8(0), U8(2),
- B(Star), R(1),
+ B(Star1),
/* 70 E> */ B(LdaGlobal), U8(0), U8(2),
- B(Star), R(2),
+ B(Star2),
/* 72 E> */ B(LdaNamedPropertyNoFeedback), R(2), U8(1),
/* 68 E> */ B(StaNamedPropertyNoFeedback), R(1), U8(2), U8(0),
/* 101 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(3),
@@ -64,33 +64,33 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 67
+bytecode array length: 62
bytecodes: [
/* 16 E> */ B(CreateMappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 29 S> */ B(CreateEmptyObjectLiteral),
/* 31 E> */ B(StaGlobal), U8(0), U8(0),
/* 50 S> */ B(LdaZero),
/* 52 E> */ B(StaGlobal), U8(1), U8(2),
/* 59 S> */ B(LdaGlobal), U8(1), U8(4),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(5),
/* 59 E> */ B(TestLessThan), R(1), U8(6),
- B(JumpIfFalse), U8(42),
+ B(JumpIfFalse), U8(39),
/* 81 S> */ B(LdaGlobal), U8(0), U8(7),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
/* 86 E> */ B(StaNamedProperty), R(1), U8(2), U8(9),
/* 101 S> */ B(LdaGlobal), U8(0), U8(7),
- B(Star), R(1),
+ B(Star1),
/* 108 E> */ B(LdaGlobal), U8(0), U8(7),
- B(Star), R(2),
+ B(Star2),
/* 110 E> */ B(LdaNamedProperty), R(2), U8(2), U8(11),
/* 106 E> */ B(StaNamedProperty), R(1), U8(3), U8(13),
/* 66 S> */ B(LdaGlobal), U8(1), U8(4),
B(Inc), U8(15),
/* 66 E> */ B(StaGlobal), U8(1), U8(2),
- /* 45 E> */ B(JumpLoop), U8(49), I8(0),
+ /* 45 E> */ B(JumpLoop), U8(45), I8(0),
/* 149 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(4),
/* 156 S> */ B(Return),
]
@@ -121,33 +121,33 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 68
+bytecode array length: 63
bytecodes: [
/* 16 E> */ B(CreateMappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 29 S> */ B(CreateEmptyObjectLiteral),
/* 31 E> */ B(StaGlobal), U8(0), U8(0),
/* 45 S> */ B(LdaSmi), I8(4),
/* 47 E> */ B(StaGlobal), U8(1), U8(2),
/* 68 S> */ B(LdaGlobal), U8(1), U8(4),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(4),
/* 68 E> */ B(TestGreaterThan), R(1), U8(6),
- B(JumpIfFalse), U8(42),
+ B(JumpIfFalse), U8(39),
/* 85 S> */ B(LdaGlobal), U8(0), U8(7),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
/* 90 E> */ B(StaNamedProperty), R(1), U8(2), U8(9),
/* 105 S> */ B(LdaGlobal), U8(0), U8(7),
- B(Star), R(1),
+ B(Star1),
/* 112 E> */ B(LdaGlobal), U8(0), U8(7),
- B(Star), R(2),
+ B(Star2),
/* 114 E> */ B(LdaNamedProperty), R(2), U8(2), U8(11),
/* 110 E> */ B(StaNamedProperty), R(1), U8(3), U8(13),
/* 128 S> */ B(LdaGlobal), U8(1), U8(4),
B(Dec), U8(15),
/* 129 E> */ B(StaGlobal), U8(1), U8(2),
- /* 60 E> */ B(JumpLoop), U8(49), I8(0),
+ /* 60 E> */ B(JumpLoop), U8(45), I8(0),
/* 168 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(4),
/* 175 S> */ B(Return),
]
@@ -178,33 +178,33 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 68
+bytecode array length: 63
bytecodes: [
/* 16 E> */ B(CreateMappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 29 S> */ B(CreateEmptyObjectLiteral),
/* 31 E> */ B(StaGlobal), U8(0), U8(0),
/* 45 S> */ B(LdaSmi), I8(4),
/* 47 E> */ B(StaGlobal), U8(1), U8(2),
/* 75 S> */ B(LdaGlobal), U8(0), U8(4),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
/* 80 E> */ B(StaNamedProperty), R(1), U8(2), U8(6),
/* 95 S> */ B(LdaGlobal), U8(0), U8(4),
- B(Star), R(1),
+ B(Star1),
/* 102 E> */ B(LdaGlobal), U8(0), U8(4),
- B(Star), R(2),
+ B(Star2),
/* 104 E> */ B(LdaNamedProperty), R(2), U8(2), U8(8),
/* 100 E> */ B(StaNamedProperty), R(1), U8(3), U8(10),
/* 118 S> */ B(LdaGlobal), U8(1), U8(12),
B(Dec), U8(14),
/* 119 E> */ B(StaGlobal), U8(1), U8(2),
/* 141 S> */ B(LdaGlobal), U8(1), U8(12),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(4),
/* 141 E> */ B(TestGreaterThan), R(1), U8(15),
B(JumpIfFalse), U8(5),
- /* 60 E> */ B(JumpLoop), U8(49), I8(0),
+ /* 60 E> */ B(JumpLoop), U8(45), I8(0),
/* 171 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(4),
/* 178 S> */ B(Return),
]
@@ -237,32 +237,32 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 70
+bytecode array length: 62
bytecodes: [
/* 16 E> */ B(CreateMappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 29 S> */ B(LdaConstant), U8(0),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(41),
- B(Star), R(3),
+ B(Star3),
B(CallRuntime), U16(Runtime::kCreateObjectLiteralWithoutAllocationSite), R(2), U8(2),
/* 31 E> */ B(StaGlobal), U8(1), U8(0),
/* 95 S> */ B(LdaGlobal), U8(1), U8(2),
- B(Star), R(1),
+ B(Star1),
/* 101 E> */ B(LdaNamedPropertyNoFeedback), R(1), U8(2),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(3),
/* 104 E> */ B(TestLessThan), R(1), U8(4),
- B(JumpIfFalse), U8(15),
+ B(JumpIfFalse), U8(14),
/* 121 S> */ B(LdaGlobal), U8(1), U8(2),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(3),
/* 126 E> */ B(StaNamedPropertyNoFeedback), R(1), U8(2), U8(0),
- B(Jump), U8(19),
+ B(Jump), U8(17),
/* 158 S> */ B(LdaGlobal), U8(1), U8(2),
- B(Star), R(1),
+ B(Star1),
/* 165 E> */ B(LdaGlobal), U8(1), U8(2),
- B(Star), R(2),
+ B(Star2),
/* 167 E> */ B(LdaNamedPropertyNoFeedback), R(2), U8(3),
/* 163 E> */ B(StaNamedPropertyNoFeedback), R(1), U8(2), U8(0),
/* 206 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(4),
@@ -289,14 +289,14 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 23
+bytecode array length: 20
bytecodes: [
/* 16 E> */ B(CreateMappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 29 S> */ B(LdaConstant), U8(0),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(4),
- B(Star), R(4),
+ B(Star4),
B(CallRuntime), U16(Runtime::kCreateArrayLiteralWithoutAllocationSite), R(3), U8(2),
/* 31 E> */ B(StaGlobal), U8(1), U8(0),
/* 77 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(2),
@@ -321,14 +321,14 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 23
+bytecode array length: 20
bytecodes: [
/* 16 E> */ B(CreateMappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 29 S> */ B(LdaConstant), U8(0),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(37),
- B(Star), R(4),
+ B(Star4),
B(CallRuntime), U16(Runtime::kCreateArrayLiteralWithoutAllocationSite), R(3), U8(2),
/* 31 E> */ B(StaGlobal), U8(1), U8(0),
/* 62 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(2),
@@ -364,61 +364,61 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 136
+bytecode array length: 114
bytecodes: [
/* 237 E> */ B(CreateMappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 255 S> */ B(LdaNamedPropertyNoFeedback), R(this), U8(0),
- B(Star), R(1),
+ B(Star1),
/* 255 E> */ B(CallNoFeedback), R(1), R(this), U8(1),
/* 274 S> */ B(LdaNamedPropertyNoFeedback), R(this), U8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(1),
- B(Star), R(3),
+ B(Star3),
B(Mov), R(this), R(2),
/* 274 E> */ B(CallNoFeedback), R(1), R(2), U8(2),
/* 294 S> */ B(LdaNamedPropertyNoFeedback), R(this), U8(2),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(1),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(2),
- B(Star), R(4),
+ B(Star4),
B(Mov), R(this), R(2),
/* 294 E> */ B(CallNoFeedback), R(1), R(2), U8(3),
/* 317 S> */ B(LdaNamedPropertyNoFeedback), R(this), U8(3),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(1),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaSmi), I8(3),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(this), R(2),
/* 317 E> */ B(CallNoFeedback), R(1), R(2), U8(4),
/* 343 S> */ B(LdaNamedPropertyNoFeedback), R(this), U8(4),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(1),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaSmi), I8(3),
- B(Star), R(5),
+ B(Star5),
B(LdaSmi), I8(4),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(this), R(2),
/* 343 E> */ B(CallNoFeedback), R(1), R(2), U8(5),
/* 372 S> */ B(LdaNamedPropertyNoFeedback), R(this), U8(5),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(1),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaSmi), I8(3),
- B(Star), R(5),
+ B(Star5),
B(LdaSmi), I8(4),
- B(Star), R(6),
+ B(Star6),
B(LdaSmi), I8(5),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(this), R(2),
/* 372 E> */ B(CallNoFeedback), R(1), R(2), U8(6),
/* 416 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(6),
@@ -458,69 +458,69 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 139
+bytecode array length: 111
bytecodes: [
/* 189 E> */ B(CreateMappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 202 S> */ B(LdaUndefined),
- B(Star), R(2),
+ B(Star2),
B(LdaGlobal), U8(0), U8(0),
- B(Star), R(1),
+ B(Star1),
/* 202 E> */ B(CallNoFeedback), R(1), R(2), U8(1),
/* 216 S> */ B(LdaUndefined),
- B(Star), R(2),
+ B(Star2),
B(LdaGlobal), U8(1), U8(2),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(1),
- B(Star), R(3),
+ B(Star3),
/* 216 E> */ B(CallNoFeedback), R(1), R(2), U8(2),
/* 231 S> */ B(LdaUndefined),
- B(Star), R(2),
+ B(Star2),
B(LdaGlobal), U8(2), U8(4),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(1),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(2),
- B(Star), R(4),
+ B(Star4),
/* 231 E> */ B(CallNoFeedback), R(1), R(2), U8(3),
/* 249 S> */ B(LdaUndefined),
- B(Star), R(2),
+ B(Star2),
B(LdaGlobal), U8(3), U8(6),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(1),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaSmi), I8(3),
- B(Star), R(5),
+ B(Star5),
/* 249 E> */ B(CallNoFeedback), R(1), R(2), U8(4),
/* 270 S> */ B(LdaUndefined),
- B(Star), R(2),
+ B(Star2),
B(LdaGlobal), U8(4), U8(8),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(1),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaSmi), I8(3),
- B(Star), R(5),
+ B(Star5),
B(LdaSmi), I8(4),
- B(Star), R(6),
+ B(Star6),
/* 270 E> */ B(CallNoFeedback), R(1), R(2), U8(5),
/* 294 S> */ B(LdaUndefined),
- B(Star), R(2),
+ B(Star2),
B(LdaGlobal), U8(5), U8(10),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(1),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaSmi), I8(3),
- B(Star), R(5),
+ B(Star5),
B(LdaSmi), I8(4),
- B(Star), R(6),
+ B(Star6),
B(LdaSmi), I8(5),
- B(Star), R(7),
+ B(Star7),
/* 294 E> */ B(CallNoFeedback), R(1), R(2), U8(6),
/* 338 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(6),
/* 345 S> */ B(Return),
@@ -555,22 +555,22 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 42
+bytecode array length: 38
bytecodes: [
/* 79 E> */ B(CreateMappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 93 S> */ B(CreateEmptyObjectLiteral),
/* 95 E> */ B(StaGlobal), U8(0), U8(0),
/* 111 S> */ B(LdaGlobal), U8(0), U8(2),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(3),
/* 115 E> */ B(StaNamedProperty), R(1), U8(1), U8(4),
/* 130 S> */ B(LdaGlobal), U8(0), U8(2),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(4),
/* 134 E> */ B(StaNamedProperty), R(1), U8(2), U8(6),
/* 149 S> */ B(LdaGlobal), U8(3), U8(8),
- B(Star), R(1),
+ B(Star1),
/* 149 E> */ B(CallUndefinedReceiver0), R(1), U8(10),
/* 182 S> */ B(LdaNamedProperty), R(0), U8(4), U8(12),
/* 189 S> */ B(Return),
@@ -603,22 +603,22 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 42
+bytecode array length: 38
bytecodes: [
/* 76 E> */ B(CreateMappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 92 S> */ B(CreateEmptyObjectLiteral),
/* 94 E> */ B(StaGlobal), U8(0), U8(0),
/* 112 S> */ B(LdaGlobal), U8(0), U8(2),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(3),
/* 116 E> */ B(StaNamedProperty), R(1), U8(1), U8(4),
/* 133 S> */ B(LdaGlobal), U8(0), U8(2),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(4),
/* 137 E> */ B(StaNamedProperty), R(1), U8(2), U8(6),
/* 154 S> */ B(LdaGlobal), U8(3), U8(8),
- B(Star), R(1),
+ B(Star1),
/* 154 E> */ B(CallUndefinedReceiver0), R(1), U8(10),
/* 189 S> */ B(LdaNamedProperty), R(0), U8(4), U8(12),
/* 196 S> */ B(Return),
@@ -733,16 +733,16 @@ snippet: "
"
frame size: 3
parameter count: 2
-bytecode array length: 26
+bytecode array length: 24
bytecodes: [
/* 46 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(1),
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(2),
B(CreateMappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(LdaCurrentContextSlot), U8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(3),
/* 57 E> */ B(StaNamedPropertyNoFeedback), R(2), U8(1), U8(0),
/* 79 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(2),
@@ -765,16 +765,16 @@ snippet: "
"
frame size: 3
parameter count: 2
-bytecode array length: 26
+bytecode array length: 24
bytecodes: [
/* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(1),
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(2),
B(CreateMappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 37 S> */ B(LdaCurrentContextSlot), U8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(3),
/* 41 E> */ B(StaNamedPropertyNoFeedback), R(2), U8(1), U8(0),
/* 63 S> */ B(LdaNamedPropertyNoFeedback), R(0), U8(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden
index c911947481..c212cf36fb 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden
@@ -20,20 +20,20 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 41
+bytecode array length: 37
bytecodes: [
/* 16 E> */ B(CreateMappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 29 S> */ B(CreateEmptyObjectLiteral),
/* 31 E> */ B(StaGlobal), U8(0), U8(0),
/* 45 S> */ B(LdaGlobal), U8(0), U8(2),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
/* 49 E> */ B(StaNamedProperty), R(1), U8(1), U8(4),
/* 62 S> */ B(LdaGlobal), U8(0), U8(2),
- B(Star), R(1),
+ B(Star1),
/* 68 E> */ B(LdaGlobal), U8(0), U8(2),
- B(Star), R(2),
+ B(Star2),
/* 70 E> */ B(LdaNamedProperty), R(2), U8(1), U8(6),
/* 66 E> */ B(StaNamedProperty), R(1), U8(2), U8(8),
/* 98 S> */ B(LdaNamedProperty), R(0), U8(3), U8(10),
@@ -67,28 +67,28 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 64
+bytecode array length: 58
bytecodes: [
/* 16 E> */ B(CreateMappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 29 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
/* 31 E> */ B(StaGlobal), U8(1), U8(1),
/* 93 S> */ B(LdaGlobal), U8(1), U8(3),
- B(Star), R(1),
+ B(Star1),
/* 99 E> */ B(LdaNamedProperty), R(1), U8(2), U8(5),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(3),
/* 101 E> */ B(TestLessThan), R(1), U8(7),
- B(JumpIfFalse), U8(15),
+ B(JumpIfFalse), U8(14),
/* 118 S> */ B(LdaGlobal), U8(1), U8(3),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(3),
/* 122 E> */ B(StaNamedProperty), R(1), U8(2), U8(8),
- B(Jump), U8(20),
+ B(Jump), U8(18),
/* 154 S> */ B(LdaGlobal), U8(1), U8(3),
- B(Star), R(1),
+ B(Star1),
/* 160 E> */ B(LdaGlobal), U8(1), U8(3),
- B(Star), R(2),
+ B(Star2),
/* 162 E> */ B(LdaNamedProperty), R(2), U8(3), U8(10),
/* 158 E> */ B(StaNamedProperty), R(1), U8(2), U8(8),
/* 200 S> */ B(LdaNamedProperty), R(0), U8(4), U8(12),
@@ -126,59 +126,59 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 143
+bytecode array length: 121
bytecodes: [
/* 237 E> */ B(CreateMappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 255 S> */ B(LdaNamedProperty), R(this), U8(0), U8(0),
- B(Star), R(1),
+ B(Star1),
/* 255 E> */ B(CallProperty0), R(1), R(this), U8(2),
/* 274 S> */ B(LdaNamedProperty), R(this), U8(1), U8(4),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(1),
- B(Star), R(3),
+ B(Star3),
/* 274 E> */ B(CallProperty1), R(1), R(this), R(3), U8(6),
/* 294 S> */ B(LdaNamedProperty), R(this), U8(2), U8(8),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(1),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(2),
- B(Star), R(4),
+ B(Star4),
/* 294 E> */ B(CallProperty2), R(1), R(this), R(3), R(4), U8(10),
/* 317 S> */ B(LdaNamedProperty), R(this), U8(3), U8(12),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(1),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaSmi), I8(3),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(this), R(2),
/* 317 E> */ B(CallProperty), R(1), R(2), U8(4), U8(14),
/* 343 S> */ B(LdaNamedProperty), R(this), U8(4), U8(16),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(1),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaSmi), I8(3),
- B(Star), R(5),
+ B(Star5),
B(LdaSmi), I8(4),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(this), R(2),
/* 343 E> */ B(CallProperty), R(1), R(2), U8(5), U8(18),
/* 372 S> */ B(LdaNamedProperty), R(this), U8(5), U8(20),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(1),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaSmi), I8(3),
- B(Star), R(5),
+ B(Star5),
B(LdaSmi), I8(4),
- B(Star), R(6),
+ B(Star6),
B(LdaSmi), I8(5),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(this), R(2),
/* 372 E> */ B(CallProperty), R(1), R(2), U8(6), U8(22),
/* 416 S> */ B(LdaNamedProperty), R(0), U8(6), U8(24),
@@ -218,57 +218,57 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 125
+bytecode array length: 103
bytecodes: [
/* 189 E> */ B(CreateMappedArguments),
- B(Star), R(0),
+ B(Star0),
/* 202 S> */ B(LdaGlobal), U8(0), U8(0),
- B(Star), R(1),
+ B(Star1),
/* 202 E> */ B(CallUndefinedReceiver0), R(1), U8(2),
/* 216 S> */ B(LdaGlobal), U8(1), U8(4),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(1),
- B(Star), R(2),
+ B(Star2),
/* 216 E> */ B(CallUndefinedReceiver1), R(1), R(2), U8(6),
/* 231 S> */ B(LdaGlobal), U8(2), U8(8),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(1),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(2),
- B(Star), R(3),
+ B(Star3),
/* 231 E> */ B(CallUndefinedReceiver2), R(1), R(2), R(3), U8(10),
/* 249 S> */ B(LdaGlobal), U8(3), U8(12),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(1),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(3),
- B(Star), R(4),
+ B(Star4),
/* 249 E> */ B(CallUndefinedReceiver), R(1), R(2), U8(3), U8(14),
/* 270 S> */ B(LdaGlobal), U8(4), U8(16),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(1),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(3),
- B(Star), R(4),
+ B(Star4),
B(LdaSmi), I8(4),
- B(Star), R(5),
+ B(Star5),
/* 270 E> */ B(CallUndefinedReceiver), R(1), R(2), U8(4), U8(18),
/* 294 S> */ B(LdaGlobal), U8(5), U8(20),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(1),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(3),
- B(Star), R(4),
+ B(Star4),
B(LdaSmi), I8(4),
- B(Star), R(5),
+ B(Star5),
B(LdaSmi), I8(5),
- B(Star), R(6),
+ B(Star6),
/* 294 E> */ B(CallUndefinedReceiver), R(1), R(2), U8(5), U8(22),
/* 338 S> */ B(LdaNamedProperty), R(0), U8(6), U8(24),
/* 345 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
index de6b5e0844..9dab60f0e8 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
@@ -110,14 +110,14 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 20
+bytecode array length: 18
bytecodes: [
/* 25 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
- /* 30 S> */ B(JumpIfToBooleanFalse), U8(11),
+ B(Star0),
+ /* 30 S> */ B(JumpIfToBooleanFalse), U8(10),
/* 43 S> */ B(Ldar), R(0),
B(AddSmi), I8(1), U8(0),
- B(Star), R(0),
+ B(Star0),
B(Jump), U8(5),
/* 66 S> */ B(LdaSmi), I8(2),
/* 75 S> */ B(Return),
@@ -249,12 +249,12 @@ snippet: "
"
frame size: 2
parameter count: 2
-bytecode array length: 26
+bytecode array length: 24
bytecodes: [
/* 24 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 35 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star1),
/* 38 S> */ B(LdaConstant), U8(0),
/* 44 E> */ B(TestEqualStrict), R(0), U8(0),
B(JumpIfFalse), U8(10),
@@ -345,12 +345,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 23
+bytecode array length: 21
bytecodes: [
/* 25 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 36 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star1),
/* 41 S> */ B(Ldar), R(0),
B(JumpIfToBooleanFalse), U8(10),
/* 52 S> */ B(Mov), R(0), R(1),
@@ -445,10 +445,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
/* 25 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 30 S> */ B(JumpIfToBooleanFalse), U8(5),
/* 43 S> */ B(LdaSmi), I8(20),
/* 53 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden
index 951b3543a8..034fbdaf21 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden
@@ -27,10 +27,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
/* 42 S> */ B(Wide), B(LdaSmi), I16(1234),
- B(Star), R(0),
+ B(Star0),
/* 48 S> */ B(Wide), B(LdaSmi), I16(5678),
/* 60 S> */ B(Return),
]
@@ -45,10 +45,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
/* 42 S> */ B(Wide), B(LdaSmi), I16(1234),
- B(Star), R(0),
+ B(Star0),
/* 48 S> */ B(Wide), B(LdaSmi), I16(1234),
/* 60 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden
index 0ee86f4cea..808158d347 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden
@@ -11,10 +11,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(10),
- B(Star), R(0),
+ B(Star0),
B(LdaUndefined),
/* 46 S> */ B(Return),
]
@@ -29,10 +29,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 5
+bytecode array length: 4
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(10),
- B(Star), R(0),
+ B(Star0),
/* 55 S> */ B(Return),
]
constant pool: [
@@ -46,12 +46,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 16
+bytecode array length: 14
bytecodes: [
B(LdaTheHole),
- B(Star), R(0),
+ B(Star0),
/* 42 S> */ B(LdaSmi), I8(20),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
/* 45 E> */ B(ThrowReferenceErrorIfHole), U8(0),
B(Mov), R(1), R(0),
@@ -70,12 +70,12 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 10
+bytecode array length: 8
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(10),
- B(Star), R(0),
+ B(Star0),
/* 46 S> */ B(LdaSmi), I8(20),
- B(Star), R(0),
+ B(Star0),
B(LdaUndefined),
/* 54 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
index fe82887e3c..81440404f7 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
@@ -57,14 +57,14 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 24
+bytecode array length: 23
bytecodes: [
/* 30 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
/* 42 S> */ B(LdaSmi), I8(20),
- B(Star), R(1),
+ B(Star1),
B(LdaCurrentContextSlot), U8(2),
/* 45 E> */ B(ThrowReferenceErrorIfHole), U8(1),
B(Ldar), R(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden
index 3199c09d94..f69f618b6f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden
@@ -202,10 +202,10 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 522
+bytecode array length: 521
bytecodes: [
/* 33 S> */ B(CreateEmptyObjectLiteral),
- B(Star), R(0),
+ B(Star0),
/* 41 S> */ B(LdaNamedProperty), R(0), U8(0), U8(0),
/* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(2),
/* 67 S> */ B(LdaNamedProperty), R(0), U8(2), U8(4),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden
index ddcf588467..13bed9d061 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden
@@ -11,10 +11,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(JumpIfToBooleanTrue), U8(4),
B(LdaSmi), I8(3),
/* 59 S> */ B(Return),
@@ -30,10 +30,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 13
+bytecode array length: 12
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(LdaSmi), I8(1),
/* 55 E> */ B(TestEqual), R(0), U8(0),
B(JumpIfTrue), U8(4),
@@ -51,10 +51,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(JumpIfToBooleanFalse), U8(4),
B(LdaSmi), I8(3),
/* 59 S> */ B(Return),
@@ -70,10 +70,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(LdaZero),
/* 55 E> */ B(TestEqual), R(0), U8(0),
B(JumpIfFalse), U8(4),
@@ -91,10 +91,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(JumpIfToBooleanTrue), U8(4),
B(LdaSmi), I8(3),
/* 67 S> */ B(Return),
@@ -110,18 +110,18 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 23
+bytecode array length: 19
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 49 S> */ B(LdaSmi), I8(3),
- B(Star), R(1),
+ B(Star1),
/* 56 S> */ B(LdaSmi), I8(4),
- B(Star), R(2),
+ B(Star2),
/* 59 S> */ B(Ldar), R(0),
- B(JumpIfToBooleanTrue), U8(8),
+ B(JumpIfToBooleanTrue), U8(7),
B(LdaSmi), I8(5),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(3),
/* 94 S> */ B(Return),
]
@@ -168,149 +168,148 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 275
+bytecode array length: 208
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(LdaSmi), I8(2),
- B(Star), R(1),
+ B(Star1),
/* 60 S> */ B(LdaSmi), I8(3),
- B(Star), R(2),
+ B(Star2),
/* 63 S> */ B(Ldar), R(0),
- B(JumpIfToBooleanTrueConstant), U8(0),
+ B(JumpIfToBooleanTrue), U8(196),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(3),
/* 623 S> */ B(Return),
]
constant pool: [
- Smi [260],
]
handlers: [
]
@@ -353,149 +352,148 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 274
+bytecode array length: 207
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(LdaSmi), I8(2),
- B(Star), R(1),
+ B(Star1),
/* 60 S> */ B(LdaSmi), I8(3),
- B(Star), R(2),
+ B(Star2),
/* 63 S> */ B(Ldar), R(0),
- B(JumpIfToBooleanFalseConstant), U8(0),
+ B(JumpIfToBooleanFalse), U8(196),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(3),
/* 623 S> */ B(Return),
]
constant pool: [
- Smi [260],
]
handlers: [
]
@@ -538,150 +536,149 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 278
+bytecode array length: 211
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(LdaSmi), I8(2),
- B(Star), R(1),
+ B(Star1),
/* 60 S> */ B(LdaSmi), I8(3),
- B(Star), R(2),
+ B(Star2),
/* 63 S> */ B(LdaSmi), I8(3),
/* 73 E> */ B(TestGreaterThan), R(0), U8(0),
- B(JumpIfTrueConstant), U8(0),
+ B(JumpIfTrue), U8(196),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(3),
/* 629 S> */ B(Return),
]
constant pool: [
- Smi [260],
]
handlers: [
]
@@ -724,150 +721,149 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 277
+bytecode array length: 210
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(LdaSmi), I8(2),
- B(Star), R(1),
+ B(Star1),
/* 60 S> */ B(LdaSmi), I8(3),
- B(Star), R(2),
+ B(Star2),
/* 63 S> */ B(LdaSmi), I8(5),
/* 73 E> */ B(TestLessThan), R(0), U8(0),
- B(JumpIfFalseConstant), U8(0),
+ B(JumpIfFalse), U8(196),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(3),
/* 629 S> */ B(Return),
]
constant pool: [
- Smi [260],
]
handlers: [
]
@@ -910,10 +906,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 14
+bytecode array length: 13
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(JumpIfToBooleanFalse), U8(4),
B(LdaSmi), I8(3),
B(JumpIfToBooleanTrue), U8(3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
index f7a4289e61..ada867aafa 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
@@ -12,7 +12,7 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 62
+bytecode array length: 56
bytecodes: [
/* 10 E> */ B(CreateFunctionContext), U8(0), U8(4),
B(PushContext), R(1),
@@ -23,20 +23,20 @@ bytecodes: [
B(Ldar), R(0),
B(StaCurrentContextSlot), U8(4),
/* 14 S> */ B(LdaLookupGlobalSlot), U8(1), U8(0), U8(1),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaZero),
- B(Star), R(7),
+ B(Star7),
B(LdaSmi), I8(10),
- B(Star), R(8),
+ B(Star8),
B(LdaSmi), I8(14),
- B(Star), R(9),
+ B(Star9),
B(Mov), R(2), R(4),
B(Mov), R(3), R(5),
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
- B(Star), R(2),
+ B(Star2),
/* 14 E> */ B(CallUndefinedReceiver1), R(2), R(3), U8(2),
/* 35 S> */ B(LdaLookupGlobalSlot), U8(3), U8(4), U8(1),
/* 44 S> */ B(Return),
@@ -56,7 +56,7 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 63
+bytecode array length: 57
bytecodes: [
/* 10 E> */ B(CreateFunctionContext), U8(0), U8(4),
B(PushContext), R(1),
@@ -67,20 +67,20 @@ bytecodes: [
B(Ldar), R(0),
B(StaCurrentContextSlot), U8(4),
/* 14 S> */ B(LdaLookupGlobalSlot), U8(1), U8(0), U8(1),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaZero),
- B(Star), R(7),
+ B(Star7),
B(LdaSmi), I8(10),
- B(Star), R(8),
+ B(Star8),
B(LdaSmi), I8(14),
- B(Star), R(9),
+ B(Star9),
B(Mov), R(2), R(4),
B(Mov), R(3), R(5),
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
- B(Star), R(2),
+ B(Star2),
/* 14 E> */ B(CallUndefinedReceiver1), R(2), R(3), U8(2),
/* 35 S> */ B(LdaLookupGlobalSlotInsideTypeof), U8(3), U8(4), U8(1),
B(TypeOf),
@@ -101,7 +101,7 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 63
+bytecode array length: 57
bytecodes: [
/* 10 E> */ B(CreateFunctionContext), U8(0), U8(4),
B(PushContext), R(1),
@@ -114,20 +114,20 @@ bytecodes: [
/* 14 S> */ B(LdaSmi), I8(20),
/* 16 E> */ B(StaLookupSlot), U8(1), U8(0),
/* 22 S> */ B(LdaLookupGlobalSlot), U8(2), U8(0), U8(1),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(3),
- B(Star), R(3),
+ B(Star3),
B(LdaZero),
- B(Star), R(7),
+ B(Star7),
B(LdaSmi), I8(10),
- B(Star), R(8),
+ B(Star8),
B(LdaSmi), I8(29),
- B(Star), R(9),
+ B(Star9),
B(Mov), R(2), R(4),
B(Mov), R(3), R(5),
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
- B(Star), R(2),
+ B(Star2),
/* 29 E> */ B(CallUndefinedReceiver1), R(2), R(3), U8(2),
/* 38 S> */ B(Return),
]
@@ -151,7 +151,7 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 62
+bytecode array length: 56
bytecodes: [
/* 38 E> */ B(CreateFunctionContext), U8(0), U8(4),
B(PushContext), R(1),
@@ -162,20 +162,20 @@ bytecodes: [
B(Ldar), R(0),
B(StaCurrentContextSlot), U8(4),
/* 44 S> */ B(LdaLookupGlobalSlot), U8(1), U8(0), U8(1),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaZero),
- B(Star), R(7),
+ B(Star7),
B(LdaSmi), I8(38),
- B(Star), R(8),
+ B(Star8),
B(LdaSmi), I8(44),
- B(Star), R(9),
+ B(Star9),
B(Mov), R(2), R(4),
B(Mov), R(3), R(5),
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
- B(Star), R(2),
+ B(Star2),
/* 44 E> */ B(CallUndefinedReceiver1), R(2), R(3), U8(2),
/* 66 S> */ B(LdaLookupContextSlot), U8(3), U8(4), U8(1),
/* 75 S> */ B(Return),
@@ -200,7 +200,7 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 62
+bytecode array length: 56
bytecodes: [
/* 34 E> */ B(CreateFunctionContext), U8(0), U8(4),
B(PushContext), R(1),
@@ -211,20 +211,20 @@ bytecodes: [
B(Ldar), R(0),
B(StaCurrentContextSlot), U8(4),
/* 40 S> */ B(LdaLookupGlobalSlot), U8(1), U8(0), U8(1),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaZero),
- B(Star), R(7),
+ B(Star7),
B(LdaSmi), I8(34),
- B(Star), R(8),
+ B(Star8),
B(LdaSmi), I8(40),
- B(Star), R(9),
+ B(Star9),
B(Mov), R(2), R(4),
B(Mov), R(3), R(5),
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
- B(Star), R(2),
+ B(Star2),
/* 40 E> */ B(CallUndefinedReceiver1), R(2), R(3), U8(2),
/* 62 S> */ B(LdaLookupGlobalSlot), U8(3), U8(4), U8(1),
/* 71 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
index 854a78c921..149140d4a7 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
@@ -13,16 +13,16 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 47
+bytecode array length: 45
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(2),
B(Mov), R(this), R(3),
/* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
B(ResumeGenerator), R(0), R(0), U8(2),
- B(Star), R(2),
+ B(Star2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
B(Ldar), R(2),
@@ -34,7 +34,7 @@ bytecodes: [
/* 14 S> */ B(Return),
]
constant pool: [
- Smi [21],
+ Smi [20],
Smi [10],
Smi [7],
]
@@ -47,16 +47,16 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 47
+bytecode array length: 45
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(2),
B(Mov), R(this), R(3),
/* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
B(ResumeGenerator), R(0), R(0), U8(2),
- B(Star), R(2),
+ B(Star2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
B(Ldar), R(2),
@@ -68,7 +68,7 @@ bytecodes: [
/* 25 S> */ B(Return),
]
constant pool: [
- Smi [21],
+ Smi [20],
Smi [10],
Smi [7],
]
@@ -83,16 +83,16 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 77
+bytecode array length: 69
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(3),
B(Mov), R(this), R(4),
/* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
B(ResumeGenerator), R(0), R(0), U8(3),
- B(Star), R(3),
+ B(Star3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
B(Ldar), R(3),
@@ -101,23 +101,23 @@ bytecodes: [
/* 65 S> */ B(Return),
/* 32 S> */ B(LdaModuleVariable), I8(-1), U8(0),
B(ThrowReferenceErrorIfHole), U8(3),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(42),
- B(Star), R(4),
+ B(Star4),
/* 32 E> */ B(CallUndefinedReceiver1), R(3), R(4), U8(0),
/* 47 S> */ B(LdaUndefined),
- B(Star), R(2),
+ B(Star2),
/* 52 S> */ B(LdaModuleVariable), I8(-1), U8(0),
B(ThrowReferenceErrorIfHole), U8(3),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(42),
- B(Star), R(4),
+ B(Star4),
/* 52 E> */ B(CallUndefinedReceiver1), R(3), R(4), U8(2),
- B(Star), R(1),
+ B(Star1),
/* 65 S> */ B(Return),
]
constant pool: [
- Smi [21],
+ Smi [20],
Smi [10],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["goo"],
@@ -133,16 +133,16 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 75
+bytecode array length: 71
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(3),
B(Mov), R(this), R(4),
/* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
B(ResumeGenerator), R(0), R(0), U8(3),
- B(Star), R(3),
+ B(Star3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
B(Ldar), R(3),
@@ -155,10 +155,10 @@ bytecodes: [
B(Inc), U8(0),
/* 24 E> */ B(StaModuleVariable), I8(1), U8(0),
/* 34 S> */ B(LdaUndefined),
- B(Star), R(2),
+ B(Star2),
/* 39 S> */ B(LdaModuleVariable), I8(1), U8(0),
B(ToNumeric), U8(1),
- B(Star), R(3),
+ B(Star3),
B(Inc), U8(1),
/* 42 E> */ B(StaModuleVariable), I8(1), U8(0),
B(Mov), R(3), R(1),
@@ -166,7 +166,7 @@ bytecodes: [
/* 50 S> */ B(Return),
]
constant pool: [
- Smi [21],
+ Smi [20],
Smi [10],
Smi [7],
]
@@ -181,21 +181,21 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 89
+bytecode array length: 84
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(3),
B(Mov), R(this), R(4),
/* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
- B(Star), R(0),
+ B(Star0),
B(LdaConstant), U8(1),
- B(Star), R(3),
+ B(Star3),
B(Mov), R(closure), R(4),
B(CallRuntime), U16(Runtime::kDeclareModuleExports), R(3), U8(2),
B(Ldar), R(0),
/* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
B(ResumeGenerator), R(0), R(0), U8(3),
- B(Star), R(3),
+ B(Star3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(3),
@@ -208,10 +208,10 @@ bytecodes: [
B(Inc), U8(0),
/* 24 E> */ B(StaModuleVariable), I8(1), U8(0),
/* 34 S> */ B(LdaUndefined),
- B(Star), R(2),
+ B(Star2),
/* 39 S> */ B(LdaModuleVariable), I8(1), U8(0),
B(ToNumeric), U8(1),
- B(Star), R(3),
+ B(Star3),
B(Inc), U8(1),
/* 42 E> */ B(StaModuleVariable), I8(1), U8(0),
B(Mov), R(3), R(1),
@@ -219,7 +219,7 @@ bytecodes: [
/* 50 S> */ B(Return),
]
constant pool: [
- Smi [35],
+ Smi [33],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
@@ -235,21 +235,21 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 93
+bytecode array length: 88
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(3),
B(Mov), R(this), R(4),
/* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
- B(Star), R(0),
+ B(Star0),
B(LdaConstant), U8(1),
- B(Star), R(3),
+ B(Star3),
B(Mov), R(closure), R(4),
B(CallRuntime), U16(Runtime::kDeclareModuleExports), R(3), U8(2),
B(Ldar), R(0),
/* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
B(ResumeGenerator), R(0), R(0), U8(3),
- B(Star), R(3),
+ B(Star3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(3),
@@ -262,10 +262,10 @@ bytecodes: [
B(Inc), U8(0),
/* 26 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
/* 36 S> */ B(LdaUndefined),
- B(Star), R(2),
+ B(Star2),
/* 41 S> */ B(LdaModuleVariable), I8(1), U8(0),
B(ToNumeric), U8(1),
- B(Star), R(3),
+ B(Star3),
B(Inc), U8(1),
/* 44 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
B(Mov), R(3), R(1),
@@ -273,7 +273,7 @@ bytecodes: [
/* 52 S> */ B(Return),
]
constant pool: [
- Smi [35],
+ Smi [33],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
@@ -287,21 +287,21 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 68
+bytecode array length: 65
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(2),
B(Mov), R(this), R(3),
/* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
- B(Star), R(0),
+ B(Star0),
B(LdaConstant), U8(1),
- B(Star), R(2),
+ B(Star2),
B(Mov), R(closure), R(3),
B(CallRuntime), U16(Runtime::kDeclareModuleExports), R(2), U8(2),
B(Ldar), R(0),
/* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
B(ResumeGenerator), R(0), R(0), U8(2),
- B(Star), R(2),
+ B(Star2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(2),
@@ -315,7 +315,7 @@ bytecodes: [
/* 33 S> */ B(Return),
]
constant pool: [
- Smi [35],
+ Smi [33],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
@@ -330,21 +330,21 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 89
+bytecode array length: 82
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(2),
B(Mov), R(this), R(3),
/* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
- B(Star), R(0),
+ B(Star0),
B(LdaConstant), U8(1),
- B(Star), R(2),
+ B(Star2),
B(Mov), R(closure), R(3),
B(CallRuntime), U16(Runtime::kDeclareModuleExports), R(2), U8(2),
B(Ldar), R(0),
/* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
B(ResumeGenerator), R(0), R(0), U8(2),
- B(Star), R(2),
+ B(Star2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(2),
@@ -353,21 +353,21 @@ bytecodes: [
/* 27 S> */ B(Return),
B(Mov), R(2), R(1),
B(LdaTheHole),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(5), U8(0), U8(0),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(4),
- B(Star), R(3),
+ B(Star3),
B(Mov), R(2), R(4),
B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
- B(Star), R(3),
+ B(Star3),
B(Ldar), R(4),
B(StaModuleVariable), I8(1), U8(0),
B(Ldar), R(1),
/* 27 S> */ B(Return),
]
constant pool: [
- Smi [35],
+ Smi [33],
FIXED_ARRAY_TYPE,
Smi [10],
Smi [7],
@@ -383,16 +383,16 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 47
+bytecode array length: 45
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(2),
B(Mov), R(this), R(3),
/* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
B(ResumeGenerator), R(0), R(0), U8(2),
- B(Star), R(2),
+ B(Star2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
B(Ldar), R(2),
@@ -404,7 +404,7 @@ bytecodes: [
/* 31 S> */ B(Return),
]
constant pool: [
- Smi [21],
+ Smi [20],
Smi [10],
Smi [7],
]
@@ -417,16 +417,16 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 47
+bytecode array length: 45
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(2),
B(Mov), R(this), R(3),
/* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
B(ResumeGenerator), R(0), R(0), U8(2),
- B(Star), R(2),
+ B(Star2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
B(Ldar), R(2),
@@ -438,7 +438,7 @@ bytecodes: [
/* 20 S> */ B(Return),
]
constant pool: [
- Smi [21],
+ Smi [20],
Smi [10],
Smi [7],
]
@@ -452,21 +452,21 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 74
+bytecode array length: 67
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(3),
B(Mov), R(this), R(4),
/* 0 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
- B(Star), R(0),
+ B(Star0),
B(LdaZero),
- B(Star), R(3),
+ B(Star3),
B(CallRuntime), U16(Runtime::kGetModuleNamespace), R(3), U8(1),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
/* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
B(ResumeGenerator), R(0), R(0), U8(3),
- B(Star), R(3),
+ B(Star3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
B(Ldar), R(3),
@@ -474,15 +474,15 @@ bytecodes: [
B(Ldar), R(3),
/* 46 S> */ B(Return),
/* 31 S> */ B(LdaNamedProperty), R(1), U8(3), U8(0),
- B(Star), R(3),
+ B(Star3),
/* 42 E> */ B(LdaNamedProperty), R(1), U8(4), U8(2),
- B(Star), R(6),
+ B(Star6),
/* 31 E> */ B(CallProperty2), R(3), R(1), R(1), R(6), U8(4),
- B(Star), R(2),
+ B(Star2),
/* 46 S> */ B(Return),
]
constant pool: [
- Smi [33],
+ Smi [30],
Smi [10],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["f"],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
index 23d9d69529..07a3cffaa0 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
@@ -12,23 +12,23 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 47
+bytecode array length: 42
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(1),
- B(Star), R(3),
+ B(Star3),
B(Mov), R(2), R(4),
B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
- B(Star), R(3),
+ B(Star3),
B(PopContext), R(1),
B(Mov), R(4), R(0),
/* 89 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
- B(Star), R(2),
+ B(Star2),
B(Ldar), R(0),
/* 89 E> */ B(ConstructWithSpread), R(0), R(2), U8(1), U8(1),
B(LdaUndefined),
@@ -50,25 +50,25 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 50
+bytecode array length: 44
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(1),
- B(Star), R(3),
+ B(Star3),
B(Mov), R(2), R(4),
B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
- B(Star), R(3),
+ B(Star3),
B(PopContext), R(1),
B(Mov), R(4), R(0),
/* 89 S> */ B(LdaZero),
- B(Star), R(2),
+ B(Star2),
B(CreateArrayLiteral), U8(3), U8(0), U8(37),
- B(Star), R(3),
+ B(Star3),
B(Ldar), R(0),
/* 89 E> */ B(ConstructWithSpread), R(0), R(2), U8(2), U8(1),
B(LdaUndefined),
@@ -90,46 +90,46 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 121
+bytecode array length: 110
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(1),
- B(Star), R(3),
+ B(Star3),
B(Mov), R(2), R(4),
B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
- B(Star), R(3),
+ B(Star3),
B(PopContext), R(1),
B(Mov), R(4), R(0),
/* 89 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(1),
- B(Star), R(2),
+ B(Star2),
/* 101 S> */ B(CreateArrayLiteral), U8(4), U8(1), U8(37),
- B(Star), R(6),
+ B(Star6),
/* 101 E> */ B(GetIterator), R(6), U8(2), U8(4),
B(Mov), R(4), R(1),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(5),
+ B(Star5),
B(LdaNamedProperty), R(5), U8(5), U8(6),
- B(Star), R(4),
+ B(Star4),
B(CallProperty0), R(4), R(5), U8(15),
- B(Star), R(6),
+ B(Star6),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
B(LdaNamedProperty), R(6), U8(6), U8(17),
- B(JumpIfToBooleanTrue), U8(19),
+ B(JumpIfToBooleanTrue), U8(18),
B(LdaNamedProperty), R(6), U8(7), U8(8),
B(StaInArrayLiteral), R(3), R(2), U8(13),
B(Ldar), R(2),
B(Inc), U8(12),
- B(Star), R(2),
- B(JumpLoop), U8(33), I8(0),
+ B(Star2),
+ B(JumpLoop), U8(31), I8(0),
B(LdaSmi), I8(4),
B(StaInArrayLiteral), R(3), R(2), U8(13),
B(Mov), R(3), R(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
index a5256793a0..ac95dd70b1 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
@@ -44,12 +44,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 19
+bytecode array length: 17
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
/* 75 E> */ B(StaNamedOwnProperty), R(1), U8(1), U8(1),
B(Ldar), R(1),
@@ -68,12 +68,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 22
+bytecode array length: 20
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
/* 69 E> */ B(AddSmi), I8(1), U8(1),
B(StaNamedOwnProperty), R(1), U8(1), U8(2),
@@ -93,10 +93,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 17
+bytecode array length: 16
bytecodes: [
/* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(0),
+ B(Star0),
/* 49 E> */ B(CreateClosure), U8(1), U8(0), U8(2),
B(StaNamedOwnProperty), R(0), U8(2), U8(1),
B(Ldar), R(0),
@@ -116,10 +116,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 17
+bytecode array length: 16
bytecodes: [
/* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(0),
+ B(Star0),
/* 43 E> */ B(CreateClosure), U8(1), U8(0), U8(2),
B(StaNamedOwnProperty), R(0), U8(2), U8(1),
B(Ldar), R(0),
@@ -139,18 +139,18 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 33
+bytecode array length: 28
bytecodes: [
/* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(0),
+ B(Star0),
B(LdaConstant), U8(1),
- B(Star), R(2),
+ B(Star2),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaNull),
- B(Star), R(4),
+ B(Star4),
B(LdaZero),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(0), R(1),
B(CallRuntime), U16(Runtime::kDefineAccessorPropertyUnchecked), R(1), U8(5),
B(Ldar), R(1),
@@ -170,18 +170,18 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 36
+bytecode array length: 31
bytecodes: [
/* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(0),
+ B(Star0),
B(LdaConstant), U8(1),
- B(Star), R(2),
+ B(Star2),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(3),
+ B(Star3),
B(CreateClosure), U8(3), U8(1), U8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaZero),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(0), R(1),
B(CallRuntime), U16(Runtime::kDefineAccessorPropertyUnchecked), R(1), U8(5),
B(Ldar), R(1),
@@ -202,18 +202,18 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 33
+bytecode array length: 28
bytecodes: [
/* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(0),
+ B(Star0),
B(LdaConstant), U8(1),
- B(Star), R(2),
+ B(Star2),
B(LdaNull),
- B(Star), R(3),
+ B(Star3),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaZero),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(0), R(1),
B(CallRuntime), U16(Runtime::kDefineAccessorPropertyUnchecked), R(1), U8(5),
B(Ldar), R(1),
@@ -233,14 +233,14 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 28
+bytecode array length: 25
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(1),
- B(Star), R(3),
+ B(Star3),
B(Mov), R(1), R(2),
B(Mov), R(0), R(4),
/* 57 E> */ B(CallRuntime), U16(Runtime::kSetKeyedProperty), R(2), U8(3),
@@ -276,12 +276,12 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 24
+bytecode array length: 22
bytecodes: [
/* 42 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
/* 60 E> */ B(ToName), R(2),
B(LdaSmi), I8(1),
@@ -302,12 +302,12 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 30
+bytecode array length: 28
bytecodes: [
/* 42 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
/* 64 E> */ B(StaNamedOwnProperty), R(1), U8(2), U8(1),
B(Ldar), R(0),
@@ -331,18 +331,18 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 35
+bytecode array length: 32
bytecodes: [
/* 42 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
/* 60 E> */ B(ToName), R(2),
B(LdaSmi), I8(1),
/* 64 E> */ B(StaDataPropertyInLiteral), R(1), R(2), U8(0), U8(1),
/* 78 E> */ B(CreateEmptyObjectLiteral),
- B(Star), R(3),
+ B(Star3),
B(Mov), R(1), R(2),
B(CallRuntime), U16(Runtime::kInternalSetPrototype), R(2), U8(2),
B(Ldar), R(2),
@@ -361,30 +361,30 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 63
+bytecode array length: 55
bytecodes: [
/* 42 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
/* 60 E> */ B(ToName), R(2),
B(LdaConstant), U8(2),
/* 64 E> */ B(StaDataPropertyInLiteral), R(1), R(2), U8(0), U8(1),
B(LdaConstant), U8(3),
- B(Star), R(3),
+ B(Star3),
/* 71 E> */ B(CreateClosure), U8(4), U8(0), U8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaZero),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(1), R(2),
B(CallRuntime), U16(Runtime::kDefineGetterPropertyUnchecked), R(2), U8(4),
B(LdaConstant), U8(3),
- B(Star), R(3),
+ B(Star3),
/* 84 E> */ B(CreateClosure), U8(5), U8(1), U8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaZero),
- B(Star), R(5),
+ B(Star5),
B(CallRuntime), U16(Runtime::kDefineSetterPropertyUnchecked), R(2), U8(4),
B(Ldar), R(2),
/* 98 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden
index bb4aac932e..1b8f118a93 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden
@@ -20,10 +20,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
/* 102 S> */ B(LdaImmutableContextSlot), R(context), U8(2), U8(1),
- B(Star), R(0),
+ B(Star0),
B(LdaImmutableCurrentContextSlot), U8(2),
/* 118 E> */ B(Mul), R(0), U8(0),
/* 129 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden
index daf96cdd44..e2d02ee527 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden
@@ -11,10 +11,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 4
+bytecode array length: 3
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(Return),
]
constant pool: [
@@ -28,10 +28,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(AddSmi), I8(3), U8(0),
/* 58 S> */ B(Return),
]
@@ -46,12 +46,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 13
+bytecode array length: 11
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(LdaSmi), I8(3),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
/* 54 E> */ B(Add), R(1), U8(0),
/* 58 S> */ B(Return),
@@ -67,10 +67,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(SubSmi), I8(3), U8(0),
/* 58 S> */ B(Return),
]
@@ -85,12 +85,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 13
+bytecode array length: 11
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(LdaSmi), I8(3),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
/* 54 E> */ B(Sub), R(1), U8(0),
/* 58 S> */ B(Return),
@@ -106,10 +106,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(4),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(MulSmi), I8(3), U8(0),
/* 58 S> */ B(Return),
]
@@ -124,10 +124,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(4),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(MulSmi), I8(3), U8(0),
/* 58 S> */ B(Return),
]
@@ -142,10 +142,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(4),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(DivSmi), I8(3), U8(0),
/* 58 S> */ B(Return),
]
@@ -160,12 +160,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 14
+bytecode array length: 12
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(4),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(LdaSmi), I8(3),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
/* 54 E> */ B(Div), R(1), U8(0),
/* 58 S> */ B(Return),
@@ -181,10 +181,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(4),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(ModSmi), I8(3), U8(0),
/* 58 S> */ B(Return),
]
@@ -199,12 +199,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 14
+bytecode array length: 12
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(4),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(LdaSmi), I8(3),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
/* 54 E> */ B(Mod), R(1), U8(0),
/* 58 S> */ B(Return),
@@ -220,10 +220,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(BitwiseOrSmi), I8(2), U8(0),
/* 58 S> */ B(Return),
]
@@ -238,10 +238,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(BitwiseOrSmi), I8(2), U8(0),
/* 58 S> */ B(Return),
]
@@ -256,10 +256,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(BitwiseXorSmi), I8(2), U8(0),
/* 58 S> */ B(Return),
]
@@ -274,10 +274,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(BitwiseXorSmi), I8(2), U8(0),
/* 58 S> */ B(Return),
]
@@ -292,10 +292,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(BitwiseAndSmi), I8(2), U8(0),
/* 58 S> */ B(Return),
]
@@ -310,10 +310,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(BitwiseAndSmi), I8(2), U8(0),
/* 58 S> */ B(Return),
]
@@ -328,10 +328,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(10),
- B(Star), R(0),
+ B(Star0),
/* 55 S> */ B(ShiftLeftSmi), I8(3), U8(0),
/* 60 S> */ B(Return),
]
@@ -346,12 +346,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 14
+bytecode array length: 12
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(10),
- B(Star), R(0),
+ B(Star0),
/* 46 S> */ B(LdaSmi), I8(3),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
/* 55 E> */ B(ShiftLeft), R(1), U8(0),
/* 60 S> */ B(Return),
@@ -367,10 +367,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(10),
- B(Star), R(0),
+ B(Star0),
/* 55 S> */ B(ShiftRightSmi), I8(3), U8(0),
/* 60 S> */ B(Return),
]
@@ -385,12 +385,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 14
+bytecode array length: 12
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(10),
- B(Star), R(0),
+ B(Star0),
/* 46 S> */ B(LdaSmi), I8(3),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
/* 55 E> */ B(ShiftRight), R(1), U8(0),
/* 60 S> */ B(Return),
@@ -406,10 +406,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 7
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(10),
- B(Star), R(0),
+ B(Star0),
/* 55 S> */ B(ShiftRightLogicalSmi), I8(3), U8(0),
/* 61 S> */ B(Return),
]
@@ -424,12 +424,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 14
+bytecode array length: 12
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(10),
- B(Star), R(0),
+ B(Star0),
/* 46 S> */ B(LdaSmi), I8(3),
- B(Star), R(1),
+ B(Star1),
B(Ldar), R(0),
/* 55 E> */ B(ShiftRightLogical), R(1), U8(0),
/* 61 S> */ B(Return),
@@ -445,10 +445,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(LdaSmi), I8(3),
/* 59 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
index 579f8cf2e8..e3dacbb196 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
@@ -23,40 +23,40 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 97
+bytecode array length: 87
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
- B(Star), R(1),
+ B(Star1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 67 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
/* 76 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaImmutableCurrentContextSlot), U8(3),
/* 81 E> */ B(LdaKeyedProperty), R(this), U8(0),
B(CallRuntime), U16(Runtime::kLoadPrivateGetter), R(4), U8(1),
- B(Star), R(5),
+ B(Star5),
B(CallProperty0), R(5), R(this), U8(2),
B(Inc), U8(4),
- B(Star), R(5),
+ B(Star5),
/* 83 E> */ B(CallRuntime), U16(Runtime::kLoadPrivateSetter), R(4), U8(1),
- B(Star), R(6),
+ B(Star6),
B(CallProperty1), R(6), R(this), R(5), U8(5),
/* 91 S> */ B(LdaSmi), I8(1),
- B(Star), R(3),
+ B(Star3),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(5),
+ B(Star5),
B(LdaImmutableCurrentContextSlot), U8(3),
/* 96 E> */ B(LdaKeyedProperty), R(this), U8(7),
B(CallRuntime), U16(Runtime::kLoadPrivateSetter), R(5), U8(1),
- B(Star), R(6),
+ B(Star6),
B(CallProperty1), R(6), R(this), R(3), U8(9),
/* 108 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaImmutableCurrentContextSlot), U8(3),
/* 120 E> */ B(LdaKeyedProperty), R(this), U8(11),
B(CallRuntime), U16(Runtime::kLoadPrivateGetter), R(4), U8(1),
- B(Star), R(5),
+ B(Star5),
B(CallProperty0), R(5), R(this), U8(13),
/* 123 S> */ B(Return),
]
@@ -76,17 +76,17 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 31
+bytecode array length: 28
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
- B(Star), R(1),
+ B(Star1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 48 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 53 S> */ B(Wide), B(LdaSmi), I16(272),
- B(Star), R(3),
+ /* 53 S> */ B(Wide), B(LdaSmi), I16(276),
+ B(Star3),
B(LdaConstant), U8(0),
- B(Star), R(4),
+ B(Star4),
B(CallRuntime), U16(Runtime::kNewTypeError), R(3), U8(2),
B(Throw),
]
@@ -107,17 +107,17 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 31
+bytecode array length: 28
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
- B(Star), R(1),
+ B(Star1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 41 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 46 S> */ B(Wide), B(LdaSmi), I16(271),
- B(Star), R(3),
+ /* 46 S> */ B(Wide), B(LdaSmi), I16(275),
+ B(Star3),
B(LdaConstant), U8(0),
- B(Star), R(4),
+ B(Star4),
B(CallRuntime), U16(Runtime::kNewTypeError), R(3), U8(2),
B(Throw),
]
@@ -138,17 +138,17 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 31
+bytecode array length: 28
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
- B(Star), R(1),
+ B(Star1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 48 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 53 S> */ B(Wide), B(LdaSmi), I16(272),
- B(Star), R(3),
+ /* 53 S> */ B(Wide), B(LdaSmi), I16(276),
+ B(Star3),
B(LdaConstant), U8(0),
- B(Star), R(4),
+ B(Star4),
/* 61 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(3), U8(2),
B(Throw),
]
@@ -169,17 +169,17 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 31
+bytecode array length: 28
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
- B(Star), R(1),
+ B(Star1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 41 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 46 S> */ B(Wide), B(LdaSmi), I16(271),
- B(Star), R(4),
+ /* 46 S> */ B(Wide), B(LdaSmi), I16(275),
+ B(Star4),
B(LdaConstant), U8(0),
- B(Star), R(5),
+ B(Star5),
B(CallRuntime), U16(Runtime::kNewTypeError), R(4), U8(2),
B(Throw),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorDeclaration.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorDeclaration.golden
index 059314f804..462cc5b831 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorDeclaration.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorDeclaration.golden
@@ -16,27 +16,27 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 64
+bytecode array length: 57
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaConstant), U8(2),
- B(Star), R(3),
+ B(Star3),
B(CallRuntime), U16(Runtime::kCreatePrivateBrandSymbol), R(3), U8(1),
B(StaCurrentContextSlot), U8(3),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star6),
B(CreateClosure), U8(3), U8(0), U8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(1),
- B(Star), R(4),
+ B(Star4),
B(Mov), R(2), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
- B(Star), R(4),
+ B(Star4),
B(CreateClosure), U8(4), U8(1), U8(2),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(5), U8(2), U8(2),
- B(Star), R(6),
+ B(Star6),
B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(5), U8(2),
B(StaCurrentContextSlot), U8(2),
B(PopContext), R(1),
@@ -65,27 +65,27 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 61
+bytecode array length: 54
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaConstant), U8(2),
- B(Star), R(3),
+ B(Star3),
B(CallRuntime), U16(Runtime::kCreatePrivateBrandSymbol), R(3), U8(1),
B(StaCurrentContextSlot), U8(3),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star6),
B(CreateClosure), U8(3), U8(0), U8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(1),
- B(Star), R(4),
+ B(Star4),
B(Mov), R(2), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
- B(Star), R(4),
+ B(Star4),
B(CreateClosure), U8(4), U8(1), U8(2),
- B(Star), R(5),
+ B(Star5),
B(LdaNull),
- B(Star), R(6),
+ B(Star6),
B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(5), U8(2),
B(StaCurrentContextSlot), U8(2),
B(PopContext), R(1),
@@ -113,27 +113,27 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 61
+bytecode array length: 54
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaConstant), U8(2),
- B(Star), R(3),
+ B(Star3),
B(CallRuntime), U16(Runtime::kCreatePrivateBrandSymbol), R(3), U8(1),
B(StaCurrentContextSlot), U8(3),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star6),
B(CreateClosure), U8(3), U8(0), U8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(1),
- B(Star), R(4),
+ B(Star4),
B(Mov), R(2), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
- B(Star), R(4),
+ B(Star4),
B(LdaNull),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(4), U8(1), U8(2),
- B(Star), R(6),
+ B(Star6),
B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(5), U8(2),
B(StaCurrentContextSlot), U8(2),
B(PopContext), R(1),
@@ -167,27 +167,27 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 126
+bytecode array length: 113
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
B(LdaConstant), U8(2),
- B(Star), R(4),
+ B(Star4),
B(CallRuntime), U16(Runtime::kCreatePrivateBrandSymbol), R(4), U8(1),
B(StaCurrentContextSlot), U8(3),
B(LdaTheHole),
- B(Star), R(7),
+ B(Star7),
B(CreateClosure), U8(3), U8(0), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaConstant), U8(1),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(3), R(6),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(4), U8(1), U8(2),
- B(Star), R(6),
+ B(Star6),
B(CreateClosure), U8(5), U8(2), U8(2),
- B(Star), R(7),
+ B(Star7),
B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(6), U8(2),
B(StaCurrentContextSlot), U8(2),
B(PopContext), R(2),
@@ -195,21 +195,21 @@ bytecodes: [
/* 38 E> */ B(CreateBlockContext), U8(6),
B(PushContext), R(2),
B(LdaConstant), U8(8),
- B(Star), R(4),
+ B(Star4),
B(CallRuntime), U16(Runtime::kCreatePrivateBrandSymbol), R(4), U8(1),
B(StaCurrentContextSlot), U8(3),
/* 118 E> */ B(CreateClosure), U8(9), U8(3), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaConstant), U8(7),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(3), R(6),
B(Mov), R(0), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(10), U8(4), U8(2),
- B(Star), R(6),
+ B(Star6),
B(CreateClosure), U8(11), U8(5), U8(2),
- B(Star), R(7),
+ B(Star7),
B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(6), U8(2),
B(StaCurrentContextSlot), U8(2),
B(PopContext), R(2),
@@ -246,49 +246,50 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 112
+bytecode array length: 101
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star6),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaConstant), U8(1),
- B(Star), R(4),
+ B(Star4),
B(CreateClosure), U8(3), U8(1), U8(2),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(3), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
- B(Star), R(4),
+ B(Star4),
B(PopContext), R(2),
B(Mov), R(5), R(0),
/* 38 E> */ B(CreateBlockContext), U8(4),
B(PushContext), R(2),
B(LdaConstant), U8(6),
- B(Star), R(4),
+ B(Star4),
B(CallRuntime), U16(Runtime::kCreatePrivateBrandSymbol), R(4), U8(1),
B(StaCurrentContextSlot), U8(3),
/* 77 E> */ B(CreateClosure), U8(7), U8(2), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaConstant), U8(5),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(3), R(6),
B(Mov), R(0), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
- B(Star), R(5),
+ B(StaCurrentContextSlot), U8(4),
+ B(Star5),
+ B(Ldar), R(6),
+ B(StaCurrentContextSlot), U8(5),
B(CreateClosure), U8(8), U8(3), U8(2),
- B(Star), R(6),
- B(Ldar), R(5),
- B(StaNamedProperty), R(6), U8(9), U8(0),
+ B(Star6),
B(LdaNull),
- B(Star), R(7),
+ B(Star7),
B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(6), U8(2),
B(StaCurrentContextSlot), U8(2),
B(PopContext), R(2),
B(Mov), R(3), R(1),
/* 122 S> */ B(Ldar), R(1),
- /* 122 E> */ B(Construct), R(1), R(0), U8(0), U8(2),
+ /* 122 E> */ B(Construct), R(1), R(0), U8(0), U8(0),
B(LdaUndefined),
/* 133 S> */ B(Return),
]
@@ -302,7 +303,6 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["C"],
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- SYMBOL_TYPE,
]
handlers: [
]
@@ -319,49 +319,50 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 112
+bytecode array length: 101
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star6),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaConstant), U8(1),
- B(Star), R(4),
+ B(Star4),
B(CreateClosure), U8(3), U8(1), U8(2),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(3), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
- B(Star), R(4),
+ B(Star4),
B(PopContext), R(2),
B(Mov), R(5), R(0),
/* 38 E> */ B(CreateBlockContext), U8(4),
B(PushContext), R(2),
B(LdaConstant), U8(6),
- B(Star), R(4),
+ B(Star4),
B(CallRuntime), U16(Runtime::kCreatePrivateBrandSymbol), R(4), U8(1),
B(StaCurrentContextSlot), U8(3),
/* 80 E> */ B(CreateClosure), U8(7), U8(2), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaConstant), U8(5),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(3), R(6),
B(Mov), R(0), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
- B(Star), R(5),
+ B(StaCurrentContextSlot), U8(4),
+ B(Star5),
+ B(Ldar), R(6),
+ B(StaCurrentContextSlot), U8(5),
B(LdaNull),
- B(Star), R(6),
+ B(Star6),
B(CreateClosure), U8(8), U8(3), U8(2),
- B(Star), R(7),
- B(Ldar), R(5),
- B(StaNamedProperty), R(7), U8(9), U8(0),
+ B(Star7),
B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(6), U8(2),
B(StaCurrentContextSlot), U8(2),
B(PopContext), R(2),
B(Mov), R(3), R(1),
/* 126 S> */ B(Ldar), R(1),
- /* 126 E> */ B(Construct), R(1), R(0), U8(0), U8(2),
+ /* 126 E> */ B(Construct), R(1), R(0), U8(0), U8(0),
B(LdaUndefined),
/* 137 S> */ B(Return),
]
@@ -375,7 +376,6 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["C"],
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- SYMBOL_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFieldAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFieldAccess.golden
index 67adb5f095..e02111d2a9 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFieldAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFieldAccess.golden
@@ -21,15 +21,15 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 30
+bytecode array length: 28
bytecodes: [
/* 35 E> */ B(LdaNamedProperty), R(closure), U8(0), U8(0),
- B(JumpIfUndefined), U8(11),
- B(Star), R(1),
+ B(JumpIfUndefined), U8(10),
+ B(Star1),
B(CallProperty0), R(1), R(this), U8(2),
B(Mov), R(this), R(0),
/* 44 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaImmutableCurrentContextSlot), U8(3),
/* 59 E> */ B(LdaKeyedProperty), R(this), U8(4),
/* 52 E> */ B(StaKeyedProperty), R(this), R(3), U8(6),
@@ -60,15 +60,15 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 30
+bytecode array length: 28
bytecodes: [
/* 35 E> */ B(LdaNamedProperty), R(closure), U8(0), U8(0),
- B(JumpIfUndefined), U8(11),
- B(Star), R(1),
+ B(JumpIfUndefined), U8(10),
+ B(Star1),
B(CallProperty0), R(1), R(this), U8(2),
B(Mov), R(this), R(0),
/* 44 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaImmutableCurrentContextSlot), U8(3),
/* 59 E> */ B(LdaKeyedProperty), R(this), U8(4),
/* 52 E> */ B(StaKeyedProperty), R(this), R(3), U8(6),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
index 5e00600435..f1e04d845d 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
@@ -24,49 +24,49 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 130
+bytecode array length: 116
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
B(LdaConstant), U8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaConstant), U8(2),
- B(Star), R(4),
+ B(Star4),
B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(4), U8(1),
B(StaCurrentContextSlot), U8(2),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star6),
B(CreateClosure), U8(3), U8(0), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaConstant), U8(1),
- B(Star), R(4),
+ B(Star4),
B(Mov), R(3), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
- B(Star), R(4),
+ B(Star4),
B(CreateClosure), U8(4), U8(1), U8(2),
- B(Star), R(5),
+ B(Star5),
B(StaNamedProperty), R(3), U8(5), U8(0),
B(PopContext), R(2),
B(Mov), R(3), R(0),
/* 38 E> */ B(CreateBlockContext), U8(6),
B(PushContext), R(2),
B(LdaConstant), U8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaConstant), U8(2),
- B(Star), R(4),
+ B(Star4),
B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(4), U8(1),
B(StaCurrentContextSlot), U8(2),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star6),
B(CreateClosure), U8(8), U8(2), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaConstant), U8(7),
- B(Star), R(4),
+ B(Star4),
B(Mov), R(3), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
- B(Star), R(4),
+ B(Star4),
B(CreateClosure), U8(9), U8(3), U8(2),
- B(Star), R(5),
+ B(Star5),
B(StaNamedProperty), R(3), U8(5), U8(2),
B(PopContext), R(2),
B(Mov), R(3), R(1),
@@ -129,96 +129,96 @@ snippet: "
"
frame size: 12
parameter count: 1
-bytecode array length: 267
+bytecode array length: 237
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(3),
B(LdaConstant), U8(2),
- B(Star), R(5),
+ B(Star5),
B(LdaConstant), U8(2),
- B(Star), R(5),
+ B(Star5),
B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(5), U8(1),
B(StaCurrentContextSlot), U8(2),
B(LdaTheHole),
- B(Star), R(11),
+ B(Star11),
B(CreateClosure), U8(4), U8(0), U8(2),
- B(Star), R(8),
+ B(Star8),
B(LdaConstant), U8(3),
- B(Star), R(9),
+ B(Star9),
B(Mov), R(8), R(10),
B(CallRuntime), U16(Runtime::kDefineClass), R(9), U8(3),
- B(Star), R(9),
+ B(Star9),
B(CreateClosure), U8(5), U8(1), U8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaConstant), U8(1),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(4), R(6),
B(Mov), R(10), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(6), U8(2), U8(2),
- B(Star), R(6),
+ B(Star6),
B(StaNamedProperty), R(4), U8(7), U8(0),
B(PopContext), R(3),
B(Mov), R(4), R(0),
/* 38 E> */ B(CreateBlockContext), U8(8),
B(PushContext), R(3),
B(LdaConstant), U8(2),
- B(Star), R(5),
+ B(Star5),
B(LdaConstant), U8(2),
- B(Star), R(5),
+ B(Star5),
B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(5), U8(1),
B(StaCurrentContextSlot), U8(2),
B(LdaConstant), U8(10),
- B(Star), R(5),
+ B(Star5),
B(LdaConstant), U8(10),
- B(Star), R(5),
+ B(Star5),
B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(5), U8(1),
B(StaCurrentContextSlot), U8(3),
B(LdaTheHole),
- B(Star), R(11),
+ B(Star11),
B(CreateClosure), U8(12), U8(3), U8(2),
- B(Star), R(8),
+ B(Star8),
B(LdaConstant), U8(11),
- B(Star), R(9),
+ B(Star9),
B(Mov), R(8), R(10),
B(CallRuntime), U16(Runtime::kDefineClass), R(9), U8(3),
- B(Star), R(9),
+ B(Star9),
B(CreateClosure), U8(13), U8(4), U8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaConstant), U8(9),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(14), U8(5), U8(2),
- B(Star), R(8),
+ B(Star8),
B(CreateClosure), U8(15), U8(6), U8(2),
- B(Star), R(9),
+ B(Star9),
B(Mov), R(4), R(6),
B(Mov), R(10), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(5),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(16), U8(7), U8(2),
- B(Star), R(6),
+ B(Star6),
B(StaNamedProperty), R(4), U8(7), U8(2),
B(PopContext), R(3),
B(Mov), R(4), R(1),
/* 140 E> */ B(CreateBlockContext), U8(17),
B(PushContext), R(3),
B(LdaConstant), U8(2),
- B(Star), R(5),
+ B(Star5),
B(LdaConstant), U8(2),
- B(Star), R(5),
+ B(Star5),
B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(5), U8(1),
B(StaCurrentContextSlot), U8(2),
/* 356 E> */ B(CreateClosure), U8(19), U8(8), U8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaConstant), U8(18),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(4), R(6),
B(Mov), R(1), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(20), U8(9), U8(2),
- B(Star), R(6),
+ B(Star6),
B(StaNamedProperty), R(4), U8(7), U8(4),
B(PopContext), R(3),
B(Mov), R(4), R(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
index c768ea4dbc..2270e27b02 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
@@ -18,17 +18,17 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 30
+bytecode array length: 28
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
- B(Star), R(1),
+ B(Star1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 44 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
/* 49 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
/* 61 E> */ B(LdaKeyedProperty), R(this), U8(0),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(3),
+ B(Star3),
/* 63 E> */ B(CallAnyReceiver), R(3), R(this), U8(1), U8(2),
/* 66 S> */ B(Return),
]
@@ -49,17 +49,17 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 31
+bytecode array length: 28
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
- B(Star), R(1),
+ B(Star1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 44 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 49 S> */ B(Wide), B(LdaSmi), I16(270),
- B(Star), R(3),
+ /* 49 S> */ B(Wide), B(LdaSmi), I16(274),
+ B(Star3),
B(LdaConstant), U8(0),
- B(Star), R(4),
+ B(Star4),
/* 57 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(3), U8(2),
B(Throw),
]
@@ -81,17 +81,17 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 31
+bytecode array length: 28
bytecodes: [
B(LdaImmutableCurrentContextSlot), U8(3),
- B(Star), R(1),
+ B(Star1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 44 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 49 S> */ B(Wide), B(LdaSmi), I16(270),
- B(Star), R(3),
+ /* 49 S> */ B(Wide), B(LdaSmi), I16(274),
+ B(Star3),
B(LdaConstant), U8(0),
- B(Star), R(4),
+ B(Star4),
B(CallRuntime), U16(Runtime::kNewTypeError), R(3), U8(2),
B(Throw),
]
@@ -113,25 +113,25 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 57
+bytecode array length: 53
bytecodes: [
/* 44 E> */ B(CreateFunctionContext), U8(0), U8(1),
B(PushContext), R(0),
B(Ldar), R(this),
B(StaCurrentContextSlot), U8(2),
B(LdaImmutableContextSlot), R(0), U8(3), U8(0),
- B(Star), R(2),
+ B(Star2),
B(Mov), R(this), R(1),
B(Mov), R(0), R(3),
B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(1), U8(3),
/* 49 S> */ B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(5),
+ B(Star5),
/* 61 E> */ B(CallUndefinedReceiver0), R(5), U8(0),
- B(Star), R(5),
+ B(Star5),
B(LdaImmutableContextSlot), R(0), U8(3), U8(0),
/* 63 E> */ B(LdaKeyedProperty), R(5), U8(2),
B(LdaImmutableContextSlot), R(0), U8(2), U8(0),
- B(Star), R(4),
+ B(Star4),
/* 66 E> */ B(CallAnyReceiver), R(4), R(5), U8(1), U8(4),
B(LdaUndefined),
/* 70 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodDeclaration.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodDeclaration.golden
index 73073757cb..b0178c0a2e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodDeclaration.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodDeclaration.golden
@@ -15,25 +15,25 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 51
+bytecode array length: 46
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaConstant), U8(2),
- B(Star), R(3),
+ B(Star3),
B(CallRuntime), U16(Runtime::kCreatePrivateBrandSymbol), R(3), U8(1),
B(StaCurrentContextSlot), U8(3),
- B(LdaTheHole),
- B(Star), R(6),
B(CreateClosure), U8(3), U8(0), U8(2),
- B(Star), R(2),
+ B(StaCurrentContextSlot), U8(2),
+ B(LdaTheHole),
+ B(Star6),
+ B(CreateClosure), U8(4), U8(1), U8(2),
+ B(Star2),
B(LdaConstant), U8(1),
- B(Star), R(4),
+ B(Star4),
B(Mov), R(2), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
- B(Star), R(4),
- B(CreateClosure), U8(4), U8(1), U8(2),
- B(StaCurrentContextSlot), U8(2),
+ B(Star4),
B(PopContext), R(1),
B(Mov), R(5), R(0),
B(LdaUndefined),
@@ -62,43 +62,43 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 100
+bytecode array length: 91
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
B(LdaConstant), U8(2),
- B(Star), R(4),
+ B(Star4),
B(CallRuntime), U16(Runtime::kCreatePrivateBrandSymbol), R(4), U8(1),
B(StaCurrentContextSlot), U8(3),
- B(LdaTheHole),
- B(Star), R(7),
B(CreateClosure), U8(3), U8(0), U8(2),
- B(Star), R(3),
+ B(StaCurrentContextSlot), U8(2),
+ B(LdaTheHole),
+ B(Star7),
+ B(CreateClosure), U8(4), U8(1), U8(2),
+ B(Star3),
B(LdaConstant), U8(1),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(3), R(6),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
- B(Star), R(5),
- B(CreateClosure), U8(4), U8(1), U8(2),
- B(StaCurrentContextSlot), U8(2),
+ B(Star5),
B(PopContext), R(2),
B(Mov), R(6), R(0),
/* 38 E> */ B(CreateBlockContext), U8(5),
B(PushContext), R(2),
B(LdaConstant), U8(7),
- B(Star), R(4),
+ B(Star4),
B(CallRuntime), U16(Runtime::kCreatePrivateBrandSymbol), R(4), U8(1),
B(StaCurrentContextSlot), U8(3),
- /* 93 E> */ B(CreateClosure), U8(8), U8(2), U8(2),
- B(Star), R(3),
+ B(CreateClosure), U8(8), U8(2), U8(2),
+ B(StaCurrentContextSlot), U8(2),
+ /* 93 E> */ B(CreateClosure), U8(9), U8(3), U8(2),
+ B(Star3),
B(LdaConstant), U8(6),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(3), R(6),
B(Mov), R(0), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
- B(Star), R(5),
- B(CreateClosure), U8(9), U8(3), U8(2),
- B(StaCurrentContextSlot), U8(2),
+ B(Star5),
B(PopContext), R(2),
B(Mov), R(6), R(1),
B(LdaUndefined),
@@ -130,44 +130,44 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 97
+bytecode array length: 84
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star6),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaConstant), U8(1),
- B(Star), R(4),
+ B(Star4),
B(CreateClosure), U8(3), U8(1), U8(2),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(3), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
- B(Star), R(4),
+ B(Star4),
B(PopContext), R(2),
B(Mov), R(5), R(0),
/* 38 E> */ B(CreateBlockContext), U8(4),
B(PushContext), R(2),
B(LdaConstant), U8(6),
- B(Star), R(4),
+ B(Star4),
B(CallRuntime), U16(Runtime::kCreatePrivateBrandSymbol), R(4), U8(1),
B(StaCurrentContextSlot), U8(3),
- /* 77 E> */ B(CreateClosure), U8(7), U8(2), U8(2),
- B(Star), R(3),
+ B(CreateClosure), U8(7), U8(2), U8(2),
+ B(StaCurrentContextSlot), U8(2),
+ /* 77 E> */ B(CreateClosure), U8(8), U8(3), U8(2),
+ B(Star3),
B(LdaConstant), U8(5),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(3), R(6),
B(Mov), R(0), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
- B(Star), R(5),
- B(CreateClosure), U8(8), U8(3), U8(2),
- B(StaCurrentContextSlot), U8(2),
- B(Star), R(6),
- B(Ldar), R(5),
- B(StaNamedProperty), R(6), U8(9), U8(0),
+ B(StaCurrentContextSlot), U8(4),
+ B(Star5),
+ B(Ldar), R(6),
+ B(StaCurrentContextSlot), U8(5),
B(PopContext), R(2),
- B(Mov), R(3), R(1),
+ B(Star1),
B(LdaUndefined),
/* 118 S> */ B(Return),
]
@@ -181,7 +181,6 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["C"],
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- SYMBOL_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
index 2e067b6f53..f2ef6f1f92 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
@@ -13,10 +13,10 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
/* 25 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 25 E> */ B(CallProperty0), R(0), R(arg0), U8(2),
/* 32 S> */ B(Return),
]
@@ -33,10 +33,10 @@ snippet: "
"
frame size: 1
parameter count: 4
-bytecode array length: 13
+bytecode array length: 12
bytecodes: [
/* 31 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 31 E> */ B(CallProperty2), R(0), R(arg0), R(arg1), R(arg2), U8(2),
/* 42 S> */ B(Return),
]
@@ -53,13 +53,13 @@ snippet: "
"
frame size: 3
parameter count: 3
-bytecode array length: 20
+bytecode array length: 18
bytecodes: [
/* 28 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
- B(Star), R(0),
+ B(Star0),
B(Ldar), R(arg1),
/* 35 E> */ B(Add), R(arg1), U8(2),
- B(Star), R(2),
+ B(Star2),
/* 28 E> */ B(CallProperty2), R(0), R(arg0), R(2), R(arg1), U8(3),
/* 43 S> */ B(Return),
]
@@ -207,10 +207,10 @@ snippet: "
"
frame size: 2
parameter count: 2
-bytecode array length: 542
+bytecode array length: 540
bytecodes: [
/* 26 S> */ B(CreateEmptyObjectLiteral),
- B(Star), R(0),
+ B(Star0),
/* 34 S> */ B(LdaNamedProperty), R(0), U8(0), U8(0),
/* 47 S> */ B(LdaNamedProperty), R(0), U8(1), U8(2),
/* 60 S> */ B(LdaNamedProperty), R(0), U8(2), U8(4),
@@ -341,7 +341,7 @@ bytecodes: [
/* 1685 S> */ B(LdaNamedProperty), R(0), U8(127), U8(254),
/* 1698 S> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(128), U16(256),
/* 1715 S> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(128), U16(256),
- B(Star), R(1),
+ B(Star1),
/* 1715 E> */ B(Wide), B(CallProperty0), R16(1), R16(arg0), U16(258),
/* 1722 S> */ B(Return),
]
@@ -486,24 +486,24 @@ snippet: "
"
frame size: 5
parameter count: 2
-bytecode array length: 50
+bytecode array length: 42
bytecodes: [
/* 25 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
- B(Star), R(4),
+ B(Star4),
/* 25 E> */ B(CallProperty1), R(2), R(arg0), R(4), U8(2),
- B(Star), R(2),
+ B(Star2),
/* 32 E> */ B(LdaNamedProperty), R(2), U8(0), U8(4),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
- B(Star), R(3),
+ B(Star3),
/* 33 E> */ B(CallProperty1), R(1), R(2), R(3), U8(6),
- B(Star), R(1),
+ B(Star1),
/* 40 E> */ B(LdaNamedProperty), R(1), U8(0), U8(8),
- B(Star), R(0),
+ B(Star0),
B(LdaSmi), I8(3),
- B(Star), R(2),
+ B(Star2),
/* 41 E> */ B(CallProperty1), R(0), R(1), R(2), U8(10),
/* 49 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreOneShot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreOneShot.golden
index 6fd6116f0c..456d8fedde 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreOneShot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreOneShot.golden
@@ -22,33 +22,33 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 76
+bytecode array length: 67
bytecodes: [
/* 7 S> */ B(LdaConstant), U8(0),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(41),
- B(Star), R(3),
+ B(Star3),
B(CallRuntime), U16(Runtime::kCreateObjectLiteralWithoutAllocationSite), R(2), U8(2),
/* 9 E> */ B(StaGlobal), U8(1), U8(0),
/* 60 S> */ B(LdaGlobal), U8(1), U8(3),
- B(Star), R(1),
+ B(Star1),
/* 65 E> */ B(LdaNamedPropertyNoFeedback), R(1), U8(2),
- B(Star), R(1),
+ B(Star1),
/* 73 E> */ B(LdaGlobal), U8(1), U8(3),
- B(Star), R(2),
+ B(Star2),
/* 74 E> */ B(LdaNamedPropertyNoFeedback), R(2), U8(3),
/* 71 E> */ B(Add), R(1), U8(2),
/* 62 E> */ B(StaGlobal), U8(4), U8(5),
/* 87 S> */ B(LdaGlobal), U8(1), U8(3),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(7),
/* 94 E> */ B(StaNamedPropertyNoFeedback), R(1), U8(3), U8(0),
/* 105 S> */ B(LdaGlobal), U8(1), U8(3),
- B(Star), R(1),
+ B(Star1),
/* 114 E> */ B(LdaGlobal), U8(1), U8(3),
- B(Star), R(2),
+ B(Star2),
/* 115 E> */ B(LdaNamedPropertyNoFeedback), R(2), U8(3),
- B(Star), R(2),
+ B(Star2),
/* 112 E> */ B(StaNamedPropertyNoFeedback), R(1), U8(2), U8(0),
B(Mov), R(2), R(0),
B(Ldar), R(0),
@@ -79,51 +79,51 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 118
+bytecode array length: 105
bytecodes: [
/* 7 S> */ B(LdaConstant), U8(0),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(41),
- B(Star), R(3),
+ B(Star3),
B(CallRuntime), U16(Runtime::kCreateObjectLiteralWithoutAllocationSite), R(2), U8(2),
/* 9 E> */ B(StaGlobal), U8(1), U8(0),
B(LdaUndefined),
- B(Star), R(0),
+ B(Star0),
/* 68 S> */ B(LdaZero),
/* 70 E> */ B(StaGlobal), U8(2), U8(2),
/* 77 S> */ B(LdaGlobal), U8(2), U8(4),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(5),
/* 77 E> */ B(TestLessThan), R(1), U8(6),
- B(JumpIfFalse), U8(82),
+ B(JumpIfFalse), U8(73),
/* 97 S> */ B(LdaGlobal), U8(1), U8(7),
- B(Star), R(1),
+ B(Star1),
/* 106 E> */ B(LdaGlobal), U8(1), U8(7),
- B(Star), R(2),
+ B(Star2),
/* 107 E> */ B(LdaNamedProperty), R(2), U8(3), U8(10),
- B(Star), R(2),
+ B(Star2),
/* 115 E> */ B(LdaGlobal), U8(1), U8(7),
- B(Star), R(3),
+ B(Star3),
/* 116 E> */ B(LdaNamedProperty), R(3), U8(4), U8(12),
/* 113 E> */ B(Add), R(2), U8(9),
/* 104 E> */ B(StaNamedProperty), R(1), U8(3), U8(14),
/* 131 S> */ B(LdaGlobal), U8(1), U8(7),
- B(Star), R(1),
+ B(Star1),
/* 140 E> */ B(LdaGlobal), U8(1), U8(7),
- B(Star), R(2),
+ B(Star2),
/* 141 E> */ B(LdaNamedProperty), R(2), U8(3), U8(10),
- B(Star), R(2),
+ B(Star2),
/* 149 E> */ B(LdaGlobal), U8(1), U8(7),
- B(Star), R(3),
+ B(Star3),
/* 150 E> */ B(LdaNamedProperty), R(3), U8(4), U8(12),
/* 147 E> */ B(Add), R(2), U8(16),
- B(Star), R(2),
+ B(Star2),
/* 138 E> */ B(StaNamedProperty), R(1), U8(4), U8(17),
B(Mov), R(2), R(0),
/* 84 S> */ B(LdaGlobal), U8(2), U8(4),
B(Inc), U8(19),
/* 84 E> */ B(StaGlobal), U8(2), U8(2),
- /* 63 E> */ B(JumpLoop), U8(89), I8(0),
+ /* 63 E> */ B(JumpLoop), U8(79), I8(0),
B(Ldar), R(0),
/* 171 S> */ B(Return),
]
@@ -152,47 +152,47 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 107
+bytecode array length: 94
bytecodes: [
/* 7 S> */ B(LdaConstant), U8(0),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(41),
- B(Star), R(3),
+ B(Star3),
B(CallRuntime), U16(Runtime::kCreateObjectLiteralWithoutAllocationSite), R(2), U8(2),
/* 9 E> */ B(StaGlobal), U8(1), U8(0),
B(LdaUndefined),
- B(Star), R(0),
+ B(Star0),
/* 72 S> */ B(LdaGlobal), U8(2), U8(2),
- B(Star), R(1),
+ B(Star1),
B(LdaZero),
/* 72 E> */ B(TestGreaterThan), R(1), U8(4),
- B(JumpIfFalse), U8(76),
+ B(JumpIfFalse), U8(67),
/* 87 S> */ B(LdaGlobal), U8(1), U8(5),
- B(Star), R(1),
+ B(Star1),
/* 97 E> */ B(LdaGlobal), U8(1), U8(5),
- B(Star), R(2),
+ B(Star2),
/* 98 E> */ B(LdaNamedProperty), R(2), U8(3), U8(8),
- B(Star), R(2),
+ B(Star2),
/* 106 E> */ B(LdaGlobal), U8(1), U8(5),
- B(Star), R(3),
+ B(Star3),
/* 107 E> */ B(LdaNamedProperty), R(3), U8(4), U8(10),
/* 104 E> */ B(Sub), R(2), U8(7),
/* 95 E> */ B(StaNamedProperty), R(1), U8(3), U8(12),
/* 122 S> */ B(LdaGlobal), U8(1), U8(5),
- B(Star), R(1),
+ B(Star1),
/* 132 E> */ B(LdaGlobal), U8(1), U8(5),
- B(Star), R(2),
+ B(Star2),
/* 133 E> */ B(LdaNamedProperty), R(2), U8(4), U8(10),
- B(Star), R(2),
+ B(Star2),
/* 141 E> */ B(LdaGlobal), U8(1), U8(5),
- B(Star), R(3),
+ B(Star3),
/* 142 E> */ B(LdaNamedProperty), R(3), U8(3), U8(8),
/* 139 E> */ B(Sub), R(2), U8(14),
- B(Star), R(2),
+ B(Star2),
/* 130 E> */ B(StaNamedProperty), R(1), U8(4), U8(15),
B(Mov), R(2), R(0),
B(Ldar), R(2),
- /* 63 E> */ B(JumpLoop), U8(82), I8(0),
+ /* 63 E> */ B(JumpLoop), U8(72), I8(0),
B(Ldar), R(0),
/* 163 S> */ B(Return),
]
@@ -221,37 +221,37 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 79
+bytecode array length: 70
bytecodes: [
/* 7 S> */ B(LdaConstant), U8(0),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(41),
- B(Star), R(3),
+ B(Star3),
B(CallRuntime), U16(Runtime::kCreateObjectLiteralWithoutAllocationSite), R(2), U8(2),
/* 9 E> */ B(StaGlobal), U8(1), U8(0),
/* 63 S> */ B(LdaSmi), I8(10),
/* 65 E> */ B(StaGlobal), U8(2), U8(2),
B(LdaUndefined),
- B(Star), R(0),
+ B(Star0),
/* 90 S> */ B(LdaGlobal), U8(1), U8(4),
- B(Star), R(1),
+ B(Star1),
/* 99 E> */ B(LdaGlobal), U8(1), U8(4),
- B(Star), R(2),
+ B(Star2),
/* 100 E> */ B(LdaNamedProperty), R(2), U8(3), U8(7),
- B(Star), R(2),
+ B(Star2),
/* 108 E> */ B(LdaGlobal), U8(1), U8(4),
- B(Star), R(3),
+ B(Star3),
/* 109 E> */ B(LdaNamedProperty), R(3), U8(4), U8(9),
/* 106 E> */ B(Sub), R(2), U8(6),
- B(Star), R(2),
+ B(Star2),
/* 97 E> */ B(StaNamedProperty), R(1), U8(4), U8(11),
B(Mov), R(2), R(0),
/* 133 S> */ B(LdaGlobal), U8(2), U8(13),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(10),
/* 133 E> */ B(TestLessThan), R(1), U8(15),
B(JumpIfFalse), U8(5),
- /* 77 E> */ B(JumpLoop), U8(49), I8(0),
+ /* 77 E> */ B(JumpLoop), U8(43), I8(0),
B(Ldar), R(0),
/* 146 S> */ B(Return),
]
@@ -281,33 +281,33 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 74
+bytecode array length: 66
bytecodes: [
/* 7 S> */ B(LdaConstant), U8(0),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(41),
- B(Star), R(3),
+ B(Star3),
B(CallRuntime), U16(Runtime::kCreateObjectLiteralWithoutAllocationSite), R(2), U8(2),
/* 9 E> */ B(StaGlobal), U8(1), U8(0),
/* 63 S> */ B(LdaGlobal), U8(1), U8(2),
- B(Star), R(1),
+ B(Star1),
/* 68 E> */ B(LdaNamedPropertyNoFeedback), R(1), U8(2),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(3),
/* 74 E> */ B(TestLessThan), R(1), U8(4),
- B(JumpIfFalse), U8(22),
+ B(JumpIfFalse), U8(20),
/* 89 S> */ B(LdaGlobal), U8(1), U8(2),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(3),
- B(Star), R(2),
+ B(Star2),
/* 96 E> */ B(StaNamedPropertyNoFeedback), R(1), U8(2), U8(0),
B(Mov), R(2), R(0),
B(Ldar), R(2),
- B(Jump), U8(20),
+ B(Jump), U8(18),
/* 124 S> */ B(LdaGlobal), U8(1), U8(2),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(3),
- B(Star), R(2),
+ B(Star2),
/* 131 E> */ B(StaNamedPropertyNoFeedback), R(1), U8(3), U8(0),
B(Mov), R(2), R(0),
B(Ldar), R(2),
@@ -331,15 +331,15 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 19
+bytecode array length: 16
bytecodes: [
/* 7 S> */ B(LdaConstant), U8(0),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(4),
- B(Star), R(4),
+ B(Star4),
B(CallRuntime), U16(Runtime::kCreateArrayLiteralWithoutAllocationSite), R(3), U8(2),
/* 9 E> */ B(StaGlobal), U8(1), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 36 S> */ B(Return),
]
constant pool: [
@@ -357,15 +357,15 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 19
+bytecode array length: 16
bytecodes: [
/* 7 S> */ B(LdaConstant), U8(0),
- B(Star), R(3),
+ B(Star3),
B(LdaSmi), I8(37),
- B(Star), R(4),
+ B(Star4),
B(CallRuntime), U16(Runtime::kCreateArrayLiteralWithoutAllocationSite), R(3), U8(2),
/* 9 E> */ B(StaGlobal), U8(1), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 21 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreWithoutOneShot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreWithoutOneShot.golden
index ef630cfa6f..179bf0dc63 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreWithoutOneShot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreWithoutOneShot.golden
@@ -21,29 +21,29 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 70
+bytecode array length: 63
bytecodes: [
/* 7 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
/* 9 E> */ B(StaGlobal), U8(1), U8(1),
/* 66 S> */ B(LdaGlobal), U8(1), U8(4),
- B(Star), R(1),
+ B(Star1),
/* 71 E> */ B(LdaNamedProperty), R(1), U8(2), U8(6),
- B(Star), R(1),
+ B(Star1),
/* 80 E> */ B(LdaGlobal), U8(1), U8(4),
- B(Star), R(2),
+ B(Star2),
/* 81 E> */ B(LdaNamedProperty), R(2), U8(3), U8(8),
/* 78 E> */ B(Add), R(1), U8(3),
/* 68 E> */ B(StaGlobal), U8(4), U8(10),
/* 95 S> */ B(LdaGlobal), U8(1), U8(4),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(7),
/* 103 E> */ B(StaNamedProperty), R(1), U8(3), U8(12),
/* 114 S> */ B(LdaGlobal), U8(1), U8(4),
- B(Star), R(1),
+ B(Star1),
/* 124 E> */ B(LdaGlobal), U8(1), U8(4),
- B(Star), R(2),
+ B(Star2),
/* 125 E> */ B(LdaNamedProperty), R(2), U8(3), U8(8),
- B(Star), R(2),
+ B(Star2),
/* 122 E> */ B(StaNamedProperty), R(1), U8(2), U8(14),
B(Mov), R(2), R(0),
B(Ldar), R(0),
@@ -75,29 +75,29 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 66
+bytecode array length: 60
bytecodes: [
/* 7 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
/* 9 E> */ B(StaGlobal), U8(1), U8(1),
/* 65 S> */ B(LdaGlobal), U8(1), U8(3),
- B(Star), R(1),
+ B(Star1),
/* 70 E> */ B(LdaNamedProperty), R(1), U8(2), U8(5),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(3),
/* 77 E> */ B(TestLessThan), R(1), U8(7),
- B(JumpIfFalse), U8(22),
+ B(JumpIfFalse), U8(20),
/* 92 S> */ B(LdaGlobal), U8(1), U8(3),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(3),
- B(Star), R(2),
+ B(Star2),
/* 100 E> */ B(StaNamedProperty), R(1), U8(2), U8(8),
B(Mov), R(2), R(0),
B(Ldar), R(2),
- B(Jump), U8(20),
+ B(Jump), U8(18),
/* 128 S> */ B(LdaGlobal), U8(1), U8(3),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(3),
- B(Star), R(2),
+ B(Star2),
/* 136 E> */ B(StaNamedProperty), R(1), U8(3), U8(10),
B(Mov), R(2), R(0),
B(Ldar), R(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
index e28ded8006..242b24b70c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
@@ -85,10 +85,10 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
/* 26 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 32 S> */ B(LdaSmi), I8(-124),
/* 40 E> */ B(LdaKeyedProperty), R(arg0), U8(2),
/* 47 S> */ B(Return),
@@ -237,10 +237,10 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 524
+bytecode array length: 523
bytecodes: [
/* 26 S> */ B(CreateEmptyObjectLiteral),
- B(Star), R(0),
+ B(Star0),
/* 34 S> */ B(LdaNamedProperty), R(0), U8(0), U8(0),
/* 45 S> */ B(LdaNamedProperty), R(0), U8(1), U8(2),
/* 56 S> */ B(LdaNamedProperty), R(0), U8(2), U8(4),
@@ -644,392 +644,392 @@ snippet: "
"
frame size: 1
parameter count: 3
-bytecode array length: 905
+bytecode array length: 777
bytecodes: [
/* 30 S> */ B(Ldar), R(arg1),
/* 35 E> */ B(LdaKeyedProperty), R(arg0), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 42 S> */ B(Ldar), R(arg1),
/* 47 E> */ B(LdaKeyedProperty), R(arg0), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(Ldar), R(arg1),
/* 59 E> */ B(LdaKeyedProperty), R(arg0), U8(4),
- B(Star), R(0),
+ B(Star0),
/* 66 S> */ B(Ldar), R(arg1),
/* 71 E> */ B(LdaKeyedProperty), R(arg0), U8(6),
- B(Star), R(0),
+ B(Star0),
/* 78 S> */ B(Ldar), R(arg1),
/* 83 E> */ B(LdaKeyedProperty), R(arg0), U8(8),
- B(Star), R(0),
+ B(Star0),
/* 90 S> */ B(Ldar), R(arg1),
/* 95 E> */ B(LdaKeyedProperty), R(arg0), U8(10),
- B(Star), R(0),
+ B(Star0),
/* 102 S> */ B(Ldar), R(arg1),
/* 107 E> */ B(LdaKeyedProperty), R(arg0), U8(12),
- B(Star), R(0),
+ B(Star0),
/* 114 S> */ B(Ldar), R(arg1),
/* 119 E> */ B(LdaKeyedProperty), R(arg0), U8(14),
- B(Star), R(0),
+ B(Star0),
/* 126 S> */ B(Ldar), R(arg1),
/* 131 E> */ B(LdaKeyedProperty), R(arg0), U8(16),
- B(Star), R(0),
+ B(Star0),
/* 138 S> */ B(Ldar), R(arg1),
/* 143 E> */ B(LdaKeyedProperty), R(arg0), U8(18),
- B(Star), R(0),
+ B(Star0),
/* 150 S> */ B(Ldar), R(arg1),
/* 155 E> */ B(LdaKeyedProperty), R(arg0), U8(20),
- B(Star), R(0),
+ B(Star0),
/* 162 S> */ B(Ldar), R(arg1),
/* 167 E> */ B(LdaKeyedProperty), R(arg0), U8(22),
- B(Star), R(0),
+ B(Star0),
/* 174 S> */ B(Ldar), R(arg1),
/* 179 E> */ B(LdaKeyedProperty), R(arg0), U8(24),
- B(Star), R(0),
+ B(Star0),
/* 186 S> */ B(Ldar), R(arg1),
/* 191 E> */ B(LdaKeyedProperty), R(arg0), U8(26),
- B(Star), R(0),
+ B(Star0),
/* 198 S> */ B(Ldar), R(arg1),
/* 203 E> */ B(LdaKeyedProperty), R(arg0), U8(28),
- B(Star), R(0),
+ B(Star0),
/* 210 S> */ B(Ldar), R(arg1),
/* 215 E> */ B(LdaKeyedProperty), R(arg0), U8(30),
- B(Star), R(0),
+ B(Star0),
/* 222 S> */ B(Ldar), R(arg1),
/* 227 E> */ B(LdaKeyedProperty), R(arg0), U8(32),
- B(Star), R(0),
+ B(Star0),
/* 234 S> */ B(Ldar), R(arg1),
/* 239 E> */ B(LdaKeyedProperty), R(arg0), U8(34),
- B(Star), R(0),
+ B(Star0),
/* 246 S> */ B(Ldar), R(arg1),
/* 251 E> */ B(LdaKeyedProperty), R(arg0), U8(36),
- B(Star), R(0),
+ B(Star0),
/* 258 S> */ B(Ldar), R(arg1),
/* 263 E> */ B(LdaKeyedProperty), R(arg0), U8(38),
- B(Star), R(0),
+ B(Star0),
/* 270 S> */ B(Ldar), R(arg1),
/* 275 E> */ B(LdaKeyedProperty), R(arg0), U8(40),
- B(Star), R(0),
+ B(Star0),
/* 282 S> */ B(Ldar), R(arg1),
/* 287 E> */ B(LdaKeyedProperty), R(arg0), U8(42),
- B(Star), R(0),
+ B(Star0),
/* 294 S> */ B(Ldar), R(arg1),
/* 299 E> */ B(LdaKeyedProperty), R(arg0), U8(44),
- B(Star), R(0),
+ B(Star0),
/* 306 S> */ B(Ldar), R(arg1),
/* 311 E> */ B(LdaKeyedProperty), R(arg0), U8(46),
- B(Star), R(0),
+ B(Star0),
/* 318 S> */ B(Ldar), R(arg1),
/* 323 E> */ B(LdaKeyedProperty), R(arg0), U8(48),
- B(Star), R(0),
+ B(Star0),
/* 330 S> */ B(Ldar), R(arg1),
/* 335 E> */ B(LdaKeyedProperty), R(arg0), U8(50),
- B(Star), R(0),
+ B(Star0),
/* 342 S> */ B(Ldar), R(arg1),
/* 347 E> */ B(LdaKeyedProperty), R(arg0), U8(52),
- B(Star), R(0),
+ B(Star0),
/* 354 S> */ B(Ldar), R(arg1),
/* 359 E> */ B(LdaKeyedProperty), R(arg0), U8(54),
- B(Star), R(0),
+ B(Star0),
/* 366 S> */ B(Ldar), R(arg1),
/* 371 E> */ B(LdaKeyedProperty), R(arg0), U8(56),
- B(Star), R(0),
+ B(Star0),
/* 378 S> */ B(Ldar), R(arg1),
/* 383 E> */ B(LdaKeyedProperty), R(arg0), U8(58),
- B(Star), R(0),
+ B(Star0),
/* 390 S> */ B(Ldar), R(arg1),
/* 395 E> */ B(LdaKeyedProperty), R(arg0), U8(60),
- B(Star), R(0),
+ B(Star0),
/* 402 S> */ B(Ldar), R(arg1),
/* 407 E> */ B(LdaKeyedProperty), R(arg0), U8(62),
- B(Star), R(0),
+ B(Star0),
/* 414 S> */ B(Ldar), R(arg1),
/* 419 E> */ B(LdaKeyedProperty), R(arg0), U8(64),
- B(Star), R(0),
+ B(Star0),
/* 426 S> */ B(Ldar), R(arg1),
/* 431 E> */ B(LdaKeyedProperty), R(arg0), U8(66),
- B(Star), R(0),
+ B(Star0),
/* 438 S> */ B(Ldar), R(arg1),
/* 443 E> */ B(LdaKeyedProperty), R(arg0), U8(68),
- B(Star), R(0),
+ B(Star0),
/* 450 S> */ B(Ldar), R(arg1),
/* 455 E> */ B(LdaKeyedProperty), R(arg0), U8(70),
- B(Star), R(0),
+ B(Star0),
/* 462 S> */ B(Ldar), R(arg1),
/* 467 E> */ B(LdaKeyedProperty), R(arg0), U8(72),
- B(Star), R(0),
+ B(Star0),
/* 474 S> */ B(Ldar), R(arg1),
/* 479 E> */ B(LdaKeyedProperty), R(arg0), U8(74),
- B(Star), R(0),
+ B(Star0),
/* 486 S> */ B(Ldar), R(arg1),
/* 491 E> */ B(LdaKeyedProperty), R(arg0), U8(76),
- B(Star), R(0),
+ B(Star0),
/* 498 S> */ B(Ldar), R(arg1),
/* 503 E> */ B(LdaKeyedProperty), R(arg0), U8(78),
- B(Star), R(0),
+ B(Star0),
/* 510 S> */ B(Ldar), R(arg1),
/* 515 E> */ B(LdaKeyedProperty), R(arg0), U8(80),
- B(Star), R(0),
+ B(Star0),
/* 522 S> */ B(Ldar), R(arg1),
/* 527 E> */ B(LdaKeyedProperty), R(arg0), U8(82),
- B(Star), R(0),
+ B(Star0),
/* 534 S> */ B(Ldar), R(arg1),
/* 539 E> */ B(LdaKeyedProperty), R(arg0), U8(84),
- B(Star), R(0),
+ B(Star0),
/* 546 S> */ B(Ldar), R(arg1),
/* 551 E> */ B(LdaKeyedProperty), R(arg0), U8(86),
- B(Star), R(0),
+ B(Star0),
/* 558 S> */ B(Ldar), R(arg1),
/* 563 E> */ B(LdaKeyedProperty), R(arg0), U8(88),
- B(Star), R(0),
+ B(Star0),
/* 570 S> */ B(Ldar), R(arg1),
/* 575 E> */ B(LdaKeyedProperty), R(arg0), U8(90),
- B(Star), R(0),
+ B(Star0),
/* 582 S> */ B(Ldar), R(arg1),
/* 587 E> */ B(LdaKeyedProperty), R(arg0), U8(92),
- B(Star), R(0),
+ B(Star0),
/* 594 S> */ B(Ldar), R(arg1),
/* 599 E> */ B(LdaKeyedProperty), R(arg0), U8(94),
- B(Star), R(0),
+ B(Star0),
/* 606 S> */ B(Ldar), R(arg1),
/* 611 E> */ B(LdaKeyedProperty), R(arg0), U8(96),
- B(Star), R(0),
+ B(Star0),
/* 618 S> */ B(Ldar), R(arg1),
/* 623 E> */ B(LdaKeyedProperty), R(arg0), U8(98),
- B(Star), R(0),
+ B(Star0),
/* 630 S> */ B(Ldar), R(arg1),
/* 635 E> */ B(LdaKeyedProperty), R(arg0), U8(100),
- B(Star), R(0),
+ B(Star0),
/* 642 S> */ B(Ldar), R(arg1),
/* 647 E> */ B(LdaKeyedProperty), R(arg0), U8(102),
- B(Star), R(0),
+ B(Star0),
/* 654 S> */ B(Ldar), R(arg1),
/* 659 E> */ B(LdaKeyedProperty), R(arg0), U8(104),
- B(Star), R(0),
+ B(Star0),
/* 666 S> */ B(Ldar), R(arg1),
/* 671 E> */ B(LdaKeyedProperty), R(arg0), U8(106),
- B(Star), R(0),
+ B(Star0),
/* 678 S> */ B(Ldar), R(arg1),
/* 683 E> */ B(LdaKeyedProperty), R(arg0), U8(108),
- B(Star), R(0),
+ B(Star0),
/* 690 S> */ B(Ldar), R(arg1),
/* 695 E> */ B(LdaKeyedProperty), R(arg0), U8(110),
- B(Star), R(0),
+ B(Star0),
/* 702 S> */ B(Ldar), R(arg1),
/* 707 E> */ B(LdaKeyedProperty), R(arg0), U8(112),
- B(Star), R(0),
+ B(Star0),
/* 714 S> */ B(Ldar), R(arg1),
/* 719 E> */ B(LdaKeyedProperty), R(arg0), U8(114),
- B(Star), R(0),
+ B(Star0),
/* 726 S> */ B(Ldar), R(arg1),
/* 731 E> */ B(LdaKeyedProperty), R(arg0), U8(116),
- B(Star), R(0),
+ B(Star0),
/* 738 S> */ B(Ldar), R(arg1),
/* 743 E> */ B(LdaKeyedProperty), R(arg0), U8(118),
- B(Star), R(0),
+ B(Star0),
/* 750 S> */ B(Ldar), R(arg1),
/* 755 E> */ B(LdaKeyedProperty), R(arg0), U8(120),
- B(Star), R(0),
+ B(Star0),
/* 762 S> */ B(Ldar), R(arg1),
/* 767 E> */ B(LdaKeyedProperty), R(arg0), U8(122),
- B(Star), R(0),
+ B(Star0),
/* 774 S> */ B(Ldar), R(arg1),
/* 779 E> */ B(LdaKeyedProperty), R(arg0), U8(124),
- B(Star), R(0),
+ B(Star0),
/* 786 S> */ B(Ldar), R(arg1),
/* 791 E> */ B(LdaKeyedProperty), R(arg0), U8(126),
- B(Star), R(0),
+ B(Star0),
/* 798 S> */ B(Ldar), R(arg1),
/* 803 E> */ B(LdaKeyedProperty), R(arg0), U8(128),
- B(Star), R(0),
+ B(Star0),
/* 810 S> */ B(Ldar), R(arg1),
/* 815 E> */ B(LdaKeyedProperty), R(arg0), U8(130),
- B(Star), R(0),
+ B(Star0),
/* 822 S> */ B(Ldar), R(arg1),
/* 827 E> */ B(LdaKeyedProperty), R(arg0), U8(132),
- B(Star), R(0),
+ B(Star0),
/* 834 S> */ B(Ldar), R(arg1),
/* 839 E> */ B(LdaKeyedProperty), R(arg0), U8(134),
- B(Star), R(0),
+ B(Star0),
/* 846 S> */ B(Ldar), R(arg1),
/* 851 E> */ B(LdaKeyedProperty), R(arg0), U8(136),
- B(Star), R(0),
+ B(Star0),
/* 858 S> */ B(Ldar), R(arg1),
/* 863 E> */ B(LdaKeyedProperty), R(arg0), U8(138),
- B(Star), R(0),
+ B(Star0),
/* 870 S> */ B(Ldar), R(arg1),
/* 875 E> */ B(LdaKeyedProperty), R(arg0), U8(140),
- B(Star), R(0),
+ B(Star0),
/* 882 S> */ B(Ldar), R(arg1),
/* 887 E> */ B(LdaKeyedProperty), R(arg0), U8(142),
- B(Star), R(0),
+ B(Star0),
/* 894 S> */ B(Ldar), R(arg1),
/* 899 E> */ B(LdaKeyedProperty), R(arg0), U8(144),
- B(Star), R(0),
+ B(Star0),
/* 906 S> */ B(Ldar), R(arg1),
/* 911 E> */ B(LdaKeyedProperty), R(arg0), U8(146),
- B(Star), R(0),
+ B(Star0),
/* 918 S> */ B(Ldar), R(arg1),
/* 923 E> */ B(LdaKeyedProperty), R(arg0), U8(148),
- B(Star), R(0),
+ B(Star0),
/* 930 S> */ B(Ldar), R(arg1),
/* 935 E> */ B(LdaKeyedProperty), R(arg0), U8(150),
- B(Star), R(0),
+ B(Star0),
/* 942 S> */ B(Ldar), R(arg1),
/* 947 E> */ B(LdaKeyedProperty), R(arg0), U8(152),
- B(Star), R(0),
+ B(Star0),
/* 954 S> */ B(Ldar), R(arg1),
/* 959 E> */ B(LdaKeyedProperty), R(arg0), U8(154),
- B(Star), R(0),
+ B(Star0),
/* 966 S> */ B(Ldar), R(arg1),
/* 971 E> */ B(LdaKeyedProperty), R(arg0), U8(156),
- B(Star), R(0),
+ B(Star0),
/* 978 S> */ B(Ldar), R(arg1),
/* 983 E> */ B(LdaKeyedProperty), R(arg0), U8(158),
- B(Star), R(0),
+ B(Star0),
/* 990 S> */ B(Ldar), R(arg1),
/* 995 E> */ B(LdaKeyedProperty), R(arg0), U8(160),
- B(Star), R(0),
+ B(Star0),
/* 1002 S> */ B(Ldar), R(arg1),
/* 1007 E> */ B(LdaKeyedProperty), R(arg0), U8(162),
- B(Star), R(0),
+ B(Star0),
/* 1014 S> */ B(Ldar), R(arg1),
/* 1019 E> */ B(LdaKeyedProperty), R(arg0), U8(164),
- B(Star), R(0),
+ B(Star0),
/* 1026 S> */ B(Ldar), R(arg1),
/* 1031 E> */ B(LdaKeyedProperty), R(arg0), U8(166),
- B(Star), R(0),
+ B(Star0),
/* 1038 S> */ B(Ldar), R(arg1),
/* 1043 E> */ B(LdaKeyedProperty), R(arg0), U8(168),
- B(Star), R(0),
+ B(Star0),
/* 1050 S> */ B(Ldar), R(arg1),
/* 1055 E> */ B(LdaKeyedProperty), R(arg0), U8(170),
- B(Star), R(0),
+ B(Star0),
/* 1062 S> */ B(Ldar), R(arg1),
/* 1067 E> */ B(LdaKeyedProperty), R(arg0), U8(172),
- B(Star), R(0),
+ B(Star0),
/* 1074 S> */ B(Ldar), R(arg1),
/* 1079 E> */ B(LdaKeyedProperty), R(arg0), U8(174),
- B(Star), R(0),
+ B(Star0),
/* 1086 S> */ B(Ldar), R(arg1),
/* 1091 E> */ B(LdaKeyedProperty), R(arg0), U8(176),
- B(Star), R(0),
+ B(Star0),
/* 1098 S> */ B(Ldar), R(arg1),
/* 1103 E> */ B(LdaKeyedProperty), R(arg0), U8(178),
- B(Star), R(0),
+ B(Star0),
/* 1110 S> */ B(Ldar), R(arg1),
/* 1115 E> */ B(LdaKeyedProperty), R(arg0), U8(180),
- B(Star), R(0),
+ B(Star0),
/* 1122 S> */ B(Ldar), R(arg1),
/* 1127 E> */ B(LdaKeyedProperty), R(arg0), U8(182),
- B(Star), R(0),
+ B(Star0),
/* 1134 S> */ B(Ldar), R(arg1),
/* 1139 E> */ B(LdaKeyedProperty), R(arg0), U8(184),
- B(Star), R(0),
+ B(Star0),
/* 1146 S> */ B(Ldar), R(arg1),
/* 1151 E> */ B(LdaKeyedProperty), R(arg0), U8(186),
- B(Star), R(0),
+ B(Star0),
/* 1158 S> */ B(Ldar), R(arg1),
/* 1163 E> */ B(LdaKeyedProperty), R(arg0), U8(188),
- B(Star), R(0),
+ B(Star0),
/* 1170 S> */ B(Ldar), R(arg1),
/* 1175 E> */ B(LdaKeyedProperty), R(arg0), U8(190),
- B(Star), R(0),
+ B(Star0),
/* 1182 S> */ B(Ldar), R(arg1),
/* 1187 E> */ B(LdaKeyedProperty), R(arg0), U8(192),
- B(Star), R(0),
+ B(Star0),
/* 1194 S> */ B(Ldar), R(arg1),
/* 1199 E> */ B(LdaKeyedProperty), R(arg0), U8(194),
- B(Star), R(0),
+ B(Star0),
/* 1206 S> */ B(Ldar), R(arg1),
/* 1211 E> */ B(LdaKeyedProperty), R(arg0), U8(196),
- B(Star), R(0),
+ B(Star0),
/* 1218 S> */ B(Ldar), R(arg1),
/* 1223 E> */ B(LdaKeyedProperty), R(arg0), U8(198),
- B(Star), R(0),
+ B(Star0),
/* 1230 S> */ B(Ldar), R(arg1),
/* 1235 E> */ B(LdaKeyedProperty), R(arg0), U8(200),
- B(Star), R(0),
+ B(Star0),
/* 1242 S> */ B(Ldar), R(arg1),
/* 1247 E> */ B(LdaKeyedProperty), R(arg0), U8(202),
- B(Star), R(0),
+ B(Star0),
/* 1254 S> */ B(Ldar), R(arg1),
/* 1259 E> */ B(LdaKeyedProperty), R(arg0), U8(204),
- B(Star), R(0),
+ B(Star0),
/* 1266 S> */ B(Ldar), R(arg1),
/* 1271 E> */ B(LdaKeyedProperty), R(arg0), U8(206),
- B(Star), R(0),
+ B(Star0),
/* 1278 S> */ B(Ldar), R(arg1),
/* 1283 E> */ B(LdaKeyedProperty), R(arg0), U8(208),
- B(Star), R(0),
+ B(Star0),
/* 1290 S> */ B(Ldar), R(arg1),
/* 1295 E> */ B(LdaKeyedProperty), R(arg0), U8(210),
- B(Star), R(0),
+ B(Star0),
/* 1302 S> */ B(Ldar), R(arg1),
/* 1307 E> */ B(LdaKeyedProperty), R(arg0), U8(212),
- B(Star), R(0),
+ B(Star0),
/* 1314 S> */ B(Ldar), R(arg1),
/* 1319 E> */ B(LdaKeyedProperty), R(arg0), U8(214),
- B(Star), R(0),
+ B(Star0),
/* 1326 S> */ B(Ldar), R(arg1),
/* 1331 E> */ B(LdaKeyedProperty), R(arg0), U8(216),
- B(Star), R(0),
+ B(Star0),
/* 1338 S> */ B(Ldar), R(arg1),
/* 1343 E> */ B(LdaKeyedProperty), R(arg0), U8(218),
- B(Star), R(0),
+ B(Star0),
/* 1350 S> */ B(Ldar), R(arg1),
/* 1355 E> */ B(LdaKeyedProperty), R(arg0), U8(220),
- B(Star), R(0),
+ B(Star0),
/* 1362 S> */ B(Ldar), R(arg1),
/* 1367 E> */ B(LdaKeyedProperty), R(arg0), U8(222),
- B(Star), R(0),
+ B(Star0),
/* 1374 S> */ B(Ldar), R(arg1),
/* 1379 E> */ B(LdaKeyedProperty), R(arg0), U8(224),
- B(Star), R(0),
+ B(Star0),
/* 1386 S> */ B(Ldar), R(arg1),
/* 1391 E> */ B(LdaKeyedProperty), R(arg0), U8(226),
- B(Star), R(0),
+ B(Star0),
/* 1398 S> */ B(Ldar), R(arg1),
/* 1403 E> */ B(LdaKeyedProperty), R(arg0), U8(228),
- B(Star), R(0),
+ B(Star0),
/* 1410 S> */ B(Ldar), R(arg1),
/* 1415 E> */ B(LdaKeyedProperty), R(arg0), U8(230),
- B(Star), R(0),
+ B(Star0),
/* 1422 S> */ B(Ldar), R(arg1),
/* 1427 E> */ B(LdaKeyedProperty), R(arg0), U8(232),
- B(Star), R(0),
+ B(Star0),
/* 1434 S> */ B(Ldar), R(arg1),
/* 1439 E> */ B(LdaKeyedProperty), R(arg0), U8(234),
- B(Star), R(0),
+ B(Star0),
/* 1446 S> */ B(Ldar), R(arg1),
/* 1451 E> */ B(LdaKeyedProperty), R(arg0), U8(236),
- B(Star), R(0),
+ B(Star0),
/* 1458 S> */ B(Ldar), R(arg1),
/* 1463 E> */ B(LdaKeyedProperty), R(arg0), U8(238),
- B(Star), R(0),
+ B(Star0),
/* 1470 S> */ B(Ldar), R(arg1),
/* 1475 E> */ B(LdaKeyedProperty), R(arg0), U8(240),
- B(Star), R(0),
+ B(Star0),
/* 1482 S> */ B(Ldar), R(arg1),
/* 1487 E> */ B(LdaKeyedProperty), R(arg0), U8(242),
- B(Star), R(0),
+ B(Star0),
/* 1494 S> */ B(Ldar), R(arg1),
/* 1499 E> */ B(LdaKeyedProperty), R(arg0), U8(244),
- B(Star), R(0),
+ B(Star0),
/* 1506 S> */ B(Ldar), R(arg1),
/* 1511 E> */ B(LdaKeyedProperty), R(arg0), U8(246),
- B(Star), R(0),
+ B(Star0),
/* 1518 S> */ B(Ldar), R(arg1),
/* 1523 E> */ B(LdaKeyedProperty), R(arg0), U8(248),
- B(Star), R(0),
+ B(Star0),
/* 1530 S> */ B(Ldar), R(arg1),
/* 1535 E> */ B(LdaKeyedProperty), R(arg0), U8(250),
- B(Star), R(0),
+ B(Star0),
/* 1542 S> */ B(Ldar), R(arg1),
/* 1547 E> */ B(LdaKeyedProperty), R(arg0), U8(252),
- B(Star), R(0),
+ B(Star0),
/* 1554 S> */ B(Ldar), R(arg1),
/* 1559 E> */ B(LdaKeyedProperty), R(arg0), U8(254),
- B(Star), R(0),
+ B(Star0),
/* 1566 S> */ B(Ldar), R(arg1),
/* 1574 E> */ B(Wide), B(LdaKeyedProperty), R16(arg0), U16(256),
/* 1578 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
index 32e4e31848..318139541a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
@@ -55,10 +55,10 @@ snippet: "
"
frame size: 2
parameter count: 2
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
/* 16 S> */ B(LdaSmi), I8(100),
- B(Star), R(1),
+ B(Star1),
B(LdaConstant), U8(0),
/* 23 E> */ B(StaKeyedProperty), R(arg0), R(1), U8(0),
B(LdaUndefined),
@@ -291,12 +291,12 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 533
+bytecode array length: 532
bytecodes: [
/* 18 S> */ B(LdaSmi), I8(1),
/* 25 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(0),
/* 40 S> */ B(CreateEmptyObjectLiteral),
- B(Star), R(0),
+ B(Star0),
/* 48 S> */ B(LdaNamedProperty), R(0), U8(1), U8(2),
/* 61 S> */ B(LdaNamedProperty), R(0), U8(2), U8(4),
/* 74 S> */ B(LdaNamedProperty), R(0), U8(3), U8(6),
@@ -704,12 +704,12 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 533
+bytecode array length: 532
bytecodes: [
/* 33 S> */ B(LdaSmi), I8(1),
/* 40 E> */ B(StaNamedProperty), R(arg0), U8(0), U8(0),
/* 55 S> */ B(CreateEmptyObjectLiteral),
- B(Star), R(0),
+ B(Star0),
/* 63 S> */ B(LdaNamedProperty), R(0), U8(1), U8(2),
/* 76 S> */ B(LdaNamedProperty), R(0), U8(2), U8(4),
/* 89 S> */ B(LdaNamedProperty), R(0), U8(3), U8(6),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
index a841e05375..ce5b8ca190 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
@@ -23,26 +23,26 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 118
+bytecode array length: 106
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star6),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaConstant), U8(1),
- B(Star), R(4),
+ B(Star4),
/* 60 S> */ B(LdaConstant), U8(3),
B(StaCurrentContextSlot), U8(2),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(3), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
- B(Star), R(4),
+ B(Star4),
B(CreateClosure), U8(4), U8(1), U8(2),
- B(Star), R(5),
+ B(Star5),
B(StaNamedProperty), R(3), U8(5), U8(0),
B(PopContext), R(2),
B(Mov), R(3), R(0),
@@ -51,19 +51,19 @@ bytecodes: [
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star6),
B(CreateClosure), U8(8), U8(2), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaConstant), U8(7),
- B(Star), R(4),
+ B(Star4),
/* 99 S> */ B(LdaConstant), U8(3),
B(StaCurrentContextSlot), U8(2),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(3), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
- B(Star), R(4),
+ B(Star4),
B(CreateClosure), U8(9), U8(3), U8(2),
- B(Star), R(5),
+ B(Star5),
B(StaNamedProperty), R(3), U8(5), U8(2),
B(PopContext), R(2),
B(Mov), R(3), R(1),
@@ -121,34 +121,34 @@ snippet: "
"
frame size: 12
parameter count: 1
-bytecode array length: 228
+bytecode array length: 204
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(3),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
B(LdaTheHole),
- B(Star), R(11),
+ B(Star11),
B(CreateClosure), U8(3), U8(0), U8(2),
- B(Star), R(8),
+ B(Star8),
B(LdaConstant), U8(2),
- B(Star), R(9),
+ B(Star9),
B(Mov), R(8), R(10),
B(CallRuntime), U16(Runtime::kDefineClass), R(9), U8(3),
- B(Star), R(9),
+ B(Star9),
B(CreateClosure), U8(4), U8(1), U8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaConstant), U8(1),
- B(Star), R(5),
+ B(Star5),
/* 77 S> */ B(LdaConstant), U8(5),
B(StaCurrentContextSlot), U8(2),
- B(Star), R(8),
+ B(Star8),
B(Mov), R(4), R(6),
B(Mov), R(10), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(4),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(6), U8(2), U8(2),
- B(Star), R(6),
+ B(Star6),
B(StaNamedProperty), R(4), U8(7), U8(0),
B(PopContext), R(3),
B(Mov), R(4), R(0),
@@ -157,29 +157,29 @@ bytecodes: [
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
B(LdaTheHole),
- B(Star), R(11),
+ B(Star11),
B(CreateClosure), U8(11), U8(3), U8(2),
- B(Star), R(8),
+ B(Star8),
B(LdaConstant), U8(10),
- B(Star), R(9),
+ B(Star9),
B(Mov), R(8), R(10),
B(CallRuntime), U16(Runtime::kDefineClass), R(9), U8(3),
- B(Star), R(9),
+ B(Star9),
B(CreateClosure), U8(12), U8(4), U8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaConstant), U8(9),
- B(Star), R(5),
+ B(Star5),
/* 133 S> */ B(LdaConstant), U8(5),
B(StaCurrentContextSlot), U8(2),
- B(Star), R(8),
+ B(Star8),
B(CreateClosure), U8(13), U8(5), U8(2),
- B(Star), R(9),
+ B(Star9),
B(Mov), R(4), R(6),
B(Mov), R(10), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(5),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(14), U8(6), U8(2),
- B(Star), R(6),
+ B(Star6),
B(StaNamedProperty), R(4), U8(7), U8(2),
B(PopContext), R(3),
B(Mov), R(4), R(1),
@@ -188,18 +188,18 @@ bytecodes: [
B(LdaTheHole),
B(StaCurrentContextSlot), U8(2),
/* 236 E> */ B(CreateClosure), U8(17), U8(7), U8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaConstant), U8(16),
- B(Star), R(5),
+ B(Star5),
/* 256 S> */ B(LdaConstant), U8(5),
B(StaCurrentContextSlot), U8(2),
- B(Star), R(8),
+ B(Star8),
B(Mov), R(4), R(6),
B(Mov), R(1), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(4),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(18), U8(8), U8(2),
- B(Star), R(6),
+ B(Star6),
B(StaNamedProperty), R(4), U8(7), U8(4),
B(PopContext), R(3),
B(Mov), R(4), R(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
index 8f43e97280..b8cdbfb37e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
@@ -45,14 +45,14 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 22
+bytecode array length: 19
bytecodes: [
/* 34 S> */ B(CreateRegExpLiteral), U8(0), U8(0), U8(0),
- B(Star), R(1),
+ B(Star1),
/* 48 E> */ B(LdaNamedProperty), R(1), U8(1), U8(1),
- B(Star), R(0),
+ B(Star0),
B(LdaConstant), U8(2),
- B(Star), R(2),
+ B(Star2),
/* 48 E> */ B(CallProperty1), R(0), R(1), R(2), U8(3),
/* 61 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden
index 22831a3ff3..b4eec58192 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden
@@ -16,18 +16,18 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 26
+bytecode array length: 24
bytecodes: [
/* 45 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 64 S> */ B(Ldar), R(0),
/* 76 E> */ B(Add), R(0), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 86 S> */ B(LdaSmi), I8(10),
/* 95 E> */ B(TestGreaterThan), R(0), U8(1),
B(JumpIfFalse), U8(4),
/* 101 S> */ B(Jump), U8(5),
- /* 48 E> */ B(JumpLoop), U8(16), I8(0),
+ /* 48 E> */ B(JumpLoop), U8(15), I8(0),
/* 110 S> */ B(Ldar), R(0),
/* 122 S> */ B(Return),
]
@@ -47,12 +47,12 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 21
+bytecode array length: 19
bytecodes: [
/* 45 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 67 S> */ B(Add), R(0), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 77 S> */ B(LdaSmi), I8(10),
/* 86 E> */ B(TestGreaterThan), R(0), U8(1),
B(JumpIfFalse), U8(4),
@@ -73,12 +73,12 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 10
+bytecode array length: 8
bytecodes: [
/* 45 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 62 S> */ B(Add), R(0), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 84 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
index 7d7a8d39b3..8906df4536 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
@@ -15,18 +15,18 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 24
+bytecode array length: 22
bytecodes: [
/* 30 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 35 S> */ B(LdaSmi), I8(10),
/* 35 E> */ B(TestLessThan), R(0), U8(0),
- B(JumpIfFalse), U8(14),
+ B(JumpIfFalse), U8(13),
/* 56 S> */ B(Mov), R(0), R(1),
/* 43 S> */ B(Ldar), R(1),
B(Inc), U8(1),
- B(Star), R(0),
- /* 17 E> */ B(JumpLoop), U8(16), I8(0),
+ B(Star0),
+ /* 17 E> */ B(JumpLoop), U8(15), I8(0),
B(LdaUndefined),
/* 61 S> */ B(Return),
]
@@ -44,7 +44,7 @@ snippet: "
"
frame size: 15
parameter count: 1
-bytecode array length: 162
+bytecode array length: 149
bytecodes: [
/* 10 E> */ B(CreateFunctionContext), U8(0), U8(4),
B(PushContext), R(4),
@@ -61,9 +61,9 @@ bytecodes: [
/* 30 S> */ B(LdaZero),
/* 30 E> */ B(StaCurrentContextSlot), U8(2),
B(LdaCurrentContextSlot), U8(2),
- B(Star), R(0),
+ B(Star0),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
/* 59 E> */ B(CreateBlockContext), U8(2),
B(PushContext), R(6),
B(LdaTheHole),
@@ -72,54 +72,54 @@ bytecodes: [
B(StaCurrentContextSlot), U8(2),
B(LdaSmi), I8(1),
B(TestEqual), R(1), U8(0),
- B(JumpIfFalse), U8(7),
+ B(JumpIfFalse), U8(6),
B(LdaZero),
- B(Star), R(1),
+ B(Star1),
B(Jump), U8(8),
/* 43 S> */ B(LdaCurrentContextSlot), U8(2),
B(Inc), U8(1),
/* 43 E> */ B(StaCurrentContextSlot), U8(2),
B(LdaSmi), I8(1),
- B(Star), R(2),
+ B(Star2),
/* 35 S> */ B(LdaCurrentContextSlot), U8(2),
- B(Star), R(7),
+ B(Star7),
B(LdaSmi), I8(10),
/* 35 E> */ B(TestLessThan), R(7), U8(2),
B(JumpIfFalse), U8(4),
B(Jump), U8(6),
B(PopContext), R(6),
- B(Jump), U8(76),
+ B(Jump), U8(68),
B(LdaSmi), I8(1),
B(TestEqual), R(2), U8(3),
- B(JumpIfFalse), U8(53),
+ B(JumpIfFalse), U8(45),
/* 48 S> */ B(LdaLookupGlobalSlot), U8(3), U8(4), U8(3),
- B(Star), R(7),
+ B(Star7),
B(LdaConstant), U8(4),
- B(Star), R(8),
+ B(Star8),
B(LdaZero),
- B(Star), R(12),
+ B(Star12),
B(LdaSmi), I8(31),
- B(Star), R(13),
+ B(Star13),
B(LdaSmi), I8(48),
- B(Star), R(14),
+ B(Star14),
B(Mov), R(7), R(9),
B(Mov), R(8), R(10),
B(Mov), R(closure), R(11),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(9), U8(6),
- B(Star), R(7),
+ B(Star7),
/* 48 E> */ B(CallUndefinedReceiver1), R(7), R(8), U8(6),
B(LdaZero),
- B(Star), R(2),
+ B(Star2),
B(LdaCurrentContextSlot), U8(2),
- B(Star), R(0),
- /* 17 E> */ B(JumpLoop), U8(55), I8(1),
+ B(Star0),
+ /* 17 E> */ B(JumpLoop), U8(47), I8(1),
B(LdaSmi), I8(1),
B(TestEqual), R(2), U8(8),
B(JumpIfFalse), U8(6),
B(PopContext), R(6),
B(Jump), U8(7),
B(PopContext), R(6),
- B(JumpLoop), U8(121), I8(0),
+ B(JumpLoop), U8(110), I8(0),
B(PopContext), R(5),
B(LdaUndefined),
/* 61 S> */ B(Return),
@@ -143,13 +143,13 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 103
+bytecode array length: 94
bytecodes: [
/* 30 S> */ B(LdaZero),
- B(Star), R(3),
- B(Star), R(0),
+ B(Star3),
+ B(Star0),
B(LdaSmi), I8(1),
- B(Star), R(1),
+ B(Star1),
/* 78 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(4),
B(LdaTheHole),
@@ -158,41 +158,41 @@ bytecodes: [
B(StaCurrentContextSlot), U8(2),
B(LdaSmi), I8(1),
B(TestEqual), R(1), U8(0),
- B(JumpIfFalse), U8(7),
+ B(JumpIfFalse), U8(6),
B(LdaZero),
- B(Star), R(1),
+ B(Star1),
B(Jump), U8(8),
/* 43 S> */ B(LdaCurrentContextSlot), U8(2),
B(Inc), U8(1),
/* 43 E> */ B(StaCurrentContextSlot), U8(2),
B(LdaSmi), I8(1),
- B(Star), R(2),
+ B(Star2),
/* 35 S> */ B(LdaCurrentContextSlot), U8(2),
- B(Star), R(5),
+ B(Star5),
B(LdaSmi), I8(10),
/* 35 E> */ B(TestLessThan), R(5), U8(2),
B(JumpIfFalse), U8(4),
B(Jump), U8(6),
B(PopContext), R(4),
- B(Jump), U8(44),
+ B(Jump), U8(41),
B(LdaSmi), I8(1),
B(TestEqual), R(2), U8(3),
- B(JumpIfFalse), U8(21),
+ B(JumpIfFalse), U8(18),
/* 48 S> */ B(CreateClosure), U8(1), U8(0), U8(2),
- B(Star), R(5),
+ B(Star5),
/* 74 E> */ B(CallUndefinedReceiver0), R(5), U8(4),
B(LdaZero),
- B(Star), R(2),
+ B(Star2),
B(LdaCurrentContextSlot), U8(2),
- B(Star), R(0),
- /* 17 E> */ B(JumpLoop), U8(23), I8(1),
+ B(Star0),
+ /* 17 E> */ B(JumpLoop), U8(20), I8(1),
B(LdaSmi), I8(1),
B(TestEqual), R(2), U8(6),
B(JumpIfFalse), U8(6),
B(PopContext), R(4),
B(Jump), U8(7),
B(PopContext), R(4),
- B(JumpLoop), U8(89), I8(0),
+ B(JumpLoop), U8(83), I8(0),
B(LdaUndefined),
/* 80 S> */ B(Return),
]
@@ -212,24 +212,24 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 42
+bytecode array length: 37
bytecodes: [
/* 37 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
- B(Star), R(3),
+ B(Star3),
/* 28 S> */ B(LdaNamedProperty), R(3), U8(1), U8(1),
- B(Star), R(0),
+ B(Star0),
/* 31 S> */ B(LdaNamedProperty), R(3), U8(2), U8(3),
- B(Star), R(1),
+ B(Star1),
/* 55 S> */ B(LdaZero),
/* 55 E> */ B(TestGreaterThan), R(1), U8(5),
- B(JumpIfFalse), U8(18),
+ B(JumpIfFalse), U8(16),
/* 75 S> */ B(Ldar), R(1),
/* 77 E> */ B(Add), R(0), U8(6),
- B(Star), R(2),
+ B(Star2),
/* 62 S> */ B(Ldar), R(1),
B(Dec), U8(7),
- B(Star), R(1),
- /* 17 E> */ B(JumpLoop), U8(19), I8(0),
+ B(Star1),
+ /* 17 E> */ B(JumpLoop), U8(17), I8(0),
B(LdaUndefined),
/* 84 S> */ B(Return),
]
@@ -250,16 +250,16 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 65
+bytecode array length: 61
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(3),
B(Mov), R(this), R(4),
/* 11 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
B(ResumeGenerator), R(0), R(0), U8(3),
- B(Star), R(3),
+ B(Star3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
B(Ldar), R(3),
@@ -267,20 +267,20 @@ bytecodes: [
B(Ldar), R(3),
/* 62 S> */ B(Return),
/* 31 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star1),
/* 36 S> */ B(LdaSmi), I8(10),
/* 36 E> */ B(TestLessThan), R(1), U8(0),
- B(JumpIfFalse), U8(14),
+ B(JumpIfFalse), U8(13),
/* 57 S> */ B(Mov), R(1), R(2),
/* 44 S> */ B(Ldar), R(2),
B(Inc), U8(1),
- B(Star), R(1),
- /* 18 E> */ B(JumpLoop), U8(16), I8(0),
+ B(Star1),
+ /* 18 E> */ B(JumpLoop), U8(15), I8(0),
B(LdaUndefined),
/* 62 S> */ B(Return),
]
constant pool: [
- Smi [21],
+ Smi [20],
Smi [10],
Smi [7],
]
@@ -296,16 +296,16 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 97
+bytecode array length: 91
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(2),
B(Mov), R(this), R(3),
/* 11 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
B(ResumeGenerator), R(0), R(0), U8(2),
- B(Star), R(2),
+ B(Star2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(2),
@@ -313,17 +313,17 @@ bytecodes: [
B(Ldar), R(2),
/* 56 S> */ B(Return),
/* 31 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star1),
/* 36 S> */ B(LdaSmi), I8(10),
/* 36 E> */ B(TestLessThan), R(1), U8(0),
- B(JumpIfFalse), U8(46),
+ B(JumpIfFalse), U8(43),
/* 47 S> */ B(LdaFalse),
- B(Star), R(3),
+ B(Star3),
B(Mov), R(1), R(2),
B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(2), U8(2),
/* 47 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(1),
B(ResumeGenerator), R(0), R(0), U8(2),
- B(Star), R(2),
+ B(Star2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(4), U8(2), I8(0),
B(Ldar), R(2),
@@ -332,14 +332,14 @@ bytecodes: [
/* 56 S> */ B(Return),
/* 44 S> */ B(Ldar), R(1),
B(Inc), U8(1),
- B(Star), R(1),
- /* 18 E> */ B(JumpLoop), U8(48), I8(0),
+ B(Star1),
+ /* 18 E> */ B(JumpLoop), U8(45), I8(0),
B(LdaUndefined),
/* 56 S> */ B(Return),
]
constant pool: [
- Smi [21],
- Smi [66],
+ Smi [20],
+ Smi [62],
Smi [10],
Smi [7],
Smi [10],
@@ -357,41 +357,41 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 79
+bytecode array length: 70
bytecodes: [
B(Mov), R(closure), R(3),
B(Mov), R(this), R(4),
/* 16 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(3), U8(2),
- B(Star), R(0),
+ B(Star0),
B(Mov), R(context), R(3),
/* 36 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star1),
/* 41 S> */ B(LdaSmi), I8(10),
/* 41 E> */ B(TestLessThan), R(1), U8(0),
- B(JumpIfFalse), U8(14),
+ B(JumpIfFalse), U8(13),
/* 62 S> */ B(Mov), R(1), R(2),
/* 49 S> */ B(Ldar), R(2),
B(Inc), U8(1),
- B(Star), R(1),
- /* 23 E> */ B(JumpLoop), U8(16), I8(0),
+ B(Star1),
+ /* 23 E> */ B(JumpLoop), U8(15), I8(0),
B(LdaUndefined),
- B(Star), R(5),
+ B(Star5),
B(LdaFalse),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(0), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(4), U8(3),
/* 67 S> */ B(Return),
- B(Star), R(4),
+ B(Star4),
B(CreateCatchContext), R(4), U8(0),
- B(Star), R(3),
+ B(Star3),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(3),
B(PushContext), R(4),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(6),
+ B(Star6),
B(LdaFalse),
- B(Star), R(7),
+ B(Star7),
B(Mov), R(0), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(3),
/* 67 S> */ B(Return),
@@ -400,7 +400,7 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [15, 51, 51],
+ [14, 46, 46],
]
---
@@ -412,27 +412,27 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 115
+bytecode array length: 104
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(2),
B(Mov), R(this), R(3),
/* 16 E> */ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionEnter), R(2), U8(2),
- B(Star), R(0),
+ B(Star0),
B(Mov), R(context), R(2),
/* 36 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star1),
/* 41 S> */ B(LdaSmi), I8(10),
/* 41 E> */ B(TestLessThan), R(1), U8(0),
- B(JumpIfFalse), U8(46),
+ B(JumpIfFalse), U8(43),
/* 52 S> */ B(Mov), R(0), R(3),
B(Mov), R(1), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(3), U8(2),
/* 52 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
B(ResumeGenerator), R(0), R(0), U8(3),
- B(Star), R(3),
+ B(Star3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(4),
+ B(Star4),
B(LdaZero),
B(TestReferenceEqual), R(4),
B(JumpIfTrue), U8(5),
@@ -440,35 +440,35 @@ bytecodes: [
B(ReThrow),
/* 49 S> */ B(Ldar), R(1),
B(Inc), U8(1),
- B(Star), R(1),
- /* 23 E> */ B(JumpLoop), U8(48), I8(0),
+ B(Star1),
+ /* 23 E> */ B(JumpLoop), U8(45), I8(0),
B(LdaUndefined),
- B(Star), R(4),
+ B(Star4),
B(LdaTrue),
- B(Star), R(5),
+ B(Star5),
B(Mov), R(0), R(3),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(3), U8(3),
/* 61 S> */ B(Return),
- B(Star), R(3),
+ B(Star3),
B(CreateCatchContext), R(3), U8(1),
- B(Star), R(2),
+ B(Star2),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(2),
B(PushContext), R(3),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(5),
+ B(Star5),
B(LdaTrue),
- B(Star), R(6),
+ B(Star6),
B(Mov), R(0), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(3),
/* 61 S> */ B(Return),
]
constant pool: [
- Smi [44],
+ Smi [42],
SCOPE_INFO_TYPE,
]
handlers: [
- [19, 87, 87],
+ [18, 80, 80],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
index 2b7639a885..ab6dc6a762 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
@@ -27,7 +27,7 @@ snippet: "
"
frame size: 9
parameter count: 1
-bytecode array length: 184
+bytecode array length: 168
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(2),
@@ -36,16 +36,16 @@ bytecodes: [
B(LdaTheHole),
B(StaCurrentContextSlot), U8(3),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star6),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaConstant), U8(1),
- B(Star), R(4),
+ B(Star4),
/* 60 S> */ B(LdaConstant), U8(3),
B(StaCurrentContextSlot), U8(2),
- B(Star), R(7),
+ B(Star7),
/* 92 S> */ B(LdaConstant), U8(4),
- B(Star), R(8),
+ B(Star8),
B(LdaConstant), U8(5),
B(TestEqualStrict), R(8), U8(0),
B(Mov), R(3), R(5),
@@ -54,12 +54,12 @@ bytecodes: [
B(Ldar), R(8),
B(StaCurrentContextSlot), U8(3),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(5),
- B(Star), R(4),
+ B(Star4),
B(CreateClosure), U8(6), U8(1), U8(2),
- B(Star), R(5),
+ B(Star5),
B(StaNamedProperty), R(3), U8(7), U8(1),
B(CreateClosure), U8(8), U8(2), U8(2),
- B(Star), R(7),
+ B(Star7),
B(CallProperty0), R(7), R(3), U8(3),
B(PopContext), R(2),
B(Mov), R(3), R(0),
@@ -70,16 +70,16 @@ bytecodes: [
B(LdaTheHole),
B(StaCurrentContextSlot), U8(3),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star6),
B(CreateClosure), U8(11), U8(3), U8(2),
- B(Star), R(3),
+ B(Star3),
B(LdaConstant), U8(10),
- B(Star), R(4),
+ B(Star4),
/* 131 S> */ B(LdaConstant), U8(3),
B(StaCurrentContextSlot), U8(2),
- B(Star), R(7),
+ B(Star7),
/* 176 S> */ B(LdaConstant), U8(4),
- B(Star), R(8),
+ B(Star8),
B(LdaConstant), U8(5),
B(TestEqualStrict), R(8), U8(0),
B(Mov), R(3), R(5),
@@ -88,12 +88,12 @@ bytecodes: [
B(Ldar), R(8),
B(StaCurrentContextSlot), U8(3),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(5),
- B(Star), R(4),
+ B(Star4),
B(CreateClosure), U8(12), U8(4), U8(2),
- B(Star), R(5),
+ B(Star5),
B(StaNamedProperty), R(3), U8(7), U8(5),
B(CreateClosure), U8(13), U8(5), U8(2),
- B(Star), R(7),
+ B(Star7),
B(CallProperty0), R(7), R(3), U8(7),
B(PopContext), R(2),
B(Mov), R(3), R(1),
@@ -161,7 +161,7 @@ snippet: "
"
frame size: 12
parameter count: 1
-bytecode array length: 333
+bytecode array length: 303
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(3),
@@ -170,23 +170,23 @@ bytecodes: [
B(LdaTheHole),
B(StaCurrentContextSlot), U8(3),
B(LdaTheHole),
- B(Star), R(11),
+ B(Star11),
B(CreateClosure), U8(3), U8(0), U8(2),
- B(Star), R(8),
+ B(Star8),
B(LdaConstant), U8(2),
- B(Star), R(9),
+ B(Star9),
B(Mov), R(8), R(10),
B(CallRuntime), U16(Runtime::kDefineClass), R(9), U8(3),
- B(Star), R(9),
+ B(Star9),
B(CreateClosure), U8(4), U8(1), U8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaConstant), U8(1),
- B(Star), R(5),
+ B(Star5),
/* 77 S> */ B(LdaConstant), U8(5),
B(StaCurrentContextSlot), U8(2),
- B(Star), R(8),
+ B(Star8),
/* 109 S> */ B(LdaConstant), U8(6),
- B(Star), R(9),
+ B(Star9),
B(LdaConstant), U8(7),
B(TestEqualStrict), R(9), U8(0),
B(Mov), R(10), R(7),
@@ -196,12 +196,12 @@ bytecodes: [
B(Ldar), R(9),
B(StaCurrentContextSlot), U8(3),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(5),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(8), U8(2), U8(2),
- B(Star), R(6),
+ B(Star6),
B(StaNamedProperty), R(4), U8(9), U8(1),
B(CreateClosure), U8(10), U8(3), U8(2),
- B(Star), R(8),
+ B(Star8),
B(CallProperty0), R(8), R(4), U8(3),
B(PopContext), R(3),
B(Mov), R(4), R(0),
@@ -212,23 +212,23 @@ bytecodes: [
B(LdaTheHole),
B(StaCurrentContextSlot), U8(3),
B(LdaTheHole),
- B(Star), R(11),
+ B(Star11),
B(CreateClosure), U8(14), U8(4), U8(2),
- B(Star), R(8),
+ B(Star8),
B(LdaConstant), U8(13),
- B(Star), R(9),
+ B(Star9),
B(Mov), R(8), R(10),
B(CallRuntime), U16(Runtime::kDefineClass), R(9), U8(3),
- B(Star), R(9),
+ B(Star9),
B(CreateClosure), U8(15), U8(5), U8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaConstant), U8(12),
- B(Star), R(5),
+ B(Star5),
/* 165 S> */ B(LdaConstant), U8(5),
B(StaCurrentContextSlot), U8(2),
- B(Star), R(8),
+ B(Star8),
/* 210 S> */ B(LdaConstant), U8(6),
- B(Star), R(9),
+ B(Star9),
B(LdaConstant), U8(7),
B(TestEqualStrict), R(9), U8(0),
B(Mov), R(4), R(6),
@@ -238,14 +238,14 @@ bytecodes: [
B(Ldar), R(9),
B(StaCurrentContextSlot), U8(3),
B(CreateClosure), U8(16), U8(6), U8(2),
- B(Star), R(10),
+ B(Star10),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(6),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(17), U8(7), U8(2),
- B(Star), R(6),
+ B(Star6),
B(StaNamedProperty), R(4), U8(9), U8(5),
B(CreateClosure), U8(18), U8(8), U8(2),
- B(Star), R(8),
+ B(Star8),
B(CallProperty0), R(8), R(4), U8(7),
B(PopContext), R(3),
B(Mov), R(4), R(1),
@@ -256,14 +256,14 @@ bytecodes: [
B(LdaTheHole),
B(StaCurrentContextSlot), U8(3),
/* 313 E> */ B(CreateClosure), U8(21), U8(9), U8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaConstant), U8(20),
- B(Star), R(5),
+ B(Star5),
/* 333 S> */ B(LdaConstant), U8(5),
B(StaCurrentContextSlot), U8(2),
- B(Star), R(8),
+ B(Star8),
/* 378 S> */ B(LdaConstant), U8(6),
- B(Star), R(9),
+ B(Star9),
B(LdaConstant), U8(7),
B(TestEqualStrict), R(9), U8(0),
B(Mov), R(4), R(6),
@@ -273,23 +273,24 @@ bytecodes: [
B(Ldar), R(9),
B(StaCurrentContextSlot), U8(3),
B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(5),
- B(Star), R(5),
+ B(StaCurrentContextSlot), U8(4),
+ B(Star5),
+ B(Ldar), R(4),
+ B(StaCurrentContextSlot), U8(5),
B(CreateClosure), U8(22), U8(10), U8(2),
- B(Star), R(6),
+ B(Star6),
B(StaNamedProperty), R(4), U8(9), U8(9),
B(CreateClosure), U8(23), U8(11), U8(2),
- B(Star), R(8),
- B(Ldar), R(4),
- B(StaNamedProperty), R(8), U8(24), U8(11),
- B(CallProperty0), R(8), R(4), U8(13),
+ B(Star8),
+ B(CallProperty0), R(8), R(4), U8(11),
B(PopContext), R(3),
B(Mov), R(4), R(2),
/* 456 S> */ B(Ldar), R(0),
- /* 456 E> */ B(Construct), R(0), R(0), U8(0), U8(15),
+ /* 456 E> */ B(Construct), R(0), R(0), U8(0), U8(13),
/* 465 S> */ B(Ldar), R(1),
- /* 465 E> */ B(Construct), R(1), R(0), U8(0), U8(17),
+ /* 465 E> */ B(Construct), R(1), R(0), U8(0), U8(15),
/* 474 S> */ B(Ldar), R(2),
- /* 474 E> */ B(Construct), R(2), R(0), U8(0), U8(19),
+ /* 474 E> */ B(Construct), R(2), R(0), U8(0), U8(17),
B(LdaUndefined),
/* 483 S> */ B(Return),
]
@@ -318,7 +319,6 @@ constant pool: [
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- SYMBOL_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
index 7826544414..45209d642c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
@@ -18,20 +18,20 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 35
+bytecode array length: 32
bytecodes: [
/* 56 S> */ B(LdaCurrentContextSlot), U8(3),
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(1),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(268),
- B(Star), R(2),
+ B(JumpIfTrue), U8(16),
+ B(Wide), B(LdaSmi), I16(272),
+ B(Star2),
B(LdaConstant), U8(0),
- B(Star), R(3),
+ B(Star3),
B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
B(Throw),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(0),
+ B(Star0),
/* 70 E> */ B(CallAnyReceiver), R(0), R(1), U8(1), U8(0),
/* 73 S> */ B(Return),
]
@@ -53,12 +53,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 16
+bytecode array length: 14
bytecodes: [
- /* 56 S> */ B(Wide), B(LdaSmi), I16(270),
- B(Star), R(0),
+ /* 56 S> */ B(Wide), B(LdaSmi), I16(274),
+ B(Star0),
B(LdaConstant), U8(0),
- B(Star), R(1),
+ B(Star1),
/* 64 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(0), U8(2),
B(Throw),
]
@@ -80,12 +80,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 16
+bytecode array length: 14
bytecodes: [
- /* 56 S> */ B(Wide), B(LdaSmi), I16(270),
- B(Star), R(0),
+ /* 56 S> */ B(Wide), B(LdaSmi), I16(274),
+ B(Star0),
B(LdaConstant), U8(0),
- B(Star), R(1),
+ B(Star1),
B(CallRuntime), U16(Runtime::kNewTypeError), R(0), U8(2),
B(Throw),
]
@@ -113,59 +113,59 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 142
+bytecode array length: 127
bytecodes: [
/* 90 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(1),
+ B(Star1),
B(LdaCurrentContextSlot), U8(3),
/* 94 E> */ B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(268),
- B(Star), R(2),
+ B(JumpIfTrue), U8(16),
+ B(Wide), B(LdaSmi), I16(272),
+ B(Star2),
B(LdaConstant), U8(0),
- B(Star), R(3),
+ B(Star3),
B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
B(Throw),
B(CallRuntime), U16(Runtime::kLoadPrivateGetter), R(1), U8(1),
- B(Star), R(2),
+ B(Star2),
B(CallProperty0), R(2), R(0), U8(0),
B(Inc), U8(2),
- B(Star), R(2),
+ B(Star2),
/* 97 E> */ B(CallRuntime), U16(Runtime::kLoadPrivateSetter), R(1), U8(1),
- B(Star), R(3),
+ B(Star3),
B(CallProperty1), R(3), R(0), R(2), U8(3),
/* 105 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaCurrentContextSlot), U8(3),
/* 109 E> */ B(TestReferenceEqual), R(this),
B(Mov), R(this), R(1),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(269),
- B(Star), R(3),
+ B(JumpIfTrue), U8(16),
+ B(Wide), B(LdaSmi), I16(273),
+ B(Star3),
B(LdaConstant), U8(0),
- B(Star), R(4),
+ B(Star4),
B(CallRuntime), U16(Runtime::kNewTypeError), R(3), U8(2),
B(Throw),
B(CallRuntime), U16(Runtime::kLoadPrivateSetter), R(2), U8(1),
- B(Star), R(3),
+ B(Star3),
B(CallProperty1), R(3), R(1), R(0), U8(5),
/* 122 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
- B(Star), R(1),
+ B(Star1),
B(LdaCurrentContextSlot), U8(3),
/* 133 E> */ B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(268),
- B(Star), R(2),
+ B(JumpIfTrue), U8(16),
+ B(Wide), B(LdaSmi), I16(272),
+ B(Star2),
B(LdaConstant), U8(0),
- B(Star), R(3),
+ B(Star3),
B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
B(Throw),
B(CallRuntime), U16(Runtime::kLoadPrivateGetter), R(1), U8(1),
- B(Star), R(2),
+ B(Star2),
B(CallProperty0), R(2), R(0), U8(7),
/* 137 S> */ B(Return),
]
@@ -186,12 +186,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 16
+bytecode array length: 14
bytecodes: [
- /* 60 S> */ B(Wide), B(LdaSmi), I16(272),
- B(Star), R(0),
+ /* 60 S> */ B(Wide), B(LdaSmi), I16(276),
+ B(Star0),
B(LdaConstant), U8(0),
- B(Star), R(1),
+ B(Star1),
B(CallRuntime), U16(Runtime::kNewTypeError), R(0), U8(2),
B(Throw),
]
@@ -212,12 +212,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 16
+bytecode array length: 14
bytecodes: [
- /* 53 S> */ B(Wide), B(LdaSmi), I16(271),
- B(Star), R(0),
+ /* 53 S> */ B(Wide), B(LdaSmi), I16(275),
+ B(Star0),
B(LdaConstant), U8(0),
- B(Star), R(1),
+ B(Star1),
B(CallRuntime), U16(Runtime::kNewTypeError), R(0), U8(2),
B(Throw),
]
@@ -238,12 +238,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 16
+bytecode array length: 14
bytecodes: [
- /* 60 S> */ B(Wide), B(LdaSmi), I16(272),
- B(Star), R(0),
+ /* 60 S> */ B(Wide), B(LdaSmi), I16(276),
+ B(Star0),
B(LdaConstant), U8(0),
- B(Star), R(1),
+ B(Star1),
/* 68 E> */ B(CallRuntime), U16(Runtime::kNewTypeError), R(0), U8(2),
B(Throw),
]
@@ -264,12 +264,12 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 16
+bytecode array length: 14
bytecodes: [
- /* 46 S> */ B(Wide), B(LdaSmi), I16(271),
- B(Star), R(1),
+ /* 46 S> */ B(Wide), B(LdaSmi), I16(275),
+ B(Star1),
B(LdaConstant), U8(0),
- B(Star), R(2),
+ B(Star2),
B(CallRuntime), U16(Runtime::kNewTypeError), R(1), U8(2),
B(Throw),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodDeclaration.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodDeclaration.golden
index d733264629..62907f9016 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodDeclaration.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodDeclaration.golden
@@ -15,21 +15,21 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 40
+bytecode array length: 36
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
- B(LdaTheHole),
- B(Star), R(5),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(2),
+ B(StaCurrentContextSlot), U8(2),
+ B(LdaTheHole),
+ B(Star5),
+ B(CreateClosure), U8(3), U8(1), U8(2),
+ B(Star2),
B(LdaConstant), U8(1),
- B(Star), R(3),
+ B(Star3),
B(Mov), R(2), R(4),
B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
- B(Star), R(3),
- B(CreateClosure), U8(3), U8(1), U8(2),
- B(StaCurrentContextSlot), U8(2),
+ B(Star3),
B(PopContext), R(1),
B(Mov), R(4), R(0),
B(LdaUndefined),
@@ -54,23 +54,23 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 50
+bytecode array length: 44
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(1),
- B(Star), R(3),
+ B(Star3),
B(Mov), R(2), R(4),
B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
- B(Star), R(3),
+ B(Star3),
B(CreateClosure), U8(3), U8(1), U8(2),
- B(Star), R(4),
+ B(Star4),
B(LdaNull),
- B(Star), R(5),
+ B(Star5),
B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(4), U8(2),
B(StaCurrentContextSlot), U8(2),
B(PopContext), R(1),
@@ -97,23 +97,23 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 50
+bytecode array length: 44
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(1),
- B(Star), R(3),
+ B(Star3),
B(Mov), R(2), R(4),
B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
- B(Star), R(3),
+ B(Star3),
B(LdaNull),
- B(Star), R(4),
+ B(Star4),
B(CreateClosure), U8(3), U8(1), U8(2),
- B(Star), R(5),
+ B(Star5),
B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(4), U8(2),
B(StaCurrentContextSlot), U8(2),
B(PopContext), R(1),
@@ -141,23 +141,23 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 53
+bytecode array length: 47
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
- B(Star), R(5),
+ B(Star5),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(1),
- B(Star), R(3),
+ B(Star3),
B(Mov), R(2), R(4),
B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(3),
- B(Star), R(3),
+ B(Star3),
B(CreateClosure), U8(3), U8(1), U8(2),
- B(Star), R(4),
+ B(Star4),
B(CreateClosure), U8(4), U8(2), U8(2),
- B(Star), R(5),
+ B(Star5),
B(CallRuntime), U16(Runtime::kCreatePrivateAccessors), R(4), U8(2),
B(StaCurrentContextSlot), U8(2),
B(PopContext), R(1),
@@ -186,27 +186,27 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 57
+bytecode array length: 52
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaConstant), U8(2),
- B(Star), R(3),
+ B(Star3),
B(CallRuntime), U16(Runtime::kCreatePrivateBrandSymbol), R(3), U8(1),
B(StaCurrentContextSlot), U8(4),
- B(LdaTheHole),
- B(Star), R(6),
B(CreateClosure), U8(3), U8(0), U8(2),
- B(Star), R(2),
+ B(StaCurrentContextSlot), U8(2),
+ B(CreateClosure), U8(4), U8(1), U8(2),
+ B(StaCurrentContextSlot), U8(3),
+ B(LdaTheHole),
+ B(Star6),
+ B(CreateClosure), U8(5), U8(2), U8(2),
+ B(Star2),
B(LdaConstant), U8(1),
- B(Star), R(4),
+ B(Star4),
B(Mov), R(2), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
- B(Star), R(4),
- B(CreateClosure), U8(4), U8(1), U8(2),
- B(StaCurrentContextSlot), U8(2),
- B(CreateClosure), U8(5), U8(2), U8(2),
- B(StaCurrentContextSlot), U8(3),
+ B(Star4),
B(PopContext), R(1),
B(Mov), R(5), R(0),
B(LdaUndefined),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
index 0a2c2995e6..322e08134a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
@@ -228,10 +228,10 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 525
+bytecode array length: 524
bytecodes: [
/* 33 S> */ B(CreateEmptyObjectLiteral),
- B(Star), R(0),
+ B(Star0),
/* 41 S> */ B(LdaNamedProperty), R(0), U8(0), U8(0),
/* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(2),
/* 67 S> */ B(LdaNamedProperty), R(0), U8(2), U8(4),
@@ -639,10 +639,10 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 525
+bytecode array length: 524
bytecodes: [
/* 49 S> */ B(CreateEmptyObjectLiteral),
- B(Star), R(0),
+ B(Star0),
/* 57 S> */ B(LdaNamedProperty), R(0), U8(0), U8(0),
/* 70 S> */ B(LdaNamedProperty), R(0), U8(1), U8(2),
/* 83 S> */ B(LdaNamedProperty), R(0), U8(2), U8(4),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConcat.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConcat.golden
index 9e94fe593d..e232c8a999 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConcat.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConcat.golden
@@ -13,15 +13,15 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 21
+bytecode array length: 18
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(LdaSmi), I8(2),
- B(Star), R(1),
+ B(Star1),
/* 56 S> */ B(Ldar), R(1),
/* 65 E> */ B(Add), R(0), U8(0),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(0),
/* 69 E> */ B(Add), R(2), U8(1),
/* 80 S> */ B(Return),
@@ -40,17 +40,17 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 25
+bytecode array length: 21
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(LdaSmi), I8(2),
- B(Star), R(1),
+ B(Star1),
/* 56 S> */ B(LdaConstant), U8(0),
- B(Star), R(2),
+ B(Star2),
B(Ldar), R(0),
/* 72 E> */ B(Add), R(2), U8(0),
- B(Star), R(2),
+ B(Star2),
B(Ldar), R(1),
/* 76 E> */ B(Add), R(2), U8(1),
/* 80 S> */ B(Return),
@@ -69,15 +69,15 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 21
+bytecode array length: 18
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(LdaSmi), I8(2),
- B(Star), R(1),
+ B(Star1),
/* 56 S> */ B(LdaConstant), U8(0),
/* 65 E> */ B(Add), R(0), U8(0),
- B(Star), R(2),
+ B(Star2),
B(Ldar), R(1),
/* 76 E> */ B(Add), R(2), U8(1),
/* 80 S> */ B(Return),
@@ -96,23 +96,23 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 42
+bytecode array length: 36
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(LdaSmi), I8(2),
- B(Star), R(1),
+ B(Star1),
/* 56 S> */ B(LdaConstant), U8(0),
- B(Star), R(2),
+ B(Star2),
B(Ldar), R(0),
/* 69 E> */ B(Add), R(2), U8(0),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(1),
/* 73 E> */ B(Add), R(2), U8(1),
- B(Star), R(2),
+ B(Star2),
B(Ldar), R(1),
/* 81 E> */ B(Add), R(2), U8(2),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(2),
/* 85 E> */ B(Add), R(2), U8(3),
/* 93 E> */ B(AddSmi), I8(1), U8(4),
@@ -134,17 +134,17 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 28
+bytecode array length: 24
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(LdaSmi), I8(2),
- B(Star), R(1),
+ B(Star1),
/* 56 S> */ B(LdaConstant), U8(0),
/* 66 E> */ B(Add), R(0), U8(0),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(0),
- B(Star), R(3),
+ B(Star3),
B(Ldar), R(1),
/* 90 E> */ B(Add), R(3), U8(1),
/* 78 E> */ B(Add), R(2), U8(2),
@@ -165,22 +165,22 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 41
+bytecode array length: 35
bytecodes: [
/* 30 E> */ B(CreateClosure), U8(0), U8(0), U8(2),
- B(Star), R(2),
+ B(Star2),
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(LdaSmi), I8(2),
- B(Star), R(1),
+ B(Star1),
/* 80 S> */ B(LdaConstant), U8(1),
- B(Star), R(3),
+ B(Star3),
/* 98 E> */ B(CallUndefinedReceiver2), R(2), R(0), R(1), U8(0),
/* 96 E> */ B(Add), R(3), U8(2),
- B(Star), R(3),
+ B(Star3),
B(Ldar), R(0),
/* 108 E> */ B(Add), R(3), U8(3),
- B(Star), R(3),
+ B(Star3),
B(Ldar), R(1),
/* 112 E> */ B(Add), R(3), U8(4),
/* 116 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden
index 385c3089c1..6c1858d59a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden
@@ -28,10 +28,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
/* 42 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 58 S> */ B(LdaConstant), U8(1),
/* 81 S> */ B(Return),
]
@@ -48,10 +48,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
/* 42 S> */ B(LdaConstant), U8(0),
- B(Star), R(0),
+ B(Star0),
/* 57 S> */ B(LdaConstant), U8(0),
/* 78 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
index 07feb7864a..da9b0975e1 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
@@ -19,10 +19,10 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 20
+bytecode array length: 19
bytecodes: [
/* 93 E> */ B(CreateRestParameter),
- B(Star), R(2),
+ B(Star2),
B(Mov), R(closure), R(1),
/* 93 S> */ B(Ldar), R(1),
B(GetSuperConstructor), R(4),
@@ -51,21 +51,21 @@ snippet: "
"
frame size: 9
parameter count: 1
-bytecode array length: 41
+bytecode array length: 38
bytecodes: [
/* 128 E> */ B(CreateRestParameter),
- B(Star), R(3),
+ B(Star3),
B(Mov), R(closure), R(1),
B(Mov), R(3), R(2),
/* 140 S> */ B(Ldar), R(closure),
B(GetSuperConstructor), R(5),
B(LdaSmi), I8(1),
- B(Star), R(6),
+ B(Star6),
/* 152 E> */ B(ThrowIfNotSuperConstructor), R(5),
B(Ldar), R(0),
B(Mov), R(3), R(7),
/* 140 E> */ B(ConstructWithSpread), R(5), R(6), U8(2), U8(0),
- B(Star), R(8),
+ B(Star8),
B(Ldar), R(this),
B(ThrowSuperAlreadyCalledIfNotHole),
B(Mov), R(8), R(this),
@@ -93,49 +93,49 @@ snippet: "
"
frame size: 11
parameter count: 1
-bytecode array length: 120
+bytecode array length: 111
bytecodes: [
/* 128 E> */ B(CreateRestParameter),
- B(Star), R(3),
+ B(Star3),
B(Mov), R(closure), R(1),
B(Mov), R(3), R(2),
/* 140 S> */ B(Ldar), R(closure),
B(GetSuperConstructor), R(5),
B(CreateEmptyArrayLiteral), U8(0),
- B(Star), R(7),
+ B(Star7),
B(LdaZero),
- B(Star), R(6),
+ B(Star6),
B(LdaSmi), I8(1),
B(StaInArrayLiteral), R(7), R(6), U8(1),
B(Ldar), R(6),
B(Inc), U8(3),
- /* 152 S> */ B(Star), R(6),
+ /* 152 S> */ B(Star6),
/* 152 E> */ B(GetIterator), R(3), U8(4), U8(6),
B(Mov), R(1), R(4),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(9),
+ B(Star9),
B(LdaNamedProperty), R(9), U8(0), U8(8),
- B(Star), R(8),
+ B(Star8),
B(CallProperty0), R(8), R(9), U8(14),
- B(Star), R(10),
+ B(Star10),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
B(LdaNamedProperty), R(10), U8(1), U8(16),
- B(JumpIfToBooleanTrue), U8(19),
+ B(JumpIfToBooleanTrue), U8(18),
B(LdaNamedProperty), R(10), U8(2), U8(10),
B(StaInArrayLiteral), R(7), R(6), U8(1),
B(Ldar), R(6),
B(Inc), U8(3),
- B(Star), R(6),
- B(JumpLoop), U8(33), I8(0),
+ B(Star6),
+ B(JumpLoop), U8(31), I8(0),
B(LdaSmi), I8(1),
B(StaInArrayLiteral), R(7), R(6), U8(1),
B(ThrowIfNotSuperConstructor), R(5),
B(Mov), R(5), R(6),
B(Mov), R(0), R(8),
/* 140 E> */ B(CallJSRuntime), U8(%reflect_construct), R(6), U8(3),
- B(Star), R(9),
+ B(Star9),
B(Ldar), R(this),
B(ThrowSuperAlreadyCalledIfNotHole),
B(Mov), R(9), R(this),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
index 5b12e3fc79..796b3ad2e0 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
@@ -15,10 +15,10 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 31
+bytecode array length: 30
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(LdaSmi), I8(1),
B(TestEqualStrict), R(0), U8(0),
B(Mov), R(0), R(1),
@@ -49,23 +49,23 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 37
+bytecode array length: 34
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(LdaSmi), I8(1),
B(TestEqualStrict), R(0), U8(0),
B(Mov), R(0), R(1),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(2),
B(TestEqualStrict), R(1), U8(0),
- B(JumpIfTrue), U8(10),
- B(Jump), U8(14),
+ B(JumpIfTrue), U8(9),
+ B(Jump), U8(12),
/* 66 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
- /* 73 S> */ B(Jump), U8(8),
+ B(Star0),
+ /* 73 S> */ B(Jump), U8(7),
/* 89 S> */ B(LdaSmi), I8(3),
- B(Star), R(0),
+ B(Star0),
/* 96 S> */ B(Jump), U8(2),
B(LdaUndefined),
/* 105 S> */ B(Return),
@@ -85,22 +85,22 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 35
+bytecode array length: 32
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(LdaSmi), I8(1),
B(TestEqualStrict), R(0), U8(0),
B(Mov), R(0), R(1),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(2),
B(TestEqualStrict), R(1), U8(0),
- B(JumpIfTrue), U8(8),
- B(Jump), U8(12),
+ B(JumpIfTrue), U8(7),
+ B(Jump), U8(10),
/* 66 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 98 S> */ B(LdaSmi), I8(3),
- B(Star), R(0),
+ B(Star0),
/* 105 S> */ B(Jump), U8(2),
B(LdaUndefined),
/* 114 S> */ B(Return),
@@ -121,10 +121,10 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 35
+bytecode array length: 33
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(LdaSmi), I8(2),
B(TestEqualStrict), R(0), U8(0),
B(Mov), R(0), R(1),
@@ -133,10 +133,10 @@ bytecodes: [
B(TestEqualStrict), R(1), U8(0),
B(JumpIfTrue), U8(6),
B(Jump), U8(6),
- /* 66 S> */ B(Jump), U8(10),
- /* 82 S> */ B(Jump), U8(8),
+ /* 66 S> */ B(Jump), U8(9),
+ /* 82 S> */ B(Jump), U8(7),
/* 99 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 106 S> */ B(Jump), U8(2),
B(LdaUndefined),
/* 115 S> */ B(Return),
@@ -157,27 +157,27 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 43
+bytecode array length: 38
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(TypeOf),
- B(Star), R(1),
+ B(Star1),
B(LdaSmi), I8(2),
B(TestEqualStrict), R(1), U8(0),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(3),
B(TestEqualStrict), R(1), U8(0),
- B(JumpIfTrue), U8(10),
- B(Jump), U8(14),
+ B(JumpIfTrue), U8(9),
+ B(Jump), U8(12),
/* 74 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
- /* 81 S> */ B(Jump), U8(14),
+ B(Star0),
+ /* 81 S> */ B(Jump), U8(12),
/* 97 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
- /* 104 S> */ B(Jump), U8(8),
+ B(Star0),
+ /* 104 S> */ B(Jump), U8(7),
/* 121 S> */ B(LdaSmi), I8(3),
- B(Star), R(0),
+ B(Star0),
/* 128 S> */ B(Jump), U8(2),
B(LdaUndefined),
/* 137 S> */ B(Return),
@@ -197,20 +197,20 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 29
+bytecode array length: 26
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(TypeOf),
B(TestEqualStrict), R(0), U8(0),
B(Mov), R(0), R(1),
B(JumpIfTrue), U8(4),
- B(Jump), U8(8),
+ B(Jump), U8(7),
/* 74 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
- /* 81 S> */ B(Jump), U8(8),
+ B(Star0),
+ /* 81 S> */ B(Jump), U8(7),
/* 98 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 105 S> */ B(Jump), U8(2),
B(LdaUndefined),
/* 114 S> */ B(Return),
@@ -297,156 +297,154 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 289
+bytecode array length: 223
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(LdaSmi), I8(1),
B(TestEqualStrict), R(0), U8(0),
B(Mov), R(0), R(1),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(2),
B(TestEqualStrict), R(1), U8(0),
- B(JumpIfTrueConstant), U8(0),
- B(JumpConstant), U8(1),
+ B(JumpIfTrue), U8(198),
+ B(Jump), U8(201),
/* 68 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 77 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 86 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 95 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 104 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 113 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 122 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 131 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 140 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 149 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 158 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 167 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 176 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 185 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 194 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 203 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 212 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 221 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 230 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 239 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 248 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 257 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 266 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 275 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 284 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 293 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 302 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 311 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 320 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 329 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 338 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 347 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 356 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 365 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 374 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 383 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 392 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 401 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 410 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 419 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 428 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 437 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 446 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 455 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 464 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 473 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 482 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 491 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 500 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 509 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 518 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 527 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 536 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 545 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 554 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 563 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 572 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 581 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 590 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 599 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 608 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 617 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 626 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 635 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
- /* 644 S> */ B(Jump), U8(8),
+ B(Star0),
+ /* 644 S> */ B(Jump), U8(7),
/* 662 S> */ B(LdaSmi), I8(3),
- B(Star), R(0),
+ B(Star0),
/* 671 S> */ B(Jump), U8(2),
B(LdaUndefined),
/* 680 S> */ B(Return),
]
constant pool: [
- Smi [262],
- Smi [266],
]
handlers: [
]
@@ -465,33 +463,33 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 57
+bytecode array length: 52
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(LdaSmi), I8(1),
B(TestEqualStrict), R(0), U8(0),
B(Mov), R(0), R(1),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(2),
B(TestEqualStrict), R(1), U8(0),
- B(JumpIfTrue), U8(32),
- B(Jump), U8(34),
+ B(JumpIfTrue), U8(29),
+ B(Jump), U8(30),
/* 70 S> */ B(Ldar), R(0),
/* 79 E> */ B(AddSmi), I8(1), U8(1),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(2),
B(TestEqualStrict), R(2), U8(2),
B(JumpIfTrue), U8(4),
- B(Jump), U8(8),
+ B(Jump), U8(7),
/* 101 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
- /* 108 S> */ B(Jump), U8(8),
+ B(Star0),
+ /* 108 S> */ B(Jump), U8(7),
/* 131 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
/* 138 S> */ B(Jump), U8(2),
/* 176 S> */ B(LdaSmi), I8(3),
- B(Star), R(0),
+ B(Star0),
B(LdaUndefined),
/* 185 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TemplateLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TemplateLiterals.golden
index b7821fa66c..e360a1b2ce 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TemplateLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TemplateLiterals.golden
@@ -13,19 +13,19 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 27
+bytecode array length: 23
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(LdaSmi), I8(2),
- B(Star), R(1),
+ B(Star1),
/* 56 S> */ B(Ldar), R(0),
B(ToString),
- B(Star), R(2),
+ B(Star2),
B(Ldar), R(1),
/* 70 E> */ B(ToString),
B(Add), R(2), U8(0),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(0),
B(Add), R(2), U8(0),
/* 80 S> */ B(Return),
@@ -44,18 +44,18 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 27
+bytecode array length: 23
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(LdaSmi), I8(2),
- B(Star), R(1),
+ B(Star1),
/* 56 S> */ B(LdaConstant), U8(0),
- B(Star), R(2),
+ B(Star2),
B(Ldar), R(0),
/* 72 E> */ B(ToString),
B(Add), R(2), U8(0),
- B(Star), R(2),
+ B(Star2),
B(Ldar), R(1),
/* 76 E> */ B(ToString),
B(Add), R(2), U8(0),
@@ -75,18 +75,18 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 27
+bytecode array length: 23
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(LdaSmi), I8(2),
- B(Star), R(1),
+ B(Star1),
/* 56 S> */ B(Ldar), R(0),
B(ToString),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(0),
B(Add), R(2), U8(0),
- B(Star), R(2),
+ B(Star2),
B(Ldar), R(1),
/* 76 E> */ B(ToString),
B(Add), R(2), U8(0),
@@ -106,28 +106,28 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 49
+bytecode array length: 42
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(LdaSmi), I8(2),
- B(Star), R(1),
+ B(Star1),
/* 56 S> */ B(LdaConstant), U8(0),
- B(Star), R(2),
+ B(Star2),
B(Ldar), R(0),
/* 69 E> */ B(ToString),
B(Add), R(2), U8(0),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(1),
B(Add), R(2), U8(0),
- B(Star), R(2),
+ B(Star2),
B(Ldar), R(1),
/* 76 E> */ B(ToString),
B(Add), R(2), U8(0),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(2),
B(Add), R(2), U8(0),
- B(Star), R(2),
+ B(Star2),
B(LdaSmi), I8(1),
B(ToString),
B(Add), R(2), U8(0),
@@ -149,20 +149,20 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 34
+bytecode array length: 29
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(LdaSmi), I8(2),
- B(Star), R(1),
+ B(Star1),
/* 56 S> */ B(Ldar), R(0),
B(ToString),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(0),
B(Add), R(2), U8(1),
- B(Star), R(2),
+ B(Star2),
B(LdaConstant), U8(0),
- B(Star), R(3),
+ B(Star3),
B(Ldar), R(1),
/* 87 E> */ B(ToString),
B(Add), R(3), U8(2),
@@ -184,24 +184,24 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 44
+bytecode array length: 38
bytecodes: [
/* 30 E> */ B(CreateClosure), U8(0), U8(0), U8(2),
- B(Star), R(2),
+ B(Star2),
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(LdaSmi), I8(2),
- B(Star), R(1),
+ B(Star1),
/* 80 S> */ B(LdaConstant), U8(1),
- B(Star), R(3),
+ B(Star3),
/* 96 E> */ B(CallUndefinedReceiver2), R(2), R(0), R(1), U8(1),
B(ToString),
B(Add), R(3), U8(0),
- B(Star), R(3),
+ B(Star3),
B(Ldar), R(0),
/* 108 E> */ B(ToString),
B(Add), R(3), U8(0),
- B(Star), R(3),
+ B(Star3),
B(Ldar), R(1),
/* 112 E> */ B(ToString),
B(Add), R(3), U8(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden
index 35926e7711..4d5102e8f7 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden
@@ -44,10 +44,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 10
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
/* 45 S> */ B(JumpIfToBooleanFalse), U8(5),
/* 54 S> */ B(LdaConstant), U8(0),
/* 54 E> */ B(Throw),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
index f080925637..e007b667a2 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
@@ -12,14 +12,14 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 33
+bytecode array length: 31
bytecodes: [
B(LdaConstant), U8(0),
- B(Star), R(1),
+ B(Star1),
B(Mov), R(closure), R(2),
/* 0 E> */ B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2),
/* 8 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(41),
- B(Star), R(1),
+ B(Star1),
/* 16 E> */ B(CreateClosure), U8(2), U8(0), U8(0),
B(StaNamedOwnProperty), R(1), U8(3), U8(1),
B(Ldar), R(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
index 5a299760ab..48d871da71 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
@@ -11,14 +11,14 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 22
+bytecode array length: 20
bytecodes: [
B(Mov), R(context), R(0),
/* 40 S> */ B(LdaSmi), I8(1),
/* 49 S> */ B(Return),
- B(Star), R(1),
+ B(Star1),
B(CreateCatchContext), R(1), U8(0),
- B(Star), R(0),
+ B(Star0),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(0),
@@ -41,15 +41,15 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 54
+bytecode array length: 47
bytecodes: [
B(Mov), R(context), R(1),
/* 47 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
- B(Jump), U8(17),
- B(Star), R(2),
+ B(Star0),
+ B(Jump), U8(15),
+ B(Star2),
/* 49 E> */ B(CreateCatchContext), R(2), U8(0),
- B(Star), R(1),
+ B(Star1),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(1),
@@ -57,17 +57,17 @@ bytecodes: [
B(PopContext), R(2),
B(Mov), R(context), R(1),
/* 75 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
- B(Jump), U8(21),
- B(Star), R(2),
+ B(Star0),
+ B(Jump), U8(18),
+ B(Star2),
/* 77 E> */ B(CreateCatchContext), R(2), U8(1),
- B(Star), R(1),
+ B(Star1),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(1),
B(PushContext), R(2),
/* 95 S> */ B(LdaSmi), I8(3),
- B(Star), R(0),
+ B(Star0),
B(PopContext), R(2),
B(LdaUndefined),
/* 103 S> */ B(Return),
@@ -77,7 +77,7 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [3, 7, 9],
- [27, 31, 33],
+ [3, 6, 8],
+ [24, 27, 29],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden
index d06342c1b8..a85dd9316a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden
@@ -12,25 +12,25 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 45
+bytecode array length: 37
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
B(Mov), R(context), R(3),
/* 51 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
B(LdaSmi), I8(-1),
- B(Star), R(2),
- B(Star), R(1),
- B(Jump), U8(7),
- B(Star), R(2),
+ B(Star2),
+ B(Star1),
+ B(Jump), U8(5),
+ B(Star2),
B(LdaZero),
- B(Star), R(1),
+ B(Star1),
B(LdaTheHole),
/* 53 E> */ B(SetPendingMessage),
- B(Star), R(3),
+ B(Star3),
/* 70 S> */ B(LdaSmi), I8(3),
- B(Star), R(0),
+ B(Star0),
B(Ldar), R(3),
/* 72 E> */ B(SetPendingMessage),
B(LdaZero),
@@ -44,7 +44,7 @@ bytecodes: [
constant pool: [
]
handlers: [
- [7, 11, 19],
+ [6, 9, 15],
]
---
@@ -54,37 +54,37 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 69
+bytecode array length: 58
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
+ B(Star0),
B(Mov), R(context), R(3),
B(Mov), R(context), R(4),
/* 51 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
- B(Jump), U8(21),
- B(Star), R(5),
+ B(Star0),
+ B(Jump), U8(18),
+ B(Star5),
/* 53 E> */ B(CreateCatchContext), R(5), U8(0),
- B(Star), R(4),
+ B(Star4),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(4),
B(PushContext), R(5),
/* 71 S> */ B(LdaSmi), I8(20),
- B(Star), R(0),
+ B(Star0),
B(PopContext), R(5),
B(LdaSmi), I8(-1),
- B(Star), R(2),
- B(Star), R(1),
- B(Jump), U8(7),
- B(Star), R(2),
+ B(Star2),
+ B(Star1),
+ B(Jump), U8(5),
+ B(Star2),
B(LdaZero),
- B(Star), R(1),
+ B(Star1),
B(LdaTheHole),
/* 73 E> */ B(SetPendingMessage),
- B(Star), R(3),
+ B(Star3),
/* 90 S> */ B(LdaSmi), I8(3),
- B(Star), R(0),
+ B(Star0),
B(Ldar), R(3),
/* 92 E> */ B(SetPendingMessage),
B(LdaZero),
@@ -99,8 +99,8 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [7, 35, 43],
- [10, 14, 16],
+ [6, 30, 36],
+ [9, 12, 14],
]
---
@@ -111,47 +111,47 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 89
+bytecode array length: 76
bytecodes: [
B(Mov), R(context), R(3),
B(Mov), R(context), R(4),
B(Mov), R(context), R(5),
/* 55 S> */ B(LdaSmi), I8(1),
- B(Star), R(0),
- B(Jump), U8(21),
- B(Star), R(6),
+ B(Star0),
+ B(Jump), U8(18),
+ B(Star6),
/* 57 E> */ B(CreateCatchContext), R(6), U8(0),
- B(Star), R(5),
+ B(Star5),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(5),
B(PushContext), R(6),
/* 74 S> */ B(LdaSmi), I8(2),
- B(Star), R(0),
+ B(Star0),
B(PopContext), R(6),
- B(Jump), U8(21),
- B(Star), R(5),
+ B(Jump), U8(18),
+ B(Star5),
/* 76 E> */ B(CreateCatchContext), R(5), U8(1),
- B(Star), R(4),
+ B(Star4),
B(LdaTheHole),
B(SetPendingMessage),
B(Ldar), R(4),
B(PushContext), R(5),
/* 95 S> */ B(LdaSmi), I8(20),
- B(Star), R(0),
+ B(Star0),
B(PopContext), R(5),
B(LdaSmi), I8(-1),
- B(Star), R(2),
- B(Star), R(1),
- B(Jump), U8(7),
- B(Star), R(2),
+ B(Star2),
+ B(Star1),
+ B(Jump), U8(5),
+ B(Star2),
B(LdaZero),
- B(Star), R(1),
+ B(Star1),
B(LdaTheHole),
/* 97 E> */ B(SetPendingMessage),
- B(Star), R(3),
+ B(Star3),
/* 114 S> */ B(LdaSmi), I8(3),
- B(Star), R(0),
+ B(Star0),
B(Ldar), R(3),
/* 116 E> */ B(SetPendingMessage),
B(LdaZero),
@@ -167,8 +167,8 @@ constant pool: [
SCOPE_INFO_TYPE,
]
handlers: [
- [3, 55, 63],
- [6, 34, 36],
- [9, 13, 15],
+ [3, 48, 54],
+ [6, 30, 32],
+ [9, 12, 14],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden
index ea335954e0..148073c74b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden
@@ -16,10 +16,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
/* 24 S> */ B(LdaSmi), I8(13),
- B(Star), R(0),
+ B(Star0),
/* 29 S> */ B(TypeOf),
/* 46 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
index b564000c3b..2bfb8b3b62 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
@@ -15,17 +15,17 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 23
+bytecode array length: 21
bytecodes: [
/* 42 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 54 S> */ B(LdaSmi), I8(10),
/* 54 E> */ B(TestEqual), R(0), U8(0),
- B(JumpIfTrue), U8(12),
+ B(JumpIfTrue), U8(11),
/* 65 S> */ B(Ldar), R(0),
/* 71 E> */ B(AddSmi), I8(10), U8(1),
- B(Star), R(0),
- /* 45 E> */ B(JumpLoop), U8(14), I8(0),
+ B(Star0),
+ /* 45 E> */ B(JumpLoop), U8(13), I8(0),
/* 79 S> */ B(Ldar), R(0),
/* 88 S> */ B(Return),
]
@@ -44,17 +44,17 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 20
+bytecode array length: 18
bytecodes: [
/* 42 S> */ B(LdaFalse),
- B(Star), R(0),
+ B(Star0),
/* 56 S> */ B(Ldar), R(0),
B(ToBooleanLogicalNot),
- B(Star), R(0),
+ B(Star0),
/* 74 S> */ B(LdaFalse),
/* 74 E> */ B(TestEqual), R(0), U8(0),
B(JumpIfFalse), U8(5),
- /* 49 E> */ B(JumpLoop), U8(11), I8(0),
+ /* 49 E> */ B(JumpLoop), U8(10), I8(0),
/* 85 S> */ B(Ldar), R(0),
/* 94 S> */ B(Return),
]
@@ -70,10 +70,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 8
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(101),
- B(Star), R(0),
+ B(Star0),
/* 61 S> */ B(MulSmi), I8(3), U8(0),
B(LdaUndefined),
/* 66 S> */ B(Return),
@@ -91,14 +91,14 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 16
+bytecode array length: 14
bytecodes: [
/* 42 S> */ B(Wide), B(LdaSmi), I16(1234),
- B(Star), R(0),
+ B(Star0),
/* 64 S> */ B(Mul), R(0), U8(1),
/* 68 E> */ B(SubSmi), I8(1), U8(0),
B(LdaUndefined),
- B(Star), R(1),
+ B(Star1),
/* 83 S> */ B(Return),
]
constant pool: [
@@ -113,10 +113,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(13),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(BitwiseNot), U8(0),
/* 56 S> */ B(Return),
]
@@ -132,10 +132,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(13),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(ToNumber), U8(0),
/* 56 S> */ B(Return),
]
@@ -151,10 +151,10 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
/* 42 S> */ B(LdaSmi), I8(13),
- B(Star), R(0),
+ B(Star0),
/* 53 S> */ B(Negate), U8(0),
/* 56 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
index 39a21f7b50..ddd83ee90c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
@@ -169,40 +169,40 @@ snippet: "
"
frame size: 157
parameter count: 1
-bytecode array length: 548
+bytecode array length: 532
bytecodes: [
/* 43 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 55 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star1),
/* 67 S> */ B(LdaZero),
- B(Star), R(2),
+ B(Star2),
/* 79 S> */ B(LdaZero),
- B(Star), R(3),
+ B(Star3),
/* 91 S> */ B(LdaZero),
- B(Star), R(4),
+ B(Star4),
/* 103 S> */ B(LdaZero),
- B(Star), R(5),
+ B(Star5),
/* 115 S> */ B(LdaZero),
- B(Star), R(6),
+ B(Star6),
/* 127 S> */ B(LdaZero),
- B(Star), R(7),
+ B(Star7),
/* 139 S> */ B(LdaZero),
- B(Star), R(8),
+ B(Star8),
/* 151 S> */ B(LdaZero),
- B(Star), R(9),
+ B(Star9),
/* 164 S> */ B(LdaZero),
- B(Star), R(10),
+ B(Star10),
/* 177 S> */ B(LdaZero),
- B(Star), R(11),
+ B(Star11),
/* 190 S> */ B(LdaZero),
- B(Star), R(12),
+ B(Star12),
/* 203 S> */ B(LdaZero),
- B(Star), R(13),
+ B(Star13),
/* 216 S> */ B(LdaZero),
- B(Star), R(14),
+ B(Star14),
/* 229 S> */ B(LdaZero),
- B(Star), R(15),
+ B(Star15),
/* 242 S> */ B(LdaZero),
B(Star), R(16),
/* 255 S> */ B(LdaZero),
@@ -658,40 +658,40 @@ snippet: "
"
frame size: 157
parameter count: 1
-bytecode array length: 550
+bytecode array length: 534
bytecodes: [
/* 43 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 55 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star1),
/* 67 S> */ B(LdaZero),
- B(Star), R(2),
+ B(Star2),
/* 79 S> */ B(LdaZero),
- B(Star), R(3),
+ B(Star3),
/* 91 S> */ B(LdaZero),
- B(Star), R(4),
+ B(Star4),
/* 103 S> */ B(LdaZero),
- B(Star), R(5),
+ B(Star5),
/* 115 S> */ B(LdaZero),
- B(Star), R(6),
+ B(Star6),
/* 127 S> */ B(LdaZero),
- B(Star), R(7),
+ B(Star7),
/* 139 S> */ B(LdaZero),
- B(Star), R(8),
+ B(Star8),
/* 151 S> */ B(LdaZero),
- B(Star), R(9),
+ B(Star9),
/* 164 S> */ B(LdaZero),
- B(Star), R(10),
+ B(Star10),
/* 177 S> */ B(LdaZero),
- B(Star), R(11),
+ B(Star11),
/* 190 S> */ B(LdaZero),
- B(Star), R(12),
+ B(Star12),
/* 203 S> */ B(LdaZero),
- B(Star), R(13),
+ B(Star13),
/* 216 S> */ B(LdaZero),
- B(Star), R(14),
+ B(Star14),
/* 229 S> */ B(LdaZero),
- B(Star), R(15),
+ B(Star15),
/* 242 S> */ B(LdaZero),
B(Star), R(16),
/* 255 S> */ B(LdaZero),
@@ -1147,40 +1147,40 @@ snippet: "
"
frame size: 157
parameter count: 1
-bytecode array length: 556
+bytecode array length: 540
bytecodes: [
/* 43 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 55 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star1),
/* 67 S> */ B(LdaZero),
- B(Star), R(2),
+ B(Star2),
/* 79 S> */ B(LdaZero),
- B(Star), R(3),
+ B(Star3),
/* 91 S> */ B(LdaZero),
- B(Star), R(4),
+ B(Star4),
/* 103 S> */ B(LdaZero),
- B(Star), R(5),
+ B(Star5),
/* 115 S> */ B(LdaZero),
- B(Star), R(6),
+ B(Star6),
/* 127 S> */ B(LdaZero),
- B(Star), R(7),
+ B(Star7),
/* 139 S> */ B(LdaZero),
- B(Star), R(8),
+ B(Star8),
/* 151 S> */ B(LdaZero),
- B(Star), R(9),
+ B(Star9),
/* 164 S> */ B(LdaZero),
- B(Star), R(10),
+ B(Star10),
/* 177 S> */ B(LdaZero),
- B(Star), R(11),
+ B(Star11),
/* 190 S> */ B(LdaZero),
- B(Star), R(12),
+ B(Star12),
/* 203 S> */ B(LdaZero),
- B(Star), R(13),
+ B(Star13),
/* 216 S> */ B(LdaZero),
- B(Star), R(14),
+ B(Star14),
/* 229 S> */ B(LdaZero),
- B(Star), R(15),
+ B(Star15),
/* 242 S> */ B(LdaZero),
B(Star), R(16),
/* 255 S> */ B(LdaZero),
@@ -1642,40 +1642,40 @@ snippet: "
"
frame size: 157
parameter count: 1
-bytecode array length: 577
+bytecode array length: 560
bytecodes: [
/* 43 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 55 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star1),
/* 67 S> */ B(LdaZero),
- B(Star), R(2),
+ B(Star2),
/* 79 S> */ B(LdaZero),
- B(Star), R(3),
+ B(Star3),
/* 91 S> */ B(LdaZero),
- B(Star), R(4),
+ B(Star4),
/* 103 S> */ B(LdaZero),
- B(Star), R(5),
+ B(Star5),
/* 115 S> */ B(LdaZero),
- B(Star), R(6),
+ B(Star6),
/* 127 S> */ B(LdaZero),
- B(Star), R(7),
+ B(Star7),
/* 139 S> */ B(LdaZero),
- B(Star), R(8),
+ B(Star8),
/* 151 S> */ B(LdaZero),
- B(Star), R(9),
+ B(Star9),
/* 164 S> */ B(LdaZero),
- B(Star), R(10),
+ B(Star10),
/* 177 S> */ B(LdaZero),
- B(Star), R(11),
+ B(Star11),
/* 190 S> */ B(LdaZero),
- B(Star), R(12),
+ B(Star12),
/* 203 S> */ B(LdaZero),
- B(Star), R(13),
+ B(Star13),
/* 216 S> */ B(LdaZero),
- B(Star), R(14),
+ B(Star14),
/* 229 S> */ B(LdaZero),
- B(Star), R(15),
+ B(Star15),
/* 242 S> */ B(LdaZero),
B(Star), R(16),
/* 255 S> */ B(LdaZero),
@@ -1959,7 +1959,7 @@ bytecodes: [
/* 2119 S> */ B(LdaZero),
B(Wide), B(Star), R16(156),
/* 2131 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 2134 S> */ B(LdaSmi), I8(3),
/* 2143 E> */ B(Wide), B(TestEqual), R16(129), U16(0),
B(JumpIfFalse), U8(12),
@@ -2143,40 +2143,40 @@ snippet: "
"
frame size: 158
parameter count: 1
-bytecode array length: 593
+bytecode array length: 574
bytecodes: [
/* 43 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 55 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star1),
/* 67 S> */ B(LdaZero),
- B(Star), R(2),
+ B(Star2),
/* 79 S> */ B(LdaZero),
- B(Star), R(3),
+ B(Star3),
/* 91 S> */ B(LdaZero),
- B(Star), R(4),
+ B(Star4),
/* 103 S> */ B(LdaZero),
- B(Star), R(5),
+ B(Star5),
/* 115 S> */ B(LdaZero),
- B(Star), R(6),
+ B(Star6),
/* 127 S> */ B(LdaZero),
- B(Star), R(7),
+ B(Star7),
/* 139 S> */ B(LdaZero),
- B(Star), R(8),
+ B(Star8),
/* 151 S> */ B(LdaZero),
- B(Star), R(9),
+ B(Star9),
/* 164 S> */ B(LdaZero),
- B(Star), R(10),
+ B(Star10),
/* 177 S> */ B(LdaZero),
- B(Star), R(11),
+ B(Star11),
/* 190 S> */ B(LdaZero),
- B(Star), R(12),
+ B(Star12),
/* 203 S> */ B(LdaZero),
- B(Star), R(13),
+ B(Star13),
/* 216 S> */ B(LdaZero),
- B(Star), R(14),
+ B(Star14),
/* 229 S> */ B(LdaZero),
- B(Star), R(15),
+ B(Star15),
/* 242 S> */ B(LdaZero),
B(Star), R(16),
/* 255 S> */ B(LdaZero),
@@ -2460,22 +2460,22 @@ bytecodes: [
/* 2119 S> */ B(LdaZero),
B(Wide), B(Star), R16(156),
/* 2131 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 2143 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star1),
/* 2151 S> */ B(LdaZero),
B(Wide), B(Star), R16(128),
/* 2166 S> */ B(LdaSmi), I8(64),
/* 2166 E> */ B(Wide), B(TestLessThan), R16(128), U16(0),
- B(JumpIfFalse), U8(30),
+ B(JumpIfFalse), U8(29),
/* 2183 S> */ B(Wide), B(Ldar), R16(128),
/* 2189 E> */ B(Add), R(1), U8(1),
B(Wide), B(Mov), R16(1), R16(157),
- B(Star), R(1),
+ B(Star1),
/* 2176 S> */ B(Wide), B(Ldar), R16(128),
B(Inc), U8(2),
B(Wide), B(Star), R16(128),
- /* 2146 E> */ B(JumpLoop), U8(35), I8(0),
+ /* 2146 E> */ B(JumpLoop), U8(34), I8(0),
/* 2195 S> */ B(Wide), B(Ldar), R16(128),
/* 2207 S> */ B(Return),
]
@@ -2649,40 +2649,40 @@ snippet: "
"
frame size: 163
parameter count: 1
-bytecode array length: 624
+bytecode array length: 605
bytecodes: [
/* 43 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 55 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star1),
/* 67 S> */ B(LdaZero),
- B(Star), R(2),
+ B(Star2),
/* 79 S> */ B(LdaZero),
- B(Star), R(3),
+ B(Star3),
/* 91 S> */ B(LdaZero),
- B(Star), R(4),
+ B(Star4),
/* 103 S> */ B(LdaZero),
- B(Star), R(5),
+ B(Star5),
/* 115 S> */ B(LdaZero),
- B(Star), R(6),
+ B(Star6),
/* 127 S> */ B(LdaZero),
- B(Star), R(7),
+ B(Star7),
/* 139 S> */ B(LdaZero),
- B(Star), R(8),
+ B(Star8),
/* 151 S> */ B(LdaZero),
- B(Star), R(9),
+ B(Star9),
/* 164 S> */ B(LdaZero),
- B(Star), R(10),
+ B(Star10),
/* 177 S> */ B(LdaZero),
- B(Star), R(11),
+ B(Star11),
/* 190 S> */ B(LdaZero),
- B(Star), R(12),
+ B(Star12),
/* 203 S> */ B(LdaZero),
- B(Star), R(13),
+ B(Star13),
/* 216 S> */ B(LdaZero),
- B(Star), R(14),
+ B(Star14),
/* 229 S> */ B(LdaZero),
- B(Star), R(15),
+ B(Star15),
/* 242 S> */ B(LdaZero),
B(Star), R(16),
/* 255 S> */ B(LdaZero),
@@ -2966,28 +2966,28 @@ bytecodes: [
/* 2119 S> */ B(LdaZero),
B(Wide), B(Star), R16(156),
/* 2131 S> */ B(Wide), B(LdaSmi), I16(1234),
- B(Star), R(0),
+ B(Star0),
/* 2146 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star1),
/* 2162 S> */ B(Ldar), R(0),
- B(JumpIfUndefinedOrNull), U8(71),
+ B(JumpIfUndefinedOrNull), U8(70),
B(Wide), B(ToObject), R16(157),
B(Wide), B(ForInEnumerate), R16(157),
B(Wide), B(ForInPrepare), R16(158), U16(0),
B(LdaZero),
B(Wide), B(Star), R16(161),
/* 2154 S> */ B(Wide), B(ForInContinue), R16(161), R16(160),
- B(JumpIfFalse), U8(44),
+ B(JumpIfFalse), U8(43),
B(Wide), B(ForInNext), R16(157), R16(161), R16(158), U16(0),
- B(JumpIfUndefined), U8(21),
+ B(JumpIfUndefined), U8(20),
B(Wide), B(Star), R16(128),
/* 2169 S> */ B(Wide), B(Ldar), R16(128),
/* 2175 E> */ B(Add), R(1), U8(1),
B(Wide), B(Mov), R16(1), R16(162),
- B(Star), R(1),
+ B(Star1),
/* 2172 E> */ B(Wide), B(ForInStep), R16(161),
B(Wide), B(Star), R16(161),
- /* 2149 E> */ B(JumpLoop), U8(47), I8(0),
+ /* 2149 E> */ B(JumpLoop), U8(46), I8(0),
/* 2181 S> */ B(Ldar), R(1),
/* 2191 S> */ B(Return),
]
@@ -3162,40 +3162,40 @@ snippet: "
"
frame size: 159
parameter count: 1
-bytecode array length: 591
+bytecode array length: 573
bytecodes: [
/* 43 S> */ B(LdaZero),
- B(Star), R(0),
+ B(Star0),
/* 55 S> */ B(LdaZero),
- B(Star), R(1),
+ B(Star1),
/* 67 S> */ B(LdaZero),
- B(Star), R(2),
+ B(Star2),
/* 79 S> */ B(LdaZero),
- B(Star), R(3),
+ B(Star3),
/* 91 S> */ B(LdaZero),
- B(Star), R(4),
+ B(Star4),
/* 103 S> */ B(LdaZero),
- B(Star), R(5),
+ B(Star5),
/* 115 S> */ B(LdaZero),
- B(Star), R(6),
+ B(Star6),
/* 127 S> */ B(LdaZero),
- B(Star), R(7),
+ B(Star7),
/* 139 S> */ B(LdaZero),
- B(Star), R(8),
+ B(Star8),
/* 151 S> */ B(LdaZero),
- B(Star), R(9),
+ B(Star9),
/* 164 S> */ B(LdaZero),
- B(Star), R(10),
+ B(Star10),
/* 177 S> */ B(LdaZero),
- B(Star), R(11),
+ B(Star11),
/* 190 S> */ B(LdaZero),
- B(Star), R(12),
+ B(Star12),
/* 203 S> */ B(LdaZero),
- B(Star), R(13),
+ B(Star13),
/* 216 S> */ B(LdaZero),
- B(Star), R(14),
+ B(Star14),
/* 229 S> */ B(LdaZero),
- B(Star), R(15),
+ B(Star15),
/* 242 S> */ B(LdaZero),
B(Star), R(16),
/* 255 S> */ B(LdaZero),
@@ -3481,11 +3481,11 @@ bytecodes: [
/* 2122 S> */ B(Wide), B(Mov), R16(64), R16(157),
B(Wide), B(Mov), R16(63), R16(158),
/* 2137 E> */ B(Wide), B(CallRuntime), U16(Runtime::kAdd), R16(157), U16(2),
- B(Star), R(0),
+ B(Star0),
/* 2143 S> */ B(Wide), B(Mov), R16(27), R16(157),
B(Wide), B(Mov), R16(143), R16(158),
/* 2158 E> */ B(Wide), B(CallRuntime), U16(Runtime::kAdd), R16(157), U16(2),
- B(Star), R(1),
+ B(Star1),
/* 2165 S> */ B(CallRuntime), U16(Runtime::kTheHole), R(0), U8(0),
/* 2177 S> */ B(Ldar), R(1),
/* 2187 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
index ba342d8960..5f3ac98190 100644
--- a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
+++ b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
@@ -467,7 +467,7 @@ bool WriteExpectationsFile(const std::vector<std::string>& snippet_list,
const std::string& output_filename) {
std::ofstream output_file_handle;
if (!options.write_to_stdout()) {
- output_file_handle.open(output_filename.c_str());
+ output_file_handle.open(output_filename.c_str(), std::ios::binary);
if (!output_file_handle.is_open()) {
REPORT_ERROR("Could not open " << output_filename << " for writing.");
return false;
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index 46703ac7aa..ccb8710865 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -2592,10 +2592,6 @@ TEST(ClassDeclarations) {
}
TEST(ClassAndSuperClass) {
- // Different bytecodes are generated with and without --future temporarily,
- // see crbug.com/v8/9237 . TODO(marja): remove this hack once --super-ic is on
- // by default.
- FLAG_super_ic = false;
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index 80d9d6e52e..5e0f7d5d98 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -1722,18 +1722,20 @@ TEST(InterpreterJumpConstantWith16BitOperand) {
ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- BytecodeArrayIterator iterator(bytecode_array);
-
- bool found_16bit_constant_jump = false;
- while (!iterator.done()) {
- if (iterator.current_bytecode() == Bytecode::kJumpConstant &&
- iterator.current_operand_scale() == OperandScale::kDouble) {
- found_16bit_constant_jump = true;
- break;
+ {
+ BytecodeArrayIterator iterator(bytecode_array);
+
+ bool found_16bit_constant_jump = false;
+ while (!iterator.done()) {
+ if (iterator.current_bytecode() == Bytecode::kJumpConstant &&
+ iterator.current_operand_scale() == OperandScale::kDouble) {
+ found_16bit_constant_jump = true;
+ break;
+ }
+ iterator.Advance();
}
- iterator.Advance();
+ CHECK(found_16bit_constant_jump);
}
- CHECK(found_16bit_constant_jump);
InterpreterTester tester(isolate, bytecode_array, metadata);
auto callable = tester.GetCallable<>();
@@ -1766,19 +1768,20 @@ TEST(InterpreterJumpWith32BitOperand) {
ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
-
- BytecodeArrayIterator iterator(bytecode_array);
-
- bool found_32bit_jump = false;
- while (!iterator.done()) {
- if (iterator.current_bytecode() == Bytecode::kJump &&
- iterator.current_operand_scale() == OperandScale::kQuadruple) {
- found_32bit_jump = true;
- break;
+ {
+ BytecodeArrayIterator iterator(bytecode_array);
+
+ bool found_32bit_jump = false;
+ while (!iterator.done()) {
+ if (iterator.current_bytecode() == Bytecode::kJump &&
+ iterator.current_operand_scale() == OperandScale::kQuadruple) {
+ found_32bit_jump = true;
+ break;
+ }
+ iterator.Advance();
}
- iterator.Advance();
+ CHECK(found_32bit_jump);
}
- CHECK(found_32bit_jump);
InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
@@ -2352,7 +2355,6 @@ TEST(InterpreterUnaryNot) {
bool expected_value = ((i & 1) == 1);
BytecodeArrayBuilder builder(zone, 1, 0);
- Register r0(0);
builder.LoadFalse();
for (size_t j = 0; j < i; j++) {
builder.LogicalNot(ToBooleanMode::kAlreadyBoolean);
@@ -2390,7 +2392,6 @@ TEST(InterpreterUnaryNotNonBoolean) {
for (size_t i = 0; i < arraysize(object_type_tuples); i++) {
BytecodeArrayBuilder builder(zone, 1, 0);
- Register r0(0);
LoadLiteralForTest(&builder, object_type_tuples[i].first);
builder.LogicalNot(ToBooleanMode::kConvertToBoolean).Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
diff --git a/deps/v8/test/cctest/test-accessor-assembler.cc b/deps/v8/test/cctest/test-accessor-assembler.cc
index 0095f4576d..a14a4d5c02 100644
--- a/deps/v8/test/cctest/test-accessor-assembler.cc
+++ b/deps/v8/test/cctest/test-accessor-assembler.cc
@@ -32,7 +32,7 @@ void TestStubCacheOffsetCalculation(StubCache::Table table) {
auto map = m.Parameter<Map>(2);
TNode<IntPtrT> primary_offset =
m.StubCachePrimaryOffsetForTesting(name, map);
- Node* result;
+ TNode<IntPtrT> result;
if (table == StubCache::kPrimary) {
result = primary_offset;
} else {
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
index 236053eb45..af5858eaef 100644
--- a/deps/v8/test/cctest/test-api-interceptors.cc
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -6,7 +6,6 @@
#include "test/cctest/test-api.h"
-#include "include/v8-util.h"
#include "src/api/api-inl.h"
#include "src/base/platform/platform.h"
#include "src/codegen/compilation-cache.h"
@@ -18,30 +17,18 @@
#include "src/strings/unicode-inl.h"
#include "src/utils/utils.h"
-using ::v8::Boolean;
-using ::v8::BooleanObject;
using ::v8::Context;
-using ::v8::Extension;
using ::v8::Function;
using ::v8::FunctionTemplate;
-using ::v8::HandleScope;
using ::v8::Local;
using ::v8::Name;
-using ::v8::Message;
-using ::v8::MessageCallback;
using ::v8::Object;
using ::v8::ObjectTemplate;
-using ::v8::Persistent;
using ::v8::Script;
-using ::v8::StackTrace;
using ::v8::String;
using ::v8::Symbol;
-using ::v8::TryCatch;
-using ::v8::Undefined;
-using ::v8::V8;
using ::v8::Value;
-
namespace {
void Returns42(const v8::FunctionCallbackInfo<v8::Value>& info) {
diff --git a/deps/v8/test/cctest/test-api-stack-traces.cc b/deps/v8/test/cctest/test-api-stack-traces.cc
index 4d445fba1d..edfaa98dd6 100644
--- a/deps/v8/test/cctest/test-api-stack-traces.cc
+++ b/deps/v8/test/cctest/test-api-stack-traces.cc
@@ -156,12 +156,14 @@ THREADED_TEST(ExceptionCreateMessage) {
// THREADED_TEST(StackTrace) {
TEST(StackTrace) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- v8::TryCatch try_catch(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::TryCatch try_catch(isolate);
const char* source = "function foo() { FAIL.FAIL; }; foo();";
v8::Local<v8::String> src = v8_str(source);
v8::Local<v8::String> origin = v8_str("stack-trace-test");
- v8::ScriptCompiler::Source script_source(src, v8::ScriptOrigin(origin));
+ v8::ScriptCompiler::Source script_source(src,
+ v8::ScriptOrigin(isolate, origin));
CHECK(v8::ScriptCompiler::CompileUnboundScript(context->GetIsolate(),
&script_source)
.ToLocalChecked()
@@ -207,10 +209,8 @@ static void AnalyzeStackInNativeCode(
const int kOverviewTest = 1;
const int kDetailedTest = 2;
const int kFunctionName = 3;
- const int kDisplayName = 4;
- const int kFunctionNameAndDisplayName = 5;
- const int kDisplayNameIsNotString = 6;
- const int kFunctionNameIsNotString = 7;
+ const int kFunctionNameAndDisplayName = 4;
+ const int kFunctionNameIsNotString = 5;
CHECK_EQ(args.Length(), 1);
@@ -252,22 +252,10 @@ static void AnalyzeStackInNativeCode(
CHECK_EQ(3, stackTrace->GetFrameCount());
checkStackFrame(nullptr, "function.name", 3, 1, true, false,
stackTrace->GetFrame(isolate, 0));
- } else if (testGroup == kDisplayName) {
- v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
- args.GetIsolate(), 5, v8::StackTrace::kOverview);
- CHECK_EQ(3, stackTrace->GetFrameCount());
- checkStackFrame(nullptr, "function.displayName", 3, 1, true, false,
- stackTrace->GetFrame(isolate, 0));
} else if (testGroup == kFunctionNameAndDisplayName) {
v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 5, v8::StackTrace::kOverview);
CHECK_EQ(3, stackTrace->GetFrameCount());
- checkStackFrame(nullptr, "function.displayName", 3, 1, true, false,
- stackTrace->GetFrame(isolate, 0));
- } else if (testGroup == kDisplayNameIsNotString) {
- v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
- args.GetIsolate(), 5, v8::StackTrace::kOverview);
- CHECK_EQ(3, stackTrace->GetFrameCount());
checkStackFrame(nullptr, "function.name", 3, 1, true, false,
stackTrace->GetFrame(isolate, 0));
} else if (testGroup == kFunctionNameIsNotString) {
@@ -304,7 +292,7 @@ TEST(CaptureStackTrace) {
"var x;eval('new foo();');";
v8::Local<v8::String> overview_src = v8_str(overview_source);
v8::ScriptCompiler::Source script_source(overview_src,
- v8::ScriptOrigin(origin));
+ v8::ScriptOrigin(isolate, origin));
v8::Local<Value> overview_result(
v8::ScriptCompiler::CompileUnboundScript(isolate, &script_source)
.ToLocalChecked()
@@ -325,7 +313,7 @@ TEST(CaptureStackTrace) {
"eval('new baz();');";
v8::Local<v8::String> detailed_src = v8_str(detailed_source);
// Make the script using a non-zero line and column offset.
- v8::ScriptOrigin detailed_origin(origin, 3, 5);
+ v8::ScriptOrigin detailed_origin(isolate, origin, 3, 5);
v8::ScriptCompiler::Source script_source2(detailed_src, detailed_origin);
v8::Local<v8::UnboundScript> detailed_script(
v8::ScriptCompiler::CompileUnboundScript(isolate, &script_source2)
@@ -349,14 +337,12 @@ TEST(CaptureStackTrace) {
" f()\n"
"}\n"
"bar('function.name', undefined, 3);\n"
- "bar(undefined, 'function.displayName', 4);\n"
- "bar('function.name', 'function.displayName', 5);\n"
- "bar('function.name', 239, 6);\n"
- "bar(239, undefined, 7);\n";
+ "bar('function.name', 'function.displayName', 4);\n"
+ "bar(239, undefined, 5);\n";
v8::Local<v8::String> function_name_src =
v8::String::NewFromUtf8Literal(isolate, function_name_source);
v8::ScriptCompiler::Source script_source3(function_name_src,
- v8::ScriptOrigin(origin));
+ v8::ScriptOrigin(isolate, origin));
v8::Local<Value> function_name_result(
v8::ScriptCompiler::CompileUnboundScript(isolate, &script_source3)
.ToLocalChecked()
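
Most of the updated call sites in these tests reflect the new v8::ScriptOrigin constructor, which now takes the v8::Isolate* as its first argument before the resource name and the optional line/column offsets. A minimal sketch of the updated usage, with a hypothetical helper and resource name:

#include "include/v8.h"

// Sketch only: CompileWithOrigin and "test.js" are illustrative, not from the patch.
v8::MaybeLocal<v8::Script> CompileWithOrigin(v8::Local<v8::Context> context,
                                             v8::Local<v8::String> source) {
  v8::Isolate* isolate = context->GetIsolate();
  // Isolate first, then resource name, then line/column offsets.
  v8::ScriptOrigin origin(isolate,
                          v8::String::NewFromUtf8Literal(isolate, "test.js"),
                          0 /* line offset */, 0 /* column offset */);
  return v8::Script::Compile(context, source, &origin);
}
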
diff --git a/deps/v8/test/cctest/test-api-wasm.cc b/deps/v8/test/cctest/test-api-wasm.cc
index eb49d5668a..6064f1384c 100644
--- a/deps/v8/test/cctest/test-api-wasm.cc
+++ b/deps/v8/test/cctest/test-api-wasm.cc
@@ -129,20 +129,20 @@ TEST(WasmStreamingAbortWithoutReject) {
namespace {
-bool wasm_threads_enabled_value = false;
bool wasm_simd_enabled_value = false;
-
-bool MockWasmThreadsEnabledCallback(v8::Local<v8::Context>) {
- return wasm_threads_enabled_value;
-}
+bool wasm_exceptions_enabled_value = false;
bool MockWasmSimdEnabledCallback(v8::Local<v8::Context>) {
return wasm_simd_enabled_value;
}
+bool MockWasmExceptionsEnabledCallback(v8::Local<v8::Context>) {
+ return wasm_exceptions_enabled_value;
+}
+
} // namespace
-TEST(TestSetWasmThreadsEnabledCallback) {
+TEST(TestSetWasmSimdEnabledCallback) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -150,28 +150,28 @@ TEST(TestSetWasmThreadsEnabledCallback) {
v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
i::Handle<i::Context> i_context = v8::Utils::OpenHandle(*context);
- // {Isolate::AreWasmThreadsEnabled} calls the callback set by the embedder if
+ // {Isolate::IsWasmSimdEnabled} calls the callback set by the embedder if
// such a callback exists. Otherwise it returns
- // {FLAG_experimental_wasm_threads}. First we test that the flag is returned
+ // {FLAG_experimental_wasm_simd}. First we test that the flag is returned
// correctly if no callback is set. Then we test that the flag is ignored if
// the callback is set.
- i::FLAG_experimental_wasm_threads = false;
- CHECK(!i_isolate->AreWasmThreadsEnabled(i_context));
+ i::FLAG_experimental_wasm_simd = false;
+ CHECK(!i_isolate->IsWasmSimdEnabled(i_context));
- i::FLAG_experimental_wasm_threads = true;
- CHECK(i_isolate->AreWasmThreadsEnabled(i_context));
+ i::FLAG_experimental_wasm_simd = true;
+ CHECK(i_isolate->IsWasmSimdEnabled(i_context));
- isolate->SetWasmThreadsEnabledCallback(MockWasmThreadsEnabledCallback);
- wasm_threads_enabled_value = false;
- CHECK(!i_isolate->AreWasmThreadsEnabled(i_context));
+ isolate->SetWasmSimdEnabledCallback(MockWasmSimdEnabledCallback);
+ wasm_simd_enabled_value = false;
+ CHECK(!i_isolate->IsWasmSimdEnabled(i_context));
- wasm_threads_enabled_value = true;
- i::FLAG_experimental_wasm_threads = false;
- CHECK(i_isolate->AreWasmThreadsEnabled(i_context));
+ wasm_simd_enabled_value = true;
+ i::FLAG_experimental_wasm_simd = false;
+ CHECK(i_isolate->IsWasmSimdEnabled(i_context));
}
-TEST(TestSetWasmSimdEnabledCallback) {
+TEST(TestSetWasmExceptionsEnabledCallback) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -179,23 +179,23 @@ TEST(TestSetWasmSimdEnabledCallback) {
v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
i::Handle<i::Context> i_context = v8::Utils::OpenHandle(*context);
- // {Isolate::IsWasmSimdEnabled} calls the callback set by the embedder if
- // such a callback exists. Otherwise it returns
- // {FLAG_experimental_wasm_simd}. First we test that the flag is returned
+ // {Isolate::AreWasmExceptionsEnabled} calls the callback set by the embedder
+ // if such a callback exists. Otherwise it returns
+ // {FLAG_experimental_wasm_eh}. First we test that the flag is returned
// correctly if no callback is set. Then we test that the flag is ignored if
// the callback is set.
- i::FLAG_experimental_wasm_simd = false;
- CHECK(!i_isolate->IsWasmSimdEnabled(i_context));
+ i::FLAG_experimental_wasm_eh = false;
+ CHECK(!i_isolate->AreWasmExceptionsEnabled(i_context));
- i::FLAG_experimental_wasm_simd = true;
- CHECK(i_isolate->IsWasmSimdEnabled(i_context));
+ i::FLAG_experimental_wasm_eh = true;
+ CHECK(i_isolate->AreWasmExceptionsEnabled(i_context));
- isolate->SetWasmSimdEnabledCallback(MockWasmSimdEnabledCallback);
- wasm_simd_enabled_value = false;
- CHECK(!i_isolate->IsWasmSimdEnabled(i_context));
+ isolate->SetWasmExceptionsEnabledCallback(MockWasmExceptionsEnabledCallback);
+ wasm_exceptions_enabled_value = false;
+ CHECK(!i_isolate->AreWasmExceptionsEnabled(i_context));
- wasm_simd_enabled_value = true;
- i::FLAG_experimental_wasm_simd = false;
- CHECK(i_isolate->IsWasmSimdEnabled(i_context));
+ wasm_exceptions_enabled_value = true;
+ i::FLAG_experimental_wasm_eh = false;
+ CHECK(i_isolate->AreWasmExceptionsEnabled(i_context));
}
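
The Wasm feature tests above exercise the pattern where an embedder-installed callback overrides the corresponding command-line flag: once a callback is set, V8 consults it instead of FLAG_experimental_wasm_simd (or FLAG_experimental_wasm_eh for exceptions). A minimal embedder-side sketch, assuming a hypothetical policy function:

#include "include/v8.h"

// Sketch only: IsSimdAllowedForEmbedder is a hypothetical embedder policy hook.
bool IsSimdAllowedForEmbedder();

bool EmbedderWasmSimdCallback(v8::Local<v8::Context> context) {
  // Once installed, this result takes precedence over the
  // --experimental-wasm-simd flag, as the test above checks.
  return IsSimdAllowedForEmbedder();
}

void InstallWasmCallbacks(v8::Isolate* isolate) {
  isolate->SetWasmSimdEnabledCallback(EmbedderWasmSimdCallback);
}
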
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 96afaaad79..c01c1ea791 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -2280,8 +2280,8 @@ THREADED_TEST(TestDataTypeChecks) {
x.As<v8::Value>();
}
- v8::ScriptOrigin origin(v8_str(""), 0, 0, false, -1, Local<v8::Value>(),
- false, false, true);
+ v8::ScriptOrigin origin(isolate, v8_str(""), 0, 0, false, -1,
+ Local<v8::Value>(), false, false, true);
v8::ScriptCompiler::Source source(v8::String::NewFromUtf8Literal(isolate, ""),
origin);
v8::Local<v8::Data> module =
@@ -4629,8 +4629,8 @@ TEST(MessageHandler3) {
CHECK(!message_received);
isolate->AddMessageListener(check_message_3);
LocalContext context;
- v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8_str("6.75"), 1, 2, true, -1, v8_str("7.40"), true);
+ v8::ScriptOrigin origin = v8::ScriptOrigin(isolate, v8_str("6.75"), 1, 2,
+ true, -1, v8_str("7.40"), true);
v8::Local<v8::Script> script =
Script::Compile(context.local(), v8_str("throw 'error'"), &origin)
.ToLocalChecked();
@@ -4659,7 +4659,8 @@ TEST(MessageHandler4) {
CHECK(!message_received);
isolate->AddMessageListener(check_message_4);
LocalContext context;
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str("6.75"), 1, 2, false);
+ v8::ScriptOrigin origin =
+ v8::ScriptOrigin(isolate, v8_str("6.75"), 1, 2, false);
v8::Local<v8::Script> script =
Script::Compile(context.local(), v8_str("throw 'error'"), &origin)
.ToLocalChecked();
@@ -4699,7 +4700,8 @@ TEST(MessageHandler5) {
CHECK(!message_received);
isolate->AddMessageListener(check_message_5a);
LocalContext context;
- v8::ScriptOrigin origin1 = v8::ScriptOrigin(v8_str("6.75"), 1, 2, true);
+ v8::ScriptOrigin origin1 =
+ v8::ScriptOrigin(isolate, v8_str("6.75"), 1, 2, true);
v8::Local<v8::Script> script =
Script::Compile(context.local(), v8_str("throw 'error'"), &origin1)
.ToLocalChecked();
@@ -4710,7 +4712,8 @@ TEST(MessageHandler5) {
message_received = false;
isolate->AddMessageListener(check_message_5b);
- v8::ScriptOrigin origin2 = v8::ScriptOrigin(v8_str("6.75"), 1, 2, false);
+ v8::ScriptOrigin origin2 =
+ v8::ScriptOrigin(isolate, v8_str("6.75"), 1, 2, false);
script = Script::Compile(context.local(), v8_str("throw 'error'"), &origin2)
.ToLocalChecked();
CHECK(script->Run(context.local()).IsEmpty());
@@ -14020,7 +14023,8 @@ static void CheckTryCatchSourceInfo(v8::Local<v8::Script> script,
THREADED_TEST(TryCatchSourceInfo) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
v8::Local<v8::String> source = v8_str(
"function Foo() {\n"
" return Bar();\n"
@@ -14043,13 +14047,13 @@ THREADED_TEST(TryCatchSourceInfo) {
CheckTryCatchSourceInfo(script, resource_name, 0);
resource_name = "test1.js";
- v8::ScriptOrigin origin1(v8_str(resource_name), 0, 0);
+ v8::ScriptOrigin origin1(isolate, v8_str(resource_name), 0, 0);
script =
v8::Script::Compile(context.local(), source, &origin1).ToLocalChecked();
CheckTryCatchSourceInfo(script, resource_name, 0);
resource_name = "test2.js";
- v8::ScriptOrigin origin2(v8_str(resource_name), 7, 0);
+ v8::ScriptOrigin origin2(isolate, v8_str(resource_name), 7, 0);
script =
v8::Script::Compile(context.local(), source, &origin2).ToLocalChecked();
CheckTryCatchSourceInfo(script, resource_name, 7);
@@ -15464,7 +15468,7 @@ static int asm_warning_triggered = 0;
static void AsmJsWarningListener(v8::Local<v8::Message> message,
v8::Local<Value>) {
- DCHECK_EQ(v8::Isolate::kMessageWarning, message->ErrorLevel());
+ CHECK_EQ(v8::Isolate::kMessageWarning, message->ErrorLevel());
asm_warning_triggered = 1;
}
@@ -15486,7 +15490,12 @@ TEST(AsmJsWarning) {
" return {};\n"
"}\n"
"module();");
- DCHECK_EQ(1, asm_warning_triggered);
+#if V8_ENABLE_WEBASSEMBLY
+ int kExpectedWarnings = 1;
+#else
+ int kExpectedWarnings = 0;
+#endif
+ CHECK_EQ(kExpectedWarnings, asm_warning_triggered);
isolate->RemoveMessageListeners(AsmJsWarningListener);
}
@@ -16873,7 +16882,7 @@ TEST(VisitExternalStrings) {
v8::Isolate* isolate = CcTest::isolate();
LocalContext env;
v8::HandleScope scope(isolate);
- const char string[] = "Some string that's long";
+ const char string[] = "Some string";
uint16_t* two_byte_string = AsciiToTwoByteString(string);
TestResource* resource[4];
resource[0] = new TestResource(two_byte_string);
@@ -16953,7 +16962,7 @@ TEST(ExternalInternalizedStringCollectedAtTearDown) {
v8::Local<v8::String> ring =
CompileRun("ring")->ToString(env.local()).ToLocalChecked();
CHECK(v8::Utils::OpenHandle(*ring)->IsInternalizedString());
- CHECK(ring->MakeExternal(inscription));
+ ring->MakeExternal(inscription);
// Ring is still alive. Orcs are roaming freely across our lands.
CHECK_EQ(0, destroyed);
USE(ring);
@@ -16975,7 +16984,7 @@ TEST(ExternalInternalizedStringCollectedAtGC) {
new TestOneByteResource(i::StrDup(s), &destroyed);
v8::Local<v8::String> ring = CompileRun("ring").As<v8::String>();
CHECK(v8::Utils::OpenHandle(*ring)->IsInternalizedString());
- CHECK(ring->MakeExternal(inscription));
+ ring->MakeExternal(inscription);
// Ring is still alive. Orcs are roaming freely across our lands.
CHECK_EQ(0, destroyed);
USE(ring);
@@ -17252,9 +17261,9 @@ THREADED_TEST(ScriptOrigin) {
Local<v8::Symbol> symbol(v8::Symbol::New(isolate));
array->Set(isolate, 0, symbol);
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str("test"), 1, 1, true, -1,
- v8_str("http://sourceMapUrl"),
- true, false, false, array);
+ v8::ScriptOrigin origin = v8::ScriptOrigin(
+ isolate, v8_str("test"), 1, 1, true, -1, v8_str("http://sourceMapUrl"),
+ true, false, false, array);
v8::Local<v8::String> script = v8_str("function f() {}\n\nfunction g() {}");
v8::Script::Compile(env.local(), script, &origin)
.ToLocalChecked()
@@ -17295,8 +17304,9 @@ THREADED_TEST(ScriptOrigin) {
THREADED_TEST(FunctionGetInferredName) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str("test"), 0, 0);
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::ScriptOrigin origin = v8::ScriptOrigin(isolate, v8_str("test"), 0, 0);
v8::Local<v8::String> script =
v8_str("var foo = { bar : { baz : function() {}}}; var f = foo.bar.baz;");
v8::Script::Compile(env.local(), script, &origin)
@@ -17318,35 +17328,37 @@ THREADED_TEST(FunctionGetDebugName) {
const char* code =
"var error = false;"
"function a() { this.x = 1; };"
- "a.displayName = 'display_a';"
+ "Object.defineProperty(a, 'name', {value: 'display_a'});"
"var b = (function() {"
" var f = function() { this.x = 2; };"
- " f.displayName = 'display_b';"
+ " Object.defineProperty(f, 'name', {value: 'display_b'});"
" return f;"
"})();"
"var c = function() {};"
- "c.__defineGetter__('displayName', function() {"
+ "c.__defineGetter__('name', function() {"
" error = true;"
" throw new Error();"
"});"
"function d() {};"
- "d.__defineGetter__('displayName', function() {"
+ "d.__defineGetter__('name', function() {"
" error = true;"
" return 'wrong_display_name';"
"});"
"function e() {};"
- "e.displayName = 'wrong_display_name';"
- "e.__defineSetter__('displayName', function() {"
+ "Object.defineProperty(e, 'name', {value: 'wrong_display_name'});"
+ "e.__defineSetter__('name', function() {"
" error = true;"
" throw new Error();"
"});"
"function f() {};"
- "f.displayName = { 'foo': 6, toString: function() {"
+ "Object.defineProperty(f, 'name', {value: {foo: 6, toString: function() {"
" error = true;"
" return 'wrong_display_name';"
- "}};"
+ "}}});"
"var g = function() {"
- " arguments.callee.displayName = 'set_in_runtime';"
+ " Object.defineProperty(arguments.callee, 'name', {"
+ " value: 'set_in_runtime'"
+ " });"
"}; g();"
"var h = function() {};"
"h.displayName = 'displayName';"
@@ -17358,7 +17370,7 @@ THREADED_TEST(FunctionGetDebugName) {
"Object.defineProperty(j, 'name', { value: 'function.name' });"
"var foo = { bar : { baz : (0, function() {})}}; var k = foo.bar.baz;"
"var foo = { bar : { baz : function() {} }}; var l = foo.bar.baz;";
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str("test"), 0, 0);
+ v8::ScriptOrigin origin = v8::ScriptOrigin(isolate, v8_str("test"), 0, 0);
v8::Script::Compile(env.local(), v8_str(code), &origin)
.ToLocalChecked()
->Run(env.local())
@@ -17373,7 +17385,7 @@ THREADED_TEST(FunctionGetDebugName) {
"e", "e",
"f", "f",
"g", "set_in_runtime",
- "h", "displayName",
+ "h", "function.name",
"i", "function.name",
"j", "function.name",
"k", "foo.bar.baz",
@@ -17385,87 +17397,18 @@ THREADED_TEST(FunctionGetDebugName) {
v8::String::NewFromUtf8(isolate, functions[i * 2])
.ToLocalChecked())
.ToLocalChecked());
- CHECK_EQ(0, strcmp(functions[i * 2 + 1],
- *v8::String::Utf8Value(isolate, f->GetDebugName())));
+ std::string expected(functions[i * 2 + 1]);
+ std::string actual = *v8::String::Utf8Value(isolate, f->GetDebugName());
+ CHECK_EQ(expected, actual);
}
}
-THREADED_TEST(FunctionGetDisplayName) {
+THREADED_TEST(ScriptLineNumber) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- const char* code = "var error = false;"
- "function a() { this.x = 1; };"
- "a.displayName = 'display_a';"
- "var b = (function() {"
- " var f = function() { this.x = 2; };"
- " f.displayName = 'display_b';"
- " return f;"
- "})();"
- "var c = function() {};"
- "c.__defineGetter__('displayName', function() {"
- " error = true;"
- " throw new Error();"
- "});"
- "function d() {};"
- "d.__defineGetter__('displayName', function() {"
- " error = true;"
- " return 'wrong_display_name';"
- "});"
- "function e() {};"
- "e.displayName = 'wrong_display_name';"
- "e.__defineSetter__('displayName', function() {"
- " error = true;"
- " throw new Error();"
- "});"
- "function f() {};"
- "f.displayName = { 'foo': 6, toString: function() {"
- " error = true;"
- " return 'wrong_display_name';"
- "}};"
- "var g = function() {"
- " arguments.callee.displayName = 'set_in_runtime';"
- "}; g();";
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str("test"), 0, 0);
- v8::Script::Compile(env.local(), v8_str(code), &origin)
- .ToLocalChecked()
- ->Run(env.local())
- .ToLocalChecked();
- v8::Local<v8::Value> error =
- env->Global()->Get(env.local(), v8_str("error")).ToLocalChecked();
- v8::Local<v8::Function> a = v8::Local<v8::Function>::Cast(
- env->Global()->Get(env.local(), v8_str("a")).ToLocalChecked());
- v8::Local<v8::Function> b = v8::Local<v8::Function>::Cast(
- env->Global()->Get(env.local(), v8_str("b")).ToLocalChecked());
- v8::Local<v8::Function> c = v8::Local<v8::Function>::Cast(
- env->Global()->Get(env.local(), v8_str("c")).ToLocalChecked());
- v8::Local<v8::Function> d = v8::Local<v8::Function>::Cast(
- env->Global()->Get(env.local(), v8_str("d")).ToLocalChecked());
- v8::Local<v8::Function> e = v8::Local<v8::Function>::Cast(
- env->Global()->Get(env.local(), v8_str("e")).ToLocalChecked());
- v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(env.local(), v8_str("f")).ToLocalChecked());
- v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
- env->Global()->Get(env.local(), v8_str("g")).ToLocalChecked());
- CHECK(!error->BooleanValue(isolate));
- CHECK_EQ(0, strcmp("display_a",
- *v8::String::Utf8Value(isolate, a->GetDisplayName())));
- CHECK_EQ(0, strcmp("display_b",
- *v8::String::Utf8Value(isolate, b->GetDisplayName())));
- CHECK(c->GetDisplayName()->IsUndefined());
- CHECK(d->GetDisplayName()->IsUndefined());
- CHECK(e->GetDisplayName()->IsUndefined());
- CHECK(f->GetDisplayName()->IsUndefined());
- CHECK_EQ(0, strcmp("set_in_runtime",
- *v8::String::Utf8Value(isolate, g->GetDisplayName())));
-}
-
-
-THREADED_TEST(ScriptLineNumber) {
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str("test"), 0, 0);
+ v8::ScriptOrigin origin = v8::ScriptOrigin(isolate, v8_str("test"), 0, 0);
v8::Local<v8::String> script = v8_str("function f() {}\n\nfunction g() {}");
v8::Script::Compile(env.local(), script, &origin)
.ToLocalChecked()
@@ -17484,7 +17427,7 @@ THREADED_TEST(ScriptColumnNumber) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str("test"), 3, 2);
+ v8::ScriptOrigin origin = v8::ScriptOrigin(isolate, v8_str("test"), 3, 2);
v8::Local<v8::String> script =
v8_str("function foo() {}\n\n function bar() {}");
v8::Script::Compile(env.local(), script, &origin)
@@ -17504,7 +17447,7 @@ THREADED_TEST(FunctionGetScriptId) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str("test"), 3, 2);
+ v8::ScriptOrigin origin = v8::ScriptOrigin(isolate, v8_str("test"), 3, 2);
v8::Local<v8::String> scriptSource =
v8_str("function foo() {}\n\n function bar() {}");
v8::Local<v8::Script> script(
@@ -17521,8 +17464,9 @@ THREADED_TEST(FunctionGetScriptId) {
THREADED_TEST(FunctionGetBoundFunction) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str("test"));
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::ScriptOrigin origin = v8::ScriptOrigin(isolate, v8_str("test"));
v8::Local<v8::String> script = v8_str(
"var a = new Object();\n"
"a.x = 1;\n"
@@ -18084,7 +18028,7 @@ THREADED_TEST(TwoByteStringInOneByteCons) {
TestResource resource(uc16_buffer);
- CHECK(flat_string->MakeExternal(&resource));
+ flat_string->MakeExternal(&resource);
CHECK(flat_string->IsTwoByteRepresentation());
@@ -19140,49 +19084,49 @@ THREADED_TEST(CreationContext) {
{
Local<Context> other_context = Context::New(isolate);
Context::Scope scope(other_context);
- CHECK(object1->CreationContext() == context1);
+ CHECK(object1->GetCreationContext().ToLocalChecked() == context1);
CheckContextId(object1, 1);
- CHECK(func1->CreationContext() == context1);
+ CHECK(func1->GetCreationContext().ToLocalChecked() == context1);
CheckContextId(func1, 1);
- CHECK(instance1->CreationContext() == context1);
+ CHECK(instance1->GetCreationContext().ToLocalChecked() == context1);
CheckContextId(instance1, 1);
- CHECK(object2->CreationContext() == context2);
+ CHECK(object2->GetCreationContext().ToLocalChecked() == context2);
CheckContextId(object2, 2);
- CHECK(func2->CreationContext() == context2);
+ CHECK(func2->GetCreationContext().ToLocalChecked() == context2);
CheckContextId(func2, 2);
- CHECK(instance2->CreationContext() == context2);
+ CHECK(instance2->GetCreationContext().ToLocalChecked() == context2);
CheckContextId(instance2, 2);
}
{
Context::Scope scope(context1);
- CHECK(object1->CreationContext() == context1);
+ CHECK(object1->GetCreationContext().ToLocalChecked() == context1);
CheckContextId(object1, 1);
- CHECK(func1->CreationContext() == context1);
+ CHECK(func1->GetCreationContext().ToLocalChecked() == context1);
CheckContextId(func1, 1);
- CHECK(instance1->CreationContext() == context1);
+ CHECK(instance1->GetCreationContext().ToLocalChecked() == context1);
CheckContextId(instance1, 1);
- CHECK(object2->CreationContext() == context2);
+ CHECK(object2->GetCreationContext().ToLocalChecked() == context2);
CheckContextId(object2, 2);
- CHECK(func2->CreationContext() == context2);
+ CHECK(func2->GetCreationContext().ToLocalChecked() == context2);
CheckContextId(func2, 2);
- CHECK(instance2->CreationContext() == context2);
+ CHECK(instance2->GetCreationContext().ToLocalChecked() == context2);
CheckContextId(instance2, 2);
}
{
Context::Scope scope(context2);
- CHECK(object1->CreationContext() == context1);
+ CHECK(object1->GetCreationContext().ToLocalChecked() == context1);
CheckContextId(object1, 1);
- CHECK(func1->CreationContext() == context1);
+ CHECK(func1->GetCreationContext().ToLocalChecked() == context1);
CheckContextId(func1, 1);
- CHECK(instance1->CreationContext() == context1);
+ CHECK(instance1->GetCreationContext().ToLocalChecked() == context1);
CheckContextId(instance1, 1);
- CHECK(object2->CreationContext() == context2);
+ CHECK(object2->GetCreationContext().ToLocalChecked() == context2);
CheckContextId(object2, 2);
- CHECK(func2->CreationContext() == context2);
+ CHECK(func2->GetCreationContext().ToLocalChecked() == context2);
CheckContextId(func2, 2);
- CHECK(instance2->CreationContext() == context2);
+ CHECK(instance2->GetCreationContext().ToLocalChecked() == context2);
CheckContextId(instance2, 2);
}
}
@@ -19201,7 +19145,7 @@ THREADED_TEST(CreationContextOfJsFunction) {
Local<Context> other_context = Context::New(CcTest::isolate());
Context::Scope scope(other_context);
- CHECK(function->CreationContext() == context);
+ CHECK(function->GetCreationContext().ToLocalChecked() == context);
CheckContextId(function, 1);
}
@@ -19232,13 +19176,12 @@ THREADED_TEST(CreationContextOfJsBoundFunction) {
Local<Context> other_context = Context::New(CcTest::isolate());
Context::Scope scope(other_context);
- CHECK(bound_function1->CreationContext() == context1);
+ CHECK(bound_function1->GetCreationContext().ToLocalChecked() == context1);
CheckContextId(bound_function1, 1);
- CHECK(bound_function2->CreationContext() == context1);
+ CHECK(bound_function2->GetCreationContext().ToLocalChecked() == context1);
CheckContextId(bound_function2, 1);
}
-
void HasOwnPropertyIndexedPropertyGetter(
uint32_t index,
const v8::PropertyCallbackInfo<v8::Value>& info) {
@@ -21435,7 +21378,7 @@ class RegExpInterruptTest {
v8::Local<v8::String> string =
v8::Local<v8::String>::New(isolate, instance->string_handle_);
CHECK(string->CanMakeExternal());
- CHECK(string->MakeExternal(&one_byte_string_resource));
+ string->MakeExternal(&one_byte_string_resource);
}
static void MakeSubjectTwoByteExternal(v8::Isolate* isolate, void* data) {
@@ -21445,7 +21388,7 @@ class RegExpInterruptTest {
v8::Local<v8::String> string =
v8::Local<v8::String>::New(isolate, instance->string_handle_);
CHECK(string->CanMakeExternal());
- CHECK(string->MakeExternal(&two_byte_string_resource));
+ string->MakeExternal(&two_byte_string_resource);
}
static void ReenterIrregexp(v8::Isolate* isolate, void* data) {
@@ -23184,7 +23127,7 @@ TEST(ScriptNameAndLineNumber) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
const char* url = "http://www.foo.com/foo.js";
- v8::ScriptOrigin origin(v8_str(url), 13, 0);
+ v8::ScriptOrigin origin(isolate, v8_str(url), 13, 0);
v8::ScriptCompiler::Source script_source(v8_str("var foo;"), origin);
Local<Script> script =
@@ -23202,7 +23145,7 @@ TEST(ScriptPositionInfo) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
v8::HandleScope scope(isolate);
const char* url = "http://www.foo.com/foo.js";
- v8::ScriptOrigin origin(v8_str(url), 13, 0);
+ v8::ScriptOrigin origin(isolate, v8_str(url), 13, 0);
v8::ScriptCompiler::Source script_source(v8_str("var foo;\n"
"var bar;\n"
"var fisk = foo + bar;\n"),
@@ -23594,7 +23537,7 @@ void RunStreamingTest(const char** chunks, v8::ScriptType type,
// Possible errors are only produced while compiling.
CHECK(!try_catch.HasCaught());
- v8::ScriptOrigin origin(v8_str("http://foo.com"), 0, 0, false, -1,
+ v8::ScriptOrigin origin(isolate, v8_str("http://foo.com"), 0, 0, false, -1,
v8::Local<v8::Value>(), false, false,
type == v8::ScriptType::kModule);
@@ -23904,7 +23847,7 @@ TEST(StreamingWithDebuggingEnabledLate) {
CHECK(!try_catch.HasCaught());
- v8::ScriptOrigin origin(v8_str("http://foo.com"));
+ v8::ScriptOrigin origin(isolate, v8_str("http://foo.com"));
char* full_source = TestSourceStream::FullSourceString(chunks);
EnableDebugger(isolate);
@@ -24013,7 +23956,7 @@ TEST(StreamingWithHarmonyScopes) {
// independent way, so the error is not detected).
CHECK(!try_catch.HasCaught());
- v8::ScriptOrigin origin(v8_str("http://foo.com"));
+ v8::ScriptOrigin origin(isolate, v8_str("http://foo.com"));
char* full_source = TestSourceStream::FullSourceString(chunks);
v8::Local<Script> script =
v8::ScriptCompiler::Compile(env.local(), &source, v8_str(full_source),
@@ -24044,7 +23987,7 @@ TEST(CodeCache) {
v8::Local<v8::Context> context = v8::Context::New(isolate1);
v8::Context::Scope cscope(context);
v8::Local<v8::String> source_string = v8_str(source);
- v8::ScriptOrigin script_origin(v8_str(origin));
+ v8::ScriptOrigin script_origin(isolate1, v8_str(origin));
v8::ScriptCompiler::Source source(source_string, script_origin);
v8::ScriptCompiler::CompileOptions option =
v8::ScriptCompiler::kNoCompileOptions;
@@ -24061,7 +24004,7 @@ TEST(CodeCache) {
v8::Local<v8::Context> context = v8::Context::New(isolate2);
v8::Context::Scope cscope(context);
v8::Local<v8::String> source_string = v8_str(source);
- v8::ScriptOrigin script_origin(v8_str(origin));
+ v8::ScriptOrigin script_origin(isolate2, v8_str(origin));
v8::ScriptCompiler::Source source(source_string, script_origin, cache);
v8::ScriptCompiler::CompileOptions option =
v8::ScriptCompiler::kConsumeCodeCache;
@@ -24118,8 +24061,8 @@ Local<Module> CompileAndInstantiateModule(v8::Isolate* isolate,
const char* resource_name,
const char* source) {
Local<String> source_string = v8_str(source);
- v8::ScriptOrigin script_origin(v8_str(resource_name), 0, 0, false, -1,
- Local<v8::Value>(), false, false, true);
+ v8::ScriptOrigin script_origin(isolate, v8_str(resource_name), 0, 0, false,
+ -1, Local<v8::Value>(), false, false, true);
v8::ScriptCompiler::Source script_compiler_source(source_string,
script_origin);
Local<Module> module =
@@ -24147,8 +24090,8 @@ Local<Module> CompileAndInstantiateModuleFromCache(
v8::Isolate* isolate, Local<Context> context, const char* resource_name,
const char* source, v8::ScriptCompiler::CachedData* cache) {
Local<String> source_string = v8_str(source);
- v8::ScriptOrigin script_origin(v8_str(resource_name), 0, 0, false, -1,
- Local<v8::Value>(), false, false, true);
+ v8::ScriptOrigin script_origin(isolate, v8_str(resource_name), 0, 0, false,
+ -1, Local<v8::Value>(), false, false, true);
v8::ScriptCompiler::Source script_compiler_source(source_string,
script_origin, cache);
@@ -24512,8 +24455,8 @@ TEST(ImportFromSyntheticModule) {
Local<String> source_text = v8_str(
"import {test_export} from './synthetic.module';"
"(function() { return test_export; })();");
- v8::ScriptOrigin origin(url, 0, 0, false, -1, Local<v8::Value>(), false,
- false, true);
+ v8::ScriptOrigin origin(isolate, url, 0, 0, false, -1, Local<v8::Value>(),
+ false, false, true);
v8::ScriptCompiler::Source source(source_text, origin);
Local<Module> module =
v8::ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
@@ -24542,8 +24485,8 @@ TEST(ImportFromSyntheticModuleThrow) {
Local<String> source_text = v8_str(
"import {test_export} from './synthetic.module';"
"(function() { return test_export; })();");
- v8::ScriptOrigin origin(url, 0, 0, false, -1, Local<v8::Value>(), false,
- false, true);
+ v8::ScriptOrigin origin(isolate, url, 0, 0, false, -1, Local<v8::Value>(),
+ false, false, true);
v8::ScriptCompiler::Source source(source_text, origin);
Local<Module> module =
v8::ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
@@ -24620,7 +24563,7 @@ TEST(CodeCacheModuleScriptMismatch) {
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope cscope(context);
- v8::ScriptOrigin script_origin(v8_str(origin));
+ v8::ScriptOrigin script_origin(isolate, v8_str(origin));
v8::ScriptCompiler::Source script_compiler_source(v8_str(source),
script_origin, cache);
@@ -24659,7 +24602,7 @@ TEST(CodeCacheScriptModuleMismatch) {
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope cscope(context);
v8::Local<v8::String> source_string = v8_str(source);
- v8::ScriptOrigin script_origin(v8_str(origin));
+ v8::ScriptOrigin script_origin(isolate, v8_str(origin));
v8::ScriptCompiler::Source source(source_string, script_origin);
v8::ScriptCompiler::CompileOptions option =
v8::ScriptCompiler::kNoCompileOptions;
@@ -24680,7 +24623,7 @@ TEST(CodeCacheScriptModuleMismatch) {
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope cscope(context);
- v8::ScriptOrigin script_origin(v8_str(origin), 0, 0, false, -1,
+ v8::ScriptOrigin script_origin(isolate, v8_str(origin), 0, 0, false, -1,
Local<v8::Value>(), false, false, true);
v8::ScriptCompiler::Source script_compiler_source(v8_str(source),
script_origin, cache);
@@ -24723,8 +24666,8 @@ TEST(InvalidCodeCacheDataInCompileModule) {
new v8::ScriptCompiler::CachedData(data, length);
CHECK(!cached_data->rejected);
- v8::ScriptOrigin script_origin(origin, 0, 0, false, -1, Local<v8::Value>(),
- false, false, true);
+ v8::ScriptOrigin script_origin(isolate, origin, 0, 0, false, -1,
+ Local<v8::Value>(), false, false, true);
v8::ScriptCompiler::Source source(v8_str("42"), script_origin, cached_data);
v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
@@ -24750,12 +24693,13 @@ void TestInvalidCacheData(v8::ScriptCompiler::CompileOptions option) {
const char* garbage = "garbage garbage garbage garbage garbage garbage";
const uint8_t* data = reinterpret_cast<const uint8_t*>(garbage);
int length = 16;
+ v8::Isolate* isolate = CcTest::isolate();
v8::ScriptCompiler::CachedData* cached_data =
new v8::ScriptCompiler::CachedData(data, length);
CHECK(!cached_data->rejected);
- v8::ScriptOrigin origin(v8_str("origin"));
+ v8::ScriptOrigin origin(isolate, v8_str("origin"));
v8::ScriptCompiler::Source source(v8_str("42"), origin, cached_data);
- v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
v8::Local<v8::Script> script =
v8::ScriptCompiler::Compile(context, &source, option).ToLocalChecked();
CHECK(cached_data->rejected);
@@ -24835,7 +24779,7 @@ TEST(ClassPrototypeCreationContext) {
Local<Object> result = Local<Object>::Cast(
CompileRun("'use strict'; class Example { }; Example.prototype"));
- CHECK(env.local() == result->CreationContext());
+ CHECK(env.local() == result->GetCreationContext().ToLocalChecked());
}
@@ -25614,7 +25558,7 @@ TEST(ObjectTemplatePerContextIntrinsics) {
object->Get(env.local(), v8_str("values")).ToLocalChecked());
auto fn = i::Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(*values));
auto ctx = v8::Utils::OpenHandle(*env.local());
- CHECK_EQ(*fn->GetCreationContext(), *ctx);
+ CHECK_EQ(*(fn->GetCreationContext().ToHandleChecked()), *ctx);
{
LocalContext env2;
@@ -25630,7 +25574,7 @@ TEST(ObjectTemplatePerContextIntrinsics) {
object2->Get(env2.local(), v8_str("values")).ToLocalChecked());
auto fn2 = i::Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(*values2));
auto ctx2 = v8::Utils::OpenHandle(*env2.local());
- CHECK_EQ(*fn2->GetCreationContext(), *ctx2);
+ CHECK_EQ(*(fn2->GetCreationContext().ToHandleChecked()), *ctx2);
}
}
@@ -26093,21 +26037,26 @@ TEST(CorrectEnteredContext) {
object->ToString(currentContext.local()).ToLocalChecked();
}
+// For testing only, the host-defined options are provided entirely by the host
+// and have an arbitrary length. Use this constant here for testing that we get
+// the correct value during the tests.
+const int kCustomHostDefinedOptionsLengthForTesting = 7;
+
v8::MaybeLocal<v8::Promise> HostImportModuleDynamicallyCallbackResolve(
Local<Context> context, Local<v8::ScriptOrModule> referrer,
- Local<String> specifier) {
+ Local<String> specifier, Local<FixedArray> import_assertions) {
CHECK(!referrer.IsEmpty());
String::Utf8Value referrer_utf8(
context->GetIsolate(), Local<String>::Cast(referrer->GetResourceName()));
CHECK_EQ(0, strcmp("www.google.com", *referrer_utf8));
- CHECK(referrer->GetHostDefinedOptions()
- ->Get(context->GetIsolate(), 0)
- ->IsSymbol());
-
+ CHECK_EQ(referrer->GetHostDefinedOptions()->Length(),
+ kCustomHostDefinedOptionsLengthForTesting);
CHECK(!specifier.IsEmpty());
String::Utf8Value specifier_utf8(context->GetIsolate(), specifier);
CHECK_EQ(0, strcmp("index.js", *specifier_utf8));
+ CHECK_EQ(0, import_assertions->Length());
+
Local<v8::Promise::Resolver> resolver =
v8::Promise::Resolver::New(context).ToLocalChecked();
auto result = v8_str("hello world");
@@ -26119,7 +26068,6 @@ TEST(DynamicImport) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
-
isolate->SetHostImportModuleDynamicallyCallback(
HostImportModuleDynamicallyCallbackResolve);
@@ -26128,14 +26076,99 @@ TEST(DynamicImport) {
i::Handle<i::String> result(v8::Utils::OpenHandle(*v8_str("hello world")));
i::Handle<i::String> source(v8::Utils::OpenHandle(*v8_str("foo")));
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::Handle<i::FixedArray> options = i_isolate->factory()->NewFixedArray(1);
- i::Handle<i::Symbol> symbol = i_isolate->factory()->NewSymbol();
- options->set(0, *symbol);
i::Handle<i::Script> referrer = i_isolate->factory()->NewScript(source);
referrer->set_name(*url);
- referrer->set_host_defined_options(*options);
+ referrer->set_host_defined_options(*i_isolate->factory()->NewFixedArray(
+ kCustomHostDefinedOptionsLengthForTesting));
+ i::MaybeHandle<i::JSPromise> maybe_promise =
+ i_isolate->RunHostImportModuleDynamicallyCallback(
+ referrer, specifier, i::MaybeHandle<i::Object>());
+ i::Handle<i::JSPromise> promise = maybe_promise.ToHandleChecked();
+ isolate->PerformMicrotaskCheckpoint();
+ CHECK(result->Equals(i::String::cast(promise->result())));
+}
+
+v8::MaybeLocal<v8::Promise>
+HostImportModuleDynamicallyWithAssertionsCallbackResolve(
+ Local<Context> context, Local<v8::ScriptOrModule> referrer,
+ Local<String> specifier, Local<v8::FixedArray> import_assertions) {
+ CHECK(!referrer.IsEmpty());
+ String::Utf8Value referrer_utf8(
+ context->GetIsolate(), Local<String>::Cast(referrer->GetResourceName()));
+ CHECK_EQ(0, strcmp("www.google.com", *referrer_utf8));
+ CHECK_EQ(referrer->GetHostDefinedOptions()->Length(),
+ kCustomHostDefinedOptionsLengthForTesting);
+
+ CHECK(!specifier.IsEmpty());
+ String::Utf8Value specifier_utf8(context->GetIsolate(), specifier);
+ CHECK_EQ(0, strcmp("index.js", *specifier_utf8));
+
+ CHECK_EQ(8, import_assertions->Length());
+ constexpr int kAssertionEntrySizeForDynamicImport = 2;
+ for (int i = 0;
+ i < import_assertions->Length() / kAssertionEntrySizeForDynamicImport;
+ ++i) {
+ Local<String> assertion_key =
+ import_assertions
+ ->Get(context, (i * kAssertionEntrySizeForDynamicImport))
+ .As<Value>()
+ .As<String>();
+ Local<String> assertion_value =
+ import_assertions
+ ->Get(context, (i * kAssertionEntrySizeForDynamicImport) + 1)
+ .As<Value>()
+ .As<String>();
+ if (v8_str("a")->StrictEquals(assertion_key)) {
+ CHECK(v8_str("z")->StrictEquals(assertion_value));
+ } else if (v8_str("aa")->StrictEquals(assertion_key)) {
+ CHECK(v8_str("x")->StrictEquals(assertion_value));
+ } else if (v8_str("b")->StrictEquals(assertion_key)) {
+ CHECK(v8_str("w")->StrictEquals(assertion_value));
+ } else if (v8_str("c")->StrictEquals(assertion_key)) {
+ CHECK(v8_str("y")->StrictEquals(assertion_value));
+ } else {
+ UNREACHABLE();
+ }
+ }
+
+ Local<v8::Promise::Resolver> resolver =
+ v8::Promise::Resolver::New(context).ToLocalChecked();
+ auto result = v8_str("hello world");
+ resolver->Resolve(context, result).ToChecked();
+ return resolver->GetPromise();
+}
+
+TEST(DynamicImportWithAssertions) {
+ FLAG_SCOPE_EXTERNAL(harmony_import_assertions);
+
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ isolate->SetHostImportModuleDynamicallyCallback(
+ HostImportModuleDynamicallyWithAssertionsCallbackResolve);
+
+ i::Handle<i::String> url(v8::Utils::OpenHandle(*v8_str("www.google.com")));
+ i::Handle<i::Object> specifier(v8::Utils::OpenHandle(*v8_str("index.js")));
+ i::Handle<i::String> result(v8::Utils::OpenHandle(*v8_str("hello world")));
+ i::Handle<i::String> source(v8::Utils::OpenHandle(*v8_str("foo")));
+ v8::Local<v8::Object> import_assertions =
+ CompileRun(
+ "var arg = { assert: { 'b': 'w', aa: 'x', c: 'y', a: 'z'} };"
+ "arg;")
+ ->ToObject(context.local())
+ .ToLocalChecked();
+
+ i::Handle<i::Object> i_import_assertions =
+ v8::Utils::OpenHandle(*import_assertions);
+
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::Script> referrer = i_isolate->factory()->NewScript(source);
+ referrer->set_name(*url);
+ referrer->set_host_defined_options(*i_isolate->factory()->NewFixedArray(
+ kCustomHostDefinedOptionsLengthForTesting));
i::MaybeHandle<i::JSPromise> maybe_promise =
- i_isolate->RunHostImportModuleDynamicallyCallback(referrer, specifier);
+ i_isolate->RunHostImportModuleDynamicallyCallback(referrer, specifier,
+ i_import_assertions);
i::Handle<i::JSPromise> promise = maybe_promise.ToHandleChecked();
isolate->PerformMicrotaskCheckpoint();
CHECK(result->Equals(i::String::cast(promise->result())));
@@ -26159,8 +26192,8 @@ TEST(ImportMeta) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
Local<String> url = v8_str("www.google.com");
Local<String> source_text = v8_str("import.meta;");
- v8::ScriptOrigin origin(url, 0, 0, false, -1, Local<v8::Value>(), false,
- false, true);
+ v8::ScriptOrigin origin(isolate, url, 0, 0, false, -1, Local<v8::Value>(),
+ false, false, true);
v8::ScriptCompiler::Source source(source_text, origin);
Local<Module> module =
v8::ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
@@ -26207,8 +26240,8 @@ TEST(ImportMetaThrowUnhandled) {
Local<String> url = v8_str("www.google.com");
Local<String> source_text =
v8_str("export default function() { return import.meta }");
- v8::ScriptOrigin origin(url, 0, 0, false, -1, Local<v8::Value>(), false,
- false, true);
+ v8::ScriptOrigin origin(isolate, url, 0, 0, false, -1, Local<v8::Value>(),
+ false, false, true);
v8::ScriptCompiler::Source source(source_text, origin);
Local<Module> module =
v8::ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
@@ -26252,8 +26285,8 @@ TEST(ImportMetaThrowHandled) {
return false;
}
)javascript");
- v8::ScriptOrigin origin(url, 0, 0, false, -1, Local<v8::Value>(), false,
- false, true);
+ v8::ScriptOrigin origin(isolate, url, 0, 0, false, -1, Local<v8::Value>(),
+ false, false, true);
v8::ScriptCompiler::Source source(source_text, origin);
Local<Module> module =
v8::ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
@@ -26285,8 +26318,8 @@ TEST(GetModuleNamespace) {
Local<String> url = v8_str("www.google.com");
Local<String> source_text = v8_str("export default 5; export const a = 10;");
- v8::ScriptOrigin origin(url, 0, 0, false, -1, Local<v8::Value>(), false,
- false, true);
+ v8::ScriptOrigin origin(isolate, url, 0, 0, false, -1, Local<v8::Value>(),
+ false, false, true);
v8::ScriptCompiler::Source source(source_text, origin);
Local<Module> module =
v8::ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
@@ -26312,8 +26345,8 @@ TEST(ModuleGetUnboundModuleScript) {
Local<String> url = v8_str("www.google.com");
Local<String> source_text = v8_str("export default 5; export const a = 10;");
- v8::ScriptOrigin origin(url, 0, 0, false, -1, Local<v8::Value>(), false,
- false, true);
+ v8::ScriptOrigin origin(isolate, url, 0, 0, false, -1, Local<v8::Value>(),
+ false, false, true);
v8::ScriptCompiler::Source source(source_text, origin);
Local<Module> module =
v8::ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
@@ -26339,8 +26372,8 @@ TEST(ModuleScriptId) {
Local<String> url = v8_str("www.google.com");
Local<String> source_text = v8_str("export default 5; export const a = 10;");
- v8::ScriptOrigin origin(url, 0, 0, false, -1, Local<v8::Value>(), false,
- false, true);
+ v8::ScriptOrigin origin(isolate, url, 0, 0, false, -1, Local<v8::Value>(),
+ false, false, true);
v8::ScriptCompiler::Source source(source_text, origin);
Local<Module> module =
v8::ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
@@ -26360,8 +26393,8 @@ TEST(ModuleIsSourceTextModule) {
Local<String> url = v8_str("www.google.com");
Local<String> source_text = v8_str("export default 5; export const a = 10;");
- v8::ScriptOrigin origin(url, 0, 0, false, -1, Local<v8::Value>(), false,
- false, true);
+ v8::ScriptOrigin origin(isolate, url, 0, 0, false, -1, Local<v8::Value>(),
+ false, false, true);
v8::ScriptCompiler::Source source(source_text, origin);
Local<Module> module =
v8::ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
@@ -27492,135 +27525,19 @@ UNINITIALIZED_TEST(NestedIsolates) {
#ifndef V8_LITE_MODE
namespace {
-template <typename T>
-struct ConvertJSValue {
- static Maybe<T> Get(v8::Local<v8::Value> value,
- v8::Local<v8::Context> context);
-};
-
-template <>
-struct ConvertJSValue<int32_t> {
- static Maybe<int32_t> Get(v8::Local<v8::Value> value,
- v8::Local<v8::Context> context) {
- return value->Int32Value(context);
- }
-};
-
-template <>
-struct ConvertJSValue<uint32_t> {
- static Maybe<uint32_t> Get(v8::Local<v8::Value> value,
- v8::Local<v8::Context> context) {
- return value->Uint32Value(context);
- }
-};
-
-// NaNs and +/-Infinity should be 0, otherwise (modulo 2^64) - 2^63.
-// Step 8 - 12 of https://heycam.github.io/webidl/#abstract-opdef-converttoint
-// The int64_t and uint64_t implementations below are copied from Blink:
-// https://source.chromium.org/chromium/chromium/src/+/master:third_party/blink/renderer/bindings/core/v8/v8_binding_for_core.h;l=249?q=doubletointeger&sq=&ss=chromium%2Fchromium%2Fsrc
-template <>
-struct ConvertJSValue<int64_t> {
- static Maybe<int64_t> Get(v8::Local<v8::Value> value,
- v8::Local<v8::Context> context) {
- Maybe<double> double_value = value->NumberValue(context);
- if (!double_value.IsJust()) {
- return v8::Nothing<int64_t>();
- }
- double result = double_value.ToChecked();
- if (std::isinf(result) || std::isnan(result)) {
- return v8::Just(int64_t(0));
- }
- result = trunc(result);
-
- constexpr uint64_t kMaxULL = std::numeric_limits<uint64_t>::max();
-
- // -2^{64} < fmod_value < 2^{64}.
- double fmod_value = fmod(result, kMaxULL + 1.0);
- if (fmod_value >= 0) {
- if (fmod_value < pow(2, 63)) {
- // 0 <= fmod_value < 2^{63}.
- // 0 <= value < 2^{63}. This cast causes no loss.
- return v8::Just(static_cast<int64_t>(fmod_value));
- } else {
- // 2^{63} <= fmod_value < 2^{64}.
- // 2^{63} <= value < 2^{64}. This cast causes no loss.
- return v8::Just(static_cast<int64_t>(fmod_value - pow(2, 64)));
- }
- }
- // -2^{64} < fmod_value < 0.
- // 0 < fmod_value_uint64 < 2^{64}. This cast causes no loss.
- uint64_t fmod_value_uint64 = static_cast<uint64_t>(-fmod_value);
- // -1 < (kMaxULL - fmod_value_uint64) < 2^{64} - 1.
- // 0 < value < 2^{64}.
- return v8::Just(static_cast<int64_t>(kMaxULL - fmod_value_uint64 + 1));
- }
-};
-
-template <>
-struct ConvertJSValue<uint64_t> {
- static Maybe<uint64_t> Get(v8::Local<v8::Value> value,
- v8::Local<v8::Context> context) {
- Maybe<double> double_value = value->NumberValue(context);
- if (!double_value.IsJust()) {
- return v8::Nothing<uint64_t>();
- }
- double result = double_value.ToChecked();
- if (std::isinf(result) || std::isnan(result)) {
- return v8::Just(uint64_t(0));
- }
- result = trunc(result);
-
- constexpr uint64_t kMaxULL = std::numeric_limits<uint64_t>::max();
-
- // -2^{64} < fmod_value < 2^{64}.
- double fmod_value = fmod(result, kMaxULL + 1.0);
- if (fmod_value >= 0) {
- return v8::Just(static_cast<uint64_t>(fmod_value));
- }
- // -2^{64} < fmod_value < 0.
- // 0 < fmod_value_uint64 < 2^{64}. This cast causes no loss.
- uint64_t fmod_value_uint64 = static_cast<uint64_t>(-fmod_value);
- // -1 < (kMaxULL - fmod_value_uint64) < 2^{64} - 1.
- // 0 < value < 2^{64}.
- return v8::Just(static_cast<uint64_t>(kMaxULL - fmod_value_uint64 + 1));
- }
-};
-
-template <>
-struct ConvertJSValue<float> {
- static Maybe<float> Get(v8::Local<v8::Value> value,
- v8::Local<v8::Context> context) {
- Maybe<double> val = value->NumberValue(context);
- if (val.IsNothing()) return v8::Nothing<float>();
- return v8::Just(static_cast<float>(val.ToChecked()));
- }
-};
-
-template <>
-struct ConvertJSValue<double> {
- static Maybe<double> Get(v8::Local<v8::Value> value,
- v8::Local<v8::Context> context) {
- return value->NumberValue(context);
- }
-};
-template <>
-struct ConvertJSValue<bool> {
- static Maybe<bool> Get(v8::Local<v8::Value> value,
- v8::Local<v8::Context> context) {
- return v8::Just<bool>(value->BooleanValue(CcTest::isolate()));
- }
-};
-
-template <typename Value, typename Impl>
+template <typename Value, typename Impl, typename Ret>
struct BasicApiChecker {
- static void FastCallback(v8::ApiObject receiver, Value argument,
- v8::FastApiCallbackOptions& options) {
- Impl::FastCallback(receiver, argument, options);
+ static Ret FastCallback(v8::ApiObject receiver, Value argument,
+ v8::FastApiCallbackOptions& options) {
+ const v8::Value* data = reinterpret_cast<const v8::Value*>(&options.data);
+ CHECK(data->IsNumber());
+ CHECK_EQ(reinterpret_cast<const v8::Number*>(data)->Value(), 42.0);
+ return Impl::FastCallback(receiver, argument, options);
}
- static void FastCallbackNoFallback(v8::ApiObject receiver, Value argument) {
- v8::FastApiCallbackOptions options;
- Impl::FastCallback(receiver, argument, options);
+ static Ret FastCallbackNoFallback(v8::ApiObject receiver, Value argument) {
+ v8::FastApiCallbackOptions options = {false, {0}};
+ return Impl::FastCallback(receiver, argument, options);
}
static void SlowCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
Impl::SlowCallback(info);
@@ -27644,7 +27561,7 @@ enum class FallbackPolicy {
};
template <typename T>
-struct ApiNumberChecker : BasicApiChecker<T, ApiNumberChecker<T>> {
+struct ApiNumberChecker : BasicApiChecker<T, ApiNumberChecker<T>, void> {
explicit ApiNumberChecker(
T value, Behavior raise_exception = Behavior::kNoException,
FallbackPolicy write_to_fallback = FallbackPolicy::kDontRequestFallback,
@@ -27704,7 +27621,7 @@ struct ApiNumberChecker : BasicApiChecker<T, ApiNumberChecker<T>> {
};
struct UnexpectedObjectChecker
- : BasicApiChecker<v8::ApiObject, UnexpectedObjectChecker> {
+ : BasicApiChecker<v8::ApiObject, UnexpectedObjectChecker, void> {
static void FastCallback(v8::ApiObject receiver, v8::ApiObject argument,
v8::FastApiCallbackOptions& options) {
v8::Object* receiver_obj = reinterpret_cast<v8::Object*>(&receiver);
@@ -27732,26 +27649,27 @@ struct UnexpectedObjectChecker
}
};
-template <typename Value, typename Impl>
+template <typename Value, typename Impl, typename Ret>
bool SetupTest(v8::Local<v8::Value> initial_value, LocalContext* env,
- BasicApiChecker<Value, Impl>* checker, const char* source_code,
- bool supports_fallback = true, bool accept_any_receiver = true) {
+ BasicApiChecker<Value, Impl, Ret>* checker,
+ const char* source_code, bool supports_fallback = true,
+ bool accept_any_receiver = true) {
v8::Isolate* isolate = CcTest::isolate();
v8::TryCatch try_catch(isolate);
v8::CFunction c_func;
if (supports_fallback) {
- c_func = v8::CFunction::MakeWithFallbackSupport(
- BasicApiChecker<Value, Impl>::FastCallback);
+ c_func =
+ v8::CFunction::Make(BasicApiChecker<Value, Impl, Ret>::FastCallback);
} else {
c_func = v8::CFunction::Make(
- BasicApiChecker<Value, Impl>::FastCallbackNoFallback);
+ BasicApiChecker<Value, Impl, Ret>::FastCallbackNoFallback);
}
CHECK_EQ(c_func.ArgumentInfo(0).GetType(), v8::CTypeInfo::Type::kV8Value);
Local<v8::FunctionTemplate> checker_templ = v8::FunctionTemplate::New(
- isolate, BasicApiChecker<Value, Impl>::SlowCallback,
- v8::Local<v8::Value>(), v8::Local<v8::Signature>(), 1,
+ isolate, BasicApiChecker<Value, Impl, Ret>::SlowCallback,
+ v8::Number::New(isolate, 42), v8::Local<v8::Signature>(), 1,
v8::ConstructorBehavior::kAllow, v8::SideEffectType::kHasSideEffect,
&c_func);
if (!accept_any_receiver) {
@@ -27776,10 +27694,7 @@ bool SetupTest(v8::Local<v8::Value> initial_value, LocalContext* env,
->Global()
->Set(env->local(), v8_str("value"), initial_value)
.FromJust());
- v8::Local<v8::Value> result = CompileRun(source_code);
- if (!try_catch.HasCaught()) {
- CHECK(result->IsUndefined());
- }
+ USE(CompileRun(source_code));
return try_catch.HasCaught();
}
@@ -27820,7 +27735,7 @@ void CallAndCheck(
ApiNumberChecker<T> checker(expected_value, raise_exception,
write_to_fallback);
- bool has_caught = SetupTest<T, ApiNumberChecker<T>>(
+ bool has_caught = SetupTest<T, ApiNumberChecker<T>, void>(
initial_value, &env, &checker,
"function func(arg) { return receiver.api_func(arg); }"
"%PrepareFunctionForOptimization(func);"
@@ -27829,9 +27744,12 @@ void CallAndCheck(
v8::Isolate* isolate = CcTest::isolate();
v8::TryCatch try_catch(isolate);
- CompileRun(
+ v8::Local<v8::Value> result = CompileRun(
"%OptimizeFunctionOnNextCall(func);"
"func(value);");
+ if (!try_catch.HasCaught()) {
+ CHECK(result->IsUndefined());
+ }
CHECK_EQ(expected_behavior == Behavior::kException, has_caught);
CHECK_EQ(expected_path == ApiCheckerResult::kSlowCalled,
@@ -27840,6 +27758,7 @@ void CallAndCheck(
!checker.DidCallSlow());
if (expected_path & ApiCheckerResult::kSlowCalled) {
+ CHECK(checker.DidCallSlow());
if (expected_behavior != Behavior::kException) {
CheckEqual(checker.slow_value_.ToChecked(), expected_value);
}
@@ -27850,6 +27769,57 @@ void CallAndCheck(
}
}
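+// Returns its argument unchanged from the fast callback and echoes info[0]
+// from the slow callback, so CheckFastReturnValue below can verify that C
+// return values from fast API calls round-trip back to JS correctly.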
+template <typename T>
+struct ReturnValueChecker : BasicApiChecker<T, ReturnValueChecker<T>, T> {
+ static T FastCallback(v8::ApiObject receiver, T arg,
+ v8::FastApiCallbackOptions& options) {
+ v8::Object* receiver_obj = reinterpret_cast<v8::Object*>(&receiver);
+ ReturnValueChecker<T>* receiver_ptr =
+ GetInternalField<ReturnValueChecker<T>, kV8WrapperObjectIndex>(
+ receiver_obj);
+ receiver_ptr->result_ |= ApiCheckerResult::kFastCalled;
+ return arg;
+ }
+
+ static void SlowCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ v8::Object* receiver_obj = v8::Object::Cast(*info.Holder());
+ ReturnValueChecker<T>* receiver_ptr =
+ GetInternalField<ReturnValueChecker<T>, kV8WrapperObjectIndex>(
+ receiver_obj);
+ receiver_ptr->result_ |= ApiCheckerResult::kSlowCalled;
+ info.GetReturnValue().Set(info[0]);
+ }
+};
+
+template <typename T>
+void CheckFastReturnValue(v8::Local<v8::Value> expected_value,
+ ApiCheckerResultFlags expected_path) {
+ LocalContext env;
+ ReturnValueChecker<T> checker{};
+
+ bool has_caught = SetupTest<T, ReturnValueChecker<T>, T>(
+ expected_value, &env, &checker,
+ "function func(arg) { return receiver.api_func(arg); }"
+ "%PrepareFunctionForOptimization(func);"
+ "func(value);");
+ CHECK(!has_caught);
+ checker.result_ = ApiCheckerResult::kNotCalled;
+
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::TryCatch try_catch(isolate);
+ v8::Local<v8::Value> result = CompileRun(
+ "%OptimizeFunctionOnNextCall(func);"
+ "func(value);");
+
+ CHECK_EQ(expected_path == ApiCheckerResult::kSlowCalled,
+ !checker.DidCallFast());
+ CHECK_EQ(expected_path == ApiCheckerResult::kFastCalled,
+ !checker.DidCallSlow());
+ CHECK(checker.DidCallFast() || checker.DidCallSlow());
+
+ CHECK(result->SameValue(expected_value));
+}
+
void CallAndDeopt() {
LocalContext env;
v8::Local<v8::Value> initial_value(v8_num(42));
@@ -27969,7 +27939,7 @@ void CallWithUnexpectedObjectType(v8::Local<v8::Value> receiver) {
class TestCFunctionInfo : public v8::CFunctionInfo {
const v8::CTypeInfo& ReturnInfo() const override {
static v8::CTypeInfo return_info =
- v8::CTypeInfo::FromCType(v8::CTypeInfo::Type::kVoid);
+ v8::CTypeInfo(v8::CTypeInfo::Type::kVoid);
return return_info;
}
@@ -27977,9 +27947,8 @@ class TestCFunctionInfo : public v8::CFunctionInfo {
const v8::CTypeInfo& ArgumentInfo(unsigned int index) const override {
static v8::CTypeInfo type_info0 =
- v8::CTypeInfo::FromCType(v8::CTypeInfo::Type::kV8Value);
- static v8::CTypeInfo type_info1 =
- v8::CTypeInfo::FromCType(v8::CTypeInfo::Type::kBool);
+ v8::CTypeInfo(v8::CTypeInfo::Type::kV8Value);
+ static v8::CTypeInfo type_info1 = v8::CTypeInfo(v8::CTypeInfo::Type::kBool);
switch (index) {
case 0:
return type_info0;
@@ -27989,6 +27958,8 @@ class TestCFunctionInfo : public v8::CFunctionInfo {
UNREACHABLE();
}
}
+
+ bool HasOptions() const override { return false; }
};
void CheckDynamicTypeInfo() {
@@ -28434,6 +28405,48 @@ TEST(FastApiCalls) {
CallAndCheck<bool>(true, Behavior::kNoException,
ApiCheckerResult::kFastCalled, v8::Object::New(isolate));
+ // Test return values
+ CheckFastReturnValue<bool>(v8::Boolean::New(isolate, true),
+ ApiCheckerResult::kFastCalled);
+ CheckFastReturnValue<bool>(v8::Boolean::New(isolate, false),
+ ApiCheckerResult::kFastCalled);
+
+ CheckFastReturnValue<int32_t>(v8_num(0), ApiCheckerResult::kFastCalled);
+ CheckFastReturnValue<int32_t>(v8_num(std::numeric_limits<int32_t>::min()),
+ ApiCheckerResult::kFastCalled);
+ CheckFastReturnValue<int32_t>(v8_num(std::numeric_limits<int32_t>::max()),
+ ApiCheckerResult::kFastCalled);
+
+ CheckFastReturnValue<uint32_t>(v8_num(0), ApiCheckerResult::kFastCalled);
+ CheckFastReturnValue<uint32_t>(v8_num(std::numeric_limits<uint32_t>::min()),
+ ApiCheckerResult::kFastCalled);
+ CheckFastReturnValue<uint32_t>(v8_num(std::numeric_limits<uint32_t>::max()),
+ ApiCheckerResult::kFastCalled);
+
+#ifdef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
+ CheckFastReturnValue<float>(v8_num(0), ApiCheckerResult::kFastCalled);
+ CheckFastReturnValue<float>(v8_num(-0.0), ApiCheckerResult::kFastCalled);
+ CheckFastReturnValue<float>(v8_num(std::numeric_limits<float>::quiet_NaN()),
+ ApiCheckerResult::kFastCalled);
+ CheckFastReturnValue<float>(v8_num(std::numeric_limits<float>::infinity()),
+ ApiCheckerResult::kFastCalled);
+ CheckFastReturnValue<float>(v8_num(std::numeric_limits<float>::min()),
+ ApiCheckerResult::kFastCalled);
+ CheckFastReturnValue<float>(v8_num(std::numeric_limits<float>::max()),
+ ApiCheckerResult::kFastCalled);
+
+ CheckFastReturnValue<double>(v8_num(0), ApiCheckerResult::kFastCalled);
+ CheckFastReturnValue<double>(v8_num(-0.0), ApiCheckerResult::kFastCalled);
+ CheckFastReturnValue<double>(v8_num(std::numeric_limits<double>::quiet_NaN()),
+ ApiCheckerResult::kFastCalled);
+ CheckFastReturnValue<double>(v8_num(std::numeric_limits<double>::infinity()),
+ ApiCheckerResult::kFastCalled);
+ CheckFastReturnValue<double>(v8_num(std::numeric_limits<double>::min()),
+ ApiCheckerResult::kFastCalled);
+ CheckFastReturnValue<double>(v8_num(std::numeric_limits<double>::max()),
+ ApiCheckerResult::kFastCalled);
+#endif // V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
+
// Check for the deopt loop protection
CallAndDeopt();
diff --git a/deps/v8/test/cctest/test-api.h b/deps/v8/test/cctest/test-api.h
index 7a5a9b64ca..df57ea6048 100644
--- a/deps/v8/test/cctest/test-api.h
+++ b/deps/v8/test/cctest/test-api.h
@@ -52,4 +52,135 @@ static void CheckInternalFieldsAreZero(v8::Local<T> value) {
}
}
+template <typename T>
+struct ConvertJSValue {
+ static v8::Maybe<T> Get(v8::Local<v8::Value> value,
+ v8::Local<v8::Context> context);
+};
+
+template <>
+struct ConvertJSValue<int32_t> {
+ static v8::Maybe<int32_t> Get(v8::Local<v8::Value> value,
+ v8::Local<v8::Context> context) {
+ return value->Int32Value(context);
+ }
+};
+
+template <>
+struct ConvertJSValue<uint32_t> {
+ static v8::Maybe<uint32_t> Get(v8::Local<v8::Value> value,
+ v8::Local<v8::Context> context) {
+ return value->Uint32Value(context);
+ }
+};
+
+// NaNs and +/-Infinity convert to 0; all other values are truncated and
+// reduced modulo 2^64, then mapped into [-2^63, 2^63) for the signed case.
+// See steps 8 - 12 of https://heycam.github.io/webidl/#abstract-opdef-converttoint
+// The int64_t and uint64_t implementations below are copied from Blink:
+// https://source.chromium.org/chromium/chromium/src/+/master:third_party/blink/renderer/bindings/core/v8/v8_binding_for_core.h;l=249?q=doubletointeger&sq=&ss=chromium%2Fchromium%2Fsrc
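+// Illustrative examples for the int64_t case: 2^63 wraps to -2^63, 2^64
+// wraps to 0, and -1 maps back to -1 after the modulo/re-interpretation below.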
+template <>
+struct ConvertJSValue<int64_t> {
+ static v8::Maybe<int64_t> Get(v8::Local<v8::Value> value,
+ v8::Local<v8::Context> context) {
+ v8::Maybe<double> double_value = value->NumberValue(context);
+ if (!double_value.IsJust()) {
+ return v8::Nothing<int64_t>();
+ }
+ double result = double_value.ToChecked();
+ if (std::isinf(result) || std::isnan(result)) {
+ return v8::Just(int64_t(0));
+ }
+ result = trunc(result);
+
+ constexpr uint64_t kMaxULL = std::numeric_limits<uint64_t>::max();
+
+ // -2^{64} < fmod_value < 2^{64}.
+ double fmod_value = fmod(result, kMaxULL + 1.0);
+ if (fmod_value >= 0) {
+ if (fmod_value < pow(2, 63)) {
+ // 0 <= fmod_value < 2^{63}.
+ // 0 <= value < 2^{63}. This cast causes no loss.
+ return v8::Just(static_cast<int64_t>(fmod_value));
+ } else {
+ // 2^{63} <= fmod_value < 2^{64}.
+ // 2^{63} <= value < 2^{64}. This cast causes no loss.
+ return v8::Just(static_cast<int64_t>(fmod_value - pow(2, 64)));
+ }
+ }
+ // -2^{64} < fmod_value < 0.
+ // 0 < fmod_value_uint64 < 2^{64}. This cast causes no loss.
+ uint64_t fmod_value_uint64 = static_cast<uint64_t>(-fmod_value);
+ // -1 < (kMaxULL - fmod_value_uint64) < 2^{64} - 1.
+ // 0 < value < 2^{64}.
+ return v8::Just(static_cast<int64_t>(kMaxULL - fmod_value_uint64 + 1));
+ }
+};
+
+template <>
+struct ConvertJSValue<uint64_t> {
+ static v8::Maybe<uint64_t> Get(v8::Local<v8::Value> value,
+ v8::Local<v8::Context> context) {
+ v8::Maybe<double> double_value = value->NumberValue(context);
+ if (!double_value.IsJust()) {
+ return v8::Nothing<uint64_t>();
+ }
+ double result = double_value.ToChecked();
+ if (std::isinf(result) || std::isnan(result)) {
+ return v8::Just(uint64_t(0));
+ }
+ result = trunc(result);
+
+ constexpr uint64_t kMaxULL = std::numeric_limits<uint64_t>::max();
+
+ // -2^{64} < fmod_value < 2^{64}.
+ double fmod_value = fmod(result, kMaxULL + 1.0);
+ if (fmod_value >= 0) {
+ return v8::Just(static_cast<uint64_t>(fmod_value));
+ }
+ // -2^{64} < fmod_value < 0.
+ // 0 < fmod_value_uint64 < 2^{64}. This cast causes no loss.
+ uint64_t fmod_value_uint64 = static_cast<uint64_t>(-fmod_value);
+ // -1 < (kMaxULL - fmod_value_uint64) < 2^{64} - 1.
+ // 0 < value < 2^{64}.
+ return v8::Just(static_cast<uint64_t>(kMaxULL - fmod_value_uint64 + 1));
+ }
+};
+
+template <>
+struct ConvertJSValue<v8::BigInt> {
+ static v8::Maybe<v8::Local<v8::BigInt>> Get(v8::Local<v8::Value> value,
+ v8::Local<v8::Context> context) {
+ if (value->IsBigInt()) {
+ return v8::Just(value.As<v8::BigInt>());
+ }
+ return v8::Nothing<v8::Local<v8::BigInt>>();
+ }
+};
+
+template <>
+struct ConvertJSValue<float> {
+ static v8::Maybe<float> Get(v8::Local<v8::Value> value,
+ v8::Local<v8::Context> context) {
+ v8::Maybe<double> val = value->NumberValue(context);
+ if (val.IsNothing()) return v8::Nothing<float>();
+ return v8::Just(static_cast<float>(val.ToChecked()));
+ }
+};
+
+template <>
+struct ConvertJSValue<double> {
+ static v8::Maybe<double> Get(v8::Local<v8::Value> value,
+ v8::Local<v8::Context> context) {
+ return value->NumberValue(context);
+ }
+};
+
+template <>
+struct ConvertJSValue<bool> {
+ static v8::Maybe<bool> Get(v8::Local<v8::Value> value,
+ v8::Local<v8::Context> context) {
+ return v8::Just<bool>(value->BooleanValue(CcTest::isolate()));
+ }
+};
+
#endif // V8_TEST_CCTEST_TEST_API_H_
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index 4deb43673f..a1d8cdfb7d 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -10888,6 +10888,26 @@ TEST(fcvtmu) {
CHECK_EQUAL_64(0x0UL, x30);
}
+TEST(fcvtn) {
+ INIT_V8();
+ SETUP();
+ START();
+
+ double src[2] = {1.0f, 1.0f};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+
+ __ Mov(x0, src_base);
+ __ Ldr(q0, MemOperand(x0, 0));
+
+ __ Fcvtn(q0.V2S(), q0.V2D());
+
+ END();
+ RUN();
+
+ // Ensure top half is cleared.
+ CHECK_EQUAL_128(0, 0x3f800000'3f800000, q0);
+}
+
TEST(fcvtns) {
INIT_V8();
SETUP();
@@ -11841,9 +11861,7 @@ TEST(system_msr) {
}
TEST(system_pauth_b) {
-#ifdef DEBUG
i::FLAG_sim_abort_on_bad_auth = false;
-#endif
SETUP();
START();
diff --git a/deps/v8/test/cctest/test-assembler-riscv64.cc b/deps/v8/test/cctest/test-assembler-riscv64.cc
new file mode 100644
index 0000000000..c9429f2bc1
--- /dev/null
+++ b/deps/v8/test/cctest/test-assembler-riscv64.cc
@@ -0,0 +1,1874 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <math.h>
+
+#include <iostream> // NOLINT(readability/streams)
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/simulator.h"
+#include "src/heap/factory.h"
+#include "src/init/v8.h"
+#include "src/utils/utils.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/value-helper.h"
+#include "test/cctest/test-helper-riscv64.h"
+
+namespace v8 {
+namespace internal {
+
+// Define these function prototypes to match JSEntryFunction in execution.cc
+using F1 = void*(int x, int p1, int p2, int p3, int p4);
+using F2 = void*(int x, int y, int p2, int p3, int p4);
+using F3 = void*(void* p, int p1, int p2, int p3, int p4);
+using F4 = void*(int64_t x, int64_t y, int64_t p2, int64_t p3, int64_t p4);
+using F5 = void*(void* p0, void* p1, int p2, int p3, int p4);
+
+#define MIN_VAL_IMM12 -(1 << 11)
+#define LARGE_INT_EXCEED_32_BIT 0x01C9'1075'0321'FB01LL
+#define LARGE_INT_UNDER_32_BIT 0x1234'5678
+#define LARGE_UINT_EXCEED_32_BIT 0xFDCB'1234'A034'5691ULL
+
+#define __ assm.
+
+#define UTEST_R2_FORM_WITH_RES(instr_name, type, rs1_val, rs2_val, \
+ expected_res) \
+ TEST(RISCV_UTEST_##instr_name) { \
+ CcTest::InitializeVM(); \
+ auto fn = [](MacroAssembler& assm) { __ instr_name(a0, a0, a1); }; \
+ auto res = GenAndRunTest<type, type>(rs1_val, rs2_val, fn); \
+ CHECK_EQ(expected_res, res); \
+ }
+
+#define UTEST_R1_FORM_WITH_RES(instr_name, in_type, out_type, rs1_val, \
+ expected_res) \
+ TEST(RISCV_UTEST_##instr_name) { \
+ CcTest::InitializeVM(); \
+ auto fn = [](MacroAssembler& assm) { __ instr_name(a0, a0); }; \
+ auto res = GenAndRunTest<out_type, in_type>(rs1_val, fn); \
+ CHECK_EQ(expected_res, res); \
+ }
+
+#define UTEST_I_FORM_WITH_RES(instr_name, type, rs1_val, imm12, expected_res) \
+ TEST(RISCV_UTEST_##instr_name) { \
+ CcTest::InitializeVM(); \
+ CHECK_EQ(is_intn(imm12, 12), true); \
+ auto fn = [](MacroAssembler& assm) { __ instr_name(a0, a0, imm12); }; \
+ auto res = GenAndRunTest<type, type>(rs1_val, fn); \
+ CHECK_EQ(expected_res, res); \
+ }
+
+#define UTEST_AMO_WITH_RES(instr_name, aq, rl, inout_type, rs1_val, rs2_val, \
+ expected_res) \
+ TEST(RISCV_UTEST_##instr_name) { \
+ CcTest::InitializeVM(); \
+ auto fn = [](MacroAssembler& assm) { __ instr_name(aq, rl, a1, a0, a2); }; \
+ auto res = \
+ GenAndRunTestForAMO<inout_type, inout_type>(rs1_val, rs2_val, fn); \
+ CHECK_EQ(expected_res, res); \
+ }
+
+#define UTEST_LOAD_STORE(ldname, stname, value_type, value) \
+ TEST(RISCV_UTEST_##stname##ldname) { \
+ CcTest::InitializeVM(); \
+ auto fn = [](MacroAssembler& assm) { \
+ __ stname(a1, a0, 0); \
+ __ ldname(a0, a0, 0); \
+ }; \
+ GenAndRunTestForLoadStore<value_type>(value, fn); \
+ }
+
+// Since f.Call() is implemented as a vararg call and the RISC-V calling
+// convention passes all vararg arguments and return values (including floats)
+// in GPRs, we have to move values from GPRs to FPRs and back in all
+// floating-point tests.
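+// (For a float this means, e.g., moving the raw bits from a0 into fa0 with
+// fmv.w.x before the FP code runs and back with fmv.x.w afterwards; the
+// GenAndRunTest* helpers are assumed to handle these moves.)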
+#define UTEST_LOAD_STORE_F(ldname, stname, value_type, store_value) \
+ TEST(RISCV_UTEST_##stname##ldname) { \
+ DCHECK(std::is_floating_point<value_type>::value); \
+ \
+ CcTest::InitializeVM(); \
+ auto fn = [](MacroAssembler& assm) { \
+ __ stname(fa0, a0, 0); \
+ __ ldname(fa0, a0, 0); \
+ }; \
+ GenAndRunTestForLoadStore<value_type>(store_value, fn); \
+ }
+
+#define UTEST_LR_SC(ldname, stname, aq, rl, value_type, value) \
+ TEST(RISCV_UTEST_##stname##ldname) { \
+ CcTest::InitializeVM(); \
+ auto fn = [](MacroAssembler& assm) { \
+ __ ldname(aq, rl, a1, a0); \
+ __ stname(aq, rl, a0, a0, a1); \
+ }; \
+ GenAndRunTestForLRSC<value_type>(value, fn); \
+ }
+
+#define UTEST_R1_FORM_WITH_RES_F(instr_name, type, rs1_fval, expected_fres) \
+ TEST(RISCV_UTEST_##instr_name) { \
+ DCHECK(std::is_floating_point<type>::value); \
+ CcTest::InitializeVM(); \
+ auto fn = [](MacroAssembler& assm) { __ instr_name(fa0, fa0); }; \
+ auto res = GenAndRunTest<type, type>(rs1_fval, fn); \
+ CHECK_EQ(expected_fres, res); \
+ }
+
+#define UTEST_R2_FORM_WITH_RES_F(instr_name, type, rs1_fval, rs2_fval, \
+ expected_fres) \
+ TEST(RISCV_UTEST_##instr_name) { \
+ DCHECK(std::is_floating_point<type>::value); \
+ CcTest::InitializeVM(); \
+ auto fn = [](MacroAssembler& assm) { __ instr_name(fa0, fa0, fa1); }; \
+ auto res = GenAndRunTest<type, type>(rs1_fval, rs2_fval, fn); \
+ CHECK_EQ(expected_fres, res); \
+ }
+
+#define UTEST_R3_FORM_WITH_RES_F(instr_name, type, rs1_fval, rs2_fval, \
+ rs3_fval, expected_fres) \
+ TEST(RISCV_UTEST_##instr_name) { \
+ DCHECK(std::is_floating_point<type>::value); \
+ CcTest::InitializeVM(); \
+ auto fn = [](MacroAssembler& assm) { __ instr_name(fa0, fa0, fa1, fa2); }; \
+ auto res = GenAndRunTest<type, type>(rs1_fval, rs2_fval, rs3_fval, fn); \
+ CHECK_EQ(expected_fres, res); \
+ }
+
+#define UTEST_COMPARE_WITH_RES_F(instr_name, input_type, rs1_fval, rs2_fval, \
+ expected_res) \
+ TEST(RISCV_UTEST_##instr_name) { \
+ CcTest::InitializeVM(); \
+ auto fn = [](MacroAssembler& assm) { __ instr_name(a0, fa0, fa1); }; \
+ auto res = GenAndRunTest<int32_t, input_type>(rs1_fval, rs2_fval, fn); \
+ CHECK_EQ(expected_res, res); \
+ }
+
+#define UTEST_CONV_F_FROM_I(instr_name, input_type, output_type, rs1_val, \
+ expected_fres) \
+ TEST(RISCV_UTEST_##instr_name) { \
+ DCHECK(std::is_integral<input_type>::value&& \
+ std::is_floating_point<output_type>::value); \
+ \
+ CcTest::InitializeVM(); \
+ auto fn = [](MacroAssembler& assm) { __ instr_name(fa0, a0); }; \
+ auto res = GenAndRunTest<output_type, input_type>(rs1_val, fn); \
+ CHECK_EQ(expected_fres, res); \
+ }
+
+#define UTEST_CONV_I_FROM_F(instr_name, input_type, output_type, \
+ rounding_mode, rs1_fval, expected_res) \
+ TEST(RISCV_UTEST_##instr_name) { \
+ DCHECK(std::is_floating_point<input_type>::value&& \
+ std::is_integral<output_type>::value); \
+ \
+ CcTest::InitializeVM(); \
+ auto fn = [](MacroAssembler& assm) { \
+ __ instr_name(a0, fa0, rounding_mode); \
+ }; \
+ auto res = GenAndRunTest<output_type, input_type>(rs1_fval, fn); \
+ CHECK_EQ(expected_res, res); \
+ } \
+ \
+ TEST(RISCV_UTEST_dyn_##instr_name) { \
+ DCHECK(std::is_floating_point<input_type>::value&& \
+ std::is_integral<output_type>::value); \
+ \
+ CcTest::InitializeVM(); \
+ auto fn = [](MacroAssembler& assm) { \
+ __ csrwi(csr_frm, rounding_mode); \
+ __ instr_name(a0, fa0, DYN); \
+ }; \
+ auto res = GenAndRunTest<output_type, input_type>(rs1_fval, fn); \
+ CHECK_EQ(expected_res, res); \
+ }
+
+#define UTEST_CONV_F_FROM_F(instr_name, input_type, output_type, rs1_val, \
+ expected_fres) \
+ TEST(RISCV_UTEST_##instr_name) { \
+ CcTest::InitializeVM(); \
+ auto fn = [](MacroAssembler& assm) { __ instr_name(fa0, fa0); }; \
+ auto res = GenAndRunTest<output_type, input_type>(rs1_val, fn); \
+ CHECK_EQ(expected_fres, res); \
+ }
+
+#define UTEST_CSRI(csr_reg, csr_write_val, csr_set_clear_val) \
+ TEST(RISCV_UTEST_CSRI_##csr_reg) { \
+ CHECK_EQ(is_uint5(csr_write_val) && is_uint5(csr_set_clear_val), true); \
+ \
+ CcTest::InitializeVM(); \
+ int64_t expected_res = 111; \
+ Label exit, error; \
+ auto fn = [&exit, &error, expected_res](MacroAssembler& assm) { \
+ /* test csr-write and csr-read */ \
+ __ csrwi(csr_reg, csr_write_val); \
+ __ csrr(a0, csr_reg); \
+ __ RV_li(a1, csr_write_val); \
+ __ bne(a0, a1, &error); \
+ /* test csr_set */ \
+ __ csrsi(csr_reg, csr_set_clear_val); \
+ __ csrr(a0, csr_reg); \
+ __ RV_li(a1, (csr_write_val) | (csr_set_clear_val)); \
+ __ bne(a0, a1, &error); \
+ /* test csr_clear */ \
+ __ csrci(csr_reg, csr_set_clear_val); \
+ __ csrr(a0, csr_reg); \
+ __ RV_li(a1, (csr_write_val) & (~(csr_set_clear_val))); \
+ __ bne(a0, a1, &error); \
+      /* everything runs correctly, return 111 */                          \
+ __ RV_li(a0, expected_res); \
+ __ j(&exit); \
+ \
+ __ bind(&error); \
+ /* got an error, return 666 */ \
+ __ RV_li(a0, 666); \
+ \
+ __ bind(&exit); \
+ }; \
+ auto res = GenAndRunTest(fn); \
+ CHECK_EQ(expected_res, res); \
+ }
+
+#define UTEST_CSR(csr_reg, csr_write_val, csr_set_clear_val) \
+ TEST(RISCV_UTEST_CSR_##csr_reg) { \
+ Label exit, error; \
+ int64_t expected_res = 111; \
+ auto fn = [&exit, &error, expected_res](MacroAssembler& assm) { \
+ /* test csr-write and csr-read */ \
+ __ RV_li(t0, csr_write_val); \
+ __ csrw(csr_reg, t0); \
+ __ csrr(a0, csr_reg); \
+ __ RV_li(a1, csr_write_val); \
+ __ bne(a0, a1, &error); \
+ /* test csr_set */ \
+ __ RV_li(t0, csr_set_clear_val); \
+ __ csrs(csr_reg, t0); \
+ __ csrr(a0, csr_reg); \
+ __ RV_li(a1, (csr_write_val) | (csr_set_clear_val)); \
+ __ bne(a0, a1, &error); \
+ /* test csr_clear */ \
+ __ RV_li(t0, csr_set_clear_val); \
+ __ csrc(csr_reg, t0); \
+ __ csrr(a0, csr_reg); \
+ __ RV_li(a1, (csr_write_val) & (~(csr_set_clear_val))); \
+ __ bne(a0, a1, &error); \
+      /* everything runs correctly, return 111 */                          \
+ __ RV_li(a0, expected_res); \
+ __ j(&exit); \
+ \
+ __ bind(&error); \
+ /* got an error, return 666 */ \
+ __ RV_li(a0, 666); \
+ \
+ __ bind(&exit); \
+ }; \
+ \
+ auto res = GenAndRunTest(fn); \
+ CHECK_EQ(expected_res, res); \
+ }
+
+#define UTEST_R2_FORM_WITH_OP(instr_name, type, rs1_val, rs2_val, tested_op) \
+ UTEST_R2_FORM_WITH_RES(instr_name, type, rs1_val, rs2_val, \
+ ((rs1_val)tested_op(rs2_val)))
+
+#define UTEST_I_FORM_WITH_OP(instr_name, type, rs1_val, imm12, tested_op) \
+ UTEST_I_FORM_WITH_RES(instr_name, type, rs1_val, imm12, \
+ ((rs1_val)tested_op(imm12)))
+
+#define UTEST_R2_FORM_WITH_OP_F(instr_name, type, rs1_fval, rs2_fval, \
+ tested_op) \
+ UTEST_R2_FORM_WITH_RES_F(instr_name, type, rs1_fval, rs2_fval, \
+ ((rs1_fval)tested_op(rs2_fval)))
+
+#define UTEST_COMPARE_WITH_OP_F(instr_name, input_type, rs1_fval, rs2_fval, \
+ tested_op) \
+ UTEST_COMPARE_WITH_RES_F(instr_name, input_type, rs1_fval, rs2_fval, \
+ ((rs1_fval)tested_op(rs2_fval)))
+
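+// For example, UTEST_R2_FORM_WITH_OP(add, int64_t, x, y, +) expands (roughly)
+// to a cctest named RISCV_UTEST_add that assembles `add a0, a0, a1`, runs it
+// on x and y, and checks the result against x + y.
+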
+// -- test load-store --
+UTEST_LOAD_STORE(ld, sd, int64_t, 0xFBB10A9C12345678)
+// Due to sign-extension by the lw
+// instruction, the value to be stored
+// must have bit 31 (its 32nd least
+// significant bit) be 0.
+UTEST_LOAD_STORE(lw, sw, int32_t, 0x456AF894)
+// Set bit 31 (the 32nd least significant
+// bit) of the value to store to 1 to
+// test zero-extension by lwu.
+UTEST_LOAD_STORE(lwu, sw, uint32_t, 0x856AF894)
+// Due to sign-extension by the lh
+// instruction, the value to be stored
+// must have bit 15 (its 16th least
+// significant bit) be 0.
+UTEST_LOAD_STORE(lh, sh, int32_t, 0x7894)
+// Set bit 15 (the 16th least significant
+// bit) of the value to store to 1 to
+// test zero-extension by lhu.
+UTEST_LOAD_STORE(lhu, sh, uint32_t, 0xF894)
+// Due to sign-extension by the lb
+// instruction, the value to be stored
+// must have bit 7 (its 8th least
+// significant bit) be 0.
+UTEST_LOAD_STORE(lb, sb, int32_t, 0x54)
+// Set bit 7 (the 8th least significant
+// bit) of the value to store to 1 to
+// test zero-extension by lbu.
+UTEST_LOAD_STORE(lbu, sb, uint32_t, 0x94)
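+// Illustrative example: storing 0x856AF894 and reloading it with lwu yields
+// 0x0000'0000'856A'F894 (zero-extended), whereas lw would yield
+// 0xFFFF'FFFF'856A'F894 (sign-extended), which is why the signed lw test
+// uses a value with bit 31 clear.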
+
+// -- arithmetic w/ immediate --
+UTEST_I_FORM_WITH_OP(addi, int64_t, LARGE_INT_EXCEED_32_BIT, MIN_VAL_IMM12, +)
+UTEST_I_FORM_WITH_OP(slti, int64_t, LARGE_INT_EXCEED_32_BIT, MIN_VAL_IMM12, <)
+UTEST_I_FORM_WITH_OP(sltiu, uint64_t, LARGE_UINT_EXCEED_32_BIT, 0x4FB, <)
+UTEST_I_FORM_WITH_OP(xori, int64_t, LARGE_INT_EXCEED_32_BIT, MIN_VAL_IMM12, ^)
+UTEST_I_FORM_WITH_OP(ori, int64_t, LARGE_INT_EXCEED_32_BIT, MIN_VAL_IMM12, |)
+UTEST_I_FORM_WITH_OP(andi, int64_t, LARGE_INT_EXCEED_32_BIT, MIN_VAL_IMM12, &)
+UTEST_I_FORM_WITH_OP(slli, int64_t, 0x1234'5678ULL, 33, <<)
+UTEST_I_FORM_WITH_OP(srli, int64_t, 0x8234'5678'0000'0000ULL, 33, >>)
+UTEST_I_FORM_WITH_OP(srai, int64_t, -0x1234'5678'0000'0000LL, 33, >>)
+
+// -- arithmetic --
+UTEST_R2_FORM_WITH_OP(add, int64_t, LARGE_INT_EXCEED_32_BIT, MIN_VAL_IMM12, +)
+UTEST_R2_FORM_WITH_OP(sub, int64_t, LARGE_INT_EXCEED_32_BIT, MIN_VAL_IMM12, -)
+UTEST_R2_FORM_WITH_OP(slt, int64_t, MIN_VAL_IMM12, LARGE_INT_EXCEED_32_BIT, <)
+UTEST_R2_FORM_WITH_OP(sltu, uint64_t, 0x4FB, LARGE_UINT_EXCEED_32_BIT, <)
+UTEST_R2_FORM_WITH_OP(xor_, int64_t, LARGE_INT_EXCEED_32_BIT, MIN_VAL_IMM12, ^)
+UTEST_R2_FORM_WITH_OP(or_, int64_t, LARGE_INT_EXCEED_32_BIT, MIN_VAL_IMM12, |)
+UTEST_R2_FORM_WITH_OP(and_, int64_t, LARGE_INT_EXCEED_32_BIT, MIN_VAL_IMM12, &)
+UTEST_R2_FORM_WITH_OP(sll, int64_t, 0x12345678ULL, 33, <<)
+UTEST_R2_FORM_WITH_OP(srl, int64_t, 0x8234567800000000ULL, 33, >>)
+UTEST_R2_FORM_WITH_OP(sra, int64_t, -0x1234'5678'0000'0000LL, 33, >>)
+
+// -- Memory fences --
+// void fence(uint8_t pred, uint8_t succ);
+// void fence_tso();
+
+// -- Environment call / break --
+// void ecall();
+// void ebreak();
+// void unimp();
+
+// -- CSR --
+UTEST_CSRI(csr_frm, DYN, RUP)
+UTEST_CSRI(csr_fflags, kInexact | kInvalidOperation, kInvalidOperation)
+UTEST_CSRI(csr_fcsr, kDivideByZero | kOverflow, kUnderflow)
+UTEST_CSR(csr_frm, DYN, RUP)
+UTEST_CSR(csr_fflags, kInexact | kInvalidOperation, kInvalidOperation)
+UTEST_CSR(csr_fcsr, kDivideByZero | kOverflow | (RDN << kFcsrFrmShift),
+ kUnderflow | (RNE << kFcsrFrmShift))
+
+// -- RV64I --
+UTEST_I_FORM_WITH_OP(addiw, int32_t, LARGE_INT_UNDER_32_BIT, MIN_VAL_IMM12, +)
+UTEST_I_FORM_WITH_OP(slliw, int32_t, 0x12345678U, 12, <<)
+UTEST_I_FORM_WITH_OP(srliw, int32_t, 0x82345678U, 12, >>)
+UTEST_I_FORM_WITH_OP(sraiw, int32_t, -123, 12, >>)
+
+UTEST_R2_FORM_WITH_OP(addw, int32_t, LARGE_INT_UNDER_32_BIT, MIN_VAL_IMM12, +)
+UTEST_R2_FORM_WITH_OP(subw, int32_t, LARGE_INT_UNDER_32_BIT, MIN_VAL_IMM12, -)
+UTEST_R2_FORM_WITH_OP(sllw, int32_t, 0x12345678U, 12, <<)
+UTEST_R2_FORM_WITH_OP(srlw, int32_t, 0x82345678U, 12, >>)
+UTEST_R2_FORM_WITH_OP(sraw, int32_t, -123, 12, >>)
+
+// -- RV32M Standard Extension --
+UTEST_R2_FORM_WITH_OP(mul, int64_t, 0x0F945001L, MIN_VAL_IMM12, *)
+UTEST_R2_FORM_WITH_RES(mulh, int64_t, 0x1234567800000000LL,
+ -0x1234'5617'0000'0000LL, 0x12345678LL * -0x1234'5617LL)
+UTEST_R2_FORM_WITH_RES(mulhu, int64_t, 0x1234'5678'0000'0000ULL,
+ 0xF896'7021'0000'0000ULL,
+ 0x1234'5678ULL * 0xF896'7021ULL)
+UTEST_R2_FORM_WITH_RES(mulhsu, int64_t, -0x1234'56780000'0000LL,
+ 0xF234'5678'0000'0000ULL,
+ static_cast<int64_t>(-0x1234'5678LL * 0xF234'5678ULL))
+UTEST_R2_FORM_WITH_OP(div, int64_t, LARGE_INT_EXCEED_32_BIT, MIN_VAL_IMM12, /)
+UTEST_R2_FORM_WITH_OP(divu, uint64_t, LARGE_UINT_EXCEED_32_BIT, 100, /)
+UTEST_R2_FORM_WITH_OP(rem, int64_t, LARGE_INT_EXCEED_32_BIT, MIN_VAL_IMM12, %)
+UTEST_R2_FORM_WITH_OP(remu, uint64_t, LARGE_UINT_EXCEED_32_BIT, 100, %)
+
+// -- RV64M Standard Extension (in addition to RV32M) --
+UTEST_R2_FORM_WITH_OP(mulw, int32_t, -20, 56, *)
+UTEST_R2_FORM_WITH_OP(divw, int32_t, 200, -10, /)
+UTEST_R2_FORM_WITH_OP(divuw, uint32_t, 1000, 100, /)
+UTEST_R2_FORM_WITH_OP(remw, int32_t, 1234, -91, %)
+UTEST_R2_FORM_WITH_OP(remuw, uint32_t, 1234, 43, %)
+
+// -- RV32A Standard Extension --
+UTEST_LR_SC(lr_w, sc_w, false, false, int32_t, 0xFBB1A75C)
+UTEST_AMO_WITH_RES(amoswap_w, false, false, uint32_t, 0xFBB1A75C, 0xA75C0A9C,
+ (uint32_t)0xA75C0A9C)
+UTEST_AMO_WITH_RES(amoadd_w, false, false, uint32_t, 0xFBB1A75C, 0xA75C0A9C,
+ (uint32_t)0xFBB1A75C + (uint32_t)0xA75C0A9C)
+UTEST_AMO_WITH_RES(amoxor_w, false, false, uint32_t, 0xFBB1A75C, 0xA75C0A9C,
+ (uint32_t)0xFBB1A75C ^ (uint32_t)0xA75C0A9C)
+UTEST_AMO_WITH_RES(amoand_w, false, false, uint32_t, 0xFBB1A75C, 0xA75C0A9C,
+ (uint32_t)0xFBB1A75C & (uint32_t)0xA75C0A9C)
+UTEST_AMO_WITH_RES(amoor_w, false, false, uint32_t, 0xFBB1A75C, 0xA75C0A9C,
+ (uint32_t)0xFBB1A75C | (uint32_t)0xA75C0A9C)
+UTEST_AMO_WITH_RES(amomin_w, false, false, int32_t, 0xFBB1A75C, 0xA75C0A9C,
+ std::min((int32_t)0xFBB1A75C, (int32_t)0xA75C0A9C))
+UTEST_AMO_WITH_RES(amomax_w, false, false, int32_t, 0xFBB1A75C, 0xA75C0A9C,
+ std::max((int32_t)0xFBB1A75C, (int32_t)0xA75C0A9C))
+UTEST_AMO_WITH_RES(amominu_w, false, false, uint32_t, 0xFBB1A75C, 0xA75C0A9C,
+ std::min((uint32_t)0xFBB1A75C, (uint32_t)0xA75C0A9C))
+UTEST_AMO_WITH_RES(amomaxu_w, false, false, uint32_t, 0xFBB1A75C, 0xA75C0A9C,
+ std::max((uint32_t)0xFBB1A75C, (uint32_t)0xA75C0A9C))
+
+// -- RV64A Standard Extension (in addition to RV32A) --
+UTEST_LR_SC(lr_d, sc_d, false, false, int64_t, 0xFBB10A9Cbfb76aa6)
+UTEST_AMO_WITH_RES(amoswap_d, false, false, int64_t, 0xFBB10A9Cbfb76aa6,
+ 0x284ff922346ad35c, (int64_t)0x284ff922346ad35c)
+UTEST_AMO_WITH_RES(amoadd_d, false, false, int64_t, 0xFBB10A9Cbfb76aa6,
+ 0x284ff922346ad35c,
+ (int64_t)0xFBB10A9Cbfb76aa6 + (int64_t)0x284ff922346ad35c)
+UTEST_AMO_WITH_RES(amoxor_d, false, false, int64_t, 0xFBB10A9Cbfb76aa6,
+ 0x284ff922346ad35c,
+ (int64_t)0xFBB10A9Cbfb76aa6 ^ (int64_t)0x284ff922346ad35c)
+UTEST_AMO_WITH_RES(amoand_d, false, false, int64_t, 0xFBB10A9Cbfb76aa6,
+ 0x284ff922346ad35c,
+ (int64_t)0xFBB10A9Cbfb76aa6 & (int64_t)0x284ff922346ad35c)
+UTEST_AMO_WITH_RES(amoor_d, false, false, int64_t, 0xFBB10A9Cbfb76aa6,
+ 0x284ff922346ad35c,
+ (int64_t)0xFBB10A9Cbfb76aa6 | (int64_t)0x284ff922346ad35c)
+UTEST_AMO_WITH_RES(amomin_d, false, false, int64_t, 0xFBB10A9Cbfb76aa6,
+ 0x284ff922346ad35c,
+ std::min((int64_t)0xFBB10A9Cbfb76aa6,
+ (int64_t)0x284ff922346ad35c))
+UTEST_AMO_WITH_RES(amomax_d, false, false, int64_t, 0xFBB10A9Cbfb76aa6,
+ 0x284ff922346ad35c,
+ std::max((int64_t)0xFBB10A9Cbfb76aa6,
+ (int64_t)0x284ff922346ad35c))
+UTEST_AMO_WITH_RES(amominu_d, false, false, uint64_t, 0xFBB10A9Cbfb76aa6,
+ 0x284ff922346ad35c,
+ std::min((uint64_t)0xFBB10A9Cbfb76aa6,
+ (uint64_t)0x284ff922346ad35c))
+UTEST_AMO_WITH_RES(amomaxu_d, false, false, uint64_t, 0xFBB10A9Cbfb76aa6,
+ 0x284ff922346ad35c,
+ std::max((uint64_t)0xFBB10A9Cbfb76aa6,
+ (uint64_t)0x284ff922346ad35c))
+
+// -- RV32F Standard Extension --
+UTEST_LOAD_STORE_F(flw, fsw, float, -2345.678f)
+UTEST_R2_FORM_WITH_OP_F(fadd_s, float, -1012.01f, 3456.13f, +)
+UTEST_R2_FORM_WITH_OP_F(fsub_s, float, -1012.01f, 3456.13f, -)
+UTEST_R2_FORM_WITH_OP_F(fmul_s, float, -10.01f, 56.13f, *)
+UTEST_R2_FORM_WITH_OP_F(fdiv_s, float, -10.01f, 34.13f, /)
+UTEST_R1_FORM_WITH_RES_F(fsqrt_s, float, 34.13f, sqrtf(34.13f))
+UTEST_R2_FORM_WITH_RES_F(fmin_s, float, -1012.0f, 3456.13f, -1012.0f)
+UTEST_R2_FORM_WITH_RES_F(fmax_s, float, -1012.0f, 3456.13f, 3456.13f)
+UTEST_R3_FORM_WITH_RES_F(fmadd_s, float, 67.56f, -1012.01f, 3456.13f,
+ std::fma(67.56f, -1012.01f, 3456.13f))
+UTEST_R3_FORM_WITH_RES_F(fmsub_s, float, 67.56f, -1012.01f, 3456.13f,
+ std::fma(67.56f, -1012.01f, -3456.13f))
+UTEST_R3_FORM_WITH_RES_F(fnmsub_s, float, 67.56f, -1012.01f, 3456.13f,
+ -std::fma(67.56f, -1012.01f, -3456.13f))
+UTEST_R3_FORM_WITH_RES_F(fnmadd_s, float, 67.56f, -1012.01f, 3456.13f,
+ -std::fma(67.56f, -1012.01f, 3456.13f))
+UTEST_COMPARE_WITH_OP_F(feq_s, float, -3456.56, -3456.56, ==)
+UTEST_COMPARE_WITH_OP_F(flt_s, float, -3456.56, -3456.56, <)
+UTEST_COMPARE_WITH_OP_F(fle_s, float, -3456.56, -3456.56, <=)
+UTEST_CONV_F_FROM_I(fcvt_s_w, int32_t, float, -100, (float)(-100))
+UTEST_CONV_F_FROM_I(fcvt_s_wu, uint32_t, float,
+ std::numeric_limits<uint32_t>::max(),
+ (float)(std::numeric_limits<uint32_t>::max()))
+UTEST_CONV_I_FROM_F(fcvt_w_s, float, int32_t, RMM, -100.5f, -101)
+UTEST_CONV_I_FROM_F(fcvt_wu_s, float, uint32_t, RUP, 256.1f, 257)
+UTEST_R2_FORM_WITH_RES_F(fsgnj_s, float, -100.0f, 200.0f, 100.0f)
+UTEST_R2_FORM_WITH_RES_F(fsgnjn_s, float, 100.0f, 200.0f, -100.0f)
+UTEST_R2_FORM_WITH_RES_F(fsgnjx_s, float, -100.0f, 200.0f, -100.0f)
+
+// -- RV32D Standard Extension --
+UTEST_LOAD_STORE_F(fld, fsd, double, -3456.678)
+UTEST_R2_FORM_WITH_OP_F(fadd_d, double, -1012.01, 3456.13, +)
+UTEST_R2_FORM_WITH_OP_F(fsub_d, double, -1012.01, 3456.13, -)
+UTEST_R2_FORM_WITH_OP_F(fmul_d, double, -10.01, 56.13, *)
+UTEST_R2_FORM_WITH_OP_F(fdiv_d, double, -10.01, 34.13, /)
+UTEST_R1_FORM_WITH_RES_F(fsqrt_d, double, 34.13, std::sqrt(34.13))
+UTEST_R2_FORM_WITH_RES_F(fmin_d, double, -1012.0, 3456.13, -1012.0)
+UTEST_R2_FORM_WITH_RES_F(fmax_d, double, -1012.0, 3456.13, 3456.13)
+
+UTEST_R3_FORM_WITH_RES_F(fmadd_d, double, 67.56, -1012.01, 3456.13,
+ std::fma(67.56, -1012.01, 3456.13))
+UTEST_R3_FORM_WITH_RES_F(fmsub_d, double, 67.56, -1012.01, 3456.13,
+ std::fma(67.56, -1012.01, -3456.13))
+UTEST_R3_FORM_WITH_RES_F(fnmsub_d, double, 67.56, -1012.01, 3456.13,
+ -std::fma(67.56, -1012.01, -3456.13))
+UTEST_R3_FORM_WITH_RES_F(fnmadd_d, double, 67.56, -1012.01, 3456.13,
+ -std::fma(67.56, -1012.01, 3456.13))
+
+UTEST_COMPARE_WITH_OP_F(feq_d, double, -3456.56, -3456.56, ==)
+UTEST_COMPARE_WITH_OP_F(flt_d, double, -3456.56, -3456.56, <)
+UTEST_COMPARE_WITH_OP_F(fle_d, double, -3456.56, -3456.56, <=)
+
+UTEST_CONV_F_FROM_I(fcvt_d_w, int32_t, double, -100, -100.0)
+UTEST_CONV_F_FROM_I(fcvt_d_wu, uint32_t, double,
+ std::numeric_limits<uint32_t>::max(),
+ (double)(std::numeric_limits<uint32_t>::max()))
+UTEST_CONV_I_FROM_F(fcvt_w_d, double, int32_t, RTZ, -100.0, -100)
+UTEST_CONV_I_FROM_F(fcvt_wu_d, double, uint32_t, RTZ,
+ (double)(std::numeric_limits<uint32_t>::max()),
+ std::numeric_limits<uint32_t>::max())
+
+// -- RV64F Standard Extension (in addition to RV32F) --
+UTEST_CONV_I_FROM_F(fcvt_l_s, float, int64_t, RDN, -100.5f, -101)
+UTEST_CONV_I_FROM_F(fcvt_lu_s, float, uint64_t, RTZ, 1000001.0f, 1000001)
+UTEST_CONV_F_FROM_I(fcvt_s_l, int64_t, float, (-0x1234'5678'0000'0001LL),
+ (float)(-0x1234'5678'0000'0001LL))
+UTEST_CONV_F_FROM_I(fcvt_s_lu, uint64_t, float,
+ std::numeric_limits<uint64_t>::max(),
+ (float)(std::numeric_limits<uint64_t>::max()))
+
+// -- RV32D Standard Extension --
+UTEST_CONV_F_FROM_F(fcvt_s_d, double, float, 100.0, 100.0f)
+UTEST_CONV_F_FROM_F(fcvt_d_s, float, double, 100.0f, 100.0)
+
+UTEST_R2_FORM_WITH_RES_F(fsgnj_d, double, -100.0, 200.0, 100.0)
+UTEST_R2_FORM_WITH_RES_F(fsgnjn_d, double, 100.0, 200.0, -100.0)
+UTEST_R2_FORM_WITH_RES_F(fsgnjx_d, double, -100.0, 200.0, -100.0)
+
+// -- RV64D Standard Extension (in addition to RV32D) --
+UTEST_CONV_I_FROM_F(fcvt_l_d, double, int64_t, RNE, -100.5, -100)
+UTEST_CONV_I_FROM_F(fcvt_lu_d, double, uint64_t, RTZ, 2456.5, 2456)
+UTEST_CONV_F_FROM_I(fcvt_d_l, int64_t, double, (-0x1234'5678'0000'0001LL),
+ (double)(-0x1234'5678'0000'0001LL))
+UTEST_CONV_F_FROM_I(fcvt_d_lu, uint64_t, double,
+ std::numeric_limits<uint64_t>::max(),
+ (double)(std::numeric_limits<uint64_t>::max()))
+
+// -- RV64C Standard Extension --
+UTEST_R1_FORM_WITH_RES(c_mv, int64_t, int64_t, 0x0f5600ab123400,
+ 0x0f5600ab123400)
+
+// -- Assembler Pseudo Instructions --
+UTEST_R1_FORM_WITH_RES(mv, int64_t, int64_t, 0x0f5600ab123400, 0x0f5600ab123400)
+UTEST_R1_FORM_WITH_RES(not_, int64_t, int64_t, 0, ~0)
+UTEST_R1_FORM_WITH_RES(neg, int64_t, int64_t, 0x0f5600ab123400LL,
+ -(0x0f5600ab123400LL))
+UTEST_R1_FORM_WITH_RES(negw, int32_t, int32_t, 0xab123400, -(0xab123400))
+UTEST_R1_FORM_WITH_RES(sext_w, int32_t, int64_t, 0xFA01'1234,
+ static_cast<int64_t>(0xFFFFFFFFFA011234LL))
+UTEST_R1_FORM_WITH_RES(seqz, int64_t, int64_t, 20, 20 == 0)
+UTEST_R1_FORM_WITH_RES(snez, int64_t, int64_t, 20, 20 != 0)
+UTEST_R1_FORM_WITH_RES(sltz, int64_t, int64_t, -20, -20 < 0)
+UTEST_R1_FORM_WITH_RES(sgtz, int64_t, int64_t, -20, -20 > 0)
+
+UTEST_R1_FORM_WITH_RES_F(fmv_s, float, -23.5f, -23.5f)
+UTEST_R1_FORM_WITH_RES_F(fabs_s, float, -23.5f, 23.5f)
+UTEST_R1_FORM_WITH_RES_F(fneg_s, float, 23.5f, -23.5f)
+UTEST_R1_FORM_WITH_RES_F(fmv_d, double, -23.5, -23.5)
+UTEST_R1_FORM_WITH_RES_F(fabs_d, double, -23.5, 23.5)
+UTEST_R1_FORM_WITH_RES_F(fneg_d, double, 23.5, -23.5)
+
+// Test LI
+TEST(RISCV0) {
+ CcTest::InitializeVM();
+
+ FOR_INT64_INPUTS(i) {
+ auto fn = [i](MacroAssembler& assm) { __ RV_li(a0, i); };
+ auto res = GenAndRunTest(fn);
+ CHECK_EQ(i, res);
+ }
+}
+
+TEST(RISCV1) {
+ CcTest::InitializeVM();
+
+ Label L, C;
+ auto fn = [&L, &C](MacroAssembler& assm) {
+ __ mv(a1, a0);
+ __ RV_li(a0, 0l);
+ __ j(&C);
+
+ __ bind(&L);
+ __ add(a0, a0, a1);
+ __ addi(a1, a1, -1);
+
+ __ bind(&C);
+ __ xori(a2, a1, 0);
+ __ bnez(a2, &L);
+ };
+
+ int64_t input = 50;
+ int64_t expected_res = 1275L;
+ auto res = GenAndRunTest<int64_t>(input, fn);
+ CHECK_EQ(expected_res, res);
+}
+
+TEST(RISCV2) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ Label exit, error;
+ int64_t expected_res = 0x31415926L;
+
+ // ----- Test all instructions.
+
+ // Test lui, ori, and addiw, used in the
+ // li pseudo-instruction. This way we
+ // can then safely load registers with
+ // chosen values.
+ auto fn = [&exit, &error, expected_res](MacroAssembler& assm) {
+ __ ori(a4, zero_reg, 0);
+ __ lui(a4, 0x12345);
+ __ ori(a4, a4, 0);
+ __ ori(a4, a4, 0xF0F);
+ __ ori(a4, a4, 0x0F0);
+ __ addiw(a5, a4, 1);
+ __ addiw(a6, a5, -0x10);
+
+ // Load values in temporary registers.
+ __ RV_li(a4, 0x00000004);
+ __ RV_li(a5, 0x00001234);
+ __ RV_li(a6, 0x12345678);
+ __ RV_li(a7, 0x7FFFFFFF);
+ __ RV_li(t0, 0xFFFFFFFC);
+ __ RV_li(t1, 0xFFFFEDCC);
+ __ RV_li(t2, 0xEDCBA988);
+ __ RV_li(t3, 0x80000000);
+
+ __ srliw(t0, a6, 8); // 0x00123456
+ __ slliw(t0, t0, 11); // 0x91A2B000
+ __ sraiw(t0, t0, 3); // 0xFFFFFFFF F2345600
+ __ sraw(t0, t0, a4); // 0xFFFFFFFF FF234560
+ __ sllw(t0, t0, a4); // 0xFFFFFFFF F2345600
+ __ srlw(t0, t0, a4); // 0x0F234560
+ __ RV_li(t5, 0x0F234560);
+ __ bne(t0, t5, &error);
+
+ __ addw(t0, a4, a5); // 0x00001238
+ __ subw(t0, t0, a4); // 0x00001234
+ __ RV_li(t5, 0x00001234);
+ __ bne(t0, t5, &error);
+    __ addw(a1, a7,
+            a4);  // 32-bit addw result is sign-extended into 64-bit reg.
+ __ RV_li(t5, 0xFFFFFFFF80000003);
+ __ bne(a1, t5, &error);
+ __ subw(a1, t3, a4); // 0x7FFFFFFC
+ __ RV_li(t5, 0x7FFFFFFC);
+ __ bne(a1, t5, &error);
+
+ __ and_(t0, a5, a6); // 0x0000000000001230
+ __ or_(t0, t0, a5); // 0x0000000000001234
+ __ xor_(t0, t0, a6); // 0x000000001234444C
+ __ or_(t0, t0, a6);
+ __ not_(t0, t0); // 0xFFFFFFFFEDCBA983
+ __ RV_li(t5, 0xFFFFFFFFEDCBA983);
+ __ bne(t0, t5, &error);
+
+ // Shift both 32bit number to left, to
+ // preserve meaning of next comparison.
+ __ slli(a7, a7, 32);
+ __ slli(t3, t3, 32);
+
+ __ slt(t0, t3, a7);
+ __ RV_li(t5, 1);
+ __ bne(t0, t5, &error);
+ __ sltu(t0, t3, a7);
+ __ bne(t0, zero_reg, &error);
+
+ // Restore original values in registers.
+ __ srli(a7, a7, 32);
+ __ srli(t3, t3, 32);
+
+ __ RV_li(t0, 0x7421); // 0x00007421
+ __ addi(t0, t0, -0x1); // 0x00007420
+ __ addi(t0, t0, -0x20); // 0x00007400
+ __ RV_li(t5, 0x00007400);
+ __ bne(t0, t5, &error);
+ __ addiw(a1, a7, 0x1); // 0x80000000 - result is sign-extended.
+ __ RV_li(t5, 0xFFFFFFFF80000000);
+ __ bne(a1, t5, &error);
+
+ __ RV_li(t5, 0x00002000);
+ __ slt(t0, a5, t5); // 0x1
+ __ RV_li(t6, 0xFFFFFFFFFFFF8000);
+ __ slt(t0, t0, t6); // 0x0
+ __ bne(t0, zero_reg, &error);
+ __ sltu(t0, a5, t5); // 0x1
+ __ RV_li(t6, 0x00008000);
+ __ sltu(t0, t0, t6); // 0x1
+ __ RV_li(t5, 1);
+ __ bne(t0, t5, &error);
+
+ __ andi(t0, a5, 0x0F0); // 0x00000030
+ __ ori(t0, t0, 0x200); // 0x00000230
+ __ xori(t0, t0, 0x3CC); // 0x000001FC
+ __ RV_li(t5, 0x000001FC);
+ __ bne(t0, t5, &error);
+ __ lui(a1, -519628); // Result is sign-extended into 64bit register.
+ __ RV_li(t5, 0xFFFFFFFF81234000);
+ __ bne(a1, t5, &error);
+
+ // Everything was correctly executed.
+ // Load the expected result.
+ __ RV_li(a0, expected_res);
+ __ j(&exit);
+
+ __ bind(&error);
+ // Got an error. Return a wrong result.
+ __ RV_li(a0, 666);
+
+ __ bind(&exit);
+ };
+ auto res = GenAndRunTest(fn);
+ CHECK_EQ(expected_res, res);
+}
+
+TEST(RISCV3) {
+ // Test floating point instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ struct T {
+ double a;
+ double b;
+ double c;
+ double d;
+ double e;
+ double f;
+ double g;
+ double h;
+ double i;
+ float fa;
+ float fb;
+ float fc;
+ float fd;
+ float fe;
+ float ff;
+ float fg;
+ } t;
+
+ // Create a function that accepts &t and loads, manipulates, and stores
+ // the doubles t.a ... t.f.
+
+ // Double precision floating point instructions.
+ auto fn = [](MacroAssembler& assm) {
+ __ fld(ft0, a0, offsetof(T, a));
+ __ fld(ft1, a0, offsetof(T, b));
+ __ fadd_d(ft2, ft0, ft1);
+ __ fsd(ft2, a0, offsetof(T, c)); // c = a + b.
+
+ __ fmv_d(ft3, ft2); // c
+ __ fneg_d(fa0, ft1); // -b
+ __ fsub_d(ft3, ft3, fa0);
+ __ fsd(ft3, a0, offsetof(T, d)); // d = c - (-b).
+
+ __ fsd(ft0, a0, offsetof(T, b)); // b = a.
+
+ __ RV_li(a4, 120);
+ __ fcvt_d_w(ft5, a4);
+ __ fmul_d(ft3, ft3, ft5);
+ __ fsd(ft3, a0, offsetof(T, e)); // e = d * 120 = 1.8066e16.
+
+ __ fdiv_d(ft4, ft3, ft0);
+ __ fsd(ft4, a0, offsetof(T, f)); // f = e / a = 120.44.
+
+ __ fsqrt_d(ft5, ft4);
+ __ fsd(ft5, a0, offsetof(T, g));
+ // g = sqrt(f) = 10.97451593465515908537
+
+ __ fld(ft0, a0, offsetof(T, h));
+ __ fld(ft1, a0, offsetof(T, i));
+ __ fmadd_d(ft5, ft1, ft0, ft1);
+ __ fsd(ft5, a0, offsetof(T, h));
+
+ // // Single precision floating point instructions.
+ __ flw(ft0, a0, offsetof(T, fa));
+ __ flw(ft1, a0, offsetof(T, fb));
+ __ fadd_s(ft2, ft0, ft1);
+ __ fsw(ft2, a0, offsetof(T, fc)); // fc = fa + fb.
+
+ __ fneg_s(ft3, ft1); // -fb
+ __ fsub_s(ft3, ft2, ft3);
+ __ fsw(ft3, a0, offsetof(T, fd)); // fd = fc - (-fb).
+
+ __ fsw(ft0, a0, offsetof(T, fb)); // fb = fa.
+
+ __ RV_li(t0, 120);
+ __ fcvt_s_w(ft5, t0); // ft5 = 120.0.
+ __ fmul_s(ft3, ft3, ft5);
+ __ fsw(ft3, a0, offsetof(T, fe)); // fe = fd * 120
+
+ __ fdiv_s(ft4, ft3, ft0);
+ __ fsw(ft4, a0, offsetof(T, ff)); // ff = fe / fa
+
+ __ fsqrt_s(ft5, ft4);
+ __ fsw(ft5, a0, offsetof(T, fg));
+ };
+ auto f = AssembleCode<F3>(fn);
+
+ // Double test values.
+ t.a = 1.5e14;
+ t.b = 2.75e11;
+ t.c = 0.0;
+ t.d = 0.0;
+ t.e = 0.0;
+ t.f = 0.0;
+ t.h = 1.5;
+ t.i = 2.75;
+ // Single test values.
+ t.fa = 1.5e6;
+ t.fb = 2.75e4;
+ t.fc = 0.0;
+ t.fd = 0.0;
+ t.fe = 0.0;
+ t.ff = 0.0;
+ f.Call(&t, 0, 0, 0, 0);
+ // Expected double results.
+ CHECK_EQ(1.5e14, t.a);
+ CHECK_EQ(1.5e14, t.b);
+ CHECK_EQ(1.50275e14, t.c);
+ CHECK_EQ(1.50550e14, t.d);
+ CHECK_EQ(1.8066e16, t.e);
+ CHECK_EQ(120.44, t.f);
+ CHECK_EQ(10.97451593465515908537, t.g);
+ CHECK_EQ(6.875, t.h);
+ // Expected single results.
+ CHECK_EQ(1.5e6, t.fa);
+ CHECK_EQ(1.5e6, t.fb);
+ CHECK_EQ(1.5275e06, t.fc);
+ CHECK_EQ(1.5550e06, t.fd);
+ CHECK_EQ(1.866e08, t.fe);
+ CHECK_EQ(124.40000152587890625, t.ff);
+ CHECK_EQ(11.1534748077392578125, t.fg);
+}
+TEST(RISCV4) {
+ // Test moves between floating point and
+ // integer registers.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ struct T {
+ double a;
+ double b;
+ double c;
+ float d;
+ int64_t e;
+ } t;
+
+ auto fn = [](MacroAssembler& assm) {
+ __ fld(ft0, a0, offsetof(T, a));
+ __ fld(fa1, a0, offsetof(T, b));
+
+ // Swap ft0 and fa1, by using 2 integer registers, a4-a5,
+ __ fmv_x_d(a4, ft0);
+ __ fmv_x_d(a5, fa1);
+
+ __ fmv_d_x(fa1, a4);
+ __ fmv_d_x(ft0, a5);
+
+ // Store the swapped ft0 and fa1 back to memory.
+ __ fsd(ft0, a0, offsetof(T, a));
+ __ fsd(fa1, a0, offsetof(T, c));
+
+ // Test sign extension of move operations from coprocessor.
+ __ flw(ft0, a0, offsetof(T, d));
+ __ fmv_x_w(a4, ft0);
+
+ __ sd(a4, a0, offsetof(T, e));
+ };
+ auto f = AssembleCode<F3>(fn);
+
+ t.a = 1.5e22;
+ t.b = 2.75e11;
+ t.c = 17.17;
+ t.d = -2.75e11;
+ f.Call(&t, 0, 0, 0, 0);
+
+ CHECK_EQ(2.75e11, t.a);
+ CHECK_EQ(2.75e11, t.b);
+ CHECK_EQ(1.5e22, t.c);
+ CHECK_EQ(static_cast<int64_t>(0xFFFFFFFFD2800E8EL), t.e);
+}
+
+TEST(RISCV5) {
+ // Test conversions between doubles and
+ // integers.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ struct T {
+ double a;
+ double b;
+ int i;
+ int j;
+ } t;
+
+ auto fn = [](MacroAssembler& assm) {
+ // Load all structure elements to registers.
+ __ fld(ft0, a0, offsetof(T, a));
+ __ fld(ft1, a0, offsetof(T, b));
+ __ lw(a4, a0, offsetof(T, i));
+ __ lw(a5, a0, offsetof(T, j));
+
+ // Convert double in ft0 to int in element i.
+ __ fcvt_l_d(a6, ft0);
+ __ sw(a6, a0, offsetof(T, i));
+
+ // Convert double in ft1 to int in element j.
+ __ fcvt_l_d(a7, ft1);
+ __ sw(a7, a0, offsetof(T, j));
+
+ // Convert int in original i (a4) to double in a.
+ __ fcvt_d_l(fa0, a4);
+ __ fsd(fa0, a0, offsetof(T, a));
+
+ // Convert int in original j (a5) to double in b.
+ __ fcvt_d_l(fa1, a5);
+ __ fsd(fa1, a0, offsetof(T, b));
+ };
+ auto f = AssembleCode<F3>(fn);
+
+ t.a = 1.5e4;
+ t.b = 2.75e8;
+ t.i = 12345678;
+ t.j = -100000;
+ f.Call(&t, 0, 0, 0, 0);
+
+ CHECK_EQ(12345678.0, t.a);
+ CHECK_EQ(-100000.0, t.b);
+ CHECK_EQ(15000, t.i);
+ CHECK_EQ(275000000, t.j);
+}
+
+TEST(RISCV6) {
+ // Test simple memory loads and stores.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ struct T {
+ uint32_t ui;
+ int32_t si;
+ int32_t r1;
+ int32_t r2;
+ int32_t r3;
+ int32_t r4;
+ int32_t r5;
+ int32_t r6;
+ } t;
+
+ auto fn = [](MacroAssembler& assm) {
+ // Basic word load/store.
+ __ lw(a4, a0, offsetof(T, ui));
+ __ sw(a4, a0, offsetof(T, r1));
+
+ // lh with positive data.
+ __ lh(a5, a0, offsetof(T, ui));
+ __ sw(a5, a0, offsetof(T, r2));
+
+ // lh with negative data.
+ __ lh(a6, a0, offsetof(T, si));
+ __ sw(a6, a0, offsetof(T, r3));
+
+ // lhu with negative data.
+ __ lhu(a7, a0, offsetof(T, si));
+ __ sw(a7, a0, offsetof(T, r4));
+
+    // lb with negative data.
+ __ lb(t0, a0, offsetof(T, si));
+ __ sw(t0, a0, offsetof(T, r5));
+
+    // sh writes only the low half of the word.
+ __ RV_li(t1, 0x33333333);
+ __ sw(t1, a0, offsetof(T, r6));
+ __ lhu(t1, a0, offsetof(T, si));
+ __ sh(t1, a0, offsetof(T, r6));
+ };
+ auto f = AssembleCode<F3>(fn);
+
+ t.ui = 0x11223344;
+ t.si = 0x99AABBCC;
+ f.Call(&t, 0, 0, 0, 0);
+
+ CHECK_EQ(static_cast<int32_t>(0x11223344), t.r1);
+ if (kArchEndian == kLittle) {
+ CHECK_EQ(static_cast<int32_t>(0x3344), t.r2);
+ CHECK_EQ(static_cast<int32_t>(0xFFFFBBCC), t.r3);
+ CHECK_EQ(static_cast<int32_t>(0x0000BBCC), t.r4);
+ CHECK_EQ(static_cast<int32_t>(0xFFFFFFCC), t.r5);
+ CHECK_EQ(static_cast<int32_t>(0x3333BBCC), t.r6);
+ } else {
+ CHECK_EQ(static_cast<int32_t>(0x1122), t.r2);
+ CHECK_EQ(static_cast<int32_t>(0xFFFF99AA), t.r3);
+ CHECK_EQ(static_cast<int32_t>(0x000099AA), t.r4);
+ CHECK_EQ(static_cast<int32_t>(0xFFFFFF99), t.r5);
+ CHECK_EQ(static_cast<int32_t>(0x99AA3333), t.r6);
+ }
+}
+
+// pair.first is the F_TYPE input to test, pair.second is I_TYPE expected result
+template <typename T>
+static const std::vector<std::pair<T, uint64_t>> fclass_test_values() {
+ static const std::pair<T, uint64_t> kValues[] = {
+ std::make_pair(-std::numeric_limits<T>::infinity(), kNegativeInfinity),
+ std::make_pair(-10240.56, kNegativeNormalNumber),
+ std::make_pair(-(std::numeric_limits<T>::min() / 2),
+ kNegativeSubnormalNumber),
+ std::make_pair(-0.0, kNegativeZero),
+ std::make_pair(+0.0, kPositiveZero),
+ std::make_pair((std::numeric_limits<T>::min() / 2),
+ kPositiveSubnormalNumber),
+ std::make_pair(10240.56, kPositiveNormalNumber),
+ std::make_pair(std::numeric_limits<T>::infinity(), kPositiveInfinity),
+ std::make_pair(std::numeric_limits<T>::signaling_NaN(), kSignalingNaN),
+ std::make_pair(std::numeric_limits<T>::quiet_NaN(), kQuietNaN)};
+ return std::vector<std::pair<T, uint64_t>>(&kValues[0],
+ &kValues[arraysize(kValues)]);
+}
+
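+// fclass.s / fclass.d write a one-hot classification mask, so each input
+// above is expected to produce exactly its paired k* constant.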
+TEST(FCLASS) {
+ CcTest::InitializeVM();
+ {
+ auto i_vec = fclass_test_values<float>();
+ for (auto i = i_vec.begin(); i != i_vec.end(); ++i) {
+ auto input = *i;
+ auto fn = [](MacroAssembler& assm) { __ fclass_s(a0, fa0); };
+ auto res = GenAndRunTest<uint32_t>(input.first, fn);
+ CHECK_EQ(input.second, res);
+ }
+ }
+
+ {
+ auto i_vec = fclass_test_values<double>();
+ for (auto i = i_vec.begin(); i != i_vec.end(); ++i) {
+ auto input = *i;
+ auto fn = [](MacroAssembler& assm) { __ fclass_d(a0, fa0); };
+ auto res = GenAndRunTest<uint32_t>(input.first, fn);
+ CHECK_EQ(input.second, res);
+ }
+ }
+}
+
+TEST(RISCV7) {
+ // Test floating point compare and
+ // branch instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ struct T {
+ double a;
+ double b;
+ double c;
+ double d;
+ double e;
+ double f;
+ int32_t result;
+ } t;
+
+ // Create a function that accepts &t,
+ // and loads, manipulates, and stores
+ // the doubles t.a ... t.f.
+ Label neither_is_nan, less_than, outa_here;
+ auto fn = [&neither_is_nan, &less_than, &outa_here](MacroAssembler& assm) {
+ __ fld(ft0, a0, offsetof(T, a));
+ __ fld(ft1, a0, offsetof(T, b));
+
+ __ fclass_d(t5, ft0);
+ __ fclass_d(t6, ft1);
+ __ or_(t5, t5, t6);
+ __ andi(t5, t5, kSignalingNaN | kQuietNaN);
+ __ beq(t5, zero_reg, &neither_is_nan);
+ __ sw(zero_reg, a0, offsetof(T, result));
+ __ j(&outa_here);
+
+ __ bind(&neither_is_nan);
+
+ __ flt_d(t5, ft1, ft0);
+ __ bne(t5, zero_reg, &less_than);
+
+ __ sw(zero_reg, a0, offsetof(T, result));
+ __ j(&outa_here);
+
+ __ bind(&less_than);
+ __ RV_li(a4, 1);
+ __ sw(a4, a0, offsetof(T, result)); // Set true.
+
+ // This test-case should have additional
+ // tests.
+
+ __ bind(&outa_here);
+ };
+
+ auto f = AssembleCode<F3>(fn);
+
+ t.a = 1.5e14;
+ t.b = 2.75e11;
+ t.c = 2.0;
+ t.d = -4.0;
+ t.e = 0.0;
+ t.f = 0.0;
+ t.result = 0;
+ f.Call(&t, 0, 0, 0, 0);
+ CHECK_EQ(1.5e14, t.a);
+ CHECK_EQ(2.75e11, t.b);
+ CHECK_EQ(1, t.result);
+}
+
+TEST(RISCV9) {
+ // Test BRANCH improvements.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+ Label exit, exit2, exit3;
+
+ __ Branch(&exit, ge, a0, Operand(zero_reg));
+ __ Branch(&exit2, ge, a0, Operand(0x00001FFF));
+ __ Branch(&exit3, ge, a0, Operand(0x0001FFFF));
+
+ __ bind(&exit);
+ __ bind(&exit2);
+ __ bind(&exit3);
+ __ jr(ra);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ USE(code);
+}
+
+TEST(NAN_BOX) {
+ // Test float NaN-boxing.
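+  // A 32-bit float held in a 64-bit FPR is NaN-boxed: the upper 32 bits must
+  // be all ones, which is what the 0xFFFFFFFF00000000 pattern below checks.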
+ CcTest::InitializeVM();
+
+ // Test NaN boxing in FMV.X.D
+ {
+ auto fn = [](MacroAssembler& assm) { __ fmv_x_d(a0, fa0); };
+ auto res = GenAndRunTest<uint64_t>(1234.56f, fn);
+ CHECK_EQ(0xFFFFFFFF00000000 | bit_cast<uint32_t>(1234.56f), res);
+ }
+ // Test NaN boxing in FMV.X.W
+ {
+ auto fn = [](MacroAssembler& assm) { __ fmv_x_w(a0, fa0); };
+ auto res = GenAndRunTest<uint64_t>(1234.56f, fn);
+ CHECK_EQ((uint64_t)bit_cast<uint32_t>(1234.56f), res);
+ }
+
+ // Test FLW and FSW
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ struct T {
+ float a;
+ uint64_t box;
+ uint64_t res;
+ } t;
+
+ auto fn = [](MacroAssembler& assm) {
+ // Load all structure elements to registers.
+ __ flw(fa0, a0, offsetof(T, a));
+ // Check boxing when flw
+ __ fsd(fa0, a0, offsetof(T, box));
+ // Check only transfer low 32bits when fsw
+ __ fsw(fa0, a0, offsetof(T, res));
+ };
+ auto f = AssembleCode<F3>(fn);
+
+ t.a = -123.45;
+ t.box = 0;
+ t.res = 0;
+ f.Call(&t, 0, 0, 0, 0);
+
+ CHECK_EQ(0xFFFFFFFF00000000 | bit_cast<int32_t>(t.a), t.box);
+ CHECK_EQ((uint64_t)bit_cast<uint32_t>(t.a), t.res);
+}
+
+TEST(RVC_CI) {
+ // Test RV64C extension CI type instructions.
+ CcTest::InitializeVM();
+
+ // Test c.addi
+ {
+ auto fn = [](MacroAssembler& assm) { __ c_addi(a0, -15); };
+ auto res = GenAndRunTest<int64_t>(LARGE_INT_EXCEED_32_BIT, fn);
+ CHECK_EQ(LARGE_INT_EXCEED_32_BIT - 15, res);
+ }
+
+ // Test c.addiw
+ {
+ auto fn = [](MacroAssembler& assm) { __ c_addiw(a0, -20); };
+ auto res = GenAndRunTest<int32_t>(LARGE_INT_UNDER_32_BIT, fn);
+ CHECK_EQ(LARGE_INT_UNDER_32_BIT - 20, res);
+ }
+
+ // Test c.addi16sp
+ {
+ auto fn = [](MacroAssembler& assm) {
+ __ mv(t1, sp);
+ __ mv(sp, a0);
+ __ c_addi16sp(-432);
+ __ mv(a0, sp);
+ __ mv(sp, t1);
+ };
+ auto res = GenAndRunTest<int64_t>(66666, fn);
+ CHECK_EQ(66666 - 432, res);
+ }
+
+ // Test c.li
+ {
+ auto fn = [](MacroAssembler& assm) { __ c_li(a0, -15); };
+ auto res = GenAndRunTest<int64_t>(1234543, fn);
+ CHECK_EQ(-15, res);
+ }
+
+ // Test c.lui
+ {
+ auto fn = [](MacroAssembler& assm) { __ c_lui(a0, -20); };
+ auto res = GenAndRunTest<int64_t>(0x1234567, fn);
+ CHECK_EQ(0xfffffffffffec000, (uint64_t)res);
+ }
+
+ // Test c.slli
+ {
+ auto fn = [](MacroAssembler& assm) { __ c_slli(a0, 13); };
+ auto res = GenAndRunTest<int64_t>(0x1234'5678ULL, fn);
+ CHECK_EQ(0x1234'5678ULL << 13, res);
+ }
+}
+
+TEST(RVC_CIW) {
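+  // Test RV64C extension CIW type instructions.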
+ CcTest::InitializeVM();
+
+ // Test c.addi4spn
+ {
+ auto fn = [](MacroAssembler& assm) {
+ __ mv(t1, sp);
+ __ mv(sp, a0);
+ __ c_addi4spn(a0, 924);
+ __ mv(sp, t1);
+ };
+ auto res = GenAndRunTest<int64_t>(66666, fn);
+ CHECK_EQ(66666 + 924, res);
+ }
+}
+
+TEST(RVC_CR) {
+ // Test RV64C extension CR type instructions.
+ CcTest::InitializeVM();
+
+ // Test c.add
+ {
+ auto fn = [](MacroAssembler& assm) {
+ __ RV_li(a1, MIN_VAL_IMM12);
+ __ c_add(a0, a1);
+ };
+ auto res = GenAndRunTest<int64_t>(LARGE_INT_EXCEED_32_BIT, fn);
+ CHECK_EQ(LARGE_INT_EXCEED_32_BIT + MIN_VAL_IMM12, res);
+ }
+}
+
+TEST(RVC_CA) {
+ // Test RV64C extension CA type instructions.
+ CcTest::InitializeVM();
+
+ // Test c.sub
+ {
+ auto fn = [](MacroAssembler& assm) {
+ __ RV_li(a1, MIN_VAL_IMM12);
+ __ c_sub(a0, a1);
+ };
+ auto res = GenAndRunTest<int64_t>(LARGE_INT_UNDER_32_BIT, fn);
+ CHECK_EQ(LARGE_INT_UNDER_32_BIT - MIN_VAL_IMM12, res);
+ }
+
+ // Test c.xor
+ {
+ auto fn = [](MacroAssembler& assm) {
+ __ RV_li(a1, MIN_VAL_IMM12);
+ __ c_xor(a0, a1);
+ };
+ auto res = GenAndRunTest<int64_t>(LARGE_INT_UNDER_32_BIT, fn);
+ CHECK_EQ(LARGE_INT_UNDER_32_BIT ^ MIN_VAL_IMM12, res);
+ }
+
+ // Test c.or
+ {
+ auto fn = [](MacroAssembler& assm) {
+ __ RV_li(a1, MIN_VAL_IMM12);
+ __ c_or(a0, a1);
+ };
+ auto res = GenAndRunTest<int64_t>(LARGE_INT_UNDER_32_BIT, fn);
+ CHECK_EQ(LARGE_INT_UNDER_32_BIT | MIN_VAL_IMM12, res);
+ }
+
+ // Test c.and
+ {
+ auto fn = [](MacroAssembler& assm) {
+ __ RV_li(a1, MIN_VAL_IMM12);
+ __ c_and(a0, a1);
+ };
+ auto res = GenAndRunTest<int64_t>(LARGE_INT_UNDER_32_BIT, fn);
+ CHECK_EQ(LARGE_INT_UNDER_32_BIT & MIN_VAL_IMM12, res);
+ }
+
+ // Test c.subw
+ {
+ auto fn = [](MacroAssembler& assm) {
+ __ RV_li(a1, MIN_VAL_IMM12);
+ __ c_subw(a0, a1);
+ };
+ auto res = GenAndRunTest<int64_t>(LARGE_INT_UNDER_32_BIT, fn);
+ CHECK_EQ(LARGE_INT_UNDER_32_BIT - MIN_VAL_IMM12, res);
+ }
+
+ // Test c.addw
+ {
+ auto fn = [](MacroAssembler& assm) {
+ __ RV_li(a1, MIN_VAL_IMM12);
+ __ c_addw(a0, a1);
+ };
+ auto res = GenAndRunTest<int64_t>(LARGE_INT_UNDER_32_BIT, fn);
+ CHECK_EQ(LARGE_INT_UNDER_32_BIT + MIN_VAL_IMM12, res);
+ }
+}
+
+TEST(RVC_LOAD_STORE_SP) {
+ // Test RV64C extension fldsp/fsdsp, lwsp/swsp, ldsp/sdsp.
+ CcTest::InitializeVM();
+
+ {
+ auto fn = [](MacroAssembler& assm) {
+ __ c_fsdsp(fa0, 80);
+ __ c_fldsp(fa0, 80);
+ };
+ auto res = GenAndRunTest<double>(-3456.678, fn);
+ CHECK_EQ(-3456.678, res);
+ }
+
+ {
+ auto fn = [](MacroAssembler& assm) {
+ __ c_swsp(a0, 40);
+ __ c_lwsp(a0, 40);
+ };
+ auto res = GenAndRunTest<int32_t>(0x456AF894, fn);
+ CHECK_EQ(0x456AF894, res);
+ }
+
+ {
+ auto fn = [](MacroAssembler& assm) {
+ __ c_sdsp(a0, 160);
+ __ c_ldsp(a0, 160);
+ };
+ auto res = GenAndRunTest<uint64_t>(0xFBB10A9C12345678, fn);
+ CHECK_EQ(0xFBB10A9C12345678, res);
+ }
+}
+
+TEST(RVC_LOAD_STORE_COMPRESSED) {
+  // Test RV64C extension fld/fsd, lw/sw, ld/sd.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ struct T {
+ double a;
+ double b;
+ double c;
+ } t;
+
+ // c.fld
+ {
+ auto fn = [](MacroAssembler& assm) {
+ __ c_fld(fa0, a0, offsetof(T, a));
+ __ c_fld(fa1, a0, offsetof(T, b));
+ __ fadd_d(fa2, fa1, fa0);
+ __ c_fsd(fa2, a0, offsetof(T, c)); // c = a + b.
+ };
+ auto f = AssembleCode<F3>(fn);
+
+ t.a = 1.5e14;
+ t.b = 1.5e14;
+ t.c = 3.0e14;
+ f.Call(&t, 0, 0, 0, 0);
+ // Expected double results.
+ CHECK_EQ(1.5e14, t.a);
+ CHECK_EQ(1.5e14, t.b);
+ CHECK_EQ(3.0e14, t.c);
+ }
+
+ struct S {
+ int32_t a;
+ int32_t b;
+ int32_t c;
+ } s;
+ // c.lw
+ {
+ auto fn = [](MacroAssembler& assm) {
+ __ c_lw(a1, a0, offsetof(S, a));
+ __ c_lw(a2, a0, offsetof(S, b));
+ __ add(a3, a1, a2);
+ __ c_sw(a3, a0, offsetof(S, c)); // c = a + b.
+ };
+ auto f = AssembleCode<F3>(fn);
+
+ s.a = 1;
+ s.b = 2;
+ s.c = 3;
+ f.Call(&s, 0, 0, 0, 0);
+ CHECK_EQ(1, s.a);
+ CHECK_EQ(2, s.b);
+ CHECK_EQ(3, s.c);
+ }
+
+ struct U {
+ int64_t a;
+ int64_t b;
+ int64_t c;
+ } u;
+ // c.ld
+ {
+ auto fn = [](MacroAssembler& assm) {
+ __ c_ld(a1, a0, offsetof(U, a));
+ __ c_ld(a2, a0, offsetof(U, b));
+ __ add(a3, a1, a2);
+ __ c_sd(a3, a0, offsetof(U, c)); // c = a + b.
+ };
+ auto f = AssembleCode<F3>(fn);
+
+ u.a = 1;
+ u.b = 2;
+ u.c = 3;
+ f.Call(&u, 0, 0, 0, 0);
+ CHECK_EQ(1, u.a);
+ CHECK_EQ(2, u.b);
+ CHECK_EQ(3, u.c);
+ }
+}
+
+TEST(RVC_JUMP) {
+ CcTest::InitializeVM();
+
+ Label L, C;
+ auto fn = [&L, &C](MacroAssembler& assm) {
+ __ mv(a1, a0);
+ __ RV_li(a0, 0l);
+ __ c_j(&C);
+
+ __ bind(&L);
+ __ add(a0, a0, a1);
+ __ addi(a1, a1, -1);
+
+ __ bind(&C);
+ __ xori(a2, a1, 0);
+ __ bnez(a2, &L);
+ };
+
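+  // The loop accumulates input + (input - 1) + ... + 1, so for an input of 50
+  // the expected result is 50 * 51 / 2 = 1275.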
+ int64_t input = 50;
+ int64_t expected_res = 1275L;
+ auto res = GenAndRunTest<int64_t>(input, fn);
+ CHECK_EQ(expected_res, res);
+}
+
+TEST(TARGET_ADDR) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+  // The sequence of instructions that loads the 48-bit address 0x0123456789ab.
+ uint32_t buffer[6] = {0x091ab37, 0x2b330213, 0x00b21213,
+ 0x62626213, 0x00621213, 0x02b26213};
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ uintptr_t addr = reinterpret_cast<uintptr_t>(&buffer[0]);
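+  // target_address_at decodes the materialization sequence at addr and is
+  // expected to reconstruct the original 48-bit constant.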
+ Address res = __ target_address_at(static_cast<Address>(addr));
+ CHECK_EQ(0x0123456789abL, res);
+}
+
+TEST(SET_TARGET_ADDR) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+  // The sequence of instructions that loads the 48-bit address 0xba9876543210.
+ uint32_t buffer[6] = {0x091ab37, 0x2b330213, 0x00b21213,
+ 0x62626213, 0x00621213, 0x02b26213};
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ uintptr_t addr = reinterpret_cast<uintptr_t>(&buffer[0]);
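+  // set_target_value_at patches the immediates of the existing sequence in
+  // place; decoding it again should yield the new address.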
+ __ set_target_value_at(static_cast<Address>(addr), 0xba9876543210L,
+ FLUSH_ICACHE_IF_NEEDED);
+ Address res = __ target_address_at(static_cast<Address>(addr));
+ CHECK_EQ(0xba9876543210L, res);
+}
+
+// pair.first is the F_TYPE input to test; pair.second is the expected I_TYPE
+// result.
+template <typename F_TYPE, typename I_TYPE>
+static const std::vector<std::pair<F_TYPE, I_TYPE>> out_of_range_test_values() {
+ static const std::pair<F_TYPE, I_TYPE> kValues[] = {
+ std::make_pair(std::numeric_limits<F_TYPE>::quiet_NaN(),
+ std::numeric_limits<I_TYPE>::max()),
+ std::make_pair(std::numeric_limits<F_TYPE>::signaling_NaN(),
+ std::numeric_limits<I_TYPE>::max()),
+ std::make_pair(std::numeric_limits<F_TYPE>::infinity(),
+ std::numeric_limits<I_TYPE>::max()),
+ std::make_pair(-std::numeric_limits<F_TYPE>::infinity(),
+ std::numeric_limits<I_TYPE>::min()),
+ std::make_pair(
+ static_cast<F_TYPE>(std::numeric_limits<I_TYPE>::max()) + 1024,
+ std::numeric_limits<I_TYPE>::max()),
+ std::make_pair(
+ static_cast<F_TYPE>(std::numeric_limits<I_TYPE>::min()) - 1024,
+ std::numeric_limits<I_TYPE>::min()),
+ };
+ return std::vector<std::pair<F_TYPE, I_TYPE>>(&kValues[0],
+ &kValues[arraysize(kValues)]);
+}
+
+// Test conversions from wider to narrower types with out-of-range values or
+// with NaN, Inf, -Inf inputs.
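+// Per the RISC-V spec, fcvt.* saturates: NaN, +Infinity, and above-range
+// inputs convert to the destination type's maximum, while -Infinity and
+// below-range inputs convert to its minimum.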
+TEST(OUT_OF_RANGE_CVT) {
+ CcTest::InitializeVM();
+
+  { // test fcvt_w_d
+ auto i_vec = out_of_range_test_values<double, int32_t>();
+ for (auto i = i_vec.begin(); i != i_vec.end(); ++i) {
+ auto input = *i;
+ auto fn = [](MacroAssembler& assm) { __ fcvt_w_d(a0, fa0); };
+ auto res = GenAndRunTest<int32_t>(input.first, fn);
+ CHECK_EQ(input.second, res);
+ }
+ }
+
+  { // test fcvt_w_s
+ auto i_vec = out_of_range_test_values<float, int32_t>();
+ for (auto i = i_vec.begin(); i != i_vec.end(); ++i) {
+ auto input = *i;
+ auto fn = [](MacroAssembler& assm) { __ fcvt_w_s(a0, fa0); };
+ auto res = GenAndRunTest<int32_t>(input.first, fn);
+ CHECK_EQ(input.second, res);
+ }
+ }
+
+  { // test fcvt_wu_d
+ auto i_vec = out_of_range_test_values<double, uint32_t>();
+ for (auto i = i_vec.begin(); i != i_vec.end(); ++i) {
+ auto input = *i;
+ auto fn = [](MacroAssembler& assm) { __ fcvt_wu_d(a0, fa0); };
+ auto res = GenAndRunTest<uint32_t>(input.first, fn);
+ CHECK_EQ(input.second, res);
+ }
+ }
+
+  { // test fcvt_wu_s
+ auto i_vec = out_of_range_test_values<float, uint32_t>();
+ for (auto i = i_vec.begin(); i != i_vec.end(); ++i) {
+ auto input = *i;
+ auto fn = [](MacroAssembler& assm) { __ fcvt_wu_s(a0, fa0); };
+ auto res = GenAndRunTest<uint32_t>(input.first, fn);
+ CHECK_EQ(input.second, res);
+ }
+ }
+
+  { // test fcvt_l_d
+ auto i_vec = out_of_range_test_values<double, int64_t>();
+ for (auto i = i_vec.begin(); i != i_vec.end(); ++i) {
+ auto input = *i;
+ auto fn = [](MacroAssembler& assm) { __ fcvt_l_d(a0, fa0); };
+ auto res = GenAndRunTest<int64_t>(input.first, fn);
+ CHECK_EQ(input.second, res);
+ }
+ }
+
+  { // test fcvt_l_s
+ auto i_vec = out_of_range_test_values<float, int64_t>();
+ for (auto i = i_vec.begin(); i != i_vec.end(); ++i) {
+ auto input = *i;
+ auto fn = [](MacroAssembler& assm) { __ fcvt_l_s(a0, fa0); };
+ auto res = GenAndRunTest<int64_t>(input.first, fn);
+ CHECK_EQ(input.second, res);
+ }
+ }
+
+  { // test fcvt_lu_d
+ auto i_vec = out_of_range_test_values<double, uint64_t>();
+ for (auto i = i_vec.begin(); i != i_vec.end(); ++i) {
+ auto input = *i;
+ auto fn = [](MacroAssembler& assm) { __ fcvt_lu_d(a0, fa0); };
+ auto res = GenAndRunTest<uint64_t>(input.first, fn);
+ CHECK_EQ(input.second, res);
+ }
+ }
+
+  { // test fcvt_lu_s
+ auto i_vec = out_of_range_test_values<float, uint64_t>();
+ for (auto i = i_vec.begin(); i != i_vec.end(); ++i) {
+ auto input = *i;
+ auto fn = [](MacroAssembler& assm) { __ fcvt_lu_s(a0, fa0); };
+ auto res = GenAndRunTest<uint64_t>(input.first, fn);
+ CHECK_EQ(input.second, res);
+ }
+ }
+}
+
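+// Per IEEE 754, feq/flt/fle return false whenever either operand is NaN; the
+// first four cases below check this, while the infinity cases must match the
+// corresponding C++ comparison.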
+#define FCMP_TEST_HELPER(F, fn, op) \
+ { \
+ auto res1 = GenAndRunTest<int32_t>(std::numeric_limits<F>::quiet_NaN(), \
+ static_cast<F>(1.0), fn); \
+ CHECK_EQ(false, res1); \
+ auto res2 = \
+ GenAndRunTest<int32_t>(std::numeric_limits<F>::quiet_NaN(), \
+ std::numeric_limits<F>::quiet_NaN(), fn); \
+ CHECK_EQ(false, res2); \
+ auto res3 = \
+ GenAndRunTest<int32_t>(std::numeric_limits<F>::signaling_NaN(), \
+ std::numeric_limits<F>::quiet_NaN(), fn); \
+ CHECK_EQ(false, res3); \
+ auto res4 = \
+ GenAndRunTest<int32_t>(std::numeric_limits<F>::quiet_NaN(), \
+ std::numeric_limits<F>::infinity(), fn); \
+ CHECK_EQ(false, res4); \
+ auto res5 = \
+ GenAndRunTest<int32_t>(std::numeric_limits<F>::infinity(), \
+ std::numeric_limits<F>::infinity(), fn); \
+ CHECK_EQ((std::numeric_limits<F>::infinity() \
+ op std::numeric_limits<F>::infinity()), \
+ res5); \
+ auto res6 = \
+ GenAndRunTest<int32_t>(-std::numeric_limits<F>::infinity(), \
+ std::numeric_limits<F>::infinity(), fn); \
+ CHECK_EQ((-std::numeric_limits<F>::infinity() \
+ op std::numeric_limits<F>::infinity()), \
+ res6); \
+ }
+
+TEST(F_NAN) {
+  // Test floating-point compares with NaN and +/-Inf operands.
+ CcTest::InitializeVM();
+
+  // single-precision compares
+ auto fn1 = [](MacroAssembler& assm) { __ feq_s(a0, fa0, fa1); };
+ FCMP_TEST_HELPER(float, fn1, ==);
+ auto fn2 = [](MacroAssembler& assm) { __ flt_s(a0, fa0, fa1); };
+ FCMP_TEST_HELPER(float, fn2, <);
+ auto fn3 = [](MacroAssembler& assm) { __ fle_s(a0, fa0, fa1); };
+ FCMP_TEST_HELPER(float, fn3, <=);
+
+  // double-precision compares
+ auto fn4 = [](MacroAssembler& assm) { __ feq_d(a0, fa0, fa1); };
+ FCMP_TEST_HELPER(double, fn4, ==);
+ auto fn5 = [](MacroAssembler& assm) { __ flt_d(a0, fa0, fa1); };
+ FCMP_TEST_HELPER(double, fn5, <);
+ auto fn6 = [](MacroAssembler& assm) { __ fle_d(a0, fa0, fa1); };
+ FCMP_TEST_HELPER(double, fn6, <=);
+}
+
+TEST(jump_tables1) {
+ // Test jump tables with forward jumps.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ const int kNumCases = 128;
+ int values[kNumCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ Label labels[kNumCases], done;
+
+ auto fn = [&labels, &done, values](MacroAssembler& assm) {
+ __ addi(sp, sp, -8);
+ __ Sd(ra, MemOperand(sp));
+ __ Align(8);
+ {
+ __ BlockTrampolinePoolFor(kNumCases * 2 + 6);
+
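+      // PC-relative dispatch: auipc captures the current pc in ra, the case
+      // index in a0 is scaled by 8, and the 8-byte table of label addresses
+      // starts 6 instructions (6 * kInstrSize) after the auipc.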
+ __ auipc(ra, 0);
+ __ slli(t3, a0, 3);
+ __ add(t3, t3, ra);
+ __ Ld(t3, MemOperand(t3, 6 * kInstrSize));
+ __ jr(t3);
+ __ nop(); // For 16-byte alignment
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dd(&labels[i]);
+ }
+ }
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
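+      // Materialize values[i]: lui loads the upper 20 bits, biased by 0x800
+      // to cancel the sign extension of addi's 12-bit immediate, and addi
+      // adds back the sign-extended low 12 bits.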
+ __ lui(a0, (values[i] + 0x800) >> 12);
+ __ addi(a0, a0, (values[i] << 20 >> 20));
+ __ j(&done);
+ }
+
+ __ bind(&done);
+ __ Ld(ra, MemOperand(sp));
+ __ addi(sp, sp, 8);
+
+ CHECK_EQ(0, assm.UnboundLabelsCount());
+ };
+ auto f = AssembleCode<F1>(fn);
+
+ for (int i = 0; i < kNumCases; ++i) {
+ int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
+ CHECK_EQ(values[i], static_cast<int>(res));
+ }
+}
+
+TEST(jump_tables2) {
+ // Test jump tables with backward jumps.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ const int kNumCases = 128;
+ int values[kNumCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ Label labels[kNumCases], done, dispatch;
+
+ auto fn = [&labels, &done, &dispatch, values](MacroAssembler& assm) {
+ __ addi(sp, sp, -8);
+ __ Sd(ra, MemOperand(sp));
+ __ j(&dispatch);
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ __ lui(a0, (values[i] + 0x800) >> 12);
+ __ addi(a0, a0, (values[i] << 20 >> 20));
+ __ j(&done);
+ }
+
+ __ Align(8);
+ __ bind(&dispatch);
+
+ {
+ __ BlockTrampolinePoolFor(kNumCases * 2 + 6);
+
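+      // Same pc-relative dispatch sequence as in jump_tables1, emitted after
+      // the case bodies this time.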
+ __ auipc(ra, 0);
+ __ slli(t3, a0, 3);
+ __ add(t3, t3, ra);
+ __ Ld(t3, MemOperand(t3, 6 * kInstrSize));
+ __ jr(t3);
+ __ nop(); // For 16-byte alignment
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dd(&labels[i]);
+ }
+ }
+ __ bind(&done);
+ __ Ld(ra, MemOperand(sp));
+ __ addi(sp, sp, 8);
+ };
+ auto f = AssembleCode<F1>(fn);
+
+ for (int i = 0; i < kNumCases; ++i) {
+ int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
+ CHECK_EQ(values[i], res);
+ }
+}
+
+TEST(jump_tables3) {
+ // Test jump tables with backward jumps and embedded heap objects.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ const int kNumCases = 128;
+ Handle<Object> values[kNumCases];
+ for (int i = 0; i < kNumCases; ++i) {
+ double value = isolate->random_number_generator()->NextDouble();
+ values[i] = isolate->factory()->NewHeapNumber<AllocationType::kOld>(value);
+ }
+ Label labels[kNumCases], done, dispatch;
+ Object obj;
+ int64_t imm64;
+
+ auto fn = [&labels, &done, &dispatch, values, &obj,
+ &imm64](MacroAssembler& assm) {
+ __ addi(sp, sp, -8);
+ __ Sd(ra, MemOperand(sp));
+
+ __ j(&dispatch);
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ obj = *values[i];
+ imm64 = obj.ptr();
+ __ nop(); // For 8 byte alignment
+ __ RV_li(a0, imm64);
+ __ nop(); // For 8 byte alignment
+ __ j(&done);
+ }
+
+ __ Align(8);
+ __ bind(&dispatch);
+ {
+ __ BlockTrampolinePoolFor(kNumCases * 2 + 6);
+
+ __ auipc(ra, 0);
+ __ slli(t3, a0, 3);
+ __ add(t3, t3, ra);
+ __ Ld(t3, MemOperand(t3, 6 * kInstrSize));
+ __ jr(t3);
+ __ nop(); // For 16-byte alignment
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dd(&labels[i]);
+ }
+ }
+
+ __ bind(&done);
+ __ Ld(ra, MemOperand(sp));
+ __ addi(sp, sp, 8);
+ };
+ auto f = AssembleCode<F1>(fn);
+
+ for (int i = 0; i < kNumCases; ++i) {
+ Handle<Object> result(
+ Object(reinterpret_cast<Address>(f.Call(i, 0, 0, 0, 0))), isolate);
+#ifdef OBJECT_PRINT
+ ::printf("f(%d) = ", i);
+ result->Print(std::cout);
+ ::printf("\n");
+#endif
+ CHECK(values[i].is_identical_to(result));
+ }
+}
+
+TEST(li_estimate) {
+ std::vector<int64_t> immediates = {
+ -256, -255, 0, 255, 8192, 0x7FFFFFFF,
+ INT32_MIN, INT32_MAX / 2, INT32_MAX, UINT32_MAX, INT64_MAX, INT64_MAX / 2,
+ INT64_MIN};
+  // Test that RV_li emits exactly the number of instructions that
+  // li_estimate() predicts.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+ for (auto p : immediates) {
+ Label a;
+ assm.bind(&a);
+ assm.RV_li(t0, p);
+ int expected_count = assm.li_estimate(p, true);
+ int count = assm.InstructionsGeneratedSince(&a);
+ CHECK_EQ(count, expected_count);
+ }
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index 928d47b03d..07e6c768f5 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -65,7 +65,7 @@ TEST(CallCFunction) {
MachineType type_intptr = MachineType::IntPtr();
- Node* const result =
+ TNode<IntPtrT> const result = m.UncheckedCast<IntPtrT>(
m.CallCFunction(fun_constant, type_intptr,
std::make_pair(type_intptr, m.IntPtrConstant(0)),
std::make_pair(type_intptr, m.IntPtrConstant(1)),
@@ -76,7 +76,7 @@ TEST(CallCFunction) {
std::make_pair(type_intptr, m.IntPtrConstant(6)),
std::make_pair(type_intptr, m.IntPtrConstant(7)),
std::make_pair(type_intptr, m.IntPtrConstant(8)),
- std::make_pair(type_intptr, m.IntPtrConstant(9)));
+ std::make_pair(type_intptr, m.IntPtrConstant(9))));
m.Return(m.SmiTag(result));
}
@@ -99,11 +99,12 @@ TEST(CallCFunctionWithCallerSavedRegisters) {
MachineType type_intptr = MachineType::IntPtr();
- Node* const result = m.CallCFunctionWithCallerSavedRegisters(
- fun_constant, type_intptr, kSaveFPRegs,
- std::make_pair(type_intptr, m.IntPtrConstant(0)),
- std::make_pair(type_intptr, m.IntPtrConstant(1)),
- std::make_pair(type_intptr, m.IntPtrConstant(2)));
+ TNode<IntPtrT> const result =
+ m.UncheckedCast<IntPtrT>(m.CallCFunctionWithCallerSavedRegisters(
+ fun_constant, type_intptr, kSaveFPRegs,
+ std::make_pair(type_intptr, m.IntPtrConstant(0)),
+ std::make_pair(type_intptr, m.IntPtrConstant(1)),
+ std::make_pair(type_intptr, m.IntPtrConstant(2))));
m.Return(m.SmiTag(result));
}
@@ -881,7 +882,8 @@ void TestNameDictionaryLookup() {
};
for (size_t i = 0; i < arraysize(keys); i++) {
- Handle<Object> value = factory->NewPropertyCell(keys[i]);
+ Handle<Object> value =
+ factory->NewPropertyCell(keys[i], fake_details, keys[i]);
dictionary =
Dictionary::Add(isolate, dictionary, keys[i], value, fake_details);
}
@@ -2747,7 +2749,8 @@ TEST(CreatePromiseResolvingFunctions) {
m.NewJSPromise(context, m.UndefinedConstant());
PromiseResolvingFunctions funcs = m.CreatePromiseResolvingFunctions(
context, promise, m.BooleanConstant(false), native_context);
- Node *resolve = funcs.resolve, *reject = funcs.reject;
+ TNode<JSFunction> resolve = funcs.resolve;
+ TNode<JSFunction> reject = funcs.reject;
TNode<IntPtrT> const kSize = m.IntPtrConstant(2);
TNode<FixedArray> const arr =
m.Cast(m.AllocateFixedArray(PACKED_ELEMENTS, kSize));
@@ -3853,8 +3856,8 @@ TEST(InstructionSchedulingCallerSavedRegisters) {
CodeStubAssembler m(asm_tester.state());
{
- Node* x = m.SmiUntag(m.Parameter<Smi>(1));
- Node* y = m.WordOr(m.WordShr(x, 1), m.IntPtrConstant(1));
+ TNode<IntPtrT> x = m.SmiUntag(m.Parameter<Smi>(1));
+ TNode<WordT> y = m.WordOr(m.WordShr(x, 1), m.IntPtrConstant(1));
TNode<ExternalReference> isolate_ptr =
m.ExternalConstant(ExternalReference::isolate_address(isolate));
m.CallCFunctionWithCallerSavedRegisters(
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 3b709aeeff..a74f3e6bd7 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -274,8 +274,9 @@ TEST(Regression236) {
TEST(GetScriptLineNumber) {
LocalContext context;
- v8::HandleScope scope(CcTest::isolate());
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str("test"));
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::ScriptOrigin origin = v8::ScriptOrigin(isolate, v8_str("test"));
const char function_f[] = "function f() {}";
const int max_rows = 1000;
const int buffer_size = max_rows + sizeof(function_f);
@@ -650,9 +651,10 @@ TEST(CompileFunctionInContextQuirks) {
TEST(CompileFunctionInContextScriptOrigin) {
CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
LocalContext env;
- v8::ScriptOrigin origin(v8_str("test"), 22, 41);
+ v8::ScriptOrigin origin(isolate, v8_str("test"), 22, 41);
v8::ScriptCompiler::Source script_source(v8_str("throw new Error()"), origin);
Local<ScriptOrModule> script;
v8::Local<v8::Function> fun =
@@ -693,12 +695,13 @@ void TestCompileFunctionInContextToStringImpl() {
{ // NOLINT
CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
LocalContext env;
// Regression test for v8:6190
{
- v8::ScriptOrigin origin(v8_str("test"), 22, 41);
+ v8::ScriptOrigin origin(isolate, v8_str("test"), 22, 41);
v8::ScriptCompiler::Source script_source(v8_str("return event"), origin);
v8::Local<v8::String> params[] = {v8_str("event")};
@@ -725,7 +728,7 @@ void TestCompileFunctionInContextToStringImpl() {
// With no parameters:
{
- v8::ScriptOrigin origin(v8_str("test"), 17, 31);
+ v8::ScriptOrigin origin(isolate, v8_str("test"), 17, 31);
v8::ScriptCompiler::Source script_source(v8_str("return 0"), origin);
v8::TryCatch try_catch(CcTest::isolate());
@@ -750,7 +753,7 @@ void TestCompileFunctionInContextToStringImpl() {
// With a name:
{
- v8::ScriptOrigin origin(v8_str("test"), 17, 31);
+ v8::ScriptOrigin origin(isolate, v8_str("test"), 17, 31);
v8::ScriptCompiler::Source script_source(v8_str("return 0"), origin);
v8::TryCatch try_catch(CcTest::isolate());
@@ -946,8 +949,7 @@ TEST(DecideToPretenureDuringCompilation) {
// compilation.
if (!i::FLAG_opt || i::FLAG_always_opt || i::FLAG_minor_mc ||
i::FLAG_stress_incremental_marking || i::FLAG_optimize_for_size ||
- i::FLAG_turbo_nci || i::FLAG_turbo_nci_as_midtier ||
- i::FLAG_stress_concurrent_allocation) {
+ i::FLAG_turbo_nci || i::FLAG_stress_concurrent_allocation) {
return;
}
@@ -1097,7 +1099,7 @@ TEST(ProfilerEnabledDuringBackgroundCompile) {
v8::Local<v8::Script> script =
v8::ScriptCompiler::Compile(isolate->GetCurrentContext(),
&streamed_source, v8_str(source),
- v8::ScriptOrigin(v8_str("foo")))
+ v8::ScriptOrigin(isolate, v8_str("foo")))
.ToLocalChecked();
i::Handle<i::Object> obj = Utils::OpenHandle(*script);
diff --git a/deps/v8/test/cctest/test-concurrent-descriptor-array.cc b/deps/v8/test/cctest/test-concurrent-descriptor-array.cc
index bc9eb53ff5..aefb7ac38b 100644
--- a/deps/v8/test/cctest/test-concurrent-descriptor-array.cc
+++ b/deps/v8/test/cctest/test-concurrent-descriptor-array.cc
@@ -70,7 +70,6 @@ class ConcurrentSearchThread final : public v8::base::Thread {
// Uses linear search on a flat object, with up to 8 elements.
TEST(LinearSearchFlatObject) {
- heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -122,7 +121,6 @@ TEST(LinearSearchFlatObject) {
// Uses linear search on a flat object, which has more than 8 elements.
TEST(LinearSearchFlatObject_ManyElements) {
- heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
diff --git a/deps/v8/test/cctest/test-concurrent-feedback-vector.cc b/deps/v8/test/cctest/test-concurrent-feedback-vector.cc
index 7279e28567..38f7c05ffe 100644
--- a/deps/v8/test/cctest/test-concurrent-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-concurrent-feedback-vector.cc
@@ -156,7 +156,6 @@ static void CheckedWait(base::Semaphore& semaphore) {
// Verify that a LoadIC can be cycled through different states and safely
// read on a background thread.
TEST(CheckLoadICStates) {
- heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
FLAG_lazy_feedback_allocation = false;
Isolate* isolate = CcTest::i_isolate();
diff --git a/deps/v8/test/cctest/test-concurrent-js-array.cc b/deps/v8/test/cctest/test-concurrent-js-array.cc
new file mode 100644
index 0000000000..3dcff6b952
--- /dev/null
+++ b/deps/v8/test/cctest/test-concurrent-js-array.cc
@@ -0,0 +1,137 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/api/api.h"
+#include "src/base/platform/semaphore.h"
+#include "src/handles/handles-inl.h"
+#include "src/handles/local-handles-inl.h"
+#include "src/handles/persistent-handles.h"
+#include "src/heap/heap.h"
+#include "src/heap/local-heap-inl.h"
+#include "src/heap/local-heap.h"
+#include "src/heap/parked-scope.h"
+#include "src/objects/js-array-inl.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/heap/heap-utils.h"
+
+namespace v8 {
+namespace internal {
+
+static constexpr int kNumArrays = 1024;
+
+namespace {
+
+class BackgroundThread final : public v8::base::Thread {
+ public:
+ BackgroundThread(Heap* heap, std::vector<Handle<JSArray>> handles,
+ std::unique_ptr<PersistentHandles> ph,
+ base::Semaphore* sema_started)
+ : v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")),
+ heap_(heap),
+ handles_(std::move(handles)),
+ ph_(std::move(ph)),
+ sema_started_(sema_started) {}
+
+ void Run() override {
+ LocalHeap local_heap(heap_, ThreadKind::kBackground, std::move(ph_));
+ UnparkedScope unparked_scope(&local_heap);
+ LocalHandleScope scope(&local_heap);
+ Isolate* isolate = heap_->isolate();
+
+ for (int i = 0; i < kNumArrays; i++) {
+ handles_[i] = local_heap.NewPersistentHandle(handles_[i]);
+ }
+
+ sema_started_->Signal();
+
+    // Iterate in the opposite direction from the main thread to make a race
+    // at some point more likely.
+ static constexpr int kIndex = 1;
+ for (int i = 0; i < kNumArrays; i++) {
+ Handle<JSArray> x = handles_[i];
+ Handle<FixedArrayBase> elements =
+ local_heap.NewPersistentHandle(x->elements(isolate, kRelaxedLoad));
+ ElementsKind elements_kind = x->map(isolate).elements_kind();
+
+ // Mirroring the conditions in JSArrayRef::GetOwnCowElement.
+ if (!IsSmiOrObjectElementsKind(elements_kind)) continue;
+ if (elements->map() != ReadOnlyRoots(isolate).fixed_cow_array_map()) {
+ continue;
+ }
+
+ base::Optional<Object> result =
+ ConcurrentLookupIterator::TryGetOwnCowElement(
+ isolate, FixedArray::cast(*elements), elements_kind,
+ Smi::ToInt(x->length(isolate, kRelaxedLoad)), kIndex);
+
+ if (result.has_value()) {
+        // On any success, the element at index 1 must still be the original
+        // value Smi(1).
+ CHECK(result.value().IsSmi());
+ CHECK_EQ(Smi::ToInt(result.value()), 1);
+ }
+ }
+ }
+
+ private:
+ Heap* heap_;
+ std::vector<Handle<JSArray>> handles_;
+ std::unique_ptr<PersistentHandles> ph_;
+ base::Semaphore* sema_started_;
+};
+
+TEST(ArrayWithCowElements) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+
+ std::unique_ptr<PersistentHandles> ph = isolate->NewPersistentHandles();
+ std::vector<Handle<JSArray>> handles;
+ std::vector<Handle<JSArray>> persistent_handles;
+
+ HandleScope handle_scope(isolate);
+
+ // Create kNumArrays arrays with COW backing stores.
+ CompileRun(
+ "function f() { return [0,1,2,3,4]; }\n"
+ "const xs = [];\n"
+ "let i = 0;\n");
+
+ for (int i = 0; i < kNumArrays; i++) {
+ Handle<JSArray> x = Handle<JSArray>::cast(Utils::OpenHandle(
+ *CompileRunChecked(CcTest::isolate(), "xs[i++] = f();")));
+ CHECK_EQ(x->elements().map(), ReadOnlyRoots(isolate).fixed_cow_array_map());
+ handles.push_back(x);
+ persistent_handles.push_back(ph->NewHandle(x));
+ }
+
+ base::Semaphore sema_started(0);
+
+ // Pass persistent handles to background thread.
+ std::unique_ptr<BackgroundThread> thread(new BackgroundThread(
+ isolate->heap(), persistent_handles, std::move(ph), &sema_started));
+ CHECK(thread->Start());
+
+ sema_started.Wait();
+
+ // On the main thread, mutate the arrays, converting to a non-COW backing
+ // store.
+ static const char* const kMutators[] = {
+ "xs[--i].length--;", "xs[--i].length++;", "xs[--i].length = 0;",
+ "xs[--i][1] = 42;", "delete xs[--i][1];", "xs[--i].push(42);",
+ "xs[--i].pop();", "xs[--i][1] = 1.5;", "xs[--i][1] = {};",
+ };
+ static const int kNumMutators = arraysize(kMutators);
+
+ for (int i = kNumArrays - 1; i >= 0; i--) {
+ CompileRunChecked(CcTest::isolate(), kMutators[i % kNumMutators]);
+ CHECK_NE(handles[i]->elements().map(),
+ ReadOnlyRoots(isolate).fixed_cow_array_map());
+ }
+
+ thread->Join();
+}
+
+} // anonymous namespace
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-concurrent-prototype.cc b/deps/v8/test/cctest/test-concurrent-prototype.cc
index e46f3a5ade..88e902d13d 100644
--- a/deps/v8/test/cctest/test-concurrent-prototype.cc
+++ b/deps/v8/test/cctest/test-concurrent-prototype.cc
@@ -68,7 +68,6 @@ class ConcurrentSearchThread final : public v8::base::Thread {
// Test to search on a background thread, while the main thread is idle.
TEST(ProtoWalkBackground) {
- heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -108,7 +107,6 @@ TEST(ProtoWalkBackground) {
// Test to search on a background thread, while the main thread modifies the
// descriptor array.
TEST(ProtoWalkBackground_DescriptorArrayWrite) {
- heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -155,7 +153,6 @@ TEST(ProtoWalkBackground_DescriptorArrayWrite) {
}
TEST(ProtoWalkBackground_PrototypeChainWrite) {
- heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
diff --git a/deps/v8/test/cctest/test-concurrent-string.cc b/deps/v8/test/cctest/test-concurrent-string.cc
index 66988a5f83..e6d9be60f5 100644
--- a/deps/v8/test/cctest/test-concurrent-string.cc
+++ b/deps/v8/test/cctest/test-concurrent-string.cc
@@ -19,10 +19,10 @@ namespace internal {
namespace {
-#define DOUBLE_VALUE 12345.123456789
-#define STRING_VALUE "12345.123456789"
+#define DOUBLE_VALUE 28.123456789
+#define STRING_VALUE "28.123456789"
#define ARRAY_VALUE \
- { '1', '2', '3', '4', '5', '.', '1', '2', '3', '4', '5', '6', '7', '8', '9' }
+ { '2', '8', '.', '1', '2', '3', '4', '5', '6', '7', '8', '9' }
// Adapted from cctest/test-api.cc, and
// test/cctest/heap/test-external-string-tracker.cc.
@@ -100,7 +100,6 @@ class ConcurrentStringThread final : public v8::base::Thread {
// Inspect a one byte string, while the main thread externalizes it.
TEST(InspectOneByteExternalizing) {
- heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -146,7 +145,6 @@ TEST(InspectOneByteExternalizing) {
// Inspect a one byte string, while the main thread externalizes it into a two
// bytes string.
TEST(InspectOneIntoTwoByteExternalizing) {
- heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -189,7 +187,6 @@ TEST(InspectOneIntoTwoByteExternalizing) {
// Inspect a two byte string, while the main thread externalizes it.
TEST(InspectTwoByteExternalizing) {
- heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -202,8 +199,7 @@ TEST(InspectTwoByteExternalizing) {
// TODO(solanes): Can we have only one raw string?
const char* raw_string = STRING_VALUE;
// TODO(solanes): Is this the best way to create a two byte string from chars?
- const int kLength = 15;
- DCHECK_EQ(kLength, strlen(raw_string));
+ const int kLength = 12;
const uint16_t two_byte_array[kLength] = ARRAY_VALUE;
Handle<String> two_bytes_string;
{
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index dc883e4105..61ceae728f 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -589,6 +589,15 @@ static unsigned TotalHitCount(const v8::CpuProfileNode* node) {
return hit_count;
}
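+// Sums the hit counts of all subtrees whose root function name matches
+// |name|.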
+static unsigned TotalHitCount(const v8::CpuProfileNode* node,
+ const std::string& name) {
+ if (name.compare(node->GetFunctionNameStr()) == 0) return TotalHitCount(node);
+ unsigned hit_count = 0;
+ for (int i = 0, count = node->GetChildrenCount(); i < count; ++i)
+ hit_count += TotalHitCount(node->GetChild(i), name);
+ return hit_count;
+}
+
static const v8::CpuProfileNode* FindChild(v8::Local<v8::Context> context,
const v8::CpuProfileNode* node,
const char* name) {
@@ -2916,7 +2925,10 @@ TEST(TracingCpuProfiler) {
const profile_header = json[0];
if (typeof profile_header['startTime'] !== 'number')
return false;
- return json.some(event => (event.lines || []).some(line => line));
+ return json.some(event => (event.lines || []).some(line => line)) &&
+ json.filter(e => e.cpuProfile && e.cpuProfile.nodes)
+ .some(e => e.cpuProfile.nodes
+ .some(n => n.callFrame.codeType == "JS"));
}
checkProfile()" + profile_json +
")";
@@ -3269,22 +3281,31 @@ TEST(MultipleIsolates) {
// wrong if sampling an unlocked frame. We also prevent optimization to prevent
// inlining so each function call has its own frame.
const char* varying_frame_size_script = R"(
- %NeverOptimizeFunction(maybeYield);
+ %NeverOptimizeFunction(maybeYield0);
+ %NeverOptimizeFunction(maybeYield1);
+ %NeverOptimizeFunction(maybeYield2);
%NeverOptimizeFunction(bar);
%NeverOptimizeFunction(foo);
- function maybeYield(n) {
+ function maybeYield0(n) {
+ YieldIsolate(Math.random() > yieldLimit);
+ }
+ function maybeYield1(n) {
+ YieldIsolate(Math.random() > yieldLimit);
+ }
+ function maybeYield2(n) {
YieldIsolate(Math.random() > yieldLimit);
}
- function bar(a, b, c, d) {
- maybeYield(Math.random());
+ maybeYield = [maybeYield0 ,maybeYield1, maybeYield2];
+ function bar(threadNumber, a, b, c, d) {
+ maybeYield[threadNumber](Math.random());
return a.length + b.length + c.length + d.length;
}
- function foo(timeLimit, yieldProbability) {
+ function foo(timeLimit, yieldProbability, threadNumber) {
yieldLimit = 1 - yieldProbability;
const startTime = Date.now();
for (let i = 0; i < 1e6; i++) {
- maybeYield(1);
- bar("Hickory", "Dickory", "Doc", "Mouse");
+ maybeYield[threadNumber](1);
+ bar(threadNumber, "Hickory", "Dickory", "Doc", "Mouse");
YieldIsolate(Math.random() > 0.999);
if ((Date.now() - startTime) > timeLimit) break;
}
@@ -3293,8 +3314,10 @@ const char* varying_frame_size_script = R"(
class UnlockingThread : public v8::base::Thread {
public:
- explicit UnlockingThread(v8::Local<v8::Context> env)
- : Thread(Options("UnlockingThread")), env_(CcTest::isolate(), env) {}
+ explicit UnlockingThread(v8::Local<v8::Context> env, int32_t threadNumber)
+ : Thread(Options("UnlockingThread")),
+ env_(CcTest::isolate(), env),
+ threadNumber_(threadNumber) {}
void Run() override {
v8::Isolate* isolate = CcTest::isolate();
@@ -3302,10 +3325,11 @@ class UnlockingThread : public v8::base::Thread {
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope scope(isolate);
v8::Local<v8::Context> env = v8::Local<v8::Context>::New(isolate, env_);
- Profile(env);
+ Profile(env, threadNumber_);
}
- static void Profile(v8::Local<v8::Context> env) {
+ static void Profile(v8::Local<v8::Context> env, int32_t threadNumber) {
+ CHECK_LT(threadNumber, maxThreads_);
v8::Isolate* isolate = CcTest::isolate();
v8::Context::Scope context_scope(env);
v8::CpuProfiler* profiler = v8::CpuProfiler::New(isolate);
@@ -3315,15 +3339,24 @@ class UnlockingThread : public v8::base::Thread {
int32_t time_limit = 200;
double yield_probability = 0.001;
v8::Local<v8::Value> args[] = {v8::Integer::New(isolate, time_limit),
- v8::Number::New(isolate, yield_probability)};
+ v8::Number::New(isolate, yield_probability),
+ v8::Integer::New(isolate, threadNumber)};
v8::Local<v8::Function> function = GetFunction(env, "foo");
function->Call(env, env->Global(), arraysize(args), args).ToLocalChecked();
- profiler->StopProfiling(profile_name);
+ const v8::CpuProfile* profile = profiler->StopProfiling(profile_name);
+ const CpuProfileNode* root = profile->GetTopDownRoot();
+ for (int32_t number = 0; number < maxThreads_; number++) {
+ std::string maybeYield = "maybeYield" + std::to_string(number);
+ unsigned hit_count = TotalHitCount(root, maybeYield);
+ if (hit_count) CHECK_EQ(number, threadNumber);
+ }
profiler->Dispose();
}
private:
v8::Persistent<v8::Context> env_;
+ int32_t threadNumber_;
+ static const int32_t maxThreads_ = 3;
};
// Checking for crashes with multiple thread/single Isolate profiling.
@@ -3343,14 +3376,14 @@ TEST(MultipleThreadsSingleIsolate) {
});
CompileRun(varying_frame_size_script);
- UnlockingThread thread1(env);
- UnlockingThread thread2(env);
+ UnlockingThread thread1(env, 1);
+ UnlockingThread thread2(env, 2);
CHECK(thread1.Start());
CHECK(thread2.Start());
// For good measure, profile on our own thread
- UnlockingThread::Profile(env);
+ UnlockingThread::Profile(env, 0);
{
v8::Unlocker unlocker(isolate);
thread1.Join();
@@ -3889,10 +3922,12 @@ namespace {
struct FastApiReceiver {
static void FastCallback(v8::ApiObject receiver, int argument,
- int* fallback) {
+ v8::FastApiCallbackOptions& options) {
+ // TODO(mslekova): The fallback is not used by the test. Replace this
+ // with a CHECK.
v8::Object* receiver_obj = reinterpret_cast<v8::Object*>(&receiver);
if (!IsValidUnwrapObject(receiver_obj)) {
- *fallback = 1;
+ options.fallback = 1;
return;
}
FastApiReceiver* receiver_ptr =
@@ -3945,7 +3980,6 @@ TEST(FastApiCPUProfiler) {
// None of the following configurations include JSCallReducer.
if (i::FLAG_jitless) return;
if (i::FLAG_turboprop) return;
- if (i::FLAG_turbo_nci_as_midtier) return;
FLAG_SCOPE_EXTERNAL(opt);
FLAG_SCOPE_EXTERNAL(turbo_fast_api_calls);
@@ -3969,8 +4003,7 @@ TEST(FastApiCPUProfiler) {
v8::TryCatch try_catch(isolate);
- v8::CFunction c_func =
- v8::CFunction::MakeWithFallbackSupport(FastApiReceiver::FastCallback);
+ v8::CFunction c_func = v8::CFunction::Make(FastApiReceiver::FastCallback);
Local<v8::FunctionTemplate> receiver_templ = v8::FunctionTemplate::New(
isolate, FastApiReceiver::SlowCallback, v8::Local<v8::Value>(),
@@ -4043,6 +4076,77 @@ TEST(FastApiCPUProfiler) {
#endif
}
+TEST(BytecodeFlushEventsEagerLogging) {
+#ifndef V8_LITE_MODE
+ FLAG_opt = false;
+ FLAG_always_opt = false;
+ i::FLAG_optimize_for_size = false;
+#endif // V8_LITE_MODE
+ i::FLAG_flush_bytecode = true;
+ i::FLAG_allow_natives_syntax = true;
+
+ TestSetup test_setup;
+ ManualGCScope manual_gc_scope;
+
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ Isolate* i_isolate = CcTest::i_isolate();
+ Factory* factory = i_isolate->factory();
+
+ CpuProfiler profiler(i_isolate, kDebugNaming, kEagerLogging);
+ CodeMap* code_map = profiler.code_map_for_test();
+
+ {
+ v8::HandleScope scope(isolate);
+ v8::Context::New(isolate)->Enter();
+ const char* source =
+ "function foo() {"
+ " var x = 42;"
+ " var y = 42;"
+ " var z = x + y;"
+ "};"
+ "foo()";
+ Handle<String> foo_name = factory->InternalizeUtf8String("foo");
+
+ // This compile will add the code to the compilation cache.
+ {
+ v8::HandleScope scope(isolate);
+ CompileRun(source);
+ }
+
+ // Check function is compiled.
+ Handle<Object> func_value =
+ Object::GetProperty(i_isolate, i_isolate->global_object(), foo_name)
+ .ToHandleChecked();
+ CHECK(func_value->IsJSFunction());
+ Handle<JSFunction> function = Handle<JSFunction>::cast(func_value);
+ CHECK(function->shared().is_compiled());
+
+ i::BytecodeArray compiled_data =
+ function->shared().GetBytecodeArray(i_isolate);
+ i::Address bytecode_start = compiled_data.GetFirstBytecodeAddress();
+
+ CHECK(code_map->FindEntry(bytecode_start));
+
+ // The code will survive at least two GCs.
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
+ CHECK(function->shared().is_compiled());
+
+ // Simulate several GCs that use full marking.
+ const int kAgingThreshold = 6;
+ for (int i = 0; i < kAgingThreshold; i++) {
+ CcTest::CollectAllGarbage();
+ }
+
+ // foo should no longer be in the compilation cache
+ CHECK(!function->shared().is_compiled());
+ CHECK(!function->is_compiled());
+
+ CHECK(!code_map->FindEntry(bytecode_start));
+ }
+}
+
} // namespace test_cpu_profiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-debug-helper.cc b/deps/v8/test/cctest/test-debug-helper.cc
index a457546d4a..f1c2d40027 100644
--- a/deps/v8/test/cctest/test-debug-helper.cc
+++ b/deps/v8/test/cctest/test-debug-helper.cc
@@ -450,5 +450,46 @@ THREADED_TEST(GetFrameStack) {
.ToLocalChecked();
}
+TEST(SmallOrderedHashSetGetObjectProperties) {
+ LocalContext context;
+ Isolate* isolate = reinterpret_cast<Isolate*>((*context)->GetIsolate());
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ Handle<SmallOrderedHashSet> set = factory->NewSmallOrderedHashSet();
+ const size_t number_of_buckets = 2;
+ CHECK_EQ(number_of_buckets, set->NumberOfBuckets());
+ CHECK_EQ(0, set->NumberOfElements());
+
+  // Verify against the definition of SmallOrderedHashSet in
+  // src/objects/ordered-hash-table.tq.
+ d::HeapAddresses heap_addresses{0, 0, 0, 0};
+ d::ObjectPropertiesResultPtr props =
+ d::GetObjectProperties(set->ptr(), &ReadMemory, heap_addresses);
+ CHECK_EQ(props->type_check_result, d::TypeCheckResult::kUsedMap);
+ CHECK_EQ(props->type, std::string("v8::internal::SmallOrderedHashSet"));
+ CHECK_EQ(props->num_properties, 8);
+
+ CheckProp(*props->properties[0], "v8::internal::Map", "map");
+ CheckProp(*props->properties[1], "uint8_t", "number_of_elements");
+ CheckProp(*props->properties[2], "uint8_t", "number_of_deleted_elements");
+ CheckProp(*props->properties[3], "uint8_t", "number_of_buckets");
+#if TAGGED_SIZE_8_BYTES
+ CheckProp(*props->properties[4], "uint8_t", "padding",
+ d::PropertyKind::kArrayOfKnownSize, 5);
+#else
+ CheckProp(*props->properties[4], "uint8_t", "padding",
+ d::PropertyKind::kArrayOfKnownSize, 1);
+#endif
+ CheckProp(*props->properties[5], "v8::internal::Object", "data_table",
+ d::PropertyKind::kArrayOfKnownSize,
+ number_of_buckets * OrderedHashMap::kLoadFactor);
+ CheckProp(*props->properties[6], "uint8_t", "hash_table",
+ d::PropertyKind::kArrayOfKnownSize, number_of_buckets);
+ CheckProp(*props->properties[7], "uint8_t", "chain_table",
+ d::PropertyKind::kArrayOfKnownSize,
+ number_of_buckets * OrderedHashMap::kLoadFactor);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index c0c9dfd50f..9ffc69b682 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -2848,7 +2848,8 @@ TEST(PauseInScript) {
const char* src = "(function (evt) {})";
const char* script_name = "StepInHandlerTest";
- v8::ScriptOrigin origin(v8_str(env->GetIsolate(), script_name));
+ v8::ScriptOrigin origin(env->GetIsolate(),
+ v8_str(env->GetIsolate(), script_name));
v8::Local<v8::Script> script =
v8::Script::Compile(context, v8_str(env->GetIsolate(), src), &origin)
.ToLocalChecked();
@@ -3225,7 +3226,7 @@ TEST(DebugScriptLineEndsAreAscending) {
" debugger;\n"
"}\n");
- v8::ScriptOrigin origin1 = v8::ScriptOrigin(v8_str(isolate, "name"));
+ v8::ScriptOrigin origin1 = v8::ScriptOrigin(isolate, v8_str(isolate, "name"));
v8::Local<v8::Script> script1 =
v8::Script::Compile(env.local(), script, &origin1).ToLocalChecked();
USE(script1);
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index 9ad1a0e225..3d8d662e18 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -401,6 +401,7 @@ TEST(DisasmIa320) {
__ movlps(Operand(ebx, ecx, times_4, 10000), xmm0);
__ movhps(xmm0, Operand(ebx, ecx, times_4, 10000));
__ movhps(Operand(ebx, ecx, times_4, 10000), xmm0);
+ __ unpcklps(xmm0, xmm1);
// logic operation
__ andps(xmm0, xmm1);
@@ -467,13 +468,18 @@ TEST(DisasmIa320) {
__ cvtss2sd(xmm1, xmm0);
__ cvtdq2ps(xmm1, xmm0);
__ cvtdq2ps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ cvtdq2pd(xmm1, xmm0);
+ __ cvtps2pd(xmm1, xmm0);
+ __ cvtpd2ps(xmm1, xmm0);
__ cvttps2dq(xmm1, xmm0);
__ cvttps2dq(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ cvttpd2dq(xmm1, xmm0);
__ movsd(xmm1, Operand(ebx, ecx, times_4, 10000));
__ movsd(Operand(ebx, ecx, times_4, 10000), xmm1);
// 128 bit move instructions.
__ movdqa(xmm0, Operand(ebx, ecx, times_4, 10000));
__ movdqa(Operand(ebx, ecx, times_4, 10000), xmm0);
+ __ movdqa(xmm1, xmm0);
__ movdqu(xmm0, Operand(ebx, ecx, times_4, 10000));
__ movdqu(Operand(ebx, ecx, times_4, 10000), xmm0);
__ movdqu(xmm1, xmm0);
@@ -640,6 +646,13 @@ TEST(DisasmIa320) {
}
#undef EMIT_SSE34_INSTR
+ {
+ if (CpuFeatures::IsSupported(SSE4_2)) {
+ CpuFeatureScope scope(&assm, SSE4_2);
+ __ pcmpgtq(xmm0, xmm1);
+ }
+ }
+
// AVX instruction
{
if (CpuFeatures::IsSupported(AVX)) {
@@ -800,13 +813,18 @@ TEST(DisasmIa320) {
__ vcvtdq2ps(xmm1, xmm0);
__ vcvtdq2ps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ vcvtdq2pd(xmm1, xmm0);
+ __ vcvtps2pd(xmm1, xmm0);
+ __ vcvtpd2ps(xmm1, xmm0);
__ vcvttps2dq(xmm1, xmm0);
__ vcvttps2dq(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ vcvttpd2dq(xmm1, xmm0);
__ vmovddup(xmm1, xmm2);
__ vmovddup(xmm1, Operand(ebx, ecx, times_4, 10000));
__ vmovshdup(xmm1, xmm2);
__ vbroadcastss(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ vmovdqa(xmm0, Operand(ebx, ecx, times_4, 10000));
__ vmovdqu(xmm0, Operand(ebx, ecx, times_4, 10000));
__ vmovdqu(Operand(ebx, ecx, times_4, 10000), xmm0);
__ vmovd(xmm0, edi);
@@ -818,6 +836,8 @@ TEST(DisasmIa320) {
__ vmovmskps(edx, xmm5);
__ vpmovmskb(ebx, xmm1);
+ __ vpcmpgtq(xmm0, xmm1, xmm2);
+
#define EMIT_SSE2_AVXINSTR(instruction, notUsed1, notUsed2, notUsed3) \
__ v##instruction(xmm7, xmm5, xmm1); \
__ v##instruction(xmm7, xmm5, Operand(edx, 4));
diff --git a/deps/v8/test/cctest/test-disasm-riscv64.cc b/deps/v8/test/cctest/test-disasm-riscv64.cc
new file mode 100644
index 0000000000..c5c7a2eb41
--- /dev/null
+++ b/deps/v8/test/cctest/test-disasm-riscv64.cc
@@ -0,0 +1,523 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include <stdlib.h>
+
+#include "src/codegen/macro-assembler.h"
+#include "src/debug/debug.h"
+#include "src/diagnostics/disasm.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/frames-inl.h"
+#include "src/init/v8.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+
+bool prev_instr_compact_branch = false;
+
+bool DisassembleAndCompare(byte* pc, const char* compare_string) {
+ disasm::NameConverter converter;
+ disasm::Disassembler disasm(converter);
+ EmbeddedVector<char, 128> disasm_buffer;
+
+ if (prev_instr_compact_branch) {
+ disasm.InstructionDecode(disasm_buffer, pc);
+ pc += 4;
+ }
+
+ disasm.InstructionDecode(disasm_buffer, pc);
+
+ if (strcmp(compare_string, disasm_buffer.begin()) != 0) {
+ fprintf(stderr,
+ "expected: \n"
+ "%s\n"
+ "disassembled: \n"
+ "%s\n\n",
+ compare_string, disasm_buffer.begin());
+ return false;
+ }
+ return true;
+}
+
+// Set up V8 to a state where we can at least run the assembler and
+// disassembler. Declare the variables and allocate the data structures used
+// in the rest of the macros.
+#define SET_UP() \
+ CcTest::InitializeVM(); \
+ Isolate* isolate = CcTest::i_isolate(); \
+ HandleScope scope(isolate); \
+ byte* buffer = reinterpret_cast<byte*>(malloc(4 * 1024)); \
+ Assembler assm(AssemblerOptions{}, \
+ ExternalAssemblerBuffer(buffer, 4 * 1024)); \
+ bool failure = false;
+
+// This macro assembles one instruction using the preallocated assembler and
+// disassembles the generated instruction, comparing the output to the expected
+// value. If the comparison fails an error message is printed, but the test
+// continues to run until the end.
+#define COMPARE(asm_, compare_string) \
+ { \
+ int pc_offset = assm.pc_offset(); \
+ byte* progcounter = &buffer[pc_offset]; \
+ assm.asm_; \
+ if (!DisassembleAndCompare(progcounter, compare_string)) failure = true; \
+ }
+
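+// For pc-relative instructions, the expected string is completed with the
+// absolute target address (current pc plus offset), matching how the
+// disassembler prints branch and jump targets.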
+#define COMPARE_PC_REL(asm_, compare_string, offset) \
+ { \
+ int pc_offset = assm.pc_offset(); \
+ byte* progcounter = &buffer[pc_offset]; \
+ char str_with_address[100]; \
+ snprintf(str_with_address, sizeof(str_with_address), "%s -> %p", \
+ compare_string, static_cast<void*>(progcounter + (offset))); \
+ assm.asm_; \
+ if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \
+ }
+
+// Verify that all invocations of the COMPARE macro passed successfully.
+// Exit with a failure if at least one of the tests failed.
+#define VERIFY_RUN() \
+ if (failure) { \
+ FATAL("RISCV Disassembler tests failed.\n"); \
+ }
+
+TEST(Arith) {
+ SET_UP();
+
+ // Arithmetic with immediate
+ COMPARE(addi(t6, s3, -268), "ef498f93 addi t6, s3, -268");
+ COMPARE(slti(t5, s4, -268), "ef4a2f13 slti t5, s4, -268");
+ COMPARE(sltiu(t4, s5, -268), "ef4abe93 sltiu t4, s5, -268");
+ COMPARE(xori(t3, s6, static_cast<int16_t>(0xfffffef4)),
+ "ef4b4e13 xori t3, s6, 0xfffffef4");
+ COMPARE(ori(s11, zero_reg, static_cast<int16_t>(0xfffffef4)),
+ "ef406d93 ori s11, zero_reg, 0xfffffef4");
+ COMPARE(andi(s10, ra, static_cast<int16_t>(0xfffffef4)),
+ "ef40fd13 andi s10, ra, 0xfffffef4");
+ COMPARE(slli(s9, sp, 17), "01111c93 slli s9, sp, 17");
+ COMPARE(srli(s8, gp, 17), "0111dc13 srli s8, gp, 17");
+ COMPARE(srai(s7, tp, 17), "41125b93 srai s7, tp, 17");
+
+ // Arithmetic
+ COMPARE(add(s6, t0, t4), "01d28b33 add s6, t0, t4");
+ COMPARE(sub(s5, t1, s4), "41430ab3 sub s5, t1, s4");
+ COMPARE(sll(s4, t2, s4), "01439a33 sll s4, t2, s4");
+ COMPARE(slt(s3, fp, s4), "014429b3 slt s3, fp, s4");
+ COMPARE(sltu(s2, s3, t6), "01f9b933 sltu s2, s3, t6");
+ COMPARE(xor_(a7, s4, s4), "014a48b3 xor a7, s4, s4");
+ COMPARE(srl(a6, s5, s4), "014ad833 srl a6, s5, s4");
+ COMPARE(sra(a0, s3, s4), "4149d533 sra a0, s3, s4");
+ COMPARE(or_(a0, s3, s4), "0149e533 or a0, s3, s4");
+ COMPARE(and_(a0, s3, s4), "0149f533 and a0, s3, s4");
+
+ VERIFY_RUN();
+}
+
+TEST(LD_ST) {
+ SET_UP();
+ // Loads
+ COMPARE(lb(t0, a0, 0), "00050283 lb t0, 0(a0)");
+ COMPARE(lh(t1, a1, -1024), "c0059303 lh t1, -1024(a1)");
+ COMPARE(lw(t2, a2, 100), "06462383 lw t2, 100(a2)");
+ COMPARE(lbu(fp, a3, -512), "e006c403 lbu fp, -512(a3)");
+ COMPARE(lhu(s1, a4, 258), "10275483 lhu s1, 258(a4)");
+
+ // Stores
+ COMPARE(sb(zero_reg, a5, -4), "fe078e23 sb zero_reg, -4(a5)");
+ COMPARE(sh(a6, s2, 4), "01091223 sh a6, 4(s2)");
+ COMPARE(sw(a7, s3, 100), "0719a223 sw a7, 100(s3)");
+
+ VERIFY_RUN();
+}
+
+TEST(MISC) {
+ SET_UP();
+
+ COMPARE(lui(sp, 0x64), "00064137 lui sp, 0x64");
+ COMPARE(auipc(ra, 0x7fe), "007fe097 auipc ra, 0x7fe");
+
+ // Jumps
+ COMPARE_PC_REL(jal(gp, 100), "064001ef jal gp, 100", 100);
+ COMPARE(jalr(tp, zero_reg, 100),
+ "06400267 jalr tp, 100(zero_reg)");
+
+ // Branches
+ COMPARE_PC_REL(beq(fp, a4, -268), "eee40ae3 beq fp, a4, -268",
+ -268);
+ COMPARE_PC_REL(bne(t1, s4, -268), "ef431ae3 bne t1, s4, -268",
+ -268);
+ COMPARE_PC_REL(blt(s3, t4, -268), "efd9cae3 blt s3, t4, -268",
+ -268);
+ COMPARE_PC_REL(bge(t2, sp, -268), "ee23dae3 bge t2, sp, -268",
+ -268);
+ COMPARE_PC_REL(bltu(s6, a1, -268), "eebb6ae3 bltu s6, a1, -268",
+ -268);
+ COMPARE_PC_REL(bgeu(a1, s3, -268), "ef35fae3 bgeu a1, s3, -268",
+ -268);
+
+ // Memory fences
+ COMPARE(fence(PSO | PSR, PSW | PSI), "0690000f fence or, iw");
+ COMPARE(fence_tso(), "8330000f fence rw, rw");
+
+ // Environment call / break
+ COMPARE(ecall(), "00000073 ecall");
+ COMPARE(ebreak(), "00100073 ebreak");
+
+ VERIFY_RUN();
+}
+
+TEST(CSR) {
+ SET_UP();
+
+ COMPARE(csrrw(a0, csr_fflags, t3), "001e1573 fsflags a0, t3");
+ COMPARE(csrrs(a0, csr_frm, t1), "00232573 csrrs a0, csr_frm, t1");
+ COMPARE(csrrc(a0, csr_fcsr, s3), "0039b573 csrrc a0, csr_fcsr, s3");
+ COMPARE(csrrwi(a0, csr_fflags, 0x10),
+ "00185573 csrrwi a0, csr_fflags, 0x10");
+ COMPARE(csrrsi(t3, csr_frm, 0x3),
+ "0021ee73 csrrsi t3, csr_frm, 0x3");
+ COMPARE(csrrci(a0, csr_fflags, 0x10),
+ "00187573 csrrci a0, csr_fflags, 0x10");
+
+ VERIFY_RUN();
+}
+
+TEST(RV64I) {
+ SET_UP();
+
+ COMPARE(lwu(a0, s3, -268), "ef49e503 lwu a0, -268(s3)");
+ COMPARE(ld(a1, s3, -268), "ef49b583 ld a1, -268(s3)");
+ COMPARE(sd(fp, sp, -268), "ee813a23 sd fp, -268(sp)");
+ COMPARE(addiw(gp, s3, -268), "ef49819b addiw gp, s3, -268");
+ COMPARE(slliw(tp, s3, 17), "0119921b slliw tp, s3, 17");
+ COMPARE(srliw(ra, s3, 10), "00a9d09b srliw ra, s3, 10");
+ COMPARE(sraiw(sp, s3, 17), "4119d11b sraiw sp, s3, 17");
+ COMPARE(addw(t1, zero_reg, s4), "0140033b addw t1, zero_reg, s4");
+ COMPARE(subw(t2, s3, s4), "414983bb subw t2, s3, s4");
+ COMPARE(sllw(s7, s3, s4), "01499bbb sllw s7, s3, s4");
+ COMPARE(srlw(s10, s3, s4), "0149dd3b srlw s10, s3, s4");
+ COMPARE(sraw(a7, s3, s4), "4149d8bb sraw a7, s3, s4");
+
+ VERIFY_RUN();
+}
+
+TEST(RV32M) {
+ SET_UP();
+
+ COMPARE(mul(a0, s3, t4), "03d98533 mul a0, s3, t4");
+ COMPARE(mulh(a0, s3, t4), "03d99533 mulh a0, s3, t4");
+ COMPARE(mulhsu(a0, s3, t4), "03d9a533 mulhsu a0, s3, t4");
+ COMPARE(mulhu(a0, s3, t4), "03d9b533 mulhu a0, s3, t4");
+ COMPARE(div(a0, s3, t4), "03d9c533 div a0, s3, t4");
+ COMPARE(divu(a0, s3, t4), "03d9d533 divu a0, s3, t4");
+ COMPARE(rem(a0, s3, t4), "03d9e533 rem a0, s3, t4");
+ COMPARE(remu(a0, s3, t4), "03d9f533 remu a0, s3, t4");
+
+ VERIFY_RUN();
+}
+
+TEST(RV64M) {
+ SET_UP();
+
+ COMPARE(mulw(a0, s3, s4), "0349853b mulw a0, s3, s4");
+ COMPARE(divw(a0, s3, s4), "0349c53b divw a0, s3, s4");
+ COMPARE(divuw(a0, s3, s4), "0349d53b divuw a0, s3, s4");
+ COMPARE(remw(a0, s3, s4), "0349e53b remw a0, s3, s4");
+ COMPARE(remuw(a0, s3, s4), "0349f53b remuw a0, s3, s4");
+
+ VERIFY_RUN();
+}
+
+TEST(RV32A) {
+ SET_UP();
+ // RV32A Standard Extension
+ COMPARE(lr_w(true, false, a0, s3), "1409a52f lr.w.aq a0, (s3)");
+ COMPARE(sc_w(true, true, a0, s3, s4),
+ "1f49a52f sc.w.aqrl a0, s4, (s3)");
+ COMPARE(amoswap_w(false, false, a0, s3, s4),
+ "0949a52f amoswap.w a0, s4, (s3)");
+ COMPARE(amoadd_w(false, true, a0, s3, s4),
+ "0349a52f amoadd.w.rl a0, s4, (s3)");
+ COMPARE(amoxor_w(true, false, a0, s3, s4),
+ "2549a52f amoxor.w.aq a0, s4, (s3)");
+ COMPARE(amoand_w(false, false, a0, s3, s4),
+ "6149a52f amoand.w a0, s4, (s3)");
+ COMPARE(amoor_w(true, true, a0, s3, s4),
+ "4749a52f amoor.w.aqrl a0, s4, (s3)");
+ COMPARE(amomin_w(false, true, a0, s3, s4),
+ "8349a52f amomin.w.rl a0, s4, (s3)");
+ COMPARE(amomax_w(true, false, a0, s3, s4),
+ "a549a52f amomax.w.aq a0, s4, (s3)");
+ COMPARE(amominu_w(false, false, a0, s3, s4),
+ "c149a52f amominu.w a0, s4, (s3)");
+ COMPARE(amomaxu_w(true, true, a0, s3, s4),
+ "e749a52f amomaxu.w.aqrl a0, s4, (s3)");
+ VERIFY_RUN();
+}
+
+TEST(RV64A) {
+ SET_UP();
+
+ COMPARE(lr_d(true, true, a0, s3), "1609b52f lr.d.aqrl a0, (s3)");
+ COMPARE(sc_d(false, true, a0, s3, s4), "1b49b52f sc.d.rl a0, s4, (s3)");
+ COMPARE(amoswap_d(true, false, a0, s3, s4),
+ "0d49b52f amoswap.d.aq a0, s4, (s3)");
+ COMPARE(amoadd_d(false, false, a0, s3, s4),
+ "0149b52f amoadd.d a0, s4, (s3)");
+ COMPARE(amoxor_d(true, false, a0, s3, s4),
+ "2549b52f amoxor.d.aq a0, s4, (s3)");
+ COMPARE(amoand_d(true, true, a0, s3, s4),
+ "6749b52f amoand.d.aqrl a0, s4, (s3)");
+ COMPARE(amoor_d(false, true, a0, s3, s4),
+ "4349b52f amoor.d.rl a0, s4, (s3)");
+ COMPARE(amomin_d(true, true, a0, s3, s4),
+ "8749b52f amomin.d.aqrl a0, s4, (s3)");
+ COMPARE(amomax_d(false, true, a0, s3, s4),
+ "a349b52f amoswap.d.rl a0, s4, (s3)");
+ COMPARE(amominu_d(true, false, a0, s3, s4),
+ "c549b52f amominu.d.aq a0, s4, (s3)");
+ COMPARE(amomaxu_d(false, true, a0, s3, s4),
+ "e349b52f amomaxu.d.rl a0, s4, (s3)");
+
+ VERIFY_RUN();
+}
+
+TEST(RV32F) {
+ SET_UP();
+ // RV32F Standard Extension
+ COMPARE(flw(fa0, s3, -268), "ef49a507 flw fa0, -268(s3)");
+ COMPARE(fsw(ft7, sp, -268), "ee712a27 fsw ft7, -268(sp)");
+ COMPARE(fmadd_s(fa0, ft8, fa5, fs5),
+ "a8fe0543 fmadd.s fa0, ft8, fa5, fs5");
+ COMPARE(fmsub_s(fa0, ft8, fa5, fs5),
+ "a8fe0547 fmsub.s fa0, ft8, fa5, fs5");
+ COMPARE(fnmsub_s(fa0, ft8, fa5, fs5),
+ "a8fe054b fnmsub.s fa0, ft8, fa5, fs5");
+ COMPARE(fnmadd_s(fa0, ft8, fa5, fs5),
+ "a8fe054f fnmadd.s fa0, ft8, fa5, fs5");
+ COMPARE(fadd_s(fa0, ft8, fa5), "00fe0553 fadd.s fa0, ft8, fa5");
+ COMPARE(fsub_s(fa0, ft8, fa5), "08fe0553 fsub.s fa0, ft8, fa5");
+ COMPARE(fmul_s(fa0, ft8, fa5), "10fe0553 fmul.s fa0, ft8, fa5");
+ COMPARE(fdiv_s(ft0, ft8, fa5), "18fe0053 fdiv.s ft0, ft8, fa5");
+ COMPARE(fsqrt_s(ft0, ft8), "580e0053 fsqrt.s ft0, ft8");
+ COMPARE(fsgnj_s(ft0, ft8, fa5), "20fe0053 fsgnj.s ft0, ft8, fa5");
+ COMPARE(fsgnjn_s(ft0, ft8, fa5), "20fe1053 fsgnjn.s ft0, ft8, fa5");
+ COMPARE(fsgnjx_s(ft0, ft8, fa5), "20fe2053 fsgnjx.s ft0, ft8, fa5");
+ COMPARE(fmin_s(ft0, ft8, fa5), "28fe0053 fmin.s ft0, ft8, fa5");
+ COMPARE(fmax_s(ft0, ft8, fa5), "28fe1053 fmax.s ft0, ft8, fa5");
+ COMPARE(fcvt_w_s(a0, ft8, RNE), "c00e0553 fcvt.w.s [RNE] a0, ft8");
+ COMPARE(fcvt_wu_s(a0, ft8, RTZ), "c01e1553 fcvt.wu.s [RTZ] a0, ft8");
+ COMPARE(fmv_x_w(a0, ft8), "e00e0553 fmv.x.w a0, ft8");
+ COMPARE(feq_s(a0, ft8, fa5), "a0fe2553 feq.s a0, ft8, fa5");
+ COMPARE(flt_s(a0, ft8, fa5), "a0fe1553 flt.s a0, ft8, fa5");
+ COMPARE(fle_s(a0, ft8, fa5), "a0fe0553 fle.s a0, ft8, fa5");
+ COMPARE(fclass_s(a0, ft8), "e00e1553 fclass.s a0, ft8");
+ COMPARE(fcvt_s_w(ft0, s3), "d0098053 fcvt.s.w ft0, s3");
+ COMPARE(fcvt_s_wu(ft0, s3), "d0198053 fcvt.s.wu ft0, s3");
+ COMPARE(fmv_w_x(ft0, s3), "f0098053 fmv.w.x ft0, s3");
+ VERIFY_RUN();
+}
+
+TEST(RV64F) {
+ SET_UP();
+ // RV64F Standard Extension (in addition to RV32F)
+ COMPARE(fcvt_l_s(a0, ft8, RNE), "c02e0553 fcvt.l.s [RNE] a0, ft8");
+ COMPARE(fcvt_lu_s(a0, ft8, RMM), "c03e4553 fcvt.lu.s [RMM] a0, ft8");
+ COMPARE(fcvt_s_l(ft0, s3), "d0298053 fcvt.s.l ft0, s3");
+ COMPARE(fcvt_s_lu(ft0, s3), "d0398053 fcvt.s.lu ft0, s3");
+ VERIFY_RUN();
+}
+
+TEST(RV32D) {
+ SET_UP();
+ // RV32D Standard Extension
+ COMPARE(fld(ft0, s3, -268), "ef49b007 fld ft0, -268(s3)");
+ COMPARE(fsd(ft7, sp, -268), "ee713a27 fsd ft7, -268(sp)");
+ COMPARE(fmadd_d(ft0, ft8, fa5, fs5),
+ "aafe0043 fmadd.d ft0, ft8, fa5, fs5");
+ COMPARE(fmsub_d(ft0, ft8, fa5, fs1),
+ "4afe0047 fmsub.d ft0, ft8, fa5, fs1");
+ COMPARE(fnmsub_d(ft0, ft8, fa5, fs2),
+ "92fe004b fnmsub.d ft0, ft8, fa5, fs2");
+ COMPARE(fnmadd_d(ft0, ft8, fa5, fs3),
+ "9afe004f fnmadd.d ft0, ft8, fa5, fs3");
+ COMPARE(fadd_d(ft0, ft8, fa5), "02fe0053 fadd.d ft0, ft8, fa5");
+ COMPARE(fsub_d(ft0, ft8, fa5), "0afe0053 fsub.d ft0, ft8, fa5");
+ COMPARE(fmul_d(ft0, ft8, fa5), "12fe0053 fmul.d ft0, ft8, fa5");
+ COMPARE(fdiv_d(ft0, ft8, fa5), "1afe0053 fdiv.d ft0, ft8, fa5");
+ COMPARE(fsqrt_d(ft0, ft8), "5a0e0053 fsqrt.d ft0, ft8");
+ COMPARE(fsgnj_d(ft0, ft8, fa5), "22fe0053 fsgnj.d ft0, ft8, fa5");
+ COMPARE(fsgnjn_d(ft0, ft8, fa5), "22fe1053 fsgnjn.d ft0, ft8, fa5");
+ COMPARE(fsgnjx_d(ft0, ft8, fa5), "22fe2053 fsgnjx.d ft0, ft8, fa5");
+ COMPARE(fmin_d(ft0, ft8, fa5), "2afe0053 fmin.d ft0, ft8, fa5");
+ COMPARE(fmax_d(ft0, ft8, fa5), "2afe1053 fmax.d ft0, ft8, fa5");
+  COMPARE(fcvt_s_d(ft0, ft8, RDN), "401e2053       fcvt.s.d [RDN] ft0, ft8");
+ COMPARE(fcvt_d_s(ft0, fa0), "42050053 fcvt.d.s ft0, fa0");
+ COMPARE(feq_d(a0, ft8, fa5), "a2fe2553 feq.d a0, ft8, fa5");
+ COMPARE(flt_d(a0, ft8, fa5), "a2fe1553 flt.d a0, ft8, fa5");
+ COMPARE(fle_d(a0, ft8, fa5), "a2fe0553 fle.d a0, ft8, fa5");
+ COMPARE(fclass_d(a0, ft8), "e20e1553 fclass.d a0, ft8");
+ COMPARE(fcvt_w_d(a0, ft8, RNE), "c20e0553 fcvt.w.d [RNE] a0, ft8");
+ COMPARE(fcvt_wu_d(a0, ft8, RUP), "c21e3553 fcvt.wu.d [RUP] a0, ft8");
+ COMPARE(fcvt_d_w(ft0, s3), "d2098053 fcvt.d.w ft0, s3");
+ COMPARE(fcvt_d_wu(ft0, s3), "d2198053 fcvt.d.wu ft0, s3");
+
+ VERIFY_RUN();
+}
+
+TEST(RV64D) {
+ SET_UP();
+ // RV64D Standard Extension (in addition to RV32D)
+ COMPARE(fcvt_l_d(a0, ft8, RMM), "c22e4553 fcvt.l.d [RMM] a0, ft8");
+ COMPARE(fcvt_lu_d(a0, ft8, RDN), "c23e2553 fcvt.lu.d [RDN] a0, ft8");
+ COMPARE(fmv_x_d(a0, ft8), "e20e0553 fmv.x.d a0, ft8");
+ COMPARE(fcvt_d_l(ft0, s3), "d2298053 fcvt.d.l ft0, s3");
+ COMPARE(fcvt_d_lu(ft0, s3), "d2398053 fcvt.d.lu ft0, s3");
+ COMPARE(fmv_d_x(ft0, s3), "f2098053 fmv.d.x ft0, s3");
+ VERIFY_RUN();
+}
+
+TEST(PSEUDO) {
+ SET_UP();
+  // Pseudo-instructions from the RISC-V Assembly Programmer's Handbook.
+ COMPARE(nop(), "00000013 nop");
+ COMPARE(RV_li(t6, -12), "ff400f93 li t6, -12");
+ COMPARE(mv(t0, a4), "00070293 mv t0, a4");
+ COMPARE(not_(t0, a5), "fff7c293 not t0, a5");
+  COMPARE(neg(ra, a6), "410000b3       neg ra, a6");
+ COMPARE(negw(t2, fp), "408003bb negw t2, fp");
+ COMPARE(sext_w(t0, s1), "0004829b sext.w t0, s1");
+ COMPARE(seqz(sp, s2), "00193113 seqz sp, s2");
+ COMPARE(snez(fp, s3), "01303433 snez fp, s3");
+ COMPARE(sltz(a0, t5), "000f2533 sltz a0, t5");
+ COMPARE(sgtz(a1, t4), "01d025b3 sgtz a1, t4");
+
+ COMPARE(fmv_s(fa0, fs4), "214a0553 fmv.s fa0, fs4");
+ COMPARE(fabs_s(fa1, fs3), "2139a5d3 fabs.s fa1, fs3");
+ COMPARE(fneg_s(fa2, fs5), "215a9653 fneg.s fa2, fs5");
+ COMPARE(fmv_d(fa3, fs2), "232906d3 fmv.d fa3, fs2");
+ COMPARE(fabs_d(fs0, fs2), "23292453 fabs.d fs0, fs2");
+ COMPARE(fneg_d(fs1, fs1), "229494d3 fneg.d fs1, fs1");
+
+ COMPARE_PC_REL(j(-1024), "c01ff06f j -1024", -1024);
+ COMPARE_PC_REL(jal(32), "020000ef jal 32", 32);
+ COMPARE(jr(a1), "00058067 jr a1");
+ COMPARE(jalr(a1), "000580e7 jalr a1");
+ COMPARE(ret(), "00008067 ret");
+ // COMPARE(call(int32_t offset);
+
+ COMPARE(rdinstret(t0), "c02022f3 rdinstret t0");
+ COMPARE(rdinstreth(a0), "c8202573 rdinstreth a0");
+ COMPARE(rdcycle(a4), "c0002773 rdcycle a4");
+ COMPARE(rdcycleh(a5), "c80027f3 rdcycleh a5");
+ COMPARE(rdtime(a3), "c01026f3 rdtime a3");
+ COMPARE(rdtimeh(t2), "c81023f3 rdtimeh t2");
+
+ COMPARE(csrr(t3, csr_cycle), "c0002e73 rdcycle t3");
+ COMPARE(csrw(csr_instret, a1), "c0259073 csrw csr_instret, a1");
+ COMPARE(csrs(csr_timeh, a2), "c8162073 csrs csr_timeh, a2");
+ COMPARE(csrc(csr_cycleh, t1), "c8033073 csrc csr_cycleh, t1");
+
+ COMPARE(csrwi(csr_time, 0xf), "c017d073 csrwi csr_time, 0xf");
+ COMPARE(csrsi(csr_cycleh, 0x1), "c800e073 csrsi csr_cycleh, 0x1");
+ COMPARE(csrci(csr_instreth, 0x15),
+ "c82af073 csrci csr_instreth, 0x15");
+
+ COMPARE(frcsr(t4), "00302ef3 frcsr t4");
+ COMPARE(fscsr(t1, a1), "00359373 fscsr t1, a1");
+ COMPARE(fscsr(a4), "00371073 fscsr a4");
+
+ COMPARE(frrm(t2), "002023f3 frrm t2");
+ COMPARE(fsrm(t0, a1), "002592f3 fsrm t0, a1");
+ COMPARE(fsrm(a5), "00279073 fsrm a5");
+
+ COMPARE(frflags(s5), "00102af3 frflags s5");
+ COMPARE(fsflags(s2, t1), "00131973 fsflags s2, t1");
+ COMPARE(fsflags(s1), "00149073 fsflags s1");
+
+ VERIFY_RUN();
+}
+
+TEST(RV64C) {
+ SET_UP();
+
+ COMPARE(c_nop(), "00000001 nop");
+ COMPARE(c_addi(s3, -25), "0000199d addi s3, s3, -25");
+ COMPARE(c_addiw(gp, -30), "00003189 addiw gp, gp, -30");
+ COMPARE(c_addi16sp(-432), "00007161 addi sp, sp, -432");
+ COMPARE(c_addi4spn(a1, 924), "00000f6c addi a1, sp, 924");
+ COMPARE(c_li(t6, -15), "00005fc5 li t6, -15");
+ COMPARE(c_lui(s1, 0xf4), "000074d1 lui s1, 0xffff4");
+ COMPARE(c_slli(s9, 60), "00001cf2 slli s9, s9, 60");
+ COMPARE(c_fldsp(fa1, 360), "000035b6 fld fa1, 360(sp)");
+ COMPARE(c_lwsp(s7, 244), "00005bde lw s7, 244(sp)");
+ COMPARE(c_ldsp(s6, 344), "00006b76 ld s6, 344(sp)");
+
+ COMPARE(c_jr(a1), "00008582 jr a1");
+ COMPARE(c_mv(t0, a4), "000082ba mv t0, a4");
+ COMPARE(c_ebreak(), "00009002 ebreak");
+ COMPARE(c_jalr(a1), "00009582 jalr a1");
+ COMPARE(c_add(s6, t0), "00009b16 add s6, s6, t0");
+ COMPARE(c_sub(s1, a0), "00008c89 sub s1, s1, a0");
+ COMPARE(c_xor(s1, a0), "00008ca9 xor s1, s1, a0");
+ COMPARE(c_or(s1, a0), "00008cc9 or s1, s1, a0");
+ COMPARE(c_and(s1, a0), "00008ce9 and s1, s1, a0");
+ COMPARE(c_subw(s1, a0), "00009c89 subw s1, s1, a0");
+ COMPARE(c_addw(a0, a1), "00009d2d addw a0, a0, a1");
+
+ COMPARE(c_fsdsp(fa4, 232), "0000b5ba fsd fa4, 232(sp)");
+ COMPARE(c_swsp(s6, 180), "0000db5a sw s6, 180(sp)");
+ COMPARE(c_sdsp(a4, 216), "0000edba sd a4, 216(sp)");
+
+ COMPARE(c_lw(a2, s1, 24), "00004c90 lw a2, 24(s1)");
+ COMPARE(c_ld(a2, s1, 24), "00006c90 ld a2, 24(s1)");
+ COMPARE(c_fld(fa1, s1, 24), "00002c8c fld fa1, 24(s1)");
+
+ COMPARE(c_sw(a2, s1, 24), "0000cc90 sw a2, 24(s1)");
+ COMPARE(c_sd(a2, s1, 24), "0000ec90 sd a2, 24(s1)");
+ COMPARE(c_fsd(fa1, s1, 24), "0000ac8c fsd fa1, 24(s1)");
+
+ COMPARE(c_j(-12), "0000bfd5 j -12");
+ VERIFY_RUN();
+}
+
+/*
+TEST(Privileged) {
+ SET_UP();
+ // Privileged
+ COMPARE(uret(), "");
+ COMPARE(sret(), "");
+ COMPARE(mret(), "");
+ COMPARE(wfi(), "");
+ COMPARE(sfence_vma(s3, s4), "");
+ VERIFY_RUN();
+}
+*/
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index d447d386da..e86eb33c9c 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -396,6 +396,7 @@ TEST(DisasmX64) {
__ cvttps2dq(xmm0, xmm1);
__ cvttps2dq(xmm0, Operand(rbx, rcx, times_4, 10000));
__ movaps(xmm0, xmm1);
+ __ movaps(xmm0, Operand(rbx, rcx, times_4, 10000));
__ movdqa(xmm0, Operand(rsp, 12));
__ movdqa(Operand(rsp, 12), xmm0);
__ movdqu(xmm0, Operand(rsp, 12));
@@ -430,6 +431,7 @@ TEST(DisasmX64) {
// SSE2 instructions
{
+ __ cvtdq2pd(xmm3, xmm4);
__ cvttsd2si(rdx, Operand(rbx, rcx, times_4, 10000));
__ cvttsd2si(rdx, xmm1);
__ cvttsd2siq(rdx, xmm1);
@@ -660,6 +662,7 @@ TEST(DisasmX64) {
__ vmovsd(Operand(rbx, rcx, times_4, 10000), xmm0);
__ vmovdqa(xmm4, xmm5);
+ __ vmovdqa(xmm4, Operand(rbx, rcx, times_4, 10000));
__ vmovdqu(xmm9, Operand(rbx, rcx, times_4, 10000));
__ vmovdqu(Operand(rbx, rcx, times_4, 10000), xmm0);
@@ -679,6 +682,7 @@ TEST(DisasmX64) {
__ vucomisd(xmm9, xmm1);
__ vucomisd(xmm8, Operand(rbx, rdx, times_2, 10981));
+ __ vcvtdq2pd(xmm9, xmm11);
__ vcvtss2sd(xmm4, xmm9, xmm11);
__ vcvtss2sd(xmm4, xmm9, Operand(rbx, rcx, times_1, 10000));
__ vcvttps2dq(xmm4, xmm11);
@@ -692,6 +696,7 @@ TEST(DisasmX64) {
__ vcvtsd2si(rdi, xmm9);
__ vmovaps(xmm10, xmm11);
+ __ vmovaps(xmm0, Operand(rbx, rcx, times_4, 10000));
__ vmovapd(xmm7, xmm0);
__ vmovupd(xmm0, Operand(rbx, rcx, times_4, 10000));
__ vmovupd(Operand(rbx, rcx, times_4, 10000), xmm0);
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index b39a41b8c1..b9a6d1a7c5 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -821,7 +821,7 @@ TEST(GeneralizeDoubleFieldToTagged) {
{PropertyConstness::kMutable, Representation::Double(), any_type},
{PropertyConstness::kMutable, Representation::HeapObject(), value_type},
{PropertyConstness::kMutable, Representation::Tagged(), any_type},
- FLAG_unbox_double_fields ? kDeprecation : kFieldOwnerDependency);
+ kFieldOwnerDependency);
}
TEST(GeneralizeHeapObjectFieldToTagged) {
@@ -1066,20 +1066,31 @@ void TestReconfigureDataFieldAttribute_GeneralizeField(
Handle<Code> code_field_type = CreateDummyOptimizedCode(isolate);
Handle<Code> code_field_repr = CreateDummyOptimizedCode(isolate);
Handle<Code> code_field_const = CreateDummyOptimizedCode(isolate);
- Handle<Map> field_owner(
- map->FindFieldOwner(isolate, InternalIndex(kSplitProp)), isolate);
- DependentCode::InstallDependency(isolate,
- MaybeObjectHandle::Weak(code_field_type),
- field_owner, DependentCode::kFieldTypeGroup);
- DependentCode::InstallDependency(
- isolate, MaybeObjectHandle::Weak(code_field_repr), field_owner,
- DependentCode::kFieldRepresentationGroup);
- DependentCode::InstallDependency(
- isolate, MaybeObjectHandle::Weak(code_field_const), field_owner,
- DependentCode::kFieldConstGroup);
+ Handle<Code> code_src_field_const = CreateDummyOptimizedCode(isolate);
+ {
+ Handle<Map> field_owner(
+ map->FindFieldOwner(isolate, InternalIndex(kSplitProp)), isolate);
+ DependentCode::InstallDependency(
+ isolate, MaybeObjectHandle::Weak(code_field_type), field_owner,
+ DependentCode::kFieldTypeGroup);
+ DependentCode::InstallDependency(
+ isolate, MaybeObjectHandle::Weak(code_field_repr), field_owner,
+ DependentCode::kFieldRepresentationGroup);
+ DependentCode::InstallDependency(
+ isolate, MaybeObjectHandle::Weak(code_field_const), field_owner,
+ DependentCode::kFieldConstGroup);
+ }
+ {
+ Handle<Map> field_owner(
+ map2->FindFieldOwner(isolate, InternalIndex(kSplitProp)), isolate);
+ DependentCode::InstallDependency(
+ isolate, MaybeObjectHandle::Weak(code_src_field_const), field_owner,
+ DependentCode::kFieldConstGroup);
+ }
CHECK(!code_field_type->marked_for_deoptimization());
CHECK(!code_field_repr->marked_for_deoptimization());
CHECK(!code_field_const->marked_for_deoptimization());
+ CHECK(!code_src_field_const->marked_for_deoptimization());
// Reconfigure attributes of property |kSplitProp| of |map2| to NONE, which
// should generalize representations in |map1|.
@@ -1087,10 +1098,21 @@ void TestReconfigureDataFieldAttribute_GeneralizeField(
Map::ReconfigureExistingProperty(isolate, map2, InternalIndex(kSplitProp),
kData, NONE, PropertyConstness::kConst);
- // |map2| should be left unchanged but marked unstable.
+  // |map2| should be mostly left unchanged but marked unstable and, if the
+  // source property was constant, it should also be transitioned to kMutable.
CHECK(!map2->is_stable());
CHECK(!map2->is_deprecated());
CHECK_NE(*map2, *new_map);
+ // If the "source" property was const then update constness expectations for
+ // "source" map and ensure the deoptimization dependency was triggered.
+ if (to.constness == PropertyConstness::kConst) {
+ expectations2.SetDataField(kSplitProp, READ_ONLY,
+ PropertyConstness::kMutable, to.representation,
+ to.type);
+ CHECK(code_src_field_const->marked_for_deoptimization());
+ } else {
+ CHECK(!code_src_field_const->marked_for_deoptimization());
+ }
CHECK(expectations2.Check(*map2));
for (int i = kSplitProp; i < kPropCount; i++) {
@@ -1215,25 +1237,25 @@ TEST(ReconfigureDataFieldAttribute_GeneralizeDoubleFieldToTagged) {
{PropertyConstness::kConst, Representation::Double(), any_type},
{PropertyConstness::kConst, Representation::HeapObject(), value_type},
{PropertyConstness::kConst, Representation::Tagged(), any_type},
- FLAG_unbox_double_fields ? kDeprecation : kFieldOwnerDependency);
+ kFieldOwnerDependency);
TestReconfigureDataFieldAttribute_GeneralizeField(
{PropertyConstness::kConst, Representation::Double(), any_type},
{PropertyConstness::kMutable, Representation::HeapObject(), value_type},
{PropertyConstness::kMutable, Representation::Tagged(), any_type},
- FLAG_unbox_double_fields ? kDeprecation : kFieldOwnerDependency);
+ kFieldOwnerDependency);
TestReconfigureDataFieldAttribute_GeneralizeField(
{PropertyConstness::kMutable, Representation::Double(), any_type},
{PropertyConstness::kConst, Representation::HeapObject(), value_type},
{PropertyConstness::kMutable, Representation::Tagged(), any_type},
- FLAG_unbox_double_fields ? kDeprecation : kFieldOwnerDependency);
+ kFieldOwnerDependency);
TestReconfigureDataFieldAttribute_GeneralizeField(
{PropertyConstness::kMutable, Representation::Double(), any_type},
{PropertyConstness::kMutable, Representation::HeapObject(), value_type},
{PropertyConstness::kMutable, Representation::Tagged(), any_type},
- FLAG_unbox_double_fields ? kDeprecation : kFieldOwnerDependency);
+ kFieldOwnerDependency);
}
TEST(ReconfigureDataFieldAttribute_GeneralizeHeapObjFieldToHeapObj) {
@@ -2272,7 +2294,7 @@ TEST(ElementsKindTransitionFromMapOwningDescriptor) {
{PropertyConstness::kMutable, Representation::Double(), any_type},
{PropertyConstness::kMutable, Representation::HeapObject(), value_type},
{PropertyConstness::kMutable, Representation::Tagged(), any_type},
- FLAG_unbox_double_fields ? kDeprecation : kFieldOwnerDependency);
+ kFieldOwnerDependency);
}
}
@@ -2340,7 +2362,7 @@ TEST(ElementsKindTransitionFromMapNotOwningDescriptor) {
{PropertyConstness::kMutable, Representation::Double(), any_type},
{PropertyConstness::kMutable, Representation::HeapObject(), value_type},
{PropertyConstness::kMutable, Representation::Tagged(), any_type},
- FLAG_unbox_double_fields ? kDeprecation : kFieldOwnerDependency);
+ kFieldOwnerDependency);
}
}
@@ -2384,7 +2406,7 @@ TEST(PrototypeTransitionFromMapOwningDescriptor) {
{PropertyConstness::kMutable, Representation::Double(), any_type},
{PropertyConstness::kMutable, Representation::HeapObject(), value_type},
{PropertyConstness::kMutable, Representation::Tagged(), any_type},
- FLAG_unbox_double_fields ? kDeprecation : kFieldOwnerDependency);
+ kFieldOwnerDependency);
}
TEST(PrototypeTransitionFromMapNotOwningDescriptor) {
@@ -2438,7 +2460,7 @@ TEST(PrototypeTransitionFromMapNotOwningDescriptor) {
{PropertyConstness::kMutable, Representation::Double(), any_type},
{PropertyConstness::kMutable, Representation::HeapObject(), value_type},
{PropertyConstness::kMutable, Representation::Tagged(), any_type},
- FLAG_unbox_double_fields ? kDeprecation : kFieldOwnerDependency);
+ kFieldOwnerDependency);
}
////////////////////////////////////////////////////////////////////////////////
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 233e4962a4..ed02cd1e37 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -4072,7 +4072,8 @@ TEST(WeakReference) {
.Build();
CHECK(code->IsCode());
- fv->set_maybe_optimized_code(i::HeapObjectReference::Weak(*code));
+ fv->set_maybe_optimized_code(i::HeapObjectReference::Weak(*code),
+ v8::kReleaseStore);
fv->set_flags(i::FeedbackVector::OptimizationTierBits::encode(
i::OptimizationTier::kTopTier) |
i::FeedbackVector::OptimizationMarkerBits::encode(
diff --git a/deps/v8/test/cctest/test-helper-riscv64.cc b/deps/v8/test/cctest/test-helper-riscv64.cc
new file mode 100644
index 0000000000..e54d62ecec
--- /dev/null
+++ b/deps/v8/test/cctest/test-helper-riscv64.cc
@@ -0,0 +1,49 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/test-helper-riscv64.h"
+
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/isolate-inl.h"
+#include "src/init/v8.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+
+int64_t GenAndRunTest(Func test_generator) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+ test_generator(assm);
+ assm.jr(ra);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<int64_t()>::FromCode(*code);
+ return f.Call();
+}
+
+Handle<Code> AssembleCodeImpl(Func assemble) {
+ Isolate* isolate = CcTest::i_isolate();
+ MacroAssembler assm(isolate, CodeObjectRequired::kYes);
+
+ assemble(assm);
+ assm.jr(ra);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ if (FLAG_print_code) {
+ code->Print();
+ }
+ return code;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-helper-riscv64.h b/deps/v8/test/cctest/test-helper-riscv64.h
new file mode 100644
index 0000000000..7b702fe57a
--- /dev/null
+++ b/deps/v8/test/cctest/test-helper-riscv64.h
@@ -0,0 +1,334 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_TEST_HELPER_RISCV_H_
+#define V8_CCTEST_TEST_HELPER_RISCV_H_
+
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/simulator.h"
+#include "src/heap/factory.h"
+#include "src/init/v8.h"
+#include "src/utils/utils.h"
+#include "test/cctest/cctest.h"
+
+// Wrapped in do { ... } while (false) so the macro expands to a single
+// statement and stays safe inside unbraced if/else bodies.
+#define PRINT_RES(res, expected_res, in_hex)                            \
+  do {                                                                  \
+    if (in_hex) std::cout << "[hex-form]" << std::hex;                  \
+    std::cout << "res = " << (res) << " expected = " << (expected_res)  \
+              << std::endl;                                             \
+  } while (false)
+
+namespace v8 {
+namespace internal {
+
+using Func = std::function<void(MacroAssembler&)>;
+
+int64_t GenAndRunTest(Func test_generator);
+
+// The f.Call(...) interface is implemented as varargs in V8. For varargs,
+// floating-point arguments and return values are passed in GPRs, hence the
+// special handling below that reinterprets floating-point values as integers
+// when they are passed into and returned from f.Call().
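+//
+// A minimal illustrative use (squaring a float; the value travels through a0
+// on both sides of the call):
+//   float res = GenAndRunTest<float, float>(
+//       3.0f, [](MacroAssembler& assm) { assm.fmul_s(fa0, fa0, fa0); });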
+template <typename OUTPUT_T, typename INPUT_T>
+OUTPUT_T GenAndRunTest(INPUT_T input0, Func test_generator) {
+ DCHECK((sizeof(INPUT_T) == 4 || sizeof(INPUT_T) == 8));
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ // handle floating-point parameters
+ if (std::is_same<float, INPUT_T>::value) {
+ assm.fmv_w_x(fa0, a0);
+ } else if (std::is_same<double, INPUT_T>::value) {
+ assm.fmv_d_x(fa0, a0);
+ }
+
+ test_generator(assm);
+
+ // handle floating-point result
+ if (std::is_same<float, OUTPUT_T>::value) {
+ assm.fmv_x_w(a0, fa0);
+ } else if (std::is_same<double, OUTPUT_T>::value) {
+ assm.fmv_x_d(a0, fa0);
+ }
+ assm.jr(ra);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+
+ using OINT_T = typename std::conditional<
+ std::is_integral<OUTPUT_T>::value, OUTPUT_T,
+ typename std::conditional<sizeof(OUTPUT_T) == 4, int32_t,
+ int64_t>::type>::type;
+ using IINT_T = typename std::conditional<
+ std::is_integral<INPUT_T>::value, INPUT_T,
+ typename std::conditional<sizeof(INPUT_T) == 4, int32_t,
+ int64_t>::type>::type;
+
+ auto f = GeneratedCode<OINT_T(IINT_T)>::FromCode(*code);
+
+ auto res = f.Call(bit_cast<IINT_T>(input0));
+ return bit_cast<OUTPUT_T>(res);
+}
+
+template <typename OUTPUT_T, typename INPUT_T>
+OUTPUT_T GenAndRunTest(INPUT_T input0, INPUT_T input1, Func test_generator) {
+ DCHECK((sizeof(INPUT_T) == 4 || sizeof(INPUT_T) == 8));
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ // handle floating-point parameters
+ if (std::is_same<float, INPUT_T>::value) {
+ assm.fmv_w_x(fa0, a0);
+ assm.fmv_w_x(fa1, a1);
+ } else if (std::is_same<double, INPUT_T>::value) {
+ assm.fmv_d_x(fa0, a0);
+ assm.fmv_d_x(fa1, a1);
+ }
+
+ test_generator(assm);
+
+ // handle floating-point result
+ if (std::is_same<float, OUTPUT_T>::value) {
+ assm.fmv_x_w(a0, fa0);
+ } else if (std::is_same<double, OUTPUT_T>::value) {
+ assm.fmv_x_d(a0, fa0);
+ }
+ assm.jr(ra);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+
+ using OINT_T = typename std::conditional<
+ std::is_integral<OUTPUT_T>::value, OUTPUT_T,
+ typename std::conditional<sizeof(OUTPUT_T) == 4, int32_t,
+ int64_t>::type>::type;
+ using IINT_T = typename std::conditional<
+ std::is_integral<INPUT_T>::value, INPUT_T,
+ typename std::conditional<sizeof(INPUT_T) == 4, int32_t,
+ int64_t>::type>::type;
+ auto f = GeneratedCode<OINT_T(IINT_T, IINT_T)>::FromCode(*code);
+
+ auto res = f.Call(bit_cast<IINT_T>(input0), bit_cast<IINT_T>(input1));
+ return bit_cast<OUTPUT_T>(res);
+}
+
+template <typename OUTPUT_T, typename INPUT_T>
+OUTPUT_T GenAndRunTest(INPUT_T input0, INPUT_T input1, INPUT_T input2,
+ Func test_generator) {
+ DCHECK((sizeof(INPUT_T) == 4 || sizeof(INPUT_T) == 8));
+ DCHECK(sizeof(OUTPUT_T) == sizeof(INPUT_T));
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ // handle floating-point parameters
+ if (std::is_same<float, INPUT_T>::value) {
+ assm.fmv_w_x(fa0, a0);
+ assm.fmv_w_x(fa1, a1);
+ assm.fmv_w_x(fa2, a2);
+ } else if (std::is_same<double, INPUT_T>::value) {
+ assm.fmv_d_x(fa0, a0);
+ assm.fmv_d_x(fa1, a1);
+ assm.fmv_d_x(fa2, a2);
+ }
+
+ test_generator(assm);
+
+ // handle floating-point result
+ if (std::is_same<float, OUTPUT_T>::value) {
+ assm.fmv_x_w(a0, fa0);
+ } else if (std::is_same<double, OUTPUT_T>::value) {
+ assm.fmv_x_d(a0, fa0);
+ }
+ assm.jr(ra);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+
+ using OINT_T = typename std::conditional<
+ std::is_integral<OUTPUT_T>::value, OUTPUT_T,
+ typename std::conditional<sizeof(OUTPUT_T) == 4, int32_t,
+ int64_t>::type>::type;
+ using IINT_T = typename std::conditional<
+ std::is_integral<INPUT_T>::value, INPUT_T,
+ typename std::conditional<sizeof(INPUT_T) == 4, int32_t,
+ int64_t>::type>::type;
+ auto f = GeneratedCode<OINT_T(IINT_T, IINT_T, IINT_T)>::FromCode(*code);
+
+ auto res = f.Call(bit_cast<IINT_T>(input0), bit_cast<IINT_T>(input1),
+ bit_cast<IINT_T>(input2));
+ return bit_cast<OUTPUT_T>(res);
+}
+
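+// Expects |test_generator| to emit a store of the value passed in a1 (or fa0
+// for floating point) to the buffer addressed by a0, followed by a load of it
+// back into the return register; checks that |value| round-trips unchanged.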
+template <typename T>
+void GenAndRunTestForLoadStore(T value, Func test_generator) {
+ DCHECK(sizeof(T) == 4 || sizeof(T) == 8);
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ if (std::is_same<float, T>::value) {
+ assm.fmv_w_x(fa0, a1);
+ } else if (std::is_same<double, T>::value) {
+ assm.fmv_d_x(fa0, a1);
+ }
+
+ test_generator(assm);
+
+ if (std::is_same<float, T>::value) {
+ assm.fmv_x_w(a0, fa0);
+ } else if (std::is_same<double, T>::value) {
+ assm.fmv_x_d(a0, fa0);
+ }
+ assm.jr(ra);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+
+ using INT_T = typename std::conditional<
+ std::is_integral<T>::value, T,
+ typename std::conditional<sizeof(T) == 4, int32_t, int64_t>::type>::type;
+
+ auto f = GeneratedCode<INT_T(void* base, INT_T val)>::FromCode(*code);
+
+ int64_t tmp = 0;
+ auto res = f.Call(&tmp, bit_cast<INT_T>(value));
+ CHECK_EQ(bit_cast<T>(res), value);
+}
+
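+// Seeds the word addressed by a0 with |value| and then runs the LR/SC
+// sequence emitted by |test_generator| on it; the generated code is expected
+// to return the SC status, which is 0 when the store-conditional succeeds.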
+template <typename T, typename Func>
+void GenAndRunTestForLRSC(T value, Func test_generator) {
+ DCHECK(sizeof(T) == 4 || sizeof(T) == 8);
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ if (std::is_same<float, T>::value) {
+ assm.fmv_w_x(fa0, a1);
+ } else if (std::is_same<double, T>::value) {
+ assm.fmv_d_x(fa0, a1);
+ }
+
+ if (std::is_same<int32_t, T>::value) {
+ assm.sw(a1, a0, 0);
+ } else if (std::is_same<int64_t, T>::value) {
+ assm.sd(a1, a0, 0);
+ }
+ test_generator(assm);
+
+ if (std::is_same<float, T>::value) {
+ assm.fmv_x_w(a0, fa0);
+ } else if (std::is_same<double, T>::value) {
+ assm.fmv_x_d(a0, fa0);
+ }
+ assm.jr(ra);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+#if defined(DEBUG)
+ code->Print();
+#endif
+ using INT_T =
+ typename std::conditional<sizeof(T) == 4, int32_t, int64_t>::type;
+
+ T tmp = 0;
+ auto f = GeneratedCode<INT_T(void* base, INT_T val)>::FromCode(*code);
+ auto res = f.Call(&tmp, bit_cast<T>(value));
+ CHECK_EQ(bit_cast<T>(res), static_cast<T>(0));
+}
+
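+// Stores |input0| at the address passed in a0, then runs the AMO emitted by
+// |test_generator| (operating on that address, with |input1| as its operand)
+// and returns the memory contents after the operation, so the caller can
+// check the read-modify-write result.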
+template <typename INPUT_T, typename OUTPUT_T, typename Func>
+OUTPUT_T GenAndRunTestForAMO(INPUT_T input0, INPUT_T input1,
+ Func test_generator) {
+ DCHECK(sizeof(INPUT_T) == 4 || sizeof(INPUT_T) == 8);
+ DCHECK(sizeof(OUTPUT_T) == 4 || sizeof(OUTPUT_T) == 8);
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ // handle floating-point parameters
+ if (std::is_same<float, INPUT_T>::value) {
+ assm.fmv_w_x(fa0, a1);
+ assm.fmv_w_x(fa1, a2);
+ } else if (std::is_same<double, INPUT_T>::value) {
+ assm.fmv_d_x(fa0, a1);
+ assm.fmv_d_x(fa1, a2);
+ }
+
+ // store base integer
+ if (std::is_same<int32_t, INPUT_T>::value ||
+ std::is_same<uint32_t, INPUT_T>::value) {
+ assm.sw(a1, a0, 0);
+ } else if (std::is_same<int64_t, INPUT_T>::value ||
+ std::is_same<uint64_t, INPUT_T>::value) {
+ assm.sd(a1, a0, 0);
+ }
+ test_generator(assm);
+
+ // handle floating-point result
+ if (std::is_same<float, OUTPUT_T>::value) {
+ assm.fmv_x_w(a0, fa0);
+ } else if (std::is_same<double, OUTPUT_T>::value) {
+ assm.fmv_x_d(a0, fa0);
+ }
+
+ // load written integer
+ if (std::is_same<int32_t, INPUT_T>::value ||
+ std::is_same<uint32_t, INPUT_T>::value) {
+ assm.lw(a0, a0, 0);
+ } else if (std::is_same<int64_t, INPUT_T>::value ||
+ std::is_same<uint64_t, INPUT_T>::value) {
+ assm.ld(a0, a0, 0);
+ }
+
+ assm.jr(ra);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+#if defined(DEBUG)
+ code->Print();
+#endif
+ OUTPUT_T tmp = 0;
+ auto f =
+ GeneratedCode<OUTPUT_T(void* base, INPUT_T, INPUT_T)>::FromCode(*code);
+ auto res = f.Call(&tmp, bit_cast<INPUT_T>(input0), bit_cast<INPUT_T>(input1));
+ return bit_cast<OUTPUT_T>(res);
+}
+
+Handle<Code> AssembleCodeImpl(Func assemble);
+
+template <typename Signature>
+GeneratedCode<Signature> AssembleCode(Func assemble) {
+ return GeneratedCode<Signature>::FromCode(*AssembleCodeImpl(assemble));
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CCTEST_TEST_HELPER_RISCV_H_
diff --git a/deps/v8/test/cctest/test-icache.cc b/deps/v8/test/cctest/test-icache.cc
index 82baa9fe96..13c94f3afc 100644
--- a/deps/v8/test/cctest/test-icache.cc
+++ b/deps/v8/test/cctest/test-icache.cc
@@ -61,6 +61,10 @@ static void FloodWithInc(Isolate* isolate, TestingAssemblerBuffer* buffer) {
for (int i = 0; i < kNumInstr; ++i) {
__ agfi(r2, Operand(1));
}
+#elif V8_TARGET_ARCH_RISCV64
+ for (int i = 0; i < kNumInstr; ++i) {
+ __ Add32(a0, a0, Operand(1));
+ }
#else
#error Unsupported architecture
#endif
@@ -173,7 +177,8 @@ TEST(TestFlushICacheOfWritableAndExecutable) {
HandleScope handles(isolate);
for (int i = 0; i < kNumIterations; ++i) {
- auto buffer = AllocateAssemblerBuffer(kBufferSize);
+ auto buffer = AllocateAssemblerBuffer(kBufferSize, nullptr,
+ VirtualMemory::kMapAsJittable);
// Allow calling the function from C++.
auto f = GeneratedCode<F0>::FromBuffer(isolate, buffer->start());
diff --git a/deps/v8/test/cctest/test-inobject-slack-tracking.cc b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
index e2de4df4fb..0a88302cd9 100644
--- a/deps/v8/test/cctest/test-inobject-slack-tracking.cc
+++ b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
@@ -80,15 +80,11 @@ static Object GetFieldValue(JSObject obj, int property_index) {
}
static double GetDoubleFieldValue(JSObject obj, FieldIndex field_index) {
- if (obj.IsUnboxedDoubleField(field_index)) {
- return obj.RawFastDoublePropertyAt(field_index);
+ Object value = obj.RawFastPropertyAt(field_index);
+ if (value.IsHeapNumber()) {
+ return HeapNumber::cast(value).value();
} else {
- Object value = obj.RawFastPropertyAt(field_index);
- if (value.IsHeapNumber()) {
- return HeapNumber::cast(value).value();
- } else {
- return value.Number();
- }
+ return value.Number();
}
}
diff --git a/deps/v8/test/cctest/test-js-to-wasm.cc b/deps/v8/test/cctest/test-js-to-wasm.cc
new file mode 100644
index 0000000000..4d61e944dc
--- /dev/null
+++ b/deps/v8/test/cctest/test-js-to-wasm.cc
@@ -0,0 +1,999 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <iomanip>
+
+#include "include/v8.h"
+#include "src/api/api.h"
+#include "src/wasm/wasm-module-builder.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/node-observer-tester.h"
+#include "test/cctest/test-api.h"
+#include "test/common/wasm/flag-utils.h"
+#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+static const int kDeoptLoopCount = 1e4;
+
+// Validates the type of the result returned by a test function.
+template <typename T>
+bool CheckType(v8::Local<v8::Value> result) {
+ return result->IsNumber();
+}
+template <>
+bool CheckType<void>(v8::Local<v8::Value> result) {
+ return result->IsUndefined();
+}
+template <>
+bool CheckType<int>(v8::Local<v8::Value> result) {
+ return result->IsInt32();
+}
+template <>
+bool CheckType<int64_t>(v8::Local<v8::Value> result) {
+ return result->IsBigInt();
+}
+template <>
+bool CheckType<v8::Local<v8::BigInt>>(v8::Local<v8::Value> result) {
+ return result->IsBigInt();
+}
+
+static TestSignatures sigs;
+
+struct ExportedFunction {
+ std::string name;
+ FunctionSig* signature;
+ std::vector<ValueType> locals;
+ std::vector<uint8_t> code;
+
+ bool DoesSignatureContainI64() const {
+ for (auto type : signature->all()) {
+ if (type == wasm::kWasmI64) return true;
+ }
+ return false;
+ }
+};
+
+#define WASM_CODE(...) __VA_ARGS__
+
+#define DECLARE_EXPORTED_FUNCTION(name, sig, code) \
+ static ExportedFunction k_##name = {#name, sig, {}, code};
+
+#define DECLARE_EXPORTED_FUNCTION_WITH_LOCALS(name, sig, locals, code) \
+ static ExportedFunction k_##name = {#name, sig, locals, code};
+
+DECLARE_EXPORTED_FUNCTION(nop, sigs.v_v(), WASM_CODE({WASM_NOP}))
+
+DECLARE_EXPORTED_FUNCTION(i32_square, sigs.i_i(),
+ WASM_CODE({WASM_LOCAL_GET(0), WASM_LOCAL_GET(0),
+ kExprI32Mul}))
+
+DECLARE_EXPORTED_FUNCTION(i64_square, sigs.l_l(),
+ WASM_CODE({WASM_LOCAL_GET(0), WASM_LOCAL_GET(0),
+ kExprI64Mul}))
+
+DECLARE_EXPORTED_FUNCTION(f32_square, sigs.f_f(),
+ WASM_CODE({WASM_LOCAL_GET(0), WASM_LOCAL_GET(0),
+ kExprF32Mul}))
+
+DECLARE_EXPORTED_FUNCTION(f64_square, sigs.d_d(),
+ WASM_CODE({WASM_LOCAL_GET(0), WASM_LOCAL_GET(0),
+ kExprF64Mul}))
+
+DECLARE_EXPORTED_FUNCTION(void_square, sigs.v_i(),
+ WASM_CODE({WASM_LOCAL_GET(0), WASM_LOCAL_GET(0),
+ kExprI32Mul, kExprDrop}))
+
+DECLARE_EXPORTED_FUNCTION(add, sigs.i_ii(),
+ WASM_CODE({WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
+ kExprI32Add}))
+
+DECLARE_EXPORTED_FUNCTION(i64_add, sigs.l_ll(),
+ WASM_CODE({WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
+ kExprI64Add}))
+
+DECLARE_EXPORTED_FUNCTION(sum3, sigs.i_iii(),
+ WASM_CODE({WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
+ WASM_LOCAL_GET(2), kExprI32Add,
+ kExprI32Add}))
+
+DECLARE_EXPORTED_FUNCTION(no_args, sigs.i_v(), WASM_CODE({WASM_I32V(42)}))
+
+DECLARE_EXPORTED_FUNCTION(load_i32, sigs.i_i(),
+ WASM_CODE({WASM_LOAD_MEM(MachineType::Int32(),
+ WASM_LOCAL_GET(0))}))
+DECLARE_EXPORTED_FUNCTION(load_i64, sigs.l_l(),
+ WASM_CODE({WASM_I64_SCONVERT_I32(WASM_LOAD_MEM(
+ MachineType::Int32(),
+ WASM_I32_CONVERT_I64(WASM_LOCAL_GET(0))))}))
+DECLARE_EXPORTED_FUNCTION(load_f32, sigs.f_f(),
+ WASM_CODE({WASM_F32_SCONVERT_I32(WASM_LOAD_MEM(
+ MachineType::Int32(),
+ WASM_I32_SCONVERT_F32(WASM_LOCAL_GET(0))))}))
+DECLARE_EXPORTED_FUNCTION(load_f64, sigs.d_d(),
+ WASM_CODE({WASM_F64_SCONVERT_I32(WASM_LOAD_MEM(
+ MachineType::Int32(),
+ WASM_I32_SCONVERT_F64(WASM_LOCAL_GET(0))))}))
+DECLARE_EXPORTED_FUNCTION(store_i32, sigs.v_ii(),
+ WASM_CODE({WASM_STORE_MEM(MachineType::Int32(),
+ WASM_LOCAL_GET(0),
+ WASM_LOCAL_GET(1))}))
+
+// int32_t test(int32_t v0, int32_t v1, int32_t v2, int32_t v3, int32_t v4,
+// int32_t v5, int32_t v6, int32_t v7, int32_t v8, int32_t v9) {
+// return v0 + v1 + v2 + v3 + v4 + v5 + v6 + v7 + v8 + v9;
+// }
+static const ValueType kIntTypes11[11] = {
+ kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32,
+ kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32};
+static FunctionSig i_iiiiiiiiii(1, 10, kIntTypes11);
+DECLARE_EXPORTED_FUNCTION(
+ sum10, &i_iiiiiiiiii,
+ WASM_CODE({WASM_LOCAL_GET(0), WASM_LOCAL_GET(1), WASM_LOCAL_GET(2),
+ WASM_LOCAL_GET(3), WASM_LOCAL_GET(4), WASM_LOCAL_GET(5),
+ WASM_LOCAL_GET(6), WASM_LOCAL_GET(7), WASM_LOCAL_GET(8),
+ WASM_LOCAL_GET(9), kExprI32Add, kExprI32Add, kExprI32Add,
+ kExprI32Add, kExprI32Add, kExprI32Add, kExprI32Add, kExprI32Add,
+ kExprI32Add}))
+
+// double test(int32_t i32, int64_t i64, float f32, double f64) {
+// return i32 + i64 + f32 + f64;
+// }
+static const ValueType kMixedTypes5[5] = {kWasmF64, kWasmI32, kWasmI64,
+ kWasmF32, kWasmF64};
+static FunctionSig d_ilfd(1, 4, kMixedTypes5);
+DECLARE_EXPORTED_FUNCTION(
+ sum_mixed, &d_ilfd,
+ WASM_CODE({WASM_LOCAL_GET(2), kExprF64ConvertF32, WASM_LOCAL_GET(3),
+ kExprF64Add, WASM_LOCAL_GET(0), kExprF64UConvertI32, kExprF64Add,
+ WASM_LOCAL_GET(1), kExprF64UConvertI64, kExprF64Add}))
+
+// float f32_square_deopt(float f32) {
+// static int count = 0;
+// if (++count == kDeoptLoopCount) {
+// callback(f32);
+// }
+// return f32 * f32;
+// }
+DECLARE_EXPORTED_FUNCTION_WITH_LOCALS(
+ f32_square_deopt, sigs.f_f(), {kWasmI32},
+ WASM_CODE(
+ {WASM_STORE_MEM(
+ MachineType::Int32(), WASM_I32V(1024),
+ WASM_LOCAL_TEE(1, WASM_I32_ADD(WASM_LOAD_MEM(MachineType::Int32(),
+ WASM_I32V(1024)),
+ WASM_ONE))),
+ WASM_BLOCK(
+ WASM_BR_IF(0, WASM_I32_NE(WASM_LOCAL_GET(1),
+ WASM_I32V(kDeoptLoopCount))),
+ WASM_CALL_FUNCTION(0, WASM_F64_CONVERT_F32(WASM_LOCAL_GET(0)))),
+ WASM_F32_MUL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0))}))
+
+// double f64_square_deopt(double f64) {
+// static int count = 0;
+// if (++count == kDeoptLoopCount) {
+// callback(f64);
+// }
+// return f64 * f64;
+// }
+DECLARE_EXPORTED_FUNCTION_WITH_LOCALS(
+ f64_square_deopt, sigs.d_d(), {kWasmI32},
+ WASM_CODE(
+ {WASM_STORE_MEM(
+ MachineType::Int32(), WASM_I32V(1028),
+ WASM_LOCAL_TEE(1, WASM_I32_ADD(WASM_LOAD_MEM(MachineType::Int32(),
+ WASM_I32V(1028)),
+ WASM_ONE))),
+ WASM_BLOCK(WASM_BR_IF(0, WASM_I32_NE(WASM_LOCAL_GET(1),
+ WASM_I32V(kDeoptLoopCount))),
+ WASM_CALL_FUNCTION(0, WASM_LOCAL_GET(0))),
+ WASM_F64_MUL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0))}))
+
+// int32_t i32_square_deopt(int32_t i32) {
+// static int count = 0;
+// if (++count == kDeoptLoopCount) {
+// callback(i32);
+// }
+// return i32 * i32;
+// }
+DECLARE_EXPORTED_FUNCTION_WITH_LOCALS(
+ i32_square_deopt, sigs.i_i(), {kWasmI32},
+ WASM_CODE(
+ {WASM_STORE_MEM(
+ MachineType::Int32(), WASM_I32V(1032),
+ WASM_LOCAL_TEE(1, WASM_I32_ADD(WASM_LOAD_MEM(MachineType::Int32(),
+ WASM_I32V(1032)),
+ WASM_ONE))),
+ WASM_BLOCK(
+ WASM_BR_IF(0, WASM_I32_NE(WASM_LOCAL_GET(1),
+ WASM_I32V(kDeoptLoopCount))),
+ WASM_CALL_FUNCTION(0, WASM_F64_SCONVERT_I32(WASM_LOCAL_GET(0)))),
+ WASM_I32_MUL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0))}))
+
+// int64_t i64_square_deopt(int64_t i64) {
+// static int count = 0;
+// if (++count == kDeoptLoopCount) {
+// callback(i64);
+// }
+// return i64 * i64;
+// }
+DECLARE_EXPORTED_FUNCTION_WITH_LOCALS(
+ i64_square_deopt, sigs.l_l(), {kWasmI32},
+ WASM_CODE(
+ {WASM_STORE_MEM(
+ MachineType::Int32(), WASM_I32V(1036),
+ WASM_LOCAL_TEE(1, WASM_I32_ADD(WASM_LOAD_MEM(MachineType::Int32(),
+ WASM_I32V(1036)),
+ WASM_ONE))),
+ WASM_BLOCK(
+ WASM_BR_IF(0, WASM_I32_NE(WASM_LOCAL_GET(1),
+ WASM_I32V(kDeoptLoopCount))),
+ WASM_CALL_FUNCTION(0, WASM_F64_SCONVERT_I64(WASM_LOCAL_GET(0)))),
+ WASM_I64_MUL(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0))}))
+
+// void void_square_deopt(int32_t i32) {
+// static int count = 0;
+// if (++count == kDeoptLoopCount) {
+// callback(i32);
+// }
+// }
+DECLARE_EXPORTED_FUNCTION_WITH_LOCALS(
+ void_square_deopt, sigs.v_i(), {kWasmI32},
+ WASM_CODE(
+ {WASM_STORE_MEM(
+ MachineType::Int32(), WASM_I32V(1040),
+ WASM_LOCAL_TEE(1, WASM_I32_ADD(WASM_LOAD_MEM(MachineType::Int32(),
+ WASM_I32V(1040)),
+ WASM_ONE))),
+ WASM_BLOCK(
+ WASM_BR_IF(0, WASM_I32_NE(WASM_LOCAL_GET(1),
+ WASM_I32V(kDeoptLoopCount))),
+ WASM_CALL_FUNCTION(0, WASM_F64_SCONVERT_I32(WASM_LOCAL_GET(0))))}))
+
+enum TestMode { kJSToWasmInliningDisabled, kJSToWasmInliningEnabled };
+
+class FastJSWasmCallTester {
+ public:
+ FastJSWasmCallTester()
+ : allow_natives_syntax_(&i::FLAG_allow_natives_syntax, true),
+ inline_js_wasm_calls_(&i::FLAG_turbo_inline_js_wasm_calls, true),
+ stress_background_compile_(&i::FLAG_stress_background_compile, false),
+ allocator_(),
+ zone_(&allocator_, ZONE_NAME),
+ builder_(zone_.New<WasmModuleBuilder>(&zone_)) {}
+
+ void DeclareCallback(const char* name, FunctionSig* signature,
+ const char* module) {
+ builder_->AddImport(CStrVector(name), signature, CStrVector(module));
+ }
+
+ void AddExportedFunction(const ExportedFunction& exported_func) {
+ WasmFunctionBuilder* func = builder_->AddFunction(exported_func.signature);
+ for (auto& wasm_type : exported_func.locals) func->AddLocal(wasm_type);
+ func->EmitCode(exported_func.code.data(),
+ static_cast<uint32_t>(exported_func.code.size()));
+ func->Emit(kExprEnd);
+ builder_->AddExport(CStrVector(exported_func.name.c_str()),
+ kExternalFunction, func->func_index());
+
+ // JS-to-Wasm inlining is disabled when targeting 32 bits if the Wasm
+ // function signature contains an I64.
+#if defined(V8_TARGET_ARCH_32_BIT)
+ if (exported_func.DoesSignatureContainI64()) {
+ test_mode_ = kJSToWasmInliningDisabled;
+ }
+#endif
+ }
+
+ // Executes a test function that returns a value of type T.
+ template <typename T>
+ void CallAndCheckWasmFunction(const std::string& exported_function_name,
+ const std::vector<v8::Local<v8::Value>>& args,
+ const T& expected_result,
+ bool test_lazy_deopt = false) {
+ LocalContext env;
+
+ v8::Local<v8::Value> result_value = DoCallAndCheckWasmFunction(
+ env, exported_function_name, args, test_lazy_deopt);
+
+ CHECK(CheckType<T>(result_value));
+ T result = ConvertJSValue<T>::Get(result_value, env.local()).ToChecked();
+ CHECK_EQ(result, expected_result);
+ }
+
+ // Executes a test function that returns NaN.
+ void CallAndCheckWasmFunctionNaN(
+ const std::string& exported_function_name,
+ const std::vector<v8::Local<v8::Value>>& args,
+ bool test_lazy_deopt = false) {
+ LocalContext env;
+ v8::Local<v8::Value> result_value = DoCallAndCheckWasmFunction(
+ env, exported_function_name, args, test_lazy_deopt);
+
+ CHECK(CheckType<double>(result_value));
+ double result =
+ ConvertJSValue<double>::Get(result_value, env.local()).ToChecked();
+ CHECK(std::isnan(result));
+ }
+
+ // Executes a test function that returns a BigInt.
+ void CallAndCheckWasmFunctionBigInt(
+ const std::string& exported_function_name,
+ const std::vector<v8::Local<v8::Value>>& args,
+ const v8::Local<v8::BigInt> expected_result,
+ bool test_lazy_deopt = false) {
+ LocalContext env;
+ v8::Local<v8::Value> result_value = DoCallAndCheckWasmFunction(
+ env, exported_function_name, args, test_lazy_deopt);
+
+ CHECK(CheckType<v8::Local<v8::BigInt>>(result_value));
+ auto result =
+ ConvertJSValue<v8::BigInt>::Get(result_value, env.local()).ToChecked();
+ CHECK_EQ(result->Int64Value(), expected_result->Int64Value());
+ }
+
+ // Executes a test function that returns void.
+ void CallAndCheckWasmFunction(const std::string& exported_function_name,
+ const std::vector<v8::Local<v8::Value>>& args,
+ bool test_lazy_deopt = false) {
+ LocalContext env;
+ v8::Local<v8::Value> result_value = DoCallAndCheckWasmFunction(
+ env, exported_function_name, args, test_lazy_deopt);
+
+ CHECK(test_lazy_deopt ? result_value->IsNumber() /* NaN */
+ : result_value->IsUndefined());
+ }
+
+ // Executes a test function that triggers eager deoptimization.
+ template <typename T>
+ T CallAndCheckWasmFunctionWithEagerDeopt(
+ const std::string& exported_function_name, const std::string& arg,
+ const T& expected_result, const std::string& deopt_arg) {
+ LocalContext env;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::TryCatch try_catch(isolate);
+
+ std::string js_code =
+ "const importObj = {"
+ " env: {"
+ " callback : function(num) {}"
+ " }"
+ "};"
+ "let buf = new Uint8Array(" +
+ WasmModuleAsJSArray() +
+ ");"
+ "let module = new WebAssembly.Module(buf);"
+ "let instance = new WebAssembly.Instance(module, importObj);"
+ "function test(value) {"
+ " return %ObserveNode(instance.exports." +
+ exported_function_name +
+ "(value));"
+ "}"
+ "%PrepareFunctionForOptimization(test);"
+ "test(" +
+ arg +
+ ");"
+ "%OptimizeFunctionOnNextCall(test);"
+ "test(" +
+ arg + ");";
+
+ v8::Local<v8::Value> result_value =
+ CompileRunWithJSWasmCallNodeObserver(js_code.c_str());
+ CHECK(CheckType<T>(result_value));
+ T result = ConvertJSValue<T>::Get(result_value, env.local()).ToChecked();
+ CHECK_EQ(result, expected_result);
+
+ std::string deopt_code = "test(" + deopt_arg + ");";
+ result_value = CompileRun(deopt_code.c_str());
+ CHECK(CheckType<T>(result_value));
+ return ConvertJSValue<T>::Get(result_value, env.local()).ToChecked();
+ }
+
+ // Executes a test function that throws an exception.
+ void CallAndCheckExceptionCaught(const std::string& exported_function_name,
+ const v8::Local<v8::Value> arg) {
+ LocalContext env;
+ CHECK((*env)->Global()->Set(env.local(), v8_str("arg"), arg).FromJust());
+
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::TryCatch try_catch(isolate);
+
+ std::string js_code =
+ "const importObj = {"
+ " env: {"
+ " callback : function(num) {}"
+ " }"
+ "};"
+ "let buf = new Uint8Array(" +
+ WasmModuleAsJSArray() +
+ ");"
+ "let module = new WebAssembly.Module(buf);"
+ "let instance = new WebAssembly.Instance(module, importObj);"
+ "let " +
+ exported_function_name + " = instance.exports." +
+ exported_function_name +
+ ";"
+ "function test() {"
+ " return %ObserveNode(" +
+ exported_function_name +
+ "(arg));"
+ "}"
+ "%PrepareFunctionForOptimization(test);"
+ "test();";
+
+ CompileRun(js_code.c_str());
+ CHECK(try_catch.HasCaught());
+
+ try_catch.Reset();
+ CompileRunWithJSWasmCallNodeObserver(
+ "%OptimizeFunctionOnNextCall(test); test();");
+ CHECK(try_catch.HasCaught());
+ }
+
+ // Executes a test function with a try/catch.
+ void CallAndCheckWithTryCatch(const std::string& exported_function_name,
+ const v8::Local<v8::Value> arg) {
+ LocalContext env;
+ CHECK((*env)->Global()->Set(env.local(), v8_str("arg"), arg).FromJust());
+
+ std::string js_code =
+ "const importObj = {"
+ " env: {"
+ " callback : function(num) {}"
+ " }"
+ "};"
+ "let buf = new Uint8Array(" +
+ WasmModuleAsJSArray() +
+ ");"
+ "let module = new WebAssembly.Module(buf);"
+ "let instance = new WebAssembly.Instance(module, importObj);"
+ "let " +
+ exported_function_name + " = instance.exports." +
+ exported_function_name +
+ ";"
+ "function test() {"
+ " try {"
+ " return " +
+ exported_function_name +
+ "(arg);"
+ " } catch (e) {"
+ " return 0;"
+ " }"
+ "}"
+ "%PrepareFunctionForOptimization(test);"
+ "test();";
+ v8::Local<v8::Value> result_value_interpreted = CompileRun(js_code.c_str());
+ CHECK(CheckType<int32_t>(result_value_interpreted));
+ auto result_interpreted =
+ ConvertJSValue<int32_t>::Get(result_value_interpreted, env.local())
+ .ToChecked();
+
+ v8::Local<v8::Value> result_value_compiled = CompileRun(
+ "%OptimizeFunctionOnNextCall(test);"
+ "test();");
+ CHECK(CheckType<int32_t>(result_value_compiled));
+ auto result_compiled =
+ ConvertJSValue<int32_t>::Get(result_value_compiled, env.local())
+ .ToChecked();
+
+ CHECK_EQ(result_interpreted, result_compiled);
+ }
+
+ // Executes a test function with a try/catch calling a Wasm function returning
+ // void.
+ void CallAndCheckWithTryCatch_void(const std::string& exported_function_name,
+ const v8::Local<v8::Value> arg0,
+ const v8::Local<v8::Value> arg1) {
+ LocalContext env;
+ CHECK((*env)->Global()->Set(env.local(), v8_str("arg0"), arg0).FromJust());
+ CHECK((*env)->Global()->Set(env.local(), v8_str("arg1"), arg1).FromJust());
+
+ std::string js_code =
+ "const importObj = {"
+ " env: {"
+ " callback : function(num) {}"
+ " }"
+ "};"
+ "let buf = new Uint8Array(" +
+ WasmModuleAsJSArray() +
+ ");"
+ "let module = new WebAssembly.Module(buf);"
+ "let instance = new WebAssembly.Instance(module, importObj);"
+ "let " +
+ exported_function_name + " = instance.exports." +
+ exported_function_name +
+ ";"
+ "function test() {"
+ " try {"
+ " " +
+ exported_function_name +
+ "(arg0, arg1);"
+ " return 1;"
+ " } catch (e) {"
+ " return 0;"
+ " }"
+ "}"
+ "%PrepareFunctionForOptimization(test);"
+ "test();";
+ v8::Local<v8::Value> result_value_interpreted = CompileRun(js_code.c_str());
+ CHECK(CheckType<int32_t>(result_value_interpreted));
+ auto result_interpreted =
+ ConvertJSValue<int32_t>::Get(result_value_interpreted, env.local())
+ .ToChecked();
+
+ v8::Local<v8::Value> result_value_compiled = CompileRun(
+ "%OptimizeFunctionOnNextCall(test);"
+ "test();");
+ CHECK(CheckType<int32_t>(result_value_compiled));
+ auto result_compiled =
+ ConvertJSValue<int32_t>::Get(result_value_compiled, env.local())
+ .ToChecked();
+
+ CHECK_EQ(result_interpreted, result_compiled);
+ }
+
+ private:
+  // Converts the code of a Wasm module into a string representing the
+  // contents of a JavaScript Uint8Array, which can be compiled with
+  // new WebAssembly.Module(buf).
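+  // The first bytes are always the Wasm magic number and version, so the
+  // result starts with "[0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00, ...]".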
+ std::string WasmModuleAsJSArray() {
+ ZoneBuffer buffer(&zone_);
+ builder_->WriteTo(&buffer);
+
+ std::stringstream string_stream;
+ string_stream << "[";
+ auto it = buffer.begin();
+ if (it != buffer.end()) {
+ string_stream << "0x" << std::setfill('0') << std::setw(2) << std::hex
+ << static_cast<int>(*it++);
+ }
+ while (it != buffer.end()) {
+ string_stream << ", 0x" << std::setfill('0') << std::setw(2) << std::hex
+ << static_cast<int>(*it++);
+ }
+ string_stream << "]";
+ return string_stream.str();
+ }
+
+ v8::Local<v8::Value> DoCallAndCheckWasmFunction(
+ LocalContext& env, const std::string& exported_function_name,
+ const std::vector<v8::Local<v8::Value>>& args,
+ bool test_lazy_deopt = false) {
+ for (size_t i = 0; i < args.size(); i++) {
+ CHECK((*env)
+ ->Global()
+ ->Set(env.local(), v8_str(("arg" + std::to_string(i)).c_str()),
+ args[i])
+ .FromJust());
+ }
+
+ std::string js_code =
+ test_lazy_deopt
+ ? GetJSTestCodeWithLazyDeopt(env, WasmModuleAsJSArray(),
+ exported_function_name, args.size())
+ : GetJSTestCode(WasmModuleAsJSArray(), exported_function_name,
+ args.size());
+ return CompileRunWithJSWasmCallNodeObserver(js_code);
+ }
+
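+  // Compiles and runs |js_code| while observing the call wrapped in
+  // %ObserveNode(...): the JSCall node is expected to be lowered to a
+  // JSWasmCall when JS-to-Wasm call inlining is enabled, and to a plain Call
+  // otherwise.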
+ v8::Local<v8::Value> CompileRunWithJSWasmCallNodeObserver(
+ const std::string& js_code) {
+ compiler::ModificationObserver js_wasm_call_observer(
+ [](const compiler::Node* node) {
+ CHECK_EQ(compiler::IrOpcode::kJSCall, node->opcode());
+ },
+ [this](const compiler::Node* node,
+ const compiler::ObservableNodeState& old_state)
+ -> compiler::NodeObserver::Observation {
+ if (old_state.opcode() != node->opcode()) {
+ CHECK_EQ(compiler::IrOpcode::kJSCall, old_state.opcode());
+
+ // JS-to-Wasm inlining is disabled when targeting 32 bits if the
+ // Wasm function signature contains an I64.
+ if (test_mode_ == kJSToWasmInliningEnabled) {
+ CHECK_EQ(compiler::IrOpcode::kJSWasmCall, node->opcode());
+ } else {
+ CHECK_EQ(compiler::IrOpcode::kCall, node->opcode());
+ }
+
+ return compiler::NodeObserver::Observation::kStop;
+ }
+ return compiler::NodeObserver::Observation::kContinue;
+ });
+
+ {
+ compiler::ObserveNodeScope scope(CcTest::i_isolate(),
+ &js_wasm_call_observer);
+ return CompileRun(js_code.c_str());
+ }
+ }
+
+ // Format the JS test code that loads and instantiates a Wasm module and
+ // calls a Wasm exported function, making sure that it is compiled by
+ // TurboFan:
+ //
+  // function test() {
+  //   let result = exported_func(arg0, arg1, ..., argN-1);
+  //   return result;
+  // }
+ std::string GetJSTestCode(const std::string& wasm_module,
+ const std::string& wasm_exported_function_name,
+ size_t arity) {
+ std::string js_args = ArgsToString(arity);
+ return "const importObj = {"
+ " env: { callback : function(num) {} }"
+ "};"
+ "let buf = new Uint8Array(" +
+ wasm_module +
+ ");"
+ "let module = new WebAssembly.Module(buf);"
+ "let instance = new WebAssembly.Instance(module, importObj);"
+ "let " +
+ wasm_exported_function_name + " = instance.exports." +
+ wasm_exported_function_name +
+ ";"
+ "function test() {"
+ " let result = %ObserveNode(" +
+ wasm_exported_function_name + "(" + js_args +
+ "));"
+ " return result;"
+ "}"
+ "%PrepareFunctionForOptimization(test);"
+ "test(" +
+ js_args +
+ ");"
+ "%OptimizeFunctionOnNextCall(test);"
+ "test(" +
+ js_args + ");";
+ }
+
+  // Format the JS test code that loads and instantiates a Wasm module and
+  // calls a Wasm exported function in a loop, making sure that it is
+  // compiled by TurboFan:
+ //
+  // var b = 0;
+  // var n = 0;
+  // function test() {
+  //   var result = 0;
+  //   for (let i = 0; i < kDeoptLoopCount + 5; i++) {
+  //     result = exported_func(arg0 + b) + n;
+  //   }
+  //   return result;
+ // }
+ //
+ // Here the Wasm function calls back into a JavaScript function that modifies
+ // the values of 'b' and 'n', triggering the lazy deoptimization of the 'test'
+ // function.
+ std::string GetJSTestCodeWithLazyDeopt(
+ LocalContext& env, const std::string& wasm_module,
+ const std::string& wasm_exported_function_name, size_t arity) {
+ DCHECK_LE(arity, 1);
+ bool bigint_arg = false;
+ if (arity == 1) {
+ v8::Local<v8::Value> arg0 =
+ (*env)->Global()->Get(env.local(), v8_str("arg0")).ToLocalChecked();
+ bigint_arg = arg0->IsBigInt();
+ }
+
+ std::string js_args = ArgsToString(arity);
+ std::string code =
+ "const importObj = {"
+ " env: {"
+ " callback : function(num) {"
+ " n = 1; b = 1;"
+ " }"
+ " }"
+ "};"
+ "let buf = new Uint8Array(" +
+ wasm_module +
+ ");"
+ "let module = new WebAssembly.Module(buf);"
+ "let instance = new WebAssembly.Instance(module, importObj);"
+ "let " +
+ wasm_exported_function_name + " = instance.exports." +
+ wasm_exported_function_name +
+ ";"
+ "var b = 0;"
+ "var n = 0;"
+ "function test(" +
+ js_args +
+ ") {"
+ " var result = 0;"
+ " for (let i = 0; i < " +
+ std::to_string(kDeoptLoopCount) + " + 5; i++) {";
+ code += bigint_arg
+ ? " result = %ObserveNode(" + wasm_exported_function_name +
+ "(" + js_args + " + BigInt(b))) + BigInt(n);"
+ : " result = %ObserveNode(" + wasm_exported_function_name +
+ "(" + js_args + " + b)) + n;";
+ code +=
+ " }"
+ " return result;"
+ "}"
+ "test(" +
+ js_args + ");";
+
+ return code;
+ }
+
+ // Format a string that represents the set of arguments passed to a test
+ // function, in the form 'arg0, arg1, ..., argN-1'.
+ // The value of these args is set by GetJSTestCodeWithLazyDeopt.
+ std::string ArgsToString(size_t arity) {
+ std::stringstream string_stream;
+ for (size_t i = 0; i < arity; i++) {
+ if (i > 0) string_stream << ", ";
+ string_stream << "arg" << i;
+ }
+ return string_stream.str();
+ }
+
+ i::FlagScope<bool> allow_natives_syntax_;
+ i::FlagScope<bool> inline_js_wasm_calls_;
+ i::FlagScope<bool> stress_background_compile_;
+ AccountingAllocator allocator_;
+ Zone zone_;
+ WasmModuleBuilder* builder_;
+ TestMode test_mode_ = kJSToWasmInliningEnabled;
+};
+
+TEST(TestFastJSWasmCall_Nop) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_nop);
+ tester.CallAndCheckWasmFunction("nop", {});
+}
+
+TEST(TestFastJSWasmCall_I32Arg) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_i32_square);
+ tester.CallAndCheckWasmFunction<int32_t>("i32_square", {v8_num(42)}, 42 * 42);
+}
+
+TEST(TestFastJSWasmCall_I32ArgNotSmi) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_add);
+ tester.CallAndCheckWasmFunction<int32_t>(
+ "add", {v8_num(0x7fffffff), v8_int(1)}, 0x80000000);
+}
+
+TEST(TestFastJSWasmCall_F32Arg) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_f32_square);
+ tester.CallAndCheckWasmFunction<float>("f32_square", {v8_num(42.0)},
+ 42.0 * 42.0);
+}
+
+TEST(TestFastJSWasmCall_F64Arg) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_f64_square);
+ tester.CallAndCheckWasmFunction<double>("f64_square", {v8_num(42.0)},
+ 42.0 * 42.0);
+}
+
+TEST(TestFastJSWasmCall_I64Arg) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_i64_square);
+ tester.CallAndCheckWasmFunctionBigInt("i64_square", {v8_bigint(1234567890ll)},
+ v8_bigint(1234567890ll * 1234567890ll));
+}
+
+TEST(TestFastJSWasmCall_I64NegativeResult) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_i64_add);
+ tester.CallAndCheckWasmFunctionBigInt(
+ "i64_add", {v8_bigint(1ll), v8_bigint(-2ll)}, v8_bigint(-1ll));
+}
+
+TEST(TestFastJSWasmCall_MultipleArgs) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_sum10);
+ tester.CallAndCheckWasmFunction<int32_t>(
+ "sum10",
+ {v8_num(1), v8_num(2), v8_num(3), v8_num(4), v8_num(5), v8_num(6),
+ v8_num(7), v8_num(8), v8_num(9), v8_num(10)},
+ 55);
+}
+
+TEST(TestFastJSWasmCall_MixedArgs) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_sum_mixed);
+ tester.CallAndCheckWasmFunction<double>(
+ "sum_mixed", {v8_num(1), v8_bigint(0x80000000), v8_num(42.0), v8_num(.5)},
+ 1 + 0x80000000 + 42 + .5);
+}
+
+TEST(TestFastJSWasmCall_MistypedArgs) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+
+ tester.AddExportedFunction(k_i32_square);
+ tester.CallAndCheckWasmFunction<int32_t>("i32_square", {v8_str("test")}, 0);
+}
+
+TEST(TestFastJSWasmCall_MixedMistypedArgs) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+
+ tester.AddExportedFunction(k_sum_mixed);
+ tester.CallAndCheckWasmFunctionNaN(
+ "sum_mixed", {v8_str("alpha"), v8_bigint(0x80000000), v8_str("beta"),
+ v8_str("gamma")});
+}
+
+TEST(TestFastJSWasmCall_NoArgs) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+
+ tester.AddExportedFunction(k_no_args);
+ tester.CallAndCheckWasmFunction<int32_t>("no_args", {}, 42);
+}
+
+TEST(TestFastJSWasmCall_NoReturnTypes) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+
+ tester.AddExportedFunction(k_void_square);
+ tester.CallAndCheckWasmFunction("void_square", {v8_num(42)});
+}
+
+TEST(TestFastJSWasmCall_MismatchedArity) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+
+ tester.AddExportedFunction(k_sum3);
+ tester.CallAndCheckWasmFunction<int32_t>("sum3", {v8_num(1), v8_num(2)}, 3);
+ tester.CallAndCheckWasmFunction<int32_t>(
+ "sum3",
+ {v8_num(1), v8_num(2), v8_num(3), v8_num(4), v8_num(5), v8_num(6)}, 6);
+ tester.CallAndCheckWasmFunction<int32_t>("sum3", {}, 0);
+}
+
+// Lazy deoptimization tests
+
+TEST(TestFastJSWasmCall_LazyDeopt_I32Result) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.DeclareCallback("callback", sigs.v_d(), "env");
+ tester.AddExportedFunction(k_i32_square_deopt);
+ tester.CallAndCheckWasmFunction<int32_t>("i32_square_deopt", {v8_num(42)},
+ 43 * 43 + 1, true);
+}
+
+TEST(TestFastJSWasmCall_LazyDeopt_I64Result) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.DeclareCallback("callback", sigs.v_d(), "env");
+ tester.AddExportedFunction(k_i64_square_deopt);
+
+ tester.CallAndCheckWasmFunctionBigInt("i64_square_deopt", {v8_bigint(42)},
+ v8_bigint(43 * 43 + 1), true);
+
+ // This test would fail if the result was converted into a HeapNumber through
+ // a double, losing precision.
+ tester.CallAndCheckWasmFunctionBigInt(
+ "i64_square_deopt", {v8_bigint(1234567890ll)},
+ v8_bigint(1524157877488187882ll), // (1234567890 + 1)*(1234567890 + 1)+1
+ true);
+}
+
+TEST(TestFastJSWasmCall_LazyDeopt_F32Result) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.DeclareCallback("callback", sigs.v_d(), "env");
+ tester.AddExportedFunction(k_f32_square_deopt);
+ tester.CallAndCheckWasmFunction<float>("f32_square_deopt", {v8_num(42.0)},
+ 43 * 43 + 1, true);
+}
+
+TEST(TestFastJSWasmCall_LazyDeopt_F64Result) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.DeclareCallback("callback", sigs.v_d(), "env");
+ tester.AddExportedFunction(k_f64_square_deopt);
+ tester.CallAndCheckWasmFunction<float>("f64_square_deopt", {v8_num(42.0)},
+ 43 * 43 + 1, true);
+}
+
+TEST(TestFastJSWasmCall_LazyDeopt_VoidResult) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.DeclareCallback("callback", sigs.v_d(), "env");
+ tester.AddExportedFunction(k_void_square_deopt);
+ tester.CallAndCheckWasmFunction("void_square_deopt", {v8_num(42.0)}, true);
+}
+
+// Eager deoptimization tests
+
+TEST(TestFastJSWasmCall_EagerDeopt) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_f32_square);
+ float result_after_deopt =
+ tester.CallAndCheckWasmFunctionWithEagerDeopt<float>(
+ "f32_square", "42", 42.0 * 42.0, "{x:1,y:2}");
+ CHECK(std::isnan(result_after_deopt));
+}
+
+// Exception handling tests
+
+TEST(TestFastJSWasmCall_Trap_i32) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_load_i32);
+ tester.CallAndCheckWithTryCatch("load_i32", {v8_int(0x7fffffff)});
+}
+
+TEST(TestFastJSWasmCall_Trap_i64) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_load_i64);
+ tester.CallAndCheckWithTryCatch("load_i64", {v8_bigint(0x7fffffff)});
+}
+
+TEST(TestFastJSWasmCall_Trap_f32) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_load_f32);
+ tester.CallAndCheckWithTryCatch("load_f32", {v8_num(0x7fffffff)});
+}
+
+TEST(TestFastJSWasmCall_Trap_f64) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_load_f64);
+ tester.CallAndCheckWithTryCatch("load_f64", {v8_num(0x7fffffff)});
+}
+
+TEST(TestFastJSWasmCall_Trap_void) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_store_i32);
+ tester.CallAndCheckWithTryCatch_void("store_i32", v8_int(0x7fffffff),
+ v8_int(42));
+}
+
+// BigInt argument tests
+
+TEST(TestFastJSWasmCall_I64ArgExpectsBigInt) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_i64_square);
+ tester.CallAndCheckExceptionCaught("i64_square", v8_int(42));
+}
+
+TEST(TestFastJSWasmCall_F32ArgDoesntExpectBigInt) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_f32_square);
+ tester.CallAndCheckExceptionCaught("f32_square", v8_bigint(42ll));
+}
+
+TEST(TestFastJSWasmCall_F64ArgDoesntExpectBigInt) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_f64_square);
+ tester.CallAndCheckExceptionCaught("f64_square", v8_bigint(42ll));
+}
+
+TEST(TestFastJSWasmCall_I32ArgDoesntExpectBigInt) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_i32_square);
+ tester.CallAndCheckExceptionCaught("i32_square", v8_bigint(42ll));
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-local-handles.cc b/deps/v8/test/cctest/test-local-handles.cc
index 90bee202c9..ca8d01bdb6 100644
--- a/deps/v8/test/cctest/test-local-handles.cc
+++ b/deps/v8/test/cctest/test-local-handles.cc
@@ -72,7 +72,6 @@ class LocalHandlesThread final : public v8::base::Thread {
};
TEST(CreateLocalHandles) {
- heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -100,7 +99,6 @@ TEST(CreateLocalHandles) {
}
TEST(CreateLocalHandlesWithoutLocalHandleScope) {
- heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope handle_scope(isolate);
@@ -109,7 +107,6 @@ TEST(CreateLocalHandlesWithoutLocalHandleScope) {
}
TEST(DereferenceLocalHandle) {
- heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -133,7 +130,6 @@ TEST(DereferenceLocalHandle) {
}
TEST(DereferenceLocalHandleFailsWhenDisallowed) {
- heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 247735ab39..fd1f91a8eb 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -504,6 +504,7 @@ UNINITIALIZED_TEST(Issue539892) {
UNINITIALIZED_TEST(LogAll) {
SETUP_FLAGS();
i::FLAG_log_all = true;
+ i::FLAG_log_deopt = true;
i::FLAG_log_api = true;
i::FLAG_turbo_inlining = false;
i::FLAG_log_internal_timer_events = true;
@@ -614,7 +615,7 @@ UNINITIALIZED_TEST(LogInterpretedFramesNativeStackWithSerialization) {
v8::Local<v8::String> source = v8_str(
"function eyecatcher() { return a * a; } return eyecatcher();");
v8::Local<v8::String> arg_str = v8_str("a");
- v8::ScriptOrigin origin(v8_str("filename"));
+ v8::ScriptOrigin origin(isolate, v8_str("filename"));
i::DisallowCompilation* no_compile_expected =
has_cache ? new i::DisallowCompilation(
@@ -709,7 +710,7 @@ UNINITIALIZED_TEST(ExternalCodeEventListenerInnerFunctions) {
code_event_handler.Enable();
v8::Local<v8::String> source_string = v8_str(source_cstring);
- v8::ScriptOrigin origin(v8_str("test"));
+ v8::ScriptOrigin origin(isolate1, v8_str("test"));
v8::ScriptCompiler::Source source(source_string, origin);
v8::Local<v8::UnboundScript> script =
v8::ScriptCompiler::CompileUnboundScript(isolate1, &source)
@@ -733,7 +734,7 @@ UNINITIALIZED_TEST(ExternalCodeEventListenerInnerFunctions) {
code_event_handler.Enable();
v8::Local<v8::String> source_string = v8_str(source_cstring);
- v8::ScriptOrigin origin(v8_str("test"));
+ v8::ScriptOrigin origin(isolate2, v8_str("test"));
v8::ScriptCompiler::Source source(source_string, origin, cache);
{
i::DisallowCompilation no_compile_expected(
@@ -802,7 +803,7 @@ UNINITIALIZED_TEST(ExternalCodeEventListenerWithInterpretedFramesNativeStack) {
UNINITIALIZED_TEST(TraceMaps) {
SETUP_FLAGS();
- i::FLAG_trace_maps = true;
+ i::FLAG_log_maps = true;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
@@ -837,7 +838,7 @@ UNINITIALIZED_TEST(TraceMaps) {
CHECK(logger.ContainsLine({"map,Transition", ",0x"}));
CHECK(logger.ContainsLine({"map-details", ",0x"}));
}
- i::FLAG_trace_maps = false;
+ i::FLAG_log_maps = false;
isolate->Dispose();
}
@@ -895,7 +896,7 @@ UNINITIALIZED_TEST(LogMapsDetailsStartup) {
}
// Test that all Map details from Maps in the snapshot are logged properly.
SETUP_FLAGS();
- i::FLAG_trace_maps = true;
+ i::FLAG_log_maps = true;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
@@ -917,7 +918,7 @@ UNINITIALIZED_TEST(LogMapsDetailsCode) {
}
SETUP_FLAGS();
i::FLAG_retain_maps_for_n_gc = 0xFFFFFFF;
- i::FLAG_trace_maps = true;
+ i::FLAG_log_maps = true;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
@@ -1014,7 +1015,7 @@ UNINITIALIZED_TEST(LogMapsDetailsContexts) {
}
// Test that all Map details from Maps in the snapshot are logged properly.
SETUP_FLAGS();
- i::FLAG_trace_maps = true;
+ i::FLAG_log_maps = true;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
@@ -1132,28 +1133,28 @@ UNINITIALIZED_TEST(LogFunctionEvents) {
// Step 2: compiling top-level script and eager functions
// - Compiling script without name.
- {"function,compile,"},
- {"function,compile,", ",eagerFunction"},
+ {"function,interpreter,"},
+ {"function,interpreter,", ",eagerFunction"},
// Step 3: start executing script
// Step 4. - lazy parse, lazy compiling and execute skipped functions
// - execute eager functions.
{"function,parse-function,", ",lazyFunction"},
- {"function,compile-lazy,", ",lazyFunction"},
+ {"function,interpreter-lazy,", ",lazyFunction"},
{"function,first-execution,", ",lazyFunction"},
{"function,parse-function,", ",lazyInnerFunction"},
- {"function,compile-lazy,", ",lazyInnerFunction"},
+ {"function,interpreter-lazy,", ",lazyInnerFunction"},
{"function,first-execution,", ",lazyInnerFunction"},
{"function,first-execution,", ",eagerFunction"},
{"function,parse-function,", ",Foo"},
- {"function,compile-lazy,", ",Foo"},
+ {"function,interpreter-lazy,", ",Foo"},
{"function,first-execution,", ",Foo"},
{"function,parse-function,", ",Foo.foo"},
- {"function,compile-lazy,", ",Foo.foo"},
+ {"function,interpreter-lazy,", ",Foo.foo"},
{"function,first-execution,", ",Foo.foo"},
};
CHECK(logger.ContainsLinesInOrder(lines, start));
@@ -1191,3 +1192,101 @@ UNINITIALIZED_TEST(BuiltinsNotLoggedAsLazyCompile) {
}
isolate->Dispose();
}
+
+TEST(BytecodeFlushEvents) {
+ SETUP_FLAGS();
+
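+ // Keep foo() unoptimized so it stays on bytecode and remains eligible for
+ // bytecode flushing.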
+#ifndef V8_LITE_MODE
+ i::FLAG_opt = false;
+ i::FLAG_always_opt = false;
+ i::FLAG_optimize_for_size = false;
+#endif // V8_LITE_MODE
+ i::FLAG_flush_bytecode = true;
+ i::FLAG_allow_natives_syntax = true;
+
+ ManualGCScope manual_gc_scope;
+
+ v8::Isolate* isolate = CcTest::isolate();
+ i::Isolate* i_isolate = CcTest::i_isolate();
+ i::Factory* factory = i_isolate->factory();
+
+ struct FakeCodeEventLogger : public i::CodeEventLogger {
+ explicit FakeCodeEventLogger(i::Isolate* isolate)
+ : CodeEventLogger(isolate) {}
+
+ void CodeMoveEvent(i::AbstractCode from, i::AbstractCode to) override {}
+ void CodeDisableOptEvent(i::Handle<i::AbstractCode> code,
+ i::Handle<i::SharedFunctionInfo> shared) override {
+ }
+
+ void BytecodeFlushEvent(Address compiled_data_start) override {
+ // We only expect a single flush.
+ CHECK_EQ(flushed_compiled_data_start, i::kNullAddress);
+ flushed_compiled_data_start = compiled_data_start;
+ }
+
+ void LogRecordedBuffer(i::Handle<i::AbstractCode> code,
+ i::MaybeHandle<i::SharedFunctionInfo> maybe_shared,
+ const char* name, int length) override {}
+ void LogRecordedBuffer(const i::wasm::WasmCode* code, const char* name,
+ int length) override {}
+
+ i::Address flushed_compiled_data_start = i::kNullAddress;
+ };
+
+ FakeCodeEventLogger code_event_logger(i_isolate);
+
+ {
+ ScopedLoggerInitializer logger(isolate);
+ logger.logger()->AddCodeEventListener(&code_event_logger);
+
+ const char* source =
+ "function foo() {"
+ " var x = 42;"
+ " var y = 42;"
+ " var z = x + y;"
+ "};"
+ "foo()";
+ i::Handle<i::String> foo_name = factory->InternalizeUtf8String("foo");
+
+ // This compile will add the code to the compilation cache.
+ {
+ v8::HandleScope scope(isolate);
+ CompileRun(source);
+ }
+
+ // Check function is compiled.
+ i::Handle<i::Object> func_value =
+ i::Object::GetProperty(i_isolate, i_isolate->global_object(), foo_name)
+ .ToHandleChecked();
+ CHECK(func_value->IsJSFunction());
+ i::Handle<i::JSFunction> function =
+ i::Handle<i::JSFunction>::cast(func_value);
+ CHECK(function->shared().is_compiled());
+
+ // The code will survive at least two GCs.
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
+ CHECK(function->shared().is_compiled());
+ CHECK_EQ(code_event_logger.flushed_compiled_data_start, i::kNullAddress);
+
+ // Get the start address of the compiled data before flushing.
+ i::HeapObject compiled_data =
+ function->shared().GetBytecodeArray(i_isolate);
+ i::Address compiled_data_start = compiled_data.address();
+
+ // Simulate several GCs that use full marking.
+ const int kAgingThreshold = 6;
+ for (int i = 0; i < kAgingThreshold; i++) {
+ CcTest::CollectAllGarbage();
+ }
+
+ // foo should no longer be in the compilation cache
+ CHECK(!function->shared().is_compiled());
+ CHECK(!function->is_compiled());
+
+ // Verify that foo() was in fact flushed.
+ CHECK_EQ(code_event_logger.flushed_compiled_data_start,
+ compiled_data_start);
+ }
+}
diff --git a/deps/v8/test/cctest/test-macro-assembler-riscv64.cc b/deps/v8/test/cctest/test-macro-assembler-riscv64.cc
new file mode 100644
index 0000000000..a5bb94166c
--- /dev/null
+++ b/deps/v8/test/cctest/test-macro-assembler-riscv64.cc
@@ -0,0 +1,1556 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include <iostream> // NOLINT(readability/streams)
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/simulator.h"
+#include "src/init/v8.h"
+#include "src/objects/heap-number.h"
+#include "src/objects/objects-inl.h"
+#include "src/utils/ostreams.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/value-helper.h"
+#include "test/cctest/test-helper-riscv64.h"
+#include "test/common/assembler-tester.h"
+
+namespace v8 {
+namespace internal {
+
+const float qnan_f = std::numeric_limits<float>::quiet_NaN();
+const float snan_f = std::numeric_limits<float>::signaling_NaN();
+const double qnan_d = std::numeric_limits<double>::quiet_NaN();
+const double snan_d = std::numeric_limits<double>::signaling_NaN();
+
+const float inf_f = std::numeric_limits<float>::infinity();
+const double inf_d = std::numeric_limits<double>::infinity();
+const float minf_f = -inf_f;
+const double minf_d = -inf_d;
+
+using FV = void*(int64_t x, int64_t y, int p2, int p3, int p4);
+using F1 = void*(int x, int p1, int p2, int p3, int p4);
+using F3 = void*(void* p, int p1, int p2, int p3, int p4);
+using F4 = void*(void* p0, void* p1, int p2, int p3, int p4);
+
+#define __ masm.
+
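+// Assembles and runs CalcScaledAddress(a0, a0, a1, sa) and returns the
+// computed address, i.e. rt + (rs << sa).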
+static uint64_t run_CalcScaledAddress(uint64_t rt, uint64_t rs, int8_t sa) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ auto fn = [sa](MacroAssembler& masm) {
+ __ CalcScaledAddress(a0, a0, a1, sa);
+ };
+ auto f = AssembleCode<FV>(fn);
+
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(rt, rs, 0, 0, 0));
+
+ return res;
+}
+
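+// Copies |value| into the buffer at |in_offset|, runs generated code that
+// performs an unaligned load/store pair, and returns the value written back
+// at |out_offset|.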
+template <typename VTYPE, typename Func>
+VTYPE run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
+ VTYPE value, Func GenerateUnalignedInstructionFunc) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ auto fn = [in_offset, out_offset,
+ GenerateUnalignedInstructionFunc](MacroAssembler& masm) {
+ GenerateUnalignedInstructionFunc(masm, in_offset, out_offset);
+ };
+ auto f = AssembleCode<int32_t(char*)>(fn);
+
+ MemCopy(memory_buffer + in_offset, &value, sizeof(VTYPE));
+ f.Call(memory_buffer);
+ VTYPE res;
+ MemCopy(&res, memory_buffer + out_offset, sizeof(VTYPE));
+
+ return res;
+}
+
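+// Base offsets and small increments that are combined below to form
+// unaligned addresses around the middle of the test buffer.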
+static const std::vector<int32_t> unsigned_test_offset() {
+ static const int32_t kValues[] = {// offsets
+ -132 * KB, -21 * KB, 0, 19 * KB, 135 * KB};
+ return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<int32_t> unsigned_test_offset_increment() {
+ static const int32_t kValues[] = {-7, -6, -5, -4, -3, -2, -1, 0,
+ 1, 2, 3, 4, 5, 6, 7};
+ return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+TEST(LoadConstants) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+
+ int64_t refConstants[64];
+ int64_t result[64];
+
+ int64_t mask = 1;
+ for (int i = 0; i < 64; i++) {
+ refConstants[i] = ~(mask << i);
+ }
+
+ auto fn = [&refConstants](MacroAssembler& masm) {
+ __ mv(a4, a0);
+ for (int i = 0; i < 64; i++) {
+ // Load constant.
+ __ li(a5, Operand(refConstants[i]));
+ __ Sd(a5, MemOperand(a4));
+ __ Add64(a4, a4, Operand(kPointerSize));
+ }
+ };
+ auto f = AssembleCode<FV>(fn);
+
+ (void)f.Call(reinterpret_cast<int64_t>(result), 0, 0, 0, 0);
+ // Check results.
+ for (int i = 0; i < 64; i++) {
+ CHECK(refConstants[i] == result[i]);
+ }
+}
+
+TEST(LoadAddress) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes);
+ Label to_jump, skip;
+ __ mv(a4, a0);
+
+ __ Branch(&skip);
+ __ bind(&to_jump);
+ __ nop();
+ __ nop();
+ __ jr(ra);
+ __ nop();
+ __ bind(&skip);
+ __ li(a4,
+ Operand(masm.jump_address(&to_jump),
+ RelocInfo::INTERNAL_REFERENCE_ENCODED),
+ ADDRESS_LOAD);
+ int check_size = masm.InstructionsGeneratedSince(&skip);
+ // NOTE (RISCV): li currently generates 6 instructions; if the sequence
+ // changes, the CHECK_EQ value below must be adjusted as well.
+ CHECK_EQ(6, check_size);
+ __ jr(a4);
+ __ nop();
+ __ stop();
+ __ stop();
+ __ stop();
+ __ stop();
+ __ stop();
+
+ CodeDesc desc;
+ masm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<FV>::FromCode(*code);
+
+ (void)f.Call(0, 0, 0, 0, 0);
+ // Check results.
+}
+
+TEST(jump_tables4) {
+ // Similar to test-assembler-mips jump_tables1, with an extra test for the
+ // branch trampoline required before emission of the dd table (where
+ // trampolines are blocked), and for the proper transition to long-branch
+ // mode.
+ // Regression test for v8:4294.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ const int kNumCases = 128;
+ int values[kNumCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ Label labels[kNumCases];
+ Label near_start, end, done;
+
+ __ Push(ra);
+ __ mv(a1, zero_reg);
+
+ __ Branch(&end);
+ __ bind(&near_start);
+
+ // Generate slightly less than 32K instructions, which will soon require a
+ // trampoline for branch distance fixup.
+ for (int i = 0; i < 32768 - 256; ++i) {
+ __ addi(a1, a1, 1);
+ }
+
+ __ GenerateSwitchTable(a0, kNumCases,
+ [&labels](size_t i) { return labels + i; });
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ __ RV_li(a0, values[i]);
+ __ Branch(&done);
+ }
+
+ __ bind(&done);
+ __ Pop(ra);
+ __ jr(ra);
+
+ __ bind(&end);
+ __ Branch(&near_start);
+
+ CodeDesc desc;
+ masm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ for (int i = 0; i < kNumCases; ++i) {
+ int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
+ // ::printf("f(%d) = %" PRId64 "\n", i, res);
+ CHECK_EQ(values[i], res);
+ }
+}
+
+TEST(jump_tables6) {
+ // Similar to test-assembler-mips jump_tables1, with an extra test for the
+ // branch trampoline required after emission of the dd table (where
+ // trampolines are blocked). This test checks that the number of actually
+ // generated instructions is greater than the number of instructions counted
+ // from the code, since we expect a trampoline to be generated in this case
+ // (when the number of kFillInstr instructions is close to 32K).
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ const int kSwitchTableCases = 40;
+
+ const int kMaxBranchOffset = Assembler::kMaxBranchOffset;
+ const int kTrampolineSlotsSize = Assembler::kTrampolineSlotsSize;
+ const int kSwitchTablePrologueSize = MacroAssembler::kSwitchTablePrologueSize;
+
+ const int kMaxOffsetForTrampolineStart =
+ kMaxBranchOffset - 16 * kTrampolineSlotsSize;
+ const int kFillInstr = (kMaxOffsetForTrampolineStart / kInstrSize) -
+ (kSwitchTablePrologueSize + 2 * kSwitchTableCases) -
+ 20;
+
+ int values[kSwitchTableCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ Label labels[kSwitchTableCases];
+ Label near_start, end, done;
+
+ __ Push(ra);
+ __ mv(a1, zero_reg);
+
+ int offs1 = masm.pc_offset();
+ int gen_insn = 0;
+
+ __ Branch(&end);
+ gen_insn += 1;
+ __ bind(&near_start);
+
+ // Generate slightly less than 32K instructions, which will soon require a
+ // trampoline for branch distance fixup.
+ for (int i = 0; i < kFillInstr; ++i) {
+ __ addi(a1, a1, 1);
+ }
+ gen_insn += kFillInstr;
+
+ __ GenerateSwitchTable(a0, kSwitchTableCases,
+ [&labels](size_t i) { return labels + i; });
+ gen_insn += (kSwitchTablePrologueSize + 2 * kSwitchTableCases);
+
+ for (int i = 0; i < kSwitchTableCases; ++i) {
+ __ bind(&labels[i]);
+ __ li(a0, Operand(values[i]));
+ __ Branch(&done);
+ }
+ gen_insn += 3 * kSwitchTableCases;
+
+ // If the offset from here to the first branch instruction is greater than
+ // the maximum offset allowed for the trampoline start ...
+ CHECK_LT(kMaxOffsetForTrampolineStart, masm.pc_offset() - offs1);
+ // ... then the number of generated instructions must be greater than
+ // "gen_insn", as we expect a trampoline to have been generated.
+ CHECK_LT(gen_insn, (masm.pc_offset() - offs1) / kInstrSize);
+
+ __ bind(&done);
+ __ Pop(ra);
+ __ jr(ra);
+ __ nop();
+
+ __ bind(&end);
+ __ Branch(&near_start);
+
+ CodeDesc desc;
+ masm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ for (int i = 0; i < kSwitchTableCases; ++i) {
+ int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
+ // ::printf("f(%d) = %" PRId64 "\n", i, res);
+ CHECK_EQ(values[i], res);
+ }
+}
+
+TEST(CalcScaledAddress) {
+ CcTest::InitializeVM();
+ struct TestCaseLsa {
+ int64_t rt;
+ int64_t rs;
+ uint8_t sa;
+ uint64_t expected_res;
+ };
+
+ struct TestCaseLsa tc[] = {// rt, rs, sa, expected_res
+ {0x4, 0x1, 1, 0x6},
+ {0x4, 0x1, 2, 0x8},
+ {0x4, 0x1, 3, 0xC},
+ {0x4, 0x1, 4, 0x14},
+ {0x4, 0x1, 5, 0x24},
+ {0x0, 0x1, 1, 0x2},
+ {0x0, 0x1, 2, 0x4},
+ {0x0, 0x1, 3, 0x8},
+ {0x0, 0x1, 4, 0x10},
+ {0x0, 0x1, 5, 0x20},
+ {0x4, 0x0, 1, 0x4},
+ {0x4, 0x0, 2, 0x4},
+ {0x4, 0x0, 3, 0x4},
+ {0x4, 0x0, 4, 0x4},
+ {0x4, 0x0, 5, 0x4},
+
+ // Shift overflow.
+ {0x4, INT64_MAX, 1, 0x2},
+ {0x4, INT64_MAX >> 1, 2, 0x0},
+ {0x4, INT64_MAX >> 2, 3, 0xFFFFFFFFFFFFFFFC},
+ {0x4, INT64_MAX >> 3, 4, 0xFFFFFFFFFFFFFFF4},
+ {0x4, INT64_MAX >> 4, 5, 0xFFFFFFFFFFFFFFE4},
+
+ // Signed addition overflow.
+ {INT64_MAX - 1, 0x1, 1, 0x8000000000000000},
+ {INT64_MAX - 3, 0x1, 2, 0x8000000000000000},
+ {INT64_MAX - 7, 0x1, 3, 0x8000000000000000},
+ {INT64_MAX - 15, 0x1, 4, 0x8000000000000000},
+ {INT64_MAX - 31, 0x1, 5, 0x8000000000000000},
+
+ // Addition overflow.
+ {-2, 0x1, 1, 0x0},
+ {-4, 0x1, 2, 0x0},
+ {-8, 0x1, 3, 0x0},
+ {-16, 0x1, 4, 0x0},
+ {-32, 0x1, 5, 0x0}};
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseLsa);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t res = run_CalcScaledAddress(tc[i].rt, tc[i].rs, tc[i].sa);
+ CHECK_EQ(tc[i].expected_res, res);
+ }
+}
+
+static const std::vector<uint32_t> cvt_trunc_uint32_test_values() {
+ static const uint32_t kValues[] = {0x00000000, 0x00000001, 0x00FFFF00,
+ 0x7FFFFFFF, 0x80000000, 0x80000001,
+ 0x80FFFF00, 0x8FFFFFFF};
+ return std::vector<uint32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<int32_t> cvt_trunc_int32_test_values() {
+ static const int32_t kValues[] = {
+ static_cast<int32_t>(0x00000000), static_cast<int32_t>(0x00000001),
+ static_cast<int32_t>(0x00FFFF00), static_cast<int32_t>(0x7FFFFFFF),
+ static_cast<int32_t>(0x80000000), static_cast<int32_t>(0x80000001),
+ static_cast<int32_t>(0x80FFFF00), static_cast<int32_t>(0x8FFFFFFF),
+ static_cast<int32_t>(0xFFFFFFFF)};
+ return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<uint64_t> cvt_trunc_uint64_test_values() {
+ static const uint64_t kValues[] = {
+ 0x0000000000000000, 0x0000000000000001, 0x0000FFFFFFFF0000,
+ 0x7FFFFFFFFFFFFFFF, 0x8000000000000000, 0x8000000000000001,
+ 0x8000FFFFFFFF0000, 0x8FFFFFFFFFFFFFFF /*, 0xFFFFFFFFFFFFFFFF*/};
+ return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<int64_t> cvt_trunc_int64_test_values() {
+ static const int64_t kValues[] = {static_cast<int64_t>(0x0000000000000000),
+ static_cast<int64_t>(0x0000000000000001),
+ static_cast<int64_t>(0x0000FFFFFFFF0000),
+ // static_cast<int64_t>(0x7FFFFFFFFFFFFFFF),
+ static_cast<int64_t>(0x8000000000000000),
+ static_cast<int64_t>(0x8000000000000001),
+ static_cast<int64_t>(0x8000FFFFFFFF0000),
+ static_cast<int64_t>(0x8FFFFFFFFFFFFFFF),
+ static_cast<int64_t>(0xFFFFFFFFFFFFFFFF)};
+ return std::vector<int64_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+#define FOR_INPUTS3(ctype, var, test_vector) \
+ std::vector<ctype> var##_vec = test_vector(); \
+ for (ctype var : var##_vec)
+
+#define FOR_INT32_INPUTS3(var, test_vector) \
+ FOR_INPUTS3(int32_t, var, test_vector)
+#define FOR_INT64_INPUTS3(var, test_vector) \
+ FOR_INPUTS3(int64_t, var, test_vector)
+#define FOR_UINT32_INPUTS3(var, test_vector) \
+ FOR_INPUTS3(uint32_t, var, test_vector)
+#define FOR_UINT64_INPUTS3(var, test_vector) \
+ FOR_INPUTS3(uint64_t, var, test_vector)
+
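+// Iterates a test vector from both ends at once: |var1| walks forward while
+// |var2| walks backward, pairing values used as input and output offsets.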
+#define FOR_TWO_INPUTS(ctype, var1, var2, test_vector) \
+ std::vector<ctype> var##_vec = test_vector(); \
+ std::vector<ctype>::iterator var1; \
+ std::vector<ctype>::reverse_iterator var2; \
+ for (var1 = var##_vec.begin(), var2 = var##_vec.rbegin(); \
+ var1 != var##_vec.end(); ++var1, ++var2)
+
+#define FOR_INT32_TWO_INPUTS(var1, var2, test_vector) \
+ FOR_TWO_INPUTS(int32_t, var1, var2, test_vector)
+
+TEST(Cvt_s_uw_Trunc_uw_s) {
+ CcTest::InitializeVM();
+ auto fn = [](MacroAssembler& masm) {
+ __ Cvt_s_uw(fa0, a0);
+ __ Trunc_uw_s(a0, fa0);
+ };
+ FOR_UINT32_INPUTS3(i, cvt_trunc_uint32_test_values) {
+ // Some integers cannot be represented exactly in float, so the input may
+ // not directly match the return value of GenAndRunTest.
+ CHECK_EQ(static_cast<uint32_t>(static_cast<float>(i)),
+ GenAndRunTest<uint32_t>(i, fn));
+ }
+}
+
+TEST(Cvt_s_ul_Trunc_ul_s) {
+ CcTest::InitializeVM();
+ auto fn = [](MacroAssembler& masm) {
+ __ Cvt_s_ul(fa0, a0);
+ __ Trunc_ul_s(a0, fa0);
+ };
+ FOR_UINT64_INPUTS3(i, cvt_trunc_uint64_test_values) {
+ CHECK_EQ(static_cast<uint64_t>(static_cast<float>(i)),
+ GenAndRunTest<uint64_t>(i, fn));
+ }
+}
+
+TEST(Cvt_d_ul_Trunc_ul_d) {
+ CcTest::InitializeVM();
+ auto fn = [](MacroAssembler& masm) {
+ __ Cvt_d_ul(fa0, a0);
+ __ Trunc_ul_d(a0, fa0);
+ };
+ FOR_UINT64_INPUTS3(i, cvt_trunc_uint64_test_values) {
+ CHECK_EQ(static_cast<uint64_t>(static_cast<double>(i)),
+ GenAndRunTest<uint64_t>(i, fn));
+ }
+}
+
+TEST(cvt_d_l_Trunc_l_d) {
+ CcTest::InitializeVM();
+ auto fn = [](MacroAssembler& masm) {
+ __ fcvt_d_l(fa0, a0);
+ __ Trunc_l_d(a0, fa0);
+ };
+ FOR_INT64_INPUTS3(i, cvt_trunc_int64_test_values) {
+ CHECK_EQ(static_cast<int64_t>(static_cast<double>(i)),
+ GenAndRunTest<int64_t>(i, fn));
+ }
+}
+
+TEST(cvt_d_w_Trunc_w_d) {
+ CcTest::InitializeVM();
+ auto fn = [](MacroAssembler& masm) {
+ __ fcvt_d_w(fa0, a0);
+ __ Trunc_w_d(a0, fa0);
+ };
+ FOR_INT32_INPUTS3(i, cvt_trunc_int32_test_values) {
+ CHECK_EQ(static_cast<int32_t>(static_cast<double>(i)),
+ GenAndRunTest<int32_t>(i, fn));
+ }
+}
+
+static const std::vector<int64_t> overflow_int64_test_values() {
+ static const int64_t kValues[] = {static_cast<int64_t>(0xF000000000000000),
+ static_cast<int64_t>(0x0000000000000001),
+ static_cast<int64_t>(0xFF00000000000000),
+ static_cast<int64_t>(0x0000F00111111110),
+ static_cast<int64_t>(0x0F00001000000000),
+ static_cast<int64_t>(0x991234AB12A96731),
+ static_cast<int64_t>(0xB0FFFF0F0F0F0F01),
+ static_cast<int64_t>(0x00006FFFFFFFFFFF),
+ static_cast<int64_t>(0xFFFFFFFFFFFFFFFF)};
+ return std::vector<int64_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
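+// Cross-checks AddOverflow64, SubOverflow64 and MulOverflow32 against the
+// base::bits reference implementations for all pairs of test inputs.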
+TEST(OverflowInstructions) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+
+ struct T {
+ int64_t lhs;
+ int64_t rhs;
+ int64_t output_add;
+ int64_t output_add2;
+ int64_t output_sub;
+ int64_t output_sub2;
+ int64_t output_mul;
+ int64_t output_mul2;
+ int64_t overflow_add;
+ int64_t overflow_add2;
+ int64_t overflow_sub;
+ int64_t overflow_sub2;
+ int64_t overflow_mul;
+ int64_t overflow_mul2;
+ } t;
+
+ FOR_INT64_INPUTS3(i, overflow_int64_test_values) {
+ FOR_INT64_INPUTS3(j, overflow_int64_test_values) {
+ auto ii = i;
+ auto jj = j;
+ int64_t expected_add, expected_sub;
+ int32_t ii32 = static_cast<int32_t>(ii);
+ int32_t jj32 = static_cast<int32_t>(jj);
+ int32_t expected_mul;
+ int64_t expected_add_ovf, expected_sub_ovf, expected_mul_ovf;
+
+ auto fn = [](MacroAssembler& masm) {
+ __ Ld(t0, MemOperand(a0, offsetof(T, lhs)));
+ __ Ld(t1, MemOperand(a0, offsetof(T, rhs)));
+
+ __ AddOverflow64(t2, t0, Operand(t1), a1);
+ __ Sd(t2, MemOperand(a0, offsetof(T, output_add)));
+ __ Sd(a1, MemOperand(a0, offsetof(T, overflow_add)));
+ __ mv(a1, zero_reg);
+ __ AddOverflow64(t0, t0, Operand(t1), a1);
+ __ Sd(t0, MemOperand(a0, offsetof(T, output_add2)));
+ __ Sd(a1, MemOperand(a0, offsetof(T, overflow_add2)));
+
+ __ Ld(t0, MemOperand(a0, offsetof(T, lhs)));
+ __ Ld(t1, MemOperand(a0, offsetof(T, rhs)));
+
+ __ SubOverflow64(t2, t0, Operand(t1), a1);
+ __ Sd(t2, MemOperand(a0, offsetof(T, output_sub)));
+ __ Sd(a1, MemOperand(a0, offsetof(T, overflow_sub)));
+ __ mv(a1, zero_reg);
+ __ SubOverflow64(t0, t0, Operand(t1), a1);
+ __ Sd(t0, MemOperand(a0, offsetof(T, output_sub2)));
+ __ Sd(a1, MemOperand(a0, offsetof(T, overflow_sub2)));
+
+ __ Ld(t0, MemOperand(a0, offsetof(T, lhs)));
+ __ Ld(t1, MemOperand(a0, offsetof(T, rhs)));
+ __ SignExtendWord(t0, t0);
+ __ SignExtendWord(t1, t1);
+ __ MulOverflow32(t2, t0, Operand(t1), a1);
+ __ Sd(t2, MemOperand(a0, offsetof(T, output_mul)));
+ __ Sd(a1, MemOperand(a0, offsetof(T, overflow_mul)));
+ __ mv(a1, zero_reg);
+ __ MulOverflow32(t0, t0, Operand(t1), a1);
+ __ Sd(t0, MemOperand(a0, offsetof(T, output_mul2)));
+ __ Sd(a1, MemOperand(a0, offsetof(T, overflow_mul2)));
+ };
+ auto f = AssembleCode<F3>(fn);
+
+ t.lhs = ii;
+ t.rhs = jj;
+ f.Call(&t, 0, 0, 0, 0);
+
+ expected_add_ovf = base::bits::SignedAddOverflow64(ii, jj, &expected_add);
+ expected_sub_ovf = base::bits::SignedSubOverflow64(ii, jj, &expected_sub);
+ expected_mul_ovf =
+ base::bits::SignedMulOverflow32(ii32, jj32, &expected_mul);
+
+ CHECK_EQ(expected_add_ovf, t.overflow_add < 0);
+ CHECK_EQ(expected_sub_ovf, t.overflow_sub < 0);
+ CHECK_EQ(expected_mul_ovf, t.overflow_mul != 0);
+
+ CHECK_EQ(t.overflow_add, t.overflow_add2);
+ CHECK_EQ(t.overflow_sub, t.overflow_sub2);
+ CHECK_EQ(t.overflow_mul, t.overflow_mul2);
+
+ CHECK_EQ(expected_add, t.output_add);
+ CHECK_EQ(expected_add, t.output_add2);
+ CHECK_EQ(expected_sub, t.output_sub);
+ CHECK_EQ(expected_sub, t.output_sub2);
+ if (!expected_mul_ovf) {
+ CHECK_EQ(expected_mul, t.output_mul);
+ CHECK_EQ(expected_mul, t.output_mul2);
+ }
+ }
+ }
+}
+
+TEST(min_max_nan) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ struct TestFloat {
+ double a;
+ double b;
+ double c;
+ double d;
+ float e;
+ float f;
+ float g;
+ float h;
+ } test;
+
+ const int kTableLength = 13;
+
+ double inputsa[kTableLength] = {2.0, 3.0, -0.0, 0.0, 42.0,
+ inf_d, minf_d, inf_d, qnan_d, 3.0,
+ inf_d, qnan_d, qnan_d};
+ double inputsb[kTableLength] = {3.0, 2.0, 0.0, -0.0, inf_d,
+ 42.0, inf_d, minf_d, 3.0, qnan_d,
+ qnan_d, inf_d, qnan_d};
+ double outputsdmin[kTableLength] = {2.0, 2.0, -0.0, -0.0, 42.0,
+ 42.0, minf_d, minf_d, qnan_d, qnan_d,
+ qnan_d, qnan_d, qnan_d};
+ double outputsdmax[kTableLength] = {3.0, 3.0, 0.0, 0.0, inf_d,
+ inf_d, inf_d, inf_d, qnan_d, qnan_d,
+ qnan_d, qnan_d, qnan_d};
+
+ float inputse[kTableLength] = {2.0, 3.0, -0.0, 0.0, 42.0,
+ inf_f, minf_f, inf_f, qnan_f, 3.0,
+ inf_f, qnan_f, qnan_f};
+ float inputsf[kTableLength] = {3.0, 2.0, 0.0, -0.0, inf_f,
+ 42.0, inf_f, minf_f, 3.0, qnan_f,
+ qnan_f, inf_f, qnan_f};
+ float outputsfmin[kTableLength] = {2.0, 2.0, -0.0, -0.0, 42.0,
+ 42.0, minf_f, minf_f, qnan_f, qnan_f,
+ qnan_f, qnan_f, qnan_f};
+ float outputsfmax[kTableLength] = {3.0, 3.0, 0.0, 0.0, inf_f,
+ inf_f, inf_f, inf_f, qnan_f, qnan_f,
+ qnan_f, qnan_f, qnan_f};
+
+ auto fn = [](MacroAssembler& masm) {
+ __ push(s6);
+ __ InitializeRootRegister();
+ __ LoadDouble(fa3, MemOperand(a0, offsetof(TestFloat, a)));
+ __ LoadDouble(fa4, MemOperand(a0, offsetof(TestFloat, b)));
+ __ LoadFloat(fa1, MemOperand(a0, offsetof(TestFloat, e)));
+ __ LoadFloat(fa2, MemOperand(a0, offsetof(TestFloat, f)));
+ __ Float64Min(fa5, fa3, fa4);
+ __ Float64Max(fa6, fa3, fa4);
+ __ Float32Min(fa7, fa1, fa2);
+ __ Float32Max(fa0, fa1, fa2);
+ __ StoreDouble(fa5, MemOperand(a0, offsetof(TestFloat, c)));
+ __ StoreDouble(fa6, MemOperand(a0, offsetof(TestFloat, d)));
+ __ StoreFloat(fa7, MemOperand(a0, offsetof(TestFloat, g)));
+ __ StoreFloat(fa0, MemOperand(a0, offsetof(TestFloat, h)));
+ __ pop(s6);
+ };
+ auto f = AssembleCode<F3>(fn);
+
+ for (int i = 0; i < kTableLength; i++) {
+ test.a = inputsa[i];
+ test.b = inputsb[i];
+ test.e = inputse[i];
+ test.f = inputsf[i];
+
+ f.Call(&test, 0, 0, 0, 0);
+
+ CHECK_EQ(0, memcmp(&test.c, &outputsdmin[i], sizeof(test.c)));
+ CHECK_EQ(0, memcmp(&test.d, &outputsdmax[i], sizeof(test.d)));
+ CHECK_EQ(0, memcmp(&test.g, &outputsfmin[i], sizeof(test.g)));
+ CHECK_EQ(0, memcmp(&test.h, &outputsfmax[i], sizeof(test.h)));
+ }
+}
+
+TEST(Ulh) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ auto fn1 = [](MacroAssembler& masm, int32_t in_offset, int32_t out_offset) {
+ __ Ulh(t0, MemOperand(a0, in_offset));
+ __ Ush(t0, MemOperand(a0, out_offset));
+ };
+
+ auto fn2 = [](MacroAssembler& masm, int32_t in_offset, int32_t out_offset) {
+ __ mv(t0, a0);
+ __ Ulh(a0, MemOperand(a0, in_offset));
+ __ Ush(a0, MemOperand(t0, out_offset));
+ };
+
+ auto fn3 = [](MacroAssembler& masm, int32_t in_offset, int32_t out_offset) {
+ __ mv(t0, a0);
+ __ Ulhu(a0, MemOperand(a0, in_offset));
+ __ Ush(a0, MemOperand(t0, out_offset));
+ };
+
+ auto fn4 = [](MacroAssembler& masm, int32_t in_offset, int32_t out_offset) {
+ __ Ulhu(t0, MemOperand(a0, in_offset));
+ __ Ush(t0, MemOperand(a0, out_offset));
+ };
+
+ FOR_UINT16_INPUTS(i) {
+ FOR_INT32_TWO_INPUTS(j1, j2, unsigned_test_offset) {
+ FOR_INT32_TWO_INPUTS(k1, k2, unsigned_test_offset_increment) {
+ auto value = i;
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+ CHECK_EQ(value, run_Unaligned(buffer_middle, in_offset, out_offset,
+ value, fn1));
+
+ // test when loaded value overwrites base-register of load address
+ CHECK_EQ(value, run_Unaligned(buffer_middle, in_offset, out_offset,
+ value, fn2));
+
+ // test when loaded value overwrites base-register of load address
+ CHECK_EQ(value, run_Unaligned(buffer_middle, in_offset, out_offset,
+ value, fn3));
+
+ CHECK_EQ(value, run_Unaligned(buffer_middle, in_offset, out_offset,
+ value, fn4));
+ }
+ }
+ }
+}
+
+TEST(Ulh_bitextension) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ auto fn = [](MacroAssembler& masm, int32_t in_offset, int32_t out_offset) {
+ Label success, fail, end, different;
+ __ Ulh(t0, MemOperand(a0, in_offset));
+ __ Ulhu(t1, MemOperand(a0, in_offset));
+ __ Branch(&different, ne, t0, Operand(t1));
+
+ // If the signed and unsigned values are the same, check
+ // that the upper bits are zero.
+ __ sraiw(t0, t0, 15);
+ __ Branch(&success, eq, t0, Operand(zero_reg));
+ __ Branch(&fail);
+
+ // If the signed and unsigned values are different,
+ // check that the upper bits are complementary.
+ __ bind(&different);
+ __ sraiw(t1, t1, 15);
+ __ Branch(&fail, ne, t1, Operand(1));
+ __ sraiw(t0, t0, 15);
+ __ addiw(t0, t0, 1);
+ __ Branch(&fail, ne, t0, Operand(zero_reg));
+ // Fall through to success
+
+ __ bind(&success);
+ __ Ulh(t0, MemOperand(a0, in_offset));
+ __ Ush(t0, MemOperand(a0, out_offset));
+ __ Branch(&end);
+ __ bind(&fail);
+ __ Ush(zero_reg, MemOperand(a0, out_offset));
+ __ bind(&end);
+ };
+
+ FOR_UINT16_INPUTS(i) {
+ FOR_INT32_TWO_INPUTS(j1, j2, unsigned_test_offset) {
+ FOR_INT32_TWO_INPUTS(k1, k2, unsigned_test_offset_increment) {
+ auto value = i;
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+ CHECK_EQ(value, run_Unaligned(buffer_middle, in_offset, out_offset,
+ value, fn));
+ }
+ }
+ }
+}
+
+TEST(Ulw) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ auto fn_1 = [](MacroAssembler& masm, int32_t in_offset, int32_t out_offset) {
+ __ Ulw(t0, MemOperand(a0, in_offset));
+ __ Usw(t0, MemOperand(a0, out_offset));
+ };
+
+ auto fn_2 = [](MacroAssembler& masm, int32_t in_offset, int32_t out_offset) {
+ __ mv(t0, a0);
+ __ Ulw(a0, MemOperand(a0, in_offset));
+ __ Usw(a0, MemOperand(t0, out_offset));
+ };
+
+ auto fn_3 = [](MacroAssembler& masm, int32_t in_offset, int32_t out_offset) {
+ __ Ulwu(t0, MemOperand(a0, in_offset));
+ __ Usw(t0, MemOperand(a0, out_offset));
+ };
+
+ auto fn_4 = [](MacroAssembler& masm, int32_t in_offset, int32_t out_offset) {
+ __ mv(t0, a0);
+ __ Ulwu(a0, MemOperand(a0, in_offset));
+ __ Usw(a0, MemOperand(t0, out_offset));
+ };
+
+ FOR_UINT32_INPUTS(i) {
+ FOR_INT32_TWO_INPUTS(j1, j2, unsigned_test_offset) {
+ FOR_INT32_TWO_INPUTS(k1, k2, unsigned_test_offset_increment) {
+ auto value = i;
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(value, run_Unaligned(buffer_middle, in_offset, out_offset,
+ value, fn_1));
+ // test when loaded value overwrites base-register of load address
+ CHECK_EQ(value, run_Unaligned(buffer_middle, in_offset, out_offset,
+ value, fn_2));
+ CHECK_EQ(value, run_Unaligned(buffer_middle, in_offset, out_offset,
+ value, fn_3));
+ // test when loaded value overwrites base-register of load address
+ CHECK_EQ(value, run_Unaligned(buffer_middle, in_offset, out_offset,
+ value, fn_4));
+ }
+ }
+ }
+}
+
+TEST(Ulw_extension) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ auto fn = [](MacroAssembler& masm, int32_t in_offset, int32_t out_offset) {
+ Label success, fail, end, different;
+ __ Ulw(t0, MemOperand(a0, in_offset));
+ __ Ulwu(t1, MemOperand(a0, in_offset));
+ __ Branch(&different, ne, t0, Operand(t1));
+
+ // If the signed and unsigned values are the same, check
+ // that the upper bits are zero.
+ __ srai(t0, t0, 31);
+ __ Branch(&success, eq, t0, Operand(zero_reg));
+ __ Branch(&fail);
+
+ // If the signed and unsigned values are different,
+ // check that the upper bits are complementary.
+ __ bind(&different);
+ __ srai(t1, t1, 31);
+ __ Branch(&fail, ne, t1, Operand(1));
+ __ srai(t0, t0, 31);
+ __ addi(t0, t0, 1);
+ __ Branch(&fail, ne, t0, Operand(zero_reg));
+ // Fall through to success
+
+ __ bind(&success);
+ __ Ulw(t0, MemOperand(a0, in_offset));
+ __ Usw(t0, MemOperand(a0, out_offset));
+ __ Branch(&end);
+ __ bind(&fail);
+ __ Usw(zero_reg, MemOperand(a0, out_offset));
+ __ bind(&end);
+ };
+
+ FOR_UINT32_INPUTS(i) {
+ FOR_INT32_TWO_INPUTS(j1, j2, unsigned_test_offset) {
+ FOR_INT32_TWO_INPUTS(k1, k2, unsigned_test_offset_increment) {
+ auto value = i;
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+ CHECK_EQ(value, run_Unaligned(buffer_middle, in_offset, out_offset,
+ value, fn));
+ }
+ }
+ }
+}
+
+TEST(Uld) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ auto fn_1 = [](MacroAssembler& masm, int32_t in_offset, int32_t out_offset) {
+ __ Uld(t0, MemOperand(a0, in_offset));
+ __ Usd(t0, MemOperand(a0, out_offset));
+ };
+
+ auto fn_2 = [](MacroAssembler& masm, int32_t in_offset, int32_t out_offset) {
+ __ mv(t0, a0);
+ __ Uld(a0, MemOperand(a0, in_offset));
+ __ Usd(a0, MemOperand(t0, out_offset));
+ };
+
+ FOR_UINT64_INPUTS(i) {
+ FOR_INT32_TWO_INPUTS(j1, j2, unsigned_test_offset) {
+ FOR_INT32_TWO_INPUTS(k1, k2, unsigned_test_offset_increment) {
+ auto value = i;
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(value, run_Unaligned(buffer_middle, in_offset, out_offset,
+ value, fn_1));
+
+ // test when loaded value overwrites base-register of load address
+ CHECK_EQ(value, run_Unaligned(buffer_middle, in_offset, out_offset,
+ value, fn_2));
+ }
+ }
+ }
+}
+
+TEST(ULoadFloat) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ auto fn = [](MacroAssembler& masm, int32_t in_offset, int32_t out_offset) {
+ __ ULoadFloat(fa0, MemOperand(a0, in_offset));
+ __ UStoreFloat(fa0, MemOperand(a0, out_offset));
+ };
+
+ FOR_FLOAT32_INPUTS(i) {
+ // Skip NaN because CHECK_EQ cannot handle NaN.
+ if (std::isnan(i)) continue;
+ FOR_INT32_TWO_INPUTS(j1, j2, unsigned_test_offset) {
+ FOR_INT32_TWO_INPUTS(k1, k2, unsigned_test_offset_increment) {
+ auto value = i;
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+ CHECK_EQ(value, run_Unaligned(buffer_middle, in_offset, out_offset,
+ value, fn));
+ }
+ }
+ }
+}
+
+TEST(ULoadDouble) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ auto fn = [](MacroAssembler& masm, int32_t in_offset, int32_t out_offset) {
+ __ ULoadDouble(fa0, MemOperand(a0, in_offset));
+ __ UStoreDouble(fa0, MemOperand(a0, out_offset));
+ };
+
+ FOR_FLOAT64_INPUTS(i) {
+ // Skip NaN because CHECK_EQ cannot handle NaN.
+ if (std::isnan(i)) continue;
+ FOR_INT32_TWO_INPUTS(j1, j2, unsigned_test_offset) {
+ FOR_INT32_TWO_INPUTS(k1, k2, unsigned_test_offset_increment) {
+ auto value = i;
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+ CHECK_EQ(value, run_Unaligned(buffer_middle, in_offset, out_offset,
+ value, fn));
+ }
+ }
+ }
+}
+
+TEST(Sltu) {
+ CcTest::InitializeVM();
+
+ FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(j) {
+ // compare against immediate value
+ auto fn_1 = [j](MacroAssembler& masm) { __ Sltu(a0, a0, Operand(j)); };
+ CHECK_EQ(i < j, GenAndRunTest<int32_t>(i, fn_1));
+ // compare against registers
+ auto fn_2 = [](MacroAssembler& masm) { __ Sltu(a0, a0, a1); };
+ CHECK_EQ(i < j, GenAndRunTest<int32_t>(i, j, fn_2));
+ }
+ }
+}
+
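+// Emits Float32Min/Float32Max for every register-aliasing combination
+// (distinct registers, result == lhs, result == rhs) and stores the results
+// into the Results struct for checking.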
+template <typename T, typename Inputs, typename Results>
+static void GenerateMacroFloat32MinMax(MacroAssembler& masm) {
+ T a = T::from_code(4); // f4
+ T b = T::from_code(6); // f6
+ T c = T::from_code(8); // f8
+
+#define FLOAT_MIN_MAX(fminmax, res, x, y, res_field) \
+ __ LoadFloat(x, MemOperand(a0, offsetof(Inputs, src1_))); \
+ __ LoadFloat(y, MemOperand(a0, offsetof(Inputs, src2_))); \
+ __ fminmax(res, x, y); \
+ __ StoreFloat(a, MemOperand(a1, offsetof(Results, res_field)))
+
+ // a = min(b, c);
+ FLOAT_MIN_MAX(Float32Min, a, b, c, min_abc_);
+ // a = min(a, b);
+ FLOAT_MIN_MAX(Float32Min, a, a, b, min_aab_);
+ // a = min(b, a);
+ FLOAT_MIN_MAX(Float32Min, a, b, a, min_aba_);
+
+ // a = max(b, c);
+ FLOAT_MIN_MAX(Float32Max, a, b, c, max_abc_);
+ // a = max(a, b);
+ FLOAT_MIN_MAX(Float32Max, a, a, b, max_aab_);
+ // a = max(b, a);
+ FLOAT_MIN_MAX(Float32Max, a, b, a, max_aba_);
+
+#undef FLOAT_MIN_MAX
+}
+
+TEST(macro_float_minmax_f32) {
+ // Test the Float32Min and Float32Max macros.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ struct Inputs {
+ float src1_;
+ float src2_;
+ };
+
+ struct Results {
+ // Check all register aliasing possibilities in order to exercise all
+ // code-paths in the macro masm.
+ float min_abc_;
+ float min_aab_;
+ float min_aba_;
+ float max_abc_;
+ float max_aab_;
+ float max_aba_;
+ };
+
+ auto f = AssembleCode<F4>(
+ GenerateMacroFloat32MinMax<FPURegister, Inputs, Results>);
+
+#define CHECK_MINMAX(src1, src2, min, max) \
+ do { \
+ Inputs inputs = {src1, src2}; \
+ Results results; \
+ f.Call(&inputs, &results, 0, 0, 0); \
+ CHECK_EQ(bit_cast<uint32_t>(min), bit_cast<uint32_t>(results.min_abc_)); \
+ CHECK_EQ(bit_cast<uint32_t>(min), bit_cast<uint32_t>(results.min_aab_)); \
+ CHECK_EQ(bit_cast<uint32_t>(min), bit_cast<uint32_t>(results.min_aba_)); \
+ CHECK_EQ(bit_cast<uint32_t>(max), bit_cast<uint32_t>(results.max_abc_)); \
+ CHECK_EQ(bit_cast<uint32_t>(max), bit_cast<uint32_t>(results.max_aab_)); \
+ CHECK_EQ( \
+ bit_cast<uint32_t>(max), \
+ bit_cast<uint32_t>(results.max_aba_)); /* Use a bit_cast to correctly \
+ identify -0.0 and NaNs. */ \
+ } while (0)
+
+ float nan_a = std::numeric_limits<float>::quiet_NaN();
+ float nan_b = std::numeric_limits<float>::quiet_NaN();
+
+ CHECK_MINMAX(1.0f, -1.0f, -1.0f, 1.0f);
+ CHECK_MINMAX(-1.0f, 1.0f, -1.0f, 1.0f);
+ CHECK_MINMAX(0.0f, -1.0f, -1.0f, 0.0f);
+ CHECK_MINMAX(-1.0f, 0.0f, -1.0f, 0.0f);
+ CHECK_MINMAX(-0.0f, -1.0f, -1.0f, -0.0f);
+ CHECK_MINMAX(-1.0f, -0.0f, -1.0f, -0.0f);
+ CHECK_MINMAX(0.0f, 1.0f, 0.0f, 1.0f);
+ CHECK_MINMAX(1.0f, 0.0f, 0.0f, 1.0f);
+
+ CHECK_MINMAX(0.0f, 0.0f, 0.0f, 0.0f);
+ CHECK_MINMAX(-0.0f, -0.0f, -0.0f, -0.0f);
+ CHECK_MINMAX(-0.0f, 0.0f, -0.0f, 0.0f);
+ CHECK_MINMAX(0.0f, -0.0f, -0.0f, 0.0f);
+
+ CHECK_MINMAX(0.0f, nan_a, nan_a, nan_a);
+ CHECK_MINMAX(nan_a, 0.0f, nan_a, nan_a);
+ CHECK_MINMAX(nan_a, nan_b, nan_a, nan_a);
+ CHECK_MINMAX(nan_b, nan_a, nan_b, nan_b);
+
+#undef CHECK_MINMAX
+}
+
+template <typename T, typename Inputs, typename Results>
+static void GenerateMacroFloat64MinMax(MacroAssembler& masm) {
+ T a = T::from_code(4); // f4
+ T b = T::from_code(6); // f6
+ T c = T::from_code(8); // f8
+
+#define FLOAT_MIN_MAX(fminmax, res, x, y, res_field) \
+ __ LoadDouble(x, MemOperand(a0, offsetof(Inputs, src1_))); \
+ __ LoadDouble(y, MemOperand(a0, offsetof(Inputs, src2_))); \
+ __ fminmax(res, x, y); \
+ __ StoreDouble(a, MemOperand(a1, offsetof(Results, res_field)))
+
+ // a = min(b, c);
+ FLOAT_MIN_MAX(Float64Min, a, b, c, min_abc_);
+ // a = min(a, b);
+ FLOAT_MIN_MAX(Float64Min, a, a, b, min_aab_);
+ // a = min(b, a);
+ FLOAT_MIN_MAX(Float64Min, a, b, a, min_aba_);
+
+ // a = max(b, c);
+ FLOAT_MIN_MAX(Float64Max, a, b, c, max_abc_);
+ // a = max(a, b);
+ FLOAT_MIN_MAX(Float64Max, a, a, b, max_aab_);
+ // a = max(b, a);
+ FLOAT_MIN_MAX(Float64Max, a, b, a, max_aba_);
+
+#undef FLOAT_MIN_MAX
+}
+
+TEST(macro_float_minmax_f64) {
+ // Test the Float64Min and Float64Max macros.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ struct Inputs {
+ double src1_;
+ double src2_;
+ };
+
+ struct Results {
+ // Check all register aliasing possibilities in order to exercise all
+ // code-paths in the macro masm.
+ double min_abc_;
+ double min_aab_;
+ double min_aba_;
+ double max_abc_;
+ double max_aab_;
+ double max_aba_;
+ };
+
+ auto f = AssembleCode<F4>(
+ GenerateMacroFloat64MinMax<DoubleRegister, Inputs, Results>);
+
+#define CHECK_MINMAX(src1, src2, min, max) \
+ do { \
+ Inputs inputs = {src1, src2}; \
+ Results results; \
+ f.Call(&inputs, &results, 0, 0, 0); \
+ CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_abc_)); \
+ CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_aab_)); \
+ CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_aba_)); \
+ CHECK_EQ(bit_cast<uint64_t>(max), bit_cast<uint64_t>(results.max_abc_)); \
+ CHECK_EQ(bit_cast<uint64_t>(max), bit_cast<uint64_t>(results.max_aab_)); \
+ CHECK_EQ(bit_cast<uint64_t>(max), bit_cast<uint64_t>(results.max_aba_)); \
+ /* Use a bit_cast to correctly identify -0.0 and NaNs. */ \
+ } while (0)
+
+ double nan_a = qnan_d;
+ double nan_b = qnan_d;
+
+ CHECK_MINMAX(1.0, -1.0, -1.0, 1.0);
+ CHECK_MINMAX(-1.0, 1.0, -1.0, 1.0);
+ CHECK_MINMAX(0.0, -1.0, -1.0, 0.0);
+ CHECK_MINMAX(-1.0, 0.0, -1.0, 0.0);
+ CHECK_MINMAX(-0.0, -1.0, -1.0, -0.0);
+ CHECK_MINMAX(-1.0, -0.0, -1.0, -0.0);
+ CHECK_MINMAX(0.0, 1.0, 0.0, 1.0);
+ CHECK_MINMAX(1.0, 0.0, 0.0, 1.0);
+
+ CHECK_MINMAX(0.0, 0.0, 0.0, 0.0);
+ CHECK_MINMAX(-0.0, -0.0, -0.0, -0.0);
+ CHECK_MINMAX(-0.0, 0.0, -0.0, 0.0);
+ CHECK_MINMAX(0.0, -0.0, -0.0, 0.0);
+
+ CHECK_MINMAX(0.0, nan_a, nan_a, nan_a);
+ CHECK_MINMAX(nan_a, 0.0, nan_a, nan_a);
+ CHECK_MINMAX(nan_a, nan_b, nan_a, nan_a);
+ CHECK_MINMAX(nan_b, nan_a, nan_b, nan_b);
+
+#undef CHECK_MINMAX
+}
+
+template <typename T>
+static bool CompareF(T input1, T input2, FPUCondition cond) {
+ switch (cond) {
+ case EQ:
+ return (input1 == input2);
+ case LT:
+ return (input1 < input2);
+ case LE:
+ return (input1 <= input2);
+ case NE:
+ return (input1 != input2);
+ case GT:
+ return (input1 > input2);
+ case GE:
+ return (input1 >= input2);
+ default:
+ UNREACHABLE();
+ }
+}
+
+static bool CompareU(uint64_t input1, uint64_t input2, Condition cond) {
+ switch (cond) {
+ case eq:
+ return (input1 == input2);
+ case ne:
+ return (input1 != input2);
+
+ case Uless:
+ return (input1 < input2);
+ case Uless_equal:
+ return (input1 <= input2);
+ case Ugreater:
+ return (input1 > input2);
+ case Ugreater_equal:
+ return (input1 >= input2);
+
+ case less:
+ return (static_cast<int64_t>(input1) < static_cast<int64_t>(input2));
+ case less_equal:
+ return (static_cast<int64_t>(input1) <= static_cast<int64_t>(input2));
+ case greater:
+ return (static_cast<int64_t>(input1) > static_cast<int64_t>(input2));
+ case greater_equal:
+ return (static_cast<int64_t>(input1) >= static_cast<int64_t>(input2));
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+static void FCompare32Helper(FPUCondition cond) {
+ auto fn = [cond](MacroAssembler& masm) { __ CompareF32(a0, cond, fa0, fa1); };
+ FOR_FLOAT32_INPUTS(i) {
+ FOR_FLOAT32_INPUTS(j) {
+ bool comp_res = CompareF(i, j, cond);
+ CHECK_EQ(comp_res, GenAndRunTest<int32_t>(i, j, fn));
+ }
+ }
+}
+
+static void FCompare64Helper(FPUCondition cond) {
+ auto fn = [cond](MacroAssembler& masm) { __ CompareF64(a0, cond, fa0, fa1); };
+ FOR_FLOAT64_INPUTS(i) {
+ FOR_FLOAT64_INPUTS(j) {
+ bool comp_res = CompareF(i, j, cond);
+ CHECK_EQ(comp_res, GenAndRunTest<int32_t>(i, j, fn));
+ }
+ }
+}
+
+TEST(FCompare32_Branch) {
+ CcTest::InitializeVM();
+
+ FCompare32Helper(EQ);
+ FCompare32Helper(LT);
+ FCompare32Helper(LE);
+ FCompare32Helper(NE);
+ FCompare32Helper(GT);
+ FCompare32Helper(GE);
+
+ // test CompareIsNanF32: return true if any operand isnan
+ auto fn = [](MacroAssembler& masm) { __ CompareIsNanF32(a0, fa0, fa1); };
+ CHECK_EQ(false, GenAndRunTest<int32_t>(1023.01f, -100.23f, fn));
+ CHECK_EQ(true, GenAndRunTest<int32_t>(1023.01f, snan_f, fn));
+ CHECK_EQ(true, GenAndRunTest<int32_t>(snan_f, -100.23f, fn));
+ CHECK_EQ(true, GenAndRunTest<int32_t>(snan_f, qnan_f, fn));
+}
+
+TEST(FCompare64_Branch) {
+ CcTest::InitializeVM();
+ FCompare64Helper(EQ);
+ FCompare64Helper(LT);
+ FCompare64Helper(LE);
+ FCompare64Helper(NE);
+ FCompare64Helper(GT);
+ FCompare64Helper(GE);
+
+ // test CompareIsNanF64: return true if any operand isnan
+ auto fn = [](MacroAssembler& masm) { __ CompareIsNanF64(a0, fa0, fa1); };
+ CHECK_EQ(false, GenAndRunTest<int32_t>(1023.01, -100.23, fn));
+ CHECK_EQ(true, GenAndRunTest<int32_t>(1023.01, snan_d, fn));
+ CHECK_EQ(true, GenAndRunTest<int32_t>(snan_d, -100.23, fn));
+ CHECK_EQ(true, GenAndRunTest<int32_t>(snan_d, qnan_d, fn));
+}
+
+static void CompareIHelper(Condition cond) {
+ FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(j) {
+ auto input1 = i;
+ auto input2 = j;
+ bool comp_res = CompareU(input1, input2, cond);
+ // test compare against immediate value
+ auto fn1 = [cond, input2](MacroAssembler& masm) {
+ __ CompareI(a0, a0, Operand(input2), cond);
+ };
+ CHECK_EQ(comp_res, GenAndRunTest<int32_t>(input1, fn1));
+ // test compare registers
+ auto fn2 = [cond](MacroAssembler& masm) {
+ __ CompareI(a0, a0, Operand(a1), cond);
+ };
+ CHECK_EQ(comp_res, GenAndRunTest<int32_t>(input1, input2, fn2));
+ }
+ }
+}
+
+TEST(CompareI) {
+ CcTest::InitializeVM();
+ CompareIHelper(eq);
+ CompareIHelper(ne);
+
+ CompareIHelper(greater);
+ CompareIHelper(greater_equal);
+ CompareIHelper(less);
+ CompareIHelper(less_equal);
+
+ CompareIHelper(Ugreater);
+ CompareIHelper(Ugreater_equal);
+ CompareIHelper(Uless);
+ CompareIHelper(Uless_equal);
+}
+
+TEST(Clz32) {
+ CcTest::InitializeVM();
+ auto fn = [](MacroAssembler& masm) { __ Clz32(a0, a0); };
+ FOR_UINT32_INPUTS(i) {
+ // __builtin_clz(0) is undefined
+ if (i == 0) continue;
+ CHECK_EQ(__builtin_clz(i), GenAndRunTest<int>(i, fn));
+ }
+}
+
+TEST(Ctz32) {
+ CcTest::InitializeVM();
+ auto fn = [](MacroAssembler& masm) { __ Ctz32(a0, a0); };
+ FOR_UINT32_INPUTS(i) {
+ // __builtin_ctz(0) is undefined
+ if (i == 0) continue;
+ CHECK_EQ(__builtin_ctz(i), GenAndRunTest<int>(i, fn));
+ }
+}
+
+TEST(Clz64) {
+ CcTest::InitializeVM();
+ auto fn = [](MacroAssembler& masm) { __ Clz64(a0, a0); };
+ FOR_UINT64_INPUTS(i) {
+ // __builtin_clzll(0) is undefined
+ if (i == 0) continue;
+ CHECK_EQ(__builtin_clzll(i), GenAndRunTest<int>(i, fn));
+ }
+}
+
+TEST(Ctz64) {
+ CcTest::InitializeVM();
+ auto fn = [](MacroAssembler& masm) { __ Ctz64(a0, a0); };
+ FOR_UINT64_INPUTS(i) {
+ // __builtin_ctzll(0) is undefined
+ if (i == 0) continue;
+ CHECK_EQ(__builtin_ctzll(i), GenAndRunTest<int>(i, fn));
+ }
+}
+
+TEST(ByteSwap) {
+ CcTest::InitializeVM();
+ auto fn0 = [](MacroAssembler& masm) { __ ByteSwap(a0, a0, 4); };
+ CHECK_EQ((int32_t)0x89ab'cdef, GenAndRunTest<int32_t>(0xefcd'ab89, fn0));
+ auto fn1 = [](MacroAssembler& masm) { __ ByteSwap(a0, a0, 8); };
+ CHECK_EQ((int64_t)0x0123'4567'89ab'cdef,
+ GenAndRunTest<int64_t>(0xefcd'ab89'6745'2301, fn1));
+}
+
+TEST(Dpopcnt) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+
+ uint64_t in[9];
+ uint64_t out[9];
+ uint64_t result[9];
+ uint64_t val = 0xffffffffffffffffl;
+ uint64_t cnt = 64;
+
+ for (int i = 0; i < 7; i++) {
+ in[i] = val;
+ out[i] = cnt;
+ cnt >>= 1;
+ val >>= cnt;
+ }
+
+ in[7] = 0xaf1000000000000bl;
+ out[7] = 10;
+ in[8] = 0xe030000f00003000l;
+ out[8] = 11;
+
+ auto fn = [&in](MacroAssembler& masm) {
+ __ mv(a4, a0);
+ for (int i = 0; i < 7; i++) {
+ // Load constant.
+ __ li(a3, Operand(in[i]));
+ __ Popcnt64(a5, a3);
+ __ Sd(a5, MemOperand(a4));
+ __ Add64(a4, a4, Operand(kPointerSize));
+ }
+ __ li(a3, Operand(in[7]));
+ __ Popcnt64(a5, a3);
+ __ Sd(a5, MemOperand(a4));
+ __ Add64(a4, a4, Operand(kPointerSize));
+
+ __ li(a3, Operand(in[8]));
+ __ Popcnt64(a5, a3);
+ __ Sd(a5, MemOperand(a4));
+ __ Add64(a4, a4, Operand(kPointerSize));
+ };
+ auto f = AssembleCode<FV>(fn);
+
+ (void)f.Call(reinterpret_cast<int64_t>(result), 0, 0, 0, 0);
+ // Check results.
+ for (int i = 0; i < 9; i++) {
+ CHECK(out[i] == result[i]);
+ }
+}
+
+TEST(Popcnt) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+
+ uint64_t in[8];
+ uint64_t out[8];
+ uint64_t result[8];
+ uint64_t val = 0xffffffff;
+ uint64_t cnt = 32;
+
+ for (int i = 0; i < 6; i++) {
+ in[i] = val;
+ out[i] = cnt;
+ cnt >>= 1;
+ val >>= cnt;
+ }
+
+ in[6] = 0xaf10000b;
+ out[6] = 10;
+ in[7] = 0xe03f3000;
+ out[7] = 11;
+
+ auto fn = [&in](MacroAssembler& masm) {
+ __ mv(a4, a0);
+ for (int i = 0; i < 6; i++) {
+ // Load constant.
+ __ li(a3, Operand(in[i]));
+ __ Popcnt32(a5, a3);
+ __ Sd(a5, MemOperand(a4));
+ __ Add64(a4, a4, Operand(kPointerSize));
+ }
+
+ __ li(a3, Operand(in[6]));
+ __ Popcnt64(a5, a3);
+ __ Sd(a5, MemOperand(a4));
+ __ Add64(a4, a4, Operand(kPointerSize));
+
+ __ li(a3, Operand(in[7]));
+ __ Popcnt64(a5, a3);
+ __ Sd(a5, MemOperand(a4));
+ __ Add64(a4, a4, Operand(kPointerSize));
+ };
+ auto f = AssembleCode<FV>(fn);
+
+ (void)f.Call(reinterpret_cast<int64_t>(result), 0, 0, 0, 0);
+ // Check results.
+ for (int i = 0; i < 8; i++) {
+ CHECK(out[i] == result[i]);
+ }
+}
+
+TEST(Move) {
+ CcTest::InitializeVM();
+ union {
+ double dval;
+ int32_t ival[2];
+ } t;
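+  // RISC-V is little-endian, so ival[0] is the low word of dval and ival[1]
+  // the high word.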
+
+ {
+ auto fn = [](MacroAssembler& masm) { __ ExtractHighWordFromF64(a0, fa0); };
+ t.ival[0] = 256;
+ t.ival[1] = -123;
+ CHECK_EQ(static_cast<int64_t>(t.ival[1]),
+ GenAndRunTest<int64_t>(t.dval, fn));
+ t.ival[0] = 645;
+ t.ival[1] = 127;
+ CHECK_EQ(static_cast<int64_t>(t.ival[1]),
+ GenAndRunTest<int64_t>(t.dval, fn));
+ }
+
+ {
+ auto fn = [](MacroAssembler& masm) { __ ExtractLowWordFromF64(a0, fa0); };
+ t.ival[0] = 256;
+ t.ival[1] = -123;
+ CHECK_EQ(static_cast<int64_t>(t.ival[0]),
+ GenAndRunTest<int64_t>(t.dval, fn));
+ t.ival[0] = -645;
+ t.ival[1] = 127;
+ CHECK_EQ(static_cast<int64_t>(t.ival[0]),
+ GenAndRunTest<int64_t>(t.dval, fn));
+ }
+}
+
+TEST(DeoptExitSizeIsFixed) {
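+  // Each deoptimization kind must emit a deopt exit sequence of a fixed,
+  // known size so the deoptimizer can map return addresses back to exits.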
+ CHECK(Deoptimizer::kSupportsFixedDeoptExitSizes);
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+ buffer->CreateView());
+ STATIC_ASSERT(static_cast<int>(kFirstDeoptimizeKind) == 0);
+ for (int i = 0; i < kDeoptimizeKindCount; i++) {
+ DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
+ Label before_exit;
+ masm.bind(&before_exit);
+ if (kind == DeoptimizeKind::kEagerWithResume) {
+ Builtins::Name target = Deoptimizer::GetDeoptWithResumeBuiltin(
+ DeoptimizeReason::kDynamicCheckMaps);
+ masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
+ nullptr);
+ CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
+ Deoptimizer::kEagerWithResumeBeforeArgsSize);
+ } else {
+ Builtins::Name target = Deoptimizer::GetDeoptimizationEntry(kind);
+ masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
+ nullptr);
+ CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
+ kind == DeoptimizeKind::kLazy
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
+ }
+ }
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index add41dbbb0..8f348c4584 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -115,13 +115,20 @@ TEST(SmiMove) {
TestMoveSmi(masm, &exit, 3, Smi::FromInt(128));
TestMoveSmi(masm, &exit, 4, Smi::FromInt(255));
TestMoveSmi(masm, &exit, 5, Smi::FromInt(256));
- TestMoveSmi(masm, &exit, 6, Smi::FromInt(Smi::kMaxValue));
- TestMoveSmi(masm, &exit, 7, Smi::FromInt(-1));
- TestMoveSmi(masm, &exit, 8, Smi::FromInt(-128));
- TestMoveSmi(masm, &exit, 9, Smi::FromInt(-129));
- TestMoveSmi(masm, &exit, 10, Smi::FromInt(-256));
- TestMoveSmi(masm, &exit, 11, Smi::FromInt(-257));
- TestMoveSmi(masm, &exit, 12, Smi::FromInt(Smi::kMinValue));
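+  // Exercise Smi values around the unsigned 16-bit boundary in both
+  // directions, plus the extremes.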
+ TestMoveSmi(masm, &exit, 6, Smi::FromInt(0xFFFF - 1));
+ TestMoveSmi(masm, &exit, 7, Smi::FromInt(0xFFFF));
+ TestMoveSmi(masm, &exit, 8, Smi::FromInt(0xFFFF + 1));
+ TestMoveSmi(masm, &exit, 9, Smi::FromInt(Smi::kMaxValue));
+
+ TestMoveSmi(masm, &exit, 10, Smi::FromInt(-1));
+ TestMoveSmi(masm, &exit, 11, Smi::FromInt(-128));
+ TestMoveSmi(masm, &exit, 12, Smi::FromInt(-129));
+ TestMoveSmi(masm, &exit, 13, Smi::FromInt(-256));
+ TestMoveSmi(masm, &exit, 14, Smi::FromInt(-257));
+ TestMoveSmi(masm, &exit, 15, Smi::FromInt(-0xFFFF + 1));
+ TestMoveSmi(masm, &exit, 16, Smi::FromInt(-0xFFFF));
+ TestMoveSmi(masm, &exit, 17, Smi::FromInt(-0xFFFF - 1));
+ TestMoveSmi(masm, &exit, 18, Smi::FromInt(Smi::kMinValue));
__ xorq(rax, rax); // Success.
__ bind(&exit);
diff --git a/deps/v8/test/cctest/test-modules.cc b/deps/v8/test/cctest/test-modules.cc
index 3f4b94962c..ab3afab758 100644
--- a/deps/v8/test/cctest/test-modules.cc
+++ b/deps/v8/test/cctest/test-modules.cc
@@ -26,8 +26,8 @@ using v8::String;
using v8::Value;
ScriptOrigin ModuleOrigin(Local<v8::Value> resource_name, Isolate* isolate) {
- ScriptOrigin origin(resource_name, 0, 0, false, -1, Local<v8::Value>(), false,
- false, true);
+ ScriptOrigin origin(isolate, resource_name, 0, 0, false, -1,
+ Local<v8::Value>(), false, false, true);
return origin;
}
@@ -148,13 +148,13 @@ MaybeLocal<Module> ResolveCallbackWithImportAssertions(
} else if (specifier->StrictEquals(v8_str("./bar.js"))) {
CHECK_EQ(3, import_assertions->Length());
Local<String> assertion_key =
- import_assertions->Get(context, 0).As<Value>().As<String>();
+ import_assertions->Get(env.local(), 0).As<Value>().As<String>();
CHECK(v8_str("a")->StrictEquals(assertion_key));
Local<String> assertion_value =
- import_assertions->Get(context, 1).As<Value>().As<String>();
+ import_assertions->Get(env.local(), 1).As<Value>().As<String>();
CHECK(v8_str("b")->StrictEquals(assertion_value));
Local<Data> assertion_source_offset_object =
- import_assertions->Get(context, 2);
+ import_assertions->Get(env.local(), 2);
Local<Int32> assertion_source_offset_int32 =
assertion_source_offset_object.As<Value>()
->ToInt32(context)
@@ -176,25 +176,18 @@ TEST(ModuleInstantiationWithImportAssertions) {
bool prev_top_level_await = i::FLAG_harmony_top_level_await;
bool prev_import_assertions = i::FLAG_harmony_import_assertions;
i::FLAG_harmony_import_assertions = true;
-
- v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- create_params.supported_import_assertions = {"extra0", "a", "extra1"};
- v8::Isolate* isolate = v8::Isolate::New(create_params);
-
for (auto top_level_await : {true, false}) {
i::FLAG_harmony_top_level_await = top_level_await;
- v8::Isolate::Scope isolate_scope(isolate);
+ Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
- Local<v8::Context> context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(context);
+ LocalContext env;
v8::TryCatch try_catch(isolate);
Local<Module> module;
{
Local<String> source_text = v8_str(
"import './foo.js' assert { };\n"
- "export {} from './bar.js' assert { a: 'b', c: 'd' };");
+ "export {} from './bar.js' assert { a: 'b' };");
ScriptOrigin origin = ModuleOrigin(v8_str("file.js"), CcTest::isolate());
ScriptCompiler::Source source(source_text, origin);
module = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
@@ -202,7 +195,7 @@ TEST(ModuleInstantiationWithImportAssertions) {
Local<FixedArray> module_requests = module->GetModuleRequests();
CHECK_EQ(2, module_requests->Length());
Local<ModuleRequest> module_request_0 =
- module_requests->Get(context, 0).As<ModuleRequest>();
+ module_requests->Get(env.local(), 0).As<ModuleRequest>();
CHECK(v8_str("./foo.js")->StrictEquals(module_request_0->GetSpecifier()));
int offset = module_request_0->GetSourceOffset();
CHECK_EQ(7, offset);
@@ -212,7 +205,7 @@ TEST(ModuleInstantiationWithImportAssertions) {
CHECK_EQ(0, module_request_0->GetImportAssertions()->Length());
Local<ModuleRequest> module_request_1 =
- module_requests->Get(context, 1).As<ModuleRequest>();
+ module_requests->Get(env.local(), 1).As<ModuleRequest>();
CHECK(v8_str("./bar.js")->StrictEquals(module_request_1->GetSpecifier()));
offset = module_request_1->GetSourceOffset();
CHECK_EQ(45, offset);
@@ -224,13 +217,13 @@ TEST(ModuleInstantiationWithImportAssertions) {
module_request_1->GetImportAssertions();
CHECK_EQ(3, import_assertions_1->Length());
Local<String> assertion_key =
- import_assertions_1->Get(context, 0).As<String>();
+ import_assertions_1->Get(env.local(), 0).As<String>();
CHECK(v8_str("a")->StrictEquals(assertion_key));
Local<String> assertion_value =
- import_assertions_1->Get(context, 1).As<String>();
+ import_assertions_1->Get(env.local(), 1).As<String>();
CHECK(v8_str("b")->StrictEquals(assertion_value));
int32_t assertion_source_offset =
- import_assertions_1->Get(context, 2).As<Int32>()->Value();
+ import_assertions_1->Get(env.local(), 2).As<Int32>()->Value();
CHECK_EQ(65, assertion_source_offset);
loc = module->SourceOffsetToLocation(assertion_source_offset);
CHECK_EQ(1, loc.GetLineNumber());
@@ -255,72 +248,6 @@ TEST(ModuleInstantiationWithImportAssertions) {
ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
}
- CHECK(
- module->InstantiateModule(context, ResolveCallbackWithImportAssertions)
- .FromJust());
- CHECK_EQ(Module::kInstantiated, module->GetStatus());
-
- MaybeLocal<Value> result = module->Evaluate(context);
- CHECK_EQ(Module::kEvaluated, module->GetStatus());
- if (i::FLAG_harmony_top_level_await) {
- Local<Promise> promise = Local<Promise>::Cast(result.ToLocalChecked());
- CHECK_EQ(promise->State(), v8::Promise::kFulfilled);
- CHECK(promise->Result()->IsUndefined());
- } else {
- CHECK(!result.IsEmpty());
- ExpectInt32("Object.expando", 42);
- }
- CHECK(!try_catch.HasCaught());
- }
-
- isolate->Dispose();
- i::FLAG_harmony_top_level_await = prev_top_level_await;
- i::FLAG_harmony_import_assertions = prev_import_assertions;
-}
-
-TEST(ModuleInstantiationWithImportAssertionsWithoutSupportedAssertions) {
- bool prev_top_level_await = i::FLAG_harmony_top_level_await;
- bool prev_import_assertions = i::FLAG_harmony_import_assertions;
- i::FLAG_harmony_import_assertions = true;
- for (auto top_level_await : {true, false}) {
- i::FLAG_harmony_top_level_await = top_level_await;
- Isolate* isolate = CcTest::isolate();
- HandleScope scope(isolate);
- LocalContext env;
- v8::TryCatch try_catch(isolate);
-
- Local<Module> module;
- {
- Local<String> source_text =
- v8_str("import './foo.js' assert { a: 'b' };");
- ScriptOrigin origin = ModuleOrigin(v8_str("file.js"), CcTest::isolate());
- ScriptCompiler::Source source(source_text, origin);
- module = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
- CHECK_EQ(Module::kUninstantiated, module->GetStatus());
- Local<FixedArray> module_requests = module->GetModuleRequests();
- CHECK_EQ(1, module_requests->Length());
- Local<ModuleRequest> module_request_0 =
- module_requests->Get(env.local(), 0).As<ModuleRequest>();
- CHECK(v8_str("./foo.js")->StrictEquals(module_request_0->GetSpecifier()));
- int offset = module_request_0->GetSourceOffset();
- CHECK_EQ(7, offset);
- Location loc = module->SourceOffsetToLocation(offset);
- CHECK_EQ(0, loc.GetLineNumber());
- CHECK_EQ(7, loc.GetColumnNumber());
- // No supported assertions were provided in the Isolate's CreateParams, so
- // no import assertions should be visible on the API surface.
- CHECK_EQ(0, module_request_0->GetImportAssertions()->Length());
- }
-
- // foo.js
- {
- Local<String> source_text = v8_str("Object.expando = 40");
- ScriptOrigin origin = ModuleOrigin(v8_str("foo.js"), CcTest::isolate());
- ScriptCompiler::Source source(source_text, origin);
- fooModule =
- ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
- }
-
CHECK(module
->InstantiateModule(env.local(),
ResolveCallbackWithImportAssertions)
@@ -335,7 +262,7 @@ TEST(ModuleInstantiationWithImportAssertionsWithoutSupportedAssertions) {
CHECK(promise->Result()->IsUndefined());
} else {
CHECK(!result.IsEmpty());
- ExpectInt32("Object.expando", 40);
+ ExpectInt32("Object.expando", 42);
}
CHECK(!try_catch.HasCaught());
}
@@ -816,7 +743,7 @@ TEST(ModuleNamespace) {
Local<Value> ns = module->GetModuleNamespace();
CHECK_EQ(Module::kInstantiated, module->GetStatus());
Local<v8::Object> nsobj = ns->ToObject(env.local()).ToLocalChecked();
- CHECK_EQ(nsobj->CreationContext(), env.local());
+ CHECK_EQ(nsobj->GetCreationContext().ToLocalChecked(), env.local());
// a, b
CHECK(nsobj->Get(env.local(), v8_str("a")).ToLocalChecked()->IsUndefined());
@@ -1011,7 +938,7 @@ void DoHostImportModuleDynamically(void* import_data) {
v8::MaybeLocal<v8::Promise> HostImportModuleDynamicallyCallbackResolve(
Local<Context> context, Local<v8::ScriptOrModule> referrer,
- Local<String> specifier) {
+ Local<String> specifier, Local<FixedArray> import_assertions) {
Isolate* isolate = context->GetIsolate();
Local<v8::Promise::Resolver> resolver =
v8::Promise::Resolver::New(context).ToLocalChecked();
@@ -1024,7 +951,7 @@ v8::MaybeLocal<v8::Promise> HostImportModuleDynamicallyCallbackResolve(
v8::MaybeLocal<v8::Promise> HostImportModuleDynamicallyCallbackReject(
Local<Context> context, Local<v8::ScriptOrModule> referrer,
- Local<String> specifier) {
+ Local<String> specifier, Local<FixedArray> import_assertions) {
Isolate* isolate = context->GetIsolate();
Local<v8::Promise::Resolver> resolver =
v8::Promise::Resolver::New(context).ToLocalChecked();
diff --git a/deps/v8/test/cctest/test-object.cc b/deps/v8/test/cctest/test-object.cc
index fbb4a2b30c..1b4d90628d 100644
--- a/deps/v8/test/cctest/test-object.cc
+++ b/deps/v8/test/cctest/test-object.cc
@@ -322,6 +322,7 @@ TEST_FUNCTION_KIND(IsArrowFunction)
bool FunctionKindIsAsyncGeneratorFunction(FunctionKind kind) {
switch (kind) {
case FunctionKind::kAsyncConciseGeneratorMethod:
+ case FunctionKind::kStaticAsyncConciseGeneratorMethod:
case FunctionKind::kAsyncGeneratorFunction:
return true;
default:
@@ -333,7 +334,9 @@ TEST_FUNCTION_KIND(IsAsyncGeneratorFunction)
bool FunctionKindIsGeneratorFunction(FunctionKind kind) {
switch (kind) {
case FunctionKind::kConciseGeneratorMethod:
+ case FunctionKind::kStaticConciseGeneratorMethod:
case FunctionKind::kAsyncConciseGeneratorMethod:
+ case FunctionKind::kStaticAsyncConciseGeneratorMethod:
case FunctionKind::kGeneratorFunction:
case FunctionKind::kAsyncGeneratorFunction:
return true;
@@ -348,7 +351,9 @@ bool FunctionKindIsAsyncFunction(FunctionKind kind) {
case FunctionKind::kAsyncFunction:
case FunctionKind::kAsyncArrowFunction:
case FunctionKind::kAsyncConciseMethod:
+ case FunctionKind::kStaticAsyncConciseMethod:
case FunctionKind::kAsyncConciseGeneratorMethod:
+ case FunctionKind::kStaticAsyncConciseGeneratorMethod:
case FunctionKind::kAsyncGeneratorFunction:
return true;
default:
@@ -360,9 +365,13 @@ TEST_FUNCTION_KIND(IsAsyncFunction)
bool FunctionKindIsConciseMethod(FunctionKind kind) {
switch (kind) {
case FunctionKind::kConciseMethod:
+ case FunctionKind::kStaticConciseMethod:
case FunctionKind::kConciseGeneratorMethod:
+ case FunctionKind::kStaticConciseGeneratorMethod:
case FunctionKind::kAsyncConciseMethod:
+ case FunctionKind::kStaticAsyncConciseMethod:
case FunctionKind::kAsyncConciseGeneratorMethod:
+ case FunctionKind::kStaticAsyncConciseGeneratorMethod:
case FunctionKind::kClassMembersInitializerFunction:
return true;
default:
@@ -374,7 +383,9 @@ TEST_FUNCTION_KIND(IsConciseMethod)
bool FunctionKindIsAccessorFunction(FunctionKind kind) {
switch (kind) {
case FunctionKind::kGetterFunction:
+ case FunctionKind::kStaticGetterFunction:
case FunctionKind::kSetterFunction:
+ case FunctionKind::kStaticSetterFunction:
return true;
default:
return false;
@@ -431,16 +442,22 @@ TEST_FUNCTION_KIND(IsClassConstructor)
bool FunctionKindIsConstructable(FunctionKind kind) {
switch (kind) {
case FunctionKind::kGetterFunction:
+ case FunctionKind::kStaticGetterFunction:
case FunctionKind::kSetterFunction:
+ case FunctionKind::kStaticSetterFunction:
case FunctionKind::kArrowFunction:
case FunctionKind::kAsyncArrowFunction:
case FunctionKind::kAsyncFunction:
case FunctionKind::kAsyncConciseMethod:
+ case FunctionKind::kStaticAsyncConciseMethod:
case FunctionKind::kAsyncConciseGeneratorMethod:
+ case FunctionKind::kStaticAsyncConciseGeneratorMethod:
case FunctionKind::kAsyncGeneratorFunction:
case FunctionKind::kGeneratorFunction:
case FunctionKind::kConciseGeneratorMethod:
+ case FunctionKind::kStaticConciseGeneratorMethod:
case FunctionKind::kConciseMethod:
+ case FunctionKind::kStaticConciseMethod:
case FunctionKind::kClassMembersInitializerFunction:
return false;
default:
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index e50c1198d3..19a6d3779f 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -1153,12 +1153,12 @@ TEST(ScopeUsesArgumentsSuperThis) {
CHECK_NOT_NULL(scope->AsDeclarationScope()->arguments());
}
if (IsClassConstructor(scope->AsDeclarationScope()->function_kind())) {
- CHECK_EQ((source_data[i].expected & SUPER_PROPERTY) != 0 ||
- (source_data[i].expected & EVAL) != 0,
- scope->AsDeclarationScope()->NeedsHomeObject());
+ CHECK_IMPLIES((source_data[i].expected & SUPER_PROPERTY) != 0 ||
+ (source_data[i].expected & EVAL) != 0,
+ scope->GetHomeObjectScope()->needs_home_object());
} else {
- CHECK_EQ((source_data[i].expected & SUPER_PROPERTY) != 0,
- scope->AsDeclarationScope()->NeedsHomeObject());
+ CHECK_IMPLIES((source_data[i].expected & SUPER_PROPERTY) != 0,
+ scope->GetHomeObjectScope()->needs_home_object());
}
if ((source_data[i].expected & THIS) != 0) {
// Currently the is_used() flag is conservative; all variables in a
diff --git a/deps/v8/test/cctest/test-persistent-handles.cc b/deps/v8/test/cctest/test-persistent-handles.cc
index 5b18b1cb35..f5ebf7944a 100644
--- a/deps/v8/test/cctest/test-persistent-handles.cc
+++ b/deps/v8/test/cctest/test-persistent-handles.cc
@@ -83,7 +83,6 @@ class PersistentHandlesThread final : public v8::base::Thread {
};
TEST(CreatePersistentHandles) {
- heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -119,7 +118,6 @@ TEST(CreatePersistentHandles) {
}
TEST(DereferencePersistentHandle) {
- heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -139,7 +137,6 @@ TEST(DereferencePersistentHandle) {
}
TEST(DereferencePersistentHandleFailsWhenDisallowed) {
- heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
diff --git a/deps/v8/test/cctest/test-pointer-auth-arm64.cc b/deps/v8/test/cctest/test-pointer-auth-arm64.cc
index d55349ff2c..8b799455c6 100644
--- a/deps/v8/test/cctest/test-pointer-auth-arm64.cc
+++ b/deps/v8/test/cctest/test-pointer-auth-arm64.cc
@@ -30,9 +30,7 @@ TEST(compute_pac) {
}
TEST(add_and_auth_pac) {
-#ifdef DEBUG
i::FLAG_sim_abort_on_bad_auth = false;
-#endif
Decoder<DispatchingDecoderVisitor>* decoder =
new Decoder<DispatchingDecoderVisitor>();
Simulator simulator(decoder);
diff --git a/deps/v8/test/cctest/test-poison-disasm-arm64.cc b/deps/v8/test/cctest/test-poison-disasm-arm64.cc
index 9e2ee91ef4..7256a5876a 100644
--- a/deps/v8/test/cctest/test-poison-disasm-arm64.cc
+++ b/deps/v8/test/cctest/test-poison-disasm-arm64.cc
@@ -208,13 +208,9 @@ TEST(DisasmPoisonMonomorphicLoadFloat64) {
"b.ne", // deopt if differ
"csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
"csdb", // spec. barrier
-#if V8_DOUBLE_FIELDS_UNBOXING
- "add <<Addr:x[0-9]+>>, <<Obj>>, #0x[0-9a-f]+", // addr. calculation
-#else
"ldur <<F1:x[0-9]+>>, \\[<<Obj>>, #23\\]", // load heap number
"and <<F1>>, <<F1>>, " + kPReg, // apply the poison
"add <<Addr:x[0-9]+>>, <<F1>>, #0x7", // addr. calculation
-#endif
"and <<Addr>>, <<Addr>>, " + kPReg, // apply the poison
"ldr d[0-9]+, \\[<<Addr>>\\]", // load Float64
};
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 7535cdfeb2..7460e9df8f 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -523,6 +523,117 @@ TEST(SampleIds) {
}
}
+namespace {
+class DiscardedSamplesDelegateImpl : public v8::DiscardedSamplesDelegate {
+ public:
+ DiscardedSamplesDelegateImpl() : DiscardedSamplesDelegate() {}
+ void Notify() override {}
+};
+
+class MockPlatform : public TestPlatform {
+ public:
+ MockPlatform()
+ : old_platform_(i::V8::GetCurrentPlatform()),
+ mock_task_runner_(new MockTaskRunner()) {
+ // Now that it's completely constructed, make this the current platform.
+ i::V8::SetPlatformForTesting(this);
+ }
+
+ // When done, explicitly revert to old_platform_.
+ ~MockPlatform() override { i::V8::SetPlatformForTesting(old_platform_); }
+
+ std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
+ v8::Isolate*) override {
+ return mock_task_runner_;
+ }
+
+ int posted_count() { return mock_task_runner_->posted_count(); }
+
+ private:
+ class MockTaskRunner : public v8::TaskRunner {
+ public:
+ void PostTask(std::unique_ptr<v8::Task> task) override {
+ task->Run();
+ posted_count_++;
+ }
+
+ void PostDelayedTask(std::unique_ptr<Task> task,
+ double delay_in_seconds) override {
+ task_ = std::move(task);
+ delay_ = delay_in_seconds;
+ }
+
+ void PostIdleTask(std::unique_ptr<IdleTask> task) override {
+ UNREACHABLE();
+ }
+
+ bool IdleTasksEnabled() override { return false; }
+
+ int posted_count() { return posted_count_; }
+
+ private:
+ int posted_count_ = 0;
+ double delay_ = -1;
+ std::unique_ptr<Task> task_;
+ };
+
+ v8::Platform* old_platform_;
+ std::shared_ptr<MockTaskRunner> mock_task_runner_;
+};
+} // namespace
+
+TEST(MaxSamplesCallback) {
+ i::Isolate* isolate = CcTest::i_isolate();
+ CpuProfilesCollection profiles(isolate);
+ CpuProfiler profiler(isolate);
+ profiles.set_cpu_profiler(&profiler);
+ MockPlatform* mock_platform = new MockPlatform();
+  std::unique_ptr<DiscardedSamplesDelegateImpl> impl =
+      std::make_unique<DiscardedSamplesDelegateImpl>();
+ profiles.StartProfiling("",
+ {v8::CpuProfilingMode::kLeafNodeLineNumbers, 1, 1,
+ MaybeLocal<v8::Context>()},
+ std::move(impl));
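+  // Profiling was started with max_samples == 1, so the second sample exceeds
+  // the limit and the discarded-samples notification task is posted exactly
+  // once; further samples do not post again.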
+
+ StringsStorage strings;
+ CodeMap code_map(strings);
+ Symbolizer symbolizer(&code_map);
+ TickSample sample1;
+ sample1.timestamp = v8::base::TimeTicks::HighResolutionNow();
+ sample1.pc = ToPointer(0x1600);
+ sample1.stack[0] = ToPointer(0x1510);
+ sample1.frames_count = 1;
+ auto symbolized = symbolizer.SymbolizeTickSample(sample1);
+ profiles.AddPathToCurrentProfiles(sample1.timestamp, symbolized.stack_trace,
+ symbolized.src_line, true,
+ base::TimeDelta());
+ CHECK_EQ(0, mock_platform->posted_count());
+ TickSample sample2;
+ sample2.timestamp = v8::base::TimeTicks::HighResolutionNow();
+ sample2.pc = ToPointer(0x1925);
+ sample2.stack[0] = ToPointer(0x1780);
+ sample2.frames_count = 2;
+ symbolized = symbolizer.SymbolizeTickSample(sample2);
+ profiles.AddPathToCurrentProfiles(sample2.timestamp, symbolized.stack_trace,
+ symbolized.src_line, true,
+ base::TimeDelta());
+ CHECK_EQ(1, mock_platform->posted_count());
+ TickSample sample3;
+ sample3.timestamp = v8::base::TimeTicks::HighResolutionNow();
+ sample3.pc = ToPointer(0x1510);
+ sample3.frames_count = 3;
+ symbolized = symbolizer.SymbolizeTickSample(sample3);
+ profiles.AddPathToCurrentProfiles(sample3.timestamp, symbolized.stack_trace,
+ symbolized.src_line, true,
+ base::TimeDelta());
+ CHECK_EQ(1, mock_platform->posted_count());
+
+ // Teardown
+ profiles.StopProfiling("");
+ delete mock_platform;
+}
+
TEST(NoSamples) {
TestSetup test_setup;
i::Isolate* isolate = CcTest::i_isolate();
diff --git a/deps/v8/test/cctest/test-property-details.cc b/deps/v8/test/cctest/test-property-details.cc
new file mode 100644
index 0000000000..2eb2499a70
--- /dev/null
+++ b/deps/v8/test/cctest/test-property-details.cc
@@ -0,0 +1,72 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/objects/property-details.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+std::vector<PropertyDetails> make_details() {
+ std::vector<PropertyDetails> result;
+ for (PropertyKind kind : {PropertyKind::kData, PropertyKind::kAccessor}) {
+ for (PropertyConstness constness :
+ {PropertyConstness::kConst, PropertyConstness::kMutable}) {
+ for (PropertyCellType cell_type :
+ {PropertyCellType::kConstant, PropertyCellType::kConstantType,
+ PropertyCellType::kMutable, PropertyCellType::kUndefined,
+ PropertyCellType::kNoCell}) {
+ for (int attrs = 0; attrs < 8; ++attrs) {
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(attrs);
+ PropertyDetails details(kind, attributes, cell_type);
+ details = details.CopyWithConstness(constness);
+ result.push_back(details);
+ }
+ }
+ }
+ }
+ return result;
+}
+
+} // namespace
+
+#ifndef DEBUG
+// This test will trigger a DCHECK failure in debug mode. We must ensure that in
+// release mode, the enum index doesn't interfere with other fields once it
+// becomes too large.
+TEST(ExceedMaxEnumerationIndex) {
+ int too_large_enum_index = std::numeric_limits<int>::max();
+
+ for (PropertyDetails d : make_details()) {
+ PropertyDetails copy(d);
+
+ d = d.set_index(too_large_enum_index);
+ CHECK_EQ(copy.kind(), d.kind());
+ CHECK_EQ(copy.location(), d.location());
+ CHECK_EQ(copy.attributes(), d.attributes());
+ CHECK_EQ(copy.cell_type(), d.cell_type());
+ CHECK_EQ(PropertyDetails::DictionaryStorageField::kMax,
+ d.dictionary_index());
+ }
+}
+#endif
+
+TEST(AsByte) {
+ for (PropertyDetails original : make_details()) {
+ if (original.cell_type() != PropertyCellType::kNoCell) continue;
+
+ uint8_t as_byte = original.ToByte();
+ PropertyDetails from_byte = PropertyDetails::FromByte(as_byte);
+
+ CHECK_EQ(original, from_byte);
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index c7c1f07265..63495194d4 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -68,7 +68,6 @@ static bool CheckParse(const char* input) {
JSRegExp::kNone, &result);
}
-
static void CheckParseEq(const char* input, const char* expected,
bool unicode = false) {
Isolate* isolate = CcTest::i_isolate();
@@ -92,7 +91,6 @@ static void CheckParseEq(const char* input, const char* expected,
CHECK_EQ(0, strcmp(expected, os.str().c_str()));
}
-
static bool CheckSimple(const char* input) {
Isolate* isolate = CcTest::i_isolate();
@@ -113,7 +111,6 @@ struct MinMaxPair {
int max_match;
};
-
static MinMaxPair CheckMinMaxMatch(const char* input) {
Isolate* isolate = CcTest::i_isolate();
@@ -128,17 +125,17 @@ static MinMaxPair CheckMinMaxMatch(const char* input) {
CHECK(result.error == RegExpError::kNone);
int min_match = result.tree->min_match();
int max_match = result.tree->max_match();
- MinMaxPair pair = { min_match, max_match };
+ MinMaxPair pair = {min_match, max_match};
return pair;
}
-
#define CHECK_PARSE_ERROR(input) CHECK(!CheckParse(input))
#define CHECK_SIMPLE(input, simple) CHECK_EQ(simple, CheckSimple(input));
-#define CHECK_MIN_MAX(input, min, max) \
- { MinMaxPair min_max = CheckMinMaxMatch(input); \
- CHECK_EQ(min, min_max.min_match); \
- CHECK_EQ(max, min_max.max_match); \
+#define CHECK_MIN_MAX(input, min, max) \
+ { \
+ MinMaxPair min_max = CheckMinMaxMatch(input); \
+ CHECK_EQ(min, min_max.min_match); \
+ CHECK_EQ(max, min_max.max_match); \
}
TEST(RegExpParser) {
@@ -446,7 +443,6 @@ static void ExpectError(const char* input, const char* expected,
CHECK_EQ(0, strcmp(expected, RegExpErrorString(result.error)));
}
-
TEST(Errors) {
const char* kEndBackslash = "\\ at end of pattern";
ExpectError("\\", kEndBackslash);
@@ -533,7 +529,6 @@ TEST(CharacterClassEscapes) {
TestCharacterClassEscapes('W', NotWord);
}
-
static RegExpNode* Compile(const char* input, bool multiline, bool unicode,
bool is_one_byte, Zone* zone) {
Isolate* isolate = CcTest::i_isolate();
@@ -557,7 +552,6 @@ static RegExpNode* Compile(const char* input, bool multiline, bool unicode,
return compile_data.node;
}
-
static void Execute(const char* input, bool multiline, bool unicode,
bool is_one_byte, bool dot_output = false) {
v8::HandleScope scope(CcTest::isolate());
@@ -618,18 +612,18 @@ using ArchRegExpMacroAssembler = RegExpMacroAssemblerMIPS;
using ArchRegExpMacroAssembler = RegExpMacroAssemblerMIPS;
#elif V8_TARGET_ARCH_X87
using ArchRegExpMacroAssembler = RegExpMacroAssemblerX87;
+#elif V8_TARGET_ARCH_RISCV64
+using ArchRegExpMacroAssembler = RegExpMacroAssemblerRISCV;
#endif
class ContextInitializer {
public:
ContextInitializer()
- : scope_(CcTest::isolate()),
- env_(v8::Context::New(CcTest::isolate())) {
+ : scope_(CcTest::isolate()), env_(v8::Context::New(CcTest::isolate())) {
env_->Enter();
}
- ~ContextInitializer() {
- env_->Exit();
- }
+ ~ContextInitializer() { env_->Exit(); }
+
private:
v8::HandleScope scope_;
v8::Local<v8::Context> env_;
@@ -698,7 +692,6 @@ TEST(MacroAssemblerNativeSuccess) {
CHECK_EQ(-1, captures[3]);
}
-
TEST(MacroAssemblerNativeSimple) {
v8::V8::Initialize();
ContextInitializer initializer;
@@ -757,7 +750,6 @@ TEST(MacroAssemblerNativeSimple) {
CHECK_EQ(NativeRegExpMacroAssembler::FAILURE, result);
}
-
TEST(MacroAssemblerNativeSimpleUC16) {
v8::V8::Initialize();
ContextInitializer initializer;
@@ -793,10 +785,11 @@ TEST(MacroAssemblerNativeSimpleUC16) {
Handle<JSRegExp> regexp = CreateJSRegExp(source, code, true);
int captures[4] = {42, 37, 87, 117};
- const uc16 input_data[6] = {'f', 'o', 'o', 'f', 'o',
- static_cast<uc16>(0x2603)};
- Handle<String> input = factory->NewStringFromTwoByte(
- Vector<const uc16>(input_data, 6)).ToHandleChecked();
+ const uc16 input_data[6] = {'f', 'o', 'o',
+ 'f', 'o', static_cast<uc16>(0x2603)};
+ Handle<String> input =
+ factory->NewStringFromTwoByte(Vector<const uc16>(input_data, 6))
+ .ToHandleChecked();
Handle<SeqTwoByteString> seq_input = Handle<SeqTwoByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
@@ -809,10 +802,10 @@ TEST(MacroAssemblerNativeSimpleUC16) {
CHECK_EQ(-1, captures[2]);
CHECK_EQ(-1, captures[3]);
- const uc16 input_data2[9] = {'b', 'a', 'r', 'b', 'a', 'r', 'b', 'a',
- static_cast<uc16>(0x2603)};
- input = factory->NewStringFromTwoByte(
- Vector<const uc16>(input_data2, 9)).ToHandleChecked();
+ const uc16 input_data2[9] = {
+ 'b', 'a', 'r', 'b', 'a', 'r', 'b', 'a', static_cast<uc16>(0x2603)};
+ input = factory->NewStringFromTwoByte(Vector<const uc16>(input_data2, 9))
+ .ToHandleChecked();
seq_input = Handle<SeqTwoByteString>::cast(input);
start_adr = seq_input->GetCharsAddress();
@@ -822,7 +815,6 @@ TEST(MacroAssemblerNativeSimpleUC16) {
CHECK_EQ(NativeRegExpMacroAssembler::FAILURE, result);
}
-
TEST(MacroAssemblerNativeBacktrack) {
v8::V8::Initialize();
ContextInitializer initializer;
@@ -859,7 +851,6 @@ TEST(MacroAssemblerNativeBacktrack) {
CHECK_EQ(NativeRegExpMacroAssembler::FAILURE, result);
}
-
TEST(MacroAssemblerNativeBackReferenceLATIN1) {
v8::V8::Initialize();
ContextInitializer initializer;
@@ -905,7 +896,6 @@ TEST(MacroAssemblerNativeBackReferenceLATIN1) {
CHECK_EQ(-1, output[3]);
}
-
TEST(MacroAssemblerNativeBackReferenceUC16) {
v8::V8::Initialize();
ContextInitializer initializer;
@@ -937,8 +927,9 @@ TEST(MacroAssemblerNativeBackReferenceUC16) {
Handle<JSRegExp> regexp = CreateJSRegExp(source, code, true);
const uc16 input_data[6] = {'f', 0x2028, 'o', 'o', 'f', 0x2028};
- Handle<String> input = factory->NewStringFromTwoByte(
- Vector<const uc16>(input_data, 6)).ToHandleChecked();
+ Handle<String> input =
+ factory->NewStringFromTwoByte(Vector<const uc16>(input_data, 6))
+ .ToHandleChecked();
Handle<SeqTwoByteString> seq_input = Handle<SeqTwoByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
@@ -953,8 +944,6 @@ TEST(MacroAssemblerNativeBackReferenceUC16) {
CHECK_EQ(-1, output[3]);
}
-
-
TEST(MacroAssemblernativeAtStart) {
v8::V8::Initialize();
ContextInitializer initializer;
@@ -1006,7 +995,6 @@ TEST(MacroAssemblernativeAtStart) {
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
}
-
TEST(MacroAssemblerNativeBackRefNoCase) {
v8::V8::Initialize();
ContextInitializer initializer;
@@ -1060,8 +1048,6 @@ TEST(MacroAssemblerNativeBackRefNoCase) {
CHECK_EQ(3, output[3]);
}
-
-
TEST(MacroAssemblerNativeRegisters) {
v8::V8::Initialize();
ContextInitializer initializer;
@@ -1159,7 +1145,6 @@ TEST(MacroAssemblerNativeRegisters) {
CHECK_EQ(-1, output[5]);
}
-
TEST(MacroAssemblerStackOverflow) {
v8::V8::Initialize();
ContextInitializer initializer;
@@ -1194,7 +1179,6 @@ TEST(MacroAssemblerStackOverflow) {
isolate->clear_pending_exception();
}
-
TEST(MacroAssemblerNativeLotsOfRegisters) {
v8::V8::Initialize();
ContextInitializer initializer;
@@ -1284,8 +1268,9 @@ TEST(MacroAssembler) {
std::memset(captures, 0, sizeof(captures));
const uc16 str1[] = {'f', 'o', 'o', 'b', 'a', 'r'};
- Handle<String> f1_16 = factory->NewStringFromTwoByte(
- Vector<const uc16>(str1, 6)).ToHandleChecked();
+ Handle<String> f1_16 =
+ factory->NewStringFromTwoByte(Vector<const uc16>(str1, 6))
+ .ToHandleChecked();
CHECK_EQ(IrregexpInterpreter::SUCCESS,
IrregexpInterpreter::MatchInternal(
@@ -1298,8 +1283,9 @@ TEST(MacroAssembler) {
CHECK_EQ(84, captures[4]);
const uc16 str2[] = {'b', 'a', 'r', 'f', 'o', 'o'};
- Handle<String> f2_16 = factory->NewStringFromTwoByte(
- Vector<const uc16>(str2, 6)).ToHandleChecked();
+ Handle<String> f2_16 =
+ factory->NewStringFromTwoByte(Vector<const uc16>(str2, 6))
+ .ToHandleChecked();
std::memset(captures, 0, sizeof(captures));
CHECK_EQ(IrregexpInterpreter::FAILURE,
@@ -1337,8 +1323,7 @@ TEST(LatinCanonicalize) {
CHECK_EQ(upper, uncanon[0]);
CHECK_EQ(lower, uncanon[1]);
}
- for (uc32 c = 128; c < (1 << 21); c++)
- CHECK_GE(canonicalize(c), 128);
+ for (uc32 c = 128; c < (1 << 21); c++) CHECK_GE(canonicalize(c), 128);
unibrow::Mapping<unibrow::ToUppercase> to_upper;
// Canonicalization is only defined for the Basic Multilingual Plane.
for (uc32 c = 0; c < (1 << 16); c++) {
@@ -1349,8 +1334,7 @@ TEST(LatinCanonicalize) {
upper[0] = c;
}
uc32 u = upper[0];
- if (length > 1 || (c >= 128 && u < 128))
- u = c;
+ if (length > 1 || (c >= 128 && u < 128)) u = c;
CHECK_EQ(u, canonicalize(c));
}
}
@@ -1366,7 +1350,6 @@ static uc32 CanonRangeEnd(uc32 c) {
}
}
-
TEST(RangeCanonicalization) {
// Check that we arrive at the same result when using the basic
// range canonicalization primitives as when using immediate
@@ -1394,7 +1377,6 @@ TEST(RangeCanonicalization) {
}
}
-
TEST(UncanonicalizeEquivalence) {
unibrow::Mapping<unibrow::Ecma262UnCanonicalize> un_canonicalize;
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
@@ -1428,7 +1410,6 @@ static void TestRangeCaseIndependence(Isolate* isolate, CharacterRange input,
}
}
-
static void TestSimpleRangeCaseIndependence(Isolate* isolate,
CharacterRange input,
CharacterRange expected) {
@@ -1437,7 +1418,6 @@ static void TestSimpleRangeCaseIndependence(Isolate* isolate,
TestRangeCaseIndependence(isolate, input, vector);
}
-
TEST(CharacterRangeCaseIndependence) {
Isolate* isolate = CcTest::i_isolate();
TestSimpleRangeCaseIndependence(isolate, CharacterRange::Singleton('a'),
@@ -1480,8 +1460,7 @@ static bool InClass(uc32 c,
if (ranges == nullptr) return false;
for (size_t i = 0; i < ranges->size(); i++) {
CharacterRange range = ranges->at(i);
- if (range.from() <= c && c <= range.to())
- return true;
+ if (range.from() <= c && c <= range.to()) return true;
}
return false;
}
@@ -1528,7 +1507,6 @@ TEST(UnicodeRangeSplitter) {
}
}
-
TEST(CanonicalizeCharacterSets) {
Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
ZoneList<CharacterRange>* list = zone.New<ZoneList<CharacterRange>>(4, &zone);
@@ -1588,7 +1566,6 @@ TEST(CanonicalizeCharacterSets) {
CHECK_EQ(30, list->at(0).to());
}
-
TEST(CharacterRangeMerge) {
Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
ZoneList<CharacterRange> l1(4, &zone);
@@ -1675,11 +1652,7 @@ TEST(CharacterRangeMerge) {
ZoneList<CharacterRange> both(4, &zone);
}
-
-TEST(Graph) {
- Execute("\\b\\w+\\b", false, true, true);
-}
-
+TEST(Graph) { Execute("\\b\\w+\\b", false, true, true); }
namespace {
@@ -1689,7 +1662,7 @@ void MockUseCounterCallback(v8::Isolate* isolate,
v8::Isolate::UseCounterFeature feature) {
++global_use_counts[feature];
}
-}
+} // namespace
// Test that ES2015+ RegExp compatibility fixes are in place, that they
// are not overly broad, and the appropriate UseCounters are incremented
diff --git a/deps/v8/test/cctest/test-sampler-api.cc b/deps/v8/test/cctest/test-sampler-api.cc
index 5ffa1e0544..179caa86cd 100644
--- a/deps/v8/test/cctest/test-sampler-api.cc
+++ b/deps/v8/test/cctest/test-sampler-api.cc
@@ -141,7 +141,6 @@ SamplingTestHelper* SamplingTestHelper::instance_;
} // namespace
-
// A JavaScript function which takes stack depth
// (minimum value 2) as an argument.
// When at the bottom of the recursion,
@@ -153,19 +152,16 @@ static const char* test_function =
" else return func(depth - 1);"
"}";
-
TEST(StackDepthIsConsistent) {
SamplingTestHelper helper(std::string(test_function) + "func(8);");
CHECK_EQ(8, helper.sample().size());
}
-
TEST(StackDepthDoesNotExceedMaxValue) {
SamplingTestHelper helper(std::string(test_function) + "func(300);");
CHECK_EQ(Sample::kFramesLimit, helper.sample().size());
}
-
// The captured sample should have three pc values.
// They should fall in the range where the compiled code resides.
// The expected stack is:
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 05eae84b6d..cf66f54f4f 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -1479,7 +1479,7 @@ v8::StartupData CreateCustomSnapshotWithKeep() {
v8::Local<v8::String> source_str = v8_str(
"function f() { return Math.abs(1); }\n"
"function g() { return String.raw(1); }");
- v8::ScriptOrigin origin(v8_str("test"));
+ v8::ScriptOrigin origin(isolate, v8_str("test"));
v8::ScriptCompiler::Source source(source_str, origin);
CompileRun(isolate->GetCurrentContext(), &source,
v8::ScriptCompiler::kEagerCompile);
@@ -2114,28 +2114,28 @@ TEST(CodeSerializerExternalString) {
v8::HandleScope scope(CcTest::isolate());
// Obtain external internalized one-byte string.
- SerializerOneByteResource one_byte_resource("one_byte_but_long", 17);
+ SerializerOneByteResource one_byte_resource("one_byte", 8);
Handle<String> one_byte_string =
- isolate->factory()->NewStringFromAsciiChecked("one_byte_but_long");
+ isolate->factory()->NewStringFromAsciiChecked("one_byte");
one_byte_string = isolate->factory()->InternalizeString(one_byte_string);
one_byte_string->MakeExternal(&one_byte_resource);
CHECK(one_byte_string->IsExternalOneByteString());
CHECK(one_byte_string->IsInternalizedString());
// Obtain external internalized two-byte string.
- SerializerTwoByteResource two_byte_resource("two_byte_but_long", 17);
+ SerializerTwoByteResource two_byte_resource("two_byte", 8);
Handle<String> two_byte_string =
- isolate->factory()->NewStringFromAsciiChecked("two_byte_but_long");
+ isolate->factory()->NewStringFromAsciiChecked("two_byte");
two_byte_string = isolate->factory()->InternalizeString(two_byte_string);
two_byte_string->MakeExternal(&two_byte_resource);
CHECK(two_byte_string->IsExternalTwoByteString());
CHECK(two_byte_string->IsInternalizedString());
const char* source =
- "var o = {} \n"
- "o.one_byte_but_long = 7; \n"
- "o.two_byte_but_long = 8; \n"
- "o.one_byte_but_long + o.two_byte_but_long; \n";
+ "var o = {} \n"
+ "o.one_byte = 7; \n"
+ "o.two_byte = 8; \n"
+ "o.one_byte + o.two_byte; \n";
Handle<String> source_string = isolate->factory()
->NewStringFromUtf8(CStrVector(source))
.ToHandleChecked();
@@ -2311,7 +2311,7 @@ v8::ScriptCompiler::CachedData* CompileRunAndProduceCache(
v8::Context::Scope context_scope(context);
v8::Local<v8::String> source_str = v8_str(source);
- v8::ScriptOrigin origin(v8_str("test"));
+ v8::ScriptOrigin origin(isolate1, v8_str("test"));
v8::ScriptCompiler::Source source(source_str, origin);
v8::ScriptCompiler::CompileOptions options;
switch (cacheType) {
@@ -2367,7 +2367,7 @@ TEST(CodeSerializerIsolates) {
v8::Context::Scope context_scope(context);
v8::Local<v8::String> source_str = v8_str(source);
- v8::ScriptOrigin origin(v8_str("test"));
+ v8::ScriptOrigin origin(isolate2, v8_str("test"));
v8::ScriptCompiler::Source source(source_str, origin, cache);
v8::Local<v8::UnboundScript> script;
{
@@ -2413,7 +2413,7 @@ TEST(CodeSerializerIsolatesEager) {
v8::Context::Scope context_scope(context);
v8::Local<v8::String> source_str = v8_str(source);
- v8::ScriptOrigin origin(v8_str("test"));
+ v8::ScriptOrigin origin(isolate2, v8_str("test"));
v8::ScriptCompiler::Source source(source_str, origin, cache);
v8::Local<v8::UnboundScript> script;
{
@@ -2456,7 +2456,7 @@ TEST(CodeSerializerAfterExecute) {
v8::Context::Scope context_scope(context);
v8::Local<v8::String> source_str = v8_str(source);
- v8::ScriptOrigin origin(v8_str("test"));
+ v8::ScriptOrigin origin(isolate2, v8_str("test"));
v8::ScriptCompiler::Source source(source_str, origin, cache);
v8::Local<v8::UnboundScript> script;
{
@@ -2507,7 +2507,7 @@ TEST(CodeSerializerFlagChange) {
v8::Context::Scope context_scope(context);
v8::Local<v8::String> source_str = v8_str(source);
- v8::ScriptOrigin origin(v8_str("test"));
+ v8::ScriptOrigin origin(isolate2, v8_str("test"));
v8::ScriptCompiler::Source source(source_str, origin, cache);
v8::ScriptCompiler::CompileUnboundScript(
isolate2, &source, v8::ScriptCompiler::kConsumeCodeCache)
@@ -2536,7 +2536,7 @@ TEST(CodeSerializerBitFlip) {
v8::Context::Scope context_scope(context);
v8::Local<v8::String> source_str = v8_str(source);
- v8::ScriptOrigin origin(v8_str("test"));
+ v8::ScriptOrigin origin(isolate2, v8_str("test"));
v8::ScriptCompiler::Source source(source_str, origin, cache);
v8::ScriptCompiler::CompileUnboundScript(
isolate2, &source, v8::ScriptCompiler::kConsumeCodeCache)
@@ -2566,7 +2566,7 @@ TEST(CodeSerializerWithHarmonyScoping) {
CompileRun(source2);
v8::Local<v8::String> source_str = v8_str(source3);
- v8::ScriptOrigin origin(v8_str("test"));
+ v8::ScriptOrigin origin(isolate1, v8_str("test"));
v8::ScriptCompiler::Source source(source_str, origin);
v8::Local<v8::UnboundScript> script =
v8::ScriptCompiler::CompileUnboundScript(
@@ -2597,7 +2597,7 @@ TEST(CodeSerializerWithHarmonyScoping) {
CompileRun(source1);
v8::Local<v8::String> source_str = v8_str(source3);
- v8::ScriptOrigin origin(v8_str("test"));
+ v8::ScriptOrigin origin(isolate2, v8_str("test"));
v8::ScriptCompiler::Source source(source_str, origin, cache);
v8::Local<v8::UnboundScript> script;
{
diff --git a/deps/v8/test/cctest/test-simple-riscv64.cc b/deps/v8/test/cctest/test-simple-riscv64.cc
new file mode 100644
index 0000000000..991c6c09da
--- /dev/null
+++ b/deps/v8/test/cctest/test-simple-riscv64.cc
@@ -0,0 +1,253 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <iostream> // NOLINT(readability/streams)
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/simulator.h"
+#include "src/heap/factory.h"
+#include "src/init/v8.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+
+// Define these function prototypes to match JSEntryFunction in execution.cc.
+// TODO(riscv64): Refine these signatures per test case.
+using F1 = void*(int x, int p1, int p2, int p3, int p4);
+using F2 = void*(int x, int y, int p2, int p3, int p4);
+using F3 = void*(void* p, int p1, int p2, int p3, int p4);
+using F4 = void*(int64_t x, int64_t y, int64_t p2, int64_t p3, int64_t p4);
+using F5 = void*(void* p0, void* p1, int p2, int p3, int p4);
+
+#define __ assm.
+
+TEST(RISCV_SIMPLE0) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ // Addition.
+ __ add(a0, a0, a1);
+ __ jr(ra);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ int64_t res = reinterpret_cast<int64_t>(f.Call(0xAB0, 0xC, 0, 0, 0));
+ CHECK_EQ(0xABCL, res);
+}
+
+TEST(RISCV_SIMPLE1) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ // Addition.
+ __ addi(a0, a0, -1);
+ __ jr(ra);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ int64_t res = reinterpret_cast<int64_t>(f.Call(100, 0, 0, 0, 0));
+ CHECK_EQ(99L, res);
+}
+
+// Loop 100 times, adding loop counter to result
+TEST(RISCV_SIMPLE2) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+ Label L, C;
+ // input a0, result a1
+ __ mv(a1, a0);
+ __ RV_li(a0, 0);
+ __ j(&C);
+
+ __ bind(&L);
+
+ __ add(a0, a0, a1);
+ __ addi(a1, a1, -1);
+
+ __ bind(&C);
+ __ bgtz(a1, &L);
+ __ jr(ra);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+#ifdef DEBUG
+ code->Print();
+#endif
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ int64_t res = reinterpret_cast<int64_t>(f.Call(100, 0, 0, 0, 0));
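+  // 1 + 2 + ... + 100 = 5050.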
+ CHECK_EQ(5050, res);
+}
+
+// Test part of Load and Store
+TEST(RISCV_SIMPLE3) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ __ sb(a0, sp, -4);
+ __ lb(a0, sp, -4);
+ __ jr(ra);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ int64_t res = reinterpret_cast<int64_t>(f.Call(255, 0, 0, 0, 0));
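+  // lb sign-extends the stored byte, so 255 is read back as -1.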
+ CHECK_EQ(-1, res);
+}
+
+// Test loading immediates of various sizes
+TEST(LI) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+ Label error;
+
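+  // Each case loads an immediate and its negation and checks that they sum
+  // to zero.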
+ // Load 0
+ __ RV_li(a0, 0l);
+ __ bnez(a0, &error);
+
+ // Load small number (<12 bits)
+ __ RV_li(a1, 5);
+ __ RV_li(a2, -5);
+ __ add(a0, a1, a2);
+ __ bnez(a0, &error);
+
+ // Load medium number (13-32 bits)
+ __ RV_li(a1, 124076833);
+ __ RV_li(a2, -124076833);
+ __ add(a0, a1, a2);
+ __ bnez(a0, &error);
+
+ // Load large number (33-64 bits)
+ __ RV_li(a1, 11649936536080);
+ __ RV_li(a2, -11649936536080);
+ __ add(a0, a1, a2);
+ __ bnez(a0, &error);
+
+ // Load large number (33-64 bits)
+ __ RV_li(a1, 1070935975390360080);
+ __ RV_li(a2, -1070935975390360080);
+ __ add(a0, a1, a2);
+ __ bnez(a0, &error);
+
+ __ mv(a0, zero_reg);
+ __ jr(ra);
+
+ __ bind(&error);
+ __ jr(ra);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ int64_t res = reinterpret_cast<int64_t>(f.Call(0xDEADBEEF, 0, 0, 0, 0));
+ CHECK_EQ(0L, res);
+}
+
+TEST(LI_CONST) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+ Label error;
+
+ // Load 0
+ __ li_constant(a0, 0l);
+ __ bnez(a0, &error);
+
+ // Load small number (<12 bits)
+ __ li_constant(a1, 5);
+ __ li_constant(a2, -5);
+ __ add(a0, a1, a2);
+ __ bnez(a0, &error);
+
+ // Load medium number (13-32 bits)
+ __ li_constant(a1, 124076833);
+ __ li_constant(a2, -124076833);
+ __ add(a0, a1, a2);
+ __ bnez(a0, &error);
+
+ // Load large number (33-64 bits)
+ __ li_constant(a1, 11649936536080);
+ __ li_constant(a2, -11649936536080);
+ __ add(a0, a1, a2);
+ __ bnez(a0, &error);
+
+ // Load large number (33-64 bits)
+ __ li_constant(a1, 1070935975390360080);
+ __ li_constant(a2, -1070935975390360080);
+ __ add(a0, a1, a2);
+ __ bnez(a0, &error);
+
+ __ mv(a0, zero_reg);
+ __ jr(ra);
+
+ __ bind(&error);
+ __ jr(ra);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ int64_t res = reinterpret_cast<int64_t>(f.Call(0xDEADBEEF, 0, 0, 0, 0));
+ CHECK_EQ(0L, res);
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index dc792f4b03..41aa707231 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -39,7 +39,6 @@
#include "src/heap/heap-inl.h"
#include "src/init/v8.h"
#include "src/objects/objects-inl.h"
-#include "src/objects/string.h"
#include "src/strings/unicode-decoder.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
@@ -1902,6 +1901,20 @@ TEST(StringEquals) {
CHECK(!bar_str->StringEquals(foo_str2));
}
+class OneByteStringResource : public v8::String::ExternalOneByteStringResource {
+ public:
+ // Takes ownership of |data|.
+ OneByteStringResource(char* data, size_t length)
+ : data_(data), length_(length) {}
+ ~OneByteStringResource() override { delete[] data_; }
+ const char* data() const override { return data_; }
+ size_t length() const override { return length_; }
+
+ private:
+ char* data_;
+ size_t length_;
+};
+
TEST(Regress876759) {
// Thin strings are used in conjunction with young gen
if (FLAG_single_generation) return;
@@ -1936,9 +1949,9 @@ TEST(Regress876759) {
Handle<String> grandparent =
handle(ThinString::cast(*parent).actual(), isolate);
CHECK_EQ(*parent, SlicedString::cast(*sliced).parent());
- OneByteResource* resource =
- new OneByteResource(external_one_byte_buf, kLength);
- CHECK(grandparent->MakeExternal(resource));
+ OneByteStringResource* resource =
+ new OneByteStringResource(external_one_byte_buf, kLength);
+ grandparent->MakeExternal(resource);
// The grandparent string becomes one-byte, but the child strings are still
// two-byte.
CHECK(grandparent->IsOneByteRepresentation());
@@ -1948,71 +1961,6 @@ TEST(Regress876759) {
CHECK(String::IsOneByteRepresentationUnderneath(*sliced));
}
-// Show that small internal strings are not externalizable since it would make
-// them external and uncached through MakeExternal. One byte version.
-TEST(MakeExternalCreationFailureOneByte) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- Factory* factory = isolate->factory();
-// Due to different size restrictions the string needs to be small but not too
-// small. One of these restrictions is whether pointer compression is enabled.
-#ifdef V8_COMPRESS_POINTERS
- const char* raw_small = "small string";
-#elif V8_TARGET_ARCH_32_BIT
- const char* raw_small = "smol";
-#else
- const char* raw_small = "smalls";
-#endif // V8_COMPRESS_POINTERS
-
- HandleScope handle_scope(isolate);
- Handle<String> one_byte_string =
- factory->InternalizeString(factory->NewStringFromAsciiChecked(raw_small));
- CHECK(one_byte_string->IsOneByteRepresentation());
- CHECK(!one_byte_string->IsExternalString());
- CHECK(!one_byte_string->SupportsExternalization());
-}
-
-// Show that small internal strings are not externalizable since it would make
-// them external and uncached through MakeExternal. Two byte version.
-TEST(MakeExternalCreationFailureTwoByte) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- Factory* factory = isolate->factory();
- // Due to different size restrictions the string needs to be small but not too
- // small.
- const char* raw_small = "smalls";
- const int kLength = 6;
- DCHECK_EQ(kLength, strlen(raw_small));
- const uint16_t two_byte_array[kLength] = {'s', 'm', 'a', 'l', 'l', 's'};
-
- HandleScope handle_scope(isolate);
- Handle<String> two_bytes_string;
- {
- Handle<SeqTwoByteString> raw =
- factory->NewRawTwoByteString(kLength).ToHandleChecked();
- DisallowGarbageCollection no_gc;
- CopyChars(raw->GetChars(no_gc), two_byte_array, kLength);
- two_bytes_string = raw;
- }
- two_bytes_string = factory->InternalizeString(two_bytes_string);
- CHECK(two_bytes_string->IsTwoByteRepresentation());
- CHECK(!two_bytes_string->IsExternalString());
- if (COMPRESS_POINTERS_BOOL) {
- CHECK(!two_bytes_string->SupportsExternalization());
- } else {
- // Without pointer compression, there is no string size that can cause a
- // failure for a two byte string. It needs to be bigger than 5 chars to
- // support externalization, but at that point is bigger than the limit and
- // it is not uncached anymore.
- // As a note, since pointer compression is only enabled for 64 bits, all
- // target 32 bit archs fall in this case.
- CHECK(two_bytes_string->MakeExternal(
- new Resource(AsciiToTwoByteString(raw_small), strlen(raw_small))));
- auto external = Handle<ExternalString>::cast(two_bytes_string);
- CHECK(!external->is_uncached());
- }
-}
-
// Show that it is possible to internalize an external string without a copy, as
// long as it is not uncached.
TEST(InternalizeExternalString) {
@@ -2142,6 +2090,7 @@ TEST(InternalizeExternalStringUncachedWithCopyTwoByte) {
CHECK(external->is_uncached());
   // Internalize successfully, with a copy.
+ CHECK(!external->IsInternalizedString());
Handle<String> internal = factory->InternalizeString(external);
CHECK(!external->IsInternalizedString());
CHECK(internal->IsInternalizedString());
diff --git a/deps/v8/test/cctest/test-swiss-name-dictionary.cc b/deps/v8/test/cctest/test-swiss-name-dictionary.cc
new file mode 100644
index 0000000000..e274eed358
--- /dev/null
+++ b/deps/v8/test/cctest/test-swiss-name-dictionary.cc
@@ -0,0 +1,81 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/swiss-name-dictionary-inl.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+namespace test_swiss_hash_table {
+
+TEST(CapacityFor) {
+ for (int elements = 0; elements <= 32; elements++) {
+ int capacity = SwissNameDictionary::CapacityFor(elements);
+ if (elements == 0) {
+ CHECK_EQ(0, capacity);
+ } else if (elements <= 3) {
+ CHECK_EQ(4, capacity);
+ } else if (elements == 4) {
+ CHECK_IMPLIES(SwissNameDictionary::kGroupWidth == 8, capacity == 8);
+ CHECK_IMPLIES(SwissNameDictionary::kGroupWidth == 16, capacity == 4);
+ } else if (elements <= 7) {
+ CHECK_EQ(8, capacity);
+ } else if (elements <= 14) {
+ CHECK_EQ(16, capacity);
+ } else if (elements <= 28) {
+ CHECK_EQ(32, capacity);
+ } else if (elements <= 32) {
+ CHECK_EQ(64, capacity);
+ }
+ }
+}
+
+TEST(MaxUsableCapacity) {
+ CHECK_EQ(0, SwissNameDictionary::MaxUsableCapacity(0));
+ CHECK_IMPLIES(SwissNameDictionary::kGroupWidth == 8,
+ SwissNameDictionary::MaxUsableCapacity(4) == 3);
+ CHECK_IMPLIES(SwissNameDictionary::kGroupWidth == 16,
+ SwissNameDictionary::MaxUsableCapacity(4) == 4);
+ CHECK_EQ(7, SwissNameDictionary::MaxUsableCapacity(8));
+ CHECK_EQ(14, SwissNameDictionary::MaxUsableCapacity(16));
+ CHECK_EQ(28, SwissNameDictionary::MaxUsableCapacity(32));
+}
+
+TEST(SizeFor) {
+ int baseline = HeapObject::kHeaderSize +
+ // prefix:
+ 4 +
+ // capacity:
+ 4 +
+ // meta table:
+ kTaggedSize;
+
+ int size_0 = baseline +
+ // ctrl table:
+ SwissNameDictionary::kGroupWidth;
+
+ int size_4 = baseline +
+ // data table:
+ 4 * 2 * kTaggedSize +
+ // ctrl table:
+ 4 + SwissNameDictionary::kGroupWidth +
+ // property details table:
+ 4;
+
+ int size_8 = baseline +
+ // data table:
+ 8 * 2 * kTaggedSize +
+ // ctrl table:
+ 8 + SwissNameDictionary::kGroupWidth +
+ // property details table:
+ 8;
+
+ CHECK_EQ(SwissNameDictionary::SizeFor(0), size_0);
+ CHECK_EQ(SwissNameDictionary::SizeFor(4), size_4);
+ CHECK_EQ(SwissNameDictionary::SizeFor(8), size_8);
+}
+
+} // namespace test_swiss_hash_table
+} // namespace internal
+} // namespace v8
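The expectations in TEST(SizeFor) above are plain layout arithmetic: an object header, two 4-byte words for the prefix and the capacity, a tagged pointer to the meta table, then per-slot data, ctrl and property-details bytes. A rough standalone sketch of that arithmetic follows; the constants are illustrative assumptions, not V8's actual values.

// Sketch only: mirrors the layout arithmetic exercised by TEST(SizeFor) above.
// All constants are assumed placeholder values for illustration.
#include <cstdio>

constexpr int kTaggedSizeSketch = 8;   // assumption: 64-bit tagged word
constexpr int kHeaderSizeSketch = 8;   // assumption: object header size
constexpr int kGroupWidthSketch = 16;  // assumption: ctrl-table group width

constexpr int SizeForSketch(int capacity) {
  int baseline = kHeaderSizeSketch + 4 /* prefix */ + 4 /* capacity */ +
                 kTaggedSizeSketch /* meta table */;
  if (capacity == 0) return baseline + kGroupWidthSketch;  // ctrl table only
  return baseline +
         capacity * 2 * kTaggedSizeSketch +  // data table: key/value per slot
         capacity + kGroupWidthSketch +      // ctrl table
         capacity;                           // property details table
}

int main() {
  // With the assumed constants this prints 40 112 184.
  std::printf("%d %d %d\n", SizeForSketch(0), SizeForSketch(4),
              SizeForSketch(8));
  return 0;
}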
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
deleted file mode 100644
index 11db542e30..0000000000
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ /dev/null
@@ -1,1642 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stdlib.h>
-#include <utility>
-
-#include "src/init/v8.h"
-
-#include "src/api/api-inl.h"
-#include "src/base/overflowing-math.h"
-#include "src/builtins/accessors.h"
-#include "src/codegen/compilation-cache.h"
-#include "src/execution/execution.h"
-#include "src/handles/global-handles.h"
-#include "src/heap/factory.h"
-#include "src/heap/heap-inl.h"
-#include "src/heap/incremental-marking.h"
-#include "src/heap/spaces.h"
-#include "src/ic/ic.h"
-#include "src/objects/api-callbacks.h"
-#include "src/objects/field-type.h"
-#include "src/objects/heap-number-inl.h"
-#include "src/objects/layout-descriptor.h"
-#include "src/objects/objects-inl.h"
-#include "src/objects/property.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/heap/heap-utils.h"
-
-namespace v8 {
-namespace internal {
-namespace test_unboxed_doubles {
-
-#if V8_DOUBLE_FIELDS_UNBOXING
-
-
-//
-// Helper functions.
-//
-
-static void InitializeVerifiedMapDescriptors(
- Isolate* isolate, Map map, DescriptorArray descriptors,
- LayoutDescriptor layout_descriptor) {
- map.InitializeDescriptors(isolate, descriptors, layout_descriptor);
- CHECK(layout_descriptor.IsConsistentWithMap(map, true));
-}
-
-Handle<JSObject> GetObject(const char* name) {
- return Handle<JSObject>::cast(
- v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(
- CcTest::global()
- ->Get(v8::Isolate::GetCurrent()->GetCurrentContext(),
- v8_str(name))
- .ToLocalChecked())));
-}
-
-static double GetDoubleFieldValue(JSObject obj, FieldIndex field_index) {
- if (obj.IsUnboxedDoubleField(field_index)) {
- return obj.RawFastDoublePropertyAt(field_index);
- } else {
- Object value = obj.RawFastPropertyAt(field_index);
- CHECK(value.IsHeapNumber());
- return HeapNumber::cast(value).value();
- }
-}
-
-void WriteToField(JSObject object, int index, Object value) {
- DescriptorArray descriptors = object.map().instance_descriptors(kRelaxedLoad);
- InternalIndex descriptor(index);
- PropertyDetails details = descriptors.GetDetails(descriptor);
- object.WriteToField(descriptor, details, value);
-}
-
-const int kNumberOfBits = 32;
-const int kBitsInSmiLayout = SmiValuesAre32Bits() ? 32 : kSmiValueSize - 1;
-
-enum TestPropertyKind {
- PROP_ACCESSOR_INFO,
- PROP_SMI,
- PROP_DOUBLE,
- PROP_TAGGED,
- PROP_KIND_NUMBER
-};
-
-static Representation representations[PROP_KIND_NUMBER] = {
- Representation::None(), Representation::Smi(), Representation::Double(),
- Representation::Tagged()};
-
-
-static Handle<DescriptorArray> CreateDescriptorArray(Isolate* isolate,
- TestPropertyKind* props,
- int kPropsCount) {
- Factory* factory = isolate->factory();
-
- Handle<DescriptorArray> descriptors =
- DescriptorArray::Allocate(isolate, 0, kPropsCount);
-
- int next_field_offset = 0;
- for (int i = 0; i < kPropsCount; i++) {
- EmbeddedVector<char, 64> buffer;
- SNPrintF(buffer, "prop%d", i);
- Handle<String> name = factory->InternalizeUtf8String(buffer.begin());
-
- TestPropertyKind kind = props[i];
-
- Descriptor d;
- if (kind == PROP_ACCESSOR_INFO) {
- Handle<AccessorInfo> info =
- Accessors::MakeAccessor(isolate, name, nullptr, nullptr);
- d = Descriptor::AccessorConstant(name, info, NONE);
-
- } else {
- d = Descriptor::DataField(isolate, name, next_field_offset, NONE,
- representations[kind]);
- }
- descriptors->Append(&d);
- PropertyDetails details = d.GetDetails();
- if (details.location() == kField) {
- next_field_offset += details.field_width_in_words();
- }
- }
- return descriptors;
-}
-
-
-TEST(LayoutDescriptorBasicFast) {
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
-
- LayoutDescriptor layout_desc = LayoutDescriptor::FastPointerLayout();
-
- CHECK(!layout_desc.IsSlowLayout());
- CHECK(layout_desc.IsFastPointerLayout());
- CHECK_EQ(kBitsInSmiLayout, layout_desc.capacity());
-
- for (int i = 0; i < kBitsInSmiLayout + 13; i++) {
- CHECK(layout_desc.IsTagged(i));
- }
- CHECK(layout_desc.IsTagged(-1));
- CHECK(layout_desc.IsTagged(-12347));
- CHECK(layout_desc.IsTagged(15635));
- CHECK(layout_desc.IsFastPointerLayout());
-
- for (int i = 0; i < kBitsInSmiLayout; i++) {
- layout_desc = layout_desc.SetTaggedForTesting(i, false);
- CHECK(!layout_desc.IsTagged(i));
- layout_desc = layout_desc.SetTaggedForTesting(i, true);
- CHECK(layout_desc.IsTagged(i));
- }
- CHECK(layout_desc.IsFastPointerLayout());
-
- int sequence_length;
- CHECK_EQ(true, layout_desc.IsTagged(0, std::numeric_limits<int>::max(),
- &sequence_length));
- CHECK_EQ(std::numeric_limits<int>::max(), sequence_length);
-
- CHECK(layout_desc.IsTagged(0, 7, &sequence_length));
- CHECK_EQ(7, sequence_length);
-}
-
-
-TEST(LayoutDescriptorBasicSlow) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- v8::HandleScope scope(CcTest::isolate());
-
- Handle<LayoutDescriptor> layout_descriptor;
- const int kPropsCount = kBitsInSmiLayout * 3;
- TestPropertyKind props[kPropsCount];
- for (int i = 0; i < kPropsCount; i++) {
- // All properties tagged.
- props[i] = PROP_TAGGED;
- }
-
- {
- Handle<DescriptorArray> descriptors =
- CreateDescriptorArray(isolate, props, kPropsCount);
-
- Handle<Map> map = Map::Create(isolate, kPropsCount);
-
- layout_descriptor =
- LayoutDescriptor::New(isolate, map, descriptors, kPropsCount);
- CHECK_EQ(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
- CHECK_EQ(kBitsInSmiLayout, layout_descriptor->capacity());
- InitializeVerifiedMapDescriptors(isolate, *map, *descriptors,
- *layout_descriptor);
- }
-
- props[0] = PROP_DOUBLE;
- props[kPropsCount - 1] = PROP_DOUBLE;
-
- Handle<DescriptorArray> descriptors =
- CreateDescriptorArray(isolate, props, kPropsCount);
-
- {
- int inobject_properties = kPropsCount - 1;
- Handle<Map> map = Map::Create(isolate, inobject_properties);
-
- // Should be fast as the only double property is the first one.
- layout_descriptor =
- LayoutDescriptor::New(isolate, map, descriptors, kPropsCount);
- CHECK_NE(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
- CHECK(!layout_descriptor->IsSlowLayout());
- CHECK(!layout_descriptor->IsFastPointerLayout());
-
- CHECK(!layout_descriptor->IsTagged(0));
- for (int i = 1; i < kPropsCount; i++) {
- CHECK(layout_descriptor->IsTagged(i));
- }
- InitializeVerifiedMapDescriptors(isolate, *map, *descriptors,
- *layout_descriptor);
- }
-
- {
- int inobject_properties = kPropsCount;
- Handle<Map> map = Map::Create(isolate, inobject_properties);
-
- layout_descriptor =
- LayoutDescriptor::New(isolate, map, descriptors, kPropsCount);
- CHECK_NE(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
- CHECK(layout_descriptor->IsSlowLayout());
- CHECK(!layout_descriptor->IsFastPointerLayout());
- CHECK_GT(layout_descriptor->capacity(), kBitsInSmiLayout);
-
- CHECK(!layout_descriptor->IsTagged(0));
- CHECK(!layout_descriptor->IsTagged(kPropsCount - 1));
- for (int i = 1; i < kPropsCount - 1; i++) {
- CHECK(layout_descriptor->IsTagged(i));
- }
-
- InitializeVerifiedMapDescriptors(isolate, *map, *descriptors,
- *layout_descriptor);
-
- // Here we have truly slow layout descriptor, so play with the bits.
- CHECK(layout_descriptor->IsTagged(-1));
- CHECK(layout_descriptor->IsTagged(-12347));
- CHECK(layout_descriptor->IsTagged(15635));
-
- LayoutDescriptor layout_desc = *layout_descriptor;
- // Play with the bits but leave it in consistent state with map at the end.
- for (int i = 1; i < kPropsCount - 1; i++) {
- layout_desc = layout_desc.SetTaggedForTesting(i, false);
- CHECK(!layout_desc.IsTagged(i));
- layout_desc = layout_desc.SetTaggedForTesting(i, true);
- CHECK(layout_desc.IsTagged(i));
- }
- CHECK(layout_desc.IsSlowLayout());
- CHECK(!layout_desc.IsFastPointerLayout());
- CHECK(layout_descriptor->IsConsistentWithMap(*map, true));
- }
-}
-
-
-static void TestLayoutDescriptorQueries(int layout_descriptor_length,
- int* bit_flip_positions,
- int max_sequence_length) {
- Handle<LayoutDescriptor> layout_descriptor = LayoutDescriptor::NewForTesting(
- CcTest::i_isolate(), layout_descriptor_length);
- layout_descriptor_length = layout_descriptor->capacity();
- LayoutDescriptor layout_desc = *layout_descriptor;
-
- {
- // Fill in the layout descriptor.
- int cur_bit_flip_index = 0;
- bool tagged = true;
- for (int i = 0; i < layout_descriptor_length; i++) {
- if (i == bit_flip_positions[cur_bit_flip_index]) {
- tagged = !tagged;
- ++cur_bit_flip_index;
- CHECK(i < bit_flip_positions[cur_bit_flip_index]); // check test data
- }
- layout_desc = layout_desc.SetTaggedForTesting(i, tagged);
- }
- }
-
- if (layout_desc.IsFastPointerLayout()) {
- return;
- }
-
- {
- // Check queries.
- int cur_bit_flip_index = 0;
- bool tagged = true;
- for (int i = 0; i < layout_descriptor_length; i++) {
- if (i == bit_flip_positions[cur_bit_flip_index]) {
- tagged = !tagged;
- ++cur_bit_flip_index;
- }
- CHECK_EQ(tagged, layout_desc.IsTagged(i));
-
- int next_bit_flip_position = bit_flip_positions[cur_bit_flip_index];
- int expected_sequence_length;
- if (next_bit_flip_position < layout_desc.capacity()) {
- expected_sequence_length = next_bit_flip_position - i;
- } else {
- expected_sequence_length = tagged ? std::numeric_limits<int>::max()
- : (layout_desc.capacity() - i);
- }
- expected_sequence_length =
- std::min(expected_sequence_length, max_sequence_length);
- int sequence_length;
- CHECK_EQ(tagged,
- layout_desc.IsTagged(i, max_sequence_length, &sequence_length));
- CHECK_GT(sequence_length, 0);
-
- CHECK_EQ(expected_sequence_length, sequence_length);
- }
-
- int sequence_length;
- CHECK_EQ(true, layout_desc.IsTagged(layout_descriptor_length,
- max_sequence_length, &sequence_length));
- CHECK_EQ(max_sequence_length, sequence_length);
- }
-}
-
-
-static void TestLayoutDescriptorQueriesFast(int max_sequence_length) {
- {
- LayoutDescriptor layout_desc = LayoutDescriptor::FastPointerLayout();
- int sequence_length;
- for (int i = 0; i < kNumberOfBits; i++) {
- CHECK_EQ(true,
- layout_desc.IsTagged(i, max_sequence_length, &sequence_length));
- CHECK_GT(sequence_length, 0);
- CHECK_EQ(max_sequence_length, sequence_length);
- }
- }
-
- {
- int bit_flip_positions[] = {1000};
- TestLayoutDescriptorQueries(kBitsInSmiLayout, bit_flip_positions,
- max_sequence_length);
- }
-
- {
- int bit_flip_positions[] = {0, 1000};
- TestLayoutDescriptorQueries(kBitsInSmiLayout, bit_flip_positions,
- max_sequence_length);
- }
-
- {
- int bit_flip_positions[kNumberOfBits + 1];
- for (int i = 0; i <= kNumberOfBits; i++) {
- bit_flip_positions[i] = i;
- }
- TestLayoutDescriptorQueries(kBitsInSmiLayout, bit_flip_positions,
- max_sequence_length);
- }
-
- {
- int bit_flip_positions[] = {3, 7, 8, 10, 15, 21, 30, 1000};
- TestLayoutDescriptorQueries(kBitsInSmiLayout, bit_flip_positions,
- max_sequence_length);
- }
-
- {
- int bit_flip_positions[] = {0, 1, 2, 3, 5, 7, 9,
- 12, 15, 18, 22, 26, 29, 1000};
- TestLayoutDescriptorQueries(kBitsInSmiLayout, bit_flip_positions,
- max_sequence_length);
- }
-}
-
-
-TEST(LayoutDescriptorQueriesFastLimited7) {
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
-
- TestLayoutDescriptorQueriesFast(7);
-}
-
-
-TEST(LayoutDescriptorQueriesFastLimited13) {
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
-
- TestLayoutDescriptorQueriesFast(13);
-}
-
-
-TEST(LayoutDescriptorQueriesFastUnlimited) {
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
-
- TestLayoutDescriptorQueriesFast(std::numeric_limits<int>::max());
-}
-
-
-static void TestLayoutDescriptorQueriesSlow(int max_sequence_length) {
- {
- int bit_flip_positions[] = {10000};
- TestLayoutDescriptorQueries(kMaxNumberOfDescriptors, bit_flip_positions,
- max_sequence_length);
- }
-
- {
- int bit_flip_positions[] = {0, 10000};
- TestLayoutDescriptorQueries(kMaxNumberOfDescriptors, bit_flip_positions,
- max_sequence_length);
- }
-
- {
- int bit_flip_positions[kMaxNumberOfDescriptors + 1];
- for (int i = 0; i < kMaxNumberOfDescriptors; i++) {
- bit_flip_positions[i] = i;
- }
- bit_flip_positions[kMaxNumberOfDescriptors] = 10000;
- TestLayoutDescriptorQueries(kMaxNumberOfDescriptors, bit_flip_positions,
- max_sequence_length);
- }
-
- {
- int bit_flip_positions[] = {3, 7, 8, 10, 15, 21, 30,
- 37, 54, 80, 99, 383, 10000};
- TestLayoutDescriptorQueries(kMaxNumberOfDescriptors, bit_flip_positions,
- max_sequence_length);
- }
-
- {
- int bit_flip_positions[] = {0, 10, 20, 30, 50, 70, 90,
- 120, 150, 180, 220, 260, 290, 10000};
- TestLayoutDescriptorQueries(kMaxNumberOfDescriptors, bit_flip_positions,
- max_sequence_length);
- }
-
- {
- int bit_flip_positions[kMaxNumberOfDescriptors + 1];
- int cur = 0;
- for (int i = 0; i < kMaxNumberOfDescriptors; i++) {
- bit_flip_positions[i] = cur;
- cur = base::MulWithWraparound((cur + 1), 2);
- }
- CHECK_LT(cur, 10000);
- bit_flip_positions[kMaxNumberOfDescriptors] = 10000;
- TestLayoutDescriptorQueries(kMaxNumberOfDescriptors, bit_flip_positions,
- max_sequence_length);
- }
-
- {
- int bit_flip_positions[kMaxNumberOfDescriptors + 1];
- int cur = 3;
- for (int i = 0; i < kMaxNumberOfDescriptors; i++) {
- bit_flip_positions[i] = cur;
- cur = base::MulWithWraparound((cur + 1), 2);
- }
- CHECK_LT(cur, 10000);
- bit_flip_positions[kMaxNumberOfDescriptors] = 10000;
- TestLayoutDescriptorQueries(kMaxNumberOfDescriptors, bit_flip_positions,
- max_sequence_length);
- }
-}
-
-
-TEST(LayoutDescriptorQueriesSlowLimited7) {
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
-
- TestLayoutDescriptorQueriesSlow(7);
-}
-
-
-TEST(LayoutDescriptorQueriesSlowLimited13) {
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
-
- TestLayoutDescriptorQueriesSlow(13);
-}
-
-
-TEST(LayoutDescriptorQueriesSlowLimited42) {
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
-
- TestLayoutDescriptorQueriesSlow(42);
-}
-
-
-TEST(LayoutDescriptorQueriesSlowUnlimited) {
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
-
- TestLayoutDescriptorQueriesSlow(std::numeric_limits<int>::max());
-}
-
-
-TEST(LayoutDescriptorCreateNewFast) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- v8::HandleScope scope(CcTest::isolate());
-
- Handle<LayoutDescriptor> layout_descriptor;
- TestPropertyKind props[] = {
- PROP_ACCESSOR_INFO,
- PROP_TAGGED, // field #0
- PROP_ACCESSOR_INFO,
- PROP_DOUBLE, // field #1
- PROP_ACCESSOR_INFO,
- PROP_TAGGED, // field #2
- PROP_ACCESSOR_INFO,
- };
- const int kPropsCount = arraysize(props);
-
- Handle<DescriptorArray> descriptors =
- CreateDescriptorArray(isolate, props, kPropsCount);
-
- {
- Handle<Map> map = Map::Create(isolate, 0);
- layout_descriptor =
- LayoutDescriptor::New(isolate, map, descriptors, kPropsCount);
- CHECK_EQ(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
- InitializeVerifiedMapDescriptors(isolate, *map, *descriptors,
- *layout_descriptor);
- }
-
- {
- Handle<Map> map = Map::Create(isolate, 1);
- layout_descriptor =
- LayoutDescriptor::New(isolate, map, descriptors, kPropsCount);
- CHECK_EQ(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
- InitializeVerifiedMapDescriptors(isolate, *map, *descriptors,
- *layout_descriptor);
- }
-
- {
- Handle<Map> map = Map::Create(isolate, 2);
- layout_descriptor =
- LayoutDescriptor::New(isolate, map, descriptors, kPropsCount);
- CHECK_NE(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
- CHECK(!layout_descriptor->IsSlowLayout());
- CHECK(layout_descriptor->IsTagged(0));
- CHECK(!layout_descriptor->IsTagged(1));
- CHECK(layout_descriptor->IsTagged(2));
- CHECK(layout_descriptor->IsTagged(125));
- InitializeVerifiedMapDescriptors(isolate, *map, *descriptors,
- *layout_descriptor);
- }
-}
-
-
-TEST(LayoutDescriptorCreateNewSlow) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- v8::HandleScope scope(CcTest::isolate());
-
- Handle<LayoutDescriptor> layout_descriptor;
- const int kPropsCount = kBitsInSmiLayout * 3;
- TestPropertyKind props[kPropsCount];
- for (int i = 0; i < kPropsCount; i++) {
- props[i] = static_cast<TestPropertyKind>(i % PROP_KIND_NUMBER);
- }
-
- Handle<DescriptorArray> descriptors =
- CreateDescriptorArray(isolate, props, kPropsCount);
-
- {
- Handle<Map> map = Map::Create(isolate, 0);
- layout_descriptor =
- LayoutDescriptor::New(isolate, map, descriptors, kPropsCount);
- CHECK_EQ(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
- InitializeVerifiedMapDescriptors(isolate, *map, *descriptors,
- *layout_descriptor);
- }
-
- {
- Handle<Map> map = Map::Create(isolate, 1);
- layout_descriptor =
- LayoutDescriptor::New(isolate, map, descriptors, kPropsCount);
- CHECK_EQ(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
- InitializeVerifiedMapDescriptors(isolate, *map, *descriptors,
- *layout_descriptor);
- }
-
- {
- Handle<Map> map = Map::Create(isolate, 2);
- layout_descriptor =
- LayoutDescriptor::New(isolate, map, descriptors, kPropsCount);
- CHECK_NE(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
- CHECK(!layout_descriptor->IsSlowLayout());
- CHECK(layout_descriptor->IsTagged(0));
- CHECK(!layout_descriptor->IsTagged(1));
- CHECK(layout_descriptor->IsTagged(2));
- CHECK(layout_descriptor->IsTagged(125));
- InitializeVerifiedMapDescriptors(isolate, *map, *descriptors,
- *layout_descriptor);
- }
-
- {
- int inobject_properties = kPropsCount / 2;
- Handle<Map> map = Map::Create(isolate, inobject_properties);
- layout_descriptor =
- LayoutDescriptor::New(isolate, map, descriptors, kPropsCount);
- CHECK_NE(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
- CHECK(layout_descriptor->IsSlowLayout());
- for (int i = 0; i < inobject_properties; i++) {
- // PROP_DOUBLE has index 1 among DATA properties.
- const bool tagged = (i % (PROP_KIND_NUMBER - 1)) != 1;
- CHECK_EQ(tagged, layout_descriptor->IsTagged(i));
- }
- // Every property after inobject_properties must be tagged.
- for (int i = inobject_properties; i < kPropsCount; i++) {
- CHECK(layout_descriptor->IsTagged(i));
- }
- InitializeVerifiedMapDescriptors(isolate, *map, *descriptors,
- *layout_descriptor);
-
- // Now test LayoutDescriptor::cast_gc_safe().
- Handle<LayoutDescriptor> layout_descriptor_copy =
- LayoutDescriptor::New(isolate, map, descriptors, kPropsCount);
-
- LayoutDescriptor layout_desc = *layout_descriptor;
- CHECK_EQ(layout_desc, LayoutDescriptor::cast(layout_desc));
- CHECK_EQ(layout_desc, LayoutDescriptor::cast_gc_safe(layout_desc));
- CHECK(layout_desc.IsSlowLayout());
- // Now make it look like a forwarding pointer to layout_descriptor_copy.
- MapWord map_word = layout_desc.map_word();
- CHECK(!map_word.IsForwardingAddress());
- layout_desc.set_map_word(
- MapWord::FromForwardingAddress(*layout_descriptor_copy));
- CHECK(layout_desc.map_word().IsForwardingAddress());
- CHECK_EQ(layout_desc, LayoutDescriptor::cast_gc_safe(layout_desc));
-
- // Restore it back.
- layout_desc.set_map_word(map_word);
- CHECK_EQ(layout_desc, LayoutDescriptor::cast(layout_desc));
- }
-}
-
-
-static Handle<LayoutDescriptor> TestLayoutDescriptorAppend(
- Isolate* isolate, int inobject_properties, TestPropertyKind* props,
- int kPropsCount) {
- Factory* factory = isolate->factory();
-
- Handle<DescriptorArray> descriptors =
- DescriptorArray::Allocate(isolate, 0, kPropsCount);
-
- Handle<Map> map = Map::Create(isolate, inobject_properties);
- map->InitializeDescriptors(isolate, *descriptors,
- LayoutDescriptor::FastPointerLayout());
-
- int next_field_offset = 0;
- for (int i = 0; i < kPropsCount; i++) {
- EmbeddedVector<char, 64> buffer;
- SNPrintF(buffer, "prop%d", i);
- Handle<String> name = factory->InternalizeUtf8String(buffer.begin());
-
- Handle<LayoutDescriptor> layout_descriptor;
- TestPropertyKind kind = props[i];
- Descriptor d;
- if (kind == PROP_ACCESSOR_INFO) {
- Handle<AccessorInfo> info =
- Accessors::MakeAccessor(isolate, name, nullptr, nullptr);
- d = Descriptor::AccessorConstant(name, info, NONE);
-
- } else {
- d = Descriptor::DataField(isolate, name, next_field_offset, NONE,
- representations[kind]);
- }
- PropertyDetails details = d.GetDetails();
- layout_descriptor = LayoutDescriptor::ShareAppend(isolate, map, details);
- descriptors->Append(&d);
- if (details.location() == kField) {
- int field_width_in_words = details.field_width_in_words();
- next_field_offset += field_width_in_words;
-
- int field_index = details.field_index();
- bool is_inobject = field_index < map->GetInObjectProperties();
- for (int bit = 0; bit < field_width_in_words; bit++) {
- CHECK_EQ(is_inobject && (kind == PROP_DOUBLE),
- !layout_descriptor->IsTagged(field_index + bit));
- }
- CHECK(layout_descriptor->IsTagged(next_field_offset));
- }
- map->InitializeDescriptors(isolate, *descriptors, *layout_descriptor);
- }
- Handle<LayoutDescriptor> layout_descriptor(
- map->layout_descriptor(kAcquireLoad), isolate);
- CHECK(layout_descriptor->IsConsistentWithMap(*map, true));
- return layout_descriptor;
-}
-
-
-TEST(LayoutDescriptorAppend) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- v8::HandleScope scope(CcTest::isolate());
-
- Handle<LayoutDescriptor> layout_descriptor;
- const int kPropsCount = kBitsInSmiLayout * 3;
- TestPropertyKind props[kPropsCount];
- for (int i = 0; i < kPropsCount; i++) {
- props[i] = static_cast<TestPropertyKind>(i % PROP_KIND_NUMBER);
- }
-
- layout_descriptor =
- TestLayoutDescriptorAppend(isolate, 0, props, kPropsCount);
- CHECK(!layout_descriptor->IsSlowLayout());
-
- layout_descriptor =
- TestLayoutDescriptorAppend(isolate, 13, props, kPropsCount);
- CHECK(!layout_descriptor->IsSlowLayout());
-
- layout_descriptor =
- TestLayoutDescriptorAppend(isolate, kBitsInSmiLayout, props, kPropsCount);
- CHECK(!layout_descriptor->IsSlowLayout());
-
- layout_descriptor = TestLayoutDescriptorAppend(isolate, kBitsInSmiLayout * 2,
- props, kPropsCount);
- CHECK(layout_descriptor->IsSlowLayout());
-
- layout_descriptor =
- TestLayoutDescriptorAppend(isolate, kPropsCount, props, kPropsCount);
- CHECK(layout_descriptor->IsSlowLayout());
-}
-
-
-TEST(LayoutDescriptorAppendAllDoubles) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- v8::HandleScope scope(CcTest::isolate());
-
- Handle<LayoutDescriptor> layout_descriptor;
- const int kPropsCount = kBitsInSmiLayout * 3;
- TestPropertyKind props[kPropsCount];
- for (int i = 0; i < kPropsCount; i++) {
- props[i] = PROP_DOUBLE;
- }
-
- layout_descriptor =
- TestLayoutDescriptorAppend(isolate, 0, props, kPropsCount);
- CHECK(!layout_descriptor->IsSlowLayout());
-
- layout_descriptor =
- TestLayoutDescriptorAppend(isolate, 13, props, kPropsCount);
- CHECK(!layout_descriptor->IsSlowLayout());
-
- layout_descriptor =
- TestLayoutDescriptorAppend(isolate, kBitsInSmiLayout, props, kPropsCount);
- CHECK(!layout_descriptor->IsSlowLayout());
-
- layout_descriptor = TestLayoutDescriptorAppend(isolate, kBitsInSmiLayout + 1,
- props, kPropsCount);
- CHECK(layout_descriptor->IsSlowLayout());
-
- layout_descriptor = TestLayoutDescriptorAppend(isolate, kBitsInSmiLayout * 2,
- props, kPropsCount);
- CHECK(layout_descriptor->IsSlowLayout());
-
- layout_descriptor =
- TestLayoutDescriptorAppend(isolate, kPropsCount, props, kPropsCount);
- CHECK(layout_descriptor->IsSlowLayout());
-
- {
- // Ensure layout descriptor switches into slow mode at the right moment.
- layout_descriptor = TestLayoutDescriptorAppend(isolate, kPropsCount, props,
- kBitsInSmiLayout);
- CHECK(!layout_descriptor->IsSlowLayout());
-
- layout_descriptor = TestLayoutDescriptorAppend(isolate, kPropsCount, props,
- kBitsInSmiLayout + 1);
- CHECK(layout_descriptor->IsSlowLayout());
- }
-}
-
-
-static Handle<LayoutDescriptor> TestLayoutDescriptorAppendIfFastOrUseFull(
- Isolate* isolate, int inobject_properties,
- Handle<DescriptorArray> descriptors, int number_of_descriptors) {
- Handle<Map> initial_map = Map::Create(isolate, inobject_properties);
-
- Handle<LayoutDescriptor> full_layout_descriptor = LayoutDescriptor::New(
- isolate, initial_map, descriptors, descriptors->number_of_descriptors());
-
- int nof = 0;
- bool switched_to_slow_mode = false;
-
- // This method calls LayoutDescriptor::AppendIfFastOrUseFull() internally
- // and does all the required map-descriptor-related bookkeeping.
- Handle<Map> last_map = Map::AddMissingTransitionsForTesting(
- isolate, initial_map, descriptors, full_layout_descriptor);
-
- // Follow back pointers to construct a sequence of maps from |map|
- // to |last_map|.
- int descriptors_length = descriptors->number_of_descriptors();
- std::vector<Handle<Map>> maps(descriptors_length);
- {
- CHECK(last_map->is_stable());
- Map map = *last_map;
- for (int i = 0; i < descriptors_length; i++) {
- maps[descriptors_length - 1 - i] = handle(map, isolate);
- Object maybe_map = map.GetBackPointer();
- CHECK(maybe_map.IsMap());
- map = Map::cast(maybe_map);
- CHECK(!map.is_stable());
- }
- CHECK_EQ(1, maps[0]->NumberOfOwnDescriptors());
- }
-
- Handle<Map> map;
- // Now check layout descriptors of all intermediate maps.
- for (int i = 0; i < number_of_descriptors; i++) {
- PropertyDetails details = descriptors->GetDetails(InternalIndex(i));
- map = maps[i];
- LayoutDescriptor layout_desc = map->layout_descriptor(kAcquireLoad);
-
- if (layout_desc.IsSlowLayout()) {
- switched_to_slow_mode = true;
- CHECK_EQ(*full_layout_descriptor, layout_desc);
- } else {
- CHECK(!switched_to_slow_mode);
- if (details.location() == kField) {
- nof++;
- int field_index = details.field_index();
- int field_width_in_words = details.field_width_in_words();
-
- bool is_inobject = field_index < map->GetInObjectProperties();
- for (int bit = 0; bit < field_width_in_words; bit++) {
- CHECK_EQ(is_inobject && details.representation().IsDouble(),
- !layout_desc.IsTagged(field_index + bit));
- }
- CHECK(layout_desc.IsTagged(field_index + field_width_in_words));
- }
- }
- CHECK(map->layout_descriptor(kAcquireLoad).IsConsistentWithMap(*map));
- }
-
- Handle<LayoutDescriptor> layout_descriptor(map->GetLayoutDescriptor(),
- isolate);
- CHECK(layout_descriptor->IsConsistentWithMap(*map));
- return layout_descriptor;
-}
-
-
-TEST(LayoutDescriptorAppendIfFastOrUseFull) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- v8::HandleScope scope(CcTest::isolate());
-
- Handle<LayoutDescriptor> layout_descriptor;
- const int kPropsCount = kBitsInSmiLayout * 3;
- TestPropertyKind props[kPropsCount];
- for (int i = 0; i < kPropsCount; i++) {
- props[i] = static_cast<TestPropertyKind>(i % PROP_KIND_NUMBER);
- }
- Handle<DescriptorArray> descriptors =
- CreateDescriptorArray(isolate, props, kPropsCount);
-
- layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
- isolate, 0, descriptors, kPropsCount);
- CHECK(!layout_descriptor->IsSlowLayout());
-
- layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
- isolate, 13, descriptors, kPropsCount);
- CHECK(!layout_descriptor->IsSlowLayout());
-
- layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
- isolate, kBitsInSmiLayout, descriptors, kPropsCount);
- CHECK(!layout_descriptor->IsSlowLayout());
-
- layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
- isolate, kBitsInSmiLayout * 2, descriptors, kPropsCount);
- CHECK(layout_descriptor->IsSlowLayout());
-
- layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
- isolate, kPropsCount, descriptors, kPropsCount);
- CHECK(layout_descriptor->IsSlowLayout());
-}
-
-
-TEST(LayoutDescriptorAppendIfFastOrUseFullAllDoubles) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- v8::HandleScope scope(CcTest::isolate());
-
- Handle<LayoutDescriptor> layout_descriptor;
- const int kPropsCount = kBitsInSmiLayout * 3;
- TestPropertyKind props[kPropsCount];
- for (int i = 0; i < kPropsCount; i++) {
- props[i] = PROP_DOUBLE;
- }
- Handle<DescriptorArray> descriptors =
- CreateDescriptorArray(isolate, props, kPropsCount);
-
- layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
- isolate, 0, descriptors, kPropsCount);
- CHECK(!layout_descriptor->IsSlowLayout());
-
- layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
- isolate, 13, descriptors, kPropsCount);
- CHECK(!layout_descriptor->IsSlowLayout());
-
- layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
- isolate, kBitsInSmiLayout, descriptors, kPropsCount);
- CHECK(!layout_descriptor->IsSlowLayout());
-
- layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
- isolate, kBitsInSmiLayout + 1, descriptors, kPropsCount);
- CHECK(layout_descriptor->IsSlowLayout());
-
- layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
- isolate, kBitsInSmiLayout * 2, descriptors, kPropsCount);
- CHECK(layout_descriptor->IsSlowLayout());
-
- layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
- isolate, kPropsCount, descriptors, kPropsCount);
- CHECK(layout_descriptor->IsSlowLayout());
-
- {
- // Ensure layout descriptor switches into slow mode at the right moment.
- layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
- isolate, kPropsCount, descriptors, kBitsInSmiLayout);
- CHECK(!layout_descriptor->IsSlowLayout());
-
- layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
- isolate, kPropsCount, descriptors, kBitsInSmiLayout + 1);
- CHECK(layout_descriptor->IsSlowLayout());
- }
-}
-
-
-TEST(Regress436816) {
- ManualGCScope manual_gc_scope;
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- Factory* factory = isolate->factory();
- v8::HandleScope scope(CcTest::isolate());
-
- // Force a GC to free up space before we allocate objects whose
- // mid-test states would fail heap verification.
- CcTest::CollectAllGarbage();
-
- const int kPropsCount = kBitsInSmiLayout * 3;
- TestPropertyKind props[kPropsCount];
- for (int i = 0; i < kPropsCount; i++) {
- props[i] = PROP_DOUBLE;
- }
- Handle<DescriptorArray> descriptors =
- CreateDescriptorArray(isolate, props, kPropsCount);
-
- Handle<Map> map = Map::Create(isolate, kPropsCount);
- Handle<LayoutDescriptor> layout_descriptor =
- LayoutDescriptor::New(isolate, map, descriptors, kPropsCount);
- map->InitializeDescriptors(isolate, *descriptors, *layout_descriptor);
-
- Handle<JSObject> object =
- factory->NewJSObjectFromMap(map, AllocationType::kOld);
-
- Address fake_address = static_cast<Address>(~kHeapObjectTagMask);
- HeapObject fake_object = HeapObject::FromAddress(fake_address);
- CHECK(fake_object.IsHeapObject());
-
- uint64_t boom_value = bit_cast<uint64_t>(fake_object);
- for (InternalIndex i : InternalIndex::Range(kPropsCount)) {
- FieldIndex index = FieldIndex::ForDescriptor(*map, i);
- CHECK(map->IsUnboxedDoubleField(index));
- object->RawFastDoublePropertyAsBitsAtPut(index, boom_value);
- }
- CHECK(object->HasFastProperties());
- CHECK(!object->map().HasFastPointerLayout());
-
- Handle<Map> normalized_map =
- Map::Normalize(isolate, map, KEEP_INOBJECT_PROPERTIES, "testing");
- JSObject::MigrateToMap(isolate, object, normalized_map);
- CHECK(!object->HasFastProperties());
- CHECK(object->map().HasFastPointerLayout());
-
- // Trigger GCs and heap verification.
- CcTest::CollectAllGarbage();
-}
-
-
-TEST(DescriptorArrayTrimming) {
- ManualGCScope manual_gc_scope;
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- Isolate* isolate = CcTest::i_isolate();
-
- const int kFieldCount = 128;
- const int kSplitFieldIndex = 32;
- const int kTrimmedLayoutDescriptorLength = 64;
-
- Handle<FieldType> any_type = FieldType::Any(isolate);
- Handle<Map> map = Map::Create(isolate, kFieldCount);
- for (int i = 0; i < kSplitFieldIndex; i++) {
- map = Map::CopyWithField(isolate, map, CcTest::MakeName("prop", i),
- any_type, NONE, PropertyConstness::kMutable,
- Representation::Smi(), INSERT_TRANSITION)
- .ToHandleChecked();
- }
- map = Map::CopyWithField(isolate, map,
- CcTest::MakeName("dbl", kSplitFieldIndex), any_type,
- NONE, PropertyConstness::kMutable,
- Representation::Double(), INSERT_TRANSITION)
- .ToHandleChecked();
- CHECK(map->layout_descriptor(kAcquireLoad).IsConsistentWithMap(*map, true));
- CHECK(map->layout_descriptor(kAcquireLoad).IsSlowLayout());
- CHECK(map->owns_descriptors());
- CHECK_EQ(8, map->layout_descriptor(kAcquireLoad).length());
-
- {
- // Add transitions to double fields.
- v8::HandleScope scope(CcTest::isolate());
-
- Handle<Map> tmp_map = map;
- for (int i = kSplitFieldIndex + 1; i < kFieldCount; i++) {
- tmp_map = Map::CopyWithField(isolate, tmp_map, CcTest::MakeName("dbl", i),
- any_type, NONE, PropertyConstness::kMutable,
- Representation::Double(), INSERT_TRANSITION)
- .ToHandleChecked();
- CHECK(tmp_map->layout_descriptor(kAcquireLoad)
- .IsConsistentWithMap(*tmp_map, true));
- }
- // Check that descriptors are shared.
- CHECK(tmp_map->owns_descriptors());
- CHECK_EQ(map->instance_descriptors(kRelaxedLoad),
- tmp_map->instance_descriptors(kRelaxedLoad));
- CHECK_EQ(map->layout_descriptor(kAcquireLoad),
- tmp_map->layout_descriptor(kAcquireLoad));
- }
- CHECK(map->layout_descriptor(kAcquireLoad).IsSlowLayout());
- CHECK_EQ(16, map->layout_descriptor(kAcquireLoad).length());
-
- // The unused tail of the layout descriptor is now "dirty" because of sharing.
- CHECK(map->layout_descriptor(kAcquireLoad).IsConsistentWithMap(*map));
- for (int i = kSplitFieldIndex + 1; i < kTrimmedLayoutDescriptorLength; i++) {
- CHECK(!map->layout_descriptor(kAcquireLoad).IsTagged(i));
- }
- CHECK_LT(map->NumberOfOwnDescriptors(),
- map->instance_descriptors(kRelaxedLoad).number_of_descriptors());
-
- // Call GC that should trim both |map|'s descriptor array and layout
- // descriptor.
- CcTest::CollectAllGarbage();
-
- // The unused tail of the layout descriptor is now "clean" again.
- CHECK(map->layout_descriptor(kAcquireLoad).IsConsistentWithMap(*map, true));
- CHECK(map->owns_descriptors());
- CHECK_EQ(map->NumberOfOwnDescriptors(),
- map->instance_descriptors(kRelaxedLoad).number_of_descriptors());
- CHECK(map->layout_descriptor(kAcquireLoad).IsSlowLayout());
- CHECK_EQ(8, map->layout_descriptor(kAcquireLoad).length());
-
- {
- // Add transitions to tagged fields.
- v8::HandleScope scope(CcTest::isolate());
-
- Handle<Map> tmp_map = map;
- for (int i = kSplitFieldIndex + 1; i < kFieldCount - 1; i++) {
- tmp_map =
- Map::CopyWithField(isolate, tmp_map, CcTest::MakeName("tagged", i),
- any_type, NONE, PropertyConstness::kMutable,
- Representation::Tagged(), INSERT_TRANSITION)
- .ToHandleChecked();
- CHECK(tmp_map->layout_descriptor(kAcquireLoad)
- .IsConsistentWithMap(*tmp_map, true));
- }
- tmp_map = Map::CopyWithField(isolate, tmp_map, CcTest::MakeString("dbl"),
- any_type, NONE, PropertyConstness::kMutable,
- Representation::Double(), INSERT_TRANSITION)
- .ToHandleChecked();
- CHECK(tmp_map->layout_descriptor(kAcquireLoad)
- .IsConsistentWithMap(*tmp_map, true));
- // Check that descriptors are shared.
- CHECK(tmp_map->owns_descriptors());
- CHECK_EQ(map->instance_descriptors(kRelaxedLoad),
- tmp_map->instance_descriptors(kRelaxedLoad));
- }
- CHECK(map->layout_descriptor(kAcquireLoad).IsSlowLayout());
-}
-
-
-TEST(DoScavenge) {
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- Isolate* isolate = CcTest::i_isolate();
- Factory* factory = isolate->factory();
-
- // The plan: create |obj| with a double field in new space, do a scavenge so
- // that |obj| is moved to old space, then construct a double value that looks
- // like a pointer into the from-space. Do one more scavenge and ensure that it
- // didn't crash or corrupt the double value stored in the object.
-
- Handle<FieldType> any_type = FieldType::Any(isolate);
- Handle<Map> map = Map::Create(isolate, 10);
- map = Map::CopyWithField(isolate, map, CcTest::MakeName("prop", 0), any_type,
- NONE, PropertyConstness::kMutable,
- Representation::Double(), INSERT_TRANSITION)
- .ToHandleChecked();
-
- // Create object in new space.
- Handle<JSObject> obj =
- factory->NewJSObjectFromMap(map, AllocationType::kYoung);
-
- Handle<HeapNumber> heap_number = factory->NewHeapNumber(42.5);
- WriteToField(*obj, 0, *heap_number);
-
- {
- // Ensure the object is properly set up.
- FieldIndex field_index = FieldIndex::ForDescriptor(*map, InternalIndex(0));
- CHECK(field_index.is_inobject() && field_index.is_double());
- CHECK_EQ(FLAG_unbox_double_fields, map->IsUnboxedDoubleField(field_index));
- CHECK_EQ(42.5, GetDoubleFieldValue(*obj, field_index));
- }
- CHECK(isolate->heap()->new_space()->Contains(*obj));
-
- // Do scavenge so that |obj| is moved to survivor space.
- CcTest::CollectGarbage(i::NEW_SPACE);
-
- // Create temp object in the new space.
- Handle<JSArray> temp = factory->NewJSArray(0, PACKED_ELEMENTS);
- CHECK(isolate->heap()->new_space()->Contains(*temp));
-
- // Construct a double value that looks like a pointer to the new space object
- // and store it into the obj.
- Address fake_object = temp->ptr() + kSystemPointerSize;
- double boom_value = bit_cast<double>(fake_object);
-
- FieldIndex field_index =
- FieldIndex::ForDescriptor(obj->map(), InternalIndex(0));
- auto boom_number = factory->NewHeapNumber(boom_value);
- obj->FastPropertyAtPut(field_index, *boom_number);
-
- // Now |obj| moves to the old generation and it has a double field that looks
- // like a pointer into the from semi-space.
- CcTest::CollectGarbage(i::NEW_SPACE);
-
- CHECK(isolate->heap()->old_space()->Contains(*obj));
-
- CHECK_EQ(boom_value, GetDoubleFieldValue(*obj, field_index));
-}
-
-
-TEST(DoScavengeWithIncrementalWriteBarrier) {
- if (FLAG_never_compact || !FLAG_incremental_marking) return;
- ManualGCScope manual_gc_scope;
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- Isolate* isolate = CcTest::i_isolate();
- Factory* factory = isolate->factory();
- Heap* heap = CcTest::heap();
- PagedSpace* old_space = heap->old_space();
-
- // The plan: create |obj_value| in old space and ensure that it is allocated
- // on evacuation candidate page, create |obj| with double and tagged fields
- // in new space and write |obj_value| to tagged field of |obj|, do two
- // scavenges to promote |obj| to old space, a GC in old space and ensure that
- // the tagged value was properly updated after candidates evacuation.
-
- Handle<FieldType> any_type = FieldType::Any(isolate);
- Handle<Map> map = Map::Create(isolate, 10);
- map = Map::CopyWithField(isolate, map, CcTest::MakeName("prop", 0), any_type,
- NONE, PropertyConstness::kMutable,
- Representation::Double(), INSERT_TRANSITION)
- .ToHandleChecked();
- map = Map::CopyWithField(isolate, map, CcTest::MakeName("prop", 1), any_type,
- NONE, PropertyConstness::kMutable,
- Representation::Tagged(), INSERT_TRANSITION)
- .ToHandleChecked();
-
- // Create |obj_value| in old space.
- Handle<HeapObject> obj_value;
- Page* ec_page;
- {
- AlwaysAllocateScope always_allocate(isolate);
- // Make sure |obj_value| is placed on an old-space evacuation candidate.
- heap::SimulateFullSpace(old_space);
- obj_value =
- factory->NewJSArray(32 * KB, HOLEY_ELEMENTS, AllocationType::kOld);
- ec_page = Page::FromHeapObject(*obj_value);
- }
-
- // Create object in new space.
- Handle<JSObject> obj =
- factory->NewJSObjectFromMap(map, AllocationType::kYoung);
-
- Handle<HeapNumber> heap_number = factory->NewHeapNumber(42.5);
- WriteToField(*obj, 0, *heap_number);
- WriteToField(*obj, 1, *obj_value);
-
- {
- // Ensure the object is properly set up.
- FieldIndex field_index = FieldIndex::ForDescriptor(*map, InternalIndex(0));
- CHECK(field_index.is_inobject() && field_index.is_double());
- CHECK_EQ(FLAG_unbox_double_fields, map->IsUnboxedDoubleField(field_index));
- CHECK_EQ(42.5, GetDoubleFieldValue(*obj, field_index));
-
- field_index = FieldIndex::ForDescriptor(*map, InternalIndex(1));
- CHECK(field_index.is_inobject() && !field_index.is_double());
- CHECK(!map->IsUnboxedDoubleField(field_index));
- }
- CHECK(isolate->heap()->new_space()->Contains(*obj));
-
- // Heap is ready, force |ec_page| to become an evacuation candidate and
- // simulate incremental marking.
- FLAG_stress_compaction = true;
- FLAG_manual_evacuation_candidates_selection = true;
- heap::ForceEvacuationCandidate(ec_page);
- heap::SimulateIncrementalMarking(heap);
- // Disable stress compaction mode in order to let GC do scavenge.
- FLAG_stress_compaction = false;
-
- // Check that everything is ready for triggering incremental write barrier
- // during scavenge (i.e. that |obj| is black and incremental marking is
- // in compacting mode and |obj_value|'s page is an evacuation candidate).
- IncrementalMarking* marking = heap->incremental_marking();
- CHECK(marking->IsCompacting());
- IncrementalMarking::MarkingState* marking_state =
- heap->incremental_marking()->marking_state();
- CHECK(marking_state->IsBlack(*obj));
- CHECK(MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
-
- // Trigger GCs so that |obj| moves to old gen.
- CcTest::CollectGarbage(i::NEW_SPACE); // in survivor space now
- CcTest::CollectGarbage(i::NEW_SPACE); // in old gen now
-
- CHECK(isolate->heap()->old_space()->Contains(*obj));
- CHECK(isolate->heap()->old_space()->Contains(*obj_value));
- CHECK(MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
-
- CcTest::CollectGarbage(i::OLD_SPACE);
-
- // |obj_value| must be evacuated.
- CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
-
- FieldIndex field_index = FieldIndex::ForDescriptor(*map, InternalIndex(1));
- CHECK_EQ(*obj_value, obj->RawFastPropertyAt(field_index));
-}
-
-
-static void TestLayoutDescriptorHelper(Isolate* isolate,
- int inobject_properties,
- Handle<DescriptorArray> descriptors,
- int number_of_descriptors) {
- Handle<Map> map = Map::Create(isolate, inobject_properties);
-
- Handle<LayoutDescriptor> layout_descriptor = LayoutDescriptor::New(
- isolate, map, descriptors, descriptors->number_of_descriptors());
- InitializeVerifiedMapDescriptors(isolate, *map, *descriptors,
- *layout_descriptor);
-
- LayoutDescriptorHelper helper(*map);
- bool all_fields_tagged = true;
-
- int instance_size = map->instance_size();
-
- int end_offset = instance_size * 2;
- int first_non_tagged_field_offset = end_offset;
- for (InternalIndex i : InternalIndex::Range(number_of_descriptors)) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (details.location() != kField) continue;
- FieldIndex index = FieldIndex::ForDescriptor(*map, i);
- if (!index.is_inobject()) continue;
- all_fields_tagged &= !details.representation().IsDouble();
- bool expected_tagged = !index.is_double();
- if (!expected_tagged) {
- first_non_tagged_field_offset =
- std::min(first_non_tagged_field_offset, index.offset());
- }
-
- int end_of_region_offset;
- CHECK_EQ(expected_tagged, helper.IsTagged(index.offset()));
- CHECK_EQ(expected_tagged, helper.IsTagged(index.offset(), instance_size,
- &end_of_region_offset));
- CHECK_GT(end_of_region_offset, 0);
- CHECK_EQ(end_of_region_offset % kTaggedSize, 0);
- CHECK(end_of_region_offset <= instance_size);
-
- for (int offset = index.offset(); offset < end_of_region_offset;
- offset += kTaggedSize) {
- CHECK_EQ(expected_tagged, helper.IsTagged(index.offset()));
- }
- if (end_of_region_offset < instance_size) {
- CHECK_EQ(!expected_tagged, helper.IsTagged(end_of_region_offset));
- } else {
- CHECK(helper.IsTagged(end_of_region_offset));
- }
- }
-
- for (int offset = 0; offset < JSObject::kHeaderSize; offset += kTaggedSize) {
- // Header queries
- CHECK(helper.IsTagged(offset));
- int end_of_region_offset;
- CHECK(helper.IsTagged(offset, end_offset, &end_of_region_offset));
- CHECK_EQ(first_non_tagged_field_offset, end_of_region_offset);
-
- // Out of bounds queries
- CHECK(helper.IsTagged(offset + instance_size));
- }
-
- CHECK_EQ(all_fields_tagged, helper.all_fields_tagged());
-}
-
-
-TEST(LayoutDescriptorHelperMixed) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- v8::HandleScope scope(CcTest::isolate());
-
- Handle<LayoutDescriptor> layout_descriptor;
- const int kPropsCount = kBitsInSmiLayout * 3;
- TestPropertyKind props[kPropsCount];
- for (int i = 0; i < kPropsCount; i++) {
- props[i] = static_cast<TestPropertyKind>(i % PROP_KIND_NUMBER);
- }
- Handle<DescriptorArray> descriptors =
- CreateDescriptorArray(isolate, props, kPropsCount);
-
- TestLayoutDescriptorHelper(isolate, 0, descriptors, kPropsCount);
-
- TestLayoutDescriptorHelper(isolate, 13, descriptors, kPropsCount);
-
- TestLayoutDescriptorHelper(isolate, kBitsInSmiLayout, descriptors,
- kPropsCount);
-
- TestLayoutDescriptorHelper(isolate, kBitsInSmiLayout * 2, descriptors,
- kPropsCount);
-
- TestLayoutDescriptorHelper(isolate, kPropsCount, descriptors, kPropsCount);
-}
-
-
-TEST(LayoutDescriptorHelperAllTagged) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- v8::HandleScope scope(CcTest::isolate());
-
- Handle<LayoutDescriptor> layout_descriptor;
- const int kPropsCount = kBitsInSmiLayout * 3;
- TestPropertyKind props[kPropsCount];
- for (int i = 0; i < kPropsCount; i++) {
- props[i] = PROP_TAGGED;
- }
- Handle<DescriptorArray> descriptors =
- CreateDescriptorArray(isolate, props, kPropsCount);
-
- TestLayoutDescriptorHelper(isolate, 0, descriptors, kPropsCount);
-
- TestLayoutDescriptorHelper(isolate, 13, descriptors, kPropsCount);
-
- TestLayoutDescriptorHelper(isolate, kBitsInSmiLayout, descriptors,
- kPropsCount);
-
- TestLayoutDescriptorHelper(isolate, kBitsInSmiLayout * 2, descriptors,
- kPropsCount);
-
- TestLayoutDescriptorHelper(isolate, kPropsCount, descriptors, kPropsCount);
-}
-
-
-TEST(LayoutDescriptorHelperAllDoubles) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- v8::HandleScope scope(CcTest::isolate());
-
- Handle<LayoutDescriptor> layout_descriptor;
- const int kPropsCount = kBitsInSmiLayout * 3;
- TestPropertyKind props[kPropsCount];
- for (int i = 0; i < kPropsCount; i++) {
- props[i] = PROP_DOUBLE;
- }
- Handle<DescriptorArray> descriptors =
- CreateDescriptorArray(isolate, props, kPropsCount);
-
- TestLayoutDescriptorHelper(isolate, 0, descriptors, kPropsCount);
-
- TestLayoutDescriptorHelper(isolate, 13, descriptors, kPropsCount);
-
- TestLayoutDescriptorHelper(isolate, kBitsInSmiLayout, descriptors,
- kPropsCount);
-
- TestLayoutDescriptorHelper(isolate, kBitsInSmiLayout * 2, descriptors,
- kPropsCount);
-
- TestLayoutDescriptorHelper(isolate, kPropsCount, descriptors, kPropsCount);
-}
-
-
-TEST(LayoutDescriptorSharing) {
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- Isolate* isolate = CcTest::i_isolate();
- Handle<FieldType> any_type = FieldType::Any(isolate);
-
- Handle<Map> split_map;
- {
- Handle<Map> map = Map::Create(isolate, 64);
- for (int i = 0; i < 32; i++) {
- Handle<String> name = CcTest::MakeName("prop", i);
- map = Map::CopyWithField(isolate, map, name, any_type, NONE,
- PropertyConstness::kMutable,
- Representation::Smi(), INSERT_TRANSITION)
- .ToHandleChecked();
- }
- split_map = Map::CopyWithField(isolate, map, CcTest::MakeString("dbl"),
- any_type, NONE, PropertyConstness::kMutable,
- Representation::Double(), INSERT_TRANSITION)
- .ToHandleChecked();
- }
- Handle<LayoutDescriptor> split_layout_descriptor(
- split_map->layout_descriptor(kAcquireLoad), isolate);
- CHECK(split_layout_descriptor->IsConsistentWithMap(*split_map, true));
- CHECK(split_layout_descriptor->IsSlowLayout());
- CHECK(split_map->owns_descriptors());
-
- Handle<Map> map1 =
- Map::CopyWithField(isolate, split_map, CcTest::MakeString("foo"),
- any_type, NONE, PropertyConstness::kMutable,
- Representation::Double(), INSERT_TRANSITION)
- .ToHandleChecked();
- CHECK(!split_map->owns_descriptors());
- CHECK_EQ(*split_layout_descriptor,
- split_map->layout_descriptor(kAcquireLoad));
-
- // Layout descriptors should be shared with |split_map|.
- CHECK(map1->owns_descriptors());
- CHECK_EQ(*split_layout_descriptor, map1->layout_descriptor(kAcquireLoad));
- CHECK(map1->layout_descriptor(kAcquireLoad).IsConsistentWithMap(*map1, true));
-
- Handle<Map> map2 =
- Map::CopyWithField(isolate, split_map, CcTest::MakeString("bar"),
- any_type, NONE, PropertyConstness::kMutable,
- Representation::Tagged(), INSERT_TRANSITION)
- .ToHandleChecked();
-
- // Layout descriptors should not be shared with |split_map|.
- CHECK(map2->owns_descriptors());
- CHECK_NE(*split_layout_descriptor, map2->layout_descriptor(kAcquireLoad));
- CHECK(map2->layout_descriptor(kAcquireLoad).IsConsistentWithMap(*map2, true));
-}
-
-static void TestWriteBarrier(Handle<Map> map, Handle<Map> new_map,
- InternalIndex tagged_descriptor,
- InternalIndex double_descriptor,
- bool check_tagged_value = true) {
- FLAG_stress_compaction = true;
- FLAG_manual_evacuation_candidates_selection = true;
- Isolate* isolate = CcTest::i_isolate();
- Factory* factory = isolate->factory();
- Heap* heap = CcTest::heap();
- PagedSpace* old_space = heap->old_space();
-
- // The plan: create |obj| by |map| in old space, create |obj_value| in
- // new space and ensure that write barrier is triggered when |obj_value| is
- // written to property |tagged_descriptor| of |obj|.
- // Then migrate object to |new_map| and set proper value for property
- // |double_descriptor|. Call GC and ensure that it did not crash during
- // store buffer entries updating.
-
- Handle<JSObject> obj;
- Handle<HeapObject> obj_value;
- {
- AlwaysAllocateScope always_allocate(isolate);
- obj = factory->NewJSObjectFromMap(map, AllocationType::kOld);
- CHECK(old_space->Contains(*obj));
-
- obj_value = factory->NewHeapNumber(0.);
- }
-
- CHECK(Heap::InYoungGeneration(*obj_value));
-
- {
- FieldIndex index = FieldIndex::ForDescriptor(*map, tagged_descriptor);
- const int n = 153;
- for (int i = 0; i < n; i++) {
- obj->FastPropertyAtPut(index, *obj_value);
- }
- }
-
- // Migrate |obj| to |new_map| which should shift fields and put the
- // |boom_value| to the slot that was earlier recorded by write barrier.
- JSObject::MigrateToMap(isolate, obj, new_map);
-
- Address fake_object = obj_value->ptr() + kTaggedSize;
- uint64_t boom_value = bit_cast<uint64_t>(fake_object);
-
- FieldIndex double_field_index =
- FieldIndex::ForDescriptor(*new_map, double_descriptor);
- CHECK(obj->IsUnboxedDoubleField(double_field_index));
- obj->RawFastDoublePropertyAsBitsAtPut(double_field_index, boom_value);
-
- // Trigger GC to evacuate all candidates.
- CcTest::CollectGarbage(NEW_SPACE);
-
- if (check_tagged_value) {
- FieldIndex tagged_field_index =
- FieldIndex::ForDescriptor(*new_map, tagged_descriptor);
- CHECK_EQ(*obj_value, obj->RawFastPropertyAt(tagged_field_index));
- }
- CHECK_EQ(boom_value, obj->RawFastDoublePropertyAsBitsAt(double_field_index));
-}
-
-static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
- InternalIndex tagged_descriptor,
- InternalIndex double_descriptor,
- bool check_tagged_value = true) {
- if (FLAG_never_compact || !FLAG_incremental_marking) return;
- ManualGCScope manual_gc_scope;
- FLAG_manual_evacuation_candidates_selection = true;
- Isolate* isolate = CcTest::i_isolate();
- Factory* factory = isolate->factory();
- Heap* heap = CcTest::heap();
- PagedSpace* old_space = heap->old_space();
-
- // The plan: create |obj| by |map| in old space, create |obj_value| in
- // old space and ensure it ends up on an evacuation candidate page. Start
- // incremental marking and ensure that incremental write barrier is triggered
- // when |obj_value| is written to property |tagged_descriptor| of |obj|.
- // Then migrate object to |new_map| and set proper value for property
- // |double_descriptor|. Call GC and ensure that it did not crash during
- // slots buffer entries updating.
-
- Handle<JSObject> obj;
- Handle<HeapObject> obj_value;
- Page* ec_page;
- {
- AlwaysAllocateScope always_allocate(isolate);
- obj = factory->NewJSObjectFromMap(map, AllocationType::kOld);
- CHECK(old_space->Contains(*obj));
-
- // Make sure |obj_value| is placed on an old-space evacuation candidate.
- heap::SimulateFullSpace(old_space);
- obj_value =
- factory->NewJSArray(32 * KB, HOLEY_ELEMENTS, AllocationType::kOld);
- ec_page = Page::FromHeapObject(*obj_value);
- CHECK_NE(ec_page, Page::FromHeapObject(*obj));
- }
-
- // Heap is ready, force |ec_page| to become an evacuation candidate and
- // simulate incremental marking.
- heap::ForceEvacuationCandidate(ec_page);
- heap::SimulateIncrementalMarking(heap);
-
- // Check that everything is ready for triggering incremental write barrier
- // (i.e. that both |obj| and |obj_value| are black and the marking phase is
- // still active and |obj_value|'s page is indeed an evacuation candidate).
- IncrementalMarking* marking = heap->incremental_marking();
- CHECK(marking->IsMarking());
- IncrementalMarking::MarkingState* marking_state = marking->marking_state();
- CHECK(marking_state->IsBlack(*obj));
- CHECK(marking_state->IsBlack(*obj_value));
- CHECK(MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
-
- // Trigger incremental write barrier, which should add a slot to remembered
- // set.
- {
- FieldIndex index = FieldIndex::ForDescriptor(*map, tagged_descriptor);
- obj->FastPropertyAtPut(index, *obj_value);
- }
-
- // Migrate |obj| to |new_map| which should shift fields and put the
- // |boom_value| to the slot that was earlier recorded by incremental write
- // barrier.
- JSObject::MigrateToMap(isolate, obj, new_map);
-
- uint64_t boom_value = UINT64_C(0xBAAD0176A37C28E1);
-
- FieldIndex double_field_index =
- FieldIndex::ForDescriptor(*new_map, double_descriptor);
- CHECK(obj->IsUnboxedDoubleField(double_field_index));
- obj->RawFastDoublePropertyAsBitsAtPut(double_field_index, boom_value);
-
- // Trigger GC to evacuate all candidates.
- CcTest::CollectGarbage(OLD_SPACE);
-
- // Ensure that the values are still there and correct.
- CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
-
- if (check_tagged_value) {
- FieldIndex tagged_field_index =
- FieldIndex::ForDescriptor(*new_map, tagged_descriptor);
- CHECK_EQ(*obj_value, obj->RawFastPropertyAt(tagged_field_index));
- }
- CHECK_EQ(boom_value, obj->RawFastDoublePropertyAsBitsAt(double_field_index));
-}
-
-enum OldToWriteBarrierKind {
- OLD_TO_OLD_WRITE_BARRIER,
- OLD_TO_NEW_WRITE_BARRIER
-};
-static void TestWriteBarrierObjectShiftFieldsRight(
- OldToWriteBarrierKind write_barrier_kind) {
- ManualGCScope manual_gc_scope;
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- v8::HandleScope scope(CcTest::isolate());
-
- Handle<FieldType> any_type = FieldType::Any(isolate);
-
- CompileRun("function func() { return 1; }");
-
- Handle<JSObject> func = GetObject("func");
-
- Handle<Map> map = Map::Create(isolate, 10);
- map = Map::CopyWithConstant(isolate, map, CcTest::MakeName("prop", 0), func,
- NONE, INSERT_TRANSITION)
- .ToHandleChecked();
- map = Map::CopyWithField(isolate, map, CcTest::MakeName("prop", 1), any_type,
- NONE, PropertyConstness::kMutable,
- Representation::Double(), INSERT_TRANSITION)
- .ToHandleChecked();
- map = Map::CopyWithField(isolate, map, CcTest::MakeName("prop", 2), any_type,
- NONE, PropertyConstness::kMutable,
- Representation::Tagged(), INSERT_TRANSITION)
- .ToHandleChecked();
-
- // Shift fields right by turning constant property to a field.
- Handle<Map> new_map =
- Map::ReconfigureProperty(isolate, map, InternalIndex(0), kData, NONE,
- Representation::Tagged(), any_type);
-
- if (write_barrier_kind == OLD_TO_NEW_WRITE_BARRIER) {
- TestWriteBarrier(map, new_map, InternalIndex(2), InternalIndex(1));
- } else {
- CHECK_EQ(OLD_TO_OLD_WRITE_BARRIER, write_barrier_kind);
- TestIncrementalWriteBarrier(map, new_map, InternalIndex(2),
- InternalIndex(1));
- }
-}
-
-TEST(WriteBarrierObjectShiftFieldsRight) {
- TestWriteBarrierObjectShiftFieldsRight(OLD_TO_NEW_WRITE_BARRIER);
-}
-
-
-TEST(IncrementalWriteBarrierObjectShiftFieldsRight) {
- TestWriteBarrierObjectShiftFieldsRight(OLD_TO_OLD_WRITE_BARRIER);
-}
-
-
-// TODO(ishell): add respective tests for property kind reconfiguring from
-// accessor field to double, once accessor fields are supported by
-// Map::ReconfigureProperty().
-
-
-// TODO(ishell): add respective tests for fast property removal case once
-// Map::ReconfigureProperty() supports that.
-
-#endif
-
-} // namespace test_unboxed_doubles
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/test-unwinder-code-pages.cc b/deps/v8/test/cctest/test-unwinder-code-pages.cc
index d5d1dfbdec..cac00e0a70 100644
--- a/deps/v8/test/cctest/test-unwinder-code-pages.cc
+++ b/deps/v8/test/cctest/test-unwinder-code-pages.cc
@@ -42,29 +42,28 @@ void CheckCalleeSavedRegisters(const RegisterState& register_state) {}
#elif V8_TARGET_ARCH_ARM
// How much the JSEntry frame occupies in the stack.
-constexpr int kJSEntryFrameSpace = 27;
+constexpr int kJSEntryFrameSpace = 26;
// Offset where the FP, PC and SP live from the beginning of the JSEntryFrame.
-constexpr int kFPOffset = 24;
-constexpr int kPCOffset = 25;
-constexpr int kSPOffset = 26;
+constexpr int kFPOffset = 0;
+constexpr int kPCOffset = 1;
+constexpr int kSPOffset = 25;
// Builds the stack from {stack} as it is explained in frame-constants-arm.h.
void BuildJSEntryStack(uintptr_t* stack) {
- stack[0] = -1; // the bad frame pointer (0xF..F)
+ stack[0] = reinterpret_cast<uintptr_t>(stack); // saved FP.
+ stack[1] = 100; // Return address into C++ code (i.e. lr/pc)
// Set d8 = 150, d9 = 151, ..., d15 = 157.
for (int i = 0; i < 8; ++i) {
// Double registers occupy two slots. Therefore, upper bits are zeroed.
- stack[1 + i * 2] = 0;
- stack[1 + i * 2 + 1] = 150 + i;
+ stack[2 + i * 2] = 0;
+ stack[2 + i * 2 + 1] = 150 + i;
}
// Set r4 = 160, ..., r10 = 166.
for (int i = 0; i < 7; ++i) {
- stack[17 + i] = 160 + i;
+ stack[18 + i] = 160 + i;
}
- stack[24] = reinterpret_cast<uintptr_t>(stack + 24); // saved FP.
- stack[25] = 100; // Return address into C++ code (i.e lr/pc)
- stack[26] = reinterpret_cast<uintptr_t>(stack + 26); // saved SP.
+ stack[25] = reinterpret_cast<uintptr_t>(stack + 25); // saved SP.
}
// Checks that the values in the callee saved registers are the same as the ones
@@ -81,27 +80,26 @@ void CheckCalleeSavedRegisters(const RegisterState& register_state) {
#elif V8_TARGET_ARCH_ARM64
// How much the JSEntry frame occupies in the stack.
-constexpr int kJSEntryFrameSpace = 22;
+constexpr int kJSEntryFrameSpace = 21;
// Offset where the FP, PC and SP live from the beginning of the JSEntryFrame.
-constexpr int kFPOffset = 11;
-constexpr int kPCOffset = 12;
-constexpr int kSPOffset = 21;
+constexpr int kFPOffset = 0;
+constexpr int kPCOffset = 1;
+constexpr int kSPOffset = 20;
// Builds the stack from {stack} as it is explained in frame-constants-arm64.h.
void BuildJSEntryStack(uintptr_t* stack) {
- stack[0] = -1; // the bad frame pointer (0xF..F)
+ stack[0] = reinterpret_cast<uintptr_t>(stack); // saved FP.
+ stack[1] = 100; // Return address into C++ code (i.e. lr/pc)
// Set x19 = 150, ..., x28 = 159.
for (int i = 0; i < 10; ++i) {
- stack[1 + i] = 150 + i;
+ stack[2 + i] = 150 + i;
}
- stack[11] = reinterpret_cast<uintptr_t>(stack + 11); // saved FP.
- stack[12] = 100; // Return address into C++ code (i.e lr/pc)
// Set d8 = 160, ..., d15 = 167.
for (int i = 0; i < 8; ++i) {
- stack[13 + i] = 160 + i;
+ stack[12 + i] = 160 + i;
}
- stack[21] = reinterpret_cast<uintptr_t>(stack + 21); // saved SP.
+ stack[20] = reinterpret_cast<uintptr_t>(stack + 20); // saved SP.
}
// Dummy method since we don't save callee saved registers in arm64.
diff --git a/deps/v8/test/cctest/torque/test-torque.cc b/deps/v8/test/cctest/torque/test-torque.cc
index 02509a1c7b..653eebe66f 100644
--- a/deps/v8/test/cctest/torque/test-torque.cc
+++ b/deps/v8/test/cctest/torque/test-torque.cc
@@ -881,6 +881,58 @@ TEST(TestOffHeapSlice) {
ft.Call();
}
+TEST(TestCallMultiReturnBuiltin) {
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ CodeAssemblerTester asm_tester(isolate, 1);
+ TestTorqueAssembler m(asm_tester.state());
+ {
+ Handle<Context> context =
+ Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
+ m.TestCallMultiReturnBuiltin(
+ m.UncheckedCast<Context>(m.HeapConstant(context)));
+ m.Return(m.UndefinedConstant());
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.Call();
+}
+
+TEST(TestRunLazyTwice) {
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ const int kNumParams = 0;
+ int lazyNumber = 3;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ TestTorqueAssembler m(asm_tester.state());
+ {
+ CodeStubAssembler::LazyNode<Smi> lazy = [&]() {
+ return m.SmiConstant(lazyNumber++);
+ };
+ m.Return(m.TestRunLazyTwice(lazy));
+ }
+ CHECK_EQ(lazyNumber, 5);
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+ Handle<Object> result = ft.Call().ToHandleChecked();
+ CHECK_EQ(7, Handle<Smi>::cast(result)->value());
+}
+
+TEST(TestCreateLazyNodeFromTorque) {
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ const int kNumParams = 0;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ TestTorqueAssembler m(asm_tester.state());
+ {
+ m.TestCreateLazyNodeFromTorque();
+ m.Return(m.UndefinedConstant());
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+ ft.Call();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-gc.cc b/deps/v8/test/cctest/wasm/test-gc.cc
index 44d3b03076..dd1dfea0f1 100644
--- a/deps/v8/test/cctest/wasm/test-gc.cc
+++ b/deps/v8/test/cctest/wasm/test-gc.cc
@@ -204,7 +204,6 @@ ValueType optref(uint32_t type_index) {
WASM_COMPILED_EXEC_TEST(WasmBasicStruct) {
WasmGCTester tester(execution_tier);
- FLAG_experimental_liftoff_extern_ref = true;
const byte type_index =
tester.DefineStruct({F(kWasmI32, true), F(kWasmI32, true)});
@@ -277,7 +276,6 @@ WASM_COMPILED_EXEC_TEST(WasmBasicStruct) {
// struct ref types in globals and if-results.
WASM_COMPILED_EXEC_TEST(WasmRefAsNonNull) {
WasmGCTester tester(execution_tier);
- FLAG_experimental_liftoff_extern_ref = true;
const byte type_index =
tester.DefineStruct({F(kWasmI32, true), F(kWasmI32, true)});
ValueType kRefTypes[] = {ref(type_index)};
@@ -308,7 +306,6 @@ WASM_COMPILED_EXEC_TEST(WasmRefAsNonNull) {
WASM_COMPILED_EXEC_TEST(WasmBrOnNull) {
WasmGCTester tester(execution_tier);
- FLAG_experimental_liftoff_extern_ref = true;
const byte type_index =
tester.DefineStruct({F(kWasmI32, true), F(kWasmI32, true)});
ValueType kRefTypes[] = {ref(type_index)};
@@ -346,86 +343,77 @@ WASM_COMPILED_EXEC_TEST(WasmBrOnNull) {
WASM_COMPILED_EXEC_TEST(BrOnCast) {
WasmGCTester tester(execution_tier);
- FLAG_experimental_liftoff_extern_ref = true;
+ ValueType kDataRefNull = ValueType::Ref(HeapType::kData, kNullable);
const byte type_index = tester.DefineStruct({F(kWasmI32, true)});
+ const byte other_type_index = tester.DefineStruct({F(kWasmF32, true)});
const byte rtt_index =
- tester.AddGlobal(ValueType::Rtt(type_index, 1), false,
+ tester.AddGlobal(ValueType::Rtt(type_index, 0), false,
WasmInitExpr::RttCanon(
static_cast<HeapType::Representation>(type_index)));
const byte kTestStruct = tester.DefineFunction(
- tester.sigs.i_v(), {kWasmI32, kWasmEqRef},
- {WASM_BLOCK(WASM_LOCAL_SET(0, WASM_I32V(111)),
- // Pipe a struct through a local so it's statically typed
- // as eqref.
- WASM_LOCAL_SET(
- 1, WASM_STRUCT_NEW_WITH_RTT(type_index, WASM_I32V(1),
- WASM_GLOBAL_GET(rtt_index))),
- WASM_LOCAL_GET(1),
- // The struct is not an i31, so this branch isn't taken.
- WASM_BR_ON_CAST(0, WASM_RTT_CANON(kI31RefCode)),
- WASM_LOCAL_SET(0, WASM_I32V(222)), // Final result.
- // This branch is taken.
- WASM_BR_ON_CAST(0, WASM_GLOBAL_GET(rtt_index)),
- // Not executed due to the branch.
- WASM_DROP, WASM_LOCAL_SET(0, WASM_I32V(333))),
- WASM_LOCAL_GET(0), kExprEnd});
-
- const byte kTestI31 = tester.DefineFunction(
- tester.sigs.i_v(), {kWasmI32, kWasmEqRef},
- {WASM_BLOCK(WASM_LOCAL_SET(0, WASM_I32V(111)),
- // Pipe an i31ref through a local so it's statically typed
- // as eqref.
- WASM_LOCAL_SET(1, WASM_I31_NEW(WASM_I32V(42))),
- WASM_LOCAL_GET(1),
- // The i31 is not a struct, so this branch isn't taken.
- WASM_BR_ON_CAST(0, WASM_GLOBAL_GET(rtt_index)),
- WASM_LOCAL_SET(0, WASM_I32V(222)), // Final result.
- // This branch is taken.
- WASM_BR_ON_CAST(0, WASM_RTT_CANON(kI31RefCode)),
- // Not executed due to the branch.
- WASM_DROP, WASM_LOCAL_SET(0, WASM_I32V(333))),
- WASM_LOCAL_GET(0), kExprEnd});
+ tester.sigs.i_v(), {kWasmI32, kDataRefNull},
+ {WASM_BLOCK_R(ValueType::Ref(type_index, kNullable),
+ WASM_LOCAL_SET(0, WASM_I32V(111)),
+ // Pipe a struct through a local so it's statically typed
+ // as dataref.
+ WASM_LOCAL_SET(1, WASM_STRUCT_NEW_WITH_RTT(
+ other_type_index, WASM_F32(1.0),
+ WASM_RTT_CANON(other_type_index))),
+ WASM_LOCAL_GET(1),
+ // The type check fails, so this branch isn't taken.
+ WASM_BR_ON_CAST(0, WASM_GLOBAL_GET(rtt_index)), WASM_DROP,
+
+ WASM_LOCAL_SET(0, WASM_I32V(221)), // (Final result) - 1
+ WASM_LOCAL_SET(1, WASM_STRUCT_NEW_WITH_RTT(
+ type_index, WASM_I32V(1),
+ WASM_GLOBAL_GET(rtt_index))),
+ WASM_LOCAL_GET(1),
+ // This branch is taken.
+ WASM_BR_ON_CAST(0, WASM_GLOBAL_GET(rtt_index)),
+ WASM_GLOBAL_GET(rtt_index), WASM_GC_OP(kExprRefCast),
+
+ // Not executed due to the branch.
+ WASM_LOCAL_SET(0, WASM_I32V(333))),
+ WASM_GC_OP(kExprStructGet), type_index, 0, WASM_LOCAL_GET(0),
+ kExprI32Add, kExprEnd});
const byte kTestNull = tester.DefineFunction(
- tester.sigs.i_v(), {kWasmI32, kWasmEqRef},
- {WASM_BLOCK(WASM_LOCAL_SET(0, WASM_I32V(111)),
- WASM_LOCAL_GET(1), // Put a nullref onto the value stack.
- // Neither of these branches is taken for nullref.
- WASM_BR_ON_CAST(0, WASM_RTT_CANON(kI31RefCode)),
- WASM_LOCAL_SET(0, WASM_I32V(222)),
- WASM_BR_ON_CAST(0, WASM_GLOBAL_GET(rtt_index)), WASM_DROP,
- WASM_LOCAL_SET(0, WASM_I32V(333))), // Final result.
- WASM_LOCAL_GET(0), kExprEnd});
+ tester.sigs.i_v(), {kWasmI32, kDataRefNull},
+ {WASM_BLOCK_R(ValueType::Ref(type_index, kNullable),
+ WASM_LOCAL_SET(0, WASM_I32V(111)),
+ WASM_LOCAL_GET(1), // Put a nullref onto the value stack.
+ // Not taken for nullref.
+ WASM_BR_ON_CAST(0, WASM_GLOBAL_GET(rtt_index)),
+ WASM_RTT_CANON(type_index), WASM_GC_OP(kExprRefCast),
+
+ WASM_LOCAL_SET(0, WASM_I32V(222))), // Final result.
+ WASM_DROP, WASM_LOCAL_GET(0), kExprEnd});
const byte kTypedAfterBranch = tester.DefineFunction(
- tester.sigs.i_v(), {kWasmI32, kWasmEqRef},
+ tester.sigs.i_v(), {kWasmI32, kDataRefNull},
{WASM_LOCAL_SET(1, WASM_STRUCT_NEW_WITH_RTT(type_index, WASM_I32V(42),
WASM_GLOBAL_GET(rtt_index))),
- WASM_BLOCK(WASM_LOCAL_SET(
+ WASM_BLOCK_I(
+ // The inner block should take the early branch with a struct
+ // on the stack.
+ WASM_BLOCK_R(ValueType::Ref(type_index, kNonNullable),
+ WASM_LOCAL_GET(1),
+ WASM_BR_ON_CAST(0, WASM_GLOBAL_GET(rtt_index)),
+ // Returning 123 is the unreachable failure case.
+ WASM_I32V(123), WASM_BR(1)),
// The outer block catches the struct left behind by the inner block
// and reads its field.
- 0,
- WASM_STRUCT_GET(
- type_index, 0,
- // The inner block should take the early branch with a struct
- // on the stack.
- WASM_BLOCK_R(ValueType::Ref(type_index, kNonNullable),
- WASM_LOCAL_GET(1),
- WASM_BR_ON_CAST(0, WASM_GLOBAL_GET(rtt_index)),
- // Returning 123 is the unreachable failure case.
- WASM_LOCAL_SET(0, WASM_I32V(123)), WASM_BR(1))))),
- WASM_LOCAL_GET(0), kExprEnd});
+ WASM_GC_OP(kExprStructGet), type_index, 0),
+ kExprEnd});
tester.CompileModule();
tester.CheckResult(kTestStruct, 222);
- tester.CheckResult(kTestI31, 222);
- tester.CheckResult(kTestNull, 333);
+ tester.CheckResult(kTestNull, 222);
tester.CheckResult(kTypedAfterBranch, 42);
}
WASM_COMPILED_EXEC_TEST(WasmRefEq) {
WasmGCTester tester(execution_tier);
- FLAG_experimental_liftoff_extern_ref = true;
byte type_index = tester.DefineStruct({F(kWasmI32, true), F(kWasmI32, true)});
ValueType kRefTypes[] = {ref(type_index)};
ValueType kOptRefType = optref(type_index);
@@ -465,7 +453,6 @@ WASM_COMPILED_EXEC_TEST(WasmRefEq) {
WASM_COMPILED_EXEC_TEST(WasmPackedStructU) {
WasmGCTester tester(execution_tier);
- FLAG_experimental_liftoff_extern_ref = true;
const byte type_index = tester.DefineStruct(
{F(kWasmI8, true), F(kWasmI16, true), F(kWasmI32, true)});
@@ -503,7 +490,6 @@ WASM_COMPILED_EXEC_TEST(WasmPackedStructU) {
WASM_COMPILED_EXEC_TEST(WasmPackedStructS) {
WasmGCTester tester(execution_tier);
- FLAG_experimental_liftoff_extern_ref = true;
const byte type_index = tester.DefineStruct(
{F(kWasmI8, true), F(kWasmI16, true), F(kWasmI32, true)});
@@ -621,7 +607,6 @@ TEST(WasmLetInstruction) {
WASM_COMPILED_EXEC_TEST(WasmBasicArray) {
WasmGCTester tester(execution_tier);
- FLAG_experimental_liftoff_extern_ref = true;
const byte type_index = tester.DefineArray(wasm::kWasmI32, true);
ValueType kRefTypes[] = {ref(type_index)};
@@ -697,7 +682,6 @@ WASM_COMPILED_EXEC_TEST(WasmBasicArray) {
WASM_COMPILED_EXEC_TEST(WasmPackedArrayU) {
WasmGCTester tester(execution_tier);
- FLAG_experimental_liftoff_extern_ref = true;
const byte array_index = tester.DefineArray(kWasmI8, true);
ValueType array_type = optref(array_index);
@@ -733,7 +717,6 @@ WASM_COMPILED_EXEC_TEST(WasmPackedArrayU) {
WASM_COMPILED_EXEC_TEST(WasmPackedArrayS) {
WasmGCTester tester(execution_tier);
- FLAG_experimental_liftoff_extern_ref = true;
const byte array_index = tester.DefineArray(kWasmI16, true);
ValueType array_type = optref(array_index);
@@ -769,7 +752,6 @@ WASM_COMPILED_EXEC_TEST(WasmPackedArrayS) {
WASM_COMPILED_EXEC_TEST(NewDefault) {
WasmGCTester tester(execution_tier);
- FLAG_experimental_liftoff_extern_ref = true;
const byte struct_type = tester.DefineStruct(
{F(wasm::kWasmI32, true), F(wasm::kWasmF64, true), F(optref(0), true)});
const byte array_type = tester.DefineArray(wasm::kWasmI32, true);
@@ -801,17 +783,18 @@ WASM_COMPILED_EXEC_TEST(NewDefault) {
tester.CheckResult(allocate_array, 0);
}
-TEST(BasicRTT) {
- WasmGCTester tester;
+WASM_COMPILED_EXEC_TEST(BasicRtt) {
+ WasmGCTester tester(execution_tier);
+
const byte type_index = tester.DefineStruct({F(wasm::kWasmI32, true)});
const byte subtype_index =
tester.DefineStruct({F(wasm::kWasmI32, true), F(wasm::kWasmI32, true)});
- ValueType kRttTypes[] = {ValueType::Rtt(type_index, 1)};
+
+ ValueType kRttTypes[] = {ValueType::Rtt(type_index, 0)};
FunctionSig sig_t_v(1, 0, kRttTypes);
- ValueType kRttSubtypes[] = {
- ValueType::Rtt(static_cast<HeapType>(subtype_index), 2)};
+ ValueType kRttSubtypes[] = {ValueType::Rtt(subtype_index, 1)};
FunctionSig sig_t2_v(1, 0, kRttSubtypes);
- ValueType kRttTypesDeeper[] = {ValueType::Rtt(type_index, 2)};
+ ValueType kRttTypesDeeper[] = {ValueType::Rtt(type_index, 1)};
FunctionSig sig_t3_v(1, 0, kRttTypesDeeper);
ValueType kRefTypes[] = {ref(type_index)};
FunctionSig sig_q_v(1, 0, kRefTypes);
@@ -821,43 +804,34 @@ TEST(BasicRTT) {
const byte kRttSub = tester.DefineFunction(
&sig_t2_v, {},
{WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index)), kExprEnd});
- const byte kRttSubGeneric = tester.DefineFunction(
- &sig_t3_v, {},
- {WASM_RTT_SUB(type_index, WASM_RTT_CANON(kEqRefCode)), kExprEnd});
const byte kStructWithRtt = tester.DefineFunction(
&sig_q_v, {},
{WASM_STRUCT_NEW_WITH_RTT(type_index, WASM_I32V(42),
WASM_RTT_CANON(type_index)),
kExprEnd});
+
const int kFieldIndex = 1;
- const int kStructIndexCode = 1; // Shifted in 'let' block.
- const int kRttIndexCode = 0; // Let-bound, hence first local.
+ const int kStructIndexCode = 0;
// This implements the following function:
// var local_struct: type0;
- // let (local_rtt = rtt.sub(rtt.canon(type0), type1) in {
- // local_struct = new type1 with rtt 'local_rtt';
- // return (ref.test local_struct local_rtt) +
- // ((ref.cast local_struct local_rtt)[field0]);
+ // local_struct = new type1 with rtt 'kRttSub()';
+ // return (ref.test local_struct kRttSub()) +
+ // ((ref.cast local_struct kRttSub())[field0]);
// }
// The expected return value is 1+42 = 43.
const byte kRefCast = tester.DefineFunction(
tester.sigs.i_v(), {optref(type_index)},
- {WASM_LET_1_I(
- WASM_RTT(2, subtype_index),
- WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index)),
- WASM_LOCAL_SET(kStructIndexCode,
- WASM_STRUCT_NEW_WITH_RTT(
- subtype_index, WASM_I32V(11), WASM_I32V(42),
- WASM_LOCAL_GET(kRttIndexCode))),
- WASM_I32_ADD(
- WASM_REF_TEST(type_index, subtype_index,
- WASM_LOCAL_GET(kStructIndexCode),
- WASM_LOCAL_GET(kRttIndexCode)),
- WASM_STRUCT_GET(subtype_index, kFieldIndex,
- WASM_REF_CAST(type_index, subtype_index,
- WASM_LOCAL_GET(kStructIndexCode),
- WASM_LOCAL_GET(kRttIndexCode)))),
- kExprEnd)});
+ {WASM_LOCAL_SET(
+ kStructIndexCode,
+ WASM_STRUCT_NEW_WITH_RTT(subtype_index, WASM_I32V(11), WASM_I32V(42),
+ WASM_CALL_FUNCTION0(kRttSub))),
+ WASM_I32_ADD(
+ WASM_REF_TEST(WASM_LOCAL_GET(kStructIndexCode),
+ WASM_CALL_FUNCTION0(kRttSub)),
+ WASM_STRUCT_GET(subtype_index, kFieldIndex,
+ WASM_REF_CAST(WASM_LOCAL_GET(kStructIndexCode),
+ WASM_CALL_FUNCTION0(kRttSub)))),
+ kExprEnd});
tester.CompileModule();
@@ -875,7 +849,6 @@ TEST(BasicRTT) {
tester.GetResultObject(kRttSub).ToHandleChecked();
CHECK(subref_result->IsMap());
Handle<Map> submap = Handle<Map>::cast(subref_result);
- CHECK_EQ(*map, submap->wasm_type_info().parent());
CHECK_EQ(reinterpret_cast<Address>(
tester.instance()->module()->struct_type(subtype_index)),
submap->wasm_type_info().foreign_address());
@@ -883,12 +856,6 @@ TEST(BasicRTT) {
tester.GetResultObject(kRttSub).ToHandleChecked();
CHECK(subref_result.is_identical_to(subref_result_canonicalized));
- Handle<Object> sub_generic_1 =
- tester.GetResultObject(kRttSubGeneric).ToHandleChecked();
- Handle<Object> sub_generic_2 =
- tester.GetResultObject(kRttSubGeneric).ToHandleChecked();
- CHECK(sub_generic_1.is_identical_to(sub_generic_2));
-
Handle<Object> s = tester.GetResultObject(kStructWithRtt).ToHandleChecked();
CHECK(s->IsWasmStruct());
CHECK_EQ(Handle<WasmStruct>::cast(s)->map(), *map);
@@ -896,84 +863,85 @@ TEST(BasicRTT) {
tester.CheckResult(kRefCast, 43);
}
-WASM_COMPILED_EXEC_TEST(AnyRefRtt) {
+WASM_EXEC_TEST(NoDepthRtt) {
WasmGCTester tester(execution_tier);
- FLAG_experimental_liftoff_extern_ref = true;
-
- ValueType any_rtt_0_type = ValueType::Rtt(HeapType::kAny, 0);
- FunctionSig sig_any_canon(1, 0, &any_rtt_0_type);
- byte kAnyRttCanon = tester.DefineFunction(
- &sig_any_canon, {}, {WASM_RTT_CANON(kAnyRefCode), kExprEnd});
-
- ValueType any_rtt_1_type = ValueType::Rtt(HeapType::kAny, 1);
- FunctionSig sig_any_sub(1, 0, &any_rtt_1_type);
- byte kAnyRttSub = tester.DefineFunction(
- &sig_any_sub, {},
- {WASM_RTT_SUB(kAnyRefCode, WASM_RTT_CANON(kAnyRefCode)), kExprEnd});
-
- ValueType func_rtt_1_type = ValueType::Rtt(HeapType::kFunc, 1);
- FunctionSig sig_func_sub(1, 0, &func_rtt_1_type);
- byte kFuncRttSub = tester.DefineFunction(
- &sig_func_sub, {},
- {WASM_RTT_SUB(kFuncRefCode, WASM_RTT_CANON(kAnyRefCode)), kExprEnd});
-
- ValueType eq_rtt_1_type = ValueType::Rtt(HeapType::kEq, 1);
- FunctionSig sig_eq_sub(1, 0, &eq_rtt_1_type);
- byte kEqRttSub = tester.DefineFunction(
- &sig_eq_sub, {},
- {WASM_RTT_SUB(kEqRefCode, WASM_RTT_CANON(kAnyRefCode)), kExprEnd});
- const byte type_index = tester.DefineArray(kWasmI32, true);
- ValueType array_rtt_type = ValueType::Rtt(type_index, 1);
- FunctionSig sig_array_canon(1, 0, &array_rtt_type);
- byte kArrayRttCanon = tester.DefineFunction(
- &sig_array_canon, {}, {WASM_RTT_CANON(type_index), kExprEnd});
-
- byte kCheckArrayAgainstAny = tester.DefineFunction(
- tester.sigs.i_v(), {kWasmAnyRef},
- {WASM_LOCAL_SET(0, WASM_ARRAY_NEW_DEFAULT(type_index, WASM_I32V(5),
- WASM_RTT_CANON(type_index))),
- WASM_REF_TEST(kAnyRefCode, type_index, WASM_LOCAL_GET(0),
- WASM_RTT_CANON(type_index)),
+ const byte type_index = tester.DefineStruct({F(wasm::kWasmI32, true)});
+ const byte subtype_index =
+ tester.DefineStruct({F(wasm::kWasmI32, true), F(wasm::kWasmI32, true)});
+ const byte empty_struct_index = tester.DefineStruct({});
+
+ ValueType kRttSubtypeNoDepth = ValueType::Rtt(subtype_index);
+ FunctionSig sig_t2_v_nd(1, 0, &kRttSubtypeNoDepth);
+
+ const byte kRttSubtypeCanon = tester.DefineFunction(
+ &sig_t2_v_nd, {}, {WASM_RTT_CANON(subtype_index), kExprEnd});
+ const byte kRttSubtypeSub = tester.DefineFunction(
+ &sig_t2_v_nd, {},
+ {WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index)), kExprEnd});
+
+ const byte kTestCanon = tester.DefineFunction(
+ tester.sigs.i_v(), {optref(type_index)},
+ {WASM_LOCAL_SET(0, WASM_STRUCT_NEW_WITH_RTT(
+ subtype_index, WASM_I32V(11), WASM_I32V(42),
+ WASM_RTT_CANON(subtype_index))),
+ WASM_REF_TEST(WASM_LOCAL_GET(0), WASM_CALL_FUNCTION0(kRttSubtypeCanon)),
kExprEnd});
- byte kCheckAnyAgainstAny = tester.DefineFunction(
- tester.sigs.i_v(), {kWasmAnyRef},
- {WASM_LOCAL_SET(0, WASM_ARRAY_NEW_DEFAULT(type_index, WASM_I32V(5),
- WASM_RTT_CANON(type_index))),
- WASM_REF_TEST(kAnyRefCode, kAnyRefCode, WASM_LOCAL_GET(0),
- WASM_RTT_CANON(kAnyRefCode)),
+ const byte kTestSub = tester.DefineFunction(
+ tester.sigs.i_v(), {optref(type_index)},
+ {WASM_LOCAL_SET(
+ 0, WASM_STRUCT_NEW_WITH_RTT(
+ subtype_index, WASM_I32V(11), WASM_I32V(42),
+ WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index)))),
+ WASM_REF_TEST(WASM_LOCAL_GET(0), WASM_CALL_FUNCTION0(kRttSubtypeSub)),
kExprEnd});
- tester.CompileModule();
+ const byte kTestSubVsEmpty = tester.DefineFunction(
+ tester.sigs.i_v(), {optref(type_index)},
+ {WASM_LOCAL_SET(0, WASM_STRUCT_NEW_WITH_RTT(
+ subtype_index, WASM_I32V(11), WASM_I32V(42),
+ WASM_RTT_SUB(subtype_index,
+ WASM_RTT_CANON(empty_struct_index)))),
+ WASM_REF_TEST(WASM_LOCAL_GET(0), WASM_CALL_FUNCTION0(kRttSubtypeSub)),
+ kExprEnd});
- // Check (rtt.canon any).
- Handle<Object> result_any_canon =
- tester.GetResultObject(kAnyRttCanon).ToHandleChecked();
- CHECK(result_any_canon->IsMap());
- Handle<Map> any_map = Handle<Map>::cast(result_any_canon);
- CHECK_EQ(any_map->wasm_type_info().parent(),
- tester.isolate()->root(RootIndex::kNullMap));
- CHECK_EQ(any_map->wasm_type_info().supertypes().length(), 0);
-
- for (byte func_index : {kArrayRttCanon, kAnyRttSub, kFuncRttSub, kEqRttSub}) {
- Handle<Object> result =
- tester.GetResultObject(func_index).ToHandleChecked();
- CHECK(result->IsMap());
- Handle<Map> map = Handle<Map>::cast(result);
- // Its parent should be (rtt.canon any).
- CHECK_EQ(map->wasm_type_info().parent(), *any_map);
- CHECK_EQ(map->wasm_type_info().supertypes().get(0), *any_map);
- CHECK_EQ(map->wasm_type_info().supertypes().length(), 1);
- }
+ const byte kTestSubVsCanon = tester.DefineFunction(
+ tester.sigs.i_v(), {optref(type_index)},
+ {WASM_LOCAL_SET(0, WASM_STRUCT_NEW_WITH_RTT(
+ subtype_index, WASM_I32V(11), WASM_I32V(42),
+ WASM_RTT_CANON(subtype_index))),
+ WASM_REF_TEST(WASM_LOCAL_GET(0), WASM_CALL_FUNCTION0(kRttSubtypeSub)),
+ kExprEnd});
- tester.CheckResult(kCheckArrayAgainstAny, 1);
- tester.CheckResult(kCheckAnyAgainstAny, 1);
+ const byte kTestCanonVsSub = tester.DefineFunction(
+ tester.sigs.i_v(), {optref(type_index)},
+ {WASM_LOCAL_SET(
+ 0, WASM_STRUCT_NEW_WITH_RTT(
+ subtype_index, WASM_I32V(11), WASM_I32V(42),
+ WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index)))),
+ WASM_REF_TEST(WASM_LOCAL_GET(0), WASM_CALL_FUNCTION0(kRttSubtypeCanon)),
+ kExprEnd});
+
+ const byte kTestSuperVsSub = tester.DefineFunction(
+ tester.sigs.i_v(), {optref(type_index)},
+ {WASM_LOCAL_SET(0, WASM_STRUCT_NEW_WITH_RTT(type_index, WASM_I32V(42),
+ WASM_RTT_CANON(type_index))),
+ WASM_REF_TEST(WASM_LOCAL_GET(0), WASM_CALL_FUNCTION0(kRttSubtypeCanon)),
+ kExprEnd});
+
+ tester.CompileModule();
+
+ tester.CheckResult(kTestCanon, 1);
+ tester.CheckResult(kTestSub, 1);
+ tester.CheckResult(kTestSubVsEmpty, 0);
+ tester.CheckResult(kTestSubVsCanon, 0);
+ tester.CheckResult(kTestCanonVsSub, 0);
+ tester.CheckResult(kTestSuperVsSub, 0);
}
WASM_COMPILED_EXEC_TEST(ArrayNewMap) {
WasmGCTester tester(execution_tier);
- FLAG_experimental_liftoff_extern_ref = true;
const byte type_index = tester.DefineArray(kWasmI32, true);
@@ -985,7 +953,7 @@ WASM_COMPILED_EXEC_TEST(ArrayNewMap) {
WASM_RTT_CANON(type_index)),
kExprEnd});
- ValueType rtt_type = ValueType::Rtt(type_index, 1);
+ ValueType rtt_type = ValueType::Rtt(type_index, 0);
FunctionSig rtt_canon_sig(1, 0, &rtt_type);
const byte kRttCanon = tester.DefineFunction(
&rtt_canon_sig, {}, {WASM_RTT_CANON(type_index), kExprEnd});
@@ -1000,8 +968,8 @@ WASM_COMPILED_EXEC_TEST(ArrayNewMap) {
CHECK_EQ(Handle<WasmArray>::cast(result)->map(), *map);
}
-TEST(FunctionRefs) {
- WasmGCTester tester;
+WASM_COMPILED_EXEC_TEST(FunctionRefs) {
+ WasmGCTester tester(execution_tier);
const byte func_index =
tester.DefineFunction(tester.sigs.i_v(), {}, {WASM_I32V(42), kExprEnd});
const byte sig_index = 0;
@@ -1012,26 +980,18 @@ TEST(FunctionRefs) {
tester.AddGlobal(ValueType::Ref(sig_index, kNullable), false,
WasmInitExpr::RefFuncConst(func_index));
- ValueType func_type = ValueType::Ref(sig_index, kNonNullable);
+ ValueType func_type = ValueType::Ref(sig_index, kNullable);
FunctionSig sig_func(1, 0, &func_type);
- ValueType rtt1 = ValueType::Rtt(sig_index, 1);
- FunctionSig sig_rtt1(1, 0, &rtt1);
+ ValueType rtt0 = ValueType::Rtt(sig_index, 0);
+ FunctionSig sig_rtt0(1, 0, &rtt0);
const byte rtt_canon = tester.DefineFunction(
- &sig_rtt1, {}, {WASM_RTT_CANON(sig_index), kExprEnd});
-
- ValueType rtt2 = ValueType::Rtt(sig_index, 2);
- FunctionSig sig_rtt2(1, 0, &rtt2);
- const byte rtt_sub = tester.DefineFunction(
- &sig_rtt2, {},
- {WASM_RTT_SUB(sig_index, WASM_RTT_CANON(kFuncRefCode)), kExprEnd});
+ &sig_rtt0, {}, {WASM_RTT_CANON(sig_index), kExprEnd});
const byte cast = tester.DefineFunction(
&sig_func, {kWasmFuncRef},
{WASM_LOCAL_SET(0, WASM_REF_FUNC(func_index)),
- WASM_REF_CAST(kFuncRefCode, sig_index, WASM_LOCAL_GET(0),
- WASM_RTT_CANON(sig_index)),
- kExprEnd});
+ WASM_REF_CAST(WASM_LOCAL_GET(0), WASM_RTT_CANON(sig_index)), kExprEnd});
const byte cast_reference = tester.DefineFunction(
&sig_func, {}, {WASM_REF_FUNC(sig_index), kExprEnd});
@@ -1039,8 +999,19 @@ TEST(FunctionRefs) {
const byte test = tester.DefineFunction(
tester.sigs.i_v(), {kWasmFuncRef},
{WASM_LOCAL_SET(0, WASM_REF_FUNC(func_index)),
- WASM_REF_TEST(kFuncRefCode, other_sig_index, WASM_LOCAL_GET(0),
- WASM_RTT_CANON(other_sig_index)),
+ WASM_REF_TEST(WASM_LOCAL_GET(0), WASM_RTT_CANON(sig_index)), kExprEnd});
+
+ const byte test_fail_1 = tester.DefineFunction(
+ tester.sigs.i_v(), {kWasmFuncRef},
+ {WASM_LOCAL_SET(0, WASM_REF_FUNC(func_index)),
+ WASM_REF_TEST(WASM_LOCAL_GET(0), WASM_RTT_CANON(other_sig_index)),
+ kExprEnd});
+
+ const byte test_fail_2 = tester.DefineFunction(
+ tester.sigs.i_v(), {kWasmFuncRef},
+ {WASM_LOCAL_SET(0, WASM_REF_FUNC(func_index)),
+ WASM_REF_TEST(WASM_LOCAL_GET(0),
+ WASM_RTT_SUB(sig_index, WASM_RTT_CANON(sig_index))),
kExprEnd});
tester.CompileModule();
@@ -1051,11 +1022,6 @@ TEST(FunctionRefs) {
Handle<Map> map_canon = Handle<Map>::cast(result_canon);
CHECK(map_canon->IsJSFunctionMap());
- Handle<Object> result_sub = tester.GetResultObject(rtt_sub).ToHandleChecked();
- CHECK(result_sub->IsMap());
- Handle<Map> map_sub = Handle<Map>::cast(result_sub);
- CHECK(map_sub->IsJSFunctionMap());
-
Handle<Object> result_cast = tester.GetResultObject(cast).ToHandleChecked();
CHECK(result_cast->IsJSFunction());
Handle<JSFunction> cast_function = Handle<JSFunction>::cast(result_cast);
@@ -1069,11 +1035,13 @@ TEST(FunctionRefs) {
CHECK_EQ(cast_function->code().raw_instruction_start(),
cast_function_reference->code().raw_instruction_start());
- tester.CheckResult(test, 0);
+ tester.CheckResult(test, 1);
+ tester.CheckResult(test_fail_1, 0);
+ tester.CheckResult(test_fail_2, 0);
}
-TEST(CallRef) {
- WasmGCTester tester;
+WASM_COMPILED_EXEC_TEST(CallRef) {
+ WasmGCTester tester(execution_tier);
byte callee = tester.DefineFunction(
tester.sigs.i_ii(), {},
{WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)), kExprEnd});
@@ -1093,29 +1061,154 @@ TEST(CallRef) {
WASM_COMPILED_EXEC_TEST(RefTestCastNull) {
WasmGCTester tester(execution_tier);
- FLAG_experimental_liftoff_extern_ref = true;
byte type_index = tester.DefineStruct({F(wasm::kWasmI32, true)});
const byte kRefTestNull = tester.DefineFunction(
tester.sigs.i_v(), {},
- {WASM_REF_TEST(type_index, type_index, WASM_REF_NULL(type_index),
- WASM_RTT_CANON(type_index)),
+ {WASM_REF_TEST(WASM_REF_NULL(type_index), WASM_RTT_CANON(type_index)),
kExprEnd});
const byte kRefCastNull = tester.DefineFunction(
- tester.sigs.i_i(), // Argument and return value ignored
- {},
- {WASM_REF_CAST(type_index, type_index, WASM_REF_NULL(type_index),
- WASM_RTT_CANON(type_index)),
- kExprDrop, WASM_I32V(0), kExprEnd});
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_CAST(WASM_REF_NULL(type_index),
+ WASM_RTT_CANON(type_index))),
+ kExprEnd});
tester.CompileModule();
tester.CheckResult(kRefTestNull, 0);
- tester.CheckHasThrown(kRefCastNull, 0);
+ tester.CheckResult(kRefCastNull, 1);
+}
+
+WASM_COMPILED_EXEC_TEST(AbstractTypeChecks) {
+ WasmGCTester tester(execution_tier);
+
+ byte array_index = tester.DefineArray(kWasmI32, true);
+ byte function_index =
+ tester.DefineFunction(tester.sigs.v_v(), {}, {kExprEnd});
+ byte sig_index = 1;
+
+ // This is just so function_index counts as "declared".
+ tester.AddGlobal(ValueType::Ref(sig_index, kNullable), false,
+ WasmInitExpr::RefFuncConst(function_index));
+
+ byte kDataCheckNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_DATA(WASM_REF_NULL(kAnyRefCode)), kExprEnd});
+ byte kFuncCheckNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_FUNC(WASM_REF_NULL(kAnyRefCode)), kExprEnd});
+ byte kI31CheckNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_I31(WASM_REF_NULL(kAnyRefCode)), kExprEnd});
+
+ byte kDataCastNull =
+ tester.DefineFunction(tester.sigs.i_v(), {},
+ {WASM_REF_AS_DATA(WASM_REF_NULL(kAnyRefCode)),
+ WASM_DROP, WASM_I32V(1), kExprEnd});
+ byte kFuncCastNull =
+ tester.DefineFunction(tester.sigs.i_v(), {},
+ {WASM_REF_AS_FUNC(WASM_REF_NULL(kAnyRefCode)),
+ WASM_DROP, WASM_I32V(1), kExprEnd});
+ byte kI31CastNull =
+ tester.DefineFunction(tester.sigs.i_v(), {},
+ {WASM_REF_AS_I31(WASM_REF_NULL(kAnyRefCode)),
+ WASM_DROP, WASM_I32V(1), kExprEnd});
+
+#define TYPE_CHECK(type, value) \
+ tester.DefineFunction(tester.sigs.i_v(), {kWasmAnyRef}, \
+ {WASM_LOCAL_SET(0, WASM_SEQ(value)), \
+ WASM_REF_IS_##type(WASM_LOCAL_GET(0)), kExprEnd})
+
+ byte kDataCheckSuccess =
+ TYPE_CHECK(DATA, WASM_ARRAY_NEW_DEFAULT(array_index, WASM_I32V(10),
+ WASM_RTT_CANON(array_index)));
+ byte kDataCheckFailure = TYPE_CHECK(DATA, WASM_I31_NEW(WASM_I32V(42)));
+ byte kFuncCheckSuccess = TYPE_CHECK(FUNC, WASM_REF_FUNC(function_index));
+ byte kFuncCheckFailure =
+ TYPE_CHECK(FUNC, WASM_ARRAY_NEW_DEFAULT(array_index, WASM_I32V(10),
+ WASM_RTT_CANON(array_index)));
+ byte kI31CheckSuccess = TYPE_CHECK(I31, WASM_I31_NEW(WASM_I32V(42)));
+ byte kI31CheckFailure =
+ TYPE_CHECK(I31, WASM_ARRAY_NEW_DEFAULT(array_index, WASM_I32V(10),
+ WASM_RTT_CANON(array_index)));
+#undef TYPE_CHECK
+
+#define TYPE_CAST(type, value) \
+ tester.DefineFunction(tester.sigs.i_v(), {kWasmAnyRef}, \
+ {WASM_LOCAL_SET(0, WASM_SEQ(value)), \
+ WASM_REF_AS_##type(WASM_LOCAL_GET(0)), WASM_DROP, \
+ WASM_I32V(1), kExprEnd})
+
+ byte kDataCastSuccess =
+ TYPE_CAST(DATA, WASM_ARRAY_NEW_DEFAULT(array_index, WASM_I32V(10),
+ WASM_RTT_CANON(array_index)));
+ byte kDataCastFailure = TYPE_CAST(DATA, WASM_I31_NEW(WASM_I32V(42)));
+ byte kFuncCastSuccess = TYPE_CAST(FUNC, WASM_REF_FUNC(function_index));
+ byte kFuncCastFailure =
+ TYPE_CAST(FUNC, WASM_ARRAY_NEW_DEFAULT(array_index, WASM_I32V(10),
+ WASM_RTT_CANON(array_index)));
+ byte kI31CastSuccess = TYPE_CAST(I31, WASM_I31_NEW(WASM_I32V(42)));
+ byte kI31CastFailure =
+ TYPE_CAST(I31, WASM_ARRAY_NEW_DEFAULT(array_index, WASM_I32V(10),
+ WASM_RTT_CANON(array_index)));
+#undef TYPE_CAST
+
+// If the branch is not taken, we return 0. If it is taken, then the respective
+// type check should succeed, and we return 1.
+#define BR_ON(TYPE, type, value) \
+ tester.DefineFunction( \
+ tester.sigs.i_v(), {kWasmAnyRef}, \
+ {WASM_LOCAL_SET(0, WASM_SEQ(value)), \
+ WASM_REF_IS_##TYPE(WASM_BLOCK_R( \
+ kWasm##type##Ref, WASM_BR_ON_##TYPE(0, WASM_LOCAL_GET(0)), \
+ WASM_RETURN(WASM_I32V(0)))), \
+ kExprEnd})
+
+ byte kBrOnDataTaken =
+ BR_ON(DATA, Data,
+ WASM_ARRAY_NEW_DEFAULT(array_index, WASM_I32V(10),
+ WASM_RTT_CANON(array_index)));
+ byte kBrOnDataNotTaken = BR_ON(DATA, Data, WASM_REF_FUNC(function_index));
+ byte kBrOnFuncTaken = BR_ON(FUNC, Func, WASM_REF_FUNC(function_index));
+ byte kBrOnFuncNotTaken = BR_ON(FUNC, Func, WASM_I31_NEW(WASM_I32V(42)));
+ byte kBrOnI31Taken = BR_ON(I31, I31, WASM_I31_NEW(WASM_I32V(42)));
+ byte kBrOnI31NotTaken =
+ BR_ON(I31, I31,
+ WASM_ARRAY_NEW_DEFAULT(array_index, WASM_I32V(10),
+ WASM_RTT_CANON(array_index)));
+#undef BR_ON
+
+ tester.CompileModule();
+
+ tester.CheckResult(kDataCheckNull, 0);
+ tester.CheckHasThrown(kDataCastNull);
+ tester.CheckResult(kDataCheckSuccess, 1);
+ tester.CheckResult(kDataCheckFailure, 0);
+ tester.CheckResult(kDataCastSuccess, 1);
+ tester.CheckHasThrown(kDataCastFailure);
+ tester.CheckResult(kBrOnDataTaken, 1);
+ tester.CheckResult(kBrOnDataNotTaken, 0);
+
+ tester.CheckResult(kFuncCheckNull, 0);
+ tester.CheckHasThrown(kFuncCastNull);
+ tester.CheckResult(kFuncCheckSuccess, 1);
+ tester.CheckResult(kFuncCheckFailure, 0);
+ tester.CheckResult(kFuncCastSuccess, 1);
+ tester.CheckHasThrown(kFuncCastFailure);
+ tester.CheckResult(kBrOnFuncTaken, 1);
+ tester.CheckResult(kBrOnFuncNotTaken, 0);
+
+ tester.CheckResult(kI31CheckNull, 0);
+ tester.CheckHasThrown(kI31CastNull);
+ tester.CheckResult(kI31CheckSuccess, 1);
+ tester.CheckResult(kI31CheckFailure, 0);
+ tester.CheckResult(kI31CastSuccess, 1);
+ tester.CheckHasThrown(kI31CastFailure);
+ tester.CheckResult(kBrOnI31Taken, 1);
+ tester.CheckResult(kBrOnI31NotTaken, 0);
}
WASM_COMPILED_EXEC_TEST(BasicI31) {
WasmGCTester tester(execution_tier);
- FLAG_experimental_liftoff_extern_ref = true;
const byte kSigned = tester.DefineFunction(
tester.sigs.i_i(), {},
{WASM_I31_GET_S(WASM_I31_NEW(WASM_LOCAL_GET(0))), kExprEnd});
@@ -1133,89 +1226,30 @@ WASM_COMPILED_EXEC_TEST(BasicI31) {
tester.CheckResult(kUnsigned, 0x7FFFFFFF, 0x7FFFFFFF);
}
-WASM_COMPILED_EXEC_TEST(I31Casts) {
- WasmGCTester tester(execution_tier);
- FLAG_experimental_liftoff_extern_ref = true;
- const byte struct_type = tester.DefineStruct({F(wasm::kWasmI32, true)});
- const byte i31_rtt =
- tester.AddGlobal(ValueType::Rtt(HeapType::kI31, 1), false,
- WasmInitExpr::RttCanon(HeapType::kI31));
- const byte struct_rtt =
- tester.AddGlobal(ValueType::Rtt(struct_type, 1), false,
- WasmInitExpr::RttCanon(
- static_cast<HeapType::Representation>(struct_type)));
- // Adds the result of a successful typecheck to the untagged value, i.e.
- // should return 1 + 42 = 43.
- const byte kTestAndCastSuccess = tester.DefineFunction(
- tester.sigs.i_v(), {kWasmEqRef},
- {WASM_LOCAL_SET(0, WASM_I31_NEW(WASM_I32V(42))),
- WASM_I32_ADD(WASM_REF_TEST(kEqRefCode, kI31RefCode, WASM_LOCAL_GET(0),
- WASM_GLOBAL_GET(i31_rtt)),
- WASM_I31_GET_S(WASM_REF_CAST(kEqRefCode, kI31RefCode,
- WASM_LOCAL_GET(0),
- WASM_GLOBAL_GET(i31_rtt)))),
- kExprEnd});
- // Adds the results of two unsuccessful type checks (an i31ref is not a
- // struct, nor the other way round).
- const byte kTestFalse = tester.DefineFunction(
- tester.sigs.i_v(), {},
- {WASM_I32_ADD(
- WASM_REF_TEST(kEqRefCode, kI31RefCode,
- WASM_STRUCT_NEW_WITH_RTT(struct_type, WASM_I32V(42),
- WASM_GLOBAL_GET(struct_rtt)),
- WASM_GLOBAL_GET(i31_rtt)),
- WASM_REF_TEST(kEqRefCode, struct_type, WASM_I31_NEW(WASM_I32V(23)),
- WASM_GLOBAL_GET(struct_rtt))),
- kExprEnd});
- // Tries to cast an i31ref to a struct, which should trap.
- const byte kCastI31ToStruct = tester.DefineFunction(
- tester.sigs.i_i(), // Argument and return value ignored
- {},
- {WASM_STRUCT_GET(
- struct_type, 0,
- WASM_REF_CAST(kEqRefCode, struct_type, WASM_I31_NEW(WASM_I32V(42)),
- WASM_GLOBAL_GET(struct_rtt))),
- kExprEnd});
- // Tries to cast a struct to i31ref, which should trap.
- const byte kCastStructToI31 = tester.DefineFunction(
- tester.sigs.i_i(), // Argument and return value ignored
- {},
- {WASM_I31_GET_S(
- WASM_REF_CAST(kEqRefCode, kI31RefCode,
- WASM_STRUCT_NEW_WITH_RTT(struct_type, WASM_I32V(42),
- WASM_GLOBAL_GET(struct_rtt)),
- WASM_GLOBAL_GET(i31_rtt))),
- kExprEnd});
- tester.CompileModule();
- tester.CheckResult(kTestAndCastSuccess, 43);
- tester.CheckResult(kTestFalse, 0);
- tester.CheckHasThrown(kCastI31ToStruct, 0);
- tester.CheckHasThrown(kCastStructToI31, 0);
-}
-
// This flushed out a few bugs, so it serves as a regression test. It can also
// be modified (made to run longer) to measure performance of casts.
WASM_COMPILED_EXEC_TEST(CastsBenchmark) {
WasmGCTester tester(execution_tier);
- FLAG_experimental_liftoff_extern_ref = true;
const byte SuperType = tester.DefineStruct({F(wasm::kWasmI32, true)});
const byte SubType =
tester.DefineStruct({F(wasm::kWasmI32, true), F(wasm::kWasmI32, true)});
- const byte ListType = tester.DefineArray(wasm::kWasmEqRef, true);
+
+ ValueType kDataRefNull = ValueType::Ref(HeapType::kData, kNullable);
+ const byte ListType = tester.DefineArray(kDataRefNull, true);
const byte List =
tester.AddGlobal(ValueType::Ref(ListType, kNullable), true,
WasmInitExpr::RefNullConst(
static_cast<HeapType::Representation>(ListType)));
const byte RttSuper = tester.AddGlobal(
- ValueType::Rtt(SuperType, 1), false,
+ ValueType::Rtt(SuperType, 0), false,
WasmInitExpr::RttCanon(static_cast<HeapType::Representation>(SuperType)));
const byte RttSub = tester.AddGlobal(
- ValueType::Rtt(SubType, 2), false,
+ ValueType::Rtt(SubType, 1), false,
WasmInitExpr::RttSub(static_cast<HeapType::Representation>(SubType),
WasmInitExpr::GlobalGet(RttSuper)));
const byte RttList = tester.AddGlobal(
- ValueType::Rtt(ListType, 1), false,
+ ValueType::Rtt(ListType, 0), false,
WasmInitExpr::RttCanon(static_cast<HeapType::Representation>(ListType)));
const uint32_t kListLength = 1024;
@@ -1273,7 +1307,6 @@ WASM_COMPILED_EXEC_TEST(CastsBenchmark) {
WASM_STRUCT_GET(
SuperType, 0,
WASM_REF_CAST(
- kEqRefCode, SuperType,
WASM_ARRAY_GET(
ListType, WASM_LOCAL_GET(list),
WASM_I32_AND(WASM_LOCAL_GET(i),
@@ -1296,7 +1329,6 @@ WASM_COMPILED_EXEC_TEST(CastsBenchmark) {
WASM_COMPILED_EXEC_TEST(GlobalInitReferencingGlobal) {
WasmGCTester tester(execution_tier);
- FLAG_experimental_liftoff_extern_ref = true;
const byte from = tester.AddGlobal(kWasmI32, false, WasmInitExpr(42));
const byte to =
tester.AddGlobal(kWasmI32, false, WasmInitExpr::GlobalGet(from));
@@ -1311,7 +1343,6 @@ WASM_COMPILED_EXEC_TEST(GlobalInitReferencingGlobal) {
WASM_COMPILED_EXEC_TEST(IndirectNullSetManually) {
WasmGCTester tester(execution_tier);
- FLAG_experimental_liftoff_extern_ref = true;
byte sig_index = tester.DefineSignature(tester.sigs.i_i());
tester.DefineTable(ValueType::Ref(sig_index, kNullable), 1, 1);
byte func_index = tester.DefineFunction(
@@ -1325,74 +1356,71 @@ WASM_COMPILED_EXEC_TEST(IndirectNullSetManually) {
tester.CheckHasThrown(func_index, 42);
}
-TEST(JsAccess) {
- for (ValueType supertype : {kWasmEqRef, kWasmAnyRef}) {
- WasmGCTester tester;
- const byte type_index = tester.DefineStruct({F(wasm::kWasmI32, true)});
- ValueType kRefType = ref(type_index);
- ValueType kSupertypeToI[] = {kWasmI32, supertype};
- FunctionSig sig_t_v(1, 0, &kRefType);
- FunctionSig sig_super_v(1, 0, &supertype);
- FunctionSig sig_i_super(1, 1, kSupertypeToI);
-
- tester.DefineExportedFunction(
- "disallowed", &sig_t_v,
- {WASM_STRUCT_NEW_WITH_RTT(type_index, WASM_I32V(42),
- WASM_RTT_CANON(type_index)),
- kExprEnd});
- // Same code, different signature.
- tester.DefineExportedFunction(
- "producer", &sig_super_v,
- {WASM_STRUCT_NEW_WITH_RTT(type_index, WASM_I32V(42),
- WASM_RTT_CANON(type_index)),
- kExprEnd});
- tester.DefineExportedFunction(
- "consumer", &sig_i_super,
- {WASM_STRUCT_GET(
- type_index, 0,
- WASM_REF_CAST(supertype.value_type_code(), type_index,
- WASM_LOCAL_GET(0), WASM_RTT_CANON(type_index))),
- kExprEnd});
-
- tester.CompileModule();
- Isolate* isolate = tester.isolate();
- TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
- MaybeHandle<Object> maybe_result =
- tester.CallExportedFunction("disallowed", 0, nullptr);
- CHECK(maybe_result.is_null());
- CHECK(try_catch.HasCaught());
- try_catch.Reset();
- isolate->clear_pending_exception();
-
- maybe_result = tester.CallExportedFunction("producer", 0, nullptr);
- if (maybe_result.is_null()) {
- FATAL("Calling 'producer' failed: %s",
- *v8::String::Utf8Value(reinterpret_cast<v8::Isolate*>(isolate),
- try_catch.Message()->Get()));
- }
- {
- Handle<Object> args[] = {maybe_result.ToHandleChecked()};
- maybe_result = tester.CallExportedFunction("consumer", 1, args);
- }
- if (maybe_result.is_null()) {
- FATAL("Calling 'consumer' failed: %s",
- *v8::String::Utf8Value(reinterpret_cast<v8::Isolate*>(isolate),
- try_catch.Message()->Get()));
- }
- Handle<Object> result = maybe_result.ToHandleChecked();
- CHECK(result->IsSmi());
- CHECK_EQ(42, Smi::cast(*result).value());
- // Calling {consumer} with any other object (e.g. the Smi we just got as
- // {result}) should trap.
- {
- Handle<Object> args[] = {result};
- maybe_result = tester.CallExportedFunction("consumer", 1, args);
- }
- CHECK(maybe_result.is_null());
- CHECK(try_catch.HasCaught());
- try_catch.Reset();
- isolate->clear_pending_exception();
+WASM_COMPILED_EXEC_TEST(JsAccess) {
+ WasmGCTester tester(execution_tier);
+ const byte type_index = tester.DefineStruct({F(wasm::kWasmI32, true)});
+ ValueType kRefType = ref(type_index);
+ ValueType kSupertypeToI[] = {kWasmI32, kWasmDataRef};
+ FunctionSig sig_t_v(1, 0, &kRefType);
+ FunctionSig sig_super_v(1, 0, &kWasmDataRef);
+ FunctionSig sig_i_super(1, 1, kSupertypeToI);
+
+ tester.DefineExportedFunction(
+ "disallowed", &sig_t_v,
+ {WASM_STRUCT_NEW_WITH_RTT(type_index, WASM_I32V(42),
+ WASM_RTT_CANON(type_index)),
+ kExprEnd});
+ // Same code, different signature.
+ tester.DefineExportedFunction(
+ "producer", &sig_super_v,
+ {WASM_STRUCT_NEW_WITH_RTT(type_index, WASM_I32V(42),
+ WASM_RTT_CANON(type_index)),
+ kExprEnd});
+ tester.DefineExportedFunction(
+ "consumer", &sig_i_super,
+ {WASM_STRUCT_GET(
+ type_index, 0,
+ WASM_REF_CAST(WASM_LOCAL_GET(0), WASM_RTT_CANON(type_index))),
+ kExprEnd});
+
+ tester.CompileModule();
+ Isolate* isolate = tester.isolate();
+ TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
+ MaybeHandle<Object> maybe_result =
+ tester.CallExportedFunction("disallowed", 0, nullptr);
+ CHECK(maybe_result.is_null());
+ CHECK(try_catch.HasCaught());
+ try_catch.Reset();
+ isolate->clear_pending_exception();
+
+ maybe_result = tester.CallExportedFunction("producer", 0, nullptr);
+ if (maybe_result.is_null()) {
+ FATAL("Calling 'producer' failed: %s",
+ *v8::String::Utf8Value(reinterpret_cast<v8::Isolate*>(isolate),
+ try_catch.Message()->Get()));
+ }
+ {
+ Handle<Object> args[] = {maybe_result.ToHandleChecked()};
+ maybe_result = tester.CallExportedFunction("consumer", 1, args);
+ }
+ if (maybe_result.is_null()) {
+ FATAL("Calling 'consumer' failed: %s",
+ *v8::String::Utf8Value(reinterpret_cast<v8::Isolate*>(isolate),
+ try_catch.Message()->Get()));
+ }
+ Handle<Object> result = maybe_result.ToHandleChecked();
+ CHECK(result->IsSmi());
+ CHECK_EQ(42, Smi::cast(*result).value());
+ // Calling {consumer} with any other object (e.g. the Smi we just got as
+ // {result}) should trap.
+ {
+ Handle<Object> args[] = {result};
+ maybe_result = tester.CallExportedFunction("consumer", 1, args);
}
+ CHECK(maybe_result.is_null());
+ CHECK(try_catch.HasCaught());
+ try_catch.Reset();
+ isolate->clear_pending_exception();
}
} // namespace test_gc
diff --git a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
index 6b888511d9..bfe88180d1 100644
--- a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
+++ b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
@@ -144,6 +144,11 @@ void CompileJumpTableThunk(Address thunk, Address jump_target) {
__ lw(scratch, MemOperand(scratch, 0));
__ Branch(&exit, ne, scratch, Operand(zero_reg));
__ Jump(jump_target, RelocInfo::NONE);
+#elif V8_TARGET_ARCH_RISCV64
+ __ li(scratch, Operand(stop_bit_address, RelocInfo::NONE));
+ __ Lw(scratch, MemOperand(scratch, 0));
+ __ Branch(&exit, ne, scratch, Operand(zero_reg));
+ __ Jump(jump_target, RelocInfo::NONE);
#else
#error Unsupported architecture
#endif
@@ -232,7 +237,8 @@ TEST(JumpTablePatchingStress) {
constexpr int kNumberOfPatcherThreads = 3;
STATIC_ASSERT(kAssemblerBufferSize >= kJumpTableSize);
- auto buffer = AllocateAssemblerBuffer(kAssemblerBufferSize);
+ auto buffer = AllocateAssemblerBuffer(kAssemblerBufferSize, nullptr,
+ VirtualMemory::kMapAsJittable);
byte* thunk_slot_buffer = buffer->start() + kBufferSlotStartOffset;
std::bitset<kAvailableBufferSlots> used_thunk_slots;
diff --git a/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc b/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
index c3444b9ae6..f5847d1fb1 100644
--- a/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
+++ b/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
@@ -20,15 +20,16 @@ class LiftoffCompileEnvironment {
: isolate_(CcTest::InitIsolateOnce()),
handle_scope_(isolate_),
zone_(isolate_->allocator(), ZONE_NAME),
- module_builder_(&zone_, nullptr, TestExecutionTier::kLiftoff,
- kRuntimeExceptionSupport, kNoLowerSimd) {
+ wasm_runner_(nullptr, TestExecutionTier::kLiftoff, 0,
+ kRuntimeExceptionSupport, kNoLowerSimd) {
// Add a table of length 1, for indirect calls.
- module_builder_.AddIndirectFunctionTable(nullptr, 1);
+ wasm_runner_.builder().AddIndirectFunctionTable(nullptr, 1);
+ // Set tiered down such that we generate debugging code.
+ wasm_runner_.builder().SetTieredDown();
}
struct TestFunction {
- OwnedVector<uint8_t> body_bytes;
- WasmFunction* function;
+ WasmCode* code;
FunctionBody body;
};
@@ -39,17 +40,15 @@ class LiftoffCompileEnvironment {
auto test_func = AddFunction(return_types, param_types, raw_function_bytes);
// Now compile the function with Liftoff two times.
- CompilationEnv env = module_builder_.CreateCompilationEnv();
+ CompilationEnv env = wasm_runner_.builder().CreateCompilationEnv();
WasmFeatures detected1;
WasmFeatures detected2;
- WasmCompilationResult result1 =
- ExecuteLiftoffCompilation(isolate_->allocator(), &env, test_func.body,
- test_func.function->func_index, kNoDebugging,
- isolate_->counters(), &detected1);
- WasmCompilationResult result2 =
- ExecuteLiftoffCompilation(isolate_->allocator(), &env, test_func.body,
- test_func.function->func_index, kNoDebugging,
- isolate_->counters(), &detected2);
+ WasmCompilationResult result1 = ExecuteLiftoffCompilation(
+ isolate_->allocator(), &env, test_func.body, test_func.code->index(),
+ kNoDebugging, isolate_->counters(), &detected1);
+ WasmCompilationResult result2 = ExecuteLiftoffCompilation(
+ isolate_->allocator(), &env, test_func.body, test_func.code->index(),
+ kNoDebugging, isolate_->counters(), &detected2);
CHECK(result1.succeeded());
CHECK(result2.succeeded());
@@ -70,20 +69,20 @@ class LiftoffCompileEnvironment {
std::vector<int> breakpoints = {}) {
auto test_func = AddFunction(return_types, param_types, raw_function_bytes);
- CompilationEnv env = module_builder_.CreateCompilationEnv();
+ CompilationEnv env = wasm_runner_.builder().CreateCompilationEnv();
WasmFeatures detected;
std::unique_ptr<DebugSideTable> debug_side_table_via_compilation;
- ExecuteLiftoffCompilation(CcTest::i_isolate()->allocator(), &env,
- test_func.body, 0, kForDebugging, nullptr,
- &detected, VectorOf(breakpoints),
- &debug_side_table_via_compilation);
+ auto result = ExecuteLiftoffCompilation(
+ CcTest::i_isolate()->allocator(), &env, test_func.body, 0,
+ kForDebugging, nullptr, &detected, VectorOf(breakpoints),
+ &debug_side_table_via_compilation);
+ CHECK(result.succeeded());
// If there are no breakpoints, then {ExecuteLiftoffCompilation} should
// provide the same debug side table.
if (breakpoints.empty()) {
std::unique_ptr<DebugSideTable> debug_side_table =
- GenerateLiftoffDebugSideTable(CcTest::i_isolate()->allocator(), &env,
- test_func.body, 0);
+ GenerateLiftoffDebugSideTable(test_func.code);
CheckTableEquals(*debug_side_table, *debug_side_table_via_compilation);
}
@@ -94,6 +93,7 @@ class LiftoffCompileEnvironment {
static void CheckTableEquals(const DebugSideTable& a,
const DebugSideTable& b) {
CHECK_EQ(a.num_locals(), b.num_locals());
+ CHECK_EQ(a.entries().size(), b.entries().size());
CHECK(std::equal(a.entries().begin(), a.entries().end(),
b.entries().begin(), b.entries().end(),
&CheckEntryEquals));
@@ -102,42 +102,11 @@ class LiftoffCompileEnvironment {
static bool CheckEntryEquals(const DebugSideTable::Entry& a,
const DebugSideTable::Entry& b) {
CHECK_EQ(a.pc_offset(), b.pc_offset());
- CHECK(std::equal(a.values().begin(), a.values().end(), b.values().begin(),
- b.values().end(), &CheckValueEquals));
+ CHECK_EQ(a.stack_height(), b.stack_height());
+ CHECK_EQ(a.changed_values(), b.changed_values());
return true;
}
- static bool CheckValueEquals(const DebugSideTable::Entry::Value& a,
- const DebugSideTable::Entry::Value& b) {
- CHECK_EQ(a.type, b.type);
- CHECK_EQ(a.kind, b.kind);
- switch (a.kind) {
- case DebugSideTable::Entry::kConstant:
- CHECK_EQ(a.i32_const, b.i32_const);
- break;
- case DebugSideTable::Entry::kRegister:
- CHECK_EQ(a.reg_code, b.reg_code);
- break;
- case DebugSideTable::Entry::kStack:
- CHECK_EQ(a.stack_offset, b.stack_offset);
- break;
- }
- return true;
- }
-
- OwnedVector<uint8_t> GenerateFunctionBody(
- std::initializer_list<uint8_t> raw_function_bytes) {
- // Build the function bytes by prepending the locals decl and appending an
- // "end" opcode.
- OwnedVector<uint8_t> function_bytes =
- OwnedVector<uint8_t>::New(raw_function_bytes.size() + 2);
- function_bytes[0] = WASM_NO_LOCALS;
- std::copy(raw_function_bytes.begin(), raw_function_bytes.end(),
- &function_bytes[1]);
- function_bytes[raw_function_bytes.size() + 1] = WASM_END;
- return function_bytes;
- }
-
FunctionSig* AddSig(std::initializer_list<ValueType> return_types,
std::initializer_list<ValueType> param_types) {
ValueType* storage =
@@ -147,66 +116,81 @@ class LiftoffCompileEnvironment {
storage + return_types.size());
FunctionSig* sig = zone_.New<FunctionSig>(return_types.size(),
param_types.size(), storage);
- module_builder_.AddSignature(sig);
return sig;
}
TestFunction AddFunction(std::initializer_list<ValueType> return_types,
std::initializer_list<ValueType> param_types,
- std::initializer_list<uint8_t> raw_function_bytes) {
- OwnedVector<uint8_t> function_bytes =
- GenerateFunctionBody(raw_function_bytes);
+ std::initializer_list<uint8_t> function_bytes) {
FunctionSig* sig = AddSig(return_types, param_types);
- int func_index =
- module_builder_.AddFunction(sig, "f", TestingModuleBuilder::kWasm);
- WasmFunction* function = module_builder_.GetFunctionAt(func_index);
- function->code = {module_builder_.AddBytes(function_bytes.as_vector()),
- static_cast<uint32_t>(function_bytes.size())};
- FunctionBody body{function->sig, 0, function_bytes.begin(),
- function_bytes.end()};
- return {std::move(function_bytes), function, body};
+ // Compile the function so we can get the WasmCode* which is later used to
+ // generate the debug side table lazily.
+ auto& func_compiler = wasm_runner_.NewFunction(sig, "f");
+ func_compiler.Build(function_bytes.begin(), function_bytes.end());
+
+ WasmCode* code =
+ wasm_runner_.builder().GetFunctionCode(func_compiler.function_index());
+
+ // Get the wire bytes created by the function compiler (including locals
+ // declaration and the trailing "end" opcode).
+ NativeModule* native_module = code->native_module();
+ auto* function = &native_module->module()->functions[code->index()];
+ Vector<const uint8_t> function_wire_bytes =
+ native_module->wire_bytes().SubVector(function->code.offset(),
+ function->code.end_offset());
+
+ FunctionBody body{sig, 0, function_wire_bytes.begin(),
+ function_wire_bytes.end()};
+ return {code, body};
}
Isolate* isolate_;
HandleScope handle_scope_;
Zone zone_;
- TestingModuleBuilder module_builder_;
+ // wasm_runner_ is used to build actual code objects needed to request lazy
+ // generation of debug side tables.
+ WasmRunnerBase wasm_runner_;
+ WasmCodeRefScope code_ref_scope_;
};
struct DebugSideTableEntry {
- std::vector<DebugSideTable::Entry::Value> values;
+ int stack_height;
+ std::vector<DebugSideTable::Entry::Value> changed_values;
// Construct via vector or implicitly via initializer list.
- explicit DebugSideTableEntry(std::vector<DebugSideTable::Entry::Value> values)
- : values(std::move(values)) {}
+ DebugSideTableEntry(int stack_height,
+ std::vector<DebugSideTable::Entry::Value> changed_values)
+ : stack_height(stack_height), changed_values(std::move(changed_values)) {}
+
DebugSideTableEntry(
- std::initializer_list<DebugSideTable::Entry::Value> values)
- : values(values) {}
+ int stack_height,
+ std::initializer_list<DebugSideTable::Entry::Value> changed_values)
+ : stack_height(stack_height), changed_values(changed_values) {}
bool operator==(const DebugSideTableEntry& other) const {
- if (values.size() != other.values.size()) return false;
- for (size_t i = 0; i < values.size(); ++i) {
- if (values[i].type != other.values[i].type) return false;
- if (values[i].kind != other.values[i].kind) return false;
- // Stack offsets and register codes are platform dependent, so only check
- // constants here.
- if (values[i].kind == DebugSideTable::Entry::kConstant &&
- values[i].i32_const != other.values[i].i32_const) {
- return false;
- }
- }
- return true;
+ return stack_height == other.stack_height &&
+ std::equal(changed_values.begin(), changed_values.end(),
+ other.changed_values.begin(), other.changed_values.end(),
+ CheckValueEquals);
+ }
+
+ // Check for equality, but ignore exact register and stack offset.
+ static bool CheckValueEquals(const DebugSideTable::Entry::Value& a,
+ const DebugSideTable::Entry::Value& b) {
+ return a.index == b.index && a.kind == b.kind &&
+ (a.storage != DebugSideTable::Entry::kConstant ||
+ a.i32_const == b.i32_const);
}
};
// Debug builds will print the vector of DebugSideTableEntry.
#ifdef DEBUG
std::ostream& operator<<(std::ostream& out, const DebugSideTableEntry& entry) {
- out << "{";
+ out << "stack height " << entry.stack_height << ", changed: {";
const char* comma = "";
- for (auto& v : entry.values) {
- out << comma << v.type.name() << " ";
- switch (v.kind) {
+ for (auto& v : entry.changed_values) {
+ out << comma << v.index << ":" << name(v.kind) << " ";
+ switch (v.storage) {
case DebugSideTable::Entry::kConstant:
out << "const:" << v.i32_const;
break;
@@ -229,23 +213,27 @@ std::ostream& operator<<(std::ostream& out,
#endif // DEBUG
// Named constructors to make the tests more readable.
-DebugSideTable::Entry::Value Constant(ValueType type, int32_t constant) {
+DebugSideTable::Entry::Value Constant(int index, ValueKind kind,
+ int32_t constant) {
DebugSideTable::Entry::Value value;
- value.type = type;
- value.kind = DebugSideTable::Entry::kConstant;
+ value.index = index;
+ value.kind = kind;
+ value.storage = DebugSideTable::Entry::kConstant;
value.i32_const = constant;
return value;
}
-DebugSideTable::Entry::Value Register(ValueType type) {
+DebugSideTable::Entry::Value Register(int index, ValueKind kind) {
DebugSideTable::Entry::Value value;
- value.type = type;
- value.kind = DebugSideTable::Entry::kRegister;
+ value.index = index;
+ value.kind = kind;
+ value.storage = DebugSideTable::Entry::kRegister;
return value;
}
-DebugSideTable::Entry::Value Stack(ValueType type) {
+DebugSideTable::Entry::Value Stack(int index, ValueKind kind) {
DebugSideTable::Entry::Value value;
- value.type = type;
- value.kind = DebugSideTable::Entry::kStack;
+ value.index = index;
+ value.kind = kind;
+ value.storage = DebugSideTable::Entry::kStack;
return value;
}
@@ -253,10 +241,10 @@ void CheckDebugSideTable(std::vector<DebugSideTableEntry> expected_entries,
const wasm::DebugSideTable* debug_side_table) {
std::vector<DebugSideTableEntry> entries;
for (auto& entry : debug_side_table->entries()) {
- auto values = entry.values();
- entries.push_back(
- DebugSideTableEntry{std::vector<DebugSideTable::Entry::Value>{
- values.begin(), values.end()}});
+ entries.emplace_back(
+ entry.stack_height(),
+ std::vector<DebugSideTable::Entry::Value>{
+ entry.changed_values().begin(), entry.changed_values().end()});
}
CHECK_EQ(expected_entries, entries);
}
@@ -308,9 +296,9 @@ TEST(Liftoff_debug_side_table_simple) {
CheckDebugSideTable(
{
// function entry, locals in registers.
- {Register(kWasmI32), Register(kWasmI32)},
- // OOL stack check, locals spilled, stack empty.
- {Stack(kWasmI32), Stack(kWasmI32)},
+ {2, {Register(0, kI32), Register(1, kI32)}},
+ // OOL stack check, locals spilled, stack still empty.
+ {2, {Stack(0, kI32), Stack(1, kI32)}},
},
debug_side_table.get());
}
@@ -324,11 +312,11 @@ TEST(Liftoff_debug_side_table_call) {
CheckDebugSideTable(
{
// function entry, local in register.
- {Register(kWasmI32)},
+ {1, {Register(0, kI32)}},
// call, local spilled, stack empty.
- {Stack(kWasmI32)},
- // OOL stack check, local spilled, stack empty.
- {Stack(kWasmI32)},
+ {1, {Stack(0, kI32)}},
+ // OOL stack check, local spilled as before, stack empty.
+ {1, {}},
},
debug_side_table.get());
}
@@ -344,11 +332,11 @@ TEST(Liftoff_debug_side_table_call_const) {
CheckDebugSideTable(
{
// function entry, local in register.
- {Register(kWasmI32)},
+ {1, {Register(0, kI32)}},
// call, local is kConst.
- {Constant(kWasmI32, kConst)},
+ {1, {Constant(0, kI32, kConst)}},
// OOL stack check, local spilled.
- {Stack(kWasmI32)},
+ {1, {Stack(0, kI32)}},
},
debug_side_table.get());
}
@@ -363,15 +351,15 @@ TEST(Liftoff_debug_side_table_indirect_call) {
CheckDebugSideTable(
{
// function entry, local in register.
- {Register(kWasmI32)},
+ {1, {Register(0, kI32)}},
// indirect call, local spilled, stack empty.
- {Stack(kWasmI32)},
- // OOL stack check, local spilled, stack empty.
- {Stack(kWasmI32)},
- // OOL trap (invalid index), local spilled, stack has {kConst}.
- {Stack(kWasmI32), Constant(kWasmI32, kConst)},
- // OOL trap (sig mismatch), local spilled, stack has {kConst}.
- {Stack(kWasmI32), Constant(kWasmI32, kConst)},
+ {1, {Stack(0, kI32)}},
+ // OOL stack check, local still spilled.
+ {1, {}},
+ // OOL trap (invalid index), local still spilled, stack has {kConst}.
+ {2, {Constant(1, kI32, kConst)}},
+ // OOL trap (sig mismatch), stack unmodified.
+ {2, {}},
},
debug_side_table.get());
}
@@ -385,11 +373,11 @@ TEST(Liftoff_debug_side_table_loop) {
CheckDebugSideTable(
{
// function entry, local in register.
- {Register(kWasmI32)},
+ {1, {Register(0, kI32)}},
// OOL stack check, local spilled, stack empty.
- {Stack(kWasmI32)},
- // OOL loop stack check, local spilled, stack has {kConst}.
- {Stack(kWasmI32), Constant(kWasmI32, kConst)},
+ {1, {Stack(0, kI32)}},
+ // OOL loop stack check, local still spilled, stack has {kConst}.
+ {2, {Constant(1, kI32, kConst)}},
},
debug_side_table.get());
}
@@ -402,13 +390,13 @@ TEST(Liftoff_debug_side_table_trap) {
CheckDebugSideTable(
{
// function entry, locals in registers.
- {Register(kWasmI32), Register(kWasmI32)},
+ {2, {Register(0, kI32), Register(1, kI32)}},
// OOL stack check, local spilled, stack empty.
- {Stack(kWasmI32), Stack(kWasmI32)},
- // OOL trap (div by zero), locals spilled, stack empty.
- {Stack(kWasmI32), Stack(kWasmI32)},
- // OOL trap (result unrepresentable), locals spilled, stack empty.
- {Stack(kWasmI32), Stack(kWasmI32)},
+ {2, {Stack(0, kI32), Stack(1, kI32)}},
+ // OOL trap (div by zero), stack as before.
+ {2, {}},
+ // OOL trap (unrepresentable), stack as before.
+ {2, {}},
},
debug_side_table.get());
}
@@ -426,12 +414,11 @@ TEST(Liftoff_breakpoint_simple) {
CheckDebugSideTable(
{
// First break point, locals in registers.
- {Register(kWasmI32), Register(kWasmI32)},
- // Second break point, locals and two stack values in registers.
- {Register(kWasmI32), Register(kWasmI32), Register(kWasmI32),
- Register(kWasmI32)},
+ {2, {Register(0, kI32), Register(1, kI32)}},
+ // Second break point, locals unchanged, two register stack values.
+ {4, {Register(2, kI32), Register(3, kI32)}},
// OOL stack check, locals spilled, stack empty.
- {Stack(kWasmI32), Stack(kWasmI32)},
+ {2, {Stack(0, kI32), Stack(1, kI32)}},
},
debug_side_table.get());
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc b/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc
index 9b391dd0d0..b00f0714c2 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc
@@ -46,7 +46,6 @@ void CheckMemoryEqualsFollowedByZeroes(TestingModuleBuilder* builder,
} // namespace
WASM_EXEC_TEST(MemoryInit) {
- EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
const byte data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
@@ -83,7 +82,6 @@ WASM_EXEC_TEST(MemoryInit) {
}
WASM_EXEC_TEST(MemoryInitOutOfBoundsData) {
- EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
const byte data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
@@ -105,7 +103,6 @@ WASM_EXEC_TEST(MemoryInitOutOfBoundsData) {
}
WASM_EXEC_TEST(MemoryInitOutOfBounds) {
- EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
const byte data[kWasmPageSize] = {};
@@ -137,7 +134,6 @@ WASM_EXEC_TEST(MemoryInitOutOfBounds) {
}
WASM_EXEC_TEST(MemoryCopy) {
- EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
byte* mem = r.builder().AddMemory(kWasmPageSize);
BUILD(
@@ -166,7 +162,6 @@ WASM_EXEC_TEST(MemoryCopy) {
}
WASM_EXEC_TEST(MemoryCopyOverlapping) {
- EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
byte* mem = r.builder().AddMemory(kWasmPageSize);
BUILD(
@@ -189,7 +184,6 @@ WASM_EXEC_TEST(MemoryCopyOverlapping) {
}
WASM_EXEC_TEST(MemoryCopyOutOfBoundsData) {
- EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
byte* mem = r.builder().AddMemory(kWasmPageSize);
BUILD(
@@ -218,7 +212,6 @@ WASM_EXEC_TEST(MemoryCopyOutOfBoundsData) {
}
WASM_EXEC_TEST(MemoryCopyOutOfBounds) {
- EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
BUILD(
@@ -248,7 +241,6 @@ WASM_EXEC_TEST(MemoryCopyOutOfBounds) {
}
WASM_EXEC_TEST(MemoryFill) {
- EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
BUILD(
@@ -272,7 +264,6 @@ WASM_EXEC_TEST(MemoryFill) {
}
WASM_EXEC_TEST(MemoryFillValueWrapsToByte) {
- EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
BUILD(
@@ -286,7 +277,6 @@ WASM_EXEC_TEST(MemoryFillValueWrapsToByte) {
}
WASM_EXEC_TEST(MemoryFillOutOfBoundsData) {
- EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
BUILD(
@@ -299,7 +289,6 @@ WASM_EXEC_TEST(MemoryFillOutOfBoundsData) {
}
WASM_EXEC_TEST(MemoryFillOutOfBounds) {
- EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
BUILD(
@@ -322,7 +311,6 @@ WASM_EXEC_TEST(MemoryFillOutOfBounds) {
}
WASM_EXEC_TEST(DataDropTwice) {
- EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
WasmRunner<uint32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
const byte data[] = {0};
@@ -334,7 +322,6 @@ WASM_EXEC_TEST(DataDropTwice) {
}
WASM_EXEC_TEST(DataDropThenMemoryInit) {
- EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
WasmRunner<uint32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
const byte data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
@@ -348,7 +335,6 @@ WASM_EXEC_TEST(DataDropThenMemoryInit) {
void TestTableCopyInbounds(TestExecutionTier execution_tier, int table_dst,
int table_src) {
- EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
const uint32_t kTableSize = 5;
// Add 10 function tables, even though we only test one table.
@@ -411,7 +397,6 @@ void CheckTableCall(Isolate* isolate, Handle<WasmTableObject> table,
} // namespace
void TestTableInitElems(TestExecutionTier execution_tier, int table_index) {
- EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
TestSignatures sigs;
@@ -489,7 +474,6 @@ WASM_COMPILED_EXEC_TEST(TableInitElems9) {
}
void TestTableInitOob(TestExecutionTier execution_tier, int table_index) {
- EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
TestSignatures sigs;
@@ -569,7 +553,6 @@ WASM_COMPILED_EXEC_TEST(TableInitOob9) {
void TestTableCopyElems(TestExecutionTier execution_tier, int table_dst,
int table_src) {
- EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
TestSignatures sigs;
@@ -651,7 +634,6 @@ WASM_COMPILED_EXEC_TEST(TableCopyElemsFrom6To6) {
void TestTableCopyCalls(TestExecutionTier execution_tier, int table_dst,
int table_src) {
- EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
TestSignatures sigs;
@@ -726,7 +708,6 @@ WASM_COMPILED_EXEC_TEST(TableCopyCallsTo6From6) {
void TestTableCopyOobWrites(TestExecutionTier execution_tier, int table_dst,
int table_src) {
- EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
TestSignatures sigs;
@@ -802,7 +783,6 @@ WASM_COMPILED_EXEC_TEST(TableCopyOobWritesFrom6To6) {
void TestTableCopyOob1(TestExecutionTier execution_tier, int table_dst,
int table_src) {
- EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
const uint32_t kTableSize = 5;
@@ -860,7 +840,6 @@ WASM_COMPILED_EXEC_TEST(TableCopyOob1From6To6) {
}
WASM_COMPILED_EXEC_TEST(ElemDropTwice) {
- EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
WasmRunner<uint32_t> r(execution_tier);
r.builder().AddIndirectFunctionTable(nullptr, 1);
r.builder().AddPassiveElementSegment({});
@@ -871,7 +850,6 @@ WASM_COMPILED_EXEC_TEST(ElemDropTwice) {
}
WASM_COMPILED_EXEC_TEST(ElemDropThenTableInit) {
- EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
WasmRunner<uint32_t, uint32_t> r(execution_tier);
r.builder().AddIndirectFunctionTable(nullptr, 1);
r.builder().AddPassiveElementSegment({});
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc b/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
index 83b446cc49..e55547911b 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
@@ -12,7 +12,7 @@ namespace internal {
namespace wasm {
namespace test_run_wasm_exceptions {
-WASM_COMPILED_EXEC_TEST(TryCatchThrow) {
+WASM_EXEC_TEST(TryCatchThrow) {
TestSignatures sigs;
EXPERIMENTAL_FLAG_SCOPE(eh);
WasmRunner<uint32_t, uint32_t> r(execution_tier);
@@ -27,12 +27,265 @@ WASM_COMPILED_EXEC_TEST(TryCatchThrow) {
WASM_THROW(except))),
WASM_STMTS(WASM_I32V(kResult0)), except));
- // Need to call through JS to allow for creation of stack traces.
- r.CheckCallViaJS(kResult0, 0);
- r.CheckCallViaJS(kResult1, 1);
+ if (execution_tier != TestExecutionTier::kInterpreter) {
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+ } else {
+ CHECK_EQ(kResult0, r.CallInterpreter(0));
+ CHECK_EQ(kResult1, r.CallInterpreter(1));
+ }
+}
+
+WASM_EXEC_TEST(TryCatchThrowWithValue) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
+ uint32_t except = r.builder().AddException(sigs.v_i());
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kResult1 = 42;
+
+ // Build the main test function.
+ BUILD(r, WASM_TRY_CATCH_T(
+ kWasmI32,
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_I32V(kResult0), WASM_THROW(except))),
+ WASM_STMTS(kExprNop), except));
+
+ if (execution_tier != TestExecutionTier::kInterpreter) {
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+ } else {
+ CHECK_EQ(kResult0, r.CallInterpreter(0));
+ CHECK_EQ(kResult1, r.CallInterpreter(1));
+ }
+}
+
+WASM_EXEC_TEST(TryMultiCatchThrow) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
+ uint32_t except1 = r.builder().AddException(sigs.v_v());
+ uint32_t except2 = r.builder().AddException(sigs.v_v());
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kResult1 = 42;
+ constexpr uint32_t kResult2 = 51;
+
+ // Build the main test function.
+ BUILD(
+ r, kExprTry, static_cast<byte>((kWasmI32).value_type_code()),
+ WASM_STMTS(WASM_I32V(kResult2),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)), WASM_THROW(except1)),
+ WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_I32V(1)),
+ WASM_THROW(except2))),
+ kExprCatch, except1, WASM_STMTS(WASM_I32V(kResult0)), kExprCatch, except2,
+ WASM_STMTS(WASM_I32V(kResult1)), kExprEnd);
+
+ if (execution_tier != TestExecutionTier::kInterpreter) {
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+ r.CheckCallViaJS(kResult2, 2);
+ } else {
+ CHECK_EQ(kResult0, r.CallInterpreter(0));
+ CHECK_EQ(kResult1, r.CallInterpreter(1));
+ CHECK_EQ(kResult2, r.CallInterpreter(2));
+ }
+}
+
+WASM_EXEC_TEST(TryCatchAllThrow) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
+ uint32_t except1 = r.builder().AddException(sigs.v_v());
+ uint32_t except2 = r.builder().AddException(sigs.v_v());
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kResult1 = 42;
+ constexpr uint32_t kResult2 = 51;
+
+ // Build the main test function.
+ BUILD(
+ r, kExprTry, static_cast<byte>((kWasmI32).value_type_code()),
+ WASM_STMTS(WASM_I32V(kResult2),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)), WASM_THROW(except1)),
+ WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_I32V(1)),
+ WASM_THROW(except2))),
+ kExprCatch, except1, WASM_STMTS(WASM_I32V(kResult0)), kExprCatchAll,
+ WASM_STMTS(WASM_I32V(kResult1)), kExprEnd);
+
+ if (execution_tier != TestExecutionTier::kInterpreter) {
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+ r.CheckCallViaJS(kResult2, 2);
+ } else {
+ CHECK_EQ(kResult0, r.CallInterpreter(0));
+ CHECK_EQ(kResult1, r.CallInterpreter(1));
+ CHECK_EQ(kResult2, r.CallInterpreter(2));
+ }
+}
+
+WASM_EXEC_TEST(TryImplicitRethrow) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
+ uint32_t except1 = r.builder().AddException(sigs.v_v());
+ uint32_t except2 = r.builder().AddException(sigs.v_v());
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kResult1 = 42;
+ constexpr uint32_t kResult2 = 51;
+
+ // Build the main test function.
+ BUILD(r,
+ WASM_TRY_CATCH_T(
+ kWasmI32,
+ WASM_TRY_CATCH_T(kWasmI32,
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_THROW(except2))),
+ WASM_STMTS(WASM_I32V(kResult2)), except1),
+ WASM_I32V(kResult0), except2));
+
+ if (execution_tier != TestExecutionTier::kInterpreter) {
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+ } else {
+ CHECK_EQ(kResult0, r.CallInterpreter(0));
+ CHECK_EQ(kResult1, r.CallInterpreter(1));
+ }
+}
+
+WASM_EXEC_TEST(TryDelegate) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
+ uint32_t except = r.builder().AddException(sigs.v_v());
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kResult1 = 42;
+
+ // Build the main test function.
+ BUILD(r,
+ WASM_TRY_CATCH_T(kWasmI32,
+ WASM_TRY_DELEGATE_T(
+ kWasmI32,
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_THROW(except))),
+ 0),
+ WASM_I32V(kResult0), except));
+
+ if (execution_tier != TestExecutionTier::kInterpreter) {
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+ } else {
+ CHECK_EQ(kResult0, r.CallInterpreter(0));
+ CHECK_EQ(kResult1, r.CallInterpreter(1));
+ }
}
-WASM_COMPILED_EXEC_TEST(TryCatchCallDirect) {
+WASM_EXEC_TEST(TryUnwind) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
+ uint32_t except = r.builder().AddException(sigs.v_v());
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kResult1 = 42;
+
+ // Build the main test function.
+ BUILD(r, WASM_TRY_CATCH_T(
+ kWasmI32,
+ WASM_TRY_UNWIND_T(
+ kWasmI32,
+ WASM_TRY_DELEGATE_T(
+ kWasmI32,
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_THROW(except))),
+ 0),
+ kExprNop),
+ WASM_I32V(kResult0), except));
+
+ if (execution_tier != TestExecutionTier::kInterpreter) {
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+ } else {
+ CHECK_EQ(kResult0, r.CallInterpreter(0));
+ CHECK_EQ(kResult1, r.CallInterpreter(1));
+ }
+}
+
+WASM_EXEC_TEST(TryCatchRethrow) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
+ uint32_t except1 = r.builder().AddException(sigs.v_v());
+ uint32_t except2 = r.builder().AddException(sigs.v_v());
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kResult1 = 42;
+ constexpr uint32_t kUnreachable = 51;
+
+ // Build the main test function.
+ BUILD(r,
+ WASM_TRY_CATCH_CATCH_T(
+ kWasmI32,
+ WASM_TRY_CATCH_T(
+ kWasmI32, WASM_THROW(except2),
+ WASM_TRY_CATCH_T(
+ kWasmI32, WASM_THROW(except1),
+ WASM_STMTS(WASM_I32V(kUnreachable),
+ WASM_IF_ELSE(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_RETHROW(1), WASM_RETHROW(2))),
+ except1),
+ except2),
+ except1, WASM_I32V(kResult0), except2, WASM_I32V(kResult1)));
+
+ if (execution_tier != TestExecutionTier::kInterpreter) {
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+ } else {
+ CHECK_EQ(kResult0, r.CallInterpreter(0));
+ CHECK_EQ(kResult1, r.CallInterpreter(1));
+ }
+}
+
+WASM_EXEC_TEST(TryDelegateToCaller) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
+ uint32_t except = r.builder().AddException(sigs.v_v());
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kResult1 = 42;
+
+ // Build the main test function.
+ BUILD(r,
+ WASM_TRY_CATCH_T(kWasmI32,
+ WASM_TRY_DELEGATE_T(
+ kWasmI32,
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_THROW(except))),
+ 1),
+ WASM_I32V(kResult0), except));
+
+ if (execution_tier != TestExecutionTier::kInterpreter) {
+ // Need to call through JS to allow for creation of stack traces.
+ constexpr int64_t trap = 0xDEADBEEF;
+ r.CheckCallViaJS(trap, 0);
+ r.CheckCallViaJS(kResult1, 1);
+ } else {
+ constexpr int stopped = 0;
+ CHECK_EQ(stopped, r.CallInterpreter(0));
+ CHECK_EQ(kResult1, r.CallInterpreter(1));
+ }
+}
+
+WASM_EXEC_TEST(TryCatchCallDirect) {
TestSignatures sigs;
EXPERIMENTAL_FLAG_SCOPE(eh);
WasmRunner<uint32_t, uint32_t> r(execution_tier);
@@ -55,12 +308,17 @@ WASM_COMPILED_EXEC_TEST(TryCatchCallDirect) {
WASM_DROP))),
WASM_STMTS(WASM_I32V(kResult0)), except));
- // Need to call through JS to allow for creation of stack traces.
- r.CheckCallViaJS(kResult0, 0);
- r.CheckCallViaJS(kResult1, 1);
+ if (execution_tier != TestExecutionTier::kInterpreter) {
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+ } else {
+ CHECK_EQ(kResult0, r.CallInterpreter(0));
+ CHECK_EQ(kResult1, r.CallInterpreter(1));
+ }
}
-WASM_COMPILED_EXEC_TEST(TryCatchCallIndirect) {
+WASM_EXEC_TEST(TryCatchCallIndirect) {
TestSignatures sigs;
EXPERIMENTAL_FLAG_SCOPE(eh);
WasmRunner<uint32_t, uint32_t> r(execution_tier);
@@ -92,9 +350,14 @@ WASM_COMPILED_EXEC_TEST(TryCatchCallIndirect) {
WASM_DROP))),
WASM_STMTS(WASM_I32V(kResult0)), except));
- // Need to call through JS to allow for creation of stack traces.
- r.CheckCallViaJS(kResult0, 0);
- r.CheckCallViaJS(kResult1, 1);
+ if (execution_tier != TestExecutionTier::kInterpreter) {
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+ } else {
+ CHECK_EQ(kResult0, r.CallInterpreter(0));
+ CHECK_EQ(kResult1, r.CallInterpreter(1));
+ }
}
WASM_COMPILED_EXEC_TEST(TryCatchCallExternal) {
@@ -152,32 +415,51 @@ void TestTrapNotCaught(byte* code, size_t code_size,
WASM_DROP),
WASM_STMTS(WASM_I32V(kResultCaught))));
- // Need to call through JS to allow for creation of stack traces.
- r.CheckCallViaJSTraps();
+ if (execution_tier != TestExecutionTier::kInterpreter) {
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJSTraps();
+ } else {
+ r.CallInterpreter();
+ }
}
} // namespace
-WASM_COMPILED_EXEC_TEST(TryCatchTrapUnreachable) {
+WASM_EXEC_TEST(TryCatchTrapUnreachable) {
byte code[] = {WASM_UNREACHABLE};
TestTrapNotCaught(code, arraysize(code), execution_tier);
}
-WASM_COMPILED_EXEC_TEST(TryCatchTrapMemOutOfBounds) {
+WASM_EXEC_TEST(TryCatchTrapMemOutOfBounds) {
byte code[] = {WASM_LOAD_MEM(MachineType::Int32(), WASM_I32V_1(-1))};
TestTrapNotCaught(code, arraysize(code), execution_tier);
}
-WASM_COMPILED_EXEC_TEST(TryCatchTrapDivByZero) {
+WASM_EXEC_TEST(TryCatchTrapDivByZero) {
byte code[] = {WASM_I32_DIVS(WASM_LOCAL_GET(0), WASM_I32V_1(0))};
TestTrapNotCaught(code, arraysize(code), execution_tier);
}
-WASM_COMPILED_EXEC_TEST(TryCatchTrapRemByZero) {
+WASM_EXEC_TEST(TryCatchTrapRemByZero) {
byte code[] = {WASM_I32_REMS(WASM_LOCAL_GET(0), WASM_I32V_1(0))};
TestTrapNotCaught(code, arraysize(code), execution_tier);
}
+TEST(Regress1180457) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t> r(TestExecutionTier::kInterpreter);
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kUnreachable = 42;
+ BUILD(r, WASM_TRY_CATCH_ALL_T(
+ kWasmI32,
+ WASM_TRY_DELEGATE_T(
+ kWasmI32, WASM_STMTS(WASM_I32V(kResult0), WASM_BR(0)), 0),
+ WASM_I32V(kUnreachable)));
+
+ CHECK_EQ(kResult0, r.CallInterpreter());
+}
+
} // namespace test_run_wasm_exceptions
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-memory64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-memory64.cc
index 1dcccca67f..71bb77f6ad 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-memory64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-memory64.cc
@@ -7,6 +7,7 @@
#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h"
+#include "test/common/wasm/wasm-module-runner.h"
namespace v8 {
namespace internal {
@@ -54,6 +55,49 @@ WASM_EXEC_TEST(Load) {
// TODO(clemensb): Test atomic instructions.
+WASM_EXEC_TEST(InitExpression) {
+ EXPERIMENTAL_FLAG_SCOPE(memory64);
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+
+ ErrorThrower thrower(isolate, "TestMemory64InitExpression");
+
+ const byte data[] = {
+ WASM_MODULE_HEADER, //
+ SECTION(Memory, //
+ ENTRY_COUNT(1), //
+ kMemory64WithMaximum, // type
+ 1, // initial size
+ 2), // maximum size
+ SECTION(Data, //
+ ENTRY_COUNT(1), //
+ 0, // linear memory index
+ WASM_I64V_3(0xFFFF), kExprEnd, // destination offset
+ U32V_1(1), // source size
+ 'c') // data bytes
+ };
+
+ testing::CompileAndInstantiateForTesting(
+ isolate, &thrower, ModuleWireBytes(data, data + arraysize(data)));
+ if (thrower.error()) {
+ thrower.Reify()->Print();
+ FATAL("compile or instantiate error");
+ }
+}
+
+WASM_EXEC_TEST(MemorySize) {
+ // TODO(clemensb): Implement memory64 in the interpreter.
+ if (execution_tier == TestExecutionTier::kInterpreter) return;
+
+ Memory64Runner<uint64_t> r(execution_tier);
+ constexpr int kNumPages = 13;
+ r.builder().AddMemoryElems<uint8_t>(kNumPages * kWasmPageSize);
+
+ BUILD(r, WASM_MEMORY_SIZE);
+
+ CHECK_EQ(kNumPages, r.Call());
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index 9a6e85f431..a9f5dd6b26 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -904,41 +904,6 @@ TEST(EmptyMemoryEmptyDataSegment) {
Cleanup();
}
-TEST(MemoryWithOOBEmptyDataSegment) {
- {
- FlagScope<bool> no_bulk_memory(
- &v8::internal::FLAG_experimental_wasm_bulk_memory, false);
- Isolate* isolate = CcTest::InitIsolateOnce();
- HandleScope scope(isolate);
- testing::SetupIsolateForWasmModule(isolate);
-
- ErrorThrower thrower(isolate, "Run_WasmModule_InitDataAtTheUpperLimit");
-
- const byte data[] = {
- WASM_MODULE_HEADER, // --
- kMemorySectionCode, // --
- U32V_1(4), // section size
- ENTRY_COUNT(1), // --
- kWithMaximum, // --
- 1, // initial size
- 1, // maximum size
- kDataSectionCode, // --
- U32V_1(9), // section size
- ENTRY_COUNT(1), // --
- 0, // linear memory index
- WASM_I32V_4(0x2468ACE), // destination offset
- kExprEnd,
- U32V_1(0), // source size
- };
-
- CompileAndInstantiateForTesting(
- isolate, &thrower, ModuleWireBytes(data, data + arraysize(data)));
- // It should not be possible to instantiate this module.
- CHECK(thrower.error());
- }
- Cleanup();
-}
-
#undef EMIT_CODE_WITH_END
} // namespace test_run_wasm_module
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc
index 11db27f72c..4c5309aae5 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc
@@ -224,7 +224,7 @@ WASM_SIMD_TEST(AnyTrue_DifferentShapes) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
BUILD(r, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0)),
- WASM_SIMD_OP(kExprV8x16AnyTrue));
+ WASM_SIMD_OP(kExprV128AnyTrue));
CHECK_EQ(0, r.Call(0x00000000));
}
@@ -233,7 +233,7 @@ WASM_SIMD_TEST(AnyTrue_DifferentShapes) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
BUILD(r, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0)),
- WASM_SIMD_OP(kExprV16x8AnyTrue));
+ WASM_SIMD_OP(kExprV128AnyTrue));
CHECK_EQ(1, r.Call(0x000000FF));
}
@@ -243,7 +243,7 @@ WASM_SIMD_TEST(AnyTrue_DifferentShapes) {
WasmRunner<int32_t, float> r(execution_tier, lower_simd);
BUILD(r, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(0)),
- WASM_SIMD_OP(kExprV8x16AnyTrue));
+ WASM_SIMD_OP(kExprV128AnyTrue));
CHECK_EQ(0, r.Call(0x00000000));
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index c6be948954..fa9299f27b 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -17,6 +17,7 @@
#include "src/base/macros.h"
#include "src/base/memory.h"
#include "src/base/overflowing-math.h"
+#include "src/base/safe_conversions.h"
#include "src/base/utils/random-number-generator.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/cpu-features.h"
@@ -800,26 +801,22 @@ WASM_SIMD_TEST_NO_LOWERING(I8x16SignSelect) {
}
WASM_SIMD_TEST_NO_LOWERING(I16x8SignSelect) {
- std::array<int16_t, kSimd128Size / 2> selection = {0x8000, 0, -1, 0,
- 0x8000, 0, -1, 0};
- std::array<int8_t, kSimd128Size> mask;
- memcpy(mask.data(), selection.data(), kSimd128Size);
+ std::array<int8_t, kSimd128Size> mask = {0, 0x80, 0, 0, -1, -1, 0, 0,
+ 0, 0x80, 0, 0, -1, -1, 0, 0};
RunSignSelect<int16_t>(execution_tier, lower_simd, kExprI16x8SignSelect,
kExprI16x8Splat, mask);
}
WASM_SIMD_TEST_NO_LOWERING(I32x4SignSelect) {
- std::array<int32_t, kSimd128Size / 4> selection = {0x80000000, 0, -1, 0};
- std::array<int8_t, kSimd128Size> mask;
- memcpy(mask.data(), selection.data(), kSimd128Size);
+ std::array<int8_t, kSimd128Size> mask = {0, 0, 0, 0x80, 0, 0, 0, 0,
+ -1, -1, -1, -1, 0, 0, 0, 0};
RunSignSelect<int32_t>(execution_tier, lower_simd, kExprI32x4SignSelect,
kExprI32x4Splat, mask);
}
WASM_SIMD_TEST_NO_LOWERING(I64x2SignSelect) {
- std::array<int64_t, kSimd128Size / 8> selection = {0x8000000000000000, 0};
- std::array<int8_t, kSimd128Size> mask;
- memcpy(mask.data(), selection.data(), kSimd128Size);
+ std::array<int8_t, kSimd128Size> mask = {0, 0, 0, 0, 0, 0, 0, 0x80,
+ 0, 0, 0, 0, 0, 0, 0, 0};
RunSignSelect<int64_t>(execution_tier, lower_simd, kExprI64x2SignSelect,
kExprI64x2Splat, mask);
}
@@ -954,6 +951,10 @@ WASM_SIMD_TEST(I64x2Neg) {
base::NegateWithWraparound);
}
+WASM_SIMD_TEST_NO_LOWERING(I64x2Abs) {
+ RunI64x2UnOpTest(execution_tier, lower_simd, kExprI64x2Abs, std::abs);
+}
+
void RunI64x2ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode opcode, Int64ShiftOp expected_op) {
// Intentionally shift by 64, should be no-op.
@@ -1003,7 +1004,6 @@ WASM_SIMD_TEST(I64x2ShrU) {
void RunI64x2BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode opcode, Int64BinOp expected_op) {
- FLAG_SCOPE(wasm_simd_post_mvp);
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier, lower_simd);
// Global to hold output.
int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
@@ -1038,15 +1038,29 @@ WASM_SIMD_TEST(I64x2Sub) {
base::SubWithWraparound);
}
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X || \
- V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS64 || \
- V8_TARGET_ARCH_MIPS
WASM_SIMD_TEST_NO_LOWERING(I64x2Eq) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Eq, Equal);
}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X ||
- // V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS64 ||
- // V8_TARGET_ARCH_MIPS
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2Ne) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Ne, NotEqual);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2LtS) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LtS, Less);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2LeS) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LeS, LessEqual);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2GtS) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GtS, Greater);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2GeS) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GeS, GreaterEqual);
+}
WASM_SIMD_TEST(F64x2Splat) {
WasmRunner<int32_t, double> r(execution_tier, lower_simd);
@@ -1282,6 +1296,115 @@ WASM_SIMD_TEST(F64x2NearestInt) {
true);
}
+template <typename SrcType>
+void RunF64x2ConvertLowI32x4Test(TestExecutionTier execution_tier,
+ LowerSimd lower_simd, WasmOpcode opcode) {
+ WasmRunner<int32_t, SrcType> r(execution_tier, lower_simd);
+ double* g = r.builder().template AddGlobal<double>(kWasmS128);
+ // TODO(zhin): set top lanes to 0 to assert conversion happens on low lanes.
+ BUILD(
+ r,
+ WASM_GLOBAL_SET(
+ 0, WASM_SIMD_UNOP(opcode, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0)))),
+ WASM_ONE);
+
+ for (SrcType x : compiler::ValueHelper::GetVector<SrcType>()) {
+ r.Call(x);
+ double expected = static_cast<double>(x);
+ for (int i = 0; i < 2; i++) {
+ double actual = ReadLittleEndianValue<double>(&g[i]);
+ CheckDoubleResult(x, x, expected, actual, true);
+ }
+ }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F64x2ConvertLowI32x4S) {
+ RunF64x2ConvertLowI32x4Test<int32_t>(execution_tier, lower_simd,
+ kExprF64x2ConvertLowI32x4S);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F64x2ConvertLowI32x4U) {
+ RunF64x2ConvertLowI32x4Test<uint32_t>(execution_tier, lower_simd,
+ kExprF64x2ConvertLowI32x4U);
+}
+
+template <typename SrcType>
+void RunI32x4TruncSatF64x2Test(TestExecutionTier execution_tier,
+ LowerSimd lower_simd, WasmOpcode opcode) {
+ WasmRunner<int32_t, double> r(execution_tier, lower_simd);
+ SrcType* g = r.builder().AddGlobal<SrcType>(kWasmS128);
+ BUILD(
+ r,
+ WASM_GLOBAL_SET(
+ 0, WASM_SIMD_UNOP(opcode, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(0)))),
+ WASM_ONE);
+
+ FOR_FLOAT64_INPUTS(x) {
+ r.Call(x);
+ SrcType expected = base::saturated_cast<SrcType>(x);
+ for (int i = 0; i < 2; i++) {
+ SrcType actual = ReadLittleEndianValue<SrcType>(&g[i]);
+ CHECK_EQ(expected, actual);
+ }
+ // Top lanes are zeroed.
+ for (int i = 2; i < 4; i++) {
+ CHECK_EQ(0, ReadLittleEndianValue<SrcType>(&g[i]));
+ }
+ }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I32x4TruncSatF64x2SZero) {
+ RunI32x4TruncSatF64x2Test<int32_t>(execution_tier, lower_simd,
+ kExprI32x4TruncSatF64x2SZero);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I32x4TruncSatF64x2UZero) {
+ RunI32x4TruncSatF64x2Test<uint32_t>(execution_tier, lower_simd,
+ kExprI32x4TruncSatF64x2UZero);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F32x4DemoteF64x2Zero) {
+ WasmRunner<int32_t, double> r(execution_tier, lower_simd);
+ float* g = r.builder().AddGlobal<float>(kWasmS128);
+ BUILD(r,
+ WASM_GLOBAL_SET(
+ 0, WASM_SIMD_UNOP(kExprF32x4DemoteF64x2Zero,
+ WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(0)))),
+ WASM_ONE);
+
+ FOR_FLOAT64_INPUTS(x) {
+ r.Call(x);
+ float expected = DoubleToFloat32(x);
+ for (int i = 0; i < 2; i++) {
+ float actual = ReadLittleEndianValue<float>(&g[i]);
+ CheckFloatResult(x, x, expected, actual, true);
+ }
+ for (int i = 2; i < 4; i++) {
+ float actual = ReadLittleEndianValue<float>(&g[i]);
+ CheckFloatResult(x, x, 0, actual, true);
+ }
+ }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F64x2PromoteLowF32x4) {
+ WasmRunner<int32_t, float> r(execution_tier, lower_simd);
+ double* g = r.builder().AddGlobal<double>(kWasmS128);
+ BUILD(r,
+ WASM_GLOBAL_SET(
+ 0, WASM_SIMD_UNOP(kExprF64x2PromoteLowF32x4,
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(0)))),
+ WASM_ONE);
+
+ FOR_FLOAT32_INPUTS(x) {
+ r.Call(x);
+ double expected = static_cast<double>(x);
+ for (int i = 0; i < 2; i++) {
+ double actual = ReadLittleEndianValue<double>(&g[i]);
+ CheckDoubleResult(x, x, expected, actual, true);
+ }
+ }
+}
+
void RunF64x2BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode opcode, DoubleBinOp expected_op) {
WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
@@ -1804,18 +1927,14 @@ WASM_SIMD_TEST(I32x4ConvertI16x8) {
}
}
-// TODO(v8:10972) Prototyping i64x2 convert from i32x4.
// Tests both signed and unsigned conversion from I32x4 (unpacking).
-#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || \
- V8_TARGET_ARCH_ARM
WASM_SIMD_TEST_NO_LOWERING(I64x2ConvertI32x4) {
- FLAG_SCOPE(wasm_simd_post_mvp);
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
// Create four output vectors to hold signed and unsigned results.
int64_t* g0 = r.builder().AddGlobal<int64_t>(kWasmS128);
int64_t* g1 = r.builder().AddGlobal<int64_t>(kWasmS128);
- int64_t* g2 = r.builder().AddGlobal<int64_t>(kWasmS128);
- int64_t* g3 = r.builder().AddGlobal<int64_t>(kWasmS128);
+ uint64_t* g2 = r.builder().AddGlobal<uint64_t>(kWasmS128);
+ uint64_t* g3 = r.builder().AddGlobal<uint64_t>(kWasmS128);
// Build fn to splat test value, perform conversions, and write the results.
byte value = 0;
byte temp1 = r.AllocateLocal(kWasmS128);
@@ -1833,17 +1952,16 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2ConvertI32x4) {
FOR_INT32_INPUTS(x) {
r.Call(x);
int64_t expected_signed = static_cast<int64_t>(x);
- int64_t expected_unsigned = static_cast<int64_t>(static_cast<uint32_t>(x));
+ uint64_t expected_unsigned =
+ static_cast<uint64_t>(static_cast<uint32_t>(x));
for (int i = 0; i < 2; i++) {
CHECK_EQ(expected_signed, ReadLittleEndianValue<int64_t>(&g0[i]));
CHECK_EQ(expected_signed, ReadLittleEndianValue<int64_t>(&g1[i]));
- CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int64_t>(&g2[i]));
- CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int64_t>(&g3[i]));
+ CHECK_EQ(expected_unsigned, ReadLittleEndianValue<uint64_t>(&g2[i]));
+ CHECK_EQ(expected_unsigned, ReadLittleEndianValue<uint64_t>(&g3[i]));
}
}
}
-#endif // V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 ||
- // V8_TARGET_ARCH_ARM
void RunI32x4UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode opcode, Int32UnOp expected_op) {
@@ -1880,14 +1998,10 @@ WASM_SIMD_TEST(S128Not) {
[](int32_t x) { return ~x; });
}
-#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || \
- V8_TARGET_ARCH_IA32
-// TODO(v8:11086) Prototype i32x4.extadd_pairwise_i16x8_{s,u}
template <typename Narrow, typename Wide>
void RunExtAddPairwiseTest(TestExecutionTier execution_tier,
LowerSimd lower_simd, WasmOpcode ext_add_pairwise,
WasmOpcode splat) {
- FLAG_SCOPE(wasm_simd_post_mvp);
constexpr int num_lanes = kSimd128Size / sizeof(Wide);
WasmRunner<int32_t, Narrow> r(execution_tier, lower_simd);
Wide* g = r.builder().template AddGlobal<Wide>(kWasmS128);
@@ -1930,8 +2044,6 @@ WASM_SIMD_TEST_NO_LOWERING(I16x8ExtAddPairwiseI8x16U) {
kExprI16x8ExtAddPairwiseI8x16U,
kExprI8x16Splat);
}
-#endif // V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 ||
- // V8_TARGET_ARCH_IA32
void RunI32x4BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode opcode, Int32BinOp expected_op) {
@@ -2157,8 +2269,8 @@ WASM_SIMD_TEST(I16x8ConvertI32x4) {
FOR_INT32_INPUTS(x) {
r.Call(x);
- int16_t expected_signed = Saturate<int16_t>(x);
- int16_t expected_unsigned = Saturate<uint16_t>(x);
+ int16_t expected_signed = base::saturated_cast<int16_t>(x);
+ int16_t expected_unsigned = base::saturated_cast<uint16_t>(x);
for (int i = 0; i < 8; i++) {
CHECK_EQ(expected_signed, ReadLittleEndianValue<int16_t>(&g0[i]));
CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int16_t>(&g1[i]));
@@ -2325,7 +2437,6 @@ WASM_SIMD_TEST(I16x8RoundingAverageU) {
}
WASM_SIMD_TEST_NO_LOWERING(I16x8Q15MulRSatS) {
- FLAG_SCOPE(wasm_simd_post_mvp);
RunI16x8BinOpTest<int16_t>(execution_tier, lower_simd, kExprI16x8Q15MulRSatS,
SaturateRoundingQMul<int16_t>);
}
@@ -2542,10 +2653,7 @@ WASM_SIMD_TEST(I8x16Abs) {
RunI8x16UnOpTest(execution_tier, lower_simd, kExprI8x16Abs, Abs);
}
-#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
-// TODO(v8:11002) Prototype i8x16.popcnt.
WASM_SIMD_TEST_NO_LOWERING(I8x16Popcnt) {
- FLAG_SCOPE(wasm_simd_post_mvp);
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
// Global to hold output.
int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
@@ -2565,14 +2673,13 @@ WASM_SIMD_TEST_NO_LOWERING(I8x16Popcnt) {
}
}
}
-#endif // V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
// Tests both signed and unsigned conversion from I16x8 (packing).
WASM_SIMD_TEST(I8x16ConvertI16x8) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
// Create output vectors to hold signed and unsigned results.
- int8_t* g0 = r.builder().AddGlobal<int8_t>(kWasmS128);
- int8_t* g1 = r.builder().AddGlobal<int8_t>(kWasmS128);
+ int8_t* g_s = r.builder().AddGlobal<int8_t>(kWasmS128);
+ uint8_t* g_u = r.builder().AddGlobal<uint8_t>(kWasmS128);
// Build fn to splat test value, perform conversions, and write the results.
byte value = 0;
byte temp1 = r.AllocateLocal(kWasmS128);
@@ -2587,11 +2694,11 @@ WASM_SIMD_TEST(I8x16ConvertI16x8) {
FOR_INT16_INPUTS(x) {
r.Call(x);
- int8_t expected_signed = Saturate<int8_t>(x);
- int8_t expected_unsigned = Saturate<uint8_t>(x);
+ int8_t expected_signed = base::saturated_cast<int8_t>(x);
+ uint8_t expected_unsigned = base::saturated_cast<uint8_t>(x);
for (int i = 0; i < 16; i++) {
- CHECK_EQ(expected_signed, ReadLittleEndianValue<int8_t>(&g0[i]));
- CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int8_t>(&g1[i]));
+ CHECK_EQ(expected_signed, ReadLittleEndianValue<int8_t>(&g_s[i]));
+ CHECK_EQ(expected_unsigned, ReadLittleEndianValue<uint8_t>(&g_u[i]));
}
}
}
@@ -3271,14 +3378,14 @@ WASM_SIMD_TEST(S8x16MultiShuffleFuzz) {
byte reduced = r.AllocateLocal(kWasmI32); \
BUILD(r, WASM_LOCAL_SET(zero, WASM_SIMD_I##format##_SPLAT(int_type(0))), \
WASM_LOCAL_SET( \
- reduced, WASM_SIMD_UNOP(kExprV##format##AnyTrue, \
+ reduced, WASM_SIMD_UNOP(kExprV128AnyTrue, \
WASM_SIMD_BINOP(kExprI##format##Eq, \
WASM_LOCAL_GET(zero), \
WASM_LOCAL_GET(zero)))), \
WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(reduced), WASM_ZERO), \
WASM_RETURN1(WASM_ZERO)), \
WASM_LOCAL_SET( \
- reduced, WASM_SIMD_UNOP(kExprV##format##AnyTrue, \
+ reduced, WASM_SIMD_UNOP(kExprV128AnyTrue, \
WASM_SIMD_BINOP(kExprI##format##Ne, \
WASM_LOCAL_GET(zero), \
WASM_LOCAL_GET(zero)))), \
@@ -3302,14 +3409,14 @@ WASM_SIMD_TEST(S8x16MultiShuffleFuzz) {
WASM_SIMD_I##format##_REPLACE_LANE( \
lanes - 1, WASM_LOCAL_GET(zero), int_type(1))), \
WASM_LOCAL_SET( \
- reduced, WASM_SIMD_UNOP(kExprV##format##AnyTrue, \
+ reduced, WASM_SIMD_UNOP(kExprV128AnyTrue, \
WASM_SIMD_BINOP(kExprI##format##Eq, \
WASM_LOCAL_GET(one_one), \
WASM_LOCAL_GET(zero)))), \
WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(reduced), WASM_ZERO), \
WASM_RETURN1(WASM_ZERO)), \
WASM_LOCAL_SET( \
- reduced, WASM_SIMD_UNOP(kExprV##format##AnyTrue, \
+ reduced, WASM_SIMD_UNOP(kExprV128AnyTrue, \
WASM_SIMD_BINOP(kExprI##format##Ne, \
WASM_LOCAL_GET(one_one), \
WASM_LOCAL_GET(zero)))), \
@@ -3333,6 +3440,7 @@ WASM_SIMD_TEST(S8x16MultiShuffleFuzz) {
CHECK_EQ(1, r.Call()); \
}
+WASM_SIMD_BOOL_REDUCTION_TEST(64x2, 2, WASM_I64V)
WASM_SIMD_BOOL_REDUCTION_TEST(32x4, 4, WASM_I32V)
WASM_SIMD_BOOL_REDUCTION_TEST(16x8, 8, WASM_I32V)
WASM_SIMD_BOOL_REDUCTION_TEST(8x16, 16, WASM_I32V)
@@ -3590,7 +3698,7 @@ WASM_SIMD_TEST(SimdF32x4SetGlobal) {
CHECK_EQ(GetScalar(global, 3), 65.0f);
}
-#if V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
// TODO(v8:11168): Prototyping prefetch.
WASM_SIMD_TEST(SimdPrefetch) {
FLAG_SCOPE(wasm_simd_post_mvp);
@@ -3642,7 +3750,7 @@ WASM_SIMD_TEST(SimdPrefetch) {
}
}
}
-#endif // V8_TARGET_ARCH_ARM64
+#endif // V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
WASM_SIMD_TEST(SimdLoadStoreLoad) {
WasmRunner<int32_t> r(execution_tier, lower_simd);
@@ -3995,13 +4103,9 @@ WASM_SIMD_TEST(S128Load64Zero) {
RunLoadZeroTest<int64_t>(execution_tier, lower_simd, kExprS128Load64Zero);
}
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_MIPS64
-// TODO(v8:10975): Prototyping load lane and store lane.
template <typename T>
void RunLoadLaneTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode load_op, WasmOpcode splat_op) {
- FLAG_SCOPE(wasm_simd_post_mvp);
WasmOpcode const_op =
splat_op == kExprI64x2Splat ? kExprI64Const : kExprI32Const;
@@ -4100,12 +4204,6 @@ WASM_SIMD_TEST_NO_LOWERING(S128Load64Lane) {
template <typename T>
void RunStoreLaneTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode store_op, WasmOpcode splat_op) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- if (execution_tier == TestExecutionTier::kLiftoff) {
- // Not yet implemented.
- return;
- }
-
constexpr int lanes = kSimd128Size / sizeof(T);
constexpr int mem_index = 16; // Store to mem index 16 (bytes).
constexpr int splat_value = 33;
@@ -4196,9 +4294,6 @@ WASM_SIMD_TEST_NO_LOWERING(S128Store64Lane) {
kExprI64x2Splat);
}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 ||
- // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_MIPS64
-
#define WASM_SIMD_ANYTRUE_TEST(format, lanes, max, param_type) \
WASM_SIMD_TEST(S##format##AnyTrue) { \
FLAG_SCOPE(wasm_simd_post_mvp); \
@@ -4208,7 +4303,7 @@ WASM_SIMD_TEST_NO_LOWERING(S128Store64Lane) {
BUILD( \
r, \
WASM_LOCAL_SET(simd, WASM_SIMD_I##format##_SPLAT(WASM_LOCAL_GET(0))), \
- WASM_SIMD_UNOP(kExprV##format##AnyTrue, WASM_LOCAL_GET(simd))); \
+ WASM_SIMD_UNOP(kExprV128AnyTrue, WASM_LOCAL_GET(simd))); \
CHECK_EQ(1, r.Call(max)); \
CHECK_EQ(1, r.Call(5)); \
CHECK_EQ(0, r.Call(0)); \
@@ -4220,11 +4315,11 @@ WASM_SIMD_ANYTRUE_TEST(8x16, 16, 0xff, int32_t)
// Special any true test case that splats a -0.0 double into an i64x2.
// This is specifically to ensure that our implementation correctly handles that
// 0.0 and -0.0 will be treated as different in an anytrue (IEEE 754 says they are equal).
-WASM_SIMD_TEST(V32x4AnytrueWithNegativeZero) {
+WASM_SIMD_TEST(V128AnytrueWithNegativeZero) {
WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
byte simd = r.AllocateLocal(kWasmS128);
BUILD(r, WASM_LOCAL_SET(simd, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(0))),
- WASM_SIMD_UNOP(kExprV32x4AnyTrue, WASM_LOCAL_GET(simd)));
+ WASM_SIMD_UNOP(kExprV128AnyTrue, WASM_LOCAL_GET(simd)));
CHECK_EQ(1, r.Call(0x8000000000000000));
CHECK_EQ(0, r.Call(0x0000000000000000));
}
@@ -4243,6 +4338,7 @@ WASM_SIMD_TEST(V32x4AnytrueWithNegativeZero) {
CHECK_EQ(1, r.Call(0x1)); \
CHECK_EQ(0, r.Call(0)); \
}
+WASM_SIMD_ALLTRUE_TEST(64x2, 2, 0xffffffffffffffff, int64_t)
WASM_SIMD_ALLTRUE_TEST(32x4, 4, 0xffffffff, int32_t)
WASM_SIMD_ALLTRUE_TEST(16x8, 8, 0xffff, int32_t)
WASM_SIMD_ALLTRUE_TEST(8x16, 16, 0xff, int32_t)
diff --git a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
index 863256818a..92ad205070 100644
--- a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
+++ b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
@@ -1372,6 +1372,28 @@ STREAM_TEST(TestProfilingMidStreaming) {
cpu_profiler->Dispose();
}
+STREAM_TEST(TierDownWithError) {
+ // https://crbug.com/1160031
+ StreamTester tester(isolate);
+ Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ Zone* zone = tester.zone();
+
+ ZoneBuffer buffer(zone);
+ {
+ TestSignatures sigs;
+ WasmModuleBuilder builder(zone);
+ // Type error at i32.add.
+ builder.AddFunction(sigs.v_v())->Emit(kExprI32Add);
+ builder.WriteTo(&buffer);
+ }
+
+ i_isolate->wasm_engine()->TierDownAllModulesPerIsolate(i_isolate);
+
+ tester.OnBytesReceived(buffer.begin(), buffer.size());
+ tester.FinishStream();
+ tester.RunCompilerTasks();
+}
+
#undef STREAM_TEST
} // namespace wasm
diff --git a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
index 2d7b4024d7..156bfb55ac 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
@@ -179,16 +179,16 @@ struct WasmValWrapper {
#ifdef DEBUG
std::ostream& operator<<(std::ostream& out, const WasmValWrapper& wrapper) {
switch (wrapper.val.type().kind()) {
- case ValueType::kI32:
+ case kI32:
out << "i32: " << wrapper.val.to<int32_t>();
break;
- case ValueType::kI64:
+ case kI64:
out << "i64: " << wrapper.val.to<int64_t>();
break;
- case ValueType::kF32:
+ case kF32:
out << "f32: " << wrapper.val.to<float>();
break;
- case ValueType::kF64:
+ case kF64:
out << "f64: " << wrapper.val.to<double>();
break;
default:
diff --git a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
index 2a0f441eb7..e23a549ddd 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
@@ -61,14 +61,6 @@ class WasmSerializationTest {
memset(const_cast<uint8_t*>(wire_bytes_.data()), 0, wire_bytes_.size() / 2);
}
- void InvalidateNumFunctions() {
- Address num_functions_slot =
- reinterpret_cast<Address>(serialized_bytes_.data()) +
- WasmSerializer::kHeaderSize;
- CHECK_EQ(1, base::ReadUnalignedValue<uint32_t>(num_functions_slot));
- base::WriteUnalignedValue<uint32_t>(num_functions_slot, 0);
- }
-
MaybeHandle<WasmModuleObject> Deserialize(
Vector<const char> source_url = {}) {
return DeserializeNativeModule(CcTest::i_isolate(),
@@ -239,16 +231,6 @@ TEST(DeserializeNoSerializedData) {
test.CollectGarbage();
}
-TEST(DeserializeInvalidNumFunctions) {
- WasmSerializationTest test;
- {
- HandleScope scope(CcTest::i_isolate());
- test.InvalidateNumFunctions();
- CHECK(test.Deserialize().is_null());
- }
- test.CollectGarbage();
-}
-
TEST(DeserializeWireBytesAndSerializedDataInvalid) {
WasmSerializationTest test;
{
diff --git a/deps/v8/test/cctest/wasm/test-wasm-stack.cc b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
index 7ef79a6350..9faab4479e 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-stack.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
@@ -196,11 +196,11 @@ WASM_COMPILED_EXEC_TEST(CollectDetailedWasmStack_WasmUrl) {
Handle<FixedArray> stack_trace_object =
isolate->GetDetailedStackTrace(Handle<JSObject>::cast(exception));
CHECK(!stack_trace_object.is_null());
- Handle<StackTraceFrame> stack_frame = Handle<StackTraceFrame>::cast(
- handle(stack_trace_object->get(0), isolate));
+ Handle<StackFrameInfo> stack_frame(
+ StackFrameInfo::cast(stack_trace_object->get(0)), isolate);
MaybeHandle<String> maybe_stack_trace_str =
- SerializeStackTraceFrame(isolate, stack_frame);
+ SerializeStackFrameInfo(isolate, stack_frame);
CHECK(!maybe_stack_trace_str.is_null());
Handle<String> stack_trace_str = maybe_stack_trace_str.ToHandleChecked();
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index 2657d7e9a7..82f7824315 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -335,6 +335,7 @@ Handle<WasmInstanceObject> TestingModuleBuilder::InitInstanceObject() {
auto native_module = isolate_->wasm_engine()->NewNativeModule(
isolate_, enabled_features_, test_module_, code_size_estimate);
native_module->SetWireBytes(OwnedVector<const uint8_t>());
+ native_module->compilation_state()->set_compilation_id(0);
constexpr Vector<const char> kNoSourceUrl{"", 0};
Handle<Script> script = isolate_->wasm_engine()->GetOrCreateScript(
isolate_, native_module, kNoSourceUrl);
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index eeb29acf81..f873390283 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -233,12 +233,16 @@ class TestingModuleBuilder {
void SetExecutable() { native_module_->SetExecutable(true); }
- void TierDown() {
+ void SetTieredDown() {
native_module_->SetTieringState(kTieredDown);
- native_module_->RecompileForTiering();
execution_tier_ = TestExecutionTier::kLiftoff;
}
+ void TierDown() {
+ SetTieredDown();
+ native_module_->RecompileForTiering();
+ }
+
CompilationEnv CreateCompilationEnv();
ExecutionTier execution_tier() const {
diff --git a/deps/v8/test/common/assembler-tester.h b/deps/v8/test/common/assembler-tester.h
index 966ee7da0b..38dcbdee38 100644
--- a/deps/v8/test/common/assembler-tester.h
+++ b/deps/v8/test/common/assembler-tester.h
@@ -15,31 +15,32 @@ namespace internal {
class TestingAssemblerBuffer : public AssemblerBuffer {
public:
- TestingAssemblerBuffer(size_t requested, void* address) {
+ TestingAssemblerBuffer(
+ size_t requested, void* address,
+ VirtualMemory::JitPermission jit_permission = VirtualMemory::kNoJit) {
size_t page_size = v8::internal::AllocatePageSize();
size_t alloc_size = RoundUp(requested, page_size);
CHECK_GE(kMaxInt, alloc_size);
- size_ = static_cast<int>(alloc_size);
- buffer_ = static_cast<byte*>(AllocatePages(GetPlatformPageAllocator(),
- address, alloc_size, page_size,
- v8::PageAllocator::kReadWrite));
- CHECK_NOT_NULL(buffer_);
+ reservation_ = VirtualMemory(GetPlatformPageAllocator(), alloc_size,
+ address, page_size, jit_permission);
+ CHECK(reservation_.IsReserved());
+ MakeWritable();
}
- ~TestingAssemblerBuffer() {
- CHECK(FreePages(GetPlatformPageAllocator(), buffer_, size_));
- }
+ ~TestingAssemblerBuffer() { reservation_.Free(); }
- byte* start() const override { return buffer_; }
+ byte* start() const override {
+ return reinterpret_cast<byte*>(reservation_.address());
+ }
- int size() const override { return size_; }
+ int size() const override { return static_cast<int>(reservation_.size()); }
std::unique_ptr<AssemblerBuffer> Grow(int new_size) override {
FATAL("Cannot grow TestingAssemblerBuffer");
}
std::unique_ptr<AssemblerBuffer> CreateView() const {
- return ExternalAssemblerBuffer(buffer_, size_);
+ return ExternalAssemblerBuffer(start(), size());
}
void MakeExecutable() {
@@ -48,35 +49,36 @@ class TestingAssemblerBuffer : public AssemblerBuffer {
// some older ARM kernels there is a bug which causes cache flush instructions
// to trigger an access error on non-writable memory.
// See https://bugs.chromium.org/p/v8/issues/detail?id=8157
- FlushInstructionCache(buffer_, size_);
+ FlushInstructionCache(start(), size());
- bool result = SetPermissions(GetPlatformPageAllocator(), buffer_, size_,
+ bool result = SetPermissions(GetPlatformPageAllocator(), start(), size(),
v8::PageAllocator::kReadExecute);
CHECK(result);
}
void MakeWritable() {
- bool result = SetPermissions(GetPlatformPageAllocator(), buffer_, size_,
+ bool result = SetPermissions(GetPlatformPageAllocator(), start(), size(),
v8::PageAllocator::kReadWrite);
CHECK(result);
}
// TODO(wasm): Only needed for the "test-jump-table-assembler.cc" tests.
void MakeWritableAndExecutable() {
- bool result = SetPermissions(GetPlatformPageAllocator(), buffer_, size_,
+ bool result = SetPermissions(GetPlatformPageAllocator(), start(), size(),
v8::PageAllocator::kReadWriteExecute);
CHECK(result);
}
private:
- byte* buffer_;
- int size_;
+ VirtualMemory reservation_;
};
static inline std::unique_ptr<TestingAssemblerBuffer> AllocateAssemblerBuffer(
size_t requested = v8::internal::AssemblerBase::kDefaultBufferSize,
- void* address = nullptr) {
- return std::make_unique<TestingAssemblerBuffer>(requested, address);
+ void* address = nullptr,
+ VirtualMemory::JitPermission jit_permission = VirtualMemory::kNoJit) {
+ return std::make_unique<TestingAssemblerBuffer>(requested, address,
+ jit_permission);
}
} // namespace internal
diff --git a/deps/v8/test/common/wasm/test-signatures.h b/deps/v8/test/common/wasm/test-signatures.h
index c7429c23e2..fb1a1fcddf 100644
--- a/deps/v8/test/common/wasm/test-signatures.h
+++ b/deps/v8/test/common/wasm/test-signatures.h
@@ -47,6 +47,7 @@ class TestSignatures {
sig_v_iii(0, 3, kIntTypes4),
sig_v_e(0, 1, kExternRefTypes4),
sig_v_c(0, 1, kFuncTypes4),
+ sig_v_d(0, 1, kDoubleTypes4),
sig_s_i(1, 1, kSimd128IntTypes4),
sig_s_s(1, 1, kSimd128Types4),
sig_s_ss(1, 2, kSimd128Types4),
@@ -111,6 +112,7 @@ class TestSignatures {
FunctionSig* v_iii() { return &sig_v_iii; }
FunctionSig* v_e() { return &sig_v_e; }
FunctionSig* v_c() { return &sig_v_c; }
+ FunctionSig* v_d() { return &sig_v_d; }
FunctionSig* s_i() { return &sig_s_i; }
FunctionSig* s_s() { return &sig_s_s; }
FunctionSig* s_ss() { return &sig_s_ss; }
@@ -178,6 +180,7 @@ class TestSignatures {
FunctionSig sig_v_iii;
FunctionSig sig_v_e;
FunctionSig sig_v_c;
+ FunctionSig sig_v_d;
FunctionSig sig_s_i;
FunctionSig sig_s_s;
FunctionSig sig_s_ss;
diff --git a/deps/v8/test/common/wasm/wasm-interpreter.cc b/deps/v8/test/common/wasm/wasm-interpreter.cc
index e58e518e8d..4a4d08524a 100644
--- a/deps/v8/test/common/wasm/wasm-interpreter.cc
+++ b/deps/v8/test/common/wasm/wasm-interpreter.cc
@@ -8,6 +8,7 @@
#include <type_traits>
#include "src/base/overflowing-math.h"
+#include "src/base/safe_conversions.h"
#include "src/codegen/assembler-inl.h"
#include "src/common/globals.h"
#include "src/compiler/wasm-compiler.h"
@@ -449,19 +450,6 @@ int_type ExecuteConvert(float_type a, TrapReason* trap) {
return 0;
}
-template <typename int_type, typename float_type>
-int_type ExecuteConvertSaturate(float_type a) {
- TrapReason base_trap = kTrapCount;
- int32_t val = ExecuteConvert<int_type>(a, &base_trap);
- if (base_trap == kTrapCount) {
- return val;
- }
- return std::isnan(a) ? 0
- : (a < static_cast<float_type>(0.0)
- ? std::numeric_limits<int_type>::min()
- : std::numeric_limits<int_type>::max());
-}
-
template <typename dst_type, typename src_type, void (*fn)(Address)>
dst_type CallExternalIntToFloatFunction(src_type input) {
uint8_t data[std::max(sizeof(dst_type), sizeof(src_type))];
@@ -471,13 +459,14 @@ dst_type CallExternalIntToFloatFunction(src_type input) {
return ReadUnalignedValue<dst_type>(data_addr);
}
-template <typename dst_type, typename src_type, int32_t (*fn)(Address)>
-dst_type CallExternalFloatToIntFunction(src_type input, TrapReason* trap) {
- uint8_t data[std::max(sizeof(dst_type), sizeof(src_type))];
- Address data_addr = reinterpret_cast<Address>(data);
- WriteUnalignedValue<src_type>(data_addr, input);
- if (!fn(data_addr)) *trap = kTrapFloatUnrepresentable;
- return ReadUnalignedValue<dst_type>(data_addr);
+template <typename dst_type, typename src_type>
+dst_type ConvertFloatToIntOrTrap(src_type input, TrapReason* trap) {
+ if (base::IsValueInRangeForNumericType<dst_type>(input)) {
+ return static_cast<dst_type>(input);
+ } else {
+ *trap = kTrapFloatUnrepresentable;
+ return 0;
+ }
}
uint32_t ExecuteI32ConvertI64(int64_t a, TrapReason* trap) {
@@ -485,67 +474,19 @@ uint32_t ExecuteI32ConvertI64(int64_t a, TrapReason* trap) {
}
int64_t ExecuteI64SConvertF32(float a, TrapReason* trap) {
- return CallExternalFloatToIntFunction<int64_t, float,
- float32_to_int64_wrapper>(a, trap);
-}
-
-int64_t ExecuteI64SConvertSatF32(float a) {
- TrapReason base_trap = kTrapCount;
- int64_t val = ExecuteI64SConvertF32(a, &base_trap);
- if (base_trap == kTrapCount) {
- return val;
- }
- return std::isnan(a) ? 0
- : (a < 0.0 ? std::numeric_limits<int64_t>::min()
- : std::numeric_limits<int64_t>::max());
+ return ConvertFloatToIntOrTrap<int64_t, float>(a, trap);
}
int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
- return CallExternalFloatToIntFunction<int64_t, double,
- float64_to_int64_wrapper>(a, trap);
-}
-
-int64_t ExecuteI64SConvertSatF64(double a) {
- TrapReason base_trap = kTrapCount;
- int64_t val = ExecuteI64SConvertF64(a, &base_trap);
- if (base_trap == kTrapCount) {
- return val;
- }
- return std::isnan(a) ? 0
- : (a < 0.0 ? std::numeric_limits<int64_t>::min()
- : std::numeric_limits<int64_t>::max());
+ return ConvertFloatToIntOrTrap<int64_t, double>(a, trap);
}
uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
- return CallExternalFloatToIntFunction<uint64_t, float,
- float32_to_uint64_wrapper>(a, trap);
-}
-
-uint64_t ExecuteI64UConvertSatF32(float a) {
- TrapReason base_trap = kTrapCount;
- uint64_t val = ExecuteI64UConvertF32(a, &base_trap);
- if (base_trap == kTrapCount) {
- return val;
- }
- return std::isnan(a) ? 0
- : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
- : std::numeric_limits<uint64_t>::max());
+ return ConvertFloatToIntOrTrap<uint64_t, float>(a, trap);
}
uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
- return CallExternalFloatToIntFunction<uint64_t, double,
- float64_to_uint64_wrapper>(a, trap);
-}
-
-uint64_t ExecuteI64UConvertSatF64(double a) {
- TrapReason base_trap = kTrapCount;
- int64_t val = ExecuteI64UConvertF64(a, &base_trap);
- if (base_trap == kTrapCount) {
- return val;
- }
- return std::isnan(a) ? 0
- : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
- : std::numeric_limits<uint64_t>::max());
+ return ConvertFloatToIntOrTrap<uint64_t, double>(a, trap);
}
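
The replacement above separates the two float-to-int conversion flavors cleanly: the trapping opcodes check representability (via base::IsValueInRangeForNumericType) and report kTrapFloatUnrepresentable, while the saturating *ConvertSat* opcodes, rewritten further down in terms of base::saturated_cast, clamp out-of-range inputs and turn NaN into zero. A self-contained sketch of both behaviors for the double-to-int32 case, deliberately written without V8 helpers and with bounds that are exactly representable as doubles:

#include <cmath>
#include <cstdint>
#include <iostream>
#include <limits>
#include <optional>
#include <string>

// Trapping flavor: an empty result stands in for raising
// kTrapFloatUnrepresentable when the truncated value does not fit in int32.
std::optional<int32_t> ConvertOrTrap(double a) {
  double t = std::trunc(a);
  if (std::isnan(a) || t < -2147483648.0 || t > 2147483647.0) {
    return std::nullopt;
  }
  return static_cast<int32_t>(t);
}

// Saturating flavor: NaN becomes 0, everything else is clamped into range.
int32_t ConvertSaturating(double a) {
  if (std::isnan(a)) return 0;
  double t = std::trunc(a);
  if (t <= -2147483648.0) return std::numeric_limits<int32_t>::min();
  if (t >= 2147483647.0) return std::numeric_limits<int32_t>::max();
  return static_cast<int32_t>(t);
}

int main() {
  auto print = [](std::optional<int32_t> v) {
    std::cout << (v ? std::to_string(*v) : std::string("trap")) << "\n";
  };
  print(ConvertOrTrap(3.9));                             // 3
  print(ConvertOrTrap(1e12));                            // trap
  std::cout << ConvertSaturating(1e12) << "\n";          // 2147483647
  std::cout << ConvertSaturating(std::nan("")) << "\n";  // 0
}
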
int64_t ExecuteI64SConvertI32(int32_t a, TrapReason* trap) {
@@ -614,7 +555,8 @@ int64_t ExecuteI64ReinterpretF64(WasmValue a) {
return a.to_f64_boxed().get_bits();
}
-constexpr int32_t kCatchInArity = 1;
+constexpr int32_t kCatchAllExceptionIndex = -1;
+constexpr int32_t kRethrowOrDelegateExceptionIndex = -2;
} // namespace
@@ -637,10 +579,13 @@ struct InterpreterCode {
class SideTable : public ZoneObject {
public:
ControlTransferMap map_;
+ // Map rethrow instructions to the catch block index they target.
+ ZoneMap<pc_t, int> rethrow_map_;
int32_t max_stack_height_ = 0;
+ int32_t max_control_stack_height = 0;
SideTable(Zone* zone, const WasmModule* module, InterpreterCode* code)
- : map_(zone) {
+ : map_(zone), rethrow_map_(zone) {
// Create a zone for all temporary objects.
Zone control_transfer_zone(zone->allocator(), ZONE_NAME);
@@ -649,7 +594,10 @@ class SideTable : public ZoneObject {
friend Zone;
explicit CLabel(Zone* zone, int32_t target_stack_height, uint32_t arity)
- : target_stack_height(target_stack_height), arity(arity), refs(zone) {
+ : catch_targets(zone),
+ target_stack_height(target_stack_height),
+ arity(arity),
+ refs(zone) {
DCHECK_LE(0, target_stack_height);
}
@@ -658,7 +606,13 @@ class SideTable : public ZoneObject {
const byte* from_pc;
const int32_t stack_height;
};
+ struct CatchTarget {
+ int exception_index;
+ int target_control_index;
+ const byte* pc;
+ };
const byte* target = nullptr;
+ ZoneVector<CatchTarget> catch_targets;
int32_t target_stack_height;
// Arity when branching to this label.
const uint32_t arity;
@@ -674,6 +628,10 @@ class SideTable : public ZoneObject {
target = pc;
}
+ void Bind(const byte* pc, int exception_index, int target_control_index) {
+ catch_targets.push_back({exception_index, target_control_index, pc});
+ }
+
// Reference this label from the given location.
void Ref(const byte* from_pc, int32_t stack_height) {
// Target being bound before a reference means this is a loop.
@@ -682,19 +640,41 @@ class SideTable : public ZoneObject {
}
void Finish(ControlTransferMap* map, const byte* start) {
- DCHECK_NOT_NULL(target);
+ DCHECK_EQ(!!target, catch_targets.empty());
for (auto ref : refs) {
size_t offset = static_cast<size_t>(ref.from_pc - start);
- auto pcdiff = static_cast<pcdiff_t>(target - ref.from_pc);
DCHECK_GE(ref.stack_height, target_stack_height);
spdiff_t spdiff =
static_cast<spdiff_t>(ref.stack_height - target_stack_height);
- TRACE("control transfer @%zu: Δpc %d, stack %u->%u = -%u\n", offset,
- pcdiff, ref.stack_height, target_stack_height, spdiff);
- ControlTransferEntry& entry = (*map)[offset];
- entry.pc_diff = pcdiff;
- entry.sp_diff = spdiff;
- entry.target_arity = arity;
+ if (target) {
+ auto pcdiff = static_cast<pcdiff_t>(target - ref.from_pc);
+ TRACE("control transfer @%zu: Δpc %d, stack %u->%u = -%u\n", offset,
+ pcdiff, ref.stack_height, target_stack_height, spdiff);
+ ControlTransferEntry& entry = (map->map)[offset];
+ entry.pc_diff = pcdiff;
+ entry.sp_diff = spdiff;
+ entry.target_arity = arity;
+ } else {
+ Zone* zone = map->catch_map.get_allocator().zone();
+ auto p = map->catch_map.emplace(
+ offset, ZoneVector<CatchControlTransferEntry>(zone));
+ auto& catch_entries = p.first->second;
+ for (auto& p : catch_targets) {
+ auto pcdiff = static_cast<pcdiff_t>(p.pc - ref.from_pc);
+ TRACE(
+ "control transfer @%zu: Δpc %d, stack %u->%u, exn: %d = "
+ "-%u\n",
+ offset, pcdiff, ref.stack_height, target_stack_height,
+ p.exception_index, spdiff);
+ CatchControlTransferEntry entry;
+ entry.pc_diff = pcdiff;
+ entry.sp_diff = spdiff;
+ entry.target_arity = arity;
+ entry.exception_index = p.exception_index;
+ entry.target_control_index = p.target_control_index;
+ catch_entries.emplace_back(entry);
+ }
+ }
}
}
};
@@ -710,6 +690,9 @@ class SideTable : public ZoneObject {
// Track whether this block was already left, i.e. all further
// instructions are unreachable.
bool unreachable = false;
+ // Whether this is a try...unwind...end block. Needed to handle the
+ // implicit rethrow when we reach the end of the block.
+ bool unwind = false;
Control(const byte* pc, CLabel* end_label, CLabel* else_label,
uint32_t exit_arity)
@@ -733,9 +716,8 @@ class SideTable : public ZoneObject {
// bytecodes are within the true or false block of an else.
ZoneVector<Control> control_stack(&control_transfer_zone);
// It also maintains a stack of all nested {try} blocks to resolve local
- // handler targets for potentially throwing operations. These exceptional
- // control transfers are treated just like other branches in the resulting
- // map. This stack contains indices into the above control stack.
+ // handler targets for potentially throwing operations. This stack contains
+ // indices into the above control stack.
ZoneVector<size_t> exception_stack(zone);
int32_t stack_height = 0;
uint32_t func_arity =
@@ -750,6 +732,14 @@ class SideTable : public ZoneObject {
auto copy_unreachable = [&] {
control_stack.back().unreachable = control_parent().unreachable;
};
+ int max_exception_arity = 0;
+ if (module) {
+ for (auto& exception : module->exceptions) {
+ max_exception_arity =
+ std::max(max_exception_arity,
+ static_cast<int>(exception.sig->parameter_count()));
+ }
+ }
for (BytecodeIterator i(code->start, code->end, &code->locals);
i.has_next(); i.next()) {
WasmOpcode opcode = i.current();
@@ -779,13 +769,16 @@ class SideTable : public ZoneObject {
DCHECK_GE(control_stack.size() - 1, exception_stack.back());
const Control* c = &control_stack[exception_stack.back()];
if (!unreachable) c->else_label->Ref(i.pc(), exceptional_stack_height);
- if (exceptional_stack_height + kCatchInArity > max_stack_height_) {
- max_stack_height_ = exceptional_stack_height + kCatchInArity;
+ if (exceptional_stack_height + max_exception_arity >
+ max_stack_height_) {
+ max_stack_height_ = exceptional_stack_height + max_exception_arity;
}
TRACE("handler @%u: %s -> try @%u\n", i.pc_offset(),
WasmOpcodes::OpcodeName(opcode),
static_cast<uint32_t>(c->pc - code->start));
}
+ max_control_stack_height = std::max(
+ max_control_stack_height, static_cast<int>(control_stack.size()));
switch (opcode) {
case kExprBlock:
case kExprLoop: {
@@ -836,12 +829,11 @@ class SideTable : public ZoneObject {
break;
}
case kExprElse: {
+ TRACE("control @%u: Else\n", i.pc_offset());
Control* c = &control_stack.back();
+ DCHECK_EQ(*c->pc, kExprIf);
copy_unreachable();
- TRACE("control @%u: Else\n", i.pc_offset());
- if (!unreachable) {
- c->end_label->Ref(i.pc(), stack_height);
- }
+ if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
DCHECK_NOT_NULL(c->else_label);
c->else_label->Bind(i.pc() + 1);
c->else_label->Finish(&map_, code->start);
@@ -851,6 +843,49 @@ class SideTable : public ZoneObject {
stack_height >= c->end_label->target_stack_height);
break;
}
+ case kExprCatchAll: {
+ TRACE("control @%u: CatchAll\n", i.pc_offset());
+ Control* c = &control_stack.back();
+ DCHECK_EQ(*c->pc, kExprTry);
+ if (!exception_stack.empty() &&
+ exception_stack.back() == control_stack.size() - 1) {
+ // Only pop the exception stack if this is the only catch handler.
+ exception_stack.pop_back();
+ }
+ copy_unreachable();
+ if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
+ DCHECK_NOT_NULL(c->else_label);
+ int control_index = static_cast<int>(control_stack.size()) - 1;
+ c->else_label->Bind(i.pc() + 1, kCatchAllExceptionIndex,
+ control_index);
+ c->else_label->Finish(&map_, code->start);
+ c->else_label = nullptr;
+ DCHECK_IMPLIES(!unreachable,
+ stack_height >= c->end_label->target_stack_height);
+ stack_height = c->end_label->target_stack_height;
+ break;
+ }
+ case kExprUnwind: {
+ TRACE("control @%u: Unwind\n", i.pc_offset());
+ Control* c = &control_stack.back();
+ DCHECK_EQ(*c->pc, kExprTry);
+ DCHECK(!exception_stack.empty());
+ DCHECK_EQ(exception_stack.back(), control_stack.size() - 1);
+ exception_stack.pop_back();
+ copy_unreachable();
+ if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
+ DCHECK_NOT_NULL(c->else_label);
+ int control_index = static_cast<int>(control_stack.size()) - 1;
+ c->else_label->Bind(i.pc() + 1, kCatchAllExceptionIndex,
+ control_index);
+ c->else_label->Finish(&map_, code->start);
+ c->else_label = nullptr;
+ c->unwind = true;
+ DCHECK_IMPLIES(!unreachable,
+ stack_height >= c->end_label->target_stack_height);
+ stack_height = c->end_label->target_stack_height;
+ break;
+ }
case kExprTry: {
BlockTypeImmediate<Decoder::kNoValidation> imm(
WasmFeatures::All(), &i, i.pc() + 1, module);
@@ -862,29 +897,42 @@ class SideTable : public ZoneObject {
CLabel* end_label = CLabel::New(&control_transfer_zone, stack_height,
imm.out_arity());
CLabel* catch_label =
- CLabel::New(&control_transfer_zone, stack_height, kCatchInArity);
+ CLabel::New(&control_transfer_zone, stack_height, 0);
control_stack.emplace_back(i.pc(), end_label, catch_label,
imm.out_arity());
exception_stack.push_back(control_stack.size() - 1);
copy_unreachable();
break;
}
+ case kExprRethrow: {
+ BranchDepthImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
+ int index = static_cast<int>(control_stack.size()) - 1 - imm.depth;
+ rethrow_map_.emplace(i.pc() - i.start(), index);
+ break;
+ }
case kExprCatch: {
- DCHECK_EQ(control_stack.size() - 1, exception_stack.back());
+ if (!exception_stack.empty() &&
+ exception_stack.back() == control_stack.size() - 1) {
+ // Only pop the exception stack once when we enter the first catch.
+ exception_stack.pop_back();
+ }
+ ExceptionIndexImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
Control* c = &control_stack.back();
- exception_stack.pop_back();
copy_unreachable();
TRACE("control @%u: Catch\n", i.pc_offset());
- if (!unreachable) {
- c->end_label->Ref(i.pc(), stack_height);
- }
+ if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
+
DCHECK_NOT_NULL(c->else_label);
- c->else_label->Bind(i.pc() + 1);
- c->else_label->Finish(&map_, code->start);
- c->else_label = nullptr;
+ int control_index = static_cast<int>(control_stack.size()) - 1;
+ c->else_label->Bind(i.pc() + imm.length + 1, imm.index,
+ control_index);
+
DCHECK_IMPLIES(!unreachable,
stack_height >= c->end_label->target_stack_height);
- stack_height = c->end_label->target_stack_height + kCatchInArity;
+ const FunctionSig* exception_sig = module->exceptions[imm.index].sig;
+ int catch_in_arity =
+ static_cast<int>(exception_sig->parameter_count());
+ stack_height = c->end_label->target_stack_height + catch_in_arity;
break;
}
case kExprEnd: {
@@ -893,14 +941,73 @@ class SideTable : public ZoneObject {
// Only loops have bound labels.
DCHECK_IMPLIES(c->end_label->target, *c->pc == kExprLoop);
if (!c->end_label->target) {
- if (c->else_label) c->else_label->Bind(i.pc());
+ if (c->else_label) {
+ if (*c->pc == kExprIf) {
+ // Bind else label for one-armed if.
+ c->else_label->Bind(i.pc());
+ } else if (!exception_stack.empty()) {
+ // No catch_all block, prepare for implicit rethrow.
+ DCHECK_EQ(*c->pc, kExprTry);
+ Control* next_try_block =
+ &control_stack[exception_stack.back()];
+ constexpr int kUnusedControlIndex = -1;
+ c->else_label->Bind(i.pc(), kRethrowOrDelegateExceptionIndex,
+ kUnusedControlIndex);
+ if (!unreachable) {
+ next_try_block->else_label->Ref(
+ i.pc(), c->else_label->target_stack_height);
+ }
+ }
+ } else if (c->unwind) {
+ DCHECK_EQ(*c->pc, kExprTry);
+ rethrow_map_.emplace(i.pc() - i.start(),
+ static_cast<int>(control_stack.size()) - 1);
+ if (!exception_stack.empty()) {
+ Control* next_try_block =
+ &control_stack[exception_stack.back()];
+ if (!unreachable) {
+ next_try_block->else_label->Ref(i.pc(), stack_height);
+ }
+ }
+ }
c->end_label->Bind(i.pc() + 1);
}
c->Finish(&map_, code->start);
+
+ DCHECK_IMPLIES(!unreachable,
+ stack_height >= c->end_label->target_stack_height);
+ stack_height = c->end_label->target_stack_height + c->exit_arity;
+ control_stack.pop_back();
+ break;
+ }
+ case kExprDelegate: {
+ BranchDepthImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
+ TRACE("control @%u: Delegate[depth=%u]\n", i.pc_offset(), imm.depth);
+ Control* c = &control_stack.back();
+ const size_t new_stack_size = control_stack.size() - 1;
+ const size_t max_depth = new_stack_size - 1;
+ if (imm.depth < max_depth) {
+ constexpr int kUnusedControlIndex = -1;
+ c->else_label->Bind(i.pc(), kRethrowOrDelegateExceptionIndex,
+ kUnusedControlIndex);
+ c->else_label->Finish(&map_, code->start);
+ Control* target = &control_stack[max_depth - imm.depth];
+ DCHECK_EQ(*target->pc, kExprTry);
+ DCHECK_NOT_NULL(target->else_label);
+ if (!unreachable) {
+ target->else_label->Ref(i.pc(),
+ c->end_label->target_stack_height);
+ }
+ }
+ c->else_label = nullptr;
+ c->end_label->Bind(i.pc() + imm.length + 1);
+ c->Finish(&map_, code->start);
+
DCHECK_IMPLIES(!unreachable,
stack_height >= c->end_label->target_stack_height);
stack_height = c->end_label->target_stack_height + c->exit_arity;
control_stack.pop_back();
+ exception_stack.pop_back();
break;
}
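
The kExprDelegate case above derives its target from the control stack: with the delegating try conceptually removed (new_stack_size = control_stack.size() - 1), depth 0 names the next enclosing block, so the target index works out to control_stack.size() - 2 - depth. A small standalone illustration of that arithmetic; the block names are invented for the example:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Index of the block that a `delegate <depth>` forwards to, given a control
// stack whose innermost entry is the delegating try itself. Mirrors the code
// above: new_stack_size = size - 1, max_depth = new_stack_size - 1,
// target = max_depth - depth.
size_t DelegateTargetIndex(const std::vector<std::string>& control_stack,
                           uint32_t depth) {
  size_t new_stack_size = control_stack.size() - 1;  // delegating try removed
  size_t max_depth = new_stack_size - 1;
  return max_depth - depth;
}

int main() {
  // Function body, an outer try, and the inner try that ends in `delegate`.
  std::vector<std::string> control_stack = {"func", "try_outer", "try_inner"};
  std::cout << "delegate 0 -> "
            << control_stack[DelegateTargetIndex(control_stack, 0)] << "\n";
  std::cout << "delegate 1 -> "
            << control_stack[DelegateTargetIndex(control_stack, 1)] << "\n";
}
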
case kExprBr: {
@@ -944,13 +1051,18 @@ class SideTable : public ZoneObject {
}
bool HasEntryAt(pc_t from) {
- auto result = map_.find(from);
- return result != map_.end();
+ auto result = map_.map.find(from);
+ return result != map_.map.end();
+ }
+
+ bool HasCatchEntryAt(pc_t from) {
+ auto result = map_.catch_map.find(from);
+ return result != map_.catch_map.end();
}
ControlTransferEntry& Lookup(pc_t from) {
- auto result = map_.find(from);
- DCHECK(result != map_.end());
+ auto result = map_.map.find(from);
+ DCHECK(result != map_.map.end());
return result->second;
}
};
@@ -1177,18 +1289,28 @@ class WasmInterpreterInternals {
while (!frames_.empty()) {
Frame& frame = frames_.back();
InterpreterCode* code = frame.code;
- if (catchable && code->side_table->HasEntryAt(frame.pc)) {
+ if (catchable && code->side_table->HasCatchEntryAt(frame.pc)) {
TRACE("----- HANDLE -----\n");
- Push(WasmValue(handle(isolate->pending_exception(), isolate)));
- isolate->clear_pending_exception();
- frame.pc += JumpToHandlerDelta(code, frame.pc);
- TRACE(" => handler #%zu (#%u @%zu)\n", frames_.size() - 1,
- code->function->func_index, frame.pc);
- return WasmInterpreter::HANDLED;
+ HandleScope scope(isolate_);
+ Handle<Object> exception =
+ handle(isolate->pending_exception(), isolate);
+ if (JumpToHandlerDelta(code, exception, &frame.pc)) {
+ isolate->clear_pending_exception();
+ TRACE(" => handler #%zu (#%u @%zu)\n", frames_.size() - 1,
+ code->function->func_index, frame.pc);
+ return WasmInterpreter::HANDLED;
+ } else {
+ TRACE(" => no handler #%zu (#%u @%zu)\n", frames_.size() - 1,
+ code->function->func_index, frame.pc);
+ }
}
TRACE(" => drop frame #%zu (#%u @%zu)\n", frames_.size() - 1,
code->function->func_index, frame.pc);
ResetStack(frame.sp);
+ if (!frame.caught_exception_stack.is_null()) {
+ isolate_->global_handles()->Destroy(
+ frame.caught_exception_stack.location());
+ }
frames_.pop_back();
}
TRACE("----- UNWIND -----\n");
@@ -1208,6 +1330,8 @@ class WasmInterpreterInternals {
sp_t plimit() { return sp + code->function->sig->parameter_count(); }
// Limit of locals.
sp_t llimit() { return plimit() + code->locals.type_list.size(); }
+
+ Handle<FixedArray> caught_exception_stack;
};
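
Each frame now carries a caught_exception_stack sized to the function's maximum control-stack height: when a handler at control index i catches an exception, the object is stored in slot i, and a later rethrow (or the implicit rethrow at the end of an unwind block) finds it via the side table's rethrow_map_. A minimal sketch of that bookkeeping with ordinary containers; the real code keeps a FixedArray behind a global handle:

#include <cassert>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

// One interpreter frame with a per-control-index slot for caught exceptions.
struct Frame {
  std::vector<std::optional<std::string>> caught_exceptions;
};

// Called when a handler at `control_index` catches `exception`; the array is
// allocated lazily and sized to the function's maximum control-stack height.
void OnCaught(Frame* frame, int control_index, std::string exception,
              int max_control_stack_height) {
  if (frame->caught_exceptions.empty()) {
    frame->caught_exceptions.resize(max_control_stack_height);
  }
  frame->caught_exceptions[control_index] = std::move(exception);
}

// Called for `rethrow` (or the implicit rethrow after `unwind`), where the
// control index comes from the side table's rethrow_map_.
std::string Rethrow(const Frame& frame, int control_index) {
  const auto& slot = frame.caught_exceptions[control_index];
  assert(slot.has_value());  // a handler must have stored the exception first
  return *slot;
}

int main() {
  Frame frame;
  OnCaught(&frame, /*control_index=*/2, "tag 0 payload",
           /*max_control_stack_height=*/4);
  std::cout << "rethrowing: " << Rethrow(frame, 2) << "\n";
}
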
// Safety wrapper for values on the operand stack represented as {WasmValue}.
@@ -1291,7 +1415,8 @@ class WasmInterpreterInternals {
// The parameters will overlap the arguments already on the stack.
DCHECK_GE(StackHeight(), arity);
- frames_.push_back({code, 0, StackHeight() - arity});
+ frames_.push_back(
+ {code, 0, StackHeight() - arity, Handle<FixedArray>::null()});
frames_.back().pc = InitLocals(code);
TRACE(" => PushFrame #%zu (#%u @%zu)\n", frames_.size() - 1,
code->function->func_index, frames_.back().pc);
@@ -1302,21 +1427,22 @@ class WasmInterpreterInternals {
WasmValue val;
switch (p.kind()) {
#define CASE_TYPE(valuetype, ctype) \
- case ValueType::valuetype: \
+ case valuetype: \
val = WasmValue(ctype{}); \
break;
FOREACH_WASMVALUE_CTYPES(CASE_TYPE)
#undef CASE_TYPE
- case ValueType::kOptRef: {
+ case kOptRef: {
val = WasmValue(isolate_->factory()->null_value());
break;
}
- case ValueType::kRef:
- case ValueType::kRtt: // TODO(7748): Implement.
- case ValueType::kStmt:
- case ValueType::kBottom:
- case ValueType::kI8:
- case ValueType::kI16:
+ case kRef: // TODO(7748): Implement.
+ case kRtt:
+ case kRttWithDepth:
+ case kStmt:
+ case kBottom:
+ case kI8:
+ case kI16:
UNREACHABLE();
break;
}
@@ -1343,11 +1469,51 @@ class WasmInterpreterInternals {
return static_cast<int>(code->side_table->Lookup(pc).pc_diff);
}
- int JumpToHandlerDelta(InterpreterCode* code, pc_t pc) {
- ControlTransferEntry& control_transfer_entry = code->side_table->Lookup(pc);
- DoStackTransfer(control_transfer_entry.sp_diff + kCatchInArity,
- control_transfer_entry.target_arity);
- return control_transfer_entry.pc_diff;
+ bool JumpToHandlerDelta(InterpreterCode* code,
+ Handle<Object> exception_object, pc_t* pc) {
+ auto it = code->side_table->map_.catch_map.find(*pc);
+ if (it == code->side_table->map_.catch_map.end()) {
+ // No handler in this frame means that we should rethrow to the caller.
+ return false;
+ }
+ CatchControlTransferEntry* handler = nullptr;
+ for (auto& entry : it->second) {
+ if (entry.exception_index < 0) {
+ ResetStack(StackHeight() - entry.sp_diff);
+ *pc += entry.pc_diff;
+ if (entry.exception_index == kRethrowOrDelegateExceptionIndex) {
+ // Recursively try to find a handler in the next enclosing try block
+ // (for the implicit rethrow) or in the delegate target.
+ return JumpToHandlerDelta(code, exception_object, pc);
+ }
+ handler = &entry;
+ break;
+ } else if (MatchingExceptionTag(exception_object,
+ entry.exception_index)) {
+ handler = &entry;
+ const WasmException* exception =
+ &module()->exceptions[entry.exception_index];
+ const FunctionSig* sig = exception->sig;
+ int catch_in_arity = static_cast<int>(sig->parameter_count());
+ DoUnpackException(exception, exception_object);
+ DoStackTransfer(entry.sp_diff + catch_in_arity, catch_in_arity);
+ *pc += handler->pc_diff;
+ break;
+ }
+ }
+ if (!handler) return false;
+ if (frames_.back().caught_exception_stack.is_null()) {
+ Handle<FixedArray> caught_exception_stack =
+ isolate_->factory()->NewFixedArray(
+ code->side_table->max_control_stack_height);
+ caught_exception_stack->FillWithHoles(
+ 0, code->side_table->max_control_stack_height);
+ frames_.back().caught_exception_stack =
+ isolate_->global_handles()->Create(*caught_exception_stack);
+ }
+ frames_.back().caught_exception_stack->set(handler->target_control_index,
+ *exception_object);
+ return true;
}
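
JumpToHandlerDelta above walks the catch entries recorded for the throwing pc and distinguishes three cases: a tag-specific catch (non-negative exception_index that must match the thrown tag), a catch_all/unwind handler (kCatchAllExceptionIndex), and an implicit rethrow or delegate (kRethrowOrDelegateExceptionIndex) that forwards the search to the enclosing try. A simplified, self-contained sketch of that dispatch; only the entry layout mirrors CatchControlTransferEntry, the rest is invented and omits the stack transfer and value unpacking:

#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

constexpr int kCatchAll = -1;           // stands in for kCatchAllExceptionIndex
constexpr int kRethrowOrDelegate = -2;  // stands in for kRethrowOrDelegateExceptionIndex

struct CatchEntry {
  int pc_diff;          // offset to the handler, or to the enclosing try/delegate target
  int exception_index;  // tag to match, or one of the sentinels above
};

using CatchMap = std::map<uint32_t, std::vector<CatchEntry>>;

// Returns true and updates *pc when some handler accepts the thrown tag.
bool JumpToHandler(const CatchMap& catch_map, int thrown_tag, uint32_t* pc) {
  auto it = catch_map.find(*pc);
  if (it == catch_map.end()) return false;  // no handler: rethrow to the caller
  for (const CatchEntry& entry : it->second) {
    if (entry.exception_index == kRethrowOrDelegate) {
      // Keep searching at the enclosing try block (or the delegate target).
      *pc += entry.pc_diff;
      return JumpToHandler(catch_map, thrown_tag, pc);
    }
    if (entry.exception_index == kCatchAll ||
        entry.exception_index == thrown_tag) {
      *pc += entry.pc_diff;
      return true;
    }
  }
  return false;
}

int main() {
  CatchMap catch_map;
  catch_map[10] = {{5, /*tag*/ 0}, {9, kCatchAll}};  // catch $0 ... catch_all ...
  uint32_t pc = 10;
  bool handled = JumpToHandler(catch_map, /*thrown_tag=*/1, &pc);
  std::cout << "handled: " << handled << ", pc: " << pc << "\n";  // handled: 1, pc: 19
}
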
int DoBreak(InterpreterCode* code, pc_t pc, size_t depth) {
@@ -1378,6 +1544,10 @@ class WasmInterpreterInternals {
size_t arity) {
DCHECK_GT(frames_.size(), 0);
spdiff_t sp_diff = static_cast<spdiff_t>(StackHeight() - frames_.back().sp);
+ if (!frames_.back().caught_exception_stack.is_null()) {
+ isolate_->global_handles()->Destroy(
+ frames_.back().caught_exception_stack.location());
+ }
frames_.pop_back();
if (frames_.empty()) {
// A return from the last frame terminates the execution.
@@ -1401,15 +1571,11 @@ class WasmInterpreterInternals {
// Returns true if the call was successful, false if the stack check failed
// and the stack was fully unwound.
- bool DoCall(Decoder* decoder, InterpreterCode* target, pc_t* pc,
+ bool DoCall(Decoder* decoder, InterpreterCode** target, pc_t* pc,
pc_t* limit) V8_WARN_UNUSED_RESULT {
frames_.back().pc = *pc;
- PushFrame(target);
- if (!DoStackCheck()) return false;
- *pc = frames_.back().pc;
- *limit = target->end - target->start;
- decoder->Reset(target->start, target->end);
- return true;
+ PushFrame(*target);
+ return DoStackCheck(decoder, target, pc, limit);
}
// Returns true if the tail call was successful, false if the stack check
@@ -1632,28 +1798,28 @@ class WasmInterpreterInternals {
InterpreterCode* code, pc_t pc, int* const len) {
switch (opcode) {
case kExprI32SConvertSatF32:
- Push(WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<float>())));
+ Push(WasmValue(base::saturated_cast<int32_t>(Pop().to<float>())));
return true;
case kExprI32UConvertSatF32:
- Push(WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<float>())));
+ Push(WasmValue(base::saturated_cast<uint32_t>(Pop().to<float>())));
return true;
case kExprI32SConvertSatF64:
- Push(WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<double>())));
+ Push(WasmValue(base::saturated_cast<int32_t>(Pop().to<double>())));
return true;
case kExprI32UConvertSatF64:
- Push(WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<double>())));
+ Push(WasmValue(base::saturated_cast<uint32_t>(Pop().to<double>())));
return true;
case kExprI64SConvertSatF32:
- Push(WasmValue(ExecuteI64SConvertSatF32(Pop().to<float>())));
+ Push(WasmValue(base::saturated_cast<int64_t>(Pop().to<float>())));
return true;
case kExprI64UConvertSatF32:
- Push(WasmValue(ExecuteI64UConvertSatF32(Pop().to<float>())));
+ Push(WasmValue(base::saturated_cast<uint64_t>(Pop().to<float>())));
return true;
case kExprI64SConvertSatF64:
- Push(WasmValue(ExecuteI64SConvertSatF64(Pop().to<double>())));
+ Push(WasmValue(base::saturated_cast<int64_t>(Pop().to<double>())));
return true;
case kExprI64UConvertSatF64:
- Push(WasmValue(ExecuteI64UConvertSatF64(Pop().to<double>())));
+ Push(WasmValue(base::saturated_cast<uint64_t>(Pop().to<double>())));
return true;
case kExprMemoryInit: {
MemoryInitImmediate<Decoder::kNoValidation> imm(decoder,
@@ -2284,6 +2450,8 @@ class WasmInterpreterInternals {
(AixFpOpWorkaround<float, &nearbyintf>(a)))
UNOP_CASE(I64x2Neg, i64x2, int2, 2, base::NegateWithWraparound(a))
UNOP_CASE(I32x4Neg, i32x4, int4, 4, base::NegateWithWraparound(a))
+ // Use llabs, which works correctly on both 64-bit and 32-bit platforms.
+ UNOP_CASE(I64x2Abs, i64x2, int2, 2, std::llabs(a))
UNOP_CASE(I32x4Abs, i32x4, int4, 4, std::abs(a))
UNOP_CASE(S128Not, i32x4, int4, 4, ~a)
UNOP_CASE(I16x8Neg, i16x8, int8, 8, base::NegateWithWraparound(a))
@@ -2344,6 +2512,11 @@ class WasmInterpreterInternals {
CMPOP_CASE(F32x4Lt, f32x4, float4, int4, 4, a < b)
CMPOP_CASE(F32x4Le, f32x4, float4, int4, 4, a <= b)
CMPOP_CASE(I64x2Eq, i64x2, int2, int2, 2, a == b)
+ CMPOP_CASE(I64x2Ne, i64x2, int2, int2, 2, a != b)
+ CMPOP_CASE(I64x2LtS, i64x2, int2, int2, 2, a < b)
+ CMPOP_CASE(I64x2GtS, i64x2, int2, int2, 2, a > b)
+ CMPOP_CASE(I64x2LeS, i64x2, int2, int2, 2, a <= b)
+ CMPOP_CASE(I64x2GeS, i64x2, int2, int2, 2, a >= b)
CMPOP_CASE(I32x4Eq, i32x4, int4, int4, 4, a == b)
CMPOP_CASE(I32x4Ne, i32x4, int4, int4, 4, a != b)
CMPOP_CASE(I32x4GtS, i32x4, int4, int4, 4, a > b)
@@ -2489,7 +2662,7 @@ class WasmInterpreterInternals {
case kExpr##op: { \
WasmValue v = Pop(); \
src_type s = v.to_s128().to_##name(); \
- dst_type res; \
+ dst_type res = {0}; \
for (size_t i = 0; i < count; ++i) { \
ctype a = s.val[LANE(start_index + i, s)]; \
auto result = expr; \
@@ -2534,29 +2707,38 @@ class WasmInterpreterInternals {
CONVERT_CASE(I16x8SConvertI8x16Low, int16, i8x16, int8, 8, 0, int8_t, a)
CONVERT_CASE(I16x8UConvertI8x16Low, int16, i8x16, int8, 8, 0, uint8_t,
a)
+ CONVERT_CASE(F64x2ConvertLowI32x4S, int4, i32x4, float2, 2, 0, int32_t,
+ static_cast<double>(a))
+ CONVERT_CASE(F64x2ConvertLowI32x4U, int4, i32x4, float2, 2, 0, uint32_t,
+ static_cast<double>(a))
+ CONVERT_CASE(I32x4TruncSatF64x2SZero, float2, f64x2, int4, 2, 0, double,
+ base::saturated_cast<int32_t>(a))
+ CONVERT_CASE(I32x4TruncSatF64x2UZero, float2, f64x2, int4, 2, 0, double,
+ base::saturated_cast<uint32_t>(a))
+ CONVERT_CASE(F32x4DemoteF64x2Zero, float2, f64x2, float4, 2, 0, float,
+ DoubleToFloat32(a))
+ CONVERT_CASE(F64x2PromoteLowF32x4, float4, f32x4, float2, 2, 0, float,
+ static_cast<double>(a))
#undef CONVERT_CASE
-#define PACK_CASE(op, src_type, name, dst_type, count, ctype, dst_ctype) \
- case kExpr##op: { \
- WasmValue v2 = Pop(); \
- WasmValue v1 = Pop(); \
- src_type s1 = v1.to_s128().to_##name(); \
- src_type s2 = v2.to_s128().to_##name(); \
- dst_type res; \
- int64_t min = std::numeric_limits<ctype>::min(); \
- int64_t max = std::numeric_limits<ctype>::max(); \
- for (size_t i = 0; i < count; ++i) { \
- int64_t v = i < count / 2 ? s1.val[LANE(i, s1)] \
- : s2.val[LANE(i - count / 2, s2)]; \
- res.val[LANE(i, res)] = \
- static_cast<dst_ctype>(std::max(min, std::min(max, v))); \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
+#define PACK_CASE(op, src_type, name, dst_type, count, dst_ctype) \
+ case kExpr##op: { \
+ WasmValue v2 = Pop(); \
+ WasmValue v1 = Pop(); \
+ src_type s1 = v1.to_s128().to_##name(); \
+ src_type s2 = v2.to_s128().to_##name(); \
+ dst_type res; \
+ for (size_t i = 0; i < count; ++i) { \
+ int64_t v = i < count / 2 ? s1.val[LANE(i, s1)] \
+ : s2.val[LANE(i - count / 2, s2)]; \
+ res.val[LANE(i, res)] = base::saturated_cast<dst_ctype>(v); \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
}
- PACK_CASE(I16x8SConvertI32x4, int4, i32x4, int8, 8, int16_t, int16_t)
- PACK_CASE(I16x8UConvertI32x4, int4, i32x4, int8, 8, uint16_t, int16_t)
- PACK_CASE(I8x16SConvertI16x8, int8, i16x8, int16, 16, int8_t, int8_t)
- PACK_CASE(I8x16UConvertI16x8, int8, i16x8, int16, 16, uint8_t, int8_t)
+ PACK_CASE(I16x8SConvertI32x4, int4, i32x4, int8, 8, int16_t)
+ PACK_CASE(I16x8UConvertI32x4, int4, i32x4, int8, 8, uint16_t)
+ PACK_CASE(I8x16SConvertI16x8, int8, i16x8, int16, 16, int8_t)
+ PACK_CASE(I8x16UConvertI16x8, int8, i16x8, int16, 16, uint8_t)
#undef PACK_CASE
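
The rewritten PACK_CASE replaces the manual min/max clamping with base::saturated_cast, which performs the same narrowing: each source lane is clamped into the destination lane type's range. A standalone sketch of one such packing step, using a plain clamp instead of the V8 helper:

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <limits>

// Pack two i32x4 vectors into one i16x8 vector with signed saturation, the
// same narrowing that kExprI16x8SConvertI32x4 performs lane by lane.
std::array<int16_t, 8> PackI32x4ToI16x8(const std::array<int32_t, 4>& a,
                                        const std::array<int32_t, 4>& b) {
  auto clamp = [](int32_t v) {
    constexpr int32_t lo = std::numeric_limits<int16_t>::min();
    constexpr int32_t hi = std::numeric_limits<int16_t>::max();
    return static_cast<int16_t>(std::clamp(v, lo, hi));
  };
  std::array<int16_t, 8> res{};
  for (size_t i = 0; i < 8; ++i) {
    res[i] = clamp(i < 4 ? a[i] : b[i - 4]);
  }
  return res;
}

int main() {
  auto packed = PackI32x4ToI16x8({70000, -70000, 5, -5}, {1, 2, 3, 4});
  for (int16_t lane : packed) std::cout << lane << " ";
  std::cout << "\n";  // 32767 -32768 5 -5 1 2 3 4
}
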
case kExprS128Select: {
int4 bool_val = Pop().to_s128().to_i32x4();
@@ -2645,9 +2827,7 @@ class WasmInterpreterInternals {
Push(WasmValue(Simd128(res)));
return true;
}
- case kExprV32x4AnyTrue:
- case kExprV16x8AnyTrue:
- case kExprV8x16AnyTrue: {
+ case kExprV128AnyTrue: {
int4 s = Pop().to_s128().to_i32x4();
bool res = s.val[LANE(0, s)] | s.val[LANE(1, s)] | s.val[LANE(2, s)] |
s.val[LANE(3, s)];
@@ -2664,6 +2844,7 @@ class WasmInterpreterInternals {
Push(WasmValue(res)); \
return true; \
}
+ REDUCTION_CASE(V64x2AllTrue, i64x2, int2, 2, &)
REDUCTION_CASE(V32x4AllTrue, i32x4, int4, 4, &)
REDUCTION_CASE(V16x8AllTrue, i16x8, int8, 8, &)
REDUCTION_CASE(V8x16AllTrue, i8x16, int16, 16, &)
@@ -2955,7 +3136,8 @@ class WasmInterpreterInternals {
// Returns true if execution can continue, false if the stack was fully
// unwound. Do call this function immediately *after* pushing a new frame. The
// pc of the top frame will be reset to 0 if the stack check fails.
- bool DoStackCheck() V8_WARN_UNUSED_RESULT {
+ bool DoStackCheck(Decoder* decoder, InterpreterCode** target, pc_t* pc,
+ pc_t* limit) V8_WARN_UNUSED_RESULT {
// The goal of this stack check is not to prevent actual stack overflows,
// but to simulate stack overflows during the execution of compiled code.
// That is why this function uses FLAG_stack_size, even though the value
@@ -2965,13 +3147,20 @@ class WasmInterpreterInternals {
const size_t current_stack_size = (sp_ - stack_.get()) * sizeof(*sp_) +
frames_.size() * sizeof(frames_[0]);
if (V8_LIKELY(current_stack_size <= stack_size_limit)) {
+ *pc = frames_.back().pc;
+ *limit = (*target)->end - (*target)->start;
+ decoder->Reset((*target)->start, (*target)->end);
return true;
}
// The pc of the top frame is initialized to the first instruction. We reset
// it to 0 here such that we report the same position as in compiled code.
frames_.back().pc = 0;
isolate_->StackOverflow();
- return HandleException(isolate_) == WasmInterpreter::HANDLED;
+ if (HandleException(isolate_) == WasmInterpreter::HANDLED) {
+ ReloadFromFrameOnException(decoder, target, pc, limit);
+ return true;
+ }
+ return false;
}
void EncodeI32ExceptionValue(Handle<FixedArray> encoded_values,
@@ -3011,27 +3200,27 @@ class WasmInterpreterInternals {
for (size_t i = 0; i < sig->parameter_count(); ++i) {
WasmValue value = GetStackValue(base_index + i);
switch (sig->GetParam(i).kind()) {
- case ValueType::kI32: {
+ case kI32: {
uint32_t u32 = value.to_u32();
EncodeI32ExceptionValue(encoded_values, &encoded_index, u32);
break;
}
- case ValueType::kF32: {
+ case kF32: {
uint32_t f32 = value.to_f32_boxed().get_bits();
EncodeI32ExceptionValue(encoded_values, &encoded_index, f32);
break;
}
- case ValueType::kI64: {
+ case kI64: {
uint64_t u64 = value.to_u64();
EncodeI64ExceptionValue(encoded_values, &encoded_index, u64);
break;
}
- case ValueType::kF64: {
+ case kF64: {
uint64_t f64 = value.to_f64_boxed().get_bits();
EncodeI64ExceptionValue(encoded_values, &encoded_index, f64);
break;
}
- case ValueType::kS128: {
+ case kS128: {
int4 s128 = value.to_s128().to_i32x4();
EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[0]);
EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[1]);
@@ -3039,18 +3228,21 @@ class WasmInterpreterInternals {
EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[3]);
break;
}
- case ValueType::kRef:
- case ValueType::kOptRef: {
+ case kRef:
+ case kOptRef: {
switch (sig->GetParam(i).heap_representation()) {
case HeapType::kExtern:
- case HeapType::kExn:
case HeapType::kFunc:
case HeapType::kAny: {
Handle<Object> externref = value.to_externref();
encoded_values->set(encoded_index++, *externref);
break;
}
+ case HeapType::kBottom:
+ UNREACHABLE();
case HeapType::kEq:
+ case HeapType::kData:
+ case HeapType::kI31:
default:
// TODO(7748): Implement these.
UNIMPLEMENTED();
@@ -3058,11 +3250,12 @@ class WasmInterpreterInternals {
}
break;
}
- case ValueType::kRtt: // TODO(7748): Implement.
- case ValueType::kI8:
- case ValueType::kI16:
- case ValueType::kStmt:
- case ValueType::kBottom:
+ case kRtt: // TODO(7748): Implement.
+ case kRttWithDepth:
+ case kI8:
+ case kI16:
+ case kStmt:
+ case kBottom:
UNREACHABLE();
}
}
@@ -3075,8 +3268,8 @@ class WasmInterpreterInternals {
// Throw a given existing exception. Returns true if the exception is being
// handled locally by the interpreter, false otherwise (interpreter exits).
- bool DoRethrowException(WasmValue exception) {
- isolate_->ReThrow(*exception.to_externref());
+ bool DoRethrowException(Handle<Object> exception) {
+ isolate_->ReThrow(*exception);
return HandleException(isolate_) == WasmInterpreter::HANDLED;
}
@@ -3123,31 +3316,31 @@ class WasmInterpreterInternals {
for (size_t i = 0; i < sig->parameter_count(); ++i) {
WasmValue value;
switch (sig->GetParam(i).kind()) {
- case ValueType::kI32: {
+ case kI32: {
uint32_t u32 = 0;
DecodeI32ExceptionValue(encoded_values, &encoded_index, &u32);
value = WasmValue(u32);
break;
}
- case ValueType::kF32: {
+ case kF32: {
uint32_t f32_bits = 0;
DecodeI32ExceptionValue(encoded_values, &encoded_index, &f32_bits);
value = WasmValue(Float32::FromBits(f32_bits));
break;
}
- case ValueType::kI64: {
+ case kI64: {
uint64_t u64 = 0;
DecodeI64ExceptionValue(encoded_values, &encoded_index, &u64);
value = WasmValue(u64);
break;
}
- case ValueType::kF64: {
+ case kF64: {
uint64_t f64_bits = 0;
DecodeI64ExceptionValue(encoded_values, &encoded_index, &f64_bits);
value = WasmValue(Float64::FromBits(f64_bits));
break;
}
- case ValueType::kS128: {
+ case kS128: {
int4 s128 = {0, 0, 0, 0};
uint32_t* vals = reinterpret_cast<uint32_t*>(s128.val);
DecodeI32ExceptionValue(encoded_values, &encoded_index, &vals[0]);
@@ -3157,11 +3350,10 @@ class WasmInterpreterInternals {
value = WasmValue(Simd128(s128));
break;
}
- case ValueType::kRef:
- case ValueType::kOptRef: {
+ case kRef:
+ case kOptRef: {
switch (sig->GetParam(i).heap_representation()) {
case HeapType::kExtern:
- case HeapType::kExn:
case HeapType::kFunc:
case HeapType::kAny: {
Handle<Object> externref(encoded_values->get(encoded_index++),
@@ -3176,11 +3368,12 @@ class WasmInterpreterInternals {
}
break;
}
- case ValueType::kRtt: // TODO(7748): Implement.
- case ValueType::kI8:
- case ValueType::kI16:
- case ValueType::kStmt:
- case ValueType::kBottom:
+ case kRtt: // TODO(7748): Implement.
+ case kRttWithDepth:
+ case kI8:
+ case kI16:
+ case kStmt:
+ case kBottom:
UNREACHABLE();
}
Push(value);
@@ -3258,7 +3451,7 @@ class WasmInterpreterInternals {
WasmValue cond = Pop();
bool is_true = cond.to<uint32_t>() != 0;
if (is_true) {
- // fall through to the true block.
+ // Fall through to the true block.
len = 1 + imm.length;
TRACE(" true => fallthrough\n");
} else {
@@ -3268,7 +3461,9 @@ class WasmInterpreterInternals {
break;
}
case kExprElse:
- case kExprCatch: {
+ case kExprUnwind:
+ case kExprCatch:
+ case kExprCatchAll: {
len = LookupTargetDelta(code, pc);
TRACE(" end => @%zu\n", pc + len);
break;
@@ -3283,13 +3478,18 @@ class WasmInterpreterInternals {
continue; // Do not bump pc.
}
case kExprRethrow: {
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- WasmValue ex = Pop();
- if (ex.to_externref()->IsNull()) {
- return DoTrap(kTrapRethrowNull, pc);
- }
+ BranchDepthImmediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
+ HandleScope scope(isolate_); // Avoid leaking handles.
+ DCHECK(!frames_.back().caught_exception_stack.is_null());
+ int index = code->side_table->rethrow_map_[pc];
+ DCHECK_LE(0, index);
+ DCHECK_LT(index, frames_.back().caught_exception_stack->Size());
+ Handle<Object> exception = handle(
+ frames_.back().caught_exception_stack->get(index), isolate_);
+ DCHECK(!exception->IsTheHole());
CommitPc(pc); // Needed for local unwinding.
- if (!DoRethrowException(ex)) return;
+ if (!DoRethrowException(exception)) return;
ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
continue; // Do not bump pc.
}
@@ -3351,7 +3551,26 @@ class WasmInterpreterInternals {
case kExprUnreachable: {
return DoTrap(kTrapUnreachable, pc);
}
+ case kExprDelegate: {
+ BranchDepthImmediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
+ len = 1 + imm.length;
+ break;
+ }
case kExprEnd: {
+ if (code->side_table->rethrow_map_.count(pc)) {
+ // Implicit rethrow after unwind.
+ HandleScope scope(isolate_);
+ DCHECK(!frames_.back().caught_exception_stack.is_null());
+ int index = code->side_table->rethrow_map_[pc];
+ Handle<Object> exception = handle(
+ frames_.back().caught_exception_stack->get(index), isolate_);
+ DCHECK(!exception->IsTheHole());
+ CommitPc(pc); // Needed for local unwinding.
+ if (!DoRethrowException(exception)) return;
+ ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
+ continue; // Do not bump pc.
+ }
break;
}
case kExprI32Const: {
@@ -3438,7 +3657,7 @@ class WasmInterpreterInternals {
InterpreterCode* target = codemap_.GetCode(imm.index);
CHECK(!target->function->imported);
// Execute an internal call.
- if (!DoCall(&decoder, target, &pc, &limit)) return;
+ if (!DoCall(&decoder, &target, &pc, &limit)) return;
code = target;
continue; // Do not bump pc.
} break;
@@ -3453,7 +3672,7 @@ class WasmInterpreterInternals {
switch (result.type) {
case CallResult::INTERNAL:
// The import is a function of this instance. Call it directly.
- if (!DoCall(&decoder, result.interpreter_code, &pc, &limit))
+ if (!DoCall(&decoder, &result.interpreter_code, &pc, &limit))
return;
code = result.interpreter_code;
continue; // Do not bump pc.
@@ -3525,7 +3744,7 @@ class WasmInterpreterInternals {
auto& global = module()->globals[imm.index];
switch (global.type.kind()) {
#define CASE_TYPE(valuetype, ctype) \
- case ValueType::valuetype: { \
+ case valuetype: { \
uint8_t* ptr = \
WasmInstanceObject::GetGlobalStorage(instance_object_, global); \
WriteLittleEndianValue<ctype>(reinterpret_cast<Address>(ptr), \
@@ -3534,8 +3753,8 @@ class WasmInterpreterInternals {
}
FOREACH_WASMVALUE_CTYPES(CASE_TYPE)
#undef CASE_TYPE
- case ValueType::kRef:
- case ValueType::kOptRef: {
+ case kRef:
+ case kOptRef: {
// TODO(7748): Type checks or DCHECKs for ref types?
HandleScope handle_scope(isolate_); // Avoid leaking handles.
Handle<FixedArray> global_buffer; // The buffer of the global.
@@ -3547,11 +3766,12 @@ class WasmInterpreterInternals {
global_buffer->set(global_index, *ref);
break;
}
- case ValueType::kRtt: // TODO(7748): Implement.
- case ValueType::kI8:
- case ValueType::kI16:
- case ValueType::kStmt:
- case ValueType::kBottom:
+ case kRtt: // TODO(7748): Implement.
+ case kRttWithDepth:
+ case kI8:
+ case kI16:
+ case kStmt:
+ case kBottom:
UNREACHABLE();
}
len = 1 + imm.length;
@@ -3920,19 +4140,19 @@ class WasmInterpreterInternals {
}
WasmValue val = GetStackValue(i);
switch (val.type().kind()) {
- case ValueType::kI32:
+ case kI32:
PrintF("i32:%d", val.to<int32_t>());
break;
- case ValueType::kI64:
+ case kI64:
PrintF("i64:%" PRId64 "", val.to<int64_t>());
break;
- case ValueType::kF32:
+ case kF32:
PrintF("f32:%a", val.to<float>());
break;
- case ValueType::kF64:
+ case kF64:
PrintF("f64:%la", val.to<double>());
break;
- case ValueType::kS128: {
+ case kS128: {
// This defaults to tracing all S128 values as i32x4 values for now,
// when there is more state to know what type of values are on the
// stack, the right format should be printed here.
@@ -3940,11 +4160,11 @@ class WasmInterpreterInternals {
PrintF("i32x4:%d,%d,%d,%d", s.val[0], s.val[1], s.val[2], s.val[3]);
break;
}
- case ValueType::kStmt:
+ case kStmt:
PrintF("void");
break;
- case ValueType::kRef:
- case ValueType::kOptRef: {
+ case kRef:
+ case kOptRef: {
if (val.type().is_reference_to(HeapType::kExtern)) {
Handle<Object> ref = val.to_externref();
if (ref->IsNull()) {
@@ -3958,13 +4178,14 @@ class WasmInterpreterInternals {
}
break;
}
- case ValueType::kRtt:
+ case kRtt:
+ case kRttWithDepth:
// TODO(7748): Implement properly.
PrintF("rtt");
break;
- case ValueType::kI8:
- case ValueType::kI16:
- case ValueType::kBottom:
+ case kI8:
+ case kI16:
+ case kBottom:
UNREACHABLE();
break;
}
diff --git a/deps/v8/test/common/wasm/wasm-interpreter.h b/deps/v8/test/common/wasm/wasm-interpreter.h
index 4df373df46..ab89f5dc15 100644
--- a/deps/v8/test/common/wasm/wasm-interpreter.h
+++ b/deps/v8/test/common/wasm/wasm-interpreter.h
@@ -39,7 +39,17 @@ struct ControlTransferEntry {
uint32_t target_arity;
};
-using ControlTransferMap = ZoneMap<pc_t, ControlTransferEntry>;
+struct CatchControlTransferEntry : public ControlTransferEntry {
+ int exception_index;
+ int target_control_index;
+};
+
+struct ControlTransferMap {
+ explicit ControlTransferMap(Zone* zone) : map(zone), catch_map(zone) {}
+
+ ZoneMap<pc_t, ControlTransferEntry> map;
+ ZoneMap<pc_t, ZoneVector<CatchControlTransferEntry>> catch_map;
+};
// An interpreter capable of executing WebAssembly.
class WasmInterpreter {
diff --git a/deps/v8/test/common/wasm/wasm-macro-gen.h b/deps/v8/test/common/wasm/wasm-macro-gen.h
index 106d330913..7ddc32fc89 100644
--- a/deps/v8/test/common/wasm/wasm-macro-gen.h
+++ b/deps/v8/test/common/wasm/wasm-macro-gen.h
@@ -113,8 +113,8 @@
#define WASM_HEAP_TYPE(heap_type) static_cast<byte>((heap_type).code() & 0x7f)
-#define WASM_REF_TYPE(type) \
- (type).kind() == ValueType::kRef ? kRefCode : kOptRefCode, \
+#define WASM_REF_TYPE(type) \
+ (type).kind() == kRef ? kRefCode : kOptRefCode, \
WASM_HEAP_TYPE((type).heap_type())
#define WASM_BLOCK(...) kExprBlock, kVoidCode, __VA_ARGS__, kExprEnd
@@ -183,6 +183,10 @@
#define WASM_TRY_CATCH_T(t, trystmt, catchstmt, except) \
kExprTry, static_cast<byte>((t).value_type_code()), trystmt, kExprCatch, \
except, catchstmt, kExprEnd
+#define WASM_TRY_CATCH_CATCH_T(t, trystmt, except1, catchstmt1, except2, \
+ catchstmt2) \
+ kExprTry, static_cast<byte>((t).value_type_code()), trystmt, kExprCatch, \
+ except1, catchstmt1, kExprCatch, except2, catchstmt2, kExprEnd
#define WASM_TRY_CATCH_R(t, trystmt, catchstmt) \
kExprTry, WASM_REF_TYPE(t), trystmt, kExprCatch, catchstmt, kExprEnd
#define WASM_TRY_CATCH_ALL_T(t, trystmt, catchstmt) \
@@ -190,6 +194,12 @@
catchstmt, kExprEnd
#define WASM_TRY_DELEGATE(trystmt, depth) \
kExprTry, kVoidCode, trystmt, kExprDelegate, depth
+#define WASM_TRY_DELEGATE_T(t, trystmt, depth) \
+ kExprTry, static_cast<byte>((t).value_type_code()), trystmt, kExprDelegate, \
+ depth
+#define WASM_TRY_UNWIND_T(t, trystmt, unwindstmt) \
+ kExprTry, static_cast<byte>((t).value_type_code()), trystmt, kExprUnwind, \
+ unwindstmt, kExprEnd
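
The new WASM_TRY_* macros follow the header's existing convention: every macro expands to a comma-separated list of opcode bytes, so nested macros flatten into a single byte-array initializer in the test body. A tiny self-contained illustration of that technique with made-up opcode values (the real encodings come from the wasm opcode definitions):

#include <cstdint>
#include <iostream>

using byte = uint8_t;

// Made-up opcode values, used only to show how comma-expanding macros compose.
constexpr byte kDemoTry = 0xA0;
constexpr byte kDemoCatchAll = 0xA1;
constexpr byte kDemoEnd = 0xA2;
constexpr byte kDemoVoid = 0x40;

// Same shape as the WASM_TRY_* macros: the expansion is a plain comma list,
// so nested macros splice directly into the enclosing array initializer.
#define DEMO_TRY_CATCH_ALL(trystmt, catchstmt) \
  kDemoTry, kDemoVoid, trystmt, kDemoCatchAll, catchstmt, kDemoEnd

int main() {
  byte code[] = {DEMO_TRY_CATCH_ALL(0x01, 0x02)};
  std::cout << "encoded length: " << sizeof(code) << " bytes\n";  // 6 bytes
  for (byte b : code) std::cout << std::hex << int(b) << " ";
  std::cout << "\n";
}
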
#define WASM_SELECT(tval, fval, cond) tval, fval, cond, kExprSelect
#define WASM_SELECT_I(tval, fval, cond) \
@@ -215,6 +225,7 @@
val, cond, kExprBrIf, static_cast<byte>(depth), kExprDrop
#define WASM_CONTINUE(depth) kExprBr, static_cast<byte>(depth)
#define WASM_UNREACHABLE kExprUnreachable
+#define WASM_RETURN(...) __VA_ARGS__, kExprReturn
#define WASM_BR_TABLE(key, count, ...) \
key, kExprBrTable, U32V_1(count), __VA_ARGS__
@@ -464,6 +475,7 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
index, val, \
static_cast<byte>(v8::internal::wasm::LoadStoreOpcodeOf(type, true)), \
alignment, ZERO_OFFSET
+#define WASM_RETHROW(index) kExprRethrow, static_cast<byte>(index)
#define WASM_CALL_FUNCTION0(index) kExprCallFunction, static_cast<byte>(index)
#define WASM_CALL_FUNCTION(index, ...) \
@@ -497,19 +509,30 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
struct_obj, value, WASM_GC_OP(kExprStructSet), static_cast<byte>(typeidx), \
static_cast<byte>(fieldidx)
#define WASM_REF_NULL(type_encoding) kExprRefNull, type_encoding
-#define WASM_REF_FUNC(val) kExprRefFunc, val
+#define WASM_REF_FUNC(index) kExprRefFunc, index
#define WASM_REF_IS_NULL(val) val, kExprRefIsNull
#define WASM_REF_AS_NON_NULL(val) val, kExprRefAsNonNull
#define WASM_REF_EQ(lhs, rhs) lhs, rhs, kExprRefEq
-#define WASM_REF_TEST(obj_type, rtt_type, ref, rtt) \
- ref, rtt, WASM_GC_OP(kExprRefTest), obj_type, rtt_type
-#define WASM_REF_CAST(obj_type, rtt_type, ref, rtt) \
- ref, rtt, WASM_GC_OP(kExprRefCast), obj_type, rtt_type
+#define WASM_REF_TEST(ref, rtt) ref, rtt, WASM_GC_OP(kExprRefTest)
+#define WASM_REF_CAST(ref, rtt) ref, rtt, WASM_GC_OP(kExprRefCast)
// Takes a reference value from the value stack to allow sequences of
// conditional branches.
#define WASM_BR_ON_CAST(depth, rtt) \
rtt, WASM_GC_OP(kExprBrOnCast), static_cast<byte>(depth)
+#define WASM_REF_IS_DATA(ref) ref, WASM_GC_OP(kExprRefIsData)
+#define WASM_REF_AS_DATA(ref) ref, WASM_GC_OP(kExprRefAsData)
+#define WASM_BR_ON_DATA(depth, ref) \
+ ref, WASM_GC_OP(kExprBrOnData), static_cast<byte>(depth)
+#define WASM_REF_IS_FUNC(ref) ref, WASM_GC_OP(kExprRefIsFunc)
+#define WASM_REF_AS_FUNC(ref) ref, WASM_GC_OP(kExprRefAsFunc)
+#define WASM_BR_ON_FUNC(depth, ref) \
+ ref, WASM_GC_OP(kExprBrOnFunc), static_cast<byte>(depth)
+#define WASM_REF_IS_I31(ref) ref, WASM_GC_OP(kExprRefIsI31)
+#define WASM_REF_AS_I31(ref) ref, WASM_GC_OP(kExprRefAsI31)
+#define WASM_BR_ON_I31(depth, ref) \
+ ref, WASM_GC_OP(kExprBrOnI31), static_cast<byte>(depth)
+
#define WASM_ARRAY_NEW_WITH_RTT(index, default_value, length, rtt) \
default_value, length, rtt, WASM_GC_OP(kExprArrayNewWithRtt), \
static_cast<byte>(index)
@@ -526,7 +549,9 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define WASM_ARRAY_LEN(typeidx, array) \
array, WASM_GC_OP(kExprArrayLen), static_cast<byte>(typeidx)
-#define WASM_RTT(depth, typeidx) kRttCode, U32V_1(depth), U32V_1(typeidx)
+#define WASM_RTT_WITH_DEPTH(depth, typeidx) \
+ kRttWithDepthCode, U32V_1(depth), U32V_1(typeidx)
+#define WASM_RTT(typeidx) kRttCode, U32V_1(typeidx)
#define WASM_RTT_CANON(typeidx) \
WASM_GC_OP(kExprRttCanon), static_cast<byte>(typeidx)
#define WASM_RTT_SUB(typeidx, supertype) \
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.cc b/deps/v8/test/common/wasm/wasm-module-runner.cc
index f9ef2e6708..c74d0ec56c 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.cc
+++ b/deps/v8/test/common/wasm/wasm-module-runner.cc
@@ -49,31 +49,32 @@ OwnedVector<WasmValue> MakeDefaultInterpreterArguments(Isolate* isolate,
for (size_t i = 0; i < param_count; ++i) {
switch (sig->GetParam(i).kind()) {
- case ValueType::kI32:
+ case kI32:
arguments[i] = WasmValue(int32_t{0});
break;
- case ValueType::kI64:
+ case kI64:
arguments[i] = WasmValue(int64_t{0});
break;
- case ValueType::kF32:
+ case kF32:
arguments[i] = WasmValue(0.0f);
break;
- case ValueType::kF64:
+ case kF64:
arguments[i] = WasmValue(0.0);
break;
- case ValueType::kS128:
+ case kS128:
arguments[i] = WasmValue(Simd128{});
break;
- case ValueType::kOptRef:
+ case kOptRef:
arguments[i] =
WasmValue(Handle<Object>::cast(isolate->factory()->null_value()));
break;
- case ValueType::kRef:
- case ValueType::kRtt:
- case ValueType::kI8:
- case ValueType::kI16:
- case ValueType::kStmt:
- case ValueType::kBottom:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
+ case kI8:
+ case kI16:
+ case kStmt:
+ case kBottom:
UNREACHABLE();
}
}
@@ -88,26 +89,27 @@ OwnedVector<Handle<Object>> MakeDefaultArguments(Isolate* isolate,
for (size_t i = 0; i < param_count; ++i) {
switch (sig->GetParam(i).kind()) {
- case ValueType::kI32:
- case ValueType::kF32:
- case ValueType::kF64:
- case ValueType::kS128:
+ case kI32:
+ case kF32:
+ case kF64:
+ case kS128:
// Argument here for kS128 does not matter as we should error out before
// hitting this case.
arguments[i] = handle(Smi::zero(), isolate);
break;
- case ValueType::kI64:
+ case kI64:
arguments[i] = BigInt::FromInt64(isolate, 0);
break;
- case ValueType::kOptRef:
+ case kOptRef:
arguments[i] = isolate->factory()->null_value();
break;
- case ValueType::kRef:
- case ValueType::kRtt:
- case ValueType::kI8:
- case ValueType::kI16:
- case ValueType::kStmt:
- case ValueType::kBottom:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
+ case kI8:
+ case kI16:
+ case kStmt:
+ case kBottom:
UNREACHABLE();
}
}
@@ -169,16 +171,16 @@ WasmInterpretationResult InterpretWasmModule(
if (func->sig->return_count() > 0) {
WasmValue return_value = interpreter.GetReturnValue();
switch (func->sig->GetReturn(0).kind()) {
- case ValueType::kI32:
+ case kI32:
result = return_value.to<int32_t>();
break;
- case ValueType::kI64:
+ case kI64:
result = static_cast<int32_t>(return_value.to<int64_t>());
break;
- case ValueType::kF32:
+ case kF32:
result = static_cast<int32_t>(return_value.to<float>());
break;
- case ValueType::kF64:
+ case kF64:
result = static_cast<int32_t>(return_value.to<double>());
break;
default:
diff --git a/deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm-unsupported-liftoff.js b/deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm-unsupported-liftoff.js
index 00ee7f452c..aa6a664621 100644
--- a/deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm-unsupported-liftoff.js
+++ b/deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm-unsupported-liftoff.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-reftypes
+// Flags: --enable-testing-opcode-in-wasm
// Test that tiering up and tiering down works even if functions cannot be
// compiled with Liftoff.
@@ -11,11 +11,12 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
// Create a simple Wasm module.
function create_builder(i) {
+ const kExprNopForTestingUnsupportedInLiftoff = 0x16;
const builder = new WasmModuleBuilder();
- builder.addFunction('main', kSig_i_r)
+ builder.addFunction('main', kSig_i_i)
.addBody([
- kExprLocalGet, 0, kExprRefIsNull, ...wasmI32Const(i),
- kExprI32Add
+ kExprLocalGet, 0, kExprNopForTestingUnsupportedInLiftoff,
+ ...wasmI32Const(i), kExprI32Add
])
.exportFunc();
return builder;
diff --git a/deps/v8/test/debugger/debugger.status b/deps/v8/test/debugger/debugger.status
index 434d46cc9c..5054dc5327 100644
--- a/deps/v8/test/debugger/debugger.status
+++ b/deps/v8/test/debugger/debugger.status
@@ -117,6 +117,10 @@
'debug/debug-stepout-scope-part8': [SKIP],
}], # 'arch == arm and mode == debug'
+['mode != debug', {
+ # Test uses a flag only available in debug mode
+ 'debug/wasm/debug-enabled-tier-down-wasm-unsupported-liftoff': [SKIP],
+}],
##############################################################################
['arch == s390 or arch == s390x', {
@@ -127,20 +131,19 @@
}], # 'arch == s390 or arch == s390x'
##############################################################################
-['lite_mode or variant == jitless', {
- # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+# TODO(v8:7777): Change this once wasm is supported in jitless mode.
+['not has_webassembly or variant == jitless', {
'debug/wasm/*': [SKIP],
'regress/regress-crbug-840288': [SKIP],
'regress/regress-crbug-1032042': [SKIP],
- 'wasm-*': [SKIP],
-}], # lite_mode or variant == jitless
+}], # not has_webassembly or variant == jitless
##############################################################################
-['variant == turboprop or variant == nci or variant == nci_as_midtier', {
+['variant == turboprop or variant == turboprop_as_toptier', {
# Deopts differently than TurboFan.
'debug/debug-optimize': [SKIP],
'debug/debug-compile-optimized': [SKIP],
-}], # variant == turboprop or variant == nci or variant == nci_as_midtier
+}], # variant == turboprop or variant == turboprop_as_toptier
##############################################################################
# Liftoff needs to be enabled before running these tests.
diff --git a/deps/v8/test/fuzzer/inspector/regress-1166549 b/deps/v8/test/fuzzer/inspector/regress-1166549
new file mode 100644
index 0000000000..58554eb3da
--- /dev/null
+++ b/deps/v8/test/fuzzer/inspector/regress-1166549
@@ -0,0 +1,189 @@
+utils2 = new Proxy(utils, {get: function(target, prop) { if (prop in target) return target[prop]; return i=>i;}});
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+InspectorTest = {};
+InspectorTest._dumpInspectorProtocolMessages = false;
+InspectorTest._commandsForLogging = new Set();
+InspectorTest._sessions = new Set();
+
+InspectorTest.log = utils2.print.bind(utils2);
+InspectorTest.quitImmediately = utils2.quit.bind(utils2);
+
+InspectorTest.logProtocolCommandCalls = function(command) {
+ InspectorTest._commandsForLogging.add(command);
+}
+
+InspectorTest.completeTest = function() {
+ var promises = [];
+ for (var session of InspectorTest._sessions)
+ promises.push(session.Protocol.Debugger.disable());
+ Promise.all(promises).then(() => utils2.quit());
+}
+
+InspectorTest.ContextGroup = class {
+ constructor() {
+ this.id = utils2.createContextGroup();
+ }
+
+ addScript(string, lineOffset, columnOffset, url) {
+ utils2.compileAndRunWithOrigin(this.id, string, url || '', lineOffset || 0, columnOffset || 0, false);
+ }
+
+ connect() {
+ return new InspectorTest.Session(this);
+ }
+
+ reset() {
+ utils2.resetContextGroup(this.id);
+ }
+};
+
+InspectorTest.Session = class {
+ constructor(contextGroup) {
+ this.contextGroup = contextGroup;
+ this._dispatchTable = new Map();
+ this._eventHandlers = new Map();
+ this._requestId = 0;
+ this.Protocol = this._setupProtocol();
+ InspectorTest._sessions.add(this);
+ this.id = utils2.connectSession(contextGroup.id, '', this._dispatchMessage.bind(this));
+ }
+
+ disconnect() {
+ InspectorTest._sessions.delete(this);
+ utils2.disconnectSession(this.id);
+ }
+
+ reconnect() {
+ var state = utils2.disconnectSession(this.id);
+ this.id = utils2.connectSession(this.contextGroup.id, state, this._dispatchMessage.bind(this));
+ }
+
+ async addInspectedObject(serializable) {
+ return this.Protocol.Runtime.evaluate({expression: `inspector.addInspectedObject(${this.id}, ${JSON.stringify(serializable)})`});
+ }
+
+ sendRawCommand(requestId, command, handler) {
+ if (InspectorTest._dumpInspectorProtocolMessages)
+ utils2.print("frontend: " + command);
+ this._dispatchTable.set(requestId, handler);
+ utils2.sendMessageToBackend(this.id, command);
+ }
+
+ _sendCommandPromise(method, params) {
+ if (typeof params !== 'object')
+ utils2.print(`WARNING: non-object params passed to invocation of method ${method}`);
+ if (InspectorTest._commandsForLogging.has(method))
+ utils2.print(method + ' called');
+ var requestId = ++this._requestId;
+ var messageObject = { "id": requestId, "method": method, "params": params };
+ return new Promise(fulfill => this.sendRawCommand(requestId, JSON.stringify(messageObject), fulfill));
+ }
+
+ _setupProtocol() {
+ return new Proxy({}, { get: (target, agentName, receiver) => new Proxy({}, {
+ get: (target, methodName, receiver) => {
+ const eventPattern = /^on(ce)?([A-Z][A-Za-z0-9]+)/;
+ var match = eventPattern.exec(methodName);
+ if (!match)
+ return args => this._sendCommandPromise(`${agentName}.${methodName}`, args || {});
+ var eventName = match[2];
+ eventName = eventName.charAt(0).toLowerCase() + eventName.slice(1);
+ if (match[1])
+ return numOfEvents => this._waitForEventPromise(
+ `${agentName}.${eventName}`, numOfEvents || 1);
+ return listener => this._eventHandlers.set(`${agentName}.${eventName}`, listener);
+ }
+ })});
+ }
+
+ _dispatchMessage(messageString) {
+ var messageObject = JSON.parse(messageString);
+ if (InspectorTest._dumpInspectorProtocolMessages)
+ utils2.print("backend: " + JSON.stringify(messageObject));
+ const kMethodNotFound = -32601;
+ if (messageObject.error && messageObject.error.code === kMethodNotFound) {
+ InspectorTest.log(`Error: Called non-existent method. ${
+ messageObject.error.message} code: ${messagebOject.error.code}`);
+ InspectorTest.completeTest();
+ }
+ try {
+ var messageId = messageObject["id"];
+ if (typeof messageId === "number") {
+ var handler = this._dispatchTable.get(messageId);
+ if (handler) {
+ handler(messageObject);
+ this._dispatchTable.delete(messageId);
+ }
+ } else {
+ var eventName = messageObject["method"];
+ var eventHandler = this._eventHandlers.get(eventName0);
+ if (eventName === "Debugger.scriptParsed" && messageObject.params.url === "wait-for-pending-tasks.js")
+ return;
+ if (eventHandler)
+ eventHandler(messageObject);
+ }
+ } catch (e) {
+ InspectorTest.log("Exception when dispatching message: " + e + "\n" + e.stack + "\n message = " + JSON.stringify(messageObject, null, 2));
+ InspectorTest.completeTest();
+ }
+ };
+
+ _waitForEventPromise(eventName, numOfEvents) {
+ let events = [];
+ return new Promise(fulfill => {
+ this._eventHandlers.set(eventName, result => {
+ --numOfEvents;
+ events.push(result);
+ if (numOfEvents === 0) {
+ delete this._eventHandlers.delete(eventName);
+ fulfill(events.length > 1 ? events : events[0]);
+ }
+ });
+ });
+ }
+};
+
+InspectorTest.start = function(description) {
+ try {
+ InspectorTest.log(description);
+ var contextGroup = new InspectorTest.ContextGroup();
+ var session = contextGroup.connect();
+ return { session: session, contextGroup: contextGroup, Protocol: session.Protocol };
+ } catch (e) {
+ utils2.print(e.stack);
+ }
+}
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks stepping with blackboxed frames on stack');
+
+contextGroup.addScript(
+ `
+function userFoo() {
+ return 1;
+}
+
+function userBoo() {
+ return 2;
+}
+
+function testStepFromUser() {
+ frameworkCall([userFoo, userBoo])
+}
+
+function testStepFromFramework() {
+ frameworkBreakAndCall([userFoo, userBoo]);
+}
+//# sourceURL=user.js`,
+ 21, 4);
+
+Protocol.Debugger.enable()
+ .then(
+ () => Protocol.Debugger.setBlackboxPatterns(
+ {patterns: ['framework\.js']}))
+ .then(() => InspectorTest.completeTest());
diff --git a/deps/v8/test/fuzzer/regexp-builtins.cc b/deps/v8/test/fuzzer/regexp-builtins.cc
index 0cdaaaff21..e17400c4c3 100644
--- a/deps/v8/test/fuzzer/regexp-builtins.cc
+++ b/deps/v8/test/fuzzer/regexp-builtins.cc
@@ -243,8 +243,9 @@ std::string GenerateRandomFlags(FuzzerArgs* args) {
// TODO(mbid,v8:10765): Find a way to generate the kLinear flag sometimes,
// but only for patterns that are supported by the experimental engine.
constexpr size_t kFlagCount = JSRegExp::kFlagCount;
- CHECK_EQ(JSRegExp::kLinear, 1 << (kFlagCount - 1));
- CHECK_EQ(JSRegExp::kDotAll, 1 << (kFlagCount - 2));
+ CHECK_EQ(JSRegExp::kHasIndices, 1 << (kFlagCount - 1));
+ CHECK_EQ(JSRegExp::kLinear, 1 << (kFlagCount - 2));
+ CHECK_EQ(JSRegExp::kDotAll, 1 << (kFlagCount - 3));
STATIC_ASSERT((1 << kFlagCount) - 1 <= 0xFF);
const size_t flags = RandomByte(args) & ((1 << kFlagCount) - 1);
@@ -258,6 +259,7 @@ std::string GenerateRandomFlags(FuzzerArgs* args) {
if (flags & JSRegExp::kSticky) buffer[cursor++] = 'y';
if (flags & JSRegExp::kUnicode) buffer[cursor++] = 'u';
if (flags & JSRegExp::kDotAll) buffer[cursor++] = 's';
+ if (flags & JSRegExp::kHasIndices) buffer[cursor++] = 'd';
return std::string(buffer, cursor);
}
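
The hunk above teaches the regexp-builtins fuzzer about the new 'd' (hasIndices) flag: the CHECK_EQs pin the expected bit positions of the topmost flags, and the generated flag string gains a 'd' character when the corresponding bit is set in the random byte, while the STATIC_ASSERT keeps the whole flag set within one byte of fuzzer input. Below is a minimal standalone sketch of that bit-to-letter mapping; the enum values, names, and the main() driver are illustrative assumptions, not V8's actual JSRegExp definitions.

  // Sketch only: map a random flag byte to a RegExp flag string, one letter
  // per set bit. Bit positions here are assumed for illustration.
  #include <cstdint>
  #include <iostream>
  #include <string>

  enum Flag : uint8_t {
    kGlobal = 1 << 0,
    kIgnoreCase = 1 << 1,
    kMultiline = 1 << 2,
    kSticky = 1 << 3,
    kUnicode = 1 << 4,
    kDotAll = 1 << 5,
    kLinear = 1 << 6,      // experimental engine; no letter emitted here
    kHasIndices = 1 << 7,  // the new 'd' flag
  };

  std::string FlagsToString(uint8_t flags) {
    std::string out;
    if (flags & kGlobal) out += 'g';
    if (flags & kIgnoreCase) out += 'i';
    if (flags & kMultiline) out += 'm';
    if (flags & kSticky) out += 'y';
    if (flags & kUnicode) out += 'u';
    if (flags & kDotAll) out += 's';
    if (flags & kHasIndices) out += 'd';
    return out;
  }

  int main() {
    std::cout << FlagsToString(0x83) << "\n";  // bits 0, 1, 7 -> "gid"
    return 0;
  }
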
diff --git a/deps/v8/test/fuzzer/wasm-compile.cc b/deps/v8/test/fuzzer/wasm-compile.cc
index dbb50b55f9..6804cfa5c9 100644
--- a/deps/v8/test/fuzzer/wasm-compile.cc
+++ b/deps/v8/test/fuzzer/wasm-compile.cc
@@ -13,6 +13,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
#include "src/utils/ostreams.h"
+#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes-inl.h"
@@ -33,6 +34,7 @@ constexpr int kMaxFunctions = 4;
constexpr int kMaxGlobals = 64;
constexpr int kMaxParameters = 15;
constexpr int kMaxReturns = 15;
+constexpr int kMaxExceptions = 4;
class DataRange {
Vector<const uint8_t> data_;
@@ -105,7 +107,7 @@ ValueType GetValueType(DataRange* data) {
}
class WasmGenerator {
- template <WasmOpcode Op, ValueType::Kind... Args>
+ template <WasmOpcode Op, ValueKind... Args>
void op(DataRange* data) {
Generate<Args...>(data);
builder_->Emit(Op);
@@ -116,8 +118,8 @@ class WasmGenerator {
BlockScope(WasmGenerator* gen, WasmOpcode block_type,
Vector<const ValueType> param_types,
Vector<const ValueType> result_types,
- Vector<const ValueType> br_types)
- : gen_(gen) {
+ Vector<const ValueType> br_types, bool emit_end = true)
+ : gen_(gen), emit_end_(emit_end) {
gen->blocks_.emplace_back(br_types.begin(), br_types.end());
if (param_types.size() == 0 && result_types.size() == 0) {
gen->builder_->EmitWithU8(block_type, kWasmStmt.value_type_code());
@@ -146,12 +148,13 @@ class WasmGenerator {
}
~BlockScope() {
- gen_->builder_->Emit(kExprEnd);
+ if (emit_end_) gen_->builder_->Emit(kExprEnd);
gen_->blocks_.pop_back();
}
private:
WasmGenerator* const gen_;
+ bool emit_end_;
};
void block(Vector<const ValueType> param_types,
@@ -161,7 +164,7 @@ class WasmGenerator {
ConsumeAndGenerate(param_types, return_types, data);
}
- template <ValueType::Kind T>
+ template <ValueKind T>
void block(DataRange* data) {
block({}, VectorOf({ValueType::Primitive(T)}), data);
}
@@ -173,7 +176,7 @@ class WasmGenerator {
ConsumeAndGenerate(param_types, return_types, data);
}
- template <ValueType::Kind T>
+ template <ValueKind T>
void loop(DataRange* data) {
loop({}, VectorOf({ValueType::Primitive(T)}), data);
}
@@ -194,16 +197,69 @@ class WasmGenerator {
}
}
- template <ValueType::Kind T, IfType type>
+ template <ValueKind T, IfType type>
void if_(DataRange* data) {
- static_assert(T == ValueType::kStmt || type == kIfElse,
+ static_assert(T == kStmt || type == kIfElse,
"if without else cannot produce a value");
if_({},
- T == ValueType::kStmt ? Vector<ValueType>{}
- : VectorOf({ValueType::Primitive(T)}),
+ T == kStmt ? Vector<ValueType>{} : VectorOf({ValueType::Primitive(T)}),
type, data);
}
+ void try_block_helper(ValueType return_type, DataRange* data) {
+ bool has_catch_all = data->get<uint8_t>() % 2;
+ uint8_t num_catch =
+ data->get<uint8_t>() % (builder_->builder()->NumExceptions() + 1);
+ bool is_delegate =
+ num_catch == 0 && !has_catch_all && data->get<uint8_t>() % 2 == 0;
+ // Allow one more target than there are enclosing try blocks, for delegating
+ // to the caller.
+ uint8_t delegate_target = data->get<uint8_t>() % (try_blocks_.size() + 1);
+ bool is_unwind = num_catch == 0 && !has_catch_all && !is_delegate;
+
+ Vector<const ValueType> return_type_vec = return_type.kind() == kStmt
+ ? Vector<ValueType>{}
+ : VectorOf(&return_type, 1);
+ BlockScope block_scope(this, kExprTry, {}, return_type_vec, return_type_vec,
+ !is_delegate);
+ int control_depth = static_cast<int>(blocks_.size()) - 1;
+ try_blocks_.push_back(control_depth);
+ Generate(return_type, data);
+ try_blocks_.pop_back();
+ catch_blocks_.push_back(control_depth);
+ for (int i = 0; i < num_catch; ++i) {
+ const FunctionSig* exception_type =
+ builder_->builder()->GetExceptionType(i);
+ auto exception_type_vec = VectorOf(exception_type->parameters().begin(),
+ exception_type->parameter_count());
+ builder_->EmitWithU32V(kExprCatch, i);
+ ConsumeAndGenerate(exception_type_vec, return_type_vec, data);
+ }
+ if (has_catch_all) {
+ builder_->Emit(kExprCatchAll);
+ Generate(return_type, data);
+ }
+ if (is_delegate) {
+ DCHECK_GT(blocks_.size(), try_blocks_.size());
+ // If {delegate_target == try_blocks_.size()}, delegate to the caller.
+ int delegate_depth = delegate_target == try_blocks_.size()
+ ? static_cast<int>(blocks_.size()) - 2
+ : static_cast<int>(blocks_.size() - 2 -
+ try_blocks_[delegate_target]);
+ builder_->EmitWithU32V(kExprDelegate, delegate_depth);
+ }
+ catch_blocks_.pop_back();
+ if (is_unwind) {
+ builder_->Emit(kExprUnwind);
+ Generate(return_type, data);
+ }
+ }
+
+ template <ValueKind T>
+ void try_block(DataRange* data) {
+ try_block_helper(ValueType::Primitive(T), data);
+ }
+
void any_block(Vector<const ValueType> param_types,
Vector<const ValueType> return_types, DataRange* data) {
uint8_t block_type = data->get<uint8_t>() % 4;
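
The new try_block_helper above drives all the shapes of the Wasm exception-handling proposal from fuzzer input: try with catch clauses, try with catch_all, try-delegate (which suppresses the implicit end via the new emit_end flag on BlockScope), and try-unwind. The only non-obvious arithmetic is the delegate depth: it is counted relative to the current control stack, and choosing a target one past the last enclosing try means delegating to the caller. A small standalone sketch of that computation follows; the function name, parameters, and worked example are illustrative, not V8 internals.

  // Sketch of the delegate-depth arithmetic in try_block_helper; not V8 code.
  #include <cstddef>
  #include <iostream>
  #include <vector>

  int DelegateDepth(size_t num_blocks, const std::vector<int>& try_depths,
                    size_t target) {
    if (target == try_depths.size()) {
      // One past the enclosing tries: delegate all the way to the caller.
      return static_cast<int>(num_blocks) - 2;
    }
    return static_cast<int>(num_blocks) - 2 - try_depths[target];
  }

  int main() {
    // Function body at depth 0, an enclosing try recorded at depth 1, and the
    // current try block on top, so the control stack holds three entries.
    std::vector<int> enclosing_tries = {1};
    std::cout << DelegateDepth(3, enclosing_tries, 0) << "\n";  // prints 0
    std::cout << DelegateDepth(3, enclosing_tries, 1) << "\n";  // prints 1
    return 0;
  }
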
@@ -237,7 +293,7 @@ class WasmGenerator {
kExprBr, static_cast<uint32_t>(blocks_.size()) - 1 - target_block);
}
- template <ValueType::Kind wanted_type>
+ template <ValueKind wanted_type>
void br_if(DataRange* data) {
// There is always at least the block representing the function body.
DCHECK(!blocks_.empty());
@@ -249,7 +305,7 @@ class WasmGenerator {
builder_->EmitWithI32V(
kExprBrIf, static_cast<uint32_t>(blocks_.size()) - 1 - target_block);
ConsumeAndGenerate(break_types,
- wanted_type == ValueType::kStmt
+ wanted_type == kStmt
? Vector<ValueType>{}
: VectorOf({ValueType::Primitive(wanted_type)}),
data);
@@ -368,13 +424,13 @@ class WasmGenerator {
}
}
- template <WasmOpcode memory_op, ValueType::Kind... arg_types>
+ template <WasmOpcode memory_op, ValueKind... arg_types>
void memop(DataRange* data) {
const uint8_t align = data->get<uint8_t>() % (max_alignment(memory_op) + 1);
const uint32_t offset = data->get<uint32_t>();
// Generate the index and the arguments, if any.
- Generate<ValueType::kI32, arg_types...>(data);
+ Generate<kI32, arg_types...>(data);
if (WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(memory_op >> 8))) {
DCHECK(memory_op >> 8 == kAtomicPrefix || memory_op >> 8 == kSimdPrefix);
@@ -386,7 +442,7 @@ class WasmGenerator {
builder_->EmitU32V(offset);
}
- template <WasmOpcode Op, ValueType::Kind... Args>
+ template <WasmOpcode Op, ValueKind... Args>
void atomic_op(DataRange* data) {
const uint8_t align = data->get<uint8_t>() % (max_alignment(Op) + 1);
const uint32_t offset = data->get<uint32_t>();
@@ -398,7 +454,7 @@ class WasmGenerator {
builder_->EmitU32V(offset);
}
- template <WasmOpcode Op, ValueType::Kind... Args>
+ template <WasmOpcode Op, ValueKind... Args>
void op_with_prefix(DataRange* data) {
Generate<Args...>(data);
builder_->EmitWithPrefix(Op);
@@ -411,15 +467,22 @@ class WasmGenerator {
}
}
- template <WasmOpcode Op, int lanes, ValueType::Kind... Args>
+ template <WasmOpcode Op, int lanes, ValueKind... Args>
void simd_lane_op(DataRange* data) {
Generate<Args...>(data);
builder_->EmitWithPrefix(Op);
builder_->EmitByte(data->get<byte>() % lanes);
}
+ template <WasmOpcode Op, int lanes, ValueKind... Args>
+ void simd_lane_memop(DataRange* data) {
+ // Simd load/store instructions that have a lane immediate.
+ memop<Op, Args...>(data);
+ builder_->EmitByte(data->get<byte>() % lanes);
+ }
+
void simd_shuffle(DataRange* data) {
- Generate<ValueType::kS128, ValueType::kS128>(data);
+ Generate<kS128, kS128>(data);
builder_->EmitWithPrefix(kExprI8x16Shuffle);
for (int i = 0; i < kSimd128Size; i++) {
builder_->EmitByte(static_cast<uint8_t>(data->get<byte>() % 32));
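
The new simd_lane_memop helper above reuses memop for the regular memory-access immediates and then appends one extra lane-index byte, clamped with "% lanes" so it is always a valid lane for the given element width (16 lanes for 8-bit elements down to 2 for 64-bit ones). A hedged, self-contained sketch of that shape, with made-up emitter names and a flat byte encoding that stands in for the real LEB-encoded immediates:

  // Sketch only: the general shape of a SIMD lane load/store in the fuzzer,
  // i.e. normal memarg immediates followed by a clamped lane index.
  #include <cstdint>
  #include <iostream>
  #include <vector>

  struct SketchEmitter {
    std::vector<uint8_t> bytes;
    void EmitByte(uint8_t b) { bytes.push_back(b); }
  };

  void EmitLaneMemop(SketchEmitter* e, uint8_t align, uint8_t offset,
                     uint8_t random_byte, int lanes) {
    e->EmitByte(align);                // alignment immediate
    e->EmitByte(offset);               // offset immediate (LEB-encoded in reality)
    e->EmitByte(random_byte % lanes);  // lane index, always in [0, lanes)
  }

  int main() {
    SketchEmitter e;
    EmitLaneMemop(&e, 0, 8, 200, 16);
    std::cout << static_cast<int>(e.bytes.back()) << "\n";  // 200 % 16 == 8
    return 0;
  }
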
@@ -433,12 +496,12 @@ class WasmGenerator {
enum CallDirect : bool { kCallDirect = true, kCallIndirect = false };
- template <ValueType::Kind wanted_type>
+ template <ValueKind wanted_type>
void call(DataRange* data) {
call(data, ValueType::Primitive(wanted_type), kCallDirect);
}
- template <ValueType::Kind wanted_type>
+ template <ValueKind wanted_type>
void call_indirect(DataRange* data) {
call(data, ValueType::Primitive(wanted_type), kCallIndirect);
}
@@ -446,13 +509,13 @@ class WasmGenerator {
void Convert(ValueType src, ValueType dst) {
auto idx = [](ValueType t) -> int {
switch (t.kind()) {
- case ValueType::kI32:
+ case kI32:
return 0;
- case ValueType::kI64:
+ case kI64:
return 1;
- case ValueType::kF32:
+ case kF32:
return 2;
- case ValueType::kF64:
+ case kF64:
return 3;
default:
UNREACHABLE();
@@ -553,34 +616,32 @@ class WasmGenerator {
return {index, type};
}
- template <ValueType::Kind wanted_type>
+ template <ValueKind wanted_type>
void local_op(DataRange* data, WasmOpcode opcode) {
Var local = GetRandomLocal(data);
// If there are no locals and no parameters, just generate any value (if a
// value is needed), or do nothing.
if (!local.is_valid()) {
- if (wanted_type == ValueType::kStmt) return;
+ if (wanted_type == kStmt) return;
return Generate<wanted_type>(data);
}
if (opcode != kExprLocalGet) Generate(local.type, data);
builder_->EmitWithU32V(opcode, local.index);
- if (wanted_type != ValueType::kStmt && local.type.kind() != wanted_type) {
+ if (wanted_type != kStmt && local.type.kind() != wanted_type) {
Convert(local.type, ValueType::Primitive(wanted_type));
}
}
- template <ValueType::Kind wanted_type>
+ template <ValueKind wanted_type>
void get_local(DataRange* data) {
- static_assert(wanted_type != ValueType::kStmt, "illegal type");
+ static_assert(wanted_type != kStmt, "illegal type");
local_op<wanted_type>(data, kExprLocalGet);
}
- void set_local(DataRange* data) {
- local_op<ValueType::kStmt>(data, kExprLocalSet);
- }
+ void set_local(DataRange* data) { local_op<kStmt>(data, kExprLocalSet); }
- template <ValueType::Kind wanted_type>
+ template <ValueKind wanted_type>
void tee_local(DataRange* data) {
local_op<wanted_type>(data, kExprLocalTee);
}
@@ -608,14 +669,14 @@ class WasmGenerator {
return {index, type};
}
- template <ValueType::Kind wanted_type>
+ template <ValueKind wanted_type>
void global_op(DataRange* data) {
- constexpr bool is_set = wanted_type == ValueType::kStmt;
+ constexpr bool is_set = wanted_type == kStmt;
Var global = GetRandomGlobal(data, is_set);
// If there are no globals, just generate any value (if a value is needed),
// or do nothing.
if (!global.is_valid()) {
- if (wanted_type == ValueType::kStmt) return;
+ if (wanted_type == kStmt) return;
return Generate<wanted_type>(data);
}
@@ -627,25 +688,44 @@ class WasmGenerator {
}
}
- template <ValueType::Kind wanted_type>
+ template <ValueKind wanted_type>
void get_global(DataRange* data) {
- static_assert(wanted_type != ValueType::kStmt, "illegal type");
+ static_assert(wanted_type != kStmt, "illegal type");
global_op<wanted_type>(data);
}
- template <ValueType::Kind select_type>
+ template <ValueKind select_type>
void select_with_type(DataRange* data) {
- static_assert(select_type != ValueType::kStmt, "illegal type for select");
- Generate<select_type, select_type, ValueType::kI32>(data);
+ static_assert(select_type != kStmt, "illegal type for select");
+ Generate<select_type, select_type, kI32>(data);
// num_types is always 1.
uint8_t num_types = 1;
builder_->EmitWithU8U8(kExprSelectWithType, num_types,
ValueType::Primitive(select_type).value_type_code());
}
- void set_global(DataRange* data) { global_op<ValueType::kStmt>(data); }
+ void set_global(DataRange* data) { global_op<kStmt>(data); }
+
+ void throw_or_rethrow(DataRange* data) {
+ bool rethrow = data->get<uint8_t>() % 2;
+ if (rethrow && !catch_blocks_.empty()) {
+ int control_depth = static_cast<int>(blocks_.size() - 1);
+ int catch_index =
+ data->get<uint8_t>() % static_cast<int>(catch_blocks_.size());
+ builder_->EmitWithU32V(kExprRethrow,
+ control_depth - catch_blocks_[catch_index]);
+ } else {
+ int tag = data->get<uint8_t>() % builder_->builder()->NumExceptions();
+ FunctionSig* exception_sig = builder_->builder()->GetExceptionType(tag);
+ Vector<const ValueType> exception_types(
+ exception_sig->parameters().begin(),
+ exception_sig->parameter_count());
+ Generate(exception_types, data);
+ builder_->EmitWithU32V(kExprThrow, tag);
+ }
+ }
- template <ValueType::Kind... Types>
+ template <ValueKind... Types>
void sequence(DataRange* data) {
Generate<Types...>(data);
}
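
The throw_or_rethrow helper added above either rethrows into one of the catch blocks currently on the stack, encoded as a depth relative to the current control depth, or throws a fresh exception after generating arguments that match the chosen tag's signature. The rethrow-depth arithmetic mirrors the sketch below; the names and numbers are illustrative only.

  // Sketch of the rethrow-depth computation; not V8 code.
  #include <cstddef>
  #include <iostream>
  #include <vector>

  int RethrowDepth(size_t num_blocks, const std::vector<int>& catch_depths,
                   size_t catch_index) {
    int control_depth = static_cast<int>(num_blocks) - 1;
    return control_depth - catch_depths[catch_index];
  }

  int main() {
    // A catch block was recorded at control depth 1; two more blocks were
    // opened since, so the current control depth is 3.
    std::vector<int> catch_depths = {1};
    std::cout << RethrowDepth(4, catch_depths, 0) << "\n";  // prints 2
    return 0;
  }
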
@@ -704,10 +784,10 @@ class WasmGenerator {
void Generate(ValueType type, DataRange* data);
- template <ValueType::Kind T>
+ template <ValueKind T>
void Generate(DataRange* data);
- template <ValueType::Kind T1, ValueType::Kind T2, ValueType::Kind... Ts>
+ template <ValueKind T1, ValueKind T2, ValueKind... Ts>
void Generate(DataRange* data) {
// TODO(clemensb): Implement a more even split.
auto first_data = data->split();
@@ -720,6 +800,7 @@ class WasmGenerator {
void ConsumeAndGenerate(Vector<const ValueType> parameter_types,
Vector<const ValueType> return_types,
DataRange* data);
+ bool HasSimd() { return has_simd_; }
private:
WasmFunctionBuilder* builder_;
@@ -729,6 +810,9 @@ class WasmGenerator {
std::vector<ValueType> globals_;
std::vector<uint8_t> mutable_globals_; // indexes into {globals_}.
uint32_t recursion_depth = 0;
+ std::vector<int> try_blocks_;
+ std::vector<int> catch_blocks_;
+ bool has_simd_;
static constexpr uint32_t kMaxRecursionDepth = 64;
@@ -738,66 +822,69 @@ class WasmGenerator {
};
template <>
-void WasmGenerator::block<ValueType::kStmt>(DataRange* data) {
+void WasmGenerator::block<kStmt>(DataRange* data) {
block({}, {}, data);
}
template <>
-void WasmGenerator::loop<ValueType::kStmt>(DataRange* data) {
+void WasmGenerator::loop<kStmt>(DataRange* data) {
loop({}, {}, data);
}
template <>
-void WasmGenerator::Generate<ValueType::kStmt>(DataRange* data) {
+void WasmGenerator::Generate<kStmt>(DataRange* data) {
GeneratorRecursionScope rec_scope(this);
if (recursion_limit_reached() || data->size() == 0) return;
constexpr GenerateFn alternatives[] = {
- &WasmGenerator::sequence<ValueType::kStmt, ValueType::kStmt>,
- &WasmGenerator::sequence<ValueType::kStmt, ValueType::kStmt,
- ValueType::kStmt, ValueType::kStmt>,
- &WasmGenerator::sequence<ValueType::kStmt, ValueType::kStmt,
- ValueType::kStmt, ValueType::kStmt,
- ValueType::kStmt, ValueType::kStmt,
- ValueType::kStmt, ValueType::kStmt>,
- &WasmGenerator::block<ValueType::kStmt>,
- &WasmGenerator::loop<ValueType::kStmt>,
- &WasmGenerator::if_<ValueType::kStmt, kIf>,
- &WasmGenerator::if_<ValueType::kStmt, kIfElse>,
+ &WasmGenerator::sequence<kStmt, kStmt>,
+ &WasmGenerator::sequence<kStmt, kStmt, kStmt, kStmt>,
+ &WasmGenerator::sequence<kStmt, kStmt, kStmt, kStmt, kStmt, kStmt, kStmt,
+ kStmt>,
+ &WasmGenerator::block<kStmt>,
+ &WasmGenerator::loop<kStmt>,
+ &WasmGenerator::if_<kStmt, kIf>,
+ &WasmGenerator::if_<kStmt, kIfElse>,
&WasmGenerator::br,
- &WasmGenerator::br_if<ValueType::kStmt>,
-
- &WasmGenerator::memop<kExprI32StoreMem, ValueType::kI32>,
- &WasmGenerator::memop<kExprI32StoreMem8, ValueType::kI32>,
- &WasmGenerator::memop<kExprI32StoreMem16, ValueType::kI32>,
- &WasmGenerator::memop<kExprI64StoreMem, ValueType::kI64>,
- &WasmGenerator::memop<kExprI64StoreMem8, ValueType::kI64>,
- &WasmGenerator::memop<kExprI64StoreMem16, ValueType::kI64>,
- &WasmGenerator::memop<kExprI64StoreMem32, ValueType::kI64>,
- &WasmGenerator::memop<kExprF32StoreMem, ValueType::kF32>,
- &WasmGenerator::memop<kExprF64StoreMem, ValueType::kF64>,
- &WasmGenerator::memop<kExprI32AtomicStore, ValueType::kI32>,
- &WasmGenerator::memop<kExprI32AtomicStore8U, ValueType::kI32>,
- &WasmGenerator::memop<kExprI32AtomicStore16U, ValueType::kI32>,
- &WasmGenerator::memop<kExprI64AtomicStore, ValueType::kI64>,
- &WasmGenerator::memop<kExprI64AtomicStore8U, ValueType::kI64>,
- &WasmGenerator::memop<kExprI64AtomicStore16U, ValueType::kI64>,
- &WasmGenerator::memop<kExprI64AtomicStore32U, ValueType::kI64>,
- &WasmGenerator::memop<kExprS128StoreMem, ValueType::kS128>,
+ &WasmGenerator::br_if<kStmt>,
+
+ &WasmGenerator::memop<kExprI32StoreMem, kI32>,
+ &WasmGenerator::memop<kExprI32StoreMem8, kI32>,
+ &WasmGenerator::memop<kExprI32StoreMem16, kI32>,
+ &WasmGenerator::memop<kExprI64StoreMem, kI64>,
+ &WasmGenerator::memop<kExprI64StoreMem8, kI64>,
+ &WasmGenerator::memop<kExprI64StoreMem16, kI64>,
+ &WasmGenerator::memop<kExprI64StoreMem32, kI64>,
+ &WasmGenerator::memop<kExprF32StoreMem, kF32>,
+ &WasmGenerator::memop<kExprF64StoreMem, kF64>,
+ &WasmGenerator::memop<kExprI32AtomicStore, kI32>,
+ &WasmGenerator::memop<kExprI32AtomicStore8U, kI32>,
+ &WasmGenerator::memop<kExprI32AtomicStore16U, kI32>,
+ &WasmGenerator::memop<kExprI64AtomicStore, kI64>,
+ &WasmGenerator::memop<kExprI64AtomicStore8U, kI64>,
+ &WasmGenerator::memop<kExprI64AtomicStore16U, kI64>,
+ &WasmGenerator::memop<kExprI64AtomicStore32U, kI64>,
+ &WasmGenerator::memop<kExprS128StoreMem, kS128>,
+ &WasmGenerator::simd_lane_memop<kExprS128Store8Lane, 16, kS128>,
+ &WasmGenerator::simd_lane_memop<kExprS128Store16Lane, 8, kS128>,
+ &WasmGenerator::simd_lane_memop<kExprS128Store32Lane, 4, kS128>,
+ &WasmGenerator::simd_lane_memop<kExprS128Store64Lane, 2, kS128>,
&WasmGenerator::drop,
- &WasmGenerator::call<ValueType::kStmt>,
- &WasmGenerator::call_indirect<ValueType::kStmt>,
+ &WasmGenerator::call<kStmt>,
+ &WasmGenerator::call_indirect<kStmt>,
&WasmGenerator::set_local,
- &WasmGenerator::set_global};
+ &WasmGenerator::set_global,
+ &WasmGenerator::throw_or_rethrow,
+ &WasmGenerator::try_block<kStmt>};
GenerateOneOf(alternatives, data);
}
template <>
-void WasmGenerator::Generate<ValueType::kI32>(DataRange* data) {
+void WasmGenerator::Generate<kI32>(DataRange* data) {
GeneratorRecursionScope rec_scope(this);
if (recursion_limit_reached() || data->size() <= 1) {
builder_->EmitI32Const(data->get<uint32_t>());
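
Each Generate<...> specialization in this file ends by handing a table of member-function pointers to GenerateOneOf(alternatives, data); the new try_block, throw_or_rethrow, and simd_lane_memop entries simply join those tables. GenerateOneOf's body is not part of this hunk, so the sketch below only illustrates the usual pattern of spending one byte of fuzzer input to pick an alternative, under assumed names and a simplified signature.

  // Sketch only: picking one generator alternative from fuzzer input. The
  // class, the alternatives, and the plain-pointer "data" are assumptions.
  #include <cstdint>
  #include <iostream>
  #include <vector>

  class SketchGenerator {
   public:
    using GenerateFn = void (SketchGenerator::*)(const uint8_t*);

    void EmitNop(const uint8_t*) { std::cout << "nop\n"; }
    void EmitConst(const uint8_t*) { std::cout << "const\n"; }

    void GenerateOneOf(const std::vector<GenerateFn>& alternatives,
                       const uint8_t* data) {
      // One input byte selects the alternative; the callee consumes further
      // bytes for its own operands and immediates.
      GenerateFn chosen = alternatives[data[0] % alternatives.size()];
      (this->*chosen)(data + 1);
    }
  };

  int main() {
    SketchGenerator gen;
    const uint8_t input[] = {3, 0};
    gen.GenerateOneOf({&SketchGenerator::EmitNop, &SketchGenerator::EmitConst},
                      input);  // 3 % 2 == 1 -> prints "const"
    return 0;
  }
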
@@ -810,75 +897,74 @@ void WasmGenerator::Generate<ValueType::kI32>(DataRange* data) {
&WasmGenerator::i32_const<3>,
&WasmGenerator::i32_const<4>,
- &WasmGenerator::sequence<ValueType::kI32, ValueType::kStmt>,
- &WasmGenerator::sequence<ValueType::kStmt, ValueType::kI32>,
- &WasmGenerator::sequence<ValueType::kStmt, ValueType::kI32,
- ValueType::kStmt>,
-
- &WasmGenerator::op<kExprI32Eqz, ValueType::kI32>,
- &WasmGenerator::op<kExprI32Eq, ValueType::kI32, ValueType::kI32>,
- &WasmGenerator::op<kExprI32Ne, ValueType::kI32, ValueType::kI32>,
- &WasmGenerator::op<kExprI32LtS, ValueType::kI32, ValueType::kI32>,
- &WasmGenerator::op<kExprI32LtU, ValueType::kI32, ValueType::kI32>,
- &WasmGenerator::op<kExprI32GeS, ValueType::kI32, ValueType::kI32>,
- &WasmGenerator::op<kExprI32GeU, ValueType::kI32, ValueType::kI32>,
-
- &WasmGenerator::op<kExprI64Eqz, ValueType::kI64>,
- &WasmGenerator::op<kExprI64Eq, ValueType::kI64, ValueType::kI64>,
- &WasmGenerator::op<kExprI64Ne, ValueType::kI64, ValueType::kI64>,
- &WasmGenerator::op<kExprI64LtS, ValueType::kI64, ValueType::kI64>,
- &WasmGenerator::op<kExprI64LtU, ValueType::kI64, ValueType::kI64>,
- &WasmGenerator::op<kExprI64GeS, ValueType::kI64, ValueType::kI64>,
- &WasmGenerator::op<kExprI64GeU, ValueType::kI64, ValueType::kI64>,
-
- &WasmGenerator::op<kExprF32Eq, ValueType::kF32, ValueType::kF32>,
- &WasmGenerator::op<kExprF32Ne, ValueType::kF32, ValueType::kF32>,
- &WasmGenerator::op<kExprF32Lt, ValueType::kF32, ValueType::kF32>,
- &WasmGenerator::op<kExprF32Ge, ValueType::kF32, ValueType::kF32>,
-
- &WasmGenerator::op<kExprF64Eq, ValueType::kF64, ValueType::kF64>,
- &WasmGenerator::op<kExprF64Ne, ValueType::kF64, ValueType::kF64>,
- &WasmGenerator::op<kExprF64Lt, ValueType::kF64, ValueType::kF64>,
- &WasmGenerator::op<kExprF64Ge, ValueType::kF64, ValueType::kF64>,
-
- &WasmGenerator::op<kExprI32Add, ValueType::kI32, ValueType::kI32>,
- &WasmGenerator::op<kExprI32Sub, ValueType::kI32, ValueType::kI32>,
- &WasmGenerator::op<kExprI32Mul, ValueType::kI32, ValueType::kI32>,
-
- &WasmGenerator::op<kExprI32DivS, ValueType::kI32, ValueType::kI32>,
- &WasmGenerator::op<kExprI32DivU, ValueType::kI32, ValueType::kI32>,
- &WasmGenerator::op<kExprI32RemS, ValueType::kI32, ValueType::kI32>,
- &WasmGenerator::op<kExprI32RemU, ValueType::kI32, ValueType::kI32>,
-
- &WasmGenerator::op<kExprI32And, ValueType::kI32, ValueType::kI32>,
- &WasmGenerator::op<kExprI32Ior, ValueType::kI32, ValueType::kI32>,
- &WasmGenerator::op<kExprI32Xor, ValueType::kI32, ValueType::kI32>,
- &WasmGenerator::op<kExprI32Shl, ValueType::kI32, ValueType::kI32>,
- &WasmGenerator::op<kExprI32ShrU, ValueType::kI32, ValueType::kI32>,
- &WasmGenerator::op<kExprI32ShrS, ValueType::kI32, ValueType::kI32>,
- &WasmGenerator::op<kExprI32Ror, ValueType::kI32, ValueType::kI32>,
- &WasmGenerator::op<kExprI32Rol, ValueType::kI32, ValueType::kI32>,
-
- &WasmGenerator::op<kExprI32Clz, ValueType::kI32>,
- &WasmGenerator::op<kExprI32Ctz, ValueType::kI32>,
- &WasmGenerator::op<kExprI32Popcnt, ValueType::kI32>,
-
- &WasmGenerator::op<kExprI32ConvertI64, ValueType::kI64>,
- &WasmGenerator::op<kExprI32SConvertF32, ValueType::kF32>,
- &WasmGenerator::op<kExprI32UConvertF32, ValueType::kF32>,
- &WasmGenerator::op<kExprI32SConvertF64, ValueType::kF64>,
- &WasmGenerator::op<kExprI32UConvertF64, ValueType::kF64>,
- &WasmGenerator::op<kExprI32ReinterpretF32, ValueType::kF32>,
-
- &WasmGenerator::op_with_prefix<kExprI32SConvertSatF32, ValueType::kF32>,
- &WasmGenerator::op_with_prefix<kExprI32UConvertSatF32, ValueType::kF32>,
- &WasmGenerator::op_with_prefix<kExprI32SConvertSatF64, ValueType::kF64>,
- &WasmGenerator::op_with_prefix<kExprI32UConvertSatF64, ValueType::kF64>,
-
- &WasmGenerator::block<ValueType::kI32>,
- &WasmGenerator::loop<ValueType::kI32>,
- &WasmGenerator::if_<ValueType::kI32, kIfElse>,
- &WasmGenerator::br_if<ValueType::kI32>,
+ &WasmGenerator::sequence<kI32, kStmt>,
+ &WasmGenerator::sequence<kStmt, kI32>,
+ &WasmGenerator::sequence<kStmt, kI32, kStmt>,
+
+ &WasmGenerator::op<kExprI32Eqz, kI32>,
+ &WasmGenerator::op<kExprI32Eq, kI32, kI32>,
+ &WasmGenerator::op<kExprI32Ne, kI32, kI32>,
+ &WasmGenerator::op<kExprI32LtS, kI32, kI32>,
+ &WasmGenerator::op<kExprI32LtU, kI32, kI32>,
+ &WasmGenerator::op<kExprI32GeS, kI32, kI32>,
+ &WasmGenerator::op<kExprI32GeU, kI32, kI32>,
+
+ &WasmGenerator::op<kExprI64Eqz, kI64>,
+ &WasmGenerator::op<kExprI64Eq, kI64, kI64>,
+ &WasmGenerator::op<kExprI64Ne, kI64, kI64>,
+ &WasmGenerator::op<kExprI64LtS, kI64, kI64>,
+ &WasmGenerator::op<kExprI64LtU, kI64, kI64>,
+ &WasmGenerator::op<kExprI64GeS, kI64, kI64>,
+ &WasmGenerator::op<kExprI64GeU, kI64, kI64>,
+
+ &WasmGenerator::op<kExprF32Eq, kF32, kF32>,
+ &WasmGenerator::op<kExprF32Ne, kF32, kF32>,
+ &WasmGenerator::op<kExprF32Lt, kF32, kF32>,
+ &WasmGenerator::op<kExprF32Ge, kF32, kF32>,
+
+ &WasmGenerator::op<kExprF64Eq, kF64, kF64>,
+ &WasmGenerator::op<kExprF64Ne, kF64, kF64>,
+ &WasmGenerator::op<kExprF64Lt, kF64, kF64>,
+ &WasmGenerator::op<kExprF64Ge, kF64, kF64>,
+
+ &WasmGenerator::op<kExprI32Add, kI32, kI32>,
+ &WasmGenerator::op<kExprI32Sub, kI32, kI32>,
+ &WasmGenerator::op<kExprI32Mul, kI32, kI32>,
+
+ &WasmGenerator::op<kExprI32DivS, kI32, kI32>,
+ &WasmGenerator::op<kExprI32DivU, kI32, kI32>,
+ &WasmGenerator::op<kExprI32RemS, kI32, kI32>,
+ &WasmGenerator::op<kExprI32RemU, kI32, kI32>,
+
+ &WasmGenerator::op<kExprI32And, kI32, kI32>,
+ &WasmGenerator::op<kExprI32Ior, kI32, kI32>,
+ &WasmGenerator::op<kExprI32Xor, kI32, kI32>,
+ &WasmGenerator::op<kExprI32Shl, kI32, kI32>,
+ &WasmGenerator::op<kExprI32ShrU, kI32, kI32>,
+ &WasmGenerator::op<kExprI32ShrS, kI32, kI32>,
+ &WasmGenerator::op<kExprI32Ror, kI32, kI32>,
+ &WasmGenerator::op<kExprI32Rol, kI32, kI32>,
+
+ &WasmGenerator::op<kExprI32Clz, kI32>,
+ &WasmGenerator::op<kExprI32Ctz, kI32>,
+ &WasmGenerator::op<kExprI32Popcnt, kI32>,
+
+ &WasmGenerator::op<kExprI32ConvertI64, kI64>,
+ &WasmGenerator::op<kExprI32SConvertF32, kF32>,
+ &WasmGenerator::op<kExprI32UConvertF32, kF32>,
+ &WasmGenerator::op<kExprI32SConvertF64, kF64>,
+ &WasmGenerator::op<kExprI32UConvertF64, kF64>,
+ &WasmGenerator::op<kExprI32ReinterpretF32, kF32>,
+
+ &WasmGenerator::op_with_prefix<kExprI32SConvertSatF32, kF32>,
+ &WasmGenerator::op_with_prefix<kExprI32UConvertSatF32, kF32>,
+ &WasmGenerator::op_with_prefix<kExprI32SConvertSatF64, kF64>,
+ &WasmGenerator::op_with_prefix<kExprI32UConvertSatF64, kF64>,
+
+ &WasmGenerator::block<kI32>,
+ &WasmGenerator::loop<kI32>,
+ &WasmGenerator::if_<kI32, kIfElse>,
+ &WasmGenerator::br_if<kI32>,
&WasmGenerator::memop<kExprI32LoadMem>,
&WasmGenerator::memop<kExprI32LoadMem8S>,
@@ -889,86 +975,64 @@ void WasmGenerator::Generate<ValueType::kI32>(DataRange* data) {
&WasmGenerator::memop<kExprI32AtomicLoad8U>,
&WasmGenerator::memop<kExprI32AtomicLoad16U>,
- &WasmGenerator::atomic_op<kExprI32AtomicAdd, ValueType::kI32,
- ValueType::kI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicSub, ValueType::kI32,
- ValueType::kI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicAnd, ValueType::kI32,
- ValueType::kI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicOr, ValueType::kI32,
- ValueType::kI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicXor, ValueType::kI32,
- ValueType::kI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicExchange, ValueType::kI32,
- ValueType::kI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicCompareExchange, ValueType::kI32,
- ValueType::kI32, ValueType::kI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicAdd8U, ValueType::kI32,
- ValueType::kI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicSub8U, ValueType::kI32,
- ValueType::kI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicAnd8U, ValueType::kI32,
- ValueType::kI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicOr8U, ValueType::kI32,
- ValueType::kI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicXor8U, ValueType::kI32,
- ValueType::kI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicExchange8U, ValueType::kI32,
- ValueType::kI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicCompareExchange8U,
- ValueType::kI32, ValueType::kI32,
- ValueType::kI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicAdd16U, ValueType::kI32,
- ValueType::kI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicSub16U, ValueType::kI32,
- ValueType::kI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicAnd16U, ValueType::kI32,
- ValueType::kI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicOr16U, ValueType::kI32,
- ValueType::kI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicXor16U, ValueType::kI32,
- ValueType::kI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicExchange16U, ValueType::kI32,
- ValueType::kI32>,
- &WasmGenerator::atomic_op<kExprI32AtomicCompareExchange16U,
- ValueType::kI32, ValueType::kI32,
- ValueType::kI32>,
-
- &WasmGenerator::op_with_prefix<kExprV8x16AnyTrue, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprV8x16AllTrue, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16BitMask, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprV16x8AnyTrue, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprV16x8AllTrue, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8BitMask, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprV32x4AnyTrue, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprV32x4AllTrue, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4BitMask, ValueType::kS128>,
- &WasmGenerator::simd_lane_op<kExprI8x16ExtractLaneS, 16,
- ValueType::kS128>,
- &WasmGenerator::simd_lane_op<kExprI8x16ExtractLaneU, 16,
- ValueType::kS128>,
- &WasmGenerator::simd_lane_op<kExprI16x8ExtractLaneS, 8, ValueType::kS128>,
- &WasmGenerator::simd_lane_op<kExprI16x8ExtractLaneU, 8, ValueType::kS128>,
- &WasmGenerator::simd_lane_op<kExprI32x4ExtractLane, 4, ValueType::kS128>,
+ &WasmGenerator::atomic_op<kExprI32AtomicAdd, kI32, kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicSub, kI32, kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicAnd, kI32, kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicOr, kI32, kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicXor, kI32, kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicExchange, kI32, kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicCompareExchange, kI32, kI32,
+ kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicAdd8U, kI32, kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicSub8U, kI32, kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicAnd8U, kI32, kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicOr8U, kI32, kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicXor8U, kI32, kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicExchange8U, kI32, kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicCompareExchange8U, kI32, kI32,
+ kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicAdd16U, kI32, kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicSub16U, kI32, kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicAnd16U, kI32, kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicOr16U, kI32, kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicXor16U, kI32, kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicExchange16U, kI32, kI32>,
+ &WasmGenerator::atomic_op<kExprI32AtomicCompareExchange16U, kI32, kI32,
+ kI32>,
+
+ &WasmGenerator::op_with_prefix<kExprV128AnyTrue, kS128>,
+ &WasmGenerator::op_with_prefix<kExprV8x16AllTrue, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16BitMask, kS128>,
+ &WasmGenerator::op_with_prefix<kExprV16x8AllTrue, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8BitMask, kS128>,
+ &WasmGenerator::op_with_prefix<kExprV32x4AllTrue, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4BitMask, kS128>,
+ &WasmGenerator::op_with_prefix<kExprV64x2AllTrue, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI64x2BitMask, kS128>,
+ &WasmGenerator::simd_lane_op<kExprI8x16ExtractLaneS, 16, kS128>,
+ &WasmGenerator::simd_lane_op<kExprI8x16ExtractLaneU, 16, kS128>,
+ &WasmGenerator::simd_lane_op<kExprI16x8ExtractLaneS, 8, kS128>,
+ &WasmGenerator::simd_lane_op<kExprI16x8ExtractLaneU, 8, kS128>,
+ &WasmGenerator::simd_lane_op<kExprI32x4ExtractLane, 4, kS128>,
&WasmGenerator::current_memory,
&WasmGenerator::grow_memory,
- &WasmGenerator::get_local<ValueType::kI32>,
- &WasmGenerator::tee_local<ValueType::kI32>,
- &WasmGenerator::get_global<ValueType::kI32>,
- &WasmGenerator::op<kExprSelect, ValueType::kI32, ValueType::kI32,
- ValueType::kI32>,
- &WasmGenerator::select_with_type<ValueType::kI32>,
+ &WasmGenerator::get_local<kI32>,
+ &WasmGenerator::tee_local<kI32>,
+ &WasmGenerator::get_global<kI32>,
+ &WasmGenerator::op<kExprSelect, kI32, kI32, kI32>,
+ &WasmGenerator::select_with_type<kI32>,
- &WasmGenerator::call<ValueType::kI32>,
- &WasmGenerator::call_indirect<ValueType::kI32>};
+ &WasmGenerator::call<kI32>,
+ &WasmGenerator::call_indirect<kI32>,
+ &WasmGenerator::try_block<kI32>};
GenerateOneOf(alternatives, data);
}
template <>
-void WasmGenerator::Generate<ValueType::kI64>(DataRange* data) {
+void WasmGenerator::Generate<kI64>(DataRange* data) {
GeneratorRecursionScope rec_scope(this);
if (recursion_limit_reached() || data->size() <= 1) {
builder_->EmitI64Const(data->get<int64_t>());
@@ -985,42 +1049,41 @@ void WasmGenerator::Generate<ValueType::kI64>(DataRange* data) {
&WasmGenerator::i64_const<7>,
&WasmGenerator::i64_const<8>,
- &WasmGenerator::sequence<ValueType::kI64, ValueType::kStmt>,
- &WasmGenerator::sequence<ValueType::kStmt, ValueType::kI64>,
- &WasmGenerator::sequence<ValueType::kStmt, ValueType::kI64,
- ValueType::kStmt>,
-
- &WasmGenerator::op<kExprI64Add, ValueType::kI64, ValueType::kI64>,
- &WasmGenerator::op<kExprI64Sub, ValueType::kI64, ValueType::kI64>,
- &WasmGenerator::op<kExprI64Mul, ValueType::kI64, ValueType::kI64>,
-
- &WasmGenerator::op<kExprI64DivS, ValueType::kI64, ValueType::kI64>,
- &WasmGenerator::op<kExprI64DivU, ValueType::kI64, ValueType::kI64>,
- &WasmGenerator::op<kExprI64RemS, ValueType::kI64, ValueType::kI64>,
- &WasmGenerator::op<kExprI64RemU, ValueType::kI64, ValueType::kI64>,
-
- &WasmGenerator::op<kExprI64And, ValueType::kI64, ValueType::kI64>,
- &WasmGenerator::op<kExprI64Ior, ValueType::kI64, ValueType::kI64>,
- &WasmGenerator::op<kExprI64Xor, ValueType::kI64, ValueType::kI64>,
- &WasmGenerator::op<kExprI64Shl, ValueType::kI64, ValueType::kI64>,
- &WasmGenerator::op<kExprI64ShrU, ValueType::kI64, ValueType::kI64>,
- &WasmGenerator::op<kExprI64ShrS, ValueType::kI64, ValueType::kI64>,
- &WasmGenerator::op<kExprI64Ror, ValueType::kI64, ValueType::kI64>,
- &WasmGenerator::op<kExprI64Rol, ValueType::kI64, ValueType::kI64>,
-
- &WasmGenerator::op<kExprI64Clz, ValueType::kI64>,
- &WasmGenerator::op<kExprI64Ctz, ValueType::kI64>,
- &WasmGenerator::op<kExprI64Popcnt, ValueType::kI64>,
-
- &WasmGenerator::op_with_prefix<kExprI64SConvertSatF32, ValueType::kF32>,
- &WasmGenerator::op_with_prefix<kExprI64UConvertSatF32, ValueType::kF32>,
- &WasmGenerator::op_with_prefix<kExprI64SConvertSatF64, ValueType::kF64>,
- &WasmGenerator::op_with_prefix<kExprI64UConvertSatF64, ValueType::kF64>,
-
- &WasmGenerator::block<ValueType::kI64>,
- &WasmGenerator::loop<ValueType::kI64>,
- &WasmGenerator::if_<ValueType::kI64, kIfElse>,
- &WasmGenerator::br_if<ValueType::kI64>,
+ &WasmGenerator::sequence<kI64, kStmt>,
+ &WasmGenerator::sequence<kStmt, kI64>,
+ &WasmGenerator::sequence<kStmt, kI64, kStmt>,
+
+ &WasmGenerator::op<kExprI64Add, kI64, kI64>,
+ &WasmGenerator::op<kExprI64Sub, kI64, kI64>,
+ &WasmGenerator::op<kExprI64Mul, kI64, kI64>,
+
+ &WasmGenerator::op<kExprI64DivS, kI64, kI64>,
+ &WasmGenerator::op<kExprI64DivU, kI64, kI64>,
+ &WasmGenerator::op<kExprI64RemS, kI64, kI64>,
+ &WasmGenerator::op<kExprI64RemU, kI64, kI64>,
+
+ &WasmGenerator::op<kExprI64And, kI64, kI64>,
+ &WasmGenerator::op<kExprI64Ior, kI64, kI64>,
+ &WasmGenerator::op<kExprI64Xor, kI64, kI64>,
+ &WasmGenerator::op<kExprI64Shl, kI64, kI64>,
+ &WasmGenerator::op<kExprI64ShrU, kI64, kI64>,
+ &WasmGenerator::op<kExprI64ShrS, kI64, kI64>,
+ &WasmGenerator::op<kExprI64Ror, kI64, kI64>,
+ &WasmGenerator::op<kExprI64Rol, kI64, kI64>,
+
+ &WasmGenerator::op<kExprI64Clz, kI64>,
+ &WasmGenerator::op<kExprI64Ctz, kI64>,
+ &WasmGenerator::op<kExprI64Popcnt, kI64>,
+
+ &WasmGenerator::op_with_prefix<kExprI64SConvertSatF32, kF32>,
+ &WasmGenerator::op_with_prefix<kExprI64UConvertSatF32, kF32>,
+ &WasmGenerator::op_with_prefix<kExprI64SConvertSatF64, kF64>,
+ &WasmGenerator::op_with_prefix<kExprI64UConvertSatF64, kF64>,
+
+ &WasmGenerator::block<kI64>,
+ &WasmGenerator::loop<kI64>,
+ &WasmGenerator::if_<kI64, kIfElse>,
+ &WasmGenerator::br_if<kI64>,
&WasmGenerator::memop<kExprI64LoadMem>,
&WasmGenerator::memop<kExprI64LoadMem8S>,
@@ -1034,83 +1097,56 @@ void WasmGenerator::Generate<ValueType::kI64>(DataRange* data) {
&WasmGenerator::memop<kExprI64AtomicLoad16U>,
&WasmGenerator::memop<kExprI64AtomicLoad32U>,
- &WasmGenerator::atomic_op<kExprI64AtomicAdd, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicSub, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicAnd, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicOr, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicXor, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicExchange, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicCompareExchange, ValueType::kI32,
- ValueType::kI64, ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicAdd8U, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicSub8U, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicAnd8U, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicOr8U, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicXor8U, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicExchange8U, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicCompareExchange8U,
- ValueType::kI32, ValueType::kI64,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicAdd16U, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicSub16U, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicAnd16U, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicOr16U, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicXor16U, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicExchange16U, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicCompareExchange16U,
- ValueType::kI32, ValueType::kI64,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicAdd32U, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicSub32U, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicAnd32U, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicOr32U, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicXor32U, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicExchange32U, ValueType::kI32,
- ValueType::kI64>,
- &WasmGenerator::atomic_op<kExprI64AtomicCompareExchange32U,
- ValueType::kI32, ValueType::kI64,
- ValueType::kI64>,
-
- &WasmGenerator::simd_lane_op<kExprI64x2ExtractLane, 2, ValueType::kS128>,
-
- &WasmGenerator::get_local<ValueType::kI64>,
- &WasmGenerator::tee_local<ValueType::kI64>,
- &WasmGenerator::get_global<ValueType::kI64>,
- &WasmGenerator::op<kExprSelect, ValueType::kI64, ValueType::kI64,
- ValueType::kI32>,
- &WasmGenerator::select_with_type<ValueType::kI64>,
-
- &WasmGenerator::call<ValueType::kI64>,
- &WasmGenerator::call_indirect<ValueType::kI64>};
+ &WasmGenerator::atomic_op<kExprI64AtomicAdd, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicSub, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicAnd, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicOr, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicXor, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicExchange, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicCompareExchange, kI32, kI64,
+ kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicAdd8U, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicSub8U, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicAnd8U, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicOr8U, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicXor8U, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicExchange8U, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicCompareExchange8U, kI32, kI64,
+ kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicAdd16U, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicSub16U, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicAnd16U, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicOr16U, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicXor16U, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicExchange16U, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicCompareExchange16U, kI32, kI64,
+ kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicAdd32U, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicSub32U, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicAnd32U, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicOr32U, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicXor32U, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicExchange32U, kI32, kI64>,
+ &WasmGenerator::atomic_op<kExprI64AtomicCompareExchange32U, kI32, kI64,
+ kI64>,
+
+ &WasmGenerator::simd_lane_op<kExprI64x2ExtractLane, 2, kS128>,
+
+ &WasmGenerator::get_local<kI64>,
+ &WasmGenerator::tee_local<kI64>,
+ &WasmGenerator::get_global<kI64>,
+ &WasmGenerator::op<kExprSelect, kI64, kI64, kI32>,
+ &WasmGenerator::select_with_type<kI64>,
+
+ &WasmGenerator::call<kI64>,
+ &WasmGenerator::call_indirect<kI64>,
+ &WasmGenerator::try_block<kI64>};
GenerateOneOf(alternatives, data);
}
template <>
-void WasmGenerator::Generate<ValueType::kF32>(DataRange* data) {
+void WasmGenerator::Generate<kF32>(DataRange* data) {
GeneratorRecursionScope rec_scope(this);
if (recursion_limit_reached() || data->size() <= sizeof(float)) {
builder_->EmitF32Const(data->get<float>());
@@ -1118,57 +1154,56 @@ void WasmGenerator::Generate<ValueType::kF32>(DataRange* data) {
}
constexpr GenerateFn alternatives[] = {
- &WasmGenerator::sequence<ValueType::kF32, ValueType::kStmt>,
- &WasmGenerator::sequence<ValueType::kStmt, ValueType::kF32>,
- &WasmGenerator::sequence<ValueType::kStmt, ValueType::kF32,
- ValueType::kStmt>,
-
- &WasmGenerator::op<kExprF32Abs, ValueType::kF32>,
- &WasmGenerator::op<kExprF32Neg, ValueType::kF32>,
- &WasmGenerator::op<kExprF32Ceil, ValueType::kF32>,
- &WasmGenerator::op<kExprF32Floor, ValueType::kF32>,
- &WasmGenerator::op<kExprF32Trunc, ValueType::kF32>,
- &WasmGenerator::op<kExprF32NearestInt, ValueType::kF32>,
- &WasmGenerator::op<kExprF32Sqrt, ValueType::kF32>,
- &WasmGenerator::op<kExprF32Add, ValueType::kF32, ValueType::kF32>,
- &WasmGenerator::op<kExprF32Sub, ValueType::kF32, ValueType::kF32>,
- &WasmGenerator::op<kExprF32Mul, ValueType::kF32, ValueType::kF32>,
- &WasmGenerator::op<kExprF32Div, ValueType::kF32, ValueType::kF32>,
- &WasmGenerator::op<kExprF32Min, ValueType::kF32, ValueType::kF32>,
- &WasmGenerator::op<kExprF32Max, ValueType::kF32, ValueType::kF32>,
- &WasmGenerator::op<kExprF32CopySign, ValueType::kF32, ValueType::kF32>,
-
- &WasmGenerator::op<kExprF32SConvertI32, ValueType::kI32>,
- &WasmGenerator::op<kExprF32UConvertI32, ValueType::kI32>,
- &WasmGenerator::op<kExprF32SConvertI64, ValueType::kI64>,
- &WasmGenerator::op<kExprF32UConvertI64, ValueType::kI64>,
- &WasmGenerator::op<kExprF32ConvertF64, ValueType::kF64>,
- &WasmGenerator::op<kExprF32ReinterpretI32, ValueType::kI32>,
-
- &WasmGenerator::block<ValueType::kF32>,
- &WasmGenerator::loop<ValueType::kF32>,
- &WasmGenerator::if_<ValueType::kF32, kIfElse>,
- &WasmGenerator::br_if<ValueType::kF32>,
+ &WasmGenerator::sequence<kF32, kStmt>,
+ &WasmGenerator::sequence<kStmt, kF32>,
+ &WasmGenerator::sequence<kStmt, kF32, kStmt>,
+
+ &WasmGenerator::op<kExprF32Abs, kF32>,
+ &WasmGenerator::op<kExprF32Neg, kF32>,
+ &WasmGenerator::op<kExprF32Ceil, kF32>,
+ &WasmGenerator::op<kExprF32Floor, kF32>,
+ &WasmGenerator::op<kExprF32Trunc, kF32>,
+ &WasmGenerator::op<kExprF32NearestInt, kF32>,
+ &WasmGenerator::op<kExprF32Sqrt, kF32>,
+ &WasmGenerator::op<kExprF32Add, kF32, kF32>,
+ &WasmGenerator::op<kExprF32Sub, kF32, kF32>,
+ &WasmGenerator::op<kExprF32Mul, kF32, kF32>,
+ &WasmGenerator::op<kExprF32Div, kF32, kF32>,
+ &WasmGenerator::op<kExprF32Min, kF32, kF32>,
+ &WasmGenerator::op<kExprF32Max, kF32, kF32>,
+ &WasmGenerator::op<kExprF32CopySign, kF32, kF32>,
+
+ &WasmGenerator::op<kExprF32SConvertI32, kI32>,
+ &WasmGenerator::op<kExprF32UConvertI32, kI32>,
+ &WasmGenerator::op<kExprF32SConvertI64, kI64>,
+ &WasmGenerator::op<kExprF32UConvertI64, kI64>,
+ &WasmGenerator::op<kExprF32ConvertF64, kF64>,
+ &WasmGenerator::op<kExprF32ReinterpretI32, kI32>,
+
+ &WasmGenerator::block<kF32>,
+ &WasmGenerator::loop<kF32>,
+ &WasmGenerator::if_<kF32, kIfElse>,
+ &WasmGenerator::br_if<kF32>,
&WasmGenerator::memop<kExprF32LoadMem>,
- &WasmGenerator::simd_lane_op<kExprF32x4ExtractLane, 4, ValueType::kS128>,
+ &WasmGenerator::simd_lane_op<kExprF32x4ExtractLane, 4, kS128>,
- &WasmGenerator::get_local<ValueType::kF32>,
- &WasmGenerator::tee_local<ValueType::kF32>,
- &WasmGenerator::get_global<ValueType::kF32>,
- &WasmGenerator::op<kExprSelect, ValueType::kF32, ValueType::kF32,
- ValueType::kI32>,
- &WasmGenerator::select_with_type<ValueType::kF32>,
+ &WasmGenerator::get_local<kF32>,
+ &WasmGenerator::tee_local<kF32>,
+ &WasmGenerator::get_global<kF32>,
+ &WasmGenerator::op<kExprSelect, kF32, kF32, kI32>,
+ &WasmGenerator::select_with_type<kF32>,
- &WasmGenerator::call<ValueType::kF32>,
- &WasmGenerator::call_indirect<ValueType::kF32>};
+ &WasmGenerator::call<kF32>,
+ &WasmGenerator::call_indirect<kF32>,
+ &WasmGenerator::try_block<kF32>};
GenerateOneOf(alternatives, data);
}
template <>
-void WasmGenerator::Generate<ValueType::kF64>(DataRange* data) {
+void WasmGenerator::Generate<kF64>(DataRange* data) {
GeneratorRecursionScope rec_scope(this);
if (recursion_limit_reached() || data->size() <= sizeof(double)) {
builder_->EmitF64Const(data->get<double>());
@@ -1176,58 +1211,58 @@ void WasmGenerator::Generate<ValueType::kF64>(DataRange* data) {
}
constexpr GenerateFn alternatives[] = {
- &WasmGenerator::sequence<ValueType::kF64, ValueType::kStmt>,
- &WasmGenerator::sequence<ValueType::kStmt, ValueType::kF64>,
- &WasmGenerator::sequence<ValueType::kStmt, ValueType::kF64,
- ValueType::kStmt>,
-
- &WasmGenerator::op<kExprF64Abs, ValueType::kF64>,
- &WasmGenerator::op<kExprF64Neg, ValueType::kF64>,
- &WasmGenerator::op<kExprF64Ceil, ValueType::kF64>,
- &WasmGenerator::op<kExprF64Floor, ValueType::kF64>,
- &WasmGenerator::op<kExprF64Trunc, ValueType::kF64>,
- &WasmGenerator::op<kExprF64NearestInt, ValueType::kF64>,
- &WasmGenerator::op<kExprF64Sqrt, ValueType::kF64>,
- &WasmGenerator::op<kExprF64Add, ValueType::kF64, ValueType::kF64>,
- &WasmGenerator::op<kExprF64Sub, ValueType::kF64, ValueType::kF64>,
- &WasmGenerator::op<kExprF64Mul, ValueType::kF64, ValueType::kF64>,
- &WasmGenerator::op<kExprF64Div, ValueType::kF64, ValueType::kF64>,
- &WasmGenerator::op<kExprF64Min, ValueType::kF64, ValueType::kF64>,
- &WasmGenerator::op<kExprF64Max, ValueType::kF64, ValueType::kF64>,
- &WasmGenerator::op<kExprF64CopySign, ValueType::kF64, ValueType::kF64>,
-
- &WasmGenerator::op<kExprF64SConvertI32, ValueType::kI32>,
- &WasmGenerator::op<kExprF64UConvertI32, ValueType::kI32>,
- &WasmGenerator::op<kExprF64SConvertI64, ValueType::kI64>,
- &WasmGenerator::op<kExprF64UConvertI64, ValueType::kI64>,
- &WasmGenerator::op<kExprF64ConvertF32, ValueType::kF32>,
- &WasmGenerator::op<kExprF64ReinterpretI64, ValueType::kI64>,
-
- &WasmGenerator::block<ValueType::kF64>,
- &WasmGenerator::loop<ValueType::kF64>,
- &WasmGenerator::if_<ValueType::kF64, kIfElse>,
- &WasmGenerator::br_if<ValueType::kF64>,
+ &WasmGenerator::sequence<kF64, kStmt>,
+ &WasmGenerator::sequence<kStmt, kF64>,
+ &WasmGenerator::sequence<kStmt, kF64, kStmt>,
+
+ &WasmGenerator::op<kExprF64Abs, kF64>,
+ &WasmGenerator::op<kExprF64Neg, kF64>,
+ &WasmGenerator::op<kExprF64Ceil, kF64>,
+ &WasmGenerator::op<kExprF64Floor, kF64>,
+ &WasmGenerator::op<kExprF64Trunc, kF64>,
+ &WasmGenerator::op<kExprF64NearestInt, kF64>,
+ &WasmGenerator::op<kExprF64Sqrt, kF64>,
+ &WasmGenerator::op<kExprF64Add, kF64, kF64>,
+ &WasmGenerator::op<kExprF64Sub, kF64, kF64>,
+ &WasmGenerator::op<kExprF64Mul, kF64, kF64>,
+ &WasmGenerator::op<kExprF64Div, kF64, kF64>,
+ &WasmGenerator::op<kExprF64Min, kF64, kF64>,
+ &WasmGenerator::op<kExprF64Max, kF64, kF64>,
+ &WasmGenerator::op<kExprF64CopySign, kF64, kF64>,
+
+ &WasmGenerator::op<kExprF64SConvertI32, kI32>,
+ &WasmGenerator::op<kExprF64UConvertI32, kI32>,
+ &WasmGenerator::op<kExprF64SConvertI64, kI64>,
+ &WasmGenerator::op<kExprF64UConvertI64, kI64>,
+ &WasmGenerator::op<kExprF64ConvertF32, kF32>,
+ &WasmGenerator::op<kExprF64ReinterpretI64, kI64>,
+
+ &WasmGenerator::block<kF64>,
+ &WasmGenerator::loop<kF64>,
+ &WasmGenerator::if_<kF64, kIfElse>,
+ &WasmGenerator::br_if<kF64>,
&WasmGenerator::memop<kExprF64LoadMem>,
- &WasmGenerator::simd_lane_op<kExprF64x2ExtractLane, 2, ValueType::kS128>,
+ &WasmGenerator::simd_lane_op<kExprF64x2ExtractLane, 2, kS128>,
- &WasmGenerator::get_local<ValueType::kF64>,
- &WasmGenerator::tee_local<ValueType::kF64>,
- &WasmGenerator::get_global<ValueType::kF64>,
- &WasmGenerator::op<kExprSelect, ValueType::kF64, ValueType::kF64,
- ValueType::kI32>,
- &WasmGenerator::select_with_type<ValueType::kF64>,
+ &WasmGenerator::get_local<kF64>,
+ &WasmGenerator::tee_local<kF64>,
+ &WasmGenerator::get_global<kF64>,
+ &WasmGenerator::op<kExprSelect, kF64, kF64, kI32>,
+ &WasmGenerator::select_with_type<kF64>,
- &WasmGenerator::call<ValueType::kF64>,
- &WasmGenerator::call_indirect<ValueType::kF64>};
+ &WasmGenerator::call<kF64>,
+ &WasmGenerator::call_indirect<kF64>,
+ &WasmGenerator::try_block<kF64>};
GenerateOneOf(alternatives, data);
}
template <>
-void WasmGenerator::Generate<ValueType::kS128>(DataRange* data) {
+void WasmGenerator::Generate<kS128>(DataRange* data) {
GeneratorRecursionScope rec_scope(this);
+ has_simd_ = true;
if (recursion_limit_reached() || data->size() <= sizeof(int32_t)) {
// TODO(v8:8460): v128.const is not implemented yet, and we need a way to
// "bottom-out", so use a splat to generate this.
@@ -1238,331 +1273,216 @@ void WasmGenerator::Generate<ValueType::kS128>(DataRange* data) {
constexpr GenerateFn alternatives[] = {
&WasmGenerator::simd_const,
- &WasmGenerator::simd_lane_op<kExprI8x16ReplaceLane, 16, ValueType::kS128,
- ValueType::kI32>,
- &WasmGenerator::simd_lane_op<kExprI16x8ReplaceLane, 8, ValueType::kS128,
- ValueType::kI32>,
- &WasmGenerator::simd_lane_op<kExprI32x4ReplaceLane, 4, ValueType::kS128,
- ValueType::kI32>,
- &WasmGenerator::simd_lane_op<kExprI64x2ReplaceLane, 2, ValueType::kS128,
- ValueType::kI64>,
- &WasmGenerator::simd_lane_op<kExprF32x4ReplaceLane, 4, ValueType::kS128,
- ValueType::kF32>,
- &WasmGenerator::simd_lane_op<kExprF64x2ReplaceLane, 2, ValueType::kS128,
- ValueType::kF64>,
-
- &WasmGenerator::op_with_prefix<kExprI8x16Splat, ValueType::kI32>,
- &WasmGenerator::op_with_prefix<kExprI8x16Eq, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16Ne, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16LtS, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16LtU, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16GtS, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16GtU, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16LeS, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16LeU, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16GeS, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16GeU, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16Abs, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16Neg, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16Shl, ValueType::kS128,
- ValueType::kI32>,
- &WasmGenerator::op_with_prefix<kExprI8x16ShrS, ValueType::kS128,
- ValueType::kI32>,
- &WasmGenerator::op_with_prefix<kExprI8x16ShrU, ValueType::kS128,
- ValueType::kI32>,
- &WasmGenerator::op_with_prefix<kExprI8x16Add, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16AddSatS, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16AddSatU, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16Sub, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16SubSatS, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16SubSatU, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16MinS, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16MinU, ValueType::kS128,
- ValueType::kS128>,
+ &WasmGenerator::simd_lane_op<kExprI8x16ReplaceLane, 16, kS128, kI32>,
+ &WasmGenerator::simd_lane_op<kExprI16x8ReplaceLane, 8, kS128, kI32>,
+ &WasmGenerator::simd_lane_op<kExprI32x4ReplaceLane, 4, kS128, kI32>,
+ &WasmGenerator::simd_lane_op<kExprI64x2ReplaceLane, 2, kS128, kI64>,
+ &WasmGenerator::simd_lane_op<kExprF32x4ReplaceLane, 4, kS128, kF32>,
+ &WasmGenerator::simd_lane_op<kExprF64x2ReplaceLane, 2, kS128, kF64>,
+
+ &WasmGenerator::op_with_prefix<kExprI8x16Splat, kI32>,
+ &WasmGenerator::op_with_prefix<kExprI8x16Eq, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16Ne, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16LtS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16LtU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16GtS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16GtU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16LeS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16LeU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16GeS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16GeU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16Abs, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16Neg, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16Shl, kS128, kI32>,
+ &WasmGenerator::op_with_prefix<kExprI8x16ShrS, kS128, kI32>,
+ &WasmGenerator::op_with_prefix<kExprI8x16ShrU, kS128, kI32>,
+ &WasmGenerator::op_with_prefix<kExprI8x16Add, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16AddSatS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16AddSatU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16Sub, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16SubSatS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16SubSatU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16MinS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16MinU, kS128, kS128>,
// I8x16Mul is prototyped but not in the proposal, thus omitted here.
- &WasmGenerator::op_with_prefix<kExprI8x16MaxS, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16MaxU, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16RoundingAverageU,
- ValueType::kS128, ValueType::kS128>,
-
- &WasmGenerator::op_with_prefix<kExprI16x8Splat, ValueType::kI32>,
- &WasmGenerator::op_with_prefix<kExprI16x8Eq, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8Ne, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8LtS, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8LtU, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8GtS, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8GtU, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8LeS, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8LeU, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8GeS, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8GeU, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8Abs, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8Neg, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8Shl, ValueType::kS128,
- ValueType::kI32>,
- &WasmGenerator::op_with_prefix<kExprI16x8ShrS, ValueType::kS128,
- ValueType::kI32>,
- &WasmGenerator::op_with_prefix<kExprI16x8ShrU, ValueType::kS128,
- ValueType::kI32>,
- &WasmGenerator::op_with_prefix<kExprI16x8Add, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8AddSatS, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8AddSatU, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8Sub, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8SubSatS, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8SubSatU, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8Mul, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8MinS, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8MinU, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8MaxS, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8MaxU, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8RoundingAverageU,
- ValueType::kS128, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8ExtMulLowI8x16S,
- ValueType::kS128, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8ExtMulLowI8x16U,
- ValueType::kS128, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8ExtMulHighI8x16S,
- ValueType::kS128, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8ExtMulHighI8x16U,
- ValueType::kS128, ValueType::kS128>,
-
- &WasmGenerator::op_with_prefix<kExprI32x4Splat, ValueType::kI32>,
- &WasmGenerator::op_with_prefix<kExprI32x4Eq, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4Ne, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4LtS, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4LtU, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4GtS, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4GtU, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4LeS, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4LeU, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4GeS, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4GeU, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4Abs, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4Neg, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4Shl, ValueType::kS128,
- ValueType::kI32>,
- &WasmGenerator::op_with_prefix<kExprI32x4ShrS, ValueType::kS128,
- ValueType::kI32>,
- &WasmGenerator::op_with_prefix<kExprI32x4ShrU, ValueType::kS128,
- ValueType::kI32>,
- &WasmGenerator::op_with_prefix<kExprI32x4Add, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4Sub, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4Mul, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4MinS, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4MinU, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4MaxS, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4MaxU, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4DotI16x8S, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4ExtMulLowI16x8S,
- ValueType::kS128, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4ExtMulLowI16x8U,
- ValueType::kS128, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4ExtMulHighI16x8S,
- ValueType::kS128, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4ExtMulHighI16x8U,
- ValueType::kS128, ValueType::kS128>,
-
- &WasmGenerator::op_with_prefix<kExprI64x2Splat, ValueType::kI64>,
- &WasmGenerator::op_with_prefix<kExprI64x2Neg, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI64x2Shl, ValueType::kS128,
- ValueType::kI32>,
- &WasmGenerator::op_with_prefix<kExprI64x2ShrS, ValueType::kS128,
- ValueType::kI32>,
- &WasmGenerator::op_with_prefix<kExprI64x2ShrU, ValueType::kS128,
- ValueType::kI32>,
- &WasmGenerator::op_with_prefix<kExprI64x2Add, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI64x2Sub, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI64x2Mul, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI64x2ExtMulLowI32x4S,
- ValueType::kS128, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI64x2ExtMulLowI32x4U,
- ValueType::kS128, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI64x2ExtMulHighI32x4S,
- ValueType::kS128, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI64x2ExtMulHighI32x4U,
- ValueType::kS128, ValueType::kS128>,
-
- &WasmGenerator::op_with_prefix<kExprF32x4Splat, ValueType::kF32>,
- &WasmGenerator::op_with_prefix<kExprF32x4Eq, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF32x4Ne, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF32x4Lt, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF32x4Gt, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF32x4Le, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF32x4Ge, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF32x4Abs, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF32x4Neg, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF32x4Sqrt, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF32x4Add, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF32x4Sub, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF32x4Mul, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF32x4Div, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF32x4Min, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF32x4Max, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF32x4Pmin, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF32x4Pmax, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF32x4Ceil, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF32x4Floor, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF32x4Trunc, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF32x4NearestInt, ValueType::kS128>,
-
- &WasmGenerator::op_with_prefix<kExprF64x2Splat, ValueType::kF64>,
- &WasmGenerator::op_with_prefix<kExprF64x2Eq, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF64x2Ne, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF64x2Lt, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF64x2Gt, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF64x2Le, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF64x2Ge, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF64x2Abs, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF64x2Neg, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF64x2Sqrt, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF64x2Add, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF64x2Sub, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF64x2Mul, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF64x2Div, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF64x2Min, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF64x2Max, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF64x2Pmin, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF64x2Pmax, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF64x2Ceil, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF64x2Floor, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF64x2Trunc, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF64x2NearestInt, ValueType::kS128>,
-
- &WasmGenerator::op_with_prefix<kExprI32x4SConvertF32x4, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4UConvertF32x4, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF32x4SConvertI32x4, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprF32x4UConvertI32x4, ValueType::kS128>,
-
- &WasmGenerator::op_with_prefix<kExprI8x16SConvertI16x8, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16UConvertI16x8, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8SConvertI32x4, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8UConvertI32x4, ValueType::kS128,
- ValueType::kS128>,
-
- &WasmGenerator::op_with_prefix<kExprI16x8SConvertI8x16Low,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8SConvertI8x16High,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8UConvertI8x16Low,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8UConvertI8x16High,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4SConvertI16x8Low,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4SConvertI16x8High,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4UConvertI16x8Low,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI32x4UConvertI16x8High,
- ValueType::kS128>,
-
- &WasmGenerator::op_with_prefix<kExprS128Not, ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprS128And, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprS128AndNot, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprS128Or, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprS128Xor, ValueType::kS128,
- ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprS128Select, ValueType::kS128,
- ValueType::kS128, ValueType::kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16MaxS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16MaxU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16RoundingAverageU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16Popcnt, kS128>,
+
+ &WasmGenerator::op_with_prefix<kExprI16x8Splat, kI32>,
+ &WasmGenerator::op_with_prefix<kExprI16x8Eq, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8Ne, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8LtS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8LtU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8GtS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8GtU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8LeS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8LeU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8GeS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8GeU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8Abs, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8Neg, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8Shl, kS128, kI32>,
+ &WasmGenerator::op_with_prefix<kExprI16x8ShrS, kS128, kI32>,
+ &WasmGenerator::op_with_prefix<kExprI16x8ShrU, kS128, kI32>,
+ &WasmGenerator::op_with_prefix<kExprI16x8Add, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8AddSatS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8AddSatU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8Sub, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8SubSatS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8SubSatU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8Mul, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8MinS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8MinU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8MaxS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8MaxU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8RoundingAverageU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8ExtMulLowI8x16S, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8ExtMulLowI8x16U, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8ExtMulHighI8x16S, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8ExtMulHighI8x16U, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8Q15MulRSatS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8ExtAddPairwiseI8x16S, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8ExtAddPairwiseI8x16U, kS128>,
+
+ &WasmGenerator::op_with_prefix<kExprI32x4Splat, kI32>,
+ &WasmGenerator::op_with_prefix<kExprI32x4Eq, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4Ne, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4LtS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4LtU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4GtS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4GtU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4LeS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4LeU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4GeS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4GeU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4Abs, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4Neg, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4Shl, kS128, kI32>,
+ &WasmGenerator::op_with_prefix<kExprI32x4ShrS, kS128, kI32>,
+ &WasmGenerator::op_with_prefix<kExprI32x4ShrU, kS128, kI32>,
+ &WasmGenerator::op_with_prefix<kExprI32x4Add, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4Sub, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4Mul, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4MinS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4MinU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4MaxS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4MaxU, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4DotI16x8S, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4ExtMulLowI16x8S, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4ExtMulLowI16x8U, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4ExtMulHighI16x8S, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4ExtMulHighI16x8U, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4ExtAddPairwiseI16x8S, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4ExtAddPairwiseI16x8U, kS128>,
+
+ &WasmGenerator::op_with_prefix<kExprI64x2Splat, kI64>,
+ &WasmGenerator::op_with_prefix<kExprI64x2Eq, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI64x2Ne, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI64x2LtS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI64x2GtS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI64x2LeS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI64x2GeS, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI64x2Abs, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI64x2Neg, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI64x2Shl, kS128, kI32>,
+ &WasmGenerator::op_with_prefix<kExprI64x2ShrS, kS128, kI32>,
+ &WasmGenerator::op_with_prefix<kExprI64x2ShrU, kS128, kI32>,
+ &WasmGenerator::op_with_prefix<kExprI64x2Add, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI64x2Sub, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI64x2Mul, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI64x2ExtMulLowI32x4S, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI64x2ExtMulLowI32x4U, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI64x2ExtMulHighI32x4S, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI64x2ExtMulHighI32x4U, kS128, kS128>,
+
+ &WasmGenerator::op_with_prefix<kExprF32x4Splat, kF32>,
+ &WasmGenerator::op_with_prefix<kExprF32x4Eq, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4Ne, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4Lt, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4Gt, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4Le, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4Ge, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4Abs, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4Neg, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4Sqrt, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4Add, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4Sub, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4Mul, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4Div, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4Min, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4Max, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4Pmin, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4Pmax, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4Ceil, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4Floor, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4Trunc, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4NearestInt, kS128>,
+
+ &WasmGenerator::op_with_prefix<kExprF64x2Splat, kF64>,
+ &WasmGenerator::op_with_prefix<kExprF64x2Eq, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2Ne, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2Lt, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2Gt, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2Le, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2Ge, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2Abs, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2Neg, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2Sqrt, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2Add, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2Sub, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2Mul, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2Div, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2Min, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2Max, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2Pmin, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2Pmax, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2Ceil, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2Floor, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2Trunc, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2NearestInt, kS128>,
+
+ &WasmGenerator::op_with_prefix<kExprF64x2PromoteLowF32x4, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2ConvertLowI32x4S, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF64x2ConvertLowI32x4U, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4DemoteF64x2Zero, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4TruncSatF64x2SZero, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4TruncSatF64x2UZero, kS128>,
+
+ &WasmGenerator::op_with_prefix<kExprI64x2SConvertI32x4Low, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI64x2SConvertI32x4High, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI64x2UConvertI32x4Low, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI64x2UConvertI32x4High, kS128>,
+
+ &WasmGenerator::op_with_prefix<kExprI32x4SConvertF32x4, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4UConvertF32x4, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4SConvertI32x4, kS128>,
+ &WasmGenerator::op_with_prefix<kExprF32x4UConvertI32x4, kS128>,
+
+ &WasmGenerator::op_with_prefix<kExprI8x16SConvertI16x8, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16UConvertI16x8, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8SConvertI32x4, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8UConvertI32x4, kS128, kS128>,
+
+ &WasmGenerator::op_with_prefix<kExprI16x8SConvertI8x16Low, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8SConvertI8x16High, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8UConvertI8x16Low, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8UConvertI8x16High, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4SConvertI16x8Low, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4SConvertI16x8High, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4UConvertI16x8Low, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4UConvertI16x8High, kS128>,
+
+ &WasmGenerator::op_with_prefix<kExprS128Not, kS128>,
+ &WasmGenerator::op_with_prefix<kExprS128And, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprS128AndNot, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprS128Or, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprS128Xor, kS128, kS128>,
+ &WasmGenerator::op_with_prefix<kExprS128Select, kS128, kS128, kS128>,
&WasmGenerator::simd_shuffle,
- &WasmGenerator::op_with_prefix<kExprI8x16Swizzle, ValueType::kS128,
- ValueType::kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16Swizzle, kS128, kS128>,
&WasmGenerator::memop<kExprS128LoadMem>,
&WasmGenerator::memop<kExprS128Load8x8S>,
@@ -1577,30 +1497,34 @@ void WasmGenerator::Generate<ValueType::kS128>(DataRange* data) {
&WasmGenerator::memop<kExprS128Load64Splat>,
&WasmGenerator::memop<kExprS128Load32Zero>,
&WasmGenerator::memop<kExprS128Load64Zero>,
+ &WasmGenerator::simd_lane_memop<kExprS128Load8Lane, 16, kS128>,
+ &WasmGenerator::simd_lane_memop<kExprS128Load16Lane, 8, kS128>,
+ &WasmGenerator::simd_lane_memop<kExprS128Load32Lane, 4, kS128>,
+ &WasmGenerator::simd_lane_memop<kExprS128Load64Lane, 2, kS128>,
};
GenerateOneOf(alternatives, data);
}
void WasmGenerator::grow_memory(DataRange* data) {
- Generate<ValueType::kI32>(data);
+ Generate<kI32>(data);
builder_->EmitWithU8(kExprMemoryGrow, 0);
}
void WasmGenerator::Generate(ValueType type, DataRange* data) {
switch (type.kind()) {
- case ValueType::kStmt:
- return Generate<ValueType::kStmt>(data);
- case ValueType::kI32:
- return Generate<ValueType::kI32>(data);
- case ValueType::kI64:
- return Generate<ValueType::kI64>(data);
- case ValueType::kF32:
- return Generate<ValueType::kF32>(data);
- case ValueType::kF64:
- return Generate<ValueType::kF64>(data);
- case ValueType::kS128:
- return Generate<ValueType::kS128>(data);
+ case kStmt:
+ return Generate<kStmt>(data);
+ case kI32:
+ return Generate<kI32>(data);
+ case kI64:
+ return Generate<kI64>(data);
+ case kF32:
+ return Generate<kF32>(data);
+ case kF64:
+ return Generate<kF64>(data);
+ case kS128:
+ return Generate<kS128>(data);
default:
UNREACHABLE();
}
@@ -1680,10 +1604,14 @@ void WasmGenerator::ConsumeAndGenerate(Vector<const ValueType> param_types,
}
}
-FunctionSig* GenerateSig(Zone* zone, DataRange* data) {
+enum SigKind { kFunctionSig, kExceptionSig };
+
+FunctionSig* GenerateSig(Zone* zone, DataRange* data, SigKind sig_kind) {
// Generate enough parameters to spill some to the stack.
int num_params = int{data->get<uint8_t>()} % (kMaxParameters + 1);
- int num_returns = int{data->get<uint8_t>()} % (kMaxReturns + 1);
+ int num_returns = sig_kind == kFunctionSig
+ ? int{data->get<uint8_t>()} % (kMaxReturns + 1)
+ : 0;
FunctionSig::Builder builder(zone, num_returns, num_params);
for (int i = 0; i < num_returns; ++i) builder.AddReturn(GetValueType(data));
@@ -1711,7 +1639,7 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
int num_functions = 1 + (range.get<uint8_t>() % kMaxFunctions);
for (int i = 1; i < num_functions; ++i) {
- FunctionSig* sig = GenerateSig(zone, &range);
+ FunctionSig* sig = GenerateSig(zone, &range, kFunctionSig);
uint32_t signature_index = builder.AddSignature(sig);
function_signatures.push_back(signature_index);
}
@@ -1722,6 +1650,12 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
globals.reserve(num_globals);
mutable_globals.reserve(num_globals);
+ int num_exceptions = 1 + (range.get<uint8_t>() % kMaxExceptions);
+ for (int i = 0; i < num_exceptions; ++i) {
+ FunctionSig* sig = GenerateSig(zone, &range, kExceptionSig);
+ builder.AddException(sig);
+ }
+
for (int i = 0; i < num_globals; ++i) {
ValueType type = GetValueType(&range);
// 1/8 of globals are immutable.
@@ -1743,7 +1677,7 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
Vector<const ValueType> return_types(sig->returns().begin(),
sig->return_count());
gen.Generate(return_types, &function_range);
-
+ if (!CheckHardwareSupportsSimd() && gen.HasSimd()) return false;
f->Emit(kExprEnd);
if (i == 0) builder.AddExport(CStrVector("main"), f);
}
@@ -1773,6 +1707,7 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
constexpr bool require_valid = true;
EXPERIMENTAL_FLAG_SCOPE(reftypes);
+ EXPERIMENTAL_FLAG_SCOPE(eh);
WasmCompileFuzzer().FuzzWasmModule({data, size}, require_valid);
return 0;
}
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
index c67412b938..597789c7e1 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
@@ -115,24 +115,22 @@ PrintSig PrintReturns(const FunctionSig* sig) {
}
const char* ValueTypeToConstantName(ValueType type) {
switch (type.kind()) {
- case ValueType::kI32:
+ case kI32:
return "kWasmI32";
- case ValueType::kI64:
+ case kI64:
return "kWasmI64";
- case ValueType::kF32:
+ case kF32:
return "kWasmF32";
- case ValueType::kF64:
+ case kF64:
return "kWasmF64";
- case ValueType::kS128:
+ case kS128:
return "kWasmS128";
- case ValueType::kOptRef:
+ case kOptRef:
switch (type.heap_representation()) {
case HeapType::kExtern:
return "kWasmExternRef";
case HeapType::kFunc:
return "kWasmFuncRef";
- case HeapType::kExn:
- return "kWasmExnRef";
case HeapType::kAny:
case HeapType::kI31:
case HeapType::kBottom:
diff --git a/deps/v8/test/inspector/cpu-profiler/console-profile-asm-js-expected.txt b/deps/v8/test/inspector/cpu-profiler/console-profile-asm-js-expected.txt
new file mode 100644
index 0000000000..644bcd76c9
--- /dev/null
+++ b/deps/v8/test/inspector/cpu-profiler/console-profile-asm-js-expected.txt
@@ -0,0 +1,9 @@
+Test console profiles for asm.js.
+testEnableProfilerEarly
+Compiling asm.js module with sentinel 0.
+testEnableProfilerLate
+Compiling asm.js module with sentinel 1.
+testEnableProfilerAfterDebugger
+Compiling asm.js module with sentinel 2.
+testEnableProfilerBeforeDebugger
+Compiling asm.js module with sentinel 3.
diff --git a/deps/v8/test/inspector/cpu-profiler/console-profile-asm-js.js b/deps/v8/test/inspector/cpu-profiler/console-profile-asm-js.js
new file mode 100644
index 0000000000..6d9cb2b174
--- /dev/null
+++ b/deps/v8/test/inspector/cpu-profiler/console-profile-asm-js.js
@@ -0,0 +1,94 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Test console profiles for asm.js.');
+
+function compile(bytes) {
+ let buffer = new ArrayBuffer(bytes.length);
+ let view = new Uint8Array(buffer);
+ for (var i = 0; i < bytes.length; i++) {
+ view[i] = bytes[i] | 0;
+ }
+ let module = new WebAssembly.Module(buffer);
+ let fib = undefined;
+ function imp(i) { return fib(i); }
+ let instance = new WebAssembly.Instance(module, {q: {f: imp}});
+ fib = instance.exports.fib;
+ return instance;
+}
+
+function checkError(message) {
+ if (!message.error) return;
+ InspectorTest.log('Error: ');
+ InspectorTest.logMessage(message);
+ InspectorTest.completeTest();
+}
+
+// When building asm.js modules, include a sentinel such that the module will not
+// be reused from the cache.
+let sentinel = 0;
+
+function AsmModule(stdlib, foreign, heap) {
+ "use asm";
+ function f() {
+ return sentinel;
+ }
+ return {f: f};
+}
+
+async function compileAsmJs() {
+ InspectorTest.log(`Compiling asm.js module with sentinel ${sentinel}.`);
+ let code = AsmModule.toString().replace('sentinel', sentinel.toString());
+ ++sentinel;
+ checkError(await Protocol.Runtime.evaluate({expression: `(${code})().f()`}));
+}
+
+async function testEnableProfilerEarly() {
+ InspectorTest.log(arguments.callee.name);
+ checkError(await Protocol.Profiler.enable());
+ checkError(await Protocol.Profiler.start());
+ await compileAsmJs();
+ checkError(await Protocol.Profiler.disable());
+}
+
+async function testEnableProfilerLate() {
+ InspectorTest.log(arguments.callee.name);
+ await compileAsmJs();
+ checkError(await Protocol.Profiler.enable());
+ checkError(await Protocol.Profiler.start());
+ checkError(await Protocol.Profiler.disable());
+}
+
+async function testEnableProfilerAfterDebugger() {
+ InspectorTest.log(arguments.callee.name);
+ checkError(await Protocol.Debugger.enable());
+ await compileAsmJs();
+ checkError(await Protocol.Profiler.enable());
+ checkError(await Protocol.Profiler.start());
+ checkError(await Protocol.Profiler.disable());
+ checkError(await Protocol.Debugger.disable());
+}
+
+async function testEnableProfilerBeforeDebugger() {
+ InspectorTest.log(arguments.callee.name);
+ await compileAsmJs();
+ await Protocol.Profiler.enable();
+ await Protocol.Debugger.enable();
+ checkError(await Protocol.Profiler.start());
+ await Protocol.Debugger.disable();
+ await Protocol.Profiler.disable();
+}
+
+(async function test() {
+ try {
+ await testEnableProfilerEarly();
+ await testEnableProfilerLate();
+ await testEnableProfilerAfterDebugger();
+ await testEnableProfilerBeforeDebugger();
+ } catch (e) {
+ InspectorTest.log('caught: ' + e);
+ }
+})().catch(e => InspectorTest.log('caught: ' + e))
+ .finally(InspectorTest.completeTest);
diff --git a/deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js b/deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js
index e08d644981..76f5c5436f 100644
--- a/deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js
+++ b/deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// TODO(v8:10266): Figure out why this fails on tsan with --always-opt.
-// Flags: --no-always-opt
+// Flags: --no-always-opt --no-turbo-inline-js-wasm-calls
let {session, contextGroup, Protocol} = InspectorTest.start(
'Test that console profiles contain wasm function names.');
diff --git a/deps/v8/test/inspector/debugger/destructuring-expected.txt b/deps/v8/test/inspector/debugger/destructuring-expected.txt
new file mode 100644
index 0000000000..47673aefe7
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/destructuring-expected.txt
@@ -0,0 +1,34 @@
+Tests breakable locations in destructuring.
+
+Running test: testBreakLocations
+
+function testFunction() {
+ function func() {
+ |_|return [1, 2];|R|
+ }
+
+ var [|_|a, |_|b] = |C|func();
+|R|}
+
+
+Running test: testSetBreakpoint
+Setting breakpoint at test.js:6:0
+
+ var [a, b] = #func();
+}
+
+Setting breakpoint at test.js:6:7
+
+ var [#a, b] = func();
+}
+
+Setting breakpoint at test.js:6:10
+
+ var [a, #b] = func();
+}
+
+Setting breakpoint at test.js:6:15
+
+ var [a, b] = #func();
+}
+
diff --git a/deps/v8/test/inspector/debugger/destructuring.js b/deps/v8/test/inspector/debugger/destructuring.js
new file mode 100644
index 0000000000..e46e0e4998
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/destructuring.js
@@ -0,0 +1,47 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Tests breakable locations in destructuring.');
+
+let source = `
+function testFunction() {
+ function func() {
+ return [1, 2];
+ }
+
+ var [a, b] = func();
+}
+//# sourceURL=test.js`;
+
+contextGroup.addScript(source);
+session.setupScriptMap();
+
+InspectorTest.runAsyncTestSuite([
+ async function testBreakLocations() {
+ Protocol.Debugger.enable();
+ let {params:{scriptId}} = await Protocol.Debugger.onceScriptParsed();
+ let {result:{locations}} = await Protocol.Debugger.getPossibleBreakpoints({
+ start: {lineNumber: 0, columnNumber : 0, scriptId}});
+ await session.logBreakLocations(locations);
+ },
+
+ async function testSetBreakpoint() {
+ const SOURCE_LOCATIONS = [
+ {lineNumber: 6, columnNumber: 0},
+ {lineNumber: 6, columnNumber: 7},
+ {lineNumber: 6, columnNumber: 10},
+ {lineNumber: 6, columnNumber: 15},
+ ];
+ for (const {lineNumber, columnNumber} of SOURCE_LOCATIONS) {
+ const url = 'test.js';
+ InspectorTest.log(`Setting breakpoint at ${url}:${lineNumber}:${columnNumber}`);
+ const {result: {breakpointId, locations}} = await Protocol.Debugger.setBreakpointByUrl({
+ lineNumber, columnNumber, url
+ });
+ locations.forEach(location => session.logSourceLocation(location));
+ await Protocol.Debugger.removeBreakpoint({breakpointId});
+ }
+ }
+]);
diff --git a/deps/v8/test/inspector/debugger/for-of-loops-expected.txt b/deps/v8/test/inspector/debugger/for-of-loops-expected.txt
index c742413a6b..41d1ca72cb 100644
--- a/deps/v8/test/inspector/debugger/for-of-loops-expected.txt
+++ b/deps/v8/test/inspector/debugger/for-of-loops-expected.txt
@@ -2,8 +2,6 @@ Tests breakable locations in for-of loops.
Running test: testBreakLocations
-Running test: testStepInto
-
function testFunction() {
var obj = |_|{a : 1};
var arr = |_|[1];
@@ -33,6 +31,8 @@ function testFunction() {
for (let |C|k of |_|iterable) { all.|C|push(k); }
|R|}
+
+Running test: testStepInto
(anonymous) (expr.js:0:0)
@@ -405,3 +405,25 @@ testFunction (test.js:25:11)
if (this.#i < 1) {
return { value: this.i++, done: false };
+
+Running test: testSetBreakpoint
+Setting breakpoint at test.js:25:0
+ };
+ for (var k of #iterable) { all.push(k); }
+ iterable.i = 0;
+
+Setting breakpoint at test.js:25:11
+ };
+ for (var #k of iterable) { all.push(k); }
+ iterable.i = 0;
+
+Setting breakpoint at test.js:25:16
+ };
+ for (var k of #iterable) { all.push(k); }
+ iterable.i = 0;
+
+Setting breakpoint at test.js:25:28
+ };
+ for (var k of iterable) { all.#push(k); }
+ iterable.i = 0;
+
diff --git a/deps/v8/test/inspector/debugger/for-of-loops.js b/deps/v8/test/inspector/debugger/for-of-loops.js
index 0fa0a26a77..9579a91a41 100644
--- a/deps/v8/test/inspector/debugger/for-of-loops.js
+++ b/deps/v8/test/inspector/debugger/for-of-loops.js
@@ -45,7 +45,7 @@ InspectorTest.runAsyncTestSuite([
let {params:{scriptId}} = await Protocol.Debugger.onceScriptParsed();
let {result:{locations}} = await Protocol.Debugger.getPossibleBreakpoints({
start: {lineNumber: 0, columnNumber : 0, scriptId}});
- session.logBreakLocations(locations);
+ await session.logBreakLocations(locations);
},
async function testStepInto() {
@@ -65,18 +65,39 @@ InspectorTest.runAsyncTestSuite([
},
async function testStepIntoAfterBreakpoint() {
- Protocol.Debugger.setBreakpointByUrl({lineNumber: 25, url: 'test.js'});
+ const {result: {breakpointId}} = await Protocol.Debugger.setBreakpointByUrl({
+ lineNumber: 25, columnNumber: 11, url: 'test.js'
+ });
Protocol.Runtime.evaluate({
expression: 'testFunction()//# sourceURL=expr.js'});
await awaitPausedAndDump();
Protocol.Debugger.stepInto();
await awaitPausedAndDump();
await Protocol.Debugger.resume();
+ await Protocol.Debugger.removeBreakpoint({breakpointId});
async function awaitPausedAndDump() {
let {params:{callFrames}} = await Protocol.Debugger.oncePaused();
session.logCallFrames(callFrames);
session.logSourceLocation(callFrames[0].location);
}
+ },
+
+ async function testSetBreakpoint() {
+ const SOURCE_LOCATIONS = [
+ {lineNumber: 25, columnNumber: 0},
+ {lineNumber: 25, columnNumber: 11},
+ {lineNumber: 25, columnNumber: 16},
+ {lineNumber: 25, columnNumber: 28},
+ ];
+ for (const {lineNumber, columnNumber} of SOURCE_LOCATIONS) {
+ const url = 'test.js';
+ InspectorTest.log(`Setting breakpoint at ${url}:${lineNumber}:${columnNumber}`);
+ const {result: {breakpointId, locations}} = await Protocol.Debugger.setBreakpointByUrl({
+ lineNumber, columnNumber, url
+ });
+ locations.forEach(location => session.logSourceLocation(location));
+ await Protocol.Debugger.removeBreakpoint({breakpointId});
+ }
}
]);
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-after-gc-expected.txt b/deps/v8/test/inspector/debugger/set-breakpoint-after-gc-expected.txt
new file mode 100644
index 0000000000..773f69990e
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-after-gc-expected.txt
@@ -0,0 +1,5 @@
+Checks if we keep alive breakpoint information for top-level functions.
+Result of setting breakpoint in topLevel.js
+[{"scriptId":"3","lineNumber":0,"columnNumber":0}]
+Result of setting breakpoint in moduleFunc.js
+[{"scriptId":"5","lineNumber":0,"columnNumber":22}] \ No newline at end of file
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-after-gc.js b/deps/v8/test/inspector/debugger/set-breakpoint-after-gc.js
new file mode 100644
index 0000000000..f11b81a32b
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-after-gc.js
@@ -0,0 +1,52 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Checks if we keep alive breakpoint information for top-level functions.');
+
+session.setupScriptMap();
+var executionContextId;
+
+const callGarbageCollector = `
+ %CollectGarbage("");
+ %CollectGarbage("");
+ %CollectGarbage("");
+ %CollectGarbage("");
+`;
+
+const topLevelFunction = `console.log('This is a top level function')`;
+const moduleFunction =
+ `function testFunc() { console.log('This is a module function') }`;
+
+Protocol.Debugger.enable().then(onDebuggerEnabled);
+
+function onDebuggerEnabled() {
+ Protocol.Runtime.enable();
+ Protocol.Runtime.onExecutionContextCreated(onExecutionContextCreated);
+}
+
+async function onExecutionContextCreated(messageObject) {
+ executionContextId = messageObject.params.context.id;
+ await testSetBreakpoint(executionContextId, topLevelFunction, 'topLevel.js');
+ await testSetBreakpoint(executionContextId, moduleFunction, 'moduleFunc.js');
+ InspectorTest.completeTest();
+}
+
+async function testSetBreakpoint(executionContextId, func, url) {
+ const obj = await Protocol.Runtime.compileScript({
+ expression: func,
+ sourceURL: url,
+ persistScript: true,
+ executionContextId: executionContextId
+ });
+ const scriptId = obj.result.scriptId;
+ await Protocol.Runtime.runScript({scriptId});
+ await Protocol.Runtime.evaluate({expression: `${callGarbageCollector}`});
+ const {result: {locations}} =
+ await Protocol.Debugger.setBreakpointByUrl({lineNumber: 0, url});
+ InspectorTest.log(`Result of setting breakpoint in ${url}`);
+ InspectorTest.log(JSON.stringify(locations));
+}
diff --git a/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate-expected.txt b/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate-expected.txt
index 6d113861dd..4364308d85 100644
--- a/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate-expected.txt
+++ b/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate-expected.txt
@@ -1,6 +1,22 @@
Tests side-effect-free evaluation
-Paused on 'debugger;'
+
+Running test: basicTest
+Paused on "debugger;"
f() returns 1
g() returns 2
f() returns 1
g() throws EvalError
+
+Running test: testDate
+someGlobalDate.setDate(10) : throws
+new Date().setDate(10) : ok
+someGlobalDate.setFullYear(1991) : throws
+new Date().setFullYear(1991) : ok
+someGlobalDate.setHours(0) : throws
+new Date().setHours(0) : ok
+someGlobalDate.getDate() : ok
+new Date().getDate() : ok
+someGlobalDate.getFullYear() : ok
+new Date().getFullYear() : ok
+someGlobalDate.getHours() : ok
+new Date().getHours() : ok
diff --git a/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate.js b/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate.js
index a070334980..4a70fd38a2 100644
--- a/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate.js
+++ b/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate.js
@@ -5,6 +5,7 @@
let {session, contextGroup, Protocol} = InspectorTest.start('Tests side-effect-free evaluation');
contextGroup.addScript(`
+var someGlobalDate = new Date();
function testFunction()
{
var o = 0;
@@ -15,43 +16,40 @@ function testFunction()
}
//# sourceURL=foo.js`);
-Protocol.Debugger.enable();
-
-Protocol.Debugger.oncePaused().then(debuggerPaused);
-
-Protocol.Runtime.evaluate({ "expression": "setTimeout(testFunction, 0)" });
-
-var topFrameId;
-
-function debuggerPaused(messageObject)
-{
- InspectorTest.log("Paused on 'debugger;'");
-
- topFrameId = messageObject.params.callFrames[0].callFrameId;
- Protocol.Debugger.evaluateOnCallFrame({ callFrameId: topFrameId, expression: "f()"}).then(evaluatedFirst);
-}
-
-function evaluatedFirst(response)
-{
- InspectorTest.log("f() returns " + response.result.result.value);
- Protocol.Debugger.evaluateOnCallFrame({ callFrameId: topFrameId, expression: "g()"}).then(evaluatedSecond);
-}
-
-function evaluatedSecond(response)
-{
- InspectorTest.log("g() returns " + response.result.result.value);
- Protocol.Debugger.evaluateOnCallFrame({ callFrameId: topFrameId, expression: "f()", throwOnSideEffect: true}).then(evaluatedThird);
-}
-
-function evaluatedThird(response)
-{
- InspectorTest.log("f() returns " + response.result.result.value);
- Protocol.Debugger.evaluateOnCallFrame({ callFrameId: topFrameId, expression: "g()", throwOnSideEffect: true}).then(evaluatedFourth);
- InspectorTest.completeTest();
-}
-
-function evaluatedFourth(response)
-{
- InspectorTest.log("g() throws " + response.result.result.className);
- InspectorTest.completeTest();
-}
+InspectorTest.runAsyncTestSuite([
+ async function basicTest() {
+ Protocol.Debugger.enable();
+ Protocol.Runtime.evaluate({ 'expression': 'setTimeout(testFunction, 0)' });
+ const {params:{callFrames:[{callFrameId: topFrameId}]}} = await Protocol.Debugger.oncePaused();
+ InspectorTest.log('Paused on "debugger;"');
+ const {result:{result:{value: fResult}}} = await Protocol.Debugger.evaluateOnCallFrame({ callFrameId: topFrameId, expression: 'f()' });
+ InspectorTest.log('f() returns ' + fResult);
+ const {result:{result:{value: gResult}}} = await Protocol.Debugger.evaluateOnCallFrame({ callFrameId: topFrameId, expression: 'g()' });
+ InspectorTest.log('g() returns ' + gResult);
+ const {result:{result:{value: fResultSideEffect}}} = await Protocol.Debugger.evaluateOnCallFrame({ callFrameId: topFrameId, expression: 'f()', throwOnSideEffect: true});
+ InspectorTest.log('f() returns ' + fResultSideEffect);
+ const {result:{result:{className}}} = await Protocol.Debugger.evaluateOnCallFrame({ callFrameId: topFrameId, expression: 'g()', throwOnSideEffect: true});
+ InspectorTest.log('g() throws ' + className);
+ },
+
+ async function testDate() {
+ const check = async (expression) => {
+ const {result:{exceptionDetails}} = await Protocol.Runtime.evaluate({expression, throwOnSideEffect: true});
+ InspectorTest.log(expression + ' : ' + (exceptionDetails ? 'throws' : 'ok'));
+ };
+ // setters are only ok on temporary objects
+ await check('someGlobalDate.setDate(10)');
+ await check('new Date().setDate(10)');
+ await check('someGlobalDate.setFullYear(1991)');
+ await check('new Date().setFullYear(1991)');
+ await check('someGlobalDate.setHours(0)');
+ await check('new Date().setHours(0)');
+ // getters are ok on any Date
+ await check('someGlobalDate.getDate()');
+ await check('new Date().getDate()');
+ await check('someGlobalDate.getFullYear()');
+ await check('new Date().getFullYear()');
+ await check('someGlobalDate.getHours()');
+ await check('new Date().getHours()');
+ }
+]);
diff --git a/deps/v8/test/inspector/debugger/wasm-conditional-breakpoints-expected.txt b/deps/v8/test/inspector/debugger/wasm-conditional-breakpoints-expected.txt
new file mode 100644
index 0000000000..f28458c4df
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-conditional-breakpoints-expected.txt
@@ -0,0 +1,66 @@
+Test conditional breakpoints in wasm.
+
+Running test: test
+Instantiating.
+Waiting for wasm script.
+Got wasm script: wasm://wasm/f00dbc56
+Setting breakpoint at offset 34, condition "false"
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 34
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+Setting breakpoint at offset 41, condition "true"
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 41
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+Setting breakpoint at offset 46, condition "$var0.value==3"
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 46
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+Calling fib(5)
+Script wasm://wasm/f00dbc56 byte offset 41: Wasm opcode 0x0d (kExprBrIf)
+$var0: 5 (i32)
+Script wasm://wasm/f00dbc56 byte offset 41: Wasm opcode 0x0d (kExprBrIf)
+$var0: 4 (i32)
+Script wasm://wasm/f00dbc56 byte offset 41: Wasm opcode 0x0d (kExprBrIf)
+$var0: 3 (i32)
+Script wasm://wasm/f00dbc56 byte offset 46: Wasm opcode 0x10 (kExprCallFunction)
+$var0: 3 (i32)
+Script wasm://wasm/f00dbc56 byte offset 41: Wasm opcode 0x0d (kExprBrIf)
+$var0: 2 (i32)
+Script wasm://wasm/f00dbc56 byte offset 41: Wasm opcode 0x0d (kExprBrIf)
+$var0: 1 (i32)
+Script wasm://wasm/f00dbc56 byte offset 41: Wasm opcode 0x0d (kExprBrIf)
+$var0: 2 (i32)
+Script wasm://wasm/f00dbc56 byte offset 41: Wasm opcode 0x0d (kExprBrIf)
+$var0: 3 (i32)
+Script wasm://wasm/f00dbc56 byte offset 46: Wasm opcode 0x10 (kExprCallFunction)
+$var0: 3 (i32)
+Script wasm://wasm/f00dbc56 byte offset 41: Wasm opcode 0x0d (kExprBrIf)
+$var0: 2 (i32)
+Script wasm://wasm/f00dbc56 byte offset 41: Wasm opcode 0x0d (kExprBrIf)
+$var0: 1 (i32)
+fib returned!
diff --git a/deps/v8/test/inspector/debugger/wasm-conditional-breakpoints.js b/deps/v8/test/inspector/debugger/wasm-conditional-breakpoints.js
new file mode 100644
index 0000000000..6099a8a70f
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-conditional-breakpoints.js
@@ -0,0 +1,75 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+utils.load('test/inspector/wasm-inspector-test.js');
+
+const {session, contextGroup, Protocol} =
+ InspectorTest.start('Test conditional breakpoints in wasm.');
+session.setupScriptMap();
+
+const builder = new WasmModuleBuilder();
+
+const fib_body = [
+ kExprLocalGet, 0, // i (for br_if or i32.sub)
+ kExprLocalGet, 0, kExprI32Const, 2, kExprI32LeS, // i < 2 ?
+ kExprBrIf, 0, // --> return i
+ kExprI32Const, 1, kExprI32Sub, // i - 1
+ kExprCallFunction, 0, // fib(i - 1)
+ kExprLocalGet, 0, kExprI32Const, 2, kExprI32Sub, // i - 2
+ kExprCallFunction, 0, // fib(i - 2)
+ kExprI32Add // add (and return)
+];
+const fib = builder.addFunction('fib', kSig_i_i).addBody(fib_body).exportFunc();
+
+const module_bytes = builder.toArray();
+
+const find_offset = opcode => fib.body_offset + fib_body.indexOf(opcode);
+
+const breakpoints = [
+ {loc: find_offset(kExprLocalGet), cond: 'false'},
+ {loc: find_offset(kExprBrIf), cond: 'true'},
+ {loc: find_offset(kExprCallFunction), cond: '$var0.value==3'}
+];
+
+Protocol.Debugger.onPaused(async msg => {
+ var frames = msg.params.callFrames;
+ await session.logSourceLocation(frames[0].location);
+ var frame = msg.params.callFrames[0];
+ for (var scope of frame.scopeChain) {
+ if (scope.type != 'local') continue;
+ var properties = await Protocol.Runtime.getProperties(
+ {'objectId': scope.object.objectId});
+ for (var {name, value} of properties.result.result) {
+ value = await WasmInspectorTest.getWasmValue(value);
+ InspectorTest.log(`${name}: ${value}`);
+ }
+ }
+ Protocol.Debugger.resume();
+});
+
+InspectorTest.runAsyncTestSuite([
+ async function test() {
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Instantiating.');
+ // Spawn asynchronously:
+ WasmInspectorTest.instantiate(module_bytes);
+ InspectorTest.log('Waiting for wasm script.');
+ const [, {params: wasm_script}] = await Protocol.Debugger.onceScriptParsed(2);
+ InspectorTest.log(`Got wasm script: ${wasm_script.url}`);
+ for (let breakpoint of breakpoints) {
+ InspectorTest.log(`Setting breakpoint at offset ${breakpoint.loc}, condition "${breakpoint.cond}"`);
+ InspectorTest.logMessage(await Protocol.Debugger.setBreakpoint({
+ 'location': {
+ 'scriptId': wasm_script.scriptId,
+ 'lineNumber': 0,
+ 'columnNumber': breakpoint.loc
+ },
+ condition: breakpoint.cond
+ }));
+ }
+ InspectorTest.log('Calling fib(5)');
+ await WasmInspectorTest.evalWithUrl('instance.exports.fib(5)', 'runWasm');
+ InspectorTest.log('fib returned!');
+ }
+]);
diff --git a/deps/v8/test/inspector/debugger/wasm-evaluate-on-call-frame-expected.txt b/deps/v8/test/inspector/debugger/wasm-evaluate-on-call-frame-expected.txt
index 6f47eac211..04bc977dda 100644
--- a/deps/v8/test/inspector/debugger/wasm-evaluate-on-call-frame-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-evaluate-on-call-frame-expected.txt
@@ -18,33 +18,33 @@ Debugger paused in main.
> globals = Globals
> typeof globals = "object"
> Object.keys(globals) = Array(2)
-> globals[0] = 0
-> globals[1] = 1
-> globals[2] = 2n
-> globals[3] = 3n
-> globals["$global0"] = 0
-> $global0 = 0
-> globals["$global3"] = 2n
-> $global3 = 2n
+> globals[0] = i32 {0}
+> globals[1] = i32 {1}
+> globals[2] = i64 {2n}
+> globals[3] = i64 {3n}
+> globals["$global0"] = i32 {0}
+> $global0 = i32 {0}
+> globals["$global3"] = i64 {2n}
+> $global3 = i64 {2n}
Stepping twice in main.
Debugger paused in main.
-> globals[0] = 0
-> globals[1] = 1
-> globals[2] = 2n
-> globals[3] = 42n
-> globals["$global0"] = 0
-> $global0 = 0
-> globals["$global3"] = 2n
-> $global3 = 2n
+> globals[0] = i32 {0}
+> globals[1] = i32 {1}
+> globals[2] = i64 {2n}
+> globals[3] = i64 {42n}
+> globals["$global0"] = i32 {0}
+> $global0 = i32 {0}
+> globals["$global3"] = i64 {2n}
+> $global3 = i64 {2n}
Changing global from JavaScript.
-> globals[0] = 0
-> globals[1] = 21
-> globals[2] = 2n
-> globals[3] = 42n
-> globals["$global0"] = 0
-> $global0 = 0
-> globals["$global3"] = 2n
-> $global3 = 2n
+> globals[0] = i32 {0}
+> globals[1] = i32 {21}
+> globals[2] = i64 {2n}
+> globals[3] = i64 {42n}
+> globals["$global0"] = i32 {0}
+> $global0 = i32 {0}
+> globals["$global3"] = i64 {2n}
+> $global3 = i64 {2n}
Running test: testFunctions
Compile module.
@@ -54,17 +54,19 @@ Call main.
Debugger paused in main.
> functions = Functions
> typeof functions = "object"
-> Object.keys(functions) = Array(3)
+> Object.keys(functions) = Array(4)
> functions[0] = function 0() { [native code] }
> functions[1] = function 1() { [native code] }
> functions[2] = function 2() { [native code] }
> functions[3] = function 3() { [native code] }
-> functions["$main"] = function 0() { [native code] }
-> $main = function 0() { [native code] }
-> functions["$func1"] = function 1() { [native code] }
-> $func1 = function 1() { [native code] }
-> functions["$func3"] = function 3() { [native code] }
-> $func3 = function 3() { [native code] }
+> functions[4] = function 4() { [native code] }
+> functions["$foo.bar"] = function 0() { [native code] }
+> functions["$main"] = function 1() { [native code] }
+> $main = function 1() { [native code] }
+> functions["$func2"] = function 2() { [native code] }
+> $func2 = function 2() { [native code] }
+> functions["$func4"] = function 4() { [native code] }
+> $func4 = function 4() { [native code] }
Running test: testLocals
Compile module.
@@ -75,22 +77,22 @@ Debugger paused in main.
> locals = Locals
> typeof locals = "object"
> Object.keys(locals) = Array(2)
-> locals[0] = 3
-> locals[1] = 6
-> locals[2] = 0
-> locals["$x"] = 3
-> $x = 3
-> locals["$var2"] = 0
-> $var2 = 0
+> locals[0] = i32 {3}
+> locals[1] = i32 {6}
+> locals[2] = i32 {0}
+> locals["$x"] = i32 {3}
+> $x = i32 {3}
+> locals["$var2"] = i32 {0}
+> $var2 = i32 {0}
Stepping twice in main.
Debugger paused in main.
-> locals[0] = 3
-> locals[1] = 6
-> locals[2] = 42
-> locals["$x"] = 3
-> $x = 3
-> locals["$var2"] = 42
-> $var2 = 42
+> locals[0] = i32 {3}
+> locals[1] = i32 {6}
+> locals[2] = i32 {42}
+> locals["$x"] = i32 {3}
+> $x = i32 {3}
+> locals["$var2"] = i32 {42}
+> $var2 = i32 {42}
Running test: testMemories
Compile module.
@@ -131,5 +133,5 @@ Stepping twice in main.
Debugger paused in main.
> stack = Stack
> Object.keys(stack) = Array(2)
-> stack[0] = 5
-> stack[1] = 42
+> stack[0] = i32 {5}
+> stack[1] = i32 {42}
diff --git a/deps/v8/test/inspector/debugger/wasm-evaluate-on-call-frame.js b/deps/v8/test/inspector/debugger/wasm-evaluate-on-call-frame.js
index 5ee458fc12..375d78d8bd 100644
--- a/deps/v8/test/inspector/debugger/wasm-evaluate-on-call-frame.js
+++ b/deps/v8/test/inspector/debugger/wasm-evaluate-on-call-frame.js
@@ -21,29 +21,34 @@ async function compileModule(builder) {
return [result.result, params.scriptId];
}
-async function instantiateModule({objectId}) {
+async function instantiateModule({objectId}, importObject) {
const {result: {result}} = await Protocol.Runtime.callFunctionOn({
- functionDeclaration: 'function() { return new WebAssembly.Instance(this); }',
+ arguments: importObject ? [importObject] : [],
+ functionDeclaration: 'function(importObject) { return new WebAssembly.Instance(this, importObject); }',
objectId
});
return result;
}
async function dumpOnCallFrame(callFrameId, expression) {
- const {result: {result}} = await Protocol.Debugger.evaluateOnCallFrame({
+ const {result: {result: object}} = await Protocol.Debugger.evaluateOnCallFrame({
callFrameId, expression
});
- if ('description' in result) {
- InspectorTest.log(`> ${expression} = ${result.description}`);
+ if (object.type === 'object' && object.subtype === 'wasmvalue') {
+ const {result: {result: properties}} = await Protocol.Runtime.getProperties({objectId: object.objectId, ownProperties: true})
+ const valueProperty = properties.find(p => p.name === 'value');
+ InspectorTest.log(`> ${expression} = ${object.description} {${valueProperty.value.description}}`);
+ } else if ('description' in object) {
+ InspectorTest.log(`> ${expression} = ${object.description}`);
} else {
- InspectorTest.log(`> ${expression} = ${JSON.stringify(result.value)}`);
+ InspectorTest.log(`> ${expression} = ${JSON.stringify(object.value)}`);
}
}
async function dumpKeysOnCallFrame(callFrameId, object, keys) {
for (const key of keys) {
await dumpOnCallFrame(callFrameId, `${object}[${JSON.stringify(key)}]`);
- if (typeof key === 'string') {
+ if (typeof key === 'string' && key.indexOf('.') < 0) {
await dumpOnCallFrame(callFrameId, `${key}`);
}
}
@@ -150,23 +155,24 @@ InspectorTest.runAsyncTestSuite([
async function testFunctions() {
const builder = new WasmModuleBuilder();
+ builder.addImport('foo', 'bar', kSig_v_v);
const main = builder.addFunction('main', kSig_i_v)
.addBody([
kExprI32Const, 0,
]).exportFunc();
- builder.addFunction('func1', kSig_i_v)
+ builder.addFunction('func2', kSig_i_v)
.addBody([
kExprI32Const, 1,
]);
builder.addFunction(undefined, kSig_i_v)
.addBody([
kExprI32Const, 2,
- ]).exportAs('func1');
+ ]).exportAs('func2');
builder.addFunction(undefined, kSig_i_v)
.addBody([
kExprI32Const, 3,
]);
- const KEYS = [0, 1, 2, 3, '$main', '$func1', '$func3'];
+ const KEYS = [0, 1, 2, 3, 4, '$foo.bar', '$main', '$func2', '$func4'];
InspectorTest.log('Compile module.');
const [module, scriptId] = await compileModule(builder);
@@ -177,7 +183,10 @@ InspectorTest.runAsyncTestSuite([
});
InspectorTest.log('Instantiate module.');
- const instance = await instantiateModule(module);
+ const {result: { result: importObject }} = await Protocol.Runtime.evaluate({
+ expression: `({foo: {bar() { }}})`
+ });
+ const instance = await instantiateModule(module, importObject);
InspectorTest.log('Call main.');
const callMainPromise = Protocol.Runtime.callFunctionOn({
diff --git a/deps/v8/test/inspector/debugger/wasm-inspect-many-registers-expected.txt b/deps/v8/test/inspector/debugger/wasm-inspect-many-registers-expected.txt
index 2c05e3c1a3..8bc226412d 100644
--- a/deps/v8/test/inspector/debugger/wasm-inspect-many-registers-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-inspect-many-registers-expected.txt
@@ -5,99 +5,99 @@ Testing i32.
Waiting for wasm script.
Setting 20 breakpoints.
Calling main.
-Paused at offset 48; wasm-expression-stack: []; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 50; wasm-expression-stack: [0]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 52; wasm-expression-stack: [0, 1]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 54; wasm-expression-stack: [0, 1, 2]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 56; wasm-expression-stack: [0, 1, 2, 3]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 58; wasm-expression-stack: [0, 1, 2, 3, 4]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 60; wasm-expression-stack: [0, 1, 2, 3, 4, 5]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 62; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 64; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 66; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7, 8]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 68; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 69; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7, 17]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 70; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 24]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 71; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 30]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 72; wasm-expression-stack: [0, 1, 2, 3, 4, 35]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 73; wasm-expression-stack: [0, 1, 2, 3, 39]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 74; wasm-expression-stack: [0, 1, 2, 42]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 75; wasm-expression-stack: [0, 1, 44]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 76; wasm-expression-stack: [0, 45]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 77; wasm-expression-stack: [45]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+Paused at offset 48; wasm-expression-stack: []; local: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 8 (i32), 9 (i32)]
+Paused at offset 50; wasm-expression-stack: [0 (i32)]; local: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 8 (i32), 9 (i32)]
+Paused at offset 52; wasm-expression-stack: [0 (i32), 1 (i32)]; local: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 8 (i32), 9 (i32)]
+Paused at offset 54; wasm-expression-stack: [0 (i32), 1 (i32), 2 (i32)]; local: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 8 (i32), 9 (i32)]
+Paused at offset 56; wasm-expression-stack: [0 (i32), 1 (i32), 2 (i32), 3 (i32)]; local: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 8 (i32), 9 (i32)]
+Paused at offset 58; wasm-expression-stack: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32)]; local: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 8 (i32), 9 (i32)]
+Paused at offset 60; wasm-expression-stack: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32)]; local: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 8 (i32), 9 (i32)]
+Paused at offset 62; wasm-expression-stack: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32)]; local: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 8 (i32), 9 (i32)]
+Paused at offset 64; wasm-expression-stack: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32)]; local: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 8 (i32), 9 (i32)]
+Paused at offset 66; wasm-expression-stack: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 8 (i32)]; local: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 8 (i32), 9 (i32)]
+Paused at offset 68; wasm-expression-stack: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 8 (i32), 9 (i32)]; local: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 8 (i32), 9 (i32)]
+Paused at offset 69; wasm-expression-stack: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 17 (i32)]; local: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 8 (i32), 9 (i32)]
+Paused at offset 70; wasm-expression-stack: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 24 (i32)]; local: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 8 (i32), 9 (i32)]
+Paused at offset 71; wasm-expression-stack: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 30 (i32)]; local: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 8 (i32), 9 (i32)]
+Paused at offset 72; wasm-expression-stack: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 35 (i32)]; local: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 8 (i32), 9 (i32)]
+Paused at offset 73; wasm-expression-stack: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 39 (i32)]; local: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 8 (i32), 9 (i32)]
+Paused at offset 74; wasm-expression-stack: [0 (i32), 1 (i32), 2 (i32), 42 (i32)]; local: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 8 (i32), 9 (i32)]
+Paused at offset 75; wasm-expression-stack: [0 (i32), 1 (i32), 44 (i32)]; local: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 8 (i32), 9 (i32)]
+Paused at offset 76; wasm-expression-stack: [0 (i32), 45 (i32)]; local: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 8 (i32), 9 (i32)]
+Paused at offset 77; wasm-expression-stack: [45 (i32)]; local: [0 (i32), 1 (i32), 2 (i32), 3 (i32), 4 (i32), 5 (i32), 6 (i32), 7 (i32), 8 (i32), 9 (i32)]
main returned.
Testing i64.
Waiting for wasm script.
Setting 20 breakpoints.
Calling main.
-Paused at offset 48; wasm-expression-stack: []; local: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n]
-Paused at offset 50; wasm-expression-stack: [0n]; local: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n]
-Paused at offset 52; wasm-expression-stack: [0n, 1n]; local: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n]
-Paused at offset 54; wasm-expression-stack: [0n, 1n, 2n]; local: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n]
-Paused at offset 56; wasm-expression-stack: [0n, 1n, 2n, 3n]; local: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n]
-Paused at offset 58; wasm-expression-stack: [0n, 1n, 2n, 3n, 4n]; local: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n]
-Paused at offset 60; wasm-expression-stack: [0n, 1n, 2n, 3n, 4n, 5n]; local: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n]
-Paused at offset 62; wasm-expression-stack: [0n, 1n, 2n, 3n, 4n, 5n, 6n]; local: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n]
-Paused at offset 64; wasm-expression-stack: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n]; local: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n]
-Paused at offset 66; wasm-expression-stack: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n]; local: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n]
-Paused at offset 68; wasm-expression-stack: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n]; local: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n]
-Paused at offset 69; wasm-expression-stack: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 17n]; local: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n]
-Paused at offset 70; wasm-expression-stack: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 24n]; local: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n]
-Paused at offset 71; wasm-expression-stack: [0n, 1n, 2n, 3n, 4n, 5n, 30n]; local: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n]
-Paused at offset 72; wasm-expression-stack: [0n, 1n, 2n, 3n, 4n, 35n]; local: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n]
-Paused at offset 73; wasm-expression-stack: [0n, 1n, 2n, 3n, 39n]; local: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n]
-Paused at offset 74; wasm-expression-stack: [0n, 1n, 2n, 42n]; local: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n]
-Paused at offset 75; wasm-expression-stack: [0n, 1n, 44n]; local: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n]
-Paused at offset 76; wasm-expression-stack: [0n, 45n]; local: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n]
-Paused at offset 77; wasm-expression-stack: [45n]; local: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n]
+Paused at offset 48; wasm-expression-stack: []; local: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 8n (i64), 9n (i64)]
+Paused at offset 50; wasm-expression-stack: [0n (i64)]; local: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 8n (i64), 9n (i64)]
+Paused at offset 52; wasm-expression-stack: [0n (i64), 1n (i64)]; local: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 8n (i64), 9n (i64)]
+Paused at offset 54; wasm-expression-stack: [0n (i64), 1n (i64), 2n (i64)]; local: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 8n (i64), 9n (i64)]
+Paused at offset 56; wasm-expression-stack: [0n (i64), 1n (i64), 2n (i64), 3n (i64)]; local: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 8n (i64), 9n (i64)]
+Paused at offset 58; wasm-expression-stack: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64)]; local: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 8n (i64), 9n (i64)]
+Paused at offset 60; wasm-expression-stack: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64)]; local: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 8n (i64), 9n (i64)]
+Paused at offset 62; wasm-expression-stack: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64)]; local: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 8n (i64), 9n (i64)]
+Paused at offset 64; wasm-expression-stack: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64)]; local: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 8n (i64), 9n (i64)]
+Paused at offset 66; wasm-expression-stack: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 8n (i64)]; local: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 8n (i64), 9n (i64)]
+Paused at offset 68; wasm-expression-stack: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 8n (i64), 9n (i64)]; local: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 8n (i64), 9n (i64)]
+Paused at offset 69; wasm-expression-stack: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 17n (i64)]; local: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 8n (i64), 9n (i64)]
+Paused at offset 70; wasm-expression-stack: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 24n (i64)]; local: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 8n (i64), 9n (i64)]
+Paused at offset 71; wasm-expression-stack: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 30n (i64)]; local: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 8n (i64), 9n (i64)]
+Paused at offset 72; wasm-expression-stack: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 35n (i64)]; local: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 8n (i64), 9n (i64)]
+Paused at offset 73; wasm-expression-stack: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 39n (i64)]; local: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 8n (i64), 9n (i64)]
+Paused at offset 74; wasm-expression-stack: [0n (i64), 1n (i64), 2n (i64), 42n (i64)]; local: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 8n (i64), 9n (i64)]
+Paused at offset 75; wasm-expression-stack: [0n (i64), 1n (i64), 44n (i64)]; local: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 8n (i64), 9n (i64)]
+Paused at offset 76; wasm-expression-stack: [0n (i64), 45n (i64)]; local: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 8n (i64), 9n (i64)]
+Paused at offset 77; wasm-expression-stack: [45n (i64)]; local: [0n (i64), 1n (i64), 2n (i64), 3n (i64), 4n (i64), 5n (i64), 6n (i64), 7n (i64), 8n (i64), 9n (i64)]
main returned.
Testing f32.
Waiting for wasm script.
Setting 20 breakpoints.
Calling main.
-Paused at offset 48; wasm-expression-stack: []; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 50; wasm-expression-stack: [0]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 52; wasm-expression-stack: [0, 1]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 54; wasm-expression-stack: [0, 1, 2]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 56; wasm-expression-stack: [0, 1, 2, 3]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 58; wasm-expression-stack: [0, 1, 2, 3, 4]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 60; wasm-expression-stack: [0, 1, 2, 3, 4, 5]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 62; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 64; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 66; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7, 8]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 68; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 69; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7, 17]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 70; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 24]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 71; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 30]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 72; wasm-expression-stack: [0, 1, 2, 3, 4, 35]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 73; wasm-expression-stack: [0, 1, 2, 3, 39]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 74; wasm-expression-stack: [0, 1, 2, 42]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 75; wasm-expression-stack: [0, 1, 44]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 76; wasm-expression-stack: [0, 45]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 77; wasm-expression-stack: [45]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+Paused at offset 48; wasm-expression-stack: []; local: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 8 (f32), 9 (f32)]
+Paused at offset 50; wasm-expression-stack: [0 (f32)]; local: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 8 (f32), 9 (f32)]
+Paused at offset 52; wasm-expression-stack: [0 (f32), 1 (f32)]; local: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 8 (f32), 9 (f32)]
+Paused at offset 54; wasm-expression-stack: [0 (f32), 1 (f32), 2 (f32)]; local: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 8 (f32), 9 (f32)]
+Paused at offset 56; wasm-expression-stack: [0 (f32), 1 (f32), 2 (f32), 3 (f32)]; local: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 8 (f32), 9 (f32)]
+Paused at offset 58; wasm-expression-stack: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32)]; local: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 8 (f32), 9 (f32)]
+Paused at offset 60; wasm-expression-stack: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32)]; local: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 8 (f32), 9 (f32)]
+Paused at offset 62; wasm-expression-stack: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32)]; local: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 8 (f32), 9 (f32)]
+Paused at offset 64; wasm-expression-stack: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32)]; local: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 8 (f32), 9 (f32)]
+Paused at offset 66; wasm-expression-stack: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 8 (f32)]; local: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 8 (f32), 9 (f32)]
+Paused at offset 68; wasm-expression-stack: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 8 (f32), 9 (f32)]; local: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 8 (f32), 9 (f32)]
+Paused at offset 69; wasm-expression-stack: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 17 (f32)]; local: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 8 (f32), 9 (f32)]
+Paused at offset 70; wasm-expression-stack: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 24 (f32)]; local: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 8 (f32), 9 (f32)]
+Paused at offset 71; wasm-expression-stack: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 30 (f32)]; local: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 8 (f32), 9 (f32)]
+Paused at offset 72; wasm-expression-stack: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 35 (f32)]; local: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 8 (f32), 9 (f32)]
+Paused at offset 73; wasm-expression-stack: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 39 (f32)]; local: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 8 (f32), 9 (f32)]
+Paused at offset 74; wasm-expression-stack: [0 (f32), 1 (f32), 2 (f32), 42 (f32)]; local: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 8 (f32), 9 (f32)]
+Paused at offset 75; wasm-expression-stack: [0 (f32), 1 (f32), 44 (f32)]; local: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 8 (f32), 9 (f32)]
+Paused at offset 76; wasm-expression-stack: [0 (f32), 45 (f32)]; local: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 8 (f32), 9 (f32)]
+Paused at offset 77; wasm-expression-stack: [45 (f32)]; local: [0 (f32), 1 (f32), 2 (f32), 3 (f32), 4 (f32), 5 (f32), 6 (f32), 7 (f32), 8 (f32), 9 (f32)]
main returned.
Testing f64.
Waiting for wasm script.
Setting 20 breakpoints.
Calling main.
-Paused at offset 48; wasm-expression-stack: []; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 50; wasm-expression-stack: [0]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 52; wasm-expression-stack: [0, 1]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 54; wasm-expression-stack: [0, 1, 2]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 56; wasm-expression-stack: [0, 1, 2, 3]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 58; wasm-expression-stack: [0, 1, 2, 3, 4]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 60; wasm-expression-stack: [0, 1, 2, 3, 4, 5]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 62; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 64; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 66; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7, 8]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 68; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 69; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7, 17]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 70; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 24]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 71; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 30]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 72; wasm-expression-stack: [0, 1, 2, 3, 4, 35]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 73; wasm-expression-stack: [0, 1, 2, 3, 39]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 74; wasm-expression-stack: [0, 1, 2, 42]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 75; wasm-expression-stack: [0, 1, 44]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 76; wasm-expression-stack: [0, 45]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 77; wasm-expression-stack: [45]; local: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+Paused at offset 48; wasm-expression-stack: []; local: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 8 (f64), 9 (f64)]
+Paused at offset 50; wasm-expression-stack: [0 (f64)]; local: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 8 (f64), 9 (f64)]
+Paused at offset 52; wasm-expression-stack: [0 (f64), 1 (f64)]; local: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 8 (f64), 9 (f64)]
+Paused at offset 54; wasm-expression-stack: [0 (f64), 1 (f64), 2 (f64)]; local: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 8 (f64), 9 (f64)]
+Paused at offset 56; wasm-expression-stack: [0 (f64), 1 (f64), 2 (f64), 3 (f64)]; local: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 8 (f64), 9 (f64)]
+Paused at offset 58; wasm-expression-stack: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64)]; local: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 8 (f64), 9 (f64)]
+Paused at offset 60; wasm-expression-stack: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64)]; local: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 8 (f64), 9 (f64)]
+Paused at offset 62; wasm-expression-stack: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64)]; local: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 8 (f64), 9 (f64)]
+Paused at offset 64; wasm-expression-stack: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64)]; local: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 8 (f64), 9 (f64)]
+Paused at offset 66; wasm-expression-stack: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 8 (f64)]; local: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 8 (f64), 9 (f64)]
+Paused at offset 68; wasm-expression-stack: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 8 (f64), 9 (f64)]; local: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 8 (f64), 9 (f64)]
+Paused at offset 69; wasm-expression-stack: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 17 (f64)]; local: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 8 (f64), 9 (f64)]
+Paused at offset 70; wasm-expression-stack: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 24 (f64)]; local: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 8 (f64), 9 (f64)]
+Paused at offset 71; wasm-expression-stack: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 30 (f64)]; local: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 8 (f64), 9 (f64)]
+Paused at offset 72; wasm-expression-stack: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 35 (f64)]; local: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 8 (f64), 9 (f64)]
+Paused at offset 73; wasm-expression-stack: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 39 (f64)]; local: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 8 (f64), 9 (f64)]
+Paused at offset 74; wasm-expression-stack: [0 (f64), 1 (f64), 2 (f64), 42 (f64)]; local: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 8 (f64), 9 (f64)]
+Paused at offset 75; wasm-expression-stack: [0 (f64), 1 (f64), 44 (f64)]; local: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 8 (f64), 9 (f64)]
+Paused at offset 76; wasm-expression-stack: [0 (f64), 45 (f64)]; local: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 8 (f64), 9 (f64)]
+Paused at offset 77; wasm-expression-stack: [45 (f64)]; local: [0 (f64), 1 (f64), 2 (f64), 3 (f64), 4 (f64), 5 (f64), 6 (f64), 7 (f64), 8 (f64), 9 (f64)]
main returned.
diff --git a/deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js b/deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js
index 0737de8899..b3e3c38c58 100644
--- a/deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js
+++ b/deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js
@@ -24,8 +24,9 @@ Protocol.Debugger.onPaused(async msg => {
if (scope.type == 'module') continue;
var scope_properties =
await Protocol.Runtime.getProperties({objectId: scope.object.objectId});
- let str = scope_properties.result.result.map(
- elem => WasmInspectorTest.getWasmValue(elem.value)).join(', ');
+ let str = (await Promise.all(scope_properties.result.result.map(
+ elem => WasmInspectorTest.getWasmValue(elem.value))))
+ .join(', ');
line.push(`${scope.type}: [${str}]`);
}
InspectorTest.log(line.join('; '));
diff --git a/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint-expected.txt b/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint-expected.txt
new file mode 100644
index 0000000000..fd79e43626
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint-expected.txt
@@ -0,0 +1,37 @@
+Test instrumentation breakpoints in wasm.
+
+Running test: testBreakInStartFunction
+Setting instrumentation breakpoint
+{
+ id : <messageId>
+ result : {
+ breakpointId : <breakpointId>
+ }
+}
+Compiling wasm module.
+Paused at v8://test/compile_module with reason "instrumentation".
+Instantiating module.
+Paused at v8://test/instantiate with reason "instrumentation".
+Paused at wasm://wasm/20da547a with reason "instrumentation".
+Script wasm://wasm/20da547a byte offset 26: Wasm opcode 0x01 (kExprNop)
+Instantiating a second time (should trigger no breakpoint).
+Paused at v8://test/instantiate2 with reason "instrumentation".
+Done.
+
+Running test: testBreakInStartFunctionCompileTwice
+Setting instrumentation breakpoint
+{
+ id : <messageId>
+ result : {
+ breakpointId : <breakpointId>
+ }
+}
+Instantiating module.
+Paused at v8://test/instantiate with reason "instrumentation".
+Paused at wasm://wasm/20da547a with reason "instrumentation".
+Script wasm://wasm/20da547a byte offset 26: Wasm opcode 0x01 (kExprNop)
+Instantiating a second time (should trigger another breakpoint).
+Paused at v8://test/instantiate with reason "instrumentation".
+Paused at wasm://wasm/20da547a with reason "instrumentation".
+Script wasm://wasm/20da547a byte offset 26: Wasm opcode 0x01 (kExprNop)
+Done.
diff --git a/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint.js b/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint.js
new file mode 100644
index 0000000000..feeff65999
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint.js
@@ -0,0 +1,68 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+utils.load('test/inspector/wasm-inspector-test.js');
+
+const {session, contextGroup, Protocol} =
+ InspectorTest.start('Test instrumentation breakpoints in wasm.');
+session.setupScriptMap();
+
+Protocol.Debugger.onPaused(async msg => {
+ let top_frame = msg.params.callFrames[0];
+ let reason = msg.params.reason;
+ InspectorTest.log(`Paused at ${top_frame.url} with reason "${reason}".`);
+ if (!top_frame.url.startsWith('v8://test/')) {
+ await session.logSourceLocation(top_frame.location);
+ }
+ Protocol.Debugger.resume();
+});
+
+// TODO(clemensb): Add test for 'beforeScriptWithSourceMapExecution'.
+// TODO(clemensb): Add test for module without start function.
+
+InspectorTest.runAsyncTestSuite([
+ async function testBreakInStartFunction() {
+ const builder = new WasmModuleBuilder();
+ const start_fn = builder.addFunction('start', kSig_v_v).addBody([kExprNop]);
+ builder.addStart(start_fn.index);
+
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Setting instrumentation breakpoint');
+ InspectorTest.logMessage(
+ await Protocol.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptExecution'}));
+ InspectorTest.log('Compiling wasm module.');
+ await WasmInspectorTest.compile(builder.toArray());
+ InspectorTest.log('Instantiating module.');
+ await WasmInspectorTest.evalWithUrl(
+ 'new WebAssembly.Instance(module)', 'instantiate');
+ InspectorTest.log(
+ 'Instantiating a second time (should trigger no breakpoint).');
+ await WasmInspectorTest.evalWithUrl(
+ 'new WebAssembly.Instance(module)', 'instantiate2');
+ InspectorTest.log('Done.');
+ await Protocol.Debugger.disable();
+ },
+
+ // If we compile twice, we get two instrumentation breakpoints (which might or
+ // might not be expected, but it's the current behaviour).
+ async function testBreakInStartFunctionCompileTwice() {
+ const builder = new WasmModuleBuilder();
+ const start_fn = builder.addFunction('start', kSig_v_v).addBody([kExprNop]);
+ builder.addStart(start_fn.index);
+
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Setting instrumentation breakpoint');
+ InspectorTest.logMessage(
+ await Protocol.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptExecution'}));
+ InspectorTest.log('Instantiating module.');
+ await WasmInspectorTest.instantiate(builder.toArray());
+ InspectorTest.log(
+ 'Instantiating a second time (should trigger another breakpoint).');
+ await WasmInspectorTest.instantiate(builder.toArray());
+ InspectorTest.log('Done.');
+ await Protocol.Debugger.disable();
+ }
+]);
diff --git a/deps/v8/test/inspector/debugger/wasm-memory-names.js b/deps/v8/test/inspector/debugger/wasm-memory-names.js
index de622a2e95..160e6e83d3 100644
--- a/deps/v8/test/inspector/debugger/wasm-memory-names.js
+++ b/deps/v8/test/inspector/debugger/wasm-memory-names.js
@@ -50,24 +50,6 @@ function createInstance(moduleBytes) {
new WebAssembly.Instance(module, {module_name: {imported_mem: memory}});
}
-async function logMemoryName(msg, Protocol) {
- let callFrames = msg.params.callFrames;
- InspectorTest.log('Paused in debugger.');
-
- let scopeChain = callFrames[0].scopeChain;
- for (let scope of scopeChain) {
- if (scope.type != 'module') continue;
- let moduleObjectProps = (await Protocol.Runtime.getProperties({
- 'objectId': scope.object.objectId
- })).result.result;
-
- for (let prop of moduleObjectProps) {
- if (prop.name === 'instance' || prop.name === 'module') continue;
- InspectorTest.log(`name: ${prop.name}`);
- }
- }
-}
-
async function check(moduleBytes) {
Protocol.Runtime.evaluate({
expression: `
@@ -91,8 +73,17 @@ async function check(moduleBytes) {
InspectorTest.log('Running main.');
Protocol.Runtime.evaluate({expression: 'instance.exports.main()'});
- let msg = await Protocol.Debugger.oncePaused();
- await logMemoryName(msg, Protocol);
+ const {params: {callFrames: [{callFrameId}]}} =
+ await Protocol.Debugger.oncePaused();
+ InspectorTest.log('Paused in debugger.');
+ const {result: {result: {objectId}}} =
+ await Protocol.Debugger.evaluateOnCallFrame(
+ {callFrameId, expression: `memories`});
+ const {result: {result: properties}} =
+ await Protocol.Runtime.getProperties({objectId});
+ for (const {name} of properties) {
+ InspectorTest.log(`name: ${name}`);
+ }
await Protocol.Debugger.resume();
InspectorTest.log('Finished.');
diff --git a/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt b/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
index 4fdec85b8d..a7c8d9eedb 100644
--- a/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
@@ -16,39 +16,45 @@ Scope:
at C (interpreted) (0:169):
- scope (wasm-expression-stack):
- scope (local):
- $i32_arg: 42 (number)
- $i32_local: 0 (number)
- $var2: 0 (number)
+ $i32_arg: 42 (i32)
+ $i32_local: 0 (i32)
+ $var2: 0 (f32)
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
module: Module
- $exported_memory: Memory(1)
- globals: "$exported_global": 0 (number)
+ functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
+ globals: "$exported_global": 0 (i32)
+ memories: "$exported_memory": (Memory)
+ tables: "$exported_table": (Table)
at B (liftoff) (0:158):
- scope (wasm-expression-stack):
- 0: 42 (number)
- 1: 3 (number)
+ 0: 42 (i32)
+ 1: 3 (i32)
- scope (local):
- $i32_arg: 42 (number)
- $i32_local: 0 (number)
- $f32_local: 7.199999809265137 (number)
- $0: 0 (number)
- $var5: 0 (number)
- $v128_local: Uint8Array(16)
+ $i32_arg: 42 (i32)
+ $i32_local: 0 (i32)
+ $f32_local: 7.199999809265137 (f32)
+ $0: 0 (f32)
+ $var5: 0 (f32)
+ $v128_local: i32x4 0x00000017 0x00000017 0x00000017 0x00000017 (v128)
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
module: Module
- $exported_memory: Memory(1)
- globals: "$exported_global": 0 (number)
+ functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
+ globals: "$exported_global": 0 (i32)
+ memories: "$exported_memory": (Memory)
+ tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 42 (number)
+ $var0: 42 (i32)
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
module: Module
- $exported_memory: Memory(1)
- globals: "$exported_global": 0 (number)
+ functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
+ globals: "$exported_global": 0 (i32)
+ memories: "$exported_memory": (Memory)
+ tables: "$exported_table": (Table)
at (anonymous) (0:17):
- scope (global):
-- skipped globals
@@ -58,41 +64,47 @@ Script wasm://wasm/e33badc2 byte offset 171: Wasm opcode 0x24 (kExprGlobalSet)
Scope:
at C (interpreted) (0:171):
- scope (wasm-expression-stack):
- 0: 42 (number)
+ 0: 42 (i32)
- scope (local):
- $i32_arg: 42 (number)
- $i32_local: 0 (number)
- $var2: 0 (number)
+ $i32_arg: 42 (i32)
+ $i32_local: 0 (i32)
+ $var2: 0 (f32)
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
module: Module
- $exported_memory: Memory(1)
- globals: "$exported_global": 0 (number)
+ functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
+ globals: "$exported_global": 0 (i32)
+ memories: "$exported_memory": (Memory)
+ tables: "$exported_table": (Table)
at B (liftoff) (0:158):
- scope (wasm-expression-stack):
- 0: 42 (number)
- 1: 3 (number)
+ 0: 42 (i32)
+ 1: 3 (i32)
- scope (local):
- $i32_arg: 42 (number)
- $i32_local: 0 (number)
- $f32_local: 7.199999809265137 (number)
- $0: 0 (number)
- $var5: 0 (number)
- $v128_local: Uint8Array(16)
+ $i32_arg: 42 (i32)
+ $i32_local: 0 (i32)
+ $f32_local: 7.199999809265137 (f32)
+ $0: 0 (f32)
+ $var5: 0 (f32)
+ $v128_local: i32x4 0x00000017 0x00000017 0x00000017 0x00000017 (v128)
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
module: Module
- $exported_memory: Memory(1)
- globals: "$exported_global": 0 (number)
+ functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
+ globals: "$exported_global": 0 (i32)
+ memories: "$exported_memory": (Memory)
+ tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 42 (number)
+ $var0: 42 (i32)
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
module: Module
- $exported_memory: Memory(1)
- globals: "$exported_global": 0 (number)
+ functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
+ globals: "$exported_global": 0 (i32)
+ memories: "$exported_memory": (Memory)
+ tables: "$exported_table": (Table)
at (anonymous) (0:17):
- scope (global):
-- skipped globals
@@ -103,39 +115,45 @@ Scope:
at C (interpreted) (0:173):
- scope (wasm-expression-stack):
- scope (local):
- $i32_arg: 42 (number)
- $i32_local: 0 (number)
- $var2: 0 (number)
+ $i32_arg: 42 (i32)
+ $i32_local: 0 (i32)
+ $var2: 0 (f32)
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
module: Module
- $exported_memory: Memory(1)
- globals: "$exported_global": 42 (number)
+ functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
+ globals: "$exported_global": 42 (i32)
+ memories: "$exported_memory": (Memory)
+ tables: "$exported_table": (Table)
at B (liftoff) (0:158):
- scope (wasm-expression-stack):
- 0: 42 (number)
- 1: 3 (number)
+ 0: 42 (i32)
+ 1: 3 (i32)
- scope (local):
- $i32_arg: 42 (number)
- $i32_local: 0 (number)
- $f32_local: 7.199999809265137 (number)
- $0: 0 (number)
- $var5: 0 (number)
- $v128_local: Uint8Array(16)
+ $i32_arg: 42 (i32)
+ $i32_local: 0 (i32)
+ $f32_local: 7.199999809265137 (f32)
+ $0: 0 (f32)
+ $var5: 0 (f32)
+ $v128_local: i32x4 0x00000017 0x00000017 0x00000017 0x00000017 (v128)
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
module: Module
- $exported_memory: Memory(1)
- globals: "$exported_global": 42 (number)
+ functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
+ globals: "$exported_global": 42 (i32)
+ memories: "$exported_memory": (Memory)
+ tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 42 (number)
+ $var0: 42 (i32)
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
module: Module
- $exported_memory: Memory(1)
- globals: "$exported_global": 42 (number)
+ functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
+ globals: "$exported_global": 42 (i32)
+ memories: "$exported_memory": (Memory)
+ tables: "$exported_table": (Table)
at (anonymous) (0:17):
- scope (global):
-- skipped globals
@@ -145,41 +163,47 @@ Script wasm://wasm/e33badc2 byte offset 175: Wasm opcode 0x21 (kExprLocalSet)
Scope:
at C (interpreted) (0:175):
- scope (wasm-expression-stack):
- 0: 47 (number)
+ 0: 47 (i32)
- scope (local):
- $i32_arg: 42 (number)
- $i32_local: 0 (number)
- $var2: 0 (number)
+ $i32_arg: 42 (i32)
+ $i32_local: 0 (i32)
+ $var2: 0 (f32)
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
module: Module
- $exported_memory: Memory(1)
- globals: "$exported_global": 42 (number)
+ functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
+ globals: "$exported_global": 42 (i32)
+ memories: "$exported_memory": (Memory)
+ tables: "$exported_table": (Table)
at B (liftoff) (0:158):
- scope (wasm-expression-stack):
- 0: 42 (number)
- 1: 3 (number)
+ 0: 42 (i32)
+ 1: 3 (i32)
- scope (local):
- $i32_arg: 42 (number)
- $i32_local: 0 (number)
- $f32_local: 7.199999809265137 (number)
- $0: 0 (number)
- $var5: 0 (number)
- $v128_local: Uint8Array(16)
+ $i32_arg: 42 (i32)
+ $i32_local: 0 (i32)
+ $f32_local: 7.199999809265137 (f32)
+ $0: 0 (f32)
+ $var5: 0 (f32)
+ $v128_local: i32x4 0x00000017 0x00000017 0x00000017 0x00000017 (v128)
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
module: Module
- $exported_memory: Memory(1)
- globals: "$exported_global": 42 (number)
+ functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
+ globals: "$exported_global": 42 (i32)
+ memories: "$exported_memory": (Memory)
+ tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 42 (number)
+ $var0: 42 (i32)
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
module: Module
- $exported_memory: Memory(1)
- globals: "$exported_global": 42 (number)
+ functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
+ globals: "$exported_global": 42 (i32)
+ memories: "$exported_memory": (Memory)
+ tables: "$exported_table": (Table)
at (anonymous) (0:17):
- scope (global):
-- skipped globals
@@ -190,39 +214,45 @@ Scope:
at C (interpreted) (0:177):
- scope (wasm-expression-stack):
- scope (local):
- $i32_arg: 42 (number)
- $i32_local: 47 (number)
- $var2: 0 (number)
+ $i32_arg: 42 (i32)
+ $i32_local: 47 (i32)
+ $var2: 0 (f32)
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
module: Module
- $exported_memory: Memory(1)
- globals: "$exported_global": 42 (number)
+ functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
+ globals: "$exported_global": 42 (i32)
+ memories: "$exported_memory": (Memory)
+ tables: "$exported_table": (Table)
at B (liftoff) (0:158):
- scope (wasm-expression-stack):
- 0: 42 (number)
- 1: 3 (number)
+ 0: 42 (i32)
+ 1: 3 (i32)
- scope (local):
- $i32_arg: 42 (number)
- $i32_local: 0 (number)
- $f32_local: 7.199999809265137 (number)
- $0: 0 (number)
- $var5: 0 (number)
- $v128_local: Uint8Array(16)
+ $i32_arg: 42 (i32)
+ $i32_local: 0 (i32)
+ $f32_local: 7.199999809265137 (f32)
+ $0: 0 (f32)
+ $var5: 0 (f32)
+ $v128_local: i32x4 0x00000017 0x00000017 0x00000017 0x00000017 (v128)
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
module: Module
- $exported_memory: Memory(1)
- globals: "$exported_global": 42 (number)
+ functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
+ globals: "$exported_global": 42 (i32)
+ memories: "$exported_memory": (Memory)
+ tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 42 (number)
+ $var0: 42 (i32)
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
module: Module
- $exported_memory: Memory(1)
- globals: "$exported_global": 42 (number)
+ functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
+ globals: "$exported_global": 42 (i32)
+ memories: "$exported_memory": (Memory)
+ tables: "$exported_table": (Table)
at (anonymous) (0:17):
- scope (global):
-- skipped globals
@@ -232,29 +262,33 @@ Script wasm://wasm/e33badc2 byte offset 160: Wasm opcode 0x1a (kExprDrop)
Scope:
at B (liftoff) (0:160):
- scope (wasm-expression-stack):
- 0: 42 (number)
- 1: 3 (number)
+ 0: 42 (i32)
+ 1: 3 (i32)
- scope (local):
- $i32_arg: 42 (number)
- $i32_local: 0 (number)
- $f32_local: 7.199999809265137 (number)
- $0: 0 (number)
- $var5: 0 (number)
- $v128_local: Uint8Array(16)
+ $i32_arg: 42 (i32)
+ $i32_local: 0 (i32)
+ $f32_local: 7.199999809265137 (f32)
+ $0: 0 (f32)
+ $var5: 0 (f32)
+ $v128_local: i32x4 0x00000017 0x00000017 0x00000017 0x00000017 (v128)
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
module: Module
- $exported_memory: Memory(1)
- globals: "$exported_global": 42 (number)
+ functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
+ globals: "$exported_global": 42 (i32)
+ memories: "$exported_memory": (Memory)
+ tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 42 (number)
+ $var0: 42 (i32)
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
module: Module
- $exported_memory: Memory(1)
- globals: "$exported_global": 42 (number)
+ functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
+ globals: "$exported_global": 42 (i32)
+ memories: "$exported_memory": (Memory)
+ tables: "$exported_table": (Table)
at (anonymous) (0:17):
- scope (global):
-- skipped globals
@@ -264,28 +298,32 @@ Script wasm://wasm/e33badc2 byte offset 161: Wasm opcode 0x1a (kExprDrop)
Scope:
at B (liftoff) (0:161):
- scope (wasm-expression-stack):
- 0: 42 (number)
+ 0: 42 (i32)
- scope (local):
- $i32_arg: 42 (number)
- $i32_local: 0 (number)
- $f32_local: 7.199999809265137 (number)
- $0: 0 (number)
- $var5: 0 (number)
- $v128_local: Uint8Array(16)
+ $i32_arg: 42 (i32)
+ $i32_local: 0 (i32)
+ $f32_local: 7.199999809265137 (f32)
+ $0: 0 (f32)
+ $var5: 0 (f32)
+ $v128_local: i32x4 0x00000017 0x00000017 0x00000017 0x00000017 (v128)
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
module: Module
- $exported_memory: Memory(1)
- globals: "$exported_global": 42 (number)
+ functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
+ globals: "$exported_global": 42 (i32)
+ memories: "$exported_memory": (Memory)
+ tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 42 (number)
+ $var0: 42 (i32)
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
module: Module
- $exported_memory: Memory(1)
- globals: "$exported_global": 42 (number)
+ functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
+ globals: "$exported_global": 42 (i32)
+ memories: "$exported_memory": (Memory)
+ tables: "$exported_table": (Table)
at (anonymous) (0:17):
- scope (global):
-- skipped globals
@@ -296,26 +334,30 @@ Scope:
at B (liftoff) (0:162):
- scope (wasm-expression-stack):
- scope (local):
- $i32_arg: 42 (number)
- $i32_local: 0 (number)
- $f32_local: 7.199999809265137 (number)
- $0: 0 (number)
- $var5: 0 (number)
- $v128_local: Uint8Array(16)
+ $i32_arg: 42 (i32)
+ $i32_local: 0 (i32)
+ $f32_local: 7.199999809265137 (f32)
+ $0: 0 (f32)
+ $var5: 0 (f32)
+ $v128_local: i32x4 0x00000017 0x00000017 0x00000017 0x00000017 (v128)
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
module: Module
- $exported_memory: Memory(1)
- globals: "$exported_global": 42 (number)
+ functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
+ globals: "$exported_global": 42 (i32)
+ memories: "$exported_memory": (Memory)
+ tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 42 (number)
+ $var0: 42 (i32)
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
module: Module
- $exported_memory: Memory(1)
- globals: "$exported_global": 42 (number)
+ functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
+ globals: "$exported_global": 42 (i32)
+ memories: "$exported_memory": (Memory)
+ tables: "$exported_table": (Table)
at (anonymous) (0:17):
- scope (global):
-- skipped globals
@@ -326,12 +368,14 @@ Scope:
at A (liftoff) (0:130):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 42 (number)
+ $var0: 42 (i32)
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
module: Module
- $exported_memory: Memory(1)
- globals: "$exported_global": 42 (number)
+ functions: "$A (liftoff)": (Function), "$B (liftoff)": (Function), "$C (interpreted)": (Function)
+ globals: "$exported_global": 42 (i32)
+ memories: "$exported_memory": (Memory)
+ tables: "$exported_table": (Table)
at (anonymous) (0:17):
- scope (global):
-- skipped globals
diff --git a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt
index e3f47f8a2e..9ab6c323bf 100644
--- a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt
@@ -14,13 +14,15 @@ at wasm_A (0:38):
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 3 (number)
+ $var0: 3 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Setting breakpoint at offset 39 on script v8://test/runWasm
@@ -41,13 +43,15 @@ at wasm_A (0:39):
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 3 (number)
+ $var0: 3 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -56,10 +60,11 @@ Scope:
at wasm_B (0:45):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 3 (number)
+ $var0: 3 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -67,12 +72,13 @@ Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04 (kExprIf)
Scope:
at wasm_B (0:47):
- scope (wasm-expression-stack):
- 0: 3 (number)
+ 0: 3 (i32)
- scope (local):
- $var0: 3 (number)
+ $var0: 3 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -81,10 +87,11 @@ Scope:
at wasm_B (0:49):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 3 (number)
+ $var0: 3 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -92,12 +99,13 @@ Script wasm://wasm/0c10a5fe byte offset 51: Wasm opcode 0x41 (kExprI32Const)
Scope:
at wasm_B (0:51):
- scope (wasm-expression-stack):
- 0: 3 (number)
+ 0: 3 (i32)
- scope (local):
- $var0: 3 (number)
+ $var0: 3 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -105,13 +113,14 @@ Script wasm://wasm/0c10a5fe byte offset 53: Wasm opcode 0x6b (kExprI32Sub)
Scope:
at wasm_B (0:53):
- scope (wasm-expression-stack):
- 0: 3 (number)
- 1: 1 (number)
+ 0: 3 (i32)
+ 1: 1 (i32)
- scope (local):
- $var0: 3 (number)
+ $var0: 3 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -119,12 +128,13 @@ Script wasm://wasm/0c10a5fe byte offset 54: Wasm opcode 0x21 (kExprLocalSet)
Scope:
at wasm_B (0:54):
- scope (wasm-expression-stack):
- 0: 2 (number)
+ 0: 2 (i32)
- scope (local):
- $var0: 3 (number)
+ $var0: 3 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -136,13 +146,15 @@ at wasm_A (0:38):
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 2 (number)
+ $var0: 2 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -154,13 +166,15 @@ at wasm_A (0:39):
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 2 (number)
+ $var0: 2 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -169,10 +183,11 @@ Scope:
at wasm_B (0:45):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 2 (number)
+ $var0: 2 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -180,12 +195,13 @@ Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04 (kExprIf)
Scope:
at wasm_B (0:47):
- scope (wasm-expression-stack):
- 0: 2 (number)
+ 0: 2 (i32)
- scope (local):
- $var0: 2 (number)
+ $var0: 2 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -194,10 +210,11 @@ Scope:
at wasm_B (0:49):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 2 (number)
+ $var0: 2 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -205,12 +222,13 @@ Script wasm://wasm/0c10a5fe byte offset 51: Wasm opcode 0x41 (kExprI32Const)
Scope:
at wasm_B (0:51):
- scope (wasm-expression-stack):
- 0: 2 (number)
+ 0: 2 (i32)
- scope (local):
- $var0: 2 (number)
+ $var0: 2 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -218,13 +236,14 @@ Script wasm://wasm/0c10a5fe byte offset 53: Wasm opcode 0x6b (kExprI32Sub)
Scope:
at wasm_B (0:53):
- scope (wasm-expression-stack):
- 0: 2 (number)
- 1: 1 (number)
+ 0: 2 (i32)
+ 1: 1 (i32)
- scope (local):
- $var0: 2 (number)
+ $var0: 2 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -232,12 +251,13 @@ Script wasm://wasm/0c10a5fe byte offset 54: Wasm opcode 0x21 (kExprLocalSet)
Scope:
at wasm_B (0:54):
- scope (wasm-expression-stack):
- 0: 1 (number)
+ 0: 1 (i32)
- scope (local):
- $var0: 2 (number)
+ $var0: 2 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -249,13 +269,15 @@ at wasm_A (0:38):
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 1 (number)
+ $var0: 1 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -267,13 +289,15 @@ at wasm_A (0:39):
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 1 (number)
+ $var0: 1 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -282,10 +306,11 @@ Scope:
at wasm_B (0:45):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 1 (number)
+ $var0: 1 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -293,12 +318,13 @@ Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04 (kExprIf)
Scope:
at wasm_B (0:47):
- scope (wasm-expression-stack):
- 0: 1 (number)
+ 0: 1 (i32)
- scope (local):
- $var0: 1 (number)
+ $var0: 1 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -307,10 +333,11 @@ Scope:
at wasm_B (0:49):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 1 (number)
+ $var0: 1 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -318,12 +345,13 @@ Script wasm://wasm/0c10a5fe byte offset 51: Wasm opcode 0x41 (kExprI32Const)
Scope:
at wasm_B (0:51):
- scope (wasm-expression-stack):
- 0: 1 (number)
+ 0: 1 (i32)
- scope (local):
- $var0: 1 (number)
+ $var0: 1 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -331,13 +359,14 @@ Script wasm://wasm/0c10a5fe byte offset 53: Wasm opcode 0x6b (kExprI32Sub)
Scope:
at wasm_B (0:53):
- scope (wasm-expression-stack):
- 0: 1 (number)
- 1: 1 (number)
+ 0: 1 (i32)
+ 1: 1 (i32)
- scope (local):
- $var0: 1 (number)
+ $var0: 1 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -345,12 +374,13 @@ Script wasm://wasm/0c10a5fe byte offset 54: Wasm opcode 0x21 (kExprLocalSet)
Scope:
at wasm_B (0:54):
- scope (wasm-expression-stack):
- 0: 0 (number)
+ 0: 0 (i32)
- scope (local):
- $var0: 1 (number)
+ $var0: 1 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -362,13 +392,15 @@ at wasm_A (0:38):
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 0 (number)
+ $var0: 0 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -380,13 +412,15 @@ at wasm_A (0:39):
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 0 (number)
+ $var0: 0 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -395,10 +429,11 @@ Scope:
at wasm_B (0:45):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 0 (number)
+ $var0: 0 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -406,12 +441,13 @@ Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04 (kExprIf)
Scope:
at wasm_B (0:47):
- scope (wasm-expression-stack):
- 0: 0 (number)
+ 0: 0 (i32)
- scope (local):
- $var0: 0 (number)
+ $var0: 0 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
Paused:
@@ -420,10 +456,11 @@ Scope:
at wasm_B (0:61):
- scope (wasm-expression-stack):
- scope (local):
- $var0: 0 (number)
+ $var0: 0 (i32)
- scope (module):
instance: exports: "main" (Function)
module: Module
+ functions: "$wasm_A": (Function), "$wasm_B": (Function)
at (anonymous) (0:17):
-- skipped
exports.main returned!
diff --git a/deps/v8/test/inspector/debugger/wasm-stack-check-expected.txt b/deps/v8/test/inspector/debugger/wasm-stack-check-expected.txt
index 08cfb12c06..caf32e07b5 100644
--- a/deps/v8/test/inspector/debugger/wasm-stack-check-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-stack-check-expected.txt
@@ -6,8 +6,8 @@ Wait for script
Got wasm script: wasm://wasm/c84b7cde
Run
Expecting to pause at 61
-Paused at offset 61; wasm-expression-stack: []; local: [12]
-Paused at offset 62; wasm-expression-stack: []; local: [12]
-Paused at offset 64; wasm-expression-stack: [12]; local: [12]
-Paused at offset 66; wasm-expression-stack: [12, 1]; local: [12]
-Paused at offset 67; wasm-expression-stack: [13]; local: [12]
+Paused at offset 61; wasm-expression-stack: []; local: [12 (i32)]
+Paused at offset 62; wasm-expression-stack: []; local: [12 (i32)]
+Paused at offset 64; wasm-expression-stack: [12 (i32)]; local: [12 (i32)]
+Paused at offset 66; wasm-expression-stack: [12 (i32), 1 (i32)]; local: [12 (i32)]
+Paused at offset 67; wasm-expression-stack: [13 (i32)]; local: [12 (i32)]
diff --git a/deps/v8/test/inspector/debugger/wasm-stack-check.js b/deps/v8/test/inspector/debugger/wasm-stack-check.js
index 13f78446ea..4189abd3e1 100644
--- a/deps/v8/test/inspector/debugger/wasm-stack-check.js
+++ b/deps/v8/test/inspector/debugger/wasm-stack-check.js
@@ -70,8 +70,9 @@ async function inspect(frame) {
if (scope.type == 'module') continue;
var scope_properties =
await Protocol.Runtime.getProperties({objectId: scope.object.objectId});
- let str = scope_properties.result.result.map(
- elem => WasmInspectorTest.getWasmValue(elem.value)).join(', ');
+ let str = (await Promise.all(scope_properties.result.result.map(
+ elem => WasmInspectorTest.getWasmValue(elem.value))))
+ .join(', ');
line.push(`${scope.type}: [${str}]`);
}
InspectorTest.log(line.join('; '));
diff --git a/deps/v8/test/inspector/debugger/wasm-step-a-lot-expected.txt b/deps/v8/test/inspector/debugger/wasm-step-a-lot-expected.txt
new file mode 100644
index 0000000000..4750954f06
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-step-a-lot-expected.txt
@@ -0,0 +1,28 @@
+Tests repeated stepping through a large function (should not OOM)
+
+Running test: test
+Setting up global instance variable.
+Got wasm script: wasm://wasm/8f70f0e2
+Setting breakpoint
+Paused 50 and running...
+Paused 100 and running...
+Paused 150 and running...
+Paused 200 and running...
+Paused 250 and running...
+Paused 300 and running...
+Paused 350 and running...
+Paused 400 and running...
+Paused 450 and running...
+Paused 500 and running...
+Paused 550 and running...
+Paused 600 and running...
+Paused 650 and running...
+Paused 700 and running...
+Paused 750 and running...
+Paused 800 and running...
+Paused 850 and running...
+Paused 900 and running...
+Paused 950 and running...
+Paused 1000 and running...
+test function returned.
+Paused 1003 times.
diff --git a/deps/v8/test/inspector/debugger/wasm-step-a-lot.js b/deps/v8/test/inspector/debugger/wasm-step-a-lot.js
new file mode 100644
index 0000000000..df0e983d53
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-step-a-lot.js
@@ -0,0 +1,56 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Lower the maximum code space size to detect missed garbage collection
+// earlier.
+// Flags: --wasm-max-code-space=2
+
+utils.load('test/inspector/wasm-inspector-test.js');
+
+const {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Tests repeated stepping through a large function (should not OOM)');
+session.setupScriptMap();
+
+const builder = new WasmModuleBuilder();
+
+const body = [kExprLocalGet, 0];
+// Stepping through a long function will repeatedly recreate stepping code, with
+// corresponding side tables. This should not run OOM
+// (https://crbug.com/1168564).
+// We use calls such that stack checks are executed reliably.
+for (let i = 0; i < 500; ++i) body.push(...wasmI32Const(i), kExprI32Add);
+const func_test =
+ builder.addFunction('test', kSig_i_i).addBody(body).exportFunc();
+const module_bytes = builder.toArray();
+
+let paused = 0;
+Protocol.Debugger.onPaused(msg => {
+ ++paused;
+ if (paused % 50 == 0) InspectorTest.log(`Paused ${paused} and running...`);
+ Protocol.Debugger.stepOver();
+});
+
+InspectorTest.runAsyncTestSuite([
+ async function test() {
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Setting up global instance variable.');
+ WasmInspectorTest.instantiate(module_bytes);
+ const [, {params: wasmScript}] = await Protocol.Debugger.onceScriptParsed(2);
+
+ InspectorTest.log('Got wasm script: ' + wasmScript.url);
+
+ InspectorTest.log('Setting breakpoint');
+ await Protocol.Debugger.setBreakpoint({
+ location: {
+ scriptId: wasmScript.scriptId,
+ lineNumber: 0,
+ columnNumber: func_test.body_offset
+ }
+ });
+
+ await Protocol.Runtime.evaluate({ expression: 'instance.exports.test()' });
+ InspectorTest.log('test function returned.');
+ InspectorTest.log(`Paused ${paused} times.`);
+ }
+]);
diff --git a/deps/v8/test/inspector/debugger/wasm-step-after-trap-expected.txt b/deps/v8/test/inspector/debugger/wasm-step-after-trap-expected.txt
index 3424b6987a..45d0d74e8d 100644
--- a/deps/v8/test/inspector/debugger/wasm-step-after-trap-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-step-after-trap-expected.txt
@@ -7,11 +7,11 @@ Paused at:
--- 0 ---
Script wasm://wasm/a9a86c5e byte offset 46: Wasm opcode 0x6d (kExprI32DivS)
scope at div (0:46):
- $a: 1
- $b: 0
- $unused: 4711
- $local_zero: 0
- $local_const_11: 11
+ $a: 1 (i32)
+ $b: 0 (i32)
+ $unused: 4711 (i32)
+ $local_zero: 0 (i32)
+ $local_const_11: 11 (i32)
--- 1 ---
try {
instance.exports.#div(1, 0, 4711); // traps (div by zero)
@@ -37,11 +37,11 @@ Paused at:
--- 0 ---
Script wasm://wasm/a9a86c5e byte offset 46: Wasm opcode 0x6d (kExprI32DivS)
scope at div (0:46):
- $a: -2147483648
- $b: -1
- $unused: 4711
- $local_zero: 0
- $local_const_11: 11
+ $a: -2147483648 (i32)
+ $b: -1 (i32)
+ $unused: 4711 (i32)
+ $local_zero: 0 (i32)
+ $local_const_11: 11 (i32)
--- 1 ---
try {
instance.exports.#div(0x80000000, -1, 4711); // traps (unrepresentable)
diff --git a/deps/v8/test/inspector/debugger/wasm-step-after-trap.js b/deps/v8/test/inspector/debugger/wasm-step-after-trap.js
index 6ccf83df58..fec9555ce8 100644
--- a/deps/v8/test/inspector/debugger/wasm-step-after-trap.js
+++ b/deps/v8/test/inspector/debugger/wasm-step-after-trap.js
@@ -80,8 +80,9 @@ async function printLocalScope(frame) {
if (scope.type != 'local') continue;
let properties = await Protocol.Runtime.getProperties(
{'objectId': scope.object.objectId});
- for (let value of properties.result.result) {
- InspectorTest.log(` ${value.name}: ${value.value.value}`);
+ for (let {name, value} of properties.result.result) {
+ value = await WasmInspectorTest.getWasmValue(value);
+ InspectorTest.log(` ${name}: ${value}`);
}
}
}
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-no-opcode-merging-expected.txt b/deps/v8/test/inspector/debugger/wasm-stepping-no-opcode-merging-expected.txt
index 066f5c354e..314f68db9a 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-no-opcode-merging-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-no-opcode-merging-expected.txt
@@ -2,14 +2,14 @@ Tests that Liftoff does not merge opcodes while stepping
Running test: test
Setting breakpoint at offset 33.
-Paused at offset 33: [0]
-Paused at offset 35: [0, 0]
-Paused at offset 36: [0, 1]
-Paused at offset 33: [-1]
-Paused at offset 35: [-1, -1]
-Paused at offset 36: [-1, 0]
-Paused at offset 38: [-1]
-Paused at offset 33: [13]
-Paused at offset 35: [13, 13]
-Paused at offset 36: [13, 0]
-Paused at offset 38: [13]
+Paused at offset 33: [0 (i32)]
+Paused at offset 35: [0 (i32), 0 (i32)]
+Paused at offset 36: [0 (i32), 1 (i32)]
+Paused at offset 33: [-1 (i32)]
+Paused at offset 35: [-1 (i32), -1 (i32)]
+Paused at offset 36: [-1 (i32), 0 (i32)]
+Paused at offset 38: [-1 (i32)]
+Paused at offset 33: [13 (i32)]
+Paused at offset 35: [13 (i32), 13 (i32)]
+Paused at offset 36: [13 (i32), 0 (i32)]
+Paused at offset 38: [13 (i32)]
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-no-opcode-merging.js b/deps/v8/test/inspector/debugger/wasm-stepping-no-opcode-merging.js
index 2522386f2a..4e4135a306 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-no-opcode-merging.js
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-no-opcode-merging.js
@@ -56,8 +56,8 @@ async function printPauseLocationAndStep(msg) {
if (scope.type == 'module') continue;
let scope_properties =
await Protocol.Runtime.getProperties({objectId: scope.object.objectId});
- scopes[scope.type] = scope_properties.result.result.map(
- elem => WasmInspectorTest.getWasmValue(elem.value));
+ scopes[scope.type] = await Promise.all(scope_properties.result.result.map(
+ elem => WasmInspectorTest.getWasmValue(elem.value)));
}
let values = scopes['local'].concat(scopes['wasm-expression-stack']).join(', ');
InspectorTest.log(`Paused at offset ${loc.columnNumber}: [${values}]`);
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt
index 12865dec50..f9890d5a3a 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt
@@ -14,9 +14,9 @@ Setting breakpoint on offset 54 (on the setlocal before the call), url wasm://wa
Script wasm://wasm/9b4bf87e byte offset 54: Wasm opcode 0x21 (kExprLocalSet)
at wasm_B (0:54):
- scope (wasm-expression-stack):
- {"0":3}
+ 0: 3 (i32)
- scope (local):
- {"$var0":4}
+ $var0: 4 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
@@ -26,9 +26,8 @@ Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 56: Wasm opcode 0x10 (kExprCallFunction)
at wasm_B (0:56):
- scope (wasm-expression-stack):
- {}
- scope (local):
- {"$var0":3}
+ $var0: 3 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
@@ -38,16 +37,13 @@ Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 38: Wasm opcode 0x01 (kExprNop)
at wasm_A (0:38):
- scope (wasm-expression-stack):
- {}
- scope (local):
- {}
- scope (module):
-- skipped
at wasm_B (0:56):
- scope (wasm-expression-stack):
- {}
- scope (local):
- {"$var0":3}
+ $var0: 3 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
@@ -57,16 +53,13 @@ Debugger.stepOver called
Script wasm://wasm/9b4bf87e byte offset 39: Wasm opcode 0x01 (kExprNop)
at wasm_A (0:39):
- scope (wasm-expression-stack):
- {}
- scope (local):
- {}
- scope (module):
-- skipped
at wasm_B (0:56):
- scope (wasm-expression-stack):
- {}
- scope (local):
- {"$var0":3}
+ $var0: 3 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
@@ -76,9 +69,8 @@ Debugger.stepOut called
Script wasm://wasm/9b4bf87e byte offset 58: Wasm opcode 0x0c (kExprBr)
at wasm_B (0:58):
- scope (wasm-expression-stack):
- {}
- scope (local):
- {"$var0":3}
+ $var0: 3 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
@@ -88,9 +80,9 @@ Debugger.stepOut called
Script wasm://wasm/9b4bf87e byte offset 54: Wasm opcode 0x21 (kExprLocalSet)
at wasm_B (0:54):
- scope (wasm-expression-stack):
- {"0":2}
+ 0: 2 (i32)
- scope (local):
- {"$var0":3}
+ $var0: 3 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
@@ -100,9 +92,8 @@ Debugger.stepOver called
Script wasm://wasm/9b4bf87e byte offset 56: Wasm opcode 0x10 (kExprCallFunction)
at wasm_B (0:56):
- scope (wasm-expression-stack):
- {}
- scope (local):
- {"$var0":2}
+ $var0: 2 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
@@ -112,9 +103,8 @@ Debugger.stepOver called
Script wasm://wasm/9b4bf87e byte offset 58: Wasm opcode 0x0c (kExprBr)
at wasm_B (0:58):
- scope (wasm-expression-stack):
- {}
- scope (local):
- {"$var0":2}
+ $var0: 2 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
@@ -124,9 +114,9 @@ Debugger.resume called
Script wasm://wasm/9b4bf87e byte offset 54: Wasm opcode 0x21 (kExprLocalSet)
at wasm_B (0:54):
- scope (wasm-expression-stack):
- {"0":1}
+ 0: 1 (i32)
- scope (local):
- {"$var0":2}
+ $var0: 2 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
@@ -136,9 +126,8 @@ Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 56: Wasm opcode 0x10 (kExprCallFunction)
at wasm_B (0:56):
- scope (wasm-expression-stack):
- {}
- scope (local):
- {"$var0":1}
+ $var0: 1 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
@@ -148,16 +137,13 @@ Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 38: Wasm opcode 0x01 (kExprNop)
at wasm_A (0:38):
- scope (wasm-expression-stack):
- {}
- scope (local):
- {}
- scope (module):
-- skipped
at wasm_B (0:56):
- scope (wasm-expression-stack):
- {}
- scope (local):
- {"$var0":1}
+ $var0: 1 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
@@ -167,9 +153,8 @@ Debugger.stepOut called
Script wasm://wasm/9b4bf87e byte offset 58: Wasm opcode 0x0c (kExprBr)
at wasm_B (0:58):
- scope (wasm-expression-stack):
- {}
- scope (local):
- {"$var0":1}
+ $var0: 1 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
@@ -179,9 +164,8 @@ Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 45: Wasm opcode 0x20 (kExprLocalGet)
at wasm_B (0:45):
- scope (wasm-expression-stack):
- {}
- scope (local):
- {"$var0":1}
+ $var0: 1 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
@@ -191,9 +175,9 @@ Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 47: Wasm opcode 0x04 (kExprIf)
at wasm_B (0:47):
- scope (wasm-expression-stack):
- {"0":1}
+ 0: 1 (i32)
- scope (local):
- {"$var0":1}
+ $var0: 1 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
@@ -203,9 +187,8 @@ Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 49: Wasm opcode 0x20 (kExprLocalGet)
at wasm_B (0:49):
- scope (wasm-expression-stack):
- {}
- scope (local):
- {"$var0":1}
+ $var0: 1 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
@@ -215,9 +198,9 @@ Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 51: Wasm opcode 0x41 (kExprI32Const)
at wasm_B (0:51):
- scope (wasm-expression-stack):
- {"0":1}
+ 0: 1 (i32)
- scope (local):
- {"$var0":1}
+ $var0: 1 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
@@ -227,9 +210,10 @@ Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 53: Wasm opcode 0x6b (kExprI32Sub)
at wasm_B (0:53):
- scope (wasm-expression-stack):
- {"0":1,"1":1}
+ 0: 1 (i32)
+ 1: 1 (i32)
- scope (local):
- {"$var0":1}
+ $var0: 1 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
@@ -239,9 +223,9 @@ Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 54: Wasm opcode 0x21 (kExprLocalSet)
at wasm_B (0:54):
- scope (wasm-expression-stack):
- {"0":0}
+ 0: 0 (i32)
- scope (local):
- {"$var0":1}
+ $var0: 1 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
@@ -251,9 +235,8 @@ Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 56: Wasm opcode 0x10 (kExprCallFunction)
at wasm_B (0:56):
- scope (wasm-expression-stack):
- {}
- scope (local):
- {"$var0":0}
+ $var0: 0 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
@@ -263,16 +246,13 @@ Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 38: Wasm opcode 0x01 (kExprNop)
at wasm_A (0:38):
- scope (wasm-expression-stack):
- {}
- scope (local):
- {}
- scope (module):
-- skipped
at wasm_B (0:56):
- scope (wasm-expression-stack):
- {}
- scope (local):
- {"$var0":0}
+ $var0: 0 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
@@ -282,16 +262,13 @@ Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 39: Wasm opcode 0x01 (kExprNop)
at wasm_A (0:39):
- scope (wasm-expression-stack):
- {}
- scope (local):
- {}
- scope (module):
-- skipped
at wasm_B (0:56):
- scope (wasm-expression-stack):
- {}
- scope (local):
- {"$var0":0}
+ $var0: 0 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
@@ -301,16 +278,13 @@ Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 40: Wasm opcode 0x0b (kExprEnd)
at wasm_A (0:40):
- scope (wasm-expression-stack):
- {}
- scope (local):
- {}
- scope (module):
-- skipped
at wasm_B (0:56):
- scope (wasm-expression-stack):
- {}
- scope (local):
- {"$var0":0}
+ $var0: 0 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
@@ -320,9 +294,8 @@ Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 58: Wasm opcode 0x0c (kExprBr)
at wasm_B (0:58):
- scope (wasm-expression-stack):
- {}
- scope (local):
- {"$var0":0}
+ $var0: 0 (i32)
- scope (module):
-- skipped
at (anonymous) (0:17):
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
index 18766b89e8..6cece203ae 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
@@ -91,15 +91,12 @@ async function waitForPauseAndStep(stepAction) {
if (scope.type === 'global' || scope.type === 'module') {
InspectorTest.logObject(' -- skipped');
} else {
- const object = {};
- const {result: {result: properties}} =
- await Protocol.Runtime.getProperties({
- objectId: scope.object.objectId
- });
- for (const {name, value: {value}} of properties) {
- object[name] = value;
+ let properties = await Protocol.Runtime.getProperties(
+ {objectId: scope.object.objectId});
+ for (let {name, value} of properties.result.result) {
+ value = await WasmInspectorTest.getWasmValue(value);
+ InspectorTest.log(` ${name}: ${value}`);
}
- InspectorTest.log(` ${JSON.stringify(object)}`);
}
}
}
diff --git a/deps/v8/test/inspector/inspector.status b/deps/v8/test/inspector/inspector.status
index ede16a957d..a98df5e010 100644
--- a/deps/v8/test/inspector/inspector.status
+++ b/deps/v8/test/inspector/inspector.status
@@ -19,6 +19,14 @@
# This test worked in the wasm interpreter, but fails when using Liftoff for
# debugging.
'debugger/wasm-externref-global': [FAIL],
+
+ # https://crbug.com/1080638
+ # The initial CL only fixed the crash. The test still causes an endless
+ # loop instead of properly reporting a RangeError for a stack overflow.
+ 'regress/regress-crbug-1080638': [SKIP],
+
+ # https://crbug.com/v8/11338
+ 'runtime-call-stats/enable-disable': [PASS, ['verify_csa', SKIP]],
}], # ALWAYS
##############################################################################
@@ -47,6 +55,17 @@
}], # variant != default
##############################################################################
+# TODO(v8:7777): Change this once wasm is supported in jitless mode.
+['not has_webassembly or variant == jitless', {
+ 'debugger/asm-js-stack': [SKIP],
+ 'debugger/asm-js-breakpoint-before-exec': [SKIP],
+ 'debugger/asm-js-breakpoint-during-exec': [SKIP],
+ 'debugger/wasm-*': [SKIP],
+ 'cpu-profiler/console-profile-wasm': [SKIP],
+ 'runtime/get-properties': [SKIP],
+}], # not has_webassembly or variant == jitless
+
+##############################################################################
['lite_mode or variant == jitless', {
# Lite mode does not allocate feedback vector.
'type-profiler/type-profile-start-stop': [SKIP],
@@ -54,13 +73,6 @@
'type-profiler/type-profile-with-to-string-tag': [SKIP],
'type-profiler/type-profile-with-classes': [SKIP],
'type-profiler/type-profile-disable': [SKIP],
-
- # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
- 'debugger/asm-js-stack': [SKIP],
- 'debugger/asm-js-breakpoint-before-exec': [SKIP],
- 'debugger/asm-js-breakpoint-during-exec': [SKIP],
- 'debugger/wasm-*': [SKIP],
- 'cpu-profiler/console-profile-wasm': [SKIP],
}], # 'lite_mode or variant == jitless'
##############################################################################
@@ -108,6 +120,12 @@
'debugger/wasm-scope-info*': [SKIP],
}], # '(arch == mipsel or arch == mips64el) and not simd_mips'
+##############################################################################
+['arch == riscv64', {
+ # SIMD support is still in progress.
+ 'debugger/wasm-scope-info*': [SKIP],
+}], # 'arch == riscv64'
+
################################################################################
['variant == stress_snapshot', {
'*': [SKIP], # only relevant for mjsunit tests.
@@ -122,6 +140,10 @@
['tsan == True', {
# TSan handles SIGPROF incorrectly (https://crbug.com/v8/9869).
'cpu-profiler/console-profile-wasm': [SKIP],
+
+ # This test is just slow on TSan, and TSan coverage is not needed to test
+ # that we do not run OOM. Thus skip it on TSan.
+ 'debugger/wasm-step-a-lot': [SKIP],
}], # 'tsan == True'
##############################################################################
diff --git a/deps/v8/test/inspector/isolate-data.cc b/deps/v8/test/inspector/isolate-data.cc
index 3bd225ea1d..52eb76eabb 100644
--- a/deps/v8/test/inspector/isolate-data.cc
+++ b/deps/v8/test/inspector/isolate-data.cc
@@ -429,8 +429,8 @@ void IsolateData::installAdditionalCommandLineAPI(
CHECK(context->GetIsolate() == isolate());
v8::HandleScope handle_scope(isolate());
v8::Context::Scope context_scope(context);
- v8::ScriptOrigin origin(
- v8::String::NewFromUtf8Literal(isolate(), "internal-console-api"));
+ v8::ScriptOrigin origin(isolate(), v8::String::NewFromUtf8Literal(
+ isolate(), "internal-console-api"));
v8::ScriptCompiler::Source scriptSource(
additional_console_api_.Get(isolate()), origin);
v8::MaybeLocal<v8::Script> script =
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1080638-expected.txt b/deps/v8/test/inspector/regress/regress-crbug-1080638-expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1080638-expected.txt
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1080638.js b/deps/v8/test/inspector/regress/regress-crbug-1080638.js
new file mode 100644
index 0000000000..8ae7707d74
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1080638.js
@@ -0,0 +1,28 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {Protocol} = InspectorTest.start('Recursive proxy prototype does not crash inspector crbug.com/1080638');
+
+const reproductionCode = `
+const t = { id: 1 }
+const p = new Proxy(t, {
+ get(target, prop, receiver) {
+ console.log(receiver);
+ return Reflect.get(target, prop);
+ }
+});
+
+const q = Object.create(p);
+console.log(q.id);
+`;
+
+(async function logPropertyWithProxyPrototype() {
+ await Protocol.Runtime.enable();
+ const response = await Protocol.Runtime.evaluate({
+ expression: reproductionCode,
+ replMode: true,
+ });
+ InspectorTest.logMessage(response);
+ InspectorTest.completeTest();
+})();
diff --git a/deps/v8/test/inspector/runtime/console-message-omit-data-urls-expected.txt b/deps/v8/test/inspector/runtime/console-message-omit-data-urls-expected.txt
new file mode 100644
index 0000000000..b54aae3197
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-message-omit-data-urls-expected.txt
@@ -0,0 +1,6 @@
+Checks that we only send along non-data urls.
+Test log with data uri.
+console api called: Hello World!
+ callFrame: function test (url: )
+ callFrame: function (url: test.js)
+exception details: Uncaught ReferenceError: Exception is not defined (url: ) \ No newline at end of file
diff --git a/deps/v8/test/inspector/runtime/console-message-omit-data-urls.js b/deps/v8/test/inspector/runtime/console-message-omit-data-urls.js
new file mode 100644
index 0000000000..3400c692fa
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-message-omit-data-urls.js
@@ -0,0 +1,63 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks that we only send along non-data urls.');
+
+var expectedMessages = 2;
+var messages = [];
+
+Protocol.Runtime.enable();
+Protocol.Console.enable();
+
+Protocol.Runtime.onConsoleAPICalled(consoleAPICalled);
+Protocol.Runtime.onExceptionThrown(exceptionThrown);
+
+contextGroup.addScript(`
+async function test() {
+ console.log("Hello World!");
+ throw new Exception("Exception thrown");
+}
+//# sourceURL=data:,pseudoDataUrl`);
+
+function consoleAPICalled(result)
+{
+ const msgText = result.params.args[0].value;
+ const callFrames = result.params.stackTrace.callFrames;
+ let messageParts = [];
+ messageParts.push(`console api called: ${msgText}`);
+ for (frame of callFrames) {
+ messageParts.push(` callFrame: function ${frame.functionName} (url: ${frame.url})`);
+ }
+ messages.push(messageParts.join("\n"));
+
+ if (!(--expectedMessages)) {
+ done();
+ }
+}
+
+function exceptionThrown(result)
+{
+ const exceptionDetails = result.params.exceptionDetails;
+ const url = exceptionDetails.url;
+ const text = exceptionDetails.text;
+ messages.push(`exception details: ${text} (url: ${url ? url : ""})`)
+
+ if (!(--expectedMessages)) {
+ done();
+ }
+}
+
+function done()
+{
+ messages.sort();
+ for (var message of messages) {
+ InspectorTest.log(message);
+ }
+ InspectorTest.completeTest();
+}
+
+(async function test() {
+ InspectorTest.log('Test log with data uri.');
+ await Protocol.Runtime.evaluate({ expression: `test()//# sourceURL=test.js`});
+})();
diff --git a/deps/v8/test/inspector/runtime/get-properties-expected.txt b/deps/v8/test/inspector/runtime/get-properties-expected.txt
index 8a764d0b0c..33521c8281 100644
--- a/deps/v8/test/inspector/runtime/get-properties-expected.txt
+++ b/deps/v8/test/inspector/runtime/get-properties-expected.txt
@@ -105,6 +105,16 @@ Running test: testArrayBuffer
[[ArrayBufferByteLength]]
[[ArrayBufferData]]
+Running test: testArrayBufferFromWebAssemblyMemory
+[[Int8Array]]
+[[Uint8Array]]
+[[Int16Array]]
+[[Int32Array]]
+[[ArrayBufferByteLength]]
+[[ArrayBufferData]]
+[[WebAssemblyMemory]]
+ __proto__ own object undefined
+
Running test: testDetachedArrayBuffer
[[IsDetached]] true
diff --git a/deps/v8/test/inspector/runtime/get-properties.js b/deps/v8/test/inspector/runtime/get-properties.js
index 737616ef4c..bc3ea8799f 100644
--- a/deps/v8/test/inspector/runtime/get-properties.js
+++ b/deps/v8/test/inspector/runtime/get-properties.js
@@ -57,6 +57,25 @@ InspectorTest.runAsyncTestSuite([
}
},
+ async function testArrayBufferFromWebAssemblyMemory() {
+ let objectId = await evaluateToObjectId('new WebAssembly.Memory({initial: 1}).buffer');
+ let props = await Protocol.Runtime.getProperties({ objectId, ownProperties: true });
+ for (let prop of props.result.result) {
+ if (prop.name === '__proto__')
+ continue;
+ InspectorTest.log(prop.name);
+ await logGetPropertiesResult(prop.value.objectId);
+ }
+ for (let prop of props.result.internalProperties) {
+ InspectorTest.log(prop.name);
+ // Skip printing the values of the virtual typed arrays.
+ if (/\[\[.*Array\]\]/.test(prop.name))
+ continue;
+ if (prop.value.objectId)
+ await logGetPropertiesResult(prop.value.objectId);
+ }
+ },
+
async function testDetachedArrayBuffer() {
await Protocol.Runtime.evaluate({ expression: 'var a = new ArrayBuffer(16)' });
await Protocol.Runtime.evaluate({ expression: 'var b = new Uint32Array(a)' });
diff --git a/deps/v8/test/inspector/tasks.cc b/deps/v8/test/inspector/tasks.cc
index 08c829e761..79f40c0e27 100644
--- a/deps/v8/test/inspector/tasks.cc
+++ b/deps/v8/test/inspector/tasks.cc
@@ -20,8 +20,8 @@ void ExecuteStringTask::Run(IsolateData* data) {
v8::HandleScope handle_scope(data->isolate());
v8::Local<v8::Context> context = data->GetDefaultContext(context_group_id_);
v8::Context::Scope context_scope(context);
- v8::ScriptOrigin origin(ToV8String(data->isolate(), name_), line_offset_,
- column_offset_,
+ v8::ScriptOrigin origin(data->isolate(), ToV8String(data->isolate(), name_),
+ line_offset_, column_offset_,
/* resource_is_shared_cross_origin */ false,
/* script_id */ -1,
/* source_map_url */ v8::Local<v8::Value>(),
diff --git a/deps/v8/test/inspector/wasm-inspector-test.js b/deps/v8/test/inspector/wasm-inspector-test.js
index 67333dcbb6..47d8419055 100644
--- a/deps/v8/test/inspector/wasm-inspector-test.js
+++ b/deps/v8/test/inspector/wasm-inspector-test.js
@@ -7,25 +7,40 @@ utils.load('test/mjsunit/wasm/wasm-module-builder.js');
WasmInspectorTest = {}
InspectorTest.getWasmOpcodeName = getOpcodeName;
-WasmInspectorTest.evalWithUrl = (code, url) =>
- Protocol.Runtime
- .evaluate({'expression': code + '\n//# sourceURL=v8://test/' + url})
- .then(printIfFailure);
+WasmInspectorTest.evalWithUrl = async function(code, url) {
+ return await Protocol.Runtime
+ .evaluate({'expression': code + '\n//# sourceURL=v8://test/' + url})
+ .then(printIfFailure);
+};
-WasmInspectorTest.instantiateFromBuffer = function(bytes, imports) {
+WasmInspectorTest.compileFromBuffer = (function(bytes) {
var buffer = new ArrayBuffer(bytes.length);
var view = new Uint8Array(buffer);
for (var i = 0; i < bytes.length; ++i) {
view[i] = bytes[i] | 0;
}
- const module = new WebAssembly.Module(buffer);
- return new WebAssembly.Instance(module, imports);
-}
+ return new WebAssembly.Module(buffer);
+}).toString();
+
+WasmInspectorTest.instantiateFromBuffer =
+ (function(bytes, imports) {
+ return new WebAssembly.Instance(compileFromBuffer(bytes), imports);
+ })
+ .toString()
+ .replace('compileFromBuffer', WasmInspectorTest.compileFromBuffer);
-WasmInspectorTest.instantiate = async function(bytes, instance_name = 'instance') {
- const instantiate_code = `var ${instance_name} = (${WasmInspectorTest.instantiateFromBuffer})(${JSON.stringify(bytes)});`;
+WasmInspectorTest.compile = async function(bytes, module_name = 'module') {
+ const compile_code = `var ${module_name} = (${
+ WasmInspectorTest.compileFromBuffer})(${JSON.stringify(bytes)});`;
+ await WasmInspectorTest.evalWithUrl(compile_code, 'compile_module');
+};
+
+WasmInspectorTest.instantiate =
+ async function(bytes, instance_name = 'instance') {
+ const instantiate_code = `var ${instance_name} = (${
+ WasmInspectorTest.instantiateFromBuffer})(${JSON.stringify(bytes)});`;
await WasmInspectorTest.evalWithUrl(instantiate_code, 'instantiate');
-}
+};
WasmInspectorTest.dumpScopeProperties = async function(message) {
printIfFailure(message);
@@ -33,11 +48,17 @@ WasmInspectorTest.dumpScopeProperties = async function(message) {
var value_str = await getScopeValues(value.name, value.value);
InspectorTest.log(' ' + value.name + ': ' + value_str);
}
-}
+};
-WasmInspectorTest.getWasmValue = value => {
- return value.unserializableValue ?? value.value;
-}
+WasmInspectorTest.getWasmValue = async function(value) {
+ let msg = await Protocol.Runtime.getProperties({objectId: value.objectId});
+ printIfFailure(msg);
+ const value_type = msg.result.result.find(({name}) => name === 'type');
+ const value_value = msg.result.result.find(({name}) => name === 'value');
+ return `${
+ value_value.value.unserializableValue ??
+ value_value.value.value} (${value_type.value.value})`;
+};
function printIfFailure(message) {
if (!message.result) {
@@ -47,19 +68,29 @@ function printIfFailure(message) {
}
async function getScopeValues(name, value) {
- if (value.type == 'object') {
- if (value.subtype === 'typedarray' || value.subtype == 'webassemblymemory') return value.description;
+ async function printValue(value) {
+ if (value.type === 'object' && value.subtype === 'wasmvalue') {
+ return await WasmInspectorTest.getWasmValue(value);
+ } else if ('className' in value) {
+ return `(${value.className})`;
+ }
+ return `${value.unserializableValue ?? value.value} (${
+ value.subtype ?? value.type})`;
+ }
+ if (value.type === 'object' && value.subtype !== 'wasmvalue') {
+ if (value.subtype === 'typedarray' || value.subtype == 'webassemblymemory')
+ return value.description;
if (name === 'instance') return dumpInstanceProperties(value);
if (name === 'module') return value.description;
let msg = await Protocol.Runtime.getProperties({objectId: value.objectId});
printIfFailure(msg);
- const printProperty = function({name, value}) {
- return `"${name}": ${WasmInspectorTest.getWasmValue(value)} (${value.subtype ?? value.type})`;
+ async function printProperty({name, value}) {
+ return `"${name}": ${await printValue(value)}`;
}
- return msg.result.result.map(printProperty).join(', ');
+ return (await Promise.all(msg.result.result.map(printProperty))).join(', ');
}
- return `${WasmInspectorTest.getWasmValue(value)} (${value.subtype ?? value.type})`;
+ return await printValue(value);
}
function recursiveGetPropertiesWrapper(value, depth) {
@@ -68,10 +99,12 @@ function recursiveGetPropertiesWrapper(value, depth) {
async function recursiveGetProperties(value, depth) {
if (depth > 0) {
- const properties = await Promise.all(value.result.result.map(
- x => {return Protocol.Runtime.getProperties({objectId: x.value.objectId});}));
- const recursiveProperties = await Promise.all(properties.map(
- x => {return recursiveGetProperties(x, depth - 1);}));
+ const properties = await Promise.all(value.result.result.map(x => {
+ return Protocol.Runtime.getProperties({objectId: x.value.objectId});
+ }));
+ const recursiveProperties = await Promise.all(properties.map(x => {
+ return recursiveGetProperties(x, depth - 1);
+ }));
return recursiveProperties.flat();
}
return value;
@@ -83,17 +116,17 @@ async function dumpInstanceProperties(instanceObj) {
}
const exportsName = 'exports';
- let exportsObj = await Protocol.Runtime.callFunctionOn(
- {objectId: instanceObj.objectId,
- functionDeclaration: invokeGetter.toString(),
- arguments: [{value: JSON.stringify(exportsName)}]
- });
+ let exportsObj = await Protocol.Runtime.callFunctionOn({
+ objectId: instanceObj.objectId,
+ functionDeclaration: invokeGetter.toString(),
+ arguments: [{value: JSON.stringify(exportsName)}]
+ });
printIfFailure(exportsObj);
let exports = await Protocol.Runtime.getProperties(
{objectId: exportsObj.result.result.objectId});
printIfFailure(exports);
- const printExports = function(value) {
+ function printExports(value) {
return `"${value.name}" (${value.value.className})`;
}
const formattedExports = exports.result.result.map(printExports).join(', ');
diff --git a/deps/v8/test/intl/date-format/UnwrapDateTimeFormatUseOrdinaryHasInstance.js b/deps/v8/test/intl/date-format/UnwrapDateTimeFormatUseOrdinaryHasInstance.js
new file mode 100644
index 0000000000..52bfda0845
--- /dev/null
+++ b/deps/v8/test/intl/date-format/UnwrapDateTimeFormatUseOrdinaryHasInstance.js
@@ -0,0 +1,15 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Verify ECMA402 PR 500 Use OrdinaryHasInstance in normative optional steps
+// https://github.com/tc39/ecma402/pull/500
+
+Object.defineProperty(Intl.DateTimeFormat, Symbol.hasInstance, {
+ get() { throw new Error("Intl.DateTimeFormat[@@hasInstance] lookup"); }
+});
+
+var dtf;
+assertDoesNotThrow(() => dtf = new Intl.DateTimeFormat());
+assertDoesNotThrow(() => dtf.format(new Date()));
+assertDoesNotThrow(() => dtf.resolvedOptions());
diff --git a/deps/v8/test/intl/displaynames/languagecanonical.js b/deps/v8/test/intl/displaynames/languagecanonical.js
new file mode 100644
index 0000000000..23723296a7
--- /dev/null
+++ b/deps/v8/test/intl/displaynames/languagecanonical.js
@@ -0,0 +1,71 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test the calling of the effect of CanonicalizeUnicodeLocaleId(code).
+// in step 1.c of
+// https://tc39.es/ecma402/#sec-canonicalcodefordisplaynames
+//
+// The following Data generated by
+// METAFILE="third_party/icu/source/data/misc/metadata.txt"
+// egrep "^[ ]{12}[a-z]{2,3}(_[a-z]{4})?(_[A-Za-z]{2})?(_[a-z]{5,8})*{" $METAFILE | \
+// sed -e "s/^ */ \"/g" | tr "_{\n" "-\"," |fold -s -w 70
+//
+let testCases = [
+"aa-saaho", "aam", "aar", "abk", "adp", "afr", "agp", "ais", "aju",
+"aka", "alb", "als", "amh", "ara", "arb", "arg", "arm", "art-lojban",
+"asd", "asm", "aue", "ava", "ave", "aym", "ayr", "ayx", "aze", "azj",
+"bak", "bam", "baq", "baz", "bcc", "bcl", "bel", "ben", "bgm", "bh",
+"bhk", "bih", "bis", "bjd", "bjq", "bkb", "bod", "bos", "bre", "btb",
+"bul", "bur", "bxk", "bxr", "cat", "ccq", "cel-gaulish", "ces",
+"cha", "che", "chi", "chu", "chv", "cjr", "cka", "cld", "cmk", "cmn",
+"cnr", "cor", "cos", "coy", "cqu", "cre", "cwd", "cym", "cze", "daf",
+"dan", "dap", "deu", "dgo", "dhd", "dik", "diq", "dit", "div", "djl",
+"dkl", "drh", "drr", "drw", "dud", "duj", "dut", "dwl", "dzo", "ekk",
+"ell", "elp", "emk", "eng", "epo", "esk", "est", "eus", "ewe", "fao",
+"fas", "fat", "fij", "fin", "fra", "fre", "fry", "fuc", "ful", "gav",
+"gaz", "gbc", "gbo", "geo", "ger", "gfx", "ggn", "ggo", "ggr", "gio",
+"gla", "gle", "glg", "gli", "glv", "gno", "gre", "grn", "gti", "gug",
+"guj", "guv", "gya", "hat", "hau", "hbs", "hdn", "hea", "heb", "her",
+"him", "hin", "hmo", "hrr", "hrv", "hun", "hy-arevmda", "hye", "ibi",
+"ibo", "ice", "ido", "iii", "ike", "iku", "ile", "ill", "ilw", "in",
+"ina", "ind", "ipk", "isl", "ita", "iw", "izi", "jar", "jav", "jeg",
+"ji", "jpn", "jw", "kal", "kan", "kas", "kat", "kau", "kaz", "kdv",
+"kgc", "kgd", "kgh", "khk", "khm", "kik", "kin", "kir", "kmr", "knc",
+"kng", "knn", "koj", "kom", "kon", "kor", "kpp", "kpv", "krm", "ktr",
+"kua", "kur", "kvs", "kwq", "kxe", "kxl", "kzh", "kzj", "kzt", "lao",
+"lat", "lav", "lbk", "leg", "lii", "lim", "lin", "lit", "llo", "lmm",
+"ltz", "lub", "lug", "lvs", "mac", "mah", "mal", "mao", "mar", "may",
+"meg", "mgx", "mhr", "mkd", "mlg", "mlt", "mnk", "mnt", "mo", "mof",
+"mol", "mon", "mri", "msa", "mst", "mup", "mwd", "mwj", "mya", "myd",
+"myt", "nad", "nau", "nav", "nbf", "nbl", "nbx", "ncp", "nde", "ndo",
+"nep", "nld", "nln", "nlr", "nno", "nns", "nnx", "no", "no-bokmal",
+"no-nynorsk", "nob", "noo", "nor", "npi", "nts", "nxu", "nya", "oci",
+"ojg", "oji", "ori", "orm", "ory", "oss", "oun", "pan", "pbu", "pcr",
+"per", "pes", "pli", "plt", "pmc", "pmu", "pnb", "pol", "por", "ppa",
+"ppr", "prs", "pry", "pus", "puz", "que", "quz", "rmr", "rmy", "roh",
+"ron", "rum", "run", "rus", "sag", "san", "sap", "sca", "scc", "scr",
+"sgl", "sgn-BR", "sgn-CO", "sgn-DE", "sgn-DK", "sgn-ES", "sgn-FR",
+"sgn-GB", "sgn-GR", "sgn-IE", "sgn-IT", "sgn-JP", "sgn-MX", "sgn-NI",
+"sgn-NL", "sgn-NO", "sgn-PT", "sgn-SE", "sgn-US", "sgn-ZA", "sh",
+"sin", "skk", "slk", "slo", "slv", "sme", "smo", "sna", "snd", "som",
+"sot", "spa", "spy", "sqi", "src", "srd", "srp", "ssw", "sul", "sum",
+"sun", "swa", "swc", "swe", "swh", "tah", "tam", "tat", "tdu", "tel",
+"tgg", "tgk", "tgl", "tha", "thc", "thw", "thx", "tib", "tid", "tie",
+"tir", "tkk", "tl", "tlw", "tmp", "tne", "tnf", "ton", "tsf", "tsn",
+"tso", "ttq", "tuk", "tur", "tw", "twi", "uig", "ukr", "umu",
+"und-aaland", "und-arevela", "und-arevmda", "und-bokmal",
+"und-hakka", "und-hepburn-heploc", "und-lojban", "und-nynorsk",
+"und-saaho", "und-xiang", "unp", "uok", "urd", "uzb", "uzn", "ven",
+"vie", "vol", "wel", "wgw", "wit", "wiw", "wln", "wol", "xba", "xho",
+"xia", "xkh", "xpe", "xrq", "xsj", "xsl", "ybd", "ydd", "yen", "yid",
+"yiy", "yma", "ymt", "yor", "yos", "yuu", "zai", "zh-guoyu",
+"zh-hakka", "zh-xiang", "zha", "zho", "zir", "zsm", "zul", "zyb",
+"fra", "frb", "frc", "frd", "fre", "frf", "frg", "frh", "fri", "frj",
+"frk", "frl", "frm", "frn", "fro", "frp", "frq", "frr", "frs", "frt",
+"fru", "frv", "lud", "lug", "lul", "nzn", "nzs",
+ ];
+let dn = new Intl.DisplayNames("en", {type: "language"})
+testCases.forEach(function(locale) {
+ assertEquals(dn.of(new Intl.Locale(locale)), dn.of(locale))
+})
diff --git a/deps/v8/test/intl/intl.status b/deps/v8/test/intl/intl.status
index ee54c92461..eb162bc697 100644
--- a/deps/v8/test/intl/intl.status
+++ b/deps/v8/test/intl/intl.status
@@ -60,6 +60,11 @@
'regress-7770': [SKIP],
}], # 'system == android'
+['msan == True', {
+ # https://bugs.chromium.org/p/v8/issues/detail?id=11438
+ 'regress-364374': [SKIP],
+}], # msan == True
+
################################################################################
['variant == stress_snapshot', {
'*': [SKIP], # only relevant for mjsunit tests.
diff --git a/deps/v8/test/intl/number-format/UnwrapNumberFormatUseOrdinaryHasInstance.js b/deps/v8/test/intl/number-format/UnwrapNumberFormatUseOrdinaryHasInstance.js
new file mode 100644
index 0000000000..ced7cba917
--- /dev/null
+++ b/deps/v8/test/intl/number-format/UnwrapNumberFormatUseOrdinaryHasInstance.js
@@ -0,0 +1,15 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Verify ECMA402 PR 500 Use OrdinaryHasInstance in normative optional steps
+// https://github.com/tc39/ecma402/pull/500
+
+Object.defineProperty(Intl.NumberFormat, Symbol.hasInstance, {
+ get() { throw new Error("Intl.NumberFormat[@@hasInstance] lookup"); }
+});
+
+var nf;
+assertDoesNotThrow(() => nf = new Intl.NumberFormat());
+assertDoesNotThrow(() => nf.format(123));
+assertDoesNotThrow(() => nf.resolvedOptions());
diff --git a/deps/v8/test/intl/number-format/wont-crash-by-1-or-false.js b/deps/v8/test/intl/number-format/wont-crash-by-1-or-false.js
index 518fe52bde..ab0e96e46e 100644
--- a/deps/v8/test/intl/number-format/wont-crash-by-1-or-false.js
+++ b/deps/v8/test/intl/number-format/wont-crash-by-1-or-false.js
@@ -5,8 +5,6 @@
// Make sure passing 1 or false to patched construtor won't cause crash
Object.defineProperty(Intl.NumberFormat, Symbol.hasInstance, { value: _ => true });
-assertThrows(() =>
- Intl.NumberFormat.call(1), TypeError);
+assertDoesNotThrow(() => Intl.NumberFormat.call(1));
-assertThrows(() =>
- Intl.NumberFormat.call(false), TypeError);
+assertDoesNotThrow(() => Intl.NumberFormat.call(false));
diff --git a/deps/v8/test/intl/regress-11350.js b/deps/v8/test/intl/regress-11350.js
new file mode 100644
index 0000000000..6c9f82780f
--- /dev/null
+++ b/deps/v8/test/intl/regress-11350.js
@@ -0,0 +1,49 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test Long Locale handle minimize and maximize correctly.
+
+let ext = "-u-cu-eur-em-default-hc-h23-ks-level1-lb-strict-lw-normal-" +
+ "ms-metric-nu-latn-rg-atzzzz-sd-atat1-ss-none-tz-atvie-va-posix";
+
+// Test maximize()
+assertEquals ("de-Latn-DE" + ext,
+ (new Intl.Locale("de" + ext)).maximize().toString());
+
+assertEquals ("de-Latn-DE" + ext,
+ (new Intl.Locale("de-DE" + ext)).maximize().toString());
+
+assertEquals ("de-Latn-DE" + ext,
+ (new Intl.Locale("de-Latn" + ext)).maximize().toString());
+
+assertEquals ("de-Latn-DE" + ext,
+ (new Intl.Locale("de-Latn-DE" + ext)).maximize().toString());
+
+assertEquals ("de-Hant-DE" + ext,
+ (new Intl.Locale("de-Hant" + ext)).maximize().toString());
+
+assertEquals ("de-Hant-AT" + ext,
+ (new Intl.Locale("de-Hant-AT" + ext)).maximize().toString());
+
+assertEquals ("de-Latn-AT" + ext,
+ (new Intl.Locale("de-AT" + ext)).maximize().toString());
+
+// Test minimize()
+assertEquals ("de" + ext,
+ (new Intl.Locale("de-Latn-DE" + ext)).minimize().toString());
+
+assertEquals ("de" + ext,
+ (new Intl.Locale("de-Latn" + ext)).minimize().toString());
+
+assertEquals ("de" + ext,
+ (new Intl.Locale("de-DE" + ext)).minimize().toString());
+
+assertEquals ("de-AT" + ext,
+ (new Intl.Locale("de-Latn-AT" + ext)).minimize().toString());
+
+assertEquals ("de-Hant" + ext,
+ (new Intl.Locale("de-Hant" + ext)).minimize().toString());
+
+assertEquals ("de-Hant-AT" + ext,
+ (new Intl.Locale("de-Hant-AT" + ext)).minimize().toString());
diff --git a/deps/v8/test/intl/regress-1170305.js b/deps/v8/test/intl/regress-1170305.js
new file mode 100644
index 0000000000..74bc7207ad
--- /dev/null
+++ b/deps/v8/test/intl/regress-1170305.js
@@ -0,0 +1,16 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test in Chinese locale there is space between the day and hour field.
+let opt = {year: 'numeric', month: '2-digit', day: '2-digit', hour: '2-digit',
+ minute: '2-digit', second: '2-digit', hour12: false, timeZone: "UTC"};
+let d = new Date("2021-01-27T03:15:04Z");
+
+["zh", "zh-CN", "zh-Hant", "zh-TW", "zh-Hans"].forEach(function(l) {
+ // Ensure both 27 (day) and 03 (hour) can be found in the string.
+ assertTrue(d.toLocaleString(l, opt).indexOf("27") >= 0);
+ assertTrue(d.toLocaleString(l, opt).indexOf("03") >= 0);
+ // Ensure there is no case that 27 (day) and 03 (hour) concat together.
+ assertEquals(-1, d.toLocaleString(l, opt).indexOf("2703"));
+});
diff --git a/deps/v8/test/intl/regress-1177623.js b/deps/v8/test/intl/regress-1177623.js
new file mode 100644
index 0000000000..dcdd6d1852
--- /dev/null
+++ b/deps/v8/test/intl/regress-1177623.js
@@ -0,0 +1,5 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertEquals("UTC", Intl.DateTimeFormat('en', { timeZone: 'Zulu' }).resolvedOptions().timeZone);
diff --git a/deps/v8/test/intl/regress-1177812.js b/deps/v8/test/intl/regress-1177812.js
new file mode 100644
index 0000000000..7b24f128d3
--- /dev/null
+++ b/deps/v8/test/intl/regress-1177812.js
@@ -0,0 +1,7 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+for (let tz of [ false, [], {}, function () {}]) {
+ assertThrows(() => new Date().toLocaleString(undefined, { timeZone: tz }), RangeError);
+}
diff --git a/deps/v8/test/message/fail/class-fields-static-throw.out b/deps/v8/test/message/fail/class-fields-static-throw.out
index 456d1f38c0..417f4c00ea 100644
--- a/deps/v8/test/message/fail/class-fields-static-throw.out
+++ b/deps/v8/test/message/fail/class-fields-static-throw.out
@@ -2,5 +2,5 @@
static x = foo();
^
ReferenceError: foo is not defined
- at Function.<static_fields_initializer> (*%(basename)s:8:14)
- at *%(basename)s:1:1 \ No newline at end of file
+ at Function.<static_initializer> (*%(basename)s:8:14)
+ at *%(basename)s:1:1
diff --git a/deps/v8/test/message/fail/modules-import-assertions-fail-1.mjs b/deps/v8/test/message/fail/modules-import-assertions-fail-1.mjs
new file mode 100644
index 0000000000..0033baab53
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-import-assertions-fail-1.mjs
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+//
+// Flags: --harmony-import-assertions
+
+import "modules-skip-1-import-assertions-fail.mjs" assert { type: "notARealType"}
diff --git a/deps/v8/test/message/fail/modules-import-assertions-fail-1.out b/deps/v8/test/message/fail/modules-import-assertions-fail-1.out
new file mode 100644
index 0000000000..1b3be22192
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-import-assertions-fail-1.out
@@ -0,0 +1 @@
+undefined:0: Error: Invalid module type was asserted \ No newline at end of file
diff --git a/deps/v8/test/message/fail/modules-import-assertions-fail-2.mjs b/deps/v8/test/message/fail/modules-import-assertions-fail-2.mjs
new file mode 100644
index 0000000000..bf7cdb3d4e
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-import-assertions-fail-2.mjs
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+//
+// Flags: --harmony-import-assertions
+
+import "modules-skip-1-import-assertions-fail.mjs" assert { type: "json"}
diff --git a/deps/v8/test/message/fail/modules-import-assertions-fail-2.out b/deps/v8/test/message/fail/modules-import-assertions-fail-2.out
new file mode 100644
index 0000000000..f7c9b6a442
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-import-assertions-fail-2.out
@@ -0,0 +1,4 @@
+undefined:1: SyntaxError: Unexpected token / in JSON at position 0
+// Copyright 2021 the V8 project authors. All rights reserved.
+^
+SyntaxError: Unexpected token / in JSON at position 0 \ No newline at end of file
diff --git a/deps/v8/test/message/fail/modules-import-assertions-fail-3.mjs b/deps/v8/test/message/fail/modules-import-assertions-fail-3.mjs
new file mode 100644
index 0000000000..3a6a1b1a01
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-import-assertions-fail-3.mjs
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+//
+// Flags: --harmony-import-assertions
+
+import "modules-skip-3-import-assertions-fail.json"
diff --git a/deps/v8/test/message/fail/modules-import-assertions-fail-3.out b/deps/v8/test/message/fail/modules-import-assertions-fail-3.out
new file mode 100644
index 0000000000..2de8442c13
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-import-assertions-fail-3.out
@@ -0,0 +1,4 @@
+*modules-skip-3-import-assertions-fail.json:1: SyntaxError: Unexpected token ':'
+{ "life": 42 }
+ ^
+SyntaxError: Unexpected token ':' \ No newline at end of file
diff --git a/deps/v8/test/message/fail/modules-skip-1-import-assertions-fail.mjs b/deps/v8/test/message/fail/modules-skip-1-import-assertions-fail.mjs
new file mode 100644
index 0000000000..75b4c3fbf1
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-skip-1-import-assertions-fail.mjs
@@ -0,0 +1,7 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+export function life() { return 42; }
diff --git a/deps/v8/test/message/fail/modules-skip-3-import-assertions-fail.json b/deps/v8/test/message/fail/modules-skip-3-import-assertions-fail.json
new file mode 100644
index 0000000000..15385d56a0
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-skip-3-import-assertions-fail.json
@@ -0,0 +1 @@
+{ "life": 42 }
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index f3d8d7c995..633eadddcd 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -55,20 +55,25 @@
}], # no_i18n == True
##############################################################################
-['lite_mode or variant == jitless', {
- # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+# TODO(v8:7777): Change this once wasm is supported in jitless mode.
+['not has_webassembly or variant == jitless', {
'mjsunit/fail/assert-promise-result-wasm-compile-fail': [SKIP],
'mjsunit/fail/assert-in-promise-fail-recursive': [FAIL],
'fail/wasm-*': [SKIP],
'wasm-*': [SKIP],
-
- # Test output requires --validate-asm, which is disabled in jitless mode.
'asm-*': [SKIP],
-}], # lite_mode or variant == jitless
+}], # not has_webassembly or variant == jitless
################################################################################
['variant == stress_snapshot', {
'*': [SKIP], # only relevant for mjsunit tests.
}],
+################################################################################
+['arch == ppc64', {
+ # Tests that require Simd enabled.
+ 'wasm-trace-memory': [SKIP],
+}],
+
+
]
diff --git a/deps/v8/test/message/wasm-trace-memory.js b/deps/v8/test/message/wasm-trace-memory.js
index b31dbb8e1f..e1091ad4c7 100644
--- a/deps/v8/test/message/wasm-trace-memory.js
+++ b/deps/v8/test/message/wasm-trace-memory.js
@@ -4,6 +4,7 @@
// Flags: --no-stress-opt --trace-wasm-memory --no-liftoff
// Flags: --experimental-wasm-simd
+// Flags: --enable-sse3 --enable-sse4-1
load("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/BUILD.gn b/deps/v8/test/mjsunit/BUILD.gn
index 184bf5b596..1acef4ef64 100644
--- a/deps/v8/test/mjsunit/BUILD.gn
+++ b/deps/v8/test/mjsunit/BUILD.gn
@@ -12,7 +12,6 @@ group("v8_mjsunit") {
data = [
"./",
- "../../tools/arguments.js",
"../../tools/arguments.mjs",
"../../tools/clusterfuzz/v8_mock.js",
"../../tools/clusterfuzz/v8_mock_archs.js",
diff --git a/deps/v8/test/mjsunit/array-bounds-check-removal.js b/deps/v8/test/mjsunit/array-bounds-check-removal.js
index f2625c4590..303514947e 100644
--- a/deps/v8/test/mjsunit/array-bounds-check-removal.js
+++ b/deps/v8/test/mjsunit/array-bounds-check-removal.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --expose-gc --no-always-opt
+// Flags: --allow-natives-syntax --expose-gc --no-always-opt --opt
var a = new Int32Array(1024);
@@ -97,29 +97,69 @@ test_base(a, 3, true);
check_test_base(a, 3, true);
test_base(a, 3, false);
check_test_base(a, 3, false);
+assertOptimized(test_base);
+
+function test_base_for_dictionary_map(a, base, condition) {
+ a[base + 1] = 1;
+ a[base + 4] = 2;
+ a[base + 3] = 3;
+ a[base + 2] = 4;
+ a[base + 4] = base + 4;
+ if (condition) {
+ a[base + 1] = 1;
+ a[base + 2] = 2;
+ a[base + 2] = 3;
+ a[base + 2] = 4;
+ a[base + 4] = base + 4;
+ } else {
+ a[base + 6] = 1;
+ a[base + 4] = 2;
+ a[base + 3] = 3;
+ a[base + 2] = 4;
+ a[base + 4] = base - 4;
+ }
+}
// Test that we deopt on failed bounds checks.
var dictionary_map_array = new Int32Array(128);
-test_base(dictionary_map_array, 5, true);
-%PrepareFunctionForOptimization(test_base);
-test_base(dictionary_map_array, 6, true);
-test_base(dictionary_map_array, 5, false);
-test_base(dictionary_map_array, 6, false);
-%OptimizeFunctionOnNextCall(test_base);
-test_base(dictionary_map_array, -2, true);
-assertUnoptimized(test_base);
+test_base_for_dictionary_map(dictionary_map_array, 5, true);
+%PrepareFunctionForOptimization(test_base_for_dictionary_map);
+test_base_for_dictionary_map(dictionary_map_array, 6, true);
+test_base_for_dictionary_map(dictionary_map_array, 5, false);
+test_base_for_dictionary_map(dictionary_map_array, 6, false);
+%OptimizeFunctionOnNextCall(test_base_for_dictionary_map);
+test_base_for_dictionary_map(dictionary_map_array, -2, true);
+assertUnoptimized(test_base_for_dictionary_map);
-// Forget about the dictionary_map_array's map.
-%ClearFunctionFeedback(test_base);
-%PrepareFunctionForOptimization(test_base);
+function test_base_for_oob(a, base, condition) {
+ a[base + 1] = 1;
+ a[base + 4] = 2;
+ a[base + 3] = 3;
+ a[base + 2] = 4;
+ a[base + 4] = base + 4;
+ if (condition) {
+ a[base + 1] = 1;
+ a[base + 2] = 2;
+ a[base + 2] = 3;
+ a[base + 2] = 4;
+ a[base + 4] = base + 4;
+ } else {
+ a[base + 6] = 1;
+ a[base + 4] = 2;
+ a[base + 3] = 3;
+ a[base + 2] = 4;
+ a[base + 4] = base - 4;
+ }
+}
-test_base(a, 5, true);
-test_base(a, 6, true);
-test_base(a, 5, false);
-test_base(a, 6, false);
-%OptimizeFunctionOnNextCall(test_base);
-test_base(a, 2048, true);
-assertUnoptimized(test_base);
+%PrepareFunctionForOptimization(test_base_for_oob);
+test_base_for_oob(a, 5, true);
+test_base_for_oob(a, 6, true);
+test_base_for_oob(a, 5, false);
+test_base_for_oob(a, 6, false);
+%OptimizeFunctionOnNextCall(test_base_for_oob);
+test_base_for_oob(a, 2048, true);
+assertUnoptimized(test_base_for_oob);
function test_minus(base,cond) {
a[base - 1] = 1;
@@ -178,7 +218,7 @@ short_test(short_a, 50);
%OptimizeFunctionOnNextCall(short_test);
short_a.length = 10;
short_test(short_a, 0);
-assertUnoptimized(test_base);
+assertUnoptimized(short_test);
// A test for when we would modify a phi index.
diff --git a/deps/v8/test/mjsunit/baseline/cross-realm.js b/deps/v8/test/mjsunit/baseline/cross-realm.js
new file mode 100644
index 0000000000..1d0fb6b0a2
--- /dev/null
+++ b/deps/v8/test/mjsunit/baseline/cross-realm.js
@@ -0,0 +1,68 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --sparkplug
+
+// Tier-up across Realms
+
+// Ensure a feedback vector is created when sharing baseline code.
+(function() {
+ function factory1() {
+ return function(a) {
+ return a;
+ }
+ }
+
+ var realm1 = Realm.createAllowCrossRealmAccess();
+ var realm2 = Realm.createAllowCrossRealmAccess();
+
+ let f1 = Realm.eval(realm1, "(" + factory1.toString() + ")")();
+ let f2 = Realm.eval(realm2, "(" + factory1.toString() + ")")();
+ %NeverOptimizeFunction(f1);
+ %NeverOptimizeFunction(f2);
+
+ %CompileBaseline(f1);
+ assertEquals(0, f1(0));
+ assertTrue(isBaseline(f1));
+ assertFalse(isBaseline(f2));
+
+ assertEquals(0, f2(0));
+ assertTrue(isBaseline(f1));
+ assertTrue(isBaseline(f2));
+})();
+
+// Ensure a feedback vector is created when sharing baseline code and a closure
+// feedback cell array already exists.
+(function() {
+ function factory2() {
+ return function(a) {
+ return a;
+ }
+ }
+
+ var realm1 = Realm.createAllowCrossRealmAccess();
+ var realm2 = Realm.createAllowCrossRealmAccess();
+
+ let f1 = Realm.eval(realm1, "(" + factory2.toString() + ")")();
+ let realmFactory = Realm.eval(realm2, "(" + factory2.toString() + ")");
+ let f2 = realmFactory();
+ let f3 = realmFactory();
+ %NeverOptimizeFunction(f1);
+ %NeverOptimizeFunction(f2);
+ %NeverOptimizeFunction(f3);
+
+ assertEquals(0, f2(0));
+ %CompileBaseline(f1);
+ assertEquals(0, f1(0));
+ assertTrue(isBaseline(f1));
+ assertFalse(isBaseline(f2));
+ assertFalse(isBaseline(f3));
+
+ assertEquals(0, f3(0));
+ assertTrue(isBaseline(f3));
+ assertFalse(isBaseline(f2));
+
+ assertEquals(0, f2(0));
+ assertTrue(isBaseline(f2));
+})();
diff --git a/deps/v8/test/mjsunit/baseline/test-baseline-module-helper.mjs b/deps/v8/test/mjsunit/baseline/test-baseline-module-helper.mjs
new file mode 100644
index 0000000000..bedcfb69be
--- /dev/null
+++ b/deps/v8/test/mjsunit/baseline/test-baseline-module-helper.mjs
@@ -0,0 +1,5 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export default 12;
\ No newline at end of file
diff --git a/deps/v8/test/mjsunit/baseline/test-baseline-module.mjs b/deps/v8/test/mjsunit/baseline/test-baseline-module.mjs
new file mode 100644
index 0000000000..409465c210
--- /dev/null
+++ b/deps/v8/test/mjsunit/baseline/test-baseline-module.mjs
@@ -0,0 +1,24 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --super-ic --sparkplug
+
+export let exported = 17;
+import imported from 'test-baseline-module-helper.mjs';
+
+function run(f, ...args) {
+ try { f(...args); } catch (e) {}
+ %CompileBaseline(f);
+ return f(...args);
+}
+
+function construct(f, ...args) {
+ try { new f(...args); } catch (e) {}
+ %CompileBaseline(f);
+ return new f(...args);
+}
+
+assertEquals(17, run((o)=>{ return exported; }));
+assertEquals(12, run((o)=>{ return imported; }));
+assertEquals(20, run((o)=>{ exported = 20; return exported; }));
diff --git a/deps/v8/test/mjsunit/baseline/test-baseline.js b/deps/v8/test/mjsunit/baseline/test-baseline.js
new file mode 100644
index 0000000000..b35a7ffbff
--- /dev/null
+++ b/deps/v8/test/mjsunit/baseline/test-baseline.js
@@ -0,0 +1,315 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --super-ic --sparkplug
+
+function run(f, ...args) {
+ try { f(...args); } catch (e) {}
+ %CompileBaseline(f);
+ return f(...args);
+}
+
+function construct(f, ...args) {
+ try { new f(...args); } catch (e) {}
+ %CompileBaseline(f);
+ return new f(...args);
+}
+
+// Constants
+assertEquals(run(()=>undefined), undefined);
+assertEquals(run(()=>null), null);
+assertEquals(run(()=>true), true);
+assertEquals(run(()=>false), false);
+assertEquals(run(()=>"bla"), "bla");
+assertEquals(run(()=>42), 42);
+assertEquals(run(()=>0), 0);
+
+// Variables
+assertEquals(run(()=>{let a = 42; return a}), 42);
+assertEquals(run(()=>{let a = 42; let b = 32; return a}), 42);
+
+// Arguments
+assertEquals(run((a)=>a, 42), 42);
+assertEquals(run((a,b)=>b, 1, 42), 42);
+assertEquals(run((a,b,c)=>c, 1, 2, 42), 42);
+
+// Property load
+assertEquals(run((o)=>o.a, {a:42}), 42);
+assertEquals(run((o, k)=>o[k], {a:42}, "a"), 42);
+
+// Property store
+assertEquals(run((o)=>{o.a=42; return o}, {}).a, 42);
+assertEquals(run((o, k)=>{o[k]=42; return o}, {}, "a").a, 42);
+
+// Global load/store
+global_x = 45;
+assertEquals(run(()=>global_x), 45);
+run(()=>{ global_x = 49 })
+assertEquals(global_x, 49);
+
+// Context load
+(function () {
+ let x = 42;
+ assertEquals(run(()=>{return x;}), 42);
+})();
+(function () {
+ let x = 4;
+ x = 42;
+ assertEquals(run(()=>{return x;}), 42);
+})();
+
+// Context store
+(function () {
+ let x = 4;
+ run(()=>{x = 42;});
+ assertEquals(x, 42);
+})();
+
+
+// Super
+// var o = {__proto__:{a:42}, m() { return super.a }};
+// assertEquals(run(o.m), 42);
+
+// Control flow
+assertEquals(run((x)=>{ if(x) return 5; return 10;}), 10);
+assertEquals(run(()=>{ var x = 0; for(var i = 1; i; i=0) x=10; return x;}), 10);
+assertEquals(run(()=>{ var x = 0; for(var i = 0; i < 10; i+=1) x+=1; return x;}), 10);
+assertEquals(run(()=>{ var x = 0; for(var i = 0; i < 10; ++i) x+=1; return x;}), 10);
+
+// Typeof
+function testTypeOf(o, t) {
+ let types = ['number', 'string', 'symbol', 'boolean', 'bigint', 'undefined',
+ 'function', 'object'];
+ assertEquals(t, eval('run(()=>typeof ' + o + ')'));
+ assertTrue(eval('run(()=>typeof ' + o + ' == "' + t + '")'));
+ var other_types = types.filter((x) => x !== t);
+ for (var other of other_types) {
+ assertFalse(eval('run(()=>typeof ' + o + ' == "' + other + '")'));
+ }
+}
+
+testTypeOf('undefined', 'undefined');
+testTypeOf('null', 'object');
+testTypeOf('true', 'boolean');
+testTypeOf('false', 'boolean');
+testTypeOf('42.42', 'number');
+testTypeOf('42', 'number');
+testTypeOf('42n', 'bigint');
+testTypeOf('"42"', 'string');
+testTypeOf('Symbol(42)', 'symbol');
+testTypeOf('{}', 'object');
+testTypeOf('[]', 'object');
+//testTypeOf('new Proxy({}, {})', 'object');
+//testTypeOf('new Proxy([], {})', 'object');
+testTypeOf('(_ => 42)', 'function');
+testTypeOf('function() {}', 'function');
+testTypeOf('function*() {}', 'function');
+testTypeOf('async function() {}', 'function');
+testTypeOf('async function*() {}', 'function');
+//testTypeOf('new Proxy(_ => 42, {})', 'function');
+//testTypeOf('class {}', 'function');
+testTypeOf('Object', 'function');
+
+// Binop
+assertEquals(run((a,b)=>{return a+b}, 41, 1), 42);
+assertEquals(run((a,b)=>{return a*b}, 21, 2), 42);
+assertEquals(run((a)=>{return a+3}, 39), 42);
+assertEquals(run((a,b)=>{return a&b}, 0x23, 0x7), 0x3);
+assertEquals(run((a)=>{return a&0x7}, 0x23), 0x3);
+assertEquals(run((a,b)=>{return a|b}, 0x23, 0x7), 0x27);
+assertEquals(run((a)=>{return a|0x7}, 0x23), 0x27);
+assertEquals(run((a,b)=>{return a^b}, 0x23, 0x7), 0x24);
+assertEquals(run((a)=>{return a^0x7}, 0x23), 0x24);
+
+// Unop
+assertEquals(run((x)=>{return x++}, 41), 41);
+assertEquals(run((x)=>{return ++x}, 41), 42);
+assertEquals(run((x)=>{return x--}, 41), 41);
+assertEquals(run((x)=>{return --x}, 41), 40);
+assertEquals(run((x)=>{return !x}, 41), false);
+assertEquals(run((x)=>{return ~x}, 41), ~41);
+
+// Calls
+function f0() { return 42; }
+function f1(x) { return x; }
+function f2(x, y) { return x + y; }
+function f3(x, y, z) { return y + z; }
+assertEquals(run(()=>{return f0()}), 42);
+assertEquals(run(()=>{return f1(42)}), 42);
+assertEquals(run(()=>{return f2(41, 1)}), 42);
+assertEquals(run(()=>{return f3(1, 2, 40)}), 42);
+
+// Mapped Arguments
+function mapped_args() {
+ return [arguments.length, ...arguments];
+}
+function mapped_args_dup(a,a) {
+ return [arguments.length, ...arguments];
+}
+assertEquals(run(mapped_args, 1, 2, 3), [3,1,2,3]);
+assertEquals(run(mapped_args_dup, 1, 2, 3), [3,1,2,3]);
+
+// Unmapped Arguments
+function unmapped_args() {
+ "use strict";
+ return [arguments.length, ...arguments];
+}
+assertEquals(run(unmapped_args, 1, 2, 3), [3,1,2,3]);
+
+// Rest Arguments
+function rest_args(...rest) {
+ return [rest.length, ...rest];
+}
+assertEquals(run(rest_args, 1, 2, 3), [3,1,2,3]);
+
+// Property call
+let obj = {
+ f0: () => { return 42; },
+ f1: (x) => { return x; },
+ f2: (x, y) => { return x + y; },
+ f3: (x, y, z) => { return y + z; }
+}
+assertEquals(run(()=>{return obj.f0()}), 42);
+assertEquals(run(()=>{return obj.f1(42)}), 42);
+assertEquals(run(()=>{return obj.f2(41, 1)}), 42);
+assertEquals(run(()=>{return obj.f3(1, 2, 40)}), 42);
+
+// Call with spread
+let ns = [2, 40];
+assertEquals(run(()=>{return f3("x", ...ns)}), 42);
+
+// Construct
+function C(a, b, c) { this.x = 39 + b + c; }
+assertEquals(run(()=>{return (new C("a", 1, 2)).x}), 42);
+assertEquals(run(()=>{return (new C("a", ...ns)).x}), 81);
+
+// Construct Array
+assertEquals(run(()=>{return new Array(1, 2, 39);}).reduce((a,x)=>a+x), 42);
+
+// Call Runtime
+assertMatches(run(() => { return %NewRegExpWithBacktrackLimit("ax", "", 50); }), "ax");
+run(() => { %CompileBaseline(()=>{}); });
+
+// Call Intrinsics
+assertEquals(run(()=>{return %_IsSmi(42)}), true);
+
+// CallRuntimeForPair
+assertEquals(run(()=>{with (f0) return f0();}), 42);
+
+// Closure
+assertEquals(run((o)=>{if (true) {let x = o; return ()=>x}}, 42)(), 42);
+assertEquals(run((o)=>{return ()=>o}, 42)(), 42);
+
+// Object / Array Literals
+assertEquals(run((o)=>{return {a:42}}), {a:42});
+assertEquals(run((o)=>{return [42]}), [42]);
+assertEquals(run((o)=>{return []}), []);
+assertEquals(run((o)=>{return {}}), {});
+assertEquals(run((o)=>{return {...o}}, {a:42}), {a:42});
+assertEquals(run((o)=>{return /42/}), /42/);
+assertEquals(run((o)=>{return [...o]}, [1,2,3,4]), [1,2,3,4]);
+
+// Construct
+// Throw if the super() isn't a constructor
+class T extends Object { constructor() { super() } }
+T.__proto__ = null;
+assertThrows(()=>construct(T));
+
+run((o)=>{ try { } finally { } });
+
+// SwitchOnSmiNoFeeback
+run((o) => {
+ var x = 0;
+ var y = 0;
+ while (true) {
+ try {
+ x++;
+ if (x == 2) continue;
+ if (x == 5) break;
+ } finally {
+ y++;
+ }
+ }
+ return x + y;
+}, 10);
+
+// GetIterator
+assertEquals(run((o)=>{
+ let sum = 0; for (x of [1, 2]) {sum += x;} return sum;}), 3);
+
+// ForIn
+assertEquals(run((o)=>{ let sum = 0; for (let k in o) { sum += o[k] }; return sum }, {a:41,b:1}), 42);
+
+// In
+assertTrue(run((o, k)=>{return k in o}, {a:1}, "a"));
+assertFalse(run((o, k)=>{return k in o}, {a:1}, "b"));
+
+class D {}
+assertTrue(run((o, c)=>{return o instanceof c}, new D(), D));
+assertTrue(run((o, c)=>{return o instanceof c}, new D(), Object));
+assertFalse(run((o, c)=>{return o instanceof c}, new D(), RegExp));
+
+// CreateArrayFromIterable
+assertEquals(run((a)=>{return [...a]}, [1,2,3]), [1,2,3]);
+
+// Generator
+let gen = run(function*() {
+ yield 1;
+ yield 2;
+ yield 3;
+});
+let i = 1;
+for (let val of gen) {
+ assertEquals(i++, val);
+}
+assertEquals(4, i);
+
+// Async await
+run(async function() {
+ await 1;
+ await 1;
+ await 1;
+ return 42;
+}).then(x=>assertEquals(42, x));
+
+// Try-catch
+assertEquals(run((x)=>{
+ if (x) {
+ try {
+ if (x) throw x;
+ return 45;
+ } catch (e) {
+ return e;
+ }
+ }
+}, 42), 42);
+
+// Tier-up via InterpreterEntryTrampoline
+(function() {
+ function factory() {
+ return function(a) {
+ return a;
+ };
+ }
+ let f1 = factory();
+ let f2 = factory();
+ %NeverOptimizeFunction(f1);
+ %NeverOptimizeFunction(f2);
+
+ assertEquals(f1(0), 0);
+ assertEquals(f2(0), 0);
+ assertTrue(isInterpreted(f1))
+ assertFalse(isBaseline(f1));
+ assertTrue(isInterpreted(f2))
+ assertFalse(isBaseline(f2));
+
+ %CompileBaseline(f1);
+ assertEquals(f1(0), 0);
+ assertTrue(isBaseline(f1));
+ assertFalse(isBaseline(f2));
+
+ assertEquals(f2(0), 0);
+ assertTrue(isBaseline(f1));
+ assertTrue(isBaseline(f2));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/array-slice-clone.js b/deps/v8/test/mjsunit/compiler/array-slice-clone.js
index fc002da2c3..c6294b85b5 100644
--- a/deps/v8/test/mjsunit/compiler/array-slice-clone.js
+++ b/deps/v8/test/mjsunit/compiler/array-slice-clone.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --opt
+// Flags: --allow-natives-syntax --opt --no-stress-flush-bytecode
// Test CloneFastJSArray inserted by JSCallReducer for Array.prototype.slice.
diff --git a/deps/v8/test/mjsunit/compiler/number-divide.js b/deps/v8/test/mjsunit/compiler/number-divide.js
index 1c7710c1f8..c971ad013e 100644
--- a/deps/v8/test/mjsunit/compiler/number-divide.js
+++ b/deps/v8/test/mjsunit/compiler/number-divide.js
@@ -10,6 +10,7 @@
(function() {
// We need a separately polluted % with NumberOrOddball feedback.
function bar(x) { return x / 2; }
+ %EnsureFeedbackVectorForFunction(bar);
bar(undefined); // The % feedback is now NumberOrOddball.
// Now just use the gadget above in a way that only after RETYPE
@@ -40,6 +41,7 @@
(function() {
// We need a separately polluted % with NumberOrOddball feedback.
function bar(x) { return x / 2; }
+ %EnsureFeedbackVectorForFunction(bar);
bar(undefined); // The % feedback is now NumberOrOddball.
// Now just use the gadget above in a way that only after RETYPE
diff --git a/deps/v8/test/mjsunit/compiler/regress-1177368.js b/deps/v8/test/mjsunit/compiler/regress-1177368.js
new file mode 100644
index 0000000000..9f42321270
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1177368.js
@@ -0,0 +1,20 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+let __v_20 = new Int32Array();
+__v_20.set({
+ get length() {
+ %ArrayBufferDetach(__v_20.buffer);
+ }
+ });
+
+function bar() { return array[0]; }
+var array = new Float32Array(1000);
+%PrepareFunctionForOptimization(bar);
+bar();
+bar();
+%OptimizeFunctionOnNextCall(bar);
+bar();
diff --git a/deps/v8/test/mjsunit/compiler/regress-1177369.js b/deps/v8/test/mjsunit/compiler/regress-1177369.js
new file mode 100644
index 0000000000..a20b36ed77
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1177369.js
@@ -0,0 +1,28 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+try {
+ let array = new ArrayBuffer();
+ array.constructor = {
+ get [Symbol.species]() {
+ %ArrayBufferDetach(array);
+ }
+ };
+ array.slice();
+} catch (e) {}
+
+var array = new Int8Array(100);
+function foo() {
+ for (var i = 0; i < 100; i += 4) {
+ array[i] = i;
+ }
+}
+
+%PrepareFunctionForOptimization(foo);
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/concurrent-initial-prototype-change.js b/deps/v8/test/mjsunit/concurrent-initial-prototype-change-1.js
index f4edee5907..3234c61c96 100644
--- a/deps/v8/test/mjsunit/concurrent-initial-prototype-change.js
+++ b/deps/v8/test/mjsunit/concurrent-initial-prototype-change-1.js
@@ -28,6 +28,7 @@
// Flags: --allow-natives-syntax
// Flags: --concurrent-recompilation --block-concurrent-recompilation
// Flags: --nostress-opt --no-always-opt
+// Flags: --no-turbo-direct-heap-access
// --nostress-opt is in place because this particular optimization
// (guaranteeing that the Array prototype chain has no elements) is
@@ -44,7 +45,6 @@ if (!%IsConcurrentRecompilationSupported()) {
function f1(a, i) {
return a[i] + 0.5;
}
-%PrepareFunctionForOptimization(f1);
%PrepareFunctionForOptimization(f1);
var arr = [0.0,,2.5];
@@ -53,7 +53,9 @@ assertEquals(0.5, f1(arr, 0));
// Optimized code of f1 depends on initial object and array maps.
%OptimizeFunctionOnNextCall(f1, "concurrent");
-// Kick off recompilation;
+// Kick off recompilation. Note that the NoElements protector is read by the
+// compiler in the main-thread phase of compilation, i.e., before the store to
+// Object.prototype below.
assertEquals(0.5, f1(arr, 0));
// Invalidate current initial object map after compile graph has been created.
Object.prototype[1] = 1.5;
@@ -65,5 +67,5 @@ assertUnoptimized(f1, "no sync");
// Sync with background thread to conclude optimization, which bails out
// due to map dependency.
assertUnoptimized(f1, "sync");
-//Clear type info for stress runs.
+// Clear type info for stress runs.
%ClearFunctionFeedback(f1);
diff --git a/deps/v8/test/mjsunit/concurrent-initial-prototype-change-2.js b/deps/v8/test/mjsunit/concurrent-initial-prototype-change-2.js
new file mode 100644
index 0000000000..585388468f
--- /dev/null
+++ b/deps/v8/test/mjsunit/concurrent-initial-prototype-change-2.js
@@ -0,0 +1,69 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+// Flags: --concurrent-recompilation --block-concurrent-recompilation
+// Flags: --nostress-opt --no-always-opt
+
+// --nostress-opt is in place because this particular optimization
+// (guaranteeing that the Array prototype chain has no elements) is
+// maintained isolate-wide. Once it's been "broken" by the change
+// to the Object prototype below, future compiles will not use the
+// optimization anymore, and the code will remain optimized despite
+// additional changes to the prototype chain.
+
+if (!%IsConcurrentRecompilationSupported()) {
+ print("Concurrent recompilation is disabled. Skipping this test.");
+ quit();
+}
+
+function f1(a, i) {
+ return a[i] + 0.5;
+}
+
+%PrepareFunctionForOptimization(f1);
+var arr = [0.0,,2.5];
+assertEquals(0.5, f1(arr, 0));
+assertEquals(0.5, f1(arr, 0));
+
+// Optimized code of f1 depends on initial object and array maps.
+%OptimizeFunctionOnNextCall(f1, "concurrent");
+// Kick off recompilation.
+assertEquals(0.5, f1(arr, 0));
+// Invalidate current initial object map.
+Object.prototype[1] = 1.5;
+assertEquals(2, f1(arr, 1));
+// Not yet optimized since concurrent recompilation is blocked.
+assertUnoptimized(f1, "no sync");
+// Let concurrent recompilation proceed.
+%UnblockConcurrentRecompilation();
+// Sync with background thread to conclude optimization, which may or may not
+// bailout due to map dependency, depending on whether the compiler read the
+// NoElements protector before or after the store to Object.prototype above.
+assertEquals(2, f1(arr, 1));
+// Clear type info for stress runs.
+%ClearFunctionFeedback(f1);
diff --git a/deps/v8/test/mjsunit/const-dict-tracking.js b/deps/v8/test/mjsunit/const-dict-tracking.js
new file mode 100644
index 0000000000..752423443b
--- /dev/null
+++ b/deps/v8/test/mjsunit/const-dict-tracking.js
@@ -0,0 +1,262 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+//
+// Tests tracking of constness of properties stored in dictionary
+// mode prototypes.
+
+
+var unique_id = 0;
+// Creates a function with unique SharedFunctionInfo to ensure the feedback
+// vector is unique for each test case.
+function MakeFunctionWithUniqueSFI(...args) {
+ assertTrue(args.length > 0);
+ var body = `/* Unique comment: ${unique_id++} */ ` + args.pop();
+ return new Function(...args, body);
+}
+
+// Invalidation by store handler.
+(function() {
+ var proto = Object.create(null);
+ proto.z = 1;
+ assertFalse(%HasFastProperties(proto));
+
+ var o = Object.create(proto);
+
+ function read_z() {
+ return o.z;
+ }
+ function update_z(new_value) {
+ proto.z = new_value;
+ }
+
+ // Allocate feedback vector, but we don't want to optimize the function.
+ %PrepareFunctionForOptimization(read_z);
+ for (var i = 0; i < 4; i++) {
+ read_z();
+ }
+ assertTrue(%HasOwnConstDataProperty(proto, "z"));
+
+ // Allocate feedback vector, but we don't want to optimize the function.
+ %PrepareFunctionForOptimization(update_z);
+ for (var i = 0; i < 4; i++) {
+ // Overwriting with same value maintains const-ness.
+ update_z(1);
+ }
+
+
+ assertTrue(%HasOwnConstDataProperty(proto, "z"));
+
+ update_z(2);
+
+ assertFalse(%HasOwnConstDataProperty(proto, "z"));
+ assertEquals(2, read_z());
+})();
+
+// Properties become const when dict mode object becomes prototype.
+(function() {
+ var proto = Object.create(null);
+ var proto_shadow = Object.create(null);
+
+ proto.z = 1;
+ proto_shadow.z = 1;
+
+ // Make sure that z is marked as mutable.
+ proto.z = 2;
+ proto_shadow.z = 2;
+
+ assertFalse(%HasFastProperties(proto));
+ assertTrue(%HaveSameMap(proto, proto_shadow));
+
+ var o = Object.create(proto);
+
+ assertFalse(%HasFastProperties(proto));
+ // proto must have received new map.
+ assertFalse(%HaveSameMap(proto, proto_shadow));
+ assertEquals(%IsDictPropertyConstTrackingEnabled(),
+ %HasOwnConstDataProperty(proto, "z"));
+})();
+
+// Properties become const when fast mode object becomes prototype.
+(function() {
+ var proto = {}
+ var proto_shadow = {};
+
+ proto.z = 1;
+ proto_shadow.z = 1;
+
+ // Make sure that z is marked as mutable.
+ proto.z = 2;
+ proto_shadow.z = 2;
+
+ assertTrue(%HasFastProperties(proto));
+ assertTrue(%HaveSameMap(proto, proto_shadow));
+
+ var o = Object.create(proto);
+
+ assertFalse(%HasFastProperties(proto));
+ // proto must have received new map.
+ assertFalse(%HaveSameMap(proto, proto_shadow));
+ assertEquals(%IsDictPropertyConstTrackingEnabled(),
+ %HasOwnConstDataProperty(proto, "z"));
+})();
+
+function testbench(o, proto, update_proto, check_constness) {
+ var check_z = MakeFunctionWithUniqueSFI("obj", "return obj.z;");
+
+ if (check_constness && %IsDictPropertyConstTrackingEnabled())
+ assertTrue(%HasOwnConstDataProperty(proto, "z"));
+
+ // Allocate feedback vector, but we don't want to optimize the function.
+ %PrepareFunctionForOptimization(check_z);
+ for (var i = 0; i < 4; i++) {
+ check_z(o);
+ }
+
+ update_proto();
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ if (check_constness)
+ assertFalse(%HasOwnConstDataProperty(proto, "z"));
+ assertFalse(%HasFastProperties(proto));
+ }
+
+ assertEquals("2", check_z(o));
+}
+
+// Simple update.
+(function() {
+ var proto = Object.create(null);
+ proto.z = "1";
+ assertFalse(%HasFastProperties(proto));
+
+ var o = Object.create(proto);
+
+ function update_z() {
+ proto.z = "2";
+ }
+
+ testbench(o, proto, update_z, true);
+})();
+
+// Update using Object.assign.
+(function() {
+ var proto = Object.create(null);
+ proto.z = "1";
+ assertFalse(%HasFastProperties(proto));
+
+ var o = Object.create(proto);
+
+ function update_z() {
+ Object.assign(proto, {z: "2"});
+ }
+
+ testbench(o, proto, update_z, true);
+})();
+
+// Update using Object.defineProperty
+(function() {
+ var proto = Object.create(null);
+ proto.z = "1";
+ assertFalse(%HasFastProperties(proto));
+
+ var o = Object.create(proto);
+
+ function update_z() {
+ Object.defineProperty(proto, 'z', {
+ value: "2",
+ configurable: true,
+ enumerable: true,
+ writable: true
+ });
+ }
+
+ testbench(o, proto, update_z, true);
+})();
+
+
+// Update using setter
+(function() {
+ var proto = Object.create(null);
+ Object.defineProperty(proto, "z", {
+ get : function () {return this.z_val;},
+ set : function (new_z) {this.z_val = new_z;}
+ });
+
+ proto.z = "1";
+ assertFalse(%HasFastProperties(proto));
+
+ var o = Object.create(proto);
+
+ function update_z() {
+ proto.z = "2";
+ }
+
+ testbench(o, proto, update_z, false);
+})();
+
+// Proxy test 1: Update via proxy.
+(function() {
+ var proto = Object.create(null);
+
+ var proxy = new Proxy(proto, {});
+
+ proxy.z = "1";
+ assertFalse(%HasFastProperties(proto));
+
+ var o = Object.create(proxy);
+
+ function update_z() {
+ proxy.z = "2";
+ }
+
+ testbench(o, proto, update_z, false);
+})();
+
+// Proxy test 2: Update on proto.
+(function() {
+ var proto = Object.create(null);
+
+ var proxy = new Proxy(proto, {});
+
+ proto.z = "1";
+ assertFalse(%HasFastProperties(proto));
+
+ var o = Object.create(proxy);
+
+ function update_z() {
+ proto.z = "2";
+ }
+
+ testbench(o, proto, update_z, false);
+})();
+
+// Proxy test 3: Update intercepted.
+(function() {
+ var proto = Object.create(null);
+
+ var handler = {
+ get: function(target, prop) {
+ return target.the_value;
+ },
+ set: function(target, prop, value) {
+ return target.the_value = value;
+ }
+ };
+
+ var proxy = new Proxy(proto, handler);
+
+ proxy.z = "1";
+ assertFalse(%HasFastProperties(proto));
+
+ var o = Object.create(proxy);
+
+ function update_z() {
+ proxy.z = "2";
+ }
+
+ testbench(o, proto, update_z, false);
+
+})();
diff --git a/deps/v8/test/mjsunit/const-field-tracking-2.js b/deps/v8/test/mjsunit/const-field-tracking-2.js
index b0eb8c749f..c1da5cf0dc 100644
--- a/deps/v8/test/mjsunit/const-field-tracking-2.js
+++ b/deps/v8/test/mjsunit/const-field-tracking-2.js
@@ -191,7 +191,7 @@ function TestStoreToConstantFieldOfConstantObject(the_value, other_value) {
assertOptimized(store);
// Storing other value deoptimizes because of failed value check.
store(other_value);
- assertOptimized(store);
+ assertUnoptimized(store);
assertEquals(other_value, constant_object.a.v);
}
diff --git a/deps/v8/test/mjsunit/d8/d8-fuzzable-worker.js b/deps/v8/test/mjsunit/d8/d8-fuzzable-worker.js
new file mode 100644
index 0000000000..2ed1eed01a
--- /dev/null
+++ b/deps/v8/test/mjsunit/d8/d8-fuzzable-worker.js
@@ -0,0 +1,69 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests for a more fuzzable way for creating workers based on functions.
+
+(function TestWorker() {
+ function workerCode1() {
+ onmessage = function() {
+ postMessage('hi');
+ }
+ }
+
+ let w = new Worker(workerCode1, {type: 'function'});
+ w.postMessage('');
+ assertEquals('hi', w.getMessage());
+})();
+
+(function TestPassingNumberParam() {
+ function workerCode2(n) {
+ onmessage = function() {
+ postMessage('worker ' + (n + 1));
+ }
+ }
+
+ w = new Worker(workerCode2, {type: 'function', arguments: [2021]});
+ w.postMessage('');
+ assertEquals('worker 2022', w.getMessage());
+})();
+
+(function TestPassingStringParam() {
+ function workerCode3(s) {
+ onmessage = function() {
+ postMessage('worker ' + s);
+ }
+ }
+
+ w = new Worker(workerCode3, {type: 'function', arguments: ['hello']});
+ w.postMessage('');
+ assertEquals('worker hello', w.getMessage());
+})();
+
+(function TestPassingObjectParam() {
+ function workerCode4(o) {
+ onmessage = function() {
+ postMessage('worker ' + (o.x + 1));
+ }
+ }
+
+ w = new Worker(workerCode4, {type: 'function', arguments: [{x: 1}]});
+ w.postMessage('');
+ assertEquals('worker 2', w.getMessage());
+})();
+
+(function TestPassingFunctionParam() {
+ function workerCode5(f) {
+ eval(f);
+ onmessage = function() {
+ postMessage('worker ' + func());
+ }
+ }
+
+ let config = {'func': function func() { return 'hi';} };
+
+ w = new Worker(workerCode5, {type: 'function',
+ arguments: [config.func.toString()]});
+ w.postMessage('');
+ assertEquals('worker hi', w.getMessage());
+})();
diff --git a/deps/v8/test/mjsunit/dictionary-properties.js b/deps/v8/test/mjsunit/dictionary-properties.js
index cffa48547e..4455792e5f 100644
--- a/deps/v8/test/mjsunit/dictionary-properties.js
+++ b/deps/v8/test/mjsunit/dictionary-properties.js
@@ -47,10 +47,20 @@ var slow_proto = new SlowPrototype;
function ic() { return slow_proto.bar; }
ic();
ic();
-assertTrue(%HasFastProperties(slow_proto.__proto__));
+assertEquals(!%IsDictPropertyConstTrackingEnabled(),
+ %HasFastProperties(slow_proto.__proto__));
// Prototypes stay fast even after deleting properties.
-assertTrue(%HasFastProperties(SlowPrototype.prototype));
+assertEquals(!%IsDictPropertyConstTrackingEnabled(),
+ %HasFastProperties(SlowPrototype.prototype));
var fast_proto = new SlowPrototype();
-assertTrue(%HasFastProperties(SlowPrototype.prototype));
-assertTrue(%HasFastProperties(fast_proto.__proto__));
+assertEquals(!%IsDictPropertyConstTrackingEnabled(),
+ %HasFastProperties(SlowPrototype.prototype));
+assertEquals(!%IsDictPropertyConstTrackingEnabled(),
+ %HasFastProperties(fast_proto.__proto__));
+
+
+if (!%IsDictPropertyConstTrackingEnabled()) {
+ assertTrue(%HasFastProperties(SlowPrototype.prototype));
+ assertTrue(%HasFastProperties(fast_proto.__proto__));
+}
diff --git a/deps/v8/test/mjsunit/dictionary-prototypes.js b/deps/v8/test/mjsunit/dictionary-prototypes.js
index 0186c63f91..7e4f8390b8 100644
--- a/deps/v8/test/mjsunit/dictionary-prototypes.js
+++ b/deps/v8/test/mjsunit/dictionary-prototypes.js
@@ -360,7 +360,8 @@ function TestAddingPropertyToAlmostDictionaryPrototype() {
for (let i = 0; i < 2; ++i) {
o.x0;
}
- assertTrue(%HasFastProperties(Bar.prototype));
+ if (!%IsDictPropertyConstTrackingEnabled())
+ assertTrue(%HasFastProperties(Bar.prototype));
for (let i = 0; i < 11; ++i) {
// First, the property is looked up from Foo.
diff --git a/deps/v8/test/mjsunit/es6/block-let-crankshaft-sloppy.js b/deps/v8/test/mjsunit/es6/block-let-crankshaft-sloppy.js
index 39ebea9ceb..0516ec5f16 100644
--- a/deps/v8/test/mjsunit/es6/block-let-crankshaft-sloppy.js
+++ b/deps/v8/test/mjsunit/es6/block-let-crankshaft-sloppy.js
@@ -25,7 +25,14 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --opt
+// Flags: --allow-natives-syntax --opt --no-lazy-feedback-allocation
+
+// Lazy feedback allocation is disabled to guard against the case that a
+// second-level function like assertTrue gets its feedback vector allocated
+// immediately before the top-level function like f25 is compiled. In that case,
+// assertTrue would be inlined but would cause a deopt because it had not yet
+// collected any feedback data, and then the subsequent assertOptimized would
+// fail.
// Check that the following functions are optimizable.
var functions = [ f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14,
diff --git a/deps/v8/test/mjsunit/es6/class-computed-property-names-super.js b/deps/v8/test/mjsunit/es6/class-computed-property-names-super.js
index 4aa816f6cd..5dceb9db18 100644
--- a/deps/v8/test/mjsunit/es6/class-computed-property-names-super.js
+++ b/deps/v8/test/mjsunit/es6/class-computed-property-names-super.js
@@ -72,8 +72,6 @@ function assertAccessorDescriptor(object, name) {
[ID(4294967295)]() { return '4294967295' + super.m(); }
}
- assertSame(Derived.prototype, Derived.prototype.a[%HomeObjectSymbol()]);
-
assertMethodDescriptor(Derived.prototype, "a");
assertMethodDescriptor(Derived.prototype, "b");
assertMethodDescriptor(Derived.prototype, 0);
diff --git a/deps/v8/test/mjsunit/es6/collections-constructor-iterator-side-effect.js b/deps/v8/test/mjsunit/es6/collections-constructor-iterator-side-effect.js
index 514e54630c..7c14ec4686 100644
--- a/deps/v8/test/mjsunit/es6/collections-constructor-iterator-side-effect.js
+++ b/deps/v8/test/mjsunit/es6/collections-constructor-iterator-side-effect.js
@@ -26,6 +26,8 @@ function TestSetWithModifiedIterator(ctor) {
arrayIteratorProto.next = originalNext;
}
%PrepareFunctionForOptimization(TestSetWithModifiedIterator);
+%EnsureFeedbackVectorForFunction(assertTrue);
+%EnsureFeedbackVectorForFunction(assertEquals);
TestSetWithModifiedIterator(Set);
TestSetWithModifiedIterator(Set);
TestSetWithModifiedIterator(Set);
diff --git a/deps/v8/test/mjsunit/es6/computed-property-names-super.js b/deps/v8/test/mjsunit/es6/computed-property-names-super.js
index 40b0eab942..bf52f2f7d1 100644
--- a/deps/v8/test/mjsunit/es6/computed-property-names-super.js
+++ b/deps/v8/test/mjsunit/es6/computed-property-names-super.js
@@ -24,8 +24,6 @@ function ID(x) {
[ID(1)]() { return '1' + super.m(); },
};
- assertSame(object, object.a[%HomeObjectSymbol()]);
-
assertEquals('a proto m', object.a());
assertEquals('b proto m', object.b());
assertEquals('0 proto m', object[0]());
diff --git a/deps/v8/test/mjsunit/es6/home-object-in-context.js b/deps/v8/test/mjsunit/es6/home-object-in-context.js
new file mode 100644
index 0000000000..70013ffe5c
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/home-object-in-context.js
@@ -0,0 +1,196 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function TestSuperInObjectLiteralMethod() {
+ let my_proto = {
+   __proto__ : {'x': 'right' },
+   m() { return super.x; }
+ };
+ let o = {__proto__: my_proto};
+ assertEquals('right', o.m());
+})();
+
+(function TestSuperInObjectLiteralGetter() {
+ let my_proto = {
+   __proto__ : {'x': 'right' },
+   get p() { return super.x; }
+ };
+ let o = {__proto__: my_proto};
+ assertEquals('right', o.p);
+})();
+
+(function TestSuperInObjectLiteralSetter() {
+ let read_value;
+ let my_proto = {
+   __proto__ : {'x': 'right' },
+   set p(x) { read_value = super.x; }
+ };
+ let o = {__proto__: my_proto};
+ o.p = 'whatever';
+ assertEquals('right', read_value);
+})();
+
+(function TestSuperInObjectLiteralProperty() {
+ class OuterBase {};
+ OuterBase.prototype.x = 'right';
+ class Outer extends OuterBase {
+ m2() {
+ let my_proto = {
+    __proto__ : {'x': 'wrong' },
+    m: () => super.x,
+ };
+ let o = {__proto__: my_proto};
+ return o.m();
+ }
+ }
+ assertEquals('right', (new Outer()).m2());
+})();
+
+(function TestMethodScopes() {
+ let object = { // object literal 1 starts
+   __proto__: { // object literal 2 starts
+     method1() { return 'right'; }
+   }, // object literal 2 ends
+   method2() {
+     return super.method1();
+   }
+ }; // object literal 1 ends
+ assertEquals('right', object.method2());
+})();
+
+(function TestEvalInObjectLiteral() {
+ let o = {__proto__: {x: 'right'},
+ x: 'wrong',
+ m() {
+ let r = 0;
+ eval('r = super.x;');
+ return r;
+ }
+ };
+
+ assertEquals('right', o.m());
+})();
+
+(function TestEvalInMethod() {
+ class A {};
+ A.prototype.x = 'right';
+
+ class B extends A {
+ m() {
+ let r;
+ eval('r = super.x;');
+ return r;
+ }
+ };
+ B.prototype.x = 'wrong';
+
+ let b = new B();
+ assertEquals('right', b.m());
+})();
+
+(function TestSuperInsidePropertyInitializer() {
+ class OuterBase {}
+ OuterBase.prototype.prop = 'wrong';
+ OuterBase.prop = 'wrong';
+
+ class Outer extends OuterBase {
+   m() {
+     class A { }
+     A.prototype.prop = 'right';
+
+     class B extends A {
+       x = () => { return super.prop; };
+     }
+
+     B.prototype.prop = 'wrong';
+     return (new B()).x();
+   }
+ }
+ Outer.prototype.prop = 'wrong';
+ Outer.prop = 'wrong';
+
+ assertEquals('right', (new Outer()).m());
+})();
+
+(function TestSuperInsideStaticPropertyInitializer() {
+ class OuterBase {}
+ OuterBase.prototype.prop = 'wrong';
+ OuterBase.prop = 'wrong';
+
+ class Outer extends OuterBase {
+ m() {
+   class A { }
+ A.prop = 'right';
+ A.prototype.prop = 'wrong';
+ class B extends A {
+ static x = super.prop;
+ }
+ B.prop = 'wrong';
+ B.prototype.prop = 'wrong';
+ return B.x;
+ }
+ }
+ Outer.prototype.prop = 'wrong';
+ Outer.prop = 'wrong';
+
+ assertEquals('right', (new Outer).m());
+})();
+
+(function TestSuperInsideStaticPropertyInitializer2() {
+ class A extends class {
+ a() { return 'wrong'; }
+ } {
+ m() {
+ class C extends class {
+ static a() { return 'right'; }
+ } {
+ static static_prop = super.a;
+ };
+ return C.static_prop;
+ }
+ };
+ assertEquals('right', (new A()).m()());
+})();
+
+(function TestSuperInsideExtends() {
+ class C extends class {
+ static a = 'right';
+ } {
+ static m = class D extends new Proxy(function f() {},
+ {get:(t, k) => {
+ if (k == "prototype") {
+ return Function.prototype;
+ }
+ return super.a;
+ }
+ }) {}
+ };
+ assertEquals('right', C.m.a);
+})();
+
+// Same as the previous test but without a Proxy.
+(function TestSuperInsideExtends2() {
+ function f(x) {
+   function A() { }
+   A.x = x;
+   return A;
+ }
+
+ class B {};
+ B.a = 'right';
+
+ // How to write "super" inside the extends clause? The "class extends value"
+ // needs to be a constructor.
+ class C extends B {
+   static m = class D extends f({m2: () => { return super.a;}}) { }
+ }
+
+ // C.m is a class. Its "parent class" is a function (returned by f). C.m.x
+ // binds to the parent's x, which is whatever we passed as a param to f.
+ // In this case, it's an object which has a property m2.
+
+ // Since m2 is an arrow function, and not a method, "super" inside it
+ // doesn't bind to the object where m2 is defined, but outside.
+ assertEquals('right', C.m.x.m2());
+})();
diff --git a/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect3.js b/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect3.js
index c56a552bdc..d449fe4330 100644
--- a/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect3.js
+++ b/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect3.js
@@ -29,6 +29,9 @@ function TestMapConstructorEntrySideEffect(ctor) {
}
%PrepareFunctionForOptimization(TestMapConstructorEntrySideEffect);
+%EnsureFeedbackVectorForFunction(assertTrue);
+%EnsureFeedbackVectorForFunction(assertFalse);
+%EnsureFeedbackVectorForFunction(assertEquals);
TestMapConstructorEntrySideEffect(Map);
TestMapConstructorEntrySideEffect(Map);
TestMapConstructorEntrySideEffect(Map);
diff --git a/deps/v8/test/mjsunit/es6/object-literals-super.js b/deps/v8/test/mjsunit/es6/object-literals-super.js
index b31a498767..50d0e0d943 100644
--- a/deps/v8/test/mjsunit/es6/object-literals-super.js
+++ b/deps/v8/test/mjsunit/es6/object-literals-super.js
@@ -4,59 +4,6 @@
// Flags: --allow-natives-syntax
-
-(function TestHomeObject() {
- var object = {
- method() {
- return super.method();
- },
- get getter() {
- return super.getter;
- },
- set setter(v) {
- super.setter = v;
- },
- get accessor() {
- return super.accessor;
- },
- set accessor(v) {
- super.accessor = v;
- },
-
- methodNoSuper() {},
- get getterNoSuper() {},
- set setterNoSuper(v) {},
- get accessorNoSuper() {},
- set accessorNoSuper(v) {},
- propertyNoSuper: function() {},
- propertyWithParenNoSuper: (function() {}),
- propertyWithParensNoSuper: ((function() {}))
- };
-
- assertEquals(object, object.method[%HomeObjectSymbol()]);
- var desc = Object.getOwnPropertyDescriptor(object, 'getter');
- assertEquals(object, desc.get[%HomeObjectSymbol()]);
- desc = Object.getOwnPropertyDescriptor(object, 'setter');
- assertEquals(object, desc.set[%HomeObjectSymbol()]);
- desc = Object.getOwnPropertyDescriptor(object, 'accessor');
- assertEquals(object, desc.get[%HomeObjectSymbol()]);
- assertEquals(object, desc.set[%HomeObjectSymbol()]);
-
- assertEquals(undefined, object.methodNoSuper[%HomeObjectSymbol()]);
- desc = Object.getOwnPropertyDescriptor(object, 'getterNoSuper');
- assertEquals(undefined, desc.get[%HomeObjectSymbol()]);
- desc = Object.getOwnPropertyDescriptor(object, 'setterNoSuper');
- assertEquals(undefined, desc.set[%HomeObjectSymbol()]);
- desc = Object.getOwnPropertyDescriptor(object, 'accessorNoSuper');
- assertEquals(undefined, desc.get[%HomeObjectSymbol()]);
- assertEquals(undefined, desc.set[%HomeObjectSymbol()]);
- assertEquals(undefined, object.propertyNoSuper[%HomeObjectSymbol()]);
- assertEquals(undefined, object.propertyWithParenNoSuper[%HomeObjectSymbol()]);
- assertEquals(undefined,
- object.propertyWithParensNoSuper[%HomeObjectSymbol()]);
-})();
-
-
(function TestMethod() {
var object = {
__proto__: {
diff --git a/deps/v8/test/mjsunit/es6/super-ic-opt-no-turboprop.js b/deps/v8/test/mjsunit/es6/super-ic-opt-no-turboprop.js
index 365d01f32b..3c0204466b 100644
--- a/deps/v8/test/mjsunit/es6/super-ic-opt-no-turboprop.js
+++ b/deps/v8/test/mjsunit/es6/super-ic-opt-no-turboprop.js
@@ -48,7 +48,7 @@
// Assert that the function was deoptimized (dependency to the constant
// value).
- assertFalse(isOptimized(C.prototype.foo));
+ assertUnoptimized(C.prototype.foo);
})();
(function TestSuperpropertyAccessInlined() {
diff --git a/deps/v8/test/mjsunit/es6/super-ic-opt.js b/deps/v8/test/mjsunit/es6/super-ic-opt.js
index 13b39bdec1..c360184a18 100644
--- a/deps/v8/test/mjsunit/es6/super-ic-opt.js
+++ b/deps/v8/test/mjsunit/es6/super-ic-opt.js
@@ -118,7 +118,7 @@
// Assert that the function was deoptimized (dependency to the constant
// value).
- assertFalse(isOptimized(D.prototype.foo));
+ assertUnoptimized(D.prototype.foo);
})();
(function TestPropertyIsNonConstantData() {
@@ -239,7 +239,7 @@
assertEquals("new value", r);
// Assert that the function was deoptimized (holder changed).
- assertFalse(isOptimized(C.prototype.foo));
+ assertUnoptimized(C.prototype.foo);
})();
(function TestUnexpectedHomeObjectPrototypeDeoptimizes() {
@@ -278,7 +278,7 @@
assertEquals("new value", r);
// Assert that the function was deoptimized.
- assertEquals(false, isOptimized(D.prototype.foo));
+ assertUnoptimized(D.prototype.foo);
})();
(function TestUnexpectedReceiverDoesNotDeoptimize() {
diff --git a/deps/v8/test/mjsunit/fast-prototype.js b/deps/v8/test/mjsunit/fast-prototype.js
index 341ea9dc11..43f1413e93 100644
--- a/deps/v8/test/mjsunit/fast-prototype.js
+++ b/deps/v8/test/mjsunit/fast-prototype.js
@@ -78,14 +78,16 @@ function test(use_new, add_first, set__proto__) {
assertFalse(%HasFastProperties(proto));
DoProtoMagic(proto, set__proto__);
// Making it a prototype makes it fast again.
- assertTrue(%HasFastProperties(proto));
+ assertEquals(!%IsDictPropertyConstTrackingEnabled(),
+ %HasFastProperties(proto));
} else {
DoProtoMagic(proto, set__proto__);
// Still fast
- assertTrue(%HasFastProperties(proto));
+ assertEquals(!%IsDictPropertyConstTrackingEnabled(),
+ %HasFastProperties(proto));
AddProps(proto);
- // Still fast.
- assertTrue(%HasFastProperties(proto));
+ assertEquals(!%IsDictPropertyConstTrackingEnabled(),
+ %HasFastProperties(proto));
}
return proto;
}
@@ -111,15 +113,19 @@ function test_fast_prototype() {
assertTrue(key == 'a');
break;
}
- assertTrue(%HasFastProperties(x));
+ if (!%IsDictPropertyConstTrackingEnabled())
+ assertTrue(%HasFastProperties(x));
delete x.b;
for (key in x) {
assertTrue(key == 'a');
break;
}
- assertTrue(%HasFastProperties(x));
+
+ assertEquals(!%IsDictPropertyConstTrackingEnabled(),
+ %HasFastProperties(x));
x.d = 4;
- assertTrue(%HasFastProperties(x));
+ assertEquals(!%IsDictPropertyConstTrackingEnabled(),
+ %HasFastProperties(x));
for (key in x) {
assertTrue(key == 'a');
break;
diff --git a/deps/v8/test/mjsunit/harmony/atomics-value-check.js b/deps/v8/test/mjsunit/harmony/atomics-value-check.js
index 053bc6dfc5..31ffbabded 100644
--- a/deps/v8/test/mjsunit/harmony/atomics-value-check.js
+++ b/deps/v8/test/mjsunit/harmony/atomics-value-check.js
@@ -8,11 +8,12 @@
var sab = new SharedArrayBuffer(4);
var sta = new Int8Array(sab);
sta[0] = 5;
-var workerScript =
- `onmessage=function(msg) {
- postMessage(0);
- };`;
-var worker = new Worker(workerScript, {type: 'string'});
+function workerCode() {
+ onmessage = function(msg) {
+ postMessage(0);
+ };
+}
+var worker = new Worker(workerCode, {type: 'function'});
var value_obj = {
valueOf: function() {worker.postMessage({sab:sab}, [sta.buffer]);
diff --git a/deps/v8/test/mjsunit/harmony/atomics-waitasync-1thread-2timeout.js b/deps/v8/test/mjsunit/harmony/atomics-waitasync-1thread-2timeout.js
index 8df56a5771..b5569635e6 100644
--- a/deps/v8/test/mjsunit/harmony/atomics-waitasync-1thread-2timeout.js
+++ b/deps/v8/test/mjsunit/harmony/atomics-waitasync-1thread-2timeout.js
@@ -6,7 +6,7 @@
load("test/mjsunit/harmony/atomics-waitasync-helpers.js");
-const script = `
+function workerCode() {
const sab = new SharedArrayBuffer(16);
const i32a = new Int32Array(sab);
@@ -28,7 +28,8 @@ const script = `
postMessage("notify return value " + notify_return_value);
},
() => { postMessage("unexpected"); });
- }`;
+ };
+}
const expected_messages = [
"fast timed-out",
@@ -36,4 +37,4 @@ const expected_messages = [
"slow ok"
];
-runTestWithWorker(script, expected_messages);
+runTestWithWorker(workerCode, expected_messages);
diff --git a/deps/v8/test/mjsunit/harmony/atomics-waitasync-1thread-buffer-out-of-scope-timeout.js b/deps/v8/test/mjsunit/harmony/atomics-waitasync-1thread-buffer-out-of-scope-timeout.js
index 78a339acf0..40c7412038 100644
--- a/deps/v8/test/mjsunit/harmony/atomics-waitasync-1thread-buffer-out-of-scope-timeout.js
+++ b/deps/v8/test/mjsunit/harmony/atomics-waitasync-1thread-buffer-out-of-scope-timeout.js
@@ -6,7 +6,7 @@
load("test/mjsunit/harmony/atomics-waitasync-helpers.js");
-const script = `
+function workerCode() {
onmessage = function() {
(function() {
const sab = new SharedArrayBuffer(16);
@@ -31,7 +31,8 @@ const script = `
const notify_return_value = Atomics.notify(i32a2, 0);
postMessage("notify return value " + notify_return_value);
- }`;
+ };
+}
const expected_messages = [
"notify return value 1",
@@ -39,4 +40,4 @@ const expected_messages = [
"result timed-out"
];
-runTestWithWorker(script, expected_messages);
+runTestWithWorker(workerCode, expected_messages);
diff --git a/deps/v8/test/mjsunit/harmony/atomics-waitasync-1thread-timeout.js b/deps/v8/test/mjsunit/harmony/atomics-waitasync-1thread-timeout.js
index 98af45f73f..c30c41b02b 100644
--- a/deps/v8/test/mjsunit/harmony/atomics-waitasync-1thread-timeout.js
+++ b/deps/v8/test/mjsunit/harmony/atomics-waitasync-1thread-timeout.js
@@ -6,7 +6,7 @@
load("test/mjsunit/harmony/atomics-waitasync-helpers.js");
-const script = `
+function workerCode() {
onmessage = function() {
const sab = new SharedArrayBuffer(16);
const i32a = new Int32Array(sab);
@@ -17,10 +17,11 @@ const script = `
result.value.then(
(value) => { postMessage("result " + value); },
() => { postMessage("unexpected"); });
- }`;
+ };
+}
const expected_messages = [
"result timed-out"
];
-runTestWithWorker(script, expected_messages);
+runTestWithWorker(workerCode, expected_messages);
diff --git a/deps/v8/test/mjsunit/harmony/atomics-waitasync-1thread-timeouts-and-no-timeouts.js b/deps/v8/test/mjsunit/harmony/atomics-waitasync-1thread-timeouts-and-no-timeouts.js
index a5ecd131e4..4a4c1e0875 100644
--- a/deps/v8/test/mjsunit/harmony/atomics-waitasync-1thread-timeouts-and-no-timeouts.js
+++ b/deps/v8/test/mjsunit/harmony/atomics-waitasync-1thread-timeouts-and-no-timeouts.js
@@ -6,14 +6,12 @@
const N = 10;
-const script = `
+function workerCode(N) {
const sab = new SharedArrayBuffer(16);
const i32a = new Int32Array(sab);
const location = 0;
const expected_value = 0;
- const N = ${N};
-
function start() {
// Create N async waiters; the even ones without timeout and the odd ones
// with timeout.
@@ -36,15 +34,16 @@ const script = `
postMessage("notify return value " + notify_return_value);
}
- function onmessage(param) {
+ onmessage = function(param) {
if (param == "start") {
start();
} else if (param == "wakeUpRemainingWaiters") {
wakeUpRemainingWaiters();
}
- }`
+ };
+}
-const w = new Worker(script, {type : 'string'});
+const w = new Worker(workerCode, {type: 'function', arguments: [N]});
w.postMessage("start");
// Verify that all timed out waiters timed out in timeout order.
diff --git a/deps/v8/test/mjsunit/harmony/atomics-waitasync-helpers.js b/deps/v8/test/mjsunit/harmony/atomics-waitasync-helpers.js
index 4fd35b4cdd..4c07617d44 100644
--- a/deps/v8/test/mjsunit/harmony/atomics-waitasync-helpers.js
+++ b/deps/v8/test/mjsunit/harmony/atomics-waitasync-helpers.js
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-function runTestWithWorker(script, expected_messages) {
- const w = new Worker(script, {type : 'string'});
+function runTestWithWorker(worker_code, expected_messages) {
+ const w = new Worker(worker_code, {type: 'function'});
w.postMessage('start');
let i = 0;
while (i < expected_messages.length) {
diff --git a/deps/v8/test/mjsunit/harmony/atomics-waitasync-worker-shutdown-before-wait-finished-2-waits.js b/deps/v8/test/mjsunit/harmony/atomics-waitasync-worker-shutdown-before-wait-finished-2-waits.js
index 5f4609498e..9bbe863478 100644
--- a/deps/v8/test/mjsunit/harmony/atomics-waitasync-worker-shutdown-before-wait-finished-2-waits.js
+++ b/deps/v8/test/mjsunit/harmony/atomics-waitasync-worker-shutdown-before-wait-finished-2-waits.js
@@ -10,16 +10,18 @@
const location = 0;
(function createWorker() {
- const script = `onmessage = function(msg) {
- if (msg.sab) {
- const i32a = new Int32Array(msg.sab);
- // Start 2 async waits in the same location.
- const result1 = Atomics.waitAsync(i32a, ${location}, 0);
- const result2 = Atomics.waitAsync(i32a, ${location}, 0);
- postMessage('worker waiting');
+ function workerCode(location) {
+ onmessage = function(msg) {
+ if (msg.sab) {
+ const i32a = new Int32Array(msg.sab);
+ // Start 2 async waits in the same location.
+ const result1 = Atomics.waitAsync(i32a, location, 0);
+ const result2 = Atomics.waitAsync(i32a, location, 0);
+ postMessage('worker waiting');
+ }
}
- }`;
- const w = new Worker(script, {type : 'string'});
+ }
+ const w = new Worker(workerCode, {type: 'function', arguments: [location]});
w.postMessage({sab: sab});
const m = w.getMessage();
assertEquals('worker waiting', m);
diff --git a/deps/v8/test/mjsunit/harmony/atomics-waitasync-worker-shutdown-before-wait-finished-2-workers.js b/deps/v8/test/mjsunit/harmony/atomics-waitasync-worker-shutdown-before-wait-finished-2-workers.js
index c27a141f9a..71cc335dab 100644
--- a/deps/v8/test/mjsunit/harmony/atomics-waitasync-worker-shutdown-before-wait-finished-2-workers.js
+++ b/deps/v8/test/mjsunit/harmony/atomics-waitasync-worker-shutdown-before-wait-finished-2-workers.js
@@ -10,18 +10,21 @@
const location = 0;
(function createWorker() {
- const script = `onmessage = function(msg) {
- if (msg.sab) {
- const i32a = new Int32Array(msg.sab);
- Atomics.waitAsync(i32a, ${location}, 0);
- postMessage('worker waiting');
+ function workerCode(location) {
+ onmessage = function(msg) {
+ if (msg.sab) {
+ const i32a = new Int32Array(msg.sab);
+ Atomics.waitAsync(i32a, location, 0);
+ postMessage('worker waiting');
+ }
}
- }`;
+ }
// Create 2 workers which wait on the same location.
let workers = [];
const worker_count = 2;
for (let i = 0; i < worker_count; ++i) {
- workers[i] = new Worker(script, {type : 'string'});
+ workers[i] = new Worker(workerCode,
+ {type: 'function', arguments: [location]});
workers[i].postMessage({sab: sab});
const m = workers[i].getMessage();
assertEquals('worker waiting', m);
diff --git a/deps/v8/test/mjsunit/harmony/atomics-waitasync-worker-shutdown-before-wait-finished-no-timeout.js b/deps/v8/test/mjsunit/harmony/atomics-waitasync-worker-shutdown-before-wait-finished-no-timeout.js
index e698a6e321..02d1e8413e 100644
--- a/deps/v8/test/mjsunit/harmony/atomics-waitasync-worker-shutdown-before-wait-finished-no-timeout.js
+++ b/deps/v8/test/mjsunit/harmony/atomics-waitasync-worker-shutdown-before-wait-finished-no-timeout.js
@@ -9,14 +9,16 @@
const i32a = new Int32Array(sab);
(function createWorker() {
- const script = `onmessage = function(msg) {
- if (msg.sab) {
- const i32a = new Int32Array(msg.sab);
- const result = Atomics.waitAsync(i32a, 0, 0);
- postMessage('worker waiting');
+ function workerCode() {
+ onmessage = function(msg) {
+ if (msg.sab) {
+ const i32a = new Int32Array(msg.sab);
+ const result = Atomics.waitAsync(i32a, 0, 0);
+ postMessage('worker waiting');
+ }
}
- }`;
- const w = new Worker(script, {type : 'string'});
+ };
+ const w = new Worker(workerCode, {type: 'function'});
w.postMessage({sab: sab});
const m = w.getMessage();
assertEquals('worker waiting', m);
diff --git a/deps/v8/test/mjsunit/harmony/atomics-waitasync-worker-shutdown-before-wait-finished-timeout.js b/deps/v8/test/mjsunit/harmony/atomics-waitasync-worker-shutdown-before-wait-finished-timeout.js
index 6db3ec77ab..74fcbad627 100644
--- a/deps/v8/test/mjsunit/harmony/atomics-waitasync-worker-shutdown-before-wait-finished-timeout.js
+++ b/deps/v8/test/mjsunit/harmony/atomics-waitasync-worker-shutdown-before-wait-finished-timeout.js
@@ -9,14 +9,16 @@
const i32a = new Int32Array(sab);
(function createWorker() {
- const script = `onmessage = function(msg) {
- if (msg.sab) {
- const i32a = new Int32Array(msg.sab);
- const result = Atomics.waitAsync(i32a, 0, 0, 100000);
- postMessage('worker waiting');
+ function workerCode() {
+ onmessage = function(msg) {
+ if (msg.sab) {
+ const i32a = new Int32Array(msg.sab);
+ const result = Atomics.waitAsync(i32a, 0, 0, 100000);
+ postMessage('worker waiting');
+ }
}
- }`;
- const w = new Worker(script, {type : 'string'});
+ }
+ const w = new Worker(workerCode, {type: 'function'});
w.postMessage({sab: sab});
const m = w.getMessage();
assertEquals('worker waiting', m);
diff --git a/deps/v8/test/mjsunit/harmony/class-static-blocks.js b/deps/v8/test/mjsunit/harmony/class-static-blocks.js
new file mode 100644
index 0000000000..b4bb710e68
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/class-static-blocks.js
@@ -0,0 +1,134 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-class-static-blocks
+
+{
+ // Basic functionality
+ let log = [];
+ class C {
+ static { log.push("block1"); }
+ static { log.push("block2"); }
+ }
+ assertArrayEquals(["block1", "block2"], log);
+}
+
+{
+ // Static blocks run in textual order interleaved with field initializers.
+ let log = [];
+ class C {
+ static { log.push("block1"); }
+ static public_static_method() {}
+ static public_field = log.push("public_field");
+ static { log.push("block2"); }
+ static #private_field = log.push("private_field");
+ static { log.push("block3"); }
+ }
+ assertArrayEquals(["block1",
+ "public_field",
+ "block2",
+ "private_field",
+ "block3"], log);
+}
+
+{
+ // Static blocks have access to private fields.
+ let exfil;
+ class C {
+ #foo;
+ constructor(x) { this.#foo = x; }
+ static {
+ exfil = function(o) { return o.#foo; };
+ }
+ }
+ assertEquals(exfil(new C(42)), 42);
+}
+
+{
+ // 'this' is the constructor.
+ let log = [];
+ class C {
+ static x = 42;
+ static {
+ log.push(this.x);
+ }
+ }
+ assertArrayEquals([42], log);
+}
+
+{
+ // super.property accesses work as expected.
+ let log = [];
+ class B {
+ static foo = 42;
+ static get field_getter() { return "field_getter"; }
+ static set field_setter(x) { log.push(x); };
+ static method() { return "bar"; }
+ }
+ class C extends B {
+ static {
+ log.push(super.foo);
+ log.push(super.field_getter);
+ super.field_setter = "C";
+ log.push(super.method());
+ }
+ }
+ assertArrayEquals([42, "field_getter", "C", "bar"], log);
+}
+
+{
+ // Each static block is its own var and let scope.
+ let log = [];
+ let f;
+ class C {
+ static {
+ var x = "x1";
+ let y = "y1";
+ log.push(x);
+ log.push(y);
+ }
+ static {
+ var x = "x2";
+ let y = "y2";
+ f = () => [x, y];
+ }
+ static {
+ assertThrows(() => x, ReferenceError);
+ assertThrows(() => y, ReferenceError);
+ }
+ }
+ assertArrayEquals(["x1", "y1"], log);
+ assertArrayEquals(["x2", "y2"], f());
+}
+
+{
+ // new.target is undefined.
+ let log = [];
+ class C {
+ static {
+ log.push(new.target);
+ }
+ }
+ assertArrayEquals([undefined], log);
+}
+
+function assertDoesntParse(expr, context_start, context_end) {
+ assertThrows(() => {
+ eval(`${context_start} class C { static { ${expr} } } ${context_end}`);
+ }, SyntaxError);
+}
+
+for (let [s, e] of [['', ''],
+ ['function* g() {', '}'],
+ ['async function af() {', '}'],
+ ['async function* ag() {', '}']]) {
+ assertDoesntParse('arguments;', s, e);
+ assertDoesntParse('arguments[0] = 42;', s, e);
+ assertDoesntParse('super();', s, e);
+ assertDoesntParse('yield 42;', s, e);
+ assertDoesntParse('await 42;', s, e);
+ // 'await' is disallowed as an identifier.
+ assertDoesntParse('let await;', s, e);
+ assertDoesntParse('await;', s, e);
+}
diff --git a/deps/v8/test/mjsunit/harmony/futex.js b/deps/v8/test/mjsunit/harmony/futex.js
index 0ae1f6dd8a..69c5d0999a 100644
--- a/deps/v8/test/mjsunit/harmony/futex.js
+++ b/deps/v8/test/mjsunit/harmony/futex.js
@@ -189,23 +189,24 @@ if (this.Worker) {
// i32a[4]:
// always 0. Each worker is waiting on this index.
- var workerScript =
- `onmessage = function(msg) {
- var id = msg.id;
- var i32a = new Int32Array(msg.sab);
-
- // Wait on i32a[4] (should be zero).
- var result = Atomics.wait(i32a, 4, 0);
- // Set i32a[id] to 1 to notify the main thread which workers were
- // woken up.
- Atomics.store(i32a, id, 1);
- postMessage(result);
- };`;
+ function workerCode() {
+ onmessage = function(msg) {
+ var id = msg.id;
+ var i32a = new Int32Array(msg.sab);
+
+ // Wait on i32a[4] (should be zero).
+ var result = Atomics.wait(i32a, 4, 0);
+ // Set i32a[id] to 1 to notify the main thread which workers were
+ // woken up.
+ Atomics.store(i32a, id, 1);
+ postMessage(result);
+ };
+ }
var id;
var workers = [];
for (id = 0; id < 4; id++) {
- workers[id] = new Worker(workerScript, {type: 'string'});
+ workers[id] = new Worker(workerCode, {type: 'function'});
workers[id].postMessage({sab: sab, id: id});
}
diff --git a/deps/v8/test/mjsunit/harmony/import-from-fetch-errored.js b/deps/v8/test/mjsunit/harmony/import-from-fetch-errored.js
index 6d6510fcde..7c59912d13 100644
--- a/deps/v8/test/mjsunit/harmony/import-from-fetch-errored.js
+++ b/deps/v8/test/mjsunit/harmony/import-from-fetch-errored.js
@@ -9,5 +9,5 @@ import('no-such-file').catch(e => error1 = e);
import('no-such-file').catch(e => error2 = e);
%PerformMicrotaskCheckpoint();
-assertEquals(error1, error2);
-assertEquals(typeof error1, "string");
+assertEquals(error1.message, error2.message);
+assertEquals(typeof error1.message, "string");
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-15-top-level-await.mjs b/deps/v8/test/mjsunit/harmony/modules-import-15-top-level-await.mjs
index ab1f0e44dd..48be20d565 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-15-top-level-await.mjs
+++ b/deps/v8/test/mjsunit/harmony/modules-import-15-top-level-await.mjs
@@ -48,7 +48,7 @@ async function test3() {
let x = await import('nonexistent-file.mjs');
%AbortJS('failure: should be unreachable');
} catch(e) {
- assertTrue(e.startsWith('d8: Error reading'));
+ assertTrue(e.message.startsWith('d8: Error reading'));
ran = true;
}
}
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-15.mjs b/deps/v8/test/mjsunit/harmony/modules-import-15.mjs
index 9fad3f99aa..d7a590e442 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-15.mjs
+++ b/deps/v8/test/mjsunit/harmony/modules-import-15.mjs
@@ -50,7 +50,7 @@ async function test3() {
let x = await import('nonexistent-file.mjs');
%AbortJS('failure: should be unreachable');
} catch(e) {
- assertTrue(e.startsWith('d8: Error reading'));
+ assertTrue(e.message.startsWith('d8: Error reading'));
ran = true;
}
}
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-assertions-1.mjs b/deps/v8/test/mjsunit/harmony/modules-import-assertions-1.mjs
new file mode 100644
index 0000000000..eaeffa4967
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-assertions-1.mjs
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-import-assertions
+
+import { life } from 'modules-skip-1.mjs' assert { };
+
+assertEquals(42, life());
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-assertions-2.mjs b/deps/v8/test/mjsunit/harmony/modules-import-assertions-2.mjs
new file mode 100644
index 0000000000..ac0295870d
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-assertions-2.mjs
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-import-assertions
+
+import json from 'modules-skip-1.json' assert { type: 'json' };
+
+assertEquals(42, json.life);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-assertions-3.mjs b/deps/v8/test/mjsunit/harmony/modules-import-assertions-3.mjs
new file mode 100644
index 0000000000..9a648fcc6e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-assertions-3.mjs
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-import-assertions
+
+import {life} from 'modules-skip-imports-json-1.mjs';
+
+assertEquals(42, life());
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-assertions-4.mjs b/deps/v8/test/mjsunit/harmony/modules-import-assertions-4.mjs
new file mode 100644
index 0000000000..99d486abf7
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-assertions-4.mjs
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-import-assertions
+
+import json from 'modules-skip-1.json' assert { type: 'json', notARealAssertion: 'value'};
+
+assertEquals(42, json.life);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-1.mjs b/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-1.mjs
new file mode 100644
index 0000000000..c1daa47eaa
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-1.mjs
@@ -0,0 +1,12 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-import-assertions
+
+var life;
+import('modules-skip-1.mjs', { }).then(namespace => life = namespace.life());
+
+%PerformMicrotaskCheckpoint();
+
+assertEquals(42, life);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-10.mjs b/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-10.mjs
new file mode 100644
index 0000000000..beef27d68c
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-10.mjs
@@ -0,0 +1,19 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-import-assertions
+
+var result1;
+var result2;
+import('modules-skip-1.json', { get assert() { throw 'bad \'assert\' getter!'; } }).then(
+ () => assertUnreachable('Should have failed due to throwing getter'),
+ error => result1 = error);
+import('modules-skip-1.json', { assert: { get assertionKey() { throw 'bad \'assertionKey\' getter!'; } } }).then(
+ () => assertUnreachable('Should have failed due to throwing getter'),
+ error => result2 = error);
+
+%PerformMicrotaskCheckpoint();
+
+assertEquals('bad \'assert\' getter!', result1);
+assertEquals('bad \'assertionKey\' getter!', result2);
\ No newline at end of file
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-11.mjs b/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-11.mjs
new file mode 100644
index 0000000000..5b4b0704f8
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-11.mjs
@@ -0,0 +1,19 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-import-assertions --harmony-top-level-await
+
+var life1;
+var life2;
+import('modules-skip-1.json', { assert: { type: 'json' } }).then(
+ namespace => life1 = namespace.default.life);
+
+// Try loading the same module a second time.
+import('modules-skip-1.json', { assert: { type: 'json' } }).then(
+ namespace => life2 = namespace.default.life);
+
+%PerformMicrotaskCheckpoint();
+
+assertEquals(42, life1);
+assertEquals(42, life2);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-2.mjs b/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-2.mjs
new file mode 100644
index 0000000000..041e330448
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-2.mjs
@@ -0,0 +1,13 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-import-assertions
+
+var life;
+import('modules-skip-1.mjs', { assert: { } }).then(
+ namespace => life = namespace.life());
+
+%PerformMicrotaskCheckpoint();
+
+assertEquals(42, life);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-3.mjs b/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-3.mjs
new file mode 100644
index 0000000000..56a9062b11
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-3.mjs
@@ -0,0 +1,13 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-import-assertions
+
+var life;
+import('modules-skip-1.json', { assert: { type: 'json' } }).then(
+ namespace => life = namespace.default.life);
+
+%PerformMicrotaskCheckpoint();
+
+assertEquals(42, life);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-4.mjs b/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-4.mjs
new file mode 100644
index 0000000000..05a1929ff3
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-4.mjs
@@ -0,0 +1,14 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-import-assertions
+
+var result;
+import('modules-skip-1.json', { assert: { type: 'notARealType' } }).then(
+ () => assertUnreachable('Should have failed due to bad module type'),
+ error => result = error.message);
+
+%PerformMicrotaskCheckpoint();
+
+assertEquals('Invalid module type was asserted', result);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-5.mjs b/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-5.mjs
new file mode 100644
index 0000000000..2019cfd12a
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-5.mjs
@@ -0,0 +1,12 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-import-assertions
+
+var life;
+import('modules-skip-imports-json-1.mjs',).then(namespace => life = namespace.life());
+
+%PerformMicrotaskCheckpoint();
+
+assertEquals(42, life);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-6.mjs b/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-6.mjs
new file mode 100644
index 0000000000..3388aefb5c
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-6.mjs
@@ -0,0 +1,13 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-import-assertions
+
+var life;
+import('modules-skip-1.json', { assert: { type: 'json', notARealAssertion: 'value' } }).then(
+ namespace => life = namespace.default.life);
+
+%PerformMicrotaskCheckpoint();
+
+assertEquals(42, life);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-7.mjs b/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-7.mjs
new file mode 100644
index 0000000000..b45e5b692c
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-7.mjs
@@ -0,0 +1,63 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-import-assertions
+
+var result1;
+var result2;
+var result3;
+var result4;
+var result5;
+var result6;
+var result7;
+var result8;
+var result9;
+var result10;
+import('modules-skip-1.json', null).then(
+ () => assertUnreachable('Should have failed due to non-object parameter'),
+ error => result1 = error.message);
+import('modules-skip-1.json', 7).then(
+ () => assertUnreachable('Should have failed due to non-object parameter'),
+ error => result2 = error.message);
+import('modules-skip-1.json', 'string').then(
+ () => assertUnreachable('Should have failed due to non-object parameter'),
+ error => result3 = error.message);
+import('modules-skip-1.json', { assert: null}).then(
+ () => assertUnreachable('Should have failed due to bad assert object'),
+ error => result4 = error.message);
+import('modules-skip-1.json', { assert: 7}).then(
+ () => assertUnreachable('Should have failed due to bad assert object'),
+ error => result5 = error.message);
+import('modules-skip-1.json', { assert: 'string'}).then(
+ () => assertUnreachable('Should have failed due to bad assert object'),
+ error => result6 = error.message);
+import('modules-skip-1.json', { assert: { a: null }}).then(
+ () => assertUnreachable('Should have failed due to bad assert object'),
+ error => result7 = error.message);
+import('modules-skip-1.json', { assert: { a: undefined }}).then(
+ () => assertUnreachable('Should have failed due to bad assertion value'),
+ error => result8 = error.message);
+import('modules-skip-1.json', { assert: { a: 7 }}).then(
+ () => assertUnreachable('Should have failed due to bad assertion value'),
+ error => result9 = error.message);
+ import('modules-skip-1.json', { assert: { a: { } }}).then(
+ () => assertUnreachable('Should have failed due to bad assertion value'),
+ error => result10 = error.message);
+
+%PerformMicrotaskCheckpoint();
+
+const argumentNotObjectError = 'The second argument to import() must be an object';
+const assertOptionNotObjectError = 'The \'assert\' option must be an object';
+const assertionValueNotStringError = 'Import assertion value must be a string';
+
+assertEquals(argumentNotObjectError, result1);
+assertEquals(argumentNotObjectError, result2);
+assertEquals(argumentNotObjectError, result3);
+assertEquals(assertOptionNotObjectError, result4);
+assertEquals(assertOptionNotObjectError, result5);
+assertEquals(assertOptionNotObjectError, result6);
+assertEquals(assertionValueNotStringError, result7);
+assertEquals(assertionValueNotStringError, result8);
+assertEquals(assertionValueNotStringError, result9);
+assertEquals(assertionValueNotStringError, result10);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-8.mjs b/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-8.mjs
new file mode 100644
index 0000000000..95e1a1e707
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-8.mjs
@@ -0,0 +1,13 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-import-assertions
+
+var life;
+import('modules-skip-1.mjs', undefined).then(
+ namespace => life = namespace.life());
+
+%PerformMicrotaskCheckpoint();
+
+assertEquals(42, life);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-9.mjs b/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-9.mjs
new file mode 100644
index 0000000000..2a03c31cf5
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-assertions-dynamic-9.mjs
@@ -0,0 +1,13 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-import-assertions
+
+var life;
+import('modules-skip-1.mjs', { assert: undefined }).then(
+ namespace => life = namespace.life());
+
+%PerformMicrotaskCheckpoint();
+
+assertEquals(42, life);
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-1.json b/deps/v8/test/mjsunit/harmony/modules-skip-1.json
new file mode 100644
index 0000000000..c26c11d682
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-1.json
@@ -0,0 +1 @@
+{ "life": 42}
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-imports-json-1.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-imports-json-1.mjs
new file mode 100644
index 0000000000..3930313dab
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-imports-json-1.mjs
@@ -0,0 +1,6 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import json from "modules-skip-1.json" assert { type: "json" };
+export function life() { return json.life; }
diff --git a/deps/v8/test/mjsunit/harmony/private-brand-checks.js b/deps/v8/test/mjsunit/harmony/private-brand-checks.js
new file mode 100644
index 0000000000..8ee8774480
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/private-brand-checks.js
@@ -0,0 +1,567 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-brand-checks --allow-natives-syntax
+
+// Objects for which all our brand checks return false.
+const commonFalseCases = [{}, function() {}, []];
+// Values for which all our brand checks throw.
+const commonThrowCases = [100, 'foo', undefined, null];
+
+(function TestReturnValue() {
+ class A {
+ m() {
+ assertEquals(typeof (#x in this), 'boolean');
+ assertEquals(typeof (#x in {}), 'boolean');
+ }
+ #x = 1;
+ }
+})();
+
+(function TestPrivateField() {
+ class A {
+ m(other) {
+ return #x in other;
+ }
+ #x = 1;
+ }
+ let a = new A();
+ assertTrue(a.m(a));
+ assertTrue(a.m(new A()));
+ for (o of commonFalseCases) {
+ assertFalse(a.m(o));
+ }
+ for (o of commonThrowCases) {
+ assertThrows(() => { a.m(o) }, TypeError);
+ }
+
+ class B {
+ #x = 5;
+ }
+ assertFalse(a.m(new B()));
+})();
+
+(function TestPrivateFieldWithValueUndefined() {
+ class A {
+ m(other) {
+ return #x in other;
+ }
+ #x;
+ }
+ let a = new A();
+ assertTrue(a.m(a));
+ assertTrue(a.m(new A()));
+ for (o of commonFalseCases) {
+ assertFalse(a.m(o));
+ }
+ for (o of commonThrowCases) {
+ assertThrows(() => { a.m(o) }, TypeError);
+ }
+
+ class B {
+ #x;
+ }
+ assertFalse(a.m(new B()));
+})();
+
+(function TestPrivateMethod() {
+ class A {
+ #pm() {
+ }
+ m(other) {
+ return #pm in other;
+ }
+ }
+ let a = new A();
+ assertTrue(a.m(a));
+ assertTrue(a.m(new A()));
+ for (o of commonFalseCases) {
+ assertFalse(a.m(o));
+ }
+ for (o of commonThrowCases) {
+ assertThrows(() => { a.m(o) }, TypeError);
+ }
+
+ class B {
+ #pm() {}
+ }
+ assertFalse(a.m(new B()));
+})();
+
+(function TestPrivateGetter() {
+ class A {
+ get #foo() {
+ }
+ m(other) {
+ return #foo in other;
+ }
+ }
+ let a = new A();
+ assertTrue(a.m(a));
+ assertTrue(a.m(new A()));
+ for (o of commonFalseCases) {
+ assertFalse(a.m(o));
+ }
+ for (o of commonThrowCases) {
+ assertThrows(() => { a.m(o) }, TypeError);
+ }
+
+ class B {
+ get #foo() {}
+ }
+ assertFalse(a.m(new B()));
+})();
+
+(function TestPrivateSetter() {
+ class A {
+ set #foo(a) {
+ }
+ m(other) {
+ return #foo in other;
+ }
+ }
+ let a = new A();
+ assertTrue(a.m(a));
+ assertTrue(a.m(new A()));
+ for (o of commonFalseCases) {
+ assertFalse(a.m(o));
+ }
+ for (o of commonThrowCases) {
+ assertThrows(() => { a.m(o) }, TypeError);
+ }
+
+ class B {
+ set #foo(a) {}
+ }
+ assertFalse(a.m(new B()));
+})();
+
+(function TestPrivateGetterAndSetter() {
+ class A {
+ get #foo() {}
+ set #foo(a) {
+ }
+ m(other) {
+ return #foo in other;
+ }
+ }
+ let a = new A();
+ assertTrue(a.m(a));
+ assertTrue(a.m(new A()));
+ for (o of commonFalseCases) {
+ assertFalse(a.m(o));
+ }
+ for (o of commonThrowCases) {
+ assertThrows(() => { a.m(o) }, TypeError);
+ }
+
+ class B {
+ get #foo() {}
+ set #foo(a) {}
+ }
+ assertFalse(a.m(new B()));
+})();
+
+(function TestPrivateStaticField() {
+ class A {
+ static m(other) {
+ return #x in other;
+ }
+ static #x = 1;
+ }
+ assertTrue(A.m(A));
+ assertFalse(A.m(new A()));
+ for (o of commonFalseCases) {
+ assertFalse(A.m(o));
+ }
+ for (o of commonThrowCases) {
+ assertThrows(() => { A.m(o) }, TypeError);
+ }
+
+ class B {
+ static #x = 5;
+ }
+ assertFalse(A.m(B));
+})();
+
+(function TestPrivateStaticMethod() {
+ class A {
+ static m(other) {
+ return #pm in other;
+ }
+ static #pm() {}
+ }
+ assertTrue(A.m(A));
+ assertFalse(A.m(new A()));
+ for (o of commonFalseCases) {
+ assertFalse(A.m(o));
+ }
+ for (o of commonThrowCases) {
+ assertThrows(() => { A.m(o) }, TypeError);
+ }
+
+ class B {
+ static #pm() {};
+ }
+ assertFalse(A.m(B));
+})();
+
+(function TestPrivateStaticGetter() {
+ class A {
+ static m(other) {
+ return #x in other;
+ }
+ static get #x() {}
+ }
+ assertTrue(A.m(A));
+ assertFalse(A.m(new A()));
+ for (o of commonFalseCases) {
+ assertFalse(A.m(o));
+ }
+ for (o of commonThrowCases) {
+ assertThrows(() => { A.m(o) }, TypeError);
+ }
+
+ class B {
+ static get #x() {};
+ }
+ assertFalse(A.m(B));
+})();
+
+(function TestPrivateStaticSetter() {
+ class A {
+ static m(other) {
+ return #x in other;
+ }
+ static set #x(x) {}
+ }
+ assertTrue(A.m(A));
+ assertFalse(A.m(new A()));
+ for (o of commonFalseCases) {
+ assertFalse(A.m(o));
+ }
+ for (o of commonThrowCases) {
+ assertThrows(() => { A.m(o) }, TypeError);
+ }
+
+ class B {
+ static set #x(x) {};
+ }
+ assertFalse(A.m(B));
+})();
+
+(function TestPrivateStaticGetterAndSetter() {
+ class A {
+ static m(other) {
+ return #x in other;
+ }
+ static get #x() {}
+ static set #x(x) {}
+ }
+ assertTrue(A.m(A));
+ assertFalse(A.m(new A()));
+ for (o of commonFalseCases) {
+ assertFalse(A.m(o));
+ }
+ for (o of commonThrowCases) {
+ assertThrows(() => { A.m(o) }, TypeError);
+ }
+
+ class B {
+ static get #x() {}
+ static set #x(x) {};
+ }
+ assertFalse(A.m(B));
+})();
+
+(function TestPrivateIdentifiersAreDistinct() {
+ function GenerateClass() {
+ class A {
+ m(other) {
+ return #x in other;
+ }
+ #x = 0;
+ }
+ return new A();
+ }
+ let a1 = GenerateClass();
+ let a2 = GenerateClass();
+ assertTrue(a1.m(a1));
+ assertFalse(a1.m(a2));
+ assertFalse(a2.m(a1));
+ assertTrue(a2.m(a2));
+})();
+
+(function TestSubclasses() {
+ class A {
+ m(other) { return #foo in other; }
+ #foo;
+ }
+ class B extends A {}
+ assertTrue((new A()).m(new B()));
+})();
+
+(function TestFakeSubclassesWithPrivateField() {
+ class A {
+ #foo;
+ m() { return #foo in this; }
+ }
+ let a = new A();
+ assertTrue(a.m());
+
+ // Plug an object into the prototype chain; it's not a real instance of the
+ // class.
+ let fake = {__proto__: a};
+ assertFalse(fake.m());
+})();
+
+(function TestFakeSubclassesWithPrivateMethod() {
+ class A {
+ #pm() {}
+ m() { return #pm in this; }
+ }
+ let a = new A();
+ assertTrue(a.m());
+
+ // Plug an object into the prototype chain; it's not a real instance of the
+ // class.
+ let fake = {__proto__: a};
+ assertFalse(fake.m());
+})();
+
+(function TestPrivateNameUnknown() {
+ assertThrows(() => { eval(`
+ class A {
+ m(other) { return #lol in other; }
+ }
+ new A().m();
+ `)}, SyntaxError, /must be declared in an enclosing class/);
+})();
+
+(function TestEvalWithPrivateField() {
+ class A {
+ m(other) {
+ let result;
+ eval('result = #x in other;');
+ return result;
+ }
+ #x = 1;
+ }
+ let a = new A();
+ assertTrue(a.m(a));
+ assertTrue(a.m(new A()));
+ for (o of commonFalseCases) {
+ assertFalse(a.m(o));
+ }
+ for (o of commonThrowCases) {
+ assertThrows(() => { a.m(o) }, TypeError);
+ }
+})();
+
+(function TestEvalWithPrivateMethod() {
+ class A {
+ m(other) {
+ let result;
+ eval('result = #pm in other;');
+ return result;
+ }
+ #pm() {}
+ }
+ let a = new A();
+ assertTrue(a.m(a));
+ assertTrue(a.m(new A()));
+ for (o of commonFalseCases) {
+ assertFalse(a.m(o));
+ }
+ for (o of commonThrowCases) {
+ assertThrows(() => { a.m(o) }, TypeError);
+ }
+})();
+
+(function TestEvalWithStaticPrivateField() {
+ class A {
+ static m(other) {
+ let result;
+ eval('result = #x in other;');
+ return result;
+ }
+ static #x = 1;
+ }
+ assertTrue(A.m(A));
+ assertFalse(A.m(new A()));
+ for (o of commonFalseCases) {
+ assertFalse(A.m(o));
+ }
+ for (o of commonThrowCases) {
+ assertThrows(() => { A.m(o) }, TypeError);
+ }
+})();
+
+(function TestEvalWithStaticPrivateMethod() {
+ class A {
+ static m(other) {
+ let result;
+ eval('result = #pm in other;');
+ return result;
+ }
+ static #pm() {}
+ }
+ assertTrue(A.m(A));
+ assertFalse(A.m(new A()));
+ for (o of commonFalseCases) {
+ assertFalse(A.m(o));
+ }
+ for (o of commonThrowCases) {
+ assertThrows(() => { A.m(o) }, TypeError);
+ }
+})();
+
+(function TestCombiningWithOtherExpressions() {
+ class A {
+ m() {
+ assertFalse(#x in {} in {} in {});
+ assertTrue(#x in this in {true: 0});
+ assertTrue(#x in {} < 1 + 1);
+ assertFalse(#x in this < 1);
+
+ assertThrows(() => { eval('#x in {} = 4')});
+ assertThrows(() => { eval('(#x in {}) = 4')});
+ }
+ #x;
+ }
+ new A().m();
+})();
+
+(function TestHalfConstructedObjects() {
+ let half_constructed;
+ class A {
+ m() {
+ assertTrue(#x in this);
+ assertFalse(#y in this);
+ }
+ #x = 0;
+ #y = (() => { half_constructed = this; throw 'lol';})();
+ }
+
+ try {
+ new A();
+ } catch {
+ }
+ half_constructed.m();
+})();
+
+(function TestPrivateFieldOpt() {
+ class A {
+ m(other) {
+ return #x in other;
+ }
+ #x = 1;
+ }
+ let a = new A();
+ %PrepareFunctionForOptimization(A.prototype.m);
+ assertTrue(a.m(a));
+ assertTrue(a.m(new A()));
+ %OptimizeFunctionOnNextCall(A.prototype.m);
+ assertTrue(a.m(a));
+ assertTrue(a.m(new A()));
+
+ class B {
+ #x = 5;
+ }
+ assertFalse(a.m(new B()));
+})();
+
+(function TestPrivateMethodOpt() {
+ class A {
+ #pm() {
+ }
+ m(other) {
+ return #pm in other;
+ }
+ }
+ let a = new A();
+ %PrepareFunctionForOptimization(A.prototype.m);
+ assertTrue(a.m(a));
+ assertTrue(a.m(new A()));
+ %OptimizeFunctionOnNextCall(A.prototype.m);
+ assertTrue(a.m(a));
+ assertTrue(a.m(new A()));
+
+ class B {
+ #pm() {}
+ }
+ assertFalse(a.m(new B()));
+})();
+
+(function TestPrivateStaticFieldOpt() {
+ class A {
+ static m(other) {
+ return #x in other;
+ }
+ static #x = 1;
+ }
+ %PrepareFunctionForOptimization(A.m);
+ assertTrue(A.m(A));
+ %OptimizeFunctionOnNextCall(A.m);
+ assertTrue(A.m(A));
+
+ class B {
+ static #x = 5;
+ }
+ assertFalse(A.m(B));
+})();
+
+(function TestPrivateStaticMethodOpt() {
+ class A {
+ static m(other) {
+ return #pm in other;
+ }
+ static #pm() {}
+ }
+ %PrepareFunctionForOptimization(A.m);
+ assertTrue(A.m(A));
+ %OptimizeFunctionOnNextCall(A.m);
+ assertTrue(A.m(A));
+
+ class B {
+ static #pm() {};
+ }
+ assertFalse(A.m(B));
+})();
+
+(function TestPrivateFieldWithProxy() {
+ class A {
+ m(other) {
+ return #x in other;
+ }
+ #x = 1;
+ }
+ let a = new A();
+
+ const p = new Proxy(a, {get: function() { assertUnreachable(); } });
+ assertFalse(a.m(p));
+})();
+
+(function TestHeritagePosition() {
+ class A {
+ #x; // A.#x
+ static C = class C extends (function () {
+ return class D {
+ exfil(obj) { return #x in obj; }
+ exfilEval(obj) { return eval("#x in obj"); }
+ };
+ }) { // C body starts
+ #x; // C.#x
+ } // C body ends
+ } // A ends
+ let c = new A.C();
+ let d = new c();
+ // #x inside D binds to A.#x, so only objects of A pass the check.
+ assertTrue(d.exfil(new A()));
+ assertFalse(d.exfil(c));
+ assertFalse(d.exfil(d));
+ assertTrue(d.exfilEval(new A()));
+ assertFalse(d.exfilEval(c));
+ assertFalse(d.exfilEval(d));
+})();
diff --git a/deps/v8/test/mjsunit/harmony/regexp-match-indices-no-flag.js b/deps/v8/test/mjsunit/harmony/regexp-match-indices-no-flag.js
new file mode 100644
index 0000000000..06cf89ad06
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-match-indices-no-flag.js
@@ -0,0 +1,13 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-harmony-regexp-match-indices
+
+// Redefined hasIndices should not reflect in flags without
+// --harmony-regexp-match-indices
+{
+ let re = /./;
+ Object.defineProperty(re, "hasIndices", { get: function() { return true; } });
+ assertEquals("", re.flags);
+}
diff --git a/deps/v8/test/mjsunit/harmony/regexp-match-indices.js b/deps/v8/test/mjsunit/harmony/regexp-match-indices.js
index 61d3c9d0d0..b393c878b0 100644
--- a/deps/v8/test/mjsunit/harmony/regexp-match-indices.js
+++ b/deps/v8/test/mjsunit/harmony/regexp-match-indices.js
@@ -2,11 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-regexp-match-indices --expose-gc --stack-size=100
+// Flags: --harmony-regexp-match-indices --allow-natives-syntax
+// Flags: --expose-gc --stack-size=100
+// Flags: --no-force-slow-path
// Sanity test.
{
- const re = /a+(?<Z>z)?/;
+ const re = /a+(?<Z>z)?/d;
const m = re.exec("xaaaz");
assertEquals(m.indices, [[1, 5], [4, 5]]);
@@ -15,7 +17,7 @@
// Capture groups that are not matched return `undefined`.
{
- const re = /a+(?<Z>z)?/;
+ const re = /a+(?<Z>z)?/d;
const m = re.exec("xaaay");
assertEquals(m.indices, [[1, 4], undefined]);
@@ -24,7 +26,7 @@
// Two capture groups.
{
- const re = /a+(?<A>zz)?(?<B>ii)?/;
+ const re = /a+(?<A>zz)?(?<B>ii)?/d;
const m = re.exec("xaaazzii");
assertEquals(m.indices, [[1, 8], [4, 6], [6, 8]]);
@@ -33,16 +35,16 @@
// No capture groups.
{
- const re = /a+/;
+ const re = /a+/d;
const m = re.exec("xaaazzii");
- assertEquals(m.indices [[1, 4]]);
+ assertEquals(m.indices, [[1, 4]]);
assertEquals(m.indices.groups, undefined);
}
// No match.
{
- const re = /a+/;
+ const re = /a+/d;
const m = re.exec("xzzii");
assertEquals(null, m);
@@ -50,8 +52,8 @@
// Unnamed capture groups.
{
- const re = /a+(z)?/;
- const m = re.exec("xaaaz")
+ const re = /a+(z)?/d;
+ const m = re.exec("xaaaz");
assertEquals(m.indices, [[1, 5], [4, 5]]);
assertEquals(m.indices.groups, undefined)
@@ -59,7 +61,7 @@
// Named and unnamed capture groups.
{
- const re = /a+(z)?(?<Y>y)?/;
+ const re = /a+(z)?(?<Y>y)?/d;
const m = re.exec("xaaazyy")
assertEquals(m.indices, [[1, 6], [4, 5], [5, 6]]);
@@ -69,7 +71,7 @@
// Verify property overwrite.
{
- const re = /a+(?<Z>z)?/;
+ const re = /a+(?<Z>z)?/d;
const m = re.exec("xaaaz");
m.indices = null;
@@ -98,7 +100,7 @@
}
});
- const re = /a+(?<Z>z)?/;
+ const re = /a+(?<Z>z)?/d;
const m = re.exec("xaaaz");
assertEquals(m.indices.groups, {'Z': [4, 5]})
@@ -106,14 +108,14 @@
// Test atomic regexp.
{
- const m = /undefined/.exec();
+ const m = (/undefined/d).exec();
assertEquals(m.indices, [[0, 9]]);
}
// Test deleting unrelated fields does not break.
{
- const m = /undefined/.exec();
+ const m = (/undefined/d).exec();
delete m['index'];
gc();
assertEquals(m.indices, [[0, 9]]);
@@ -121,7 +123,7 @@
// Stack overflow.
{
- const re = /a+(?<Z>z)?/;
+ const re = /a+(?<Z>z)?/d;
const m = re.exec("xaaaz");
function rec() {
@@ -138,9 +140,39 @@
// Match between matches.
{
- const re = /a+(?<A>zz)?(?<B>ii)?/;
+ const re = /a+(?<A>zz)?(?<B>ii)?/d;
const m = re.exec("xaaazzii");
assertTrue(/b+(?<C>cccc)?/.test("llllllbbbbbbcccc"));
assertEquals(m.indices, [[1, 8], [4, 6], [6, 8]]);
assertEquals(m.indices.groups, {'A': [4, 6], 'B': [6, 8]});
}
+
+// Redefined hasIndices should reflect in flags.
+{
+ let re = /./;
+ Object.defineProperty(re, "hasIndices", { get: function() { return true; } });
+ assertEquals("d", re.flags);
+}
+
+{
+ // The flags field of a regexp should be sorted.
+ assertEquals("dgmsy", (/asdf/dymsg).flags);
+
+ // The 'hasIndices' member should be set according to the hasIndices flag.
+ assertTrue((/asdf/dymsg).hasIndices);
+ assertFalse((/asdf/ymsg).hasIndices);
+
+ // The new fields installed on the regexp prototype map shouldn't make
+ // unmodified regexps slow.
+
+ // TODO(v8:11248) Enabling v8_dict_property_const_tracking currently causes
+ // the original fast mode prototype for regexes to be converted to a
+ // dictionary mode one, which makes %RegexpIsUnmodified fail. Once we support
+ // directly creating the regex prototype in dictionary mode if
+ // v8_dict_property_const_tracking is enabled, change %RegexpIsUnmodified to
+ // know about the canonical dictionary mode prototype, too.
+ if (!%IsDictPropertyConstTrackingEnabled()) {
+ %RegexpIsUnmodified(/asdf/);
+ %RegexpIsUnmodified(/asdf/d);
+ }
+}
diff --git a/deps/v8/test/mjsunit/json2.js b/deps/v8/test/mjsunit/json2.js
index d3cd3d5d84..f56fd43f58 100644
--- a/deps/v8/test/mjsunit/json2.js
+++ b/deps/v8/test/mjsunit/json2.js
@@ -180,11 +180,11 @@ Object.defineProperty(non_enum, "b", { value: 2, enumerable: false });
non_enum.c = 3;
TestStringify('{"a":1,"c":3}', non_enum);
-var str = "external_string";
+var str = "external";
try {
externalizeString(str, true);
} catch (e) { }
-TestStringify("\"external_string\"", str, null, 0);
+TestStringify("\"external\"", str, null, 0);
var o = {};
o.somespecialproperty = 10;
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index 23b846bf7d..144579703a 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -175,6 +175,7 @@ var V8OptimizationStatus = {
kTopmostFrameIsTurboFanned: 1 << 11,
kLiteMode: 1 << 12,
kMarkedForDeoptimization: 1 << 13,
+ kBaseline: 1 << 14,
};
// Returns true if --lite-mode is on and we can't ever turn on optimization.
@@ -189,6 +190,9 @@ var isAlwaysOptimize;
// Returns true if given function is interpreted.
var isInterpreted;
+// Returns true if given function is baseline.
+var isBaseline;
+
// Returns true if given function is optimized.
var isOptimized;
@@ -676,7 +680,9 @@ var prettyPrinted;
// to stress test the deoptimizer.
return;
}
- assertFalse((opt_status & V8OptimizationStatus.kOptimized) !== 0, name_opt);
+ var is_optimized = (opt_status & V8OptimizationStatus.kOptimized) !== 0;
+ var is_baseline = (opt_status & V8OptimizationStatus.kBaseline) !== 0;
+ assertFalse(is_optimized && !is_baseline, name_opt);
}
assertOptimized = function assertOptimized(
@@ -731,6 +737,14 @@ var prettyPrinted;
(opt_status & V8OptimizationStatus.kInterpreted) !== 0;
}
+ isBaseline = function isBaseline(fun) {
+ var opt_status = OptimizationStatus(fun, "");
+ assertTrue((opt_status & V8OptimizationStatus.kIsFunction) !== 0,
+ "not a function");
+ return (opt_status & V8OptimizationStatus.kOptimized) === 0 &&
+ (opt_status & V8OptimizationStatus.kBaseline) !== 0;
+ }
+
isOptimized = function isOptimized(fun) {
var opt_status = OptimizationStatus(fun, "");
assertTrue((opt_status & V8OptimizationStatus.kIsFunction) !== 0,
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 6e76e71450..f021b73a7f 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -136,6 +136,7 @@
'regress/regress-1122': [PASS, SLOW],
'regress/regress-605470': [PASS, SLOW],
'regress/regress-655573': [PASS, SLOW],
+ 'regress/regress-1034322': [PASS, SLOW, NO_VARIANTS, ['mode != release', SKIP]],
'regress/regress-1200351': [PASS, SLOW],
'regress/regress-crbug-808192': [PASS, SLOW, NO_VARIANTS, ['arch not in (ia32, x64)', SKIP]],
'regress/regress-crbug-918301': [PASS, SLOW, NO_VARIANTS, ['mode != release or dcheck_always_on', SKIP], ['(arch == arm or arch == arm64) and simulator_run', SKIP], ['tsan', SKIP]],
@@ -151,11 +152,11 @@
'wasm/compare-exchange64-stress': [PASS, SLOW, NO_VARIANTS],
# Very slow on ARM and MIPS, contains no architecture dependent code.
- 'unicode-case-overoptimization0': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips)', SKIP]],
- 'unicode-case-overoptimization1': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips)', SKIP]],
- 'regress/regress-3976': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips)', SKIP]],
- 'regress/regress-crbug-482998': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips)', SKIP]],
- 'regress/regress-740784': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips)', SKIP]],
+ 'unicode-case-overoptimization0': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips, riscv64)', SKIP]],
+ 'unicode-case-overoptimization1': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips, riscv64)', SKIP]],
+ 'regress/regress-3976': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips, riscv64)', SKIP]],
+ 'regress/regress-crbug-482998': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips, riscv64)', SKIP]],
+ 'regress/regress-740784': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips, riscv64)', SKIP]],
# TODO(bmeurer): Flaky timeouts (sometimes <1s, sometimes >3m).
'unicodelctest': [PASS, NO_VARIANTS],
@@ -271,6 +272,7 @@
'stack-traces-overflow': [SKIP],
'unicode-test': [SKIP],
'whitespaces': [SKIP],
+ 'baseline/*': [SKIP],
# Unsuitable for GC stress because coverage information is lost on GC.
'code-coverage-ad-hoc': [SKIP],
@@ -323,26 +325,14 @@
}], # 'gc_stress'
##############################################################################
-['lite_mode or variant == jitless', {
- # Skip tests not suitable for lite_mode.
-
- # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
- 'regress/regress-5888': [SKIP],
- 'regress/regress-5911': [SKIP],
- 'regress/regress-813440': [SKIP],
- 'regress/regress-crbug-746835': [SKIP],
- 'regress/regress-crbug-772056': [SKIP],
- 'regress/regress-crbug-816961': [SKIP],
- 'regress/regress-crbug-969498': [SKIP],
- 'regress/regress-crbug-1047368': [SKIP],
+# TODO(v8:7777): Change this once wasm is supported in jitless mode.
+['not has_webassembly or variant == jitless', {
+ # Skip tests that require webassembly.
+ 'regress/asm/*': [SKIP],
'regress/wasm/*': [SKIP],
- 'regress/regress-8947': [SKIP],
- 'regress/regress-9209': [SKIP],
- 'regress/regress-1034394': [SKIP],
- 'regress/regress-v8-9106': [SKIP],
+
'wasm/*': [SKIP],
- # Other tests that use asm / wasm / optimized code.
'asm/asm-heap': [SKIP],
'asm/asm-validation': [SKIP],
'asm/call-stdlib': [SKIP],
@@ -354,36 +344,24 @@
'asm/regress-937650': [SKIP],
'asm/regress-9531': [SKIP],
'asm/return-types': [SKIP],
- 'regress/regress-599719': [SKIP],
- 'regress/regress-6196': [SKIP],
- 'regress/regress-6700': [SKIP],
- 'regress/regress-6838-2': [SKIP],
- 'regress/regress-6838-3': [SKIP],
- 'regress/regress-6838-4': [SKIP],
- 'regress/regress-9022': [SKIP],
- 'regress/regress-9832': [SKIP],
- 'regress/regress-crbug-934138': [SKIP],
- 'regress/regress-crbug-976934': [SKIP],
+ # Tests tracing when generating wasm in TurboFan.
+ 'tools/compiler-trace-flags-wasm': [SKIP],
+}], # not has_webassembly or variant == jitless
+
+##############################################################################
+['lite_mode or variant == jitless', {
# Timeouts in lite / jitless mode.
'asm/embenchen/*': [SKIP],
# Tests that generate code at runtime.
'code-comments': [SKIP],
- 'regress/regress-617526': [SKIP],
- 'regress/regress-7893': [SKIP],
- 'regress/regress-8377': [SKIP],
- 'regress/regress-863810': [SKIP],
- 'regress/regress-crbug-721835': [SKIP],
- 'regress/regress-crbug-759327': [SKIP],
- 'regress/regress-crbug-898974': [SKIP],
'regexp-tier-up': [SKIP],
'regexp-tier-up-multiple': [SKIP],
'regress/regress-996234': [SKIP],
# These tests check that we can trace the compiler.
'tools/compiler-trace-flags': [SKIP],
- 'tools/compiler-trace-flags-wasm': [SKIP],
# Too slow on arm64 simulator and debug: https://crbug.com/v8/7783
'md5': [PASS, ['arch == arm64 and mode == debug and simulator_run', SKIP]],
@@ -394,6 +372,9 @@
# Flag --interpreted-frames-native-stack incompatible with jitless
'regress/regress-10138': [SKIP],
'regress/regress-1078913': [SKIP],
+
+ # Baseline incompatible with jitless
+ 'baseline/*': [SKIP]
}], # 'lite_mode or variant == jitless'
##############################################################################
@@ -593,6 +574,9 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=7102
# Flaky due to huge string allocation.
'regress/regress-748069': [SKIP],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=11438
+ 'regress/regress-crbug-627935': [SKIP],
}], # 'msan == True'
##############################################################################
@@ -783,6 +767,86 @@
}], # 'arch == mips64el or arch == mips64'
##############################################################################
+['arch == riscv64', {
+
+ # Slow tests which time out in debug mode.
+ 'try': [PASS, ['mode == debug', SKIP]],
+ 'array-constructor': [PASS, ['mode == debug', SKIP]],
+
+ # Slow in release mode on RISC-V.
+ 'compiler/regress-stacktrace-methods': [PASS, SLOW],
+ 'array-splice': [PASS, SLOW],
+
+ # Long running test.
+ 'string-indexof-2': [PASS, SLOW],
+
+ # Long running tests. Skipping because having them timeout takes too long on
+ # the buildbot.
+ 'compiler/alloc-number': [SKIP],
+ 'regress/regress-490': [SKIP],
+ 'regress/regress-create-exception': [SKIP],
+ 'regress/regress-3247124': [SKIP],
+
+ # Requires bigger stack size in the Genesis and if stack size is increased,
+ # the test requires too much time to run. However, the problem the test
+ # covers should be platform-independent.
+ 'regress/regress-1132': [SKIP],
+
+ # Currently always deopt on minus zero
+ 'math-floor-of-div-minus-zero': [SKIP],
+
+ # Requires too much memory on RISC-V.
+ 'regress/regress-752764': [SKIP],
+ 'regress/regress-779407': [SKIP],
+ 'harmony/bigint/regressions': [SKIP],
+
+ # https://github.com/v8-riscv/v8/issues/53
+ 'wasm/float-constant-folding': [SKIP],
+
+ 'wasm/memory_2gb_oob': [SKIP], # OOM: sorry, best effort max memory size test
+ 'wasm/memory_4gb_oob': [SKIP], # OOM: sorry, best effort max memory size test
+
+ # This often fails in debug mode because it is too slow
+ 'd8/d8-performance-now': [PASS, ['mode == debug', SKIP]],
+
+ # Some atomic functions are not yet implemented
+ 'wasm/compare-exchange64-stress': [SKIP],
+ 'wasm/compare-exchange-stress': [SKIP],
+ 'regress/wasm/regress-1045225': [SKIP],
+ 'regress/wasm/regress-1045737': [SKIP],
+ 'regress/wasm/regress-1048241': [SKIP],
+ 'regress/wasm/regress-1074586-b': [SKIP],
+ 'regress/wasm/regress-1075953': [SKIP],
+ 'regress/wasm/regress-1074586': [SKIP],
+ 'regress/wasm/regress-1079449': [SKIP],
+ 'regress/wasm/regress-1080902': [SKIP],
+ 'regress/wasm/regress-1140549': [SKIP],
+ 'regress/wasm/regress-1153442': [SKIP],
+ 'regress/wasm/regress-1168116': [SKIP],
+ 'wasm/atomics': [SKIP],
+ 'wasm/atomics-non-shared': [SKIP],
+ 'wasm/grow-shared-memory': [SKIP],
+ 'wasm/shared-memory': [SKIP],
+
+ # https://github.com/v8-riscv/v8/issues/418
+ 'regress/regress-1138075': [SKIP],
+ 'regress/regress-1138611': [SKIP],
+}], # 'arch == riscv64'
+
+['arch == riscv64 and variant == stress_incremental_marking', {
+ # https://github.com/v8-riscv/v8/issues/414
+ 'wasm/externref-globals-liftoff': [SKIP],
+}], # 'arch == riscv64 and variant == stress_incremental_marking'
+
+##############################################################################
+['system == macos', {
+ # TODO(machenbach): These tests are x25 slower on 4-core Mac Minis. They can
+ # be unskipped as soon as the pools only contain 8-core+ Macs.
+ 'wasm/compare-exchange-stress': [SKIP],
+ 'wasm/compare-exchange64-stress': [SKIP],
+}], # 'system == macos'
+
+##############################################################################
['system == windows', {
# Too slow with turbo fan.
'math-floor-of-div': [PASS, ['mode == debug', SKIP]],
@@ -846,10 +910,6 @@
# isolates.
'wasm/lazy-compilation': [SKIP],
- # Tier down/up Wasm NativeModule in debugging is non-deterministic with
- # multiple isolates (https://crbug.com/v8/10099).
- 'wasm/tier-down-to-liftoff': [SKIP],
-
# Tier down/up Wasm functions is non-deterministic with
# multiple isolates, as dynamic tiering relies on a array shared
# in the module, that can be modified by all instances.
@@ -976,7 +1036,8 @@
'compiler/regress-905555-2': [SKIP],
'compiler/regress-905555': [SKIP],
'compiler/regress-9945-1': [SKIP],
- 'concurrent-initial-prototype-change': [SKIP],
+ 'concurrent-initial-prototype-change-1': [SKIP],
+ 'concurrent-initial-prototype-change-2': [SKIP],
'regress/regress-356053': [SKIP],
'regress/regress-embedded-cons-string': [SKIP],
@@ -988,6 +1049,15 @@
# BUG(v8:9975).
'es6/typedarray-copywithin': [SKIP],
+
+ # BUG(v8:11319): Predictable crashes with --wasm-tier-up.
+ 'wasm/compiled-module-serialization': [SKIP],
+ 'wasm/graceful_shutdown_during_tierup': [SKIP],
+ 'wasm/print-code': [SKIP],
+ 'regress/wasm/regress-11024': [SKIP],
+ 'regress/wasm/regress-7785': [SKIP],
+ 'regress/wasm/regress-808848': [SKIP],
+ 'regress/wasm/regress-808980': [SKIP],
}], # 'predictable == True'
##############################################################################
@@ -1058,6 +1128,9 @@
# Too memory hungry on Odroid devices.
'regress/regress-678917': [PASS, ['arch == arm and not simulator_run', SKIP]],
+
+ # Baseline tests don't make sense with optimization stressing.
+ 'baseline/*': [SKIP],
}], # variant == stress
##############################################################################
@@ -1150,7 +1223,7 @@
}],
##############################################################################
-['variant == turboprop', {
+['variant == turboprop or variant == turboprop_as_toptier', {
# Deopts differently than TurboFan.
'compiler/native-context-specialization-hole-check': [SKIP],
'compiler/number-comparison-truncations': [SKIP],
@@ -1206,7 +1279,7 @@
'compiler/abstract-equal-receiver': [FAIL],
'compiler/constant-fold-cow-array': [FAIL],
'compiler/promise-resolve-stable-maps': [FAIL],
-}], # variant == turboprop
+}], # variant == turboprop or variant == turboprop_as_toptier
##############################################################################
['variant == top_level_await', {
@@ -1265,17 +1338,16 @@
# serializer.
'asm/*': [SKIP],
'compiler/regress-439743': [SKIP],
+ 'regress/asm/regress-6196': [SKIP],
+ 'regress/asm/regress-7893': [SKIP],
+ 'regress/asm/regress-8377': [SKIP],
+ 'regress/asm/regress-617526': [SKIP],
+ 'regress/asm/regress-crbug-898974': [SKIP],
+ 'regress/asm/regress-crbug-976934': [SKIP],
'regress/regress-441099': [SKIP],
- 'regress/regress-617526': [SKIP],
- 'regress/regress-6196': [SKIP],
'regress/regress-677685': [SKIP],
- 'regress/regress-7893': [SKIP],
'regress/regress-799690': [SKIP],
- 'regress/regress-8377': [SKIP],
- 'regress/regress-crbug-1047368': [SKIP],
- 'regress/regress-crbug-898974': [SKIP],
'regress/regress-crbug-935800': [SKIP],
- 'regress/regress-crbug-976934': [SKIP],
'regress/wasm/*': [SKIP],
'wasm/*': [SKIP],
# Investigate (IsScript).
@@ -1283,10 +1355,6 @@
'harmony/private-fields-special-object': [SKIP],
# Skip, since import errors since they refer to the script via debug symbols
'harmony/import-from-instantiation-errored': [SKIP],
- # Investigate (JSFunction in startup serializer).
- 'regress/regress-1034394': [SKIP],
- 'regress/regress-863810': [SKIP],
- 'regress/regress-crbug-772056': [SKIP],
# The entire snapshotting code assumes that the snapshot size fits
# into an int, so it doesn't support huge TypedArrays.
'regress/regress-319722-ArrayBuffer': [SKIP],
@@ -1295,8 +1363,6 @@
# Investigate (IsFixedArrayBase).
'regress/regress-786784': [SKIP],
'regress/regress-v8-9656': [SKIP],
- # Investigate (startup_serializer_->ReferenceMapContains(obj)).
- 'regress/regress-813440': [SKIP],
# Investigate (segfault).
'regress/regress-crbug-397662': [SKIP],
# Script referenced only through context-dependent SourceTextModule
@@ -1312,142 +1378,6 @@
'regress/wasm/regress-9017': [SKIP],
}], # variant == slow_path
-################################################################################
-['variant == nci or variant == nci_as_midtier', {
- # Deopts differently than TurboFan.
- # Deoptimization support is still incomplete in general, since deopts can
- # only happen when explicitly requested by tests. NCI code objects are then
- # set as marked_for_deoptimization *and never unset*, which means functions
- # with attached NCI code objects will never again count as optimized.
- # TODO(jgruber): Fix this once deopts can occur outside tests.
- 'compiler/is-being-interpreted*': [SKIP],
- 'compiler/number-comparison-truncations': [SKIP],
- 'compiler/redundancy-elimination': [SKIP],
- 'compiler/regress-9945-*': [SKIP],
- 'regress/regress-1049982-1': [SKIP],
- 'regress/regress-1049982-2': [SKIP],
- 'compiler/test-dynamic-map-*': [SKIP],
- 'es6/collections-constructor-iterator-side-effect': [SKIP],
- 'es6/collections-constructor-with-modified-protoype': [SKIP],
- # assertUnoptimized: assumes full turbofan pipeline.
- 'allocation-site-info': [SKIP],
- 'array-bounds-check-removal': [SKIP],
- 'array-constructor-feedback': [SKIP],
- 'array-literal-feedback': [SKIP],
- 'array-literal-transitions': [SKIP],
- 'array-push5': [SKIP],
- 'array-store-and-grow': [SKIP],
- 'check-bounds-array-index': [SKIP],
- 'check-bounds-string-from-char-code-at': [SKIP],
- 'compiler/abstract-equal-oddball': [SKIP],
- 'compiler/abstract-equal-receiver': [SKIP],
- 'compiler/abstract-equal-symbol': [SKIP],
- 'compiler/abstract-equal-undetectable': [SKIP],
- 'compiler/array-multiple-receiver-maps': [SKIP],
- 'compiler/bigint-add-no-deopt-loop': [SKIP],
- 'compiler/bound-functions-serialize': [SKIP],
- 'compiler/concurrent-invalidate-transition-map': [SKIP],
- 'compiler/concurrent-proto-change': [SKIP],
- 'compiler/constant-fold-cow-array': [SKIP],
- 'compiler/dataview-deopt': [SKIP],
- 'compiler/dataview-detached': [SKIP],
- 'compiler/dataview-get': [SKIP],
- 'compiler/dataview-set': [SKIP],
- 'compiler/deopt-inlined-from-call': [SKIP],
- 'compiler/field-representation-tracking': [SKIP],
- 'compiler/globals-change-writable': [SKIP],
- 'compiler/globals-freeze-*': [SKIP],
- 'compiler/manual-concurrent-recompile': [SKIP],
- 'compiler/native-context-specialization-hole-check': [SKIP],
- 'compiler/number-divide': [SKIP],
- 'compiler/opt-higher-order-functions': [SKIP],
- 'compiler/promise-resolve-stable-maps': [SKIP],
- 'compiler/regress-905555-2': [SKIP],
- 'compiler/regress-905555': [SKIP],
- 'compiler/regress-9945-1': [SKIP],
- 'compiler/regress-9945-2': [SKIP],
- 'compiler/stress-deopt-count-2': [SKIP],
- 'compiler/strict-equal-receiver': [SKIP],
- 'compiler/string-from-code-point': [SKIP],
- 'concurrent-initial-prototype-change': [SKIP],
- 'const-field-tracking-2': [SKIP],
- 'const-field-tracking': [SKIP],
- 'deopt-recursive-eager-once': [SKIP],
- 'deopt-recursive-lazy-once': [SKIP],
- 'deopt-recursive-soft-once': [SKIP],
- 'deopt-unlinked': [SKIP],
- 'deopt-with-fp-regs': [SKIP],
- 'ensure-growing-store-learns': [SKIP],
- 'es6/array-iterator-turbo': [SKIP],
- 'es6/iterator-eager-deopt': [SKIP],
- 'es6/iterator-lazy-deopt': [SKIP],
- 'field-type-tracking': [SKIP],
- 'frozen-array-reduce': [SKIP],
- 'getters-on-elements': [SKIP],
- 'harmony/regexp-overriden-exec': [SKIP],
- 'keyed-load-hole-to-undefined': [SKIP],
- 'keyed-load-with-symbol-key': [SKIP],
- 'mjsunit_numfuzz': [SKIP],
- 'mjsunit': [SKIP],
- 'never-optimize': [SKIP],
- 'non-extensible-array-reduce': [SKIP],
- 'noopt': [SKIP],
- 'object-seal': [SKIP],
- 'optimized-array-every': [SKIP],
- 'optimized-array-findindex': [SKIP],
- 'optimized-array-find': [SKIP],
- 'optimized-array-some': [SKIP],
- 'optimized-filter': [SKIP],
- 'optimized-map': [SKIP],
- 'regress/regress-1016450': [SKIP],
- 'regress/regress-1034449': [SKIP],
- 'regress/regress-1073440': [SKIP],
- 'regress/regress-1112155': [SKIP],
- 'regress/regress-347914': [SKIP],
- 'regress/regress-3709': [SKIP],
- 'regress/regress-385565': [SKIP],
- 'regress/regress-410912': [SKIP],
- 'regress/regress-618608': [SKIP],
- 'regress/regress-9002': [SKIP],
- 'regress/regress-9441': [SKIP],
- 'regress/regress-961709-classes-opt': [SKIP],
- 'regress/regress-bind-deoptimize': [SKIP],
- 'regress/regress-crbug-500497': [SKIP],
- 'regress/regress-crbug-594183': [SKIP],
- 'regress/regress-embedded-cons-string': [SKIP],
- 'regress/regress-unlink-closures-on-deopt': [SKIP],
- 'regress/wasm/regress-02256b': [SKIP],
- 'regress/wasm/regress-02256': [SKIP],
- 'sealed-array-reduce': [SKIP],
- 'setters-on-elements': [SKIP],
- 'smi-mul-const': [SKIP],
- 'smi-mul': [SKIP],
- 'unary-minus-deopt': [SKIP],
- # TurbofanStaticAssert: assumes full turbofan pipeline.
- 'compiler/catch-block-load': [SKIP],
- 'compiler/concurrent-inlining-1': [SKIP],
- 'compiler/concurrent-inlining-2': [SKIP],
- 'compiler/constant-fold-add-static': [SKIP],
- 'compiler/construct-bound-function': [SKIP],
- 'compiler/construct-object': [SKIP],
- 'compiler/construct-receiver': [SKIP],
- 'compiler/diamond-followedby-branch': [SKIP],
- 'compiler/inlined-call-polymorphic': [SKIP],
- 'compiler/js-create-arguments': [SKIP],
- 'compiler/js-create': [SKIP],
- 'compiler/load-elimination-const-field': [SKIP],
- 'compiler/serializer-accessors': [SKIP],
- 'compiler/serializer-apply': [SKIP],
- 'compiler/serializer-call': [SKIP],
- 'compiler/serializer-dead-after-jump': [SKIP],
- 'compiler/serializer-dead-after-return': [SKIP],
- 'compiler/serializer-feedback-propagation-1': [SKIP],
- 'compiler/serializer-feedback-propagation-2': [SKIP],
- 'compiler/serializer-transition-propagation': [SKIP],
- # crbug.com/v8/11110
- 'es6/super-ic-opt*': [SKIP],
-}], # variant == nci or variant == nci_as_midtier
-
['((arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips) or (arch in [ppc64, s390x])', {
# Requires scalar lowering for 64x2 SIMD instructions, which are not
# implemented yet.
@@ -1471,4 +1401,37 @@
'wasm/shared-memory-worker-stress': [PASS, SLOW, ['tsan', SKIP]],
}], # variant == stress_incremental_marking
+##############################################################################
+['no_simd_sse == True', {
+ 'wasm/exceptions-simd': [SKIP],
+ 'wasm/liftoff-simd-params': [SKIP],
+ 'wasm/multi-value-simd': [SKIP],
+ 'wasm/simd-*': [SKIP],
+ 'regress/wasm/regress-10309': [SKIP],
+ 'regress/wasm/regress-10831': [SKIP],
+ 'regress/wasm/regress-1054466': [SKIP],
+ 'regress/wasm/regress-1065599': [SKIP],
+ 'regress/wasm/regress-1070078': [SKIP],
+ 'regress/wasm/regress-1081030': [SKIP],
+ 'regress/wasm/regress-1111522': [SKIP],
+ 'regress/wasm/regress-1112124': [SKIP],
+ 'regress/wasm/regress-1116019': [SKIP],
+ 'regress/wasm/regress-1124885': [SKIP],
+ 'regress/wasm/regress-1132461': [SKIP],
+ 'regress/wasm/regress-1161555': [SKIP],
+ 'regress/wasm/regress-1161954': [SKIP],
+ 'regress/wasm/regress-1165966': [SKIP],
+}], # no_simd_sse == True
+
+##############################################################################
+# TODO(v8:11421): Port baseline compiler to ia32, Arm, MIPS, S390 and PPC
+['arch not in (x64, arm64)', {
+ 'baseline/*': [SKIP],
+}],
+
+##############################################################################
+['variant == experimental_regexp', {
+ 'regress/regress-779407': [SKIP],
+}], # variant == experimental_regexp
+
]
diff --git a/deps/v8/test/mjsunit/object-seal.js b/deps/v8/test/mjsunit/object-seal.js
index 684d94a6b8..f951b83579 100644
--- a/deps/v8/test/mjsunit/object-seal.js
+++ b/deps/v8/test/mjsunit/object-seal.js
@@ -520,7 +520,7 @@ assertDoesNotThrow(function() {
});
});
obj.propertyA = 42;
-assertEquals(obj.propertyA, 42);
+assertEquals(obj, obj.propertyA);
assertThrows(function() {
Object.defineProperty(obj, 'abc', {
value: obj,
@@ -683,7 +683,7 @@ assertDoesNotThrow(function() {
});
});
obj.propertyA = 42;
-assertEquals(obj.propertyA, 42);
+assertEquals(obj, obj.propertyA);
assertThrows(function() {
Object.defineProperty(obj, 'abc', {
value: obj,
@@ -967,7 +967,7 @@ assertDoesNotThrow(function() {
});
});
obj.propertyA = 42;
-assertEquals(obj.propertyA, 42);
+assertEquals(obj, obj.propertyA);
assertThrows(function() {
Object.defineProperty(obj, 'abc', {
value: obj,
@@ -1121,7 +1121,7 @@ assertDoesNotThrow(function() {
});
});
obj.propertyA = 42;
-assertEquals(obj.propertyA, 42);
+assertEquals(obj, obj.propertyA);
assertThrows(function() {
Object.defineProperty(obj, 'abc', {
value: obj,
diff --git a/deps/v8/test/mjsunit/regexp-linear-flag.js b/deps/v8/test/mjsunit/regexp-linear-flag.js
index 029db097ce..d388f4d497 100644
--- a/deps/v8/test/mjsunit/regexp-linear-flag.js
+++ b/deps/v8/test/mjsunit/regexp-linear-flag.js
@@ -33,3 +33,10 @@ assertFalse((/asdf/ymsg).linear);
// unmodified regexps slow.
assertTrue(%RegexpIsUnmodified(/asdf/));
assertTrue(%RegexpIsUnmodified(/asdf/l));
+
+// Redefined .linear should reflect in flags.
+{
+ let re = /./;
+ Object.defineProperty(re, "linear", { get: function() { return true; } });
+ assertEquals("l", re.flags);
+}
diff --git a/deps/v8/test/mjsunit/regexp-no-linear-flag.js b/deps/v8/test/mjsunit/regexp-no-linear-flag.js
index 7df34aa830..eb5ea16073 100644
--- a/deps/v8/test/mjsunit/regexp-no-linear-flag.js
+++ b/deps/v8/test/mjsunit/regexp-no-linear-flag.js
@@ -15,8 +15,10 @@ assertThrows(() => new RegExp("((a*)*)*\1", "l"), SyntaxError)
assertFalse(RegExp.prototype.hasOwnProperty('linear'));
assertFalse(/123/.hasOwnProperty('linear'));
+// Redefined .linear shouldn't reflect in flags without
+// --enable-experimental-regexp-engine.
{
let re = /./;
- re.linear = true;
+ Object.defineProperty(re, "linear", { get: function() { return true; } });
assertEquals("", re.flags);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-575364.js b/deps/v8/test/mjsunit/regress/asm/regress-575364.js
index 0b967a0b53..0b967a0b53 100644
--- a/deps/v8/test/mjsunit/regress/regress-575364.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-575364.js
diff --git a/deps/v8/test/mjsunit/regress/regress-592352.js b/deps/v8/test/mjsunit/regress/asm/regress-592352.js
index 7947fdba2c..7947fdba2c 100644
--- a/deps/v8/test/mjsunit/regress/regress-592352.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-592352.js
diff --git a/deps/v8/test/mjsunit/regress/regress-599719.js b/deps/v8/test/mjsunit/regress/asm/regress-599719.js
index 89353a6787..89353a6787 100644
--- a/deps/v8/test/mjsunit/regress/regress-599719.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-599719.js
diff --git a/deps/v8/test/mjsunit/regress/regress-599825.js b/deps/v8/test/mjsunit/regress/asm/regress-599825.js
index 1b05bee615..1b05bee615 100644
--- a/deps/v8/test/mjsunit/regress/regress-599825.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-599825.js
diff --git a/deps/v8/test/mjsunit/regress/regress-608630.js b/deps/v8/test/mjsunit/regress/asm/regress-608630.js
index 58a95af7c3..58a95af7c3 100644
--- a/deps/v8/test/mjsunit/regress/regress-608630.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-608630.js
diff --git a/deps/v8/test/mjsunit/regress/regress-613928.js b/deps/v8/test/mjsunit/regress/asm/regress-613928.js
index 7de4da76d9..7de4da76d9 100644
--- a/deps/v8/test/mjsunit/regress/regress-613928.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-613928.js
diff --git a/deps/v8/test/mjsunit/regress/regress-617525.js b/deps/v8/test/mjsunit/regress/asm/regress-617525.js
index fb22f6af85..fb22f6af85 100644
--- a/deps/v8/test/mjsunit/regress/regress-617525.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-617525.js
diff --git a/deps/v8/test/mjsunit/regress/regress-617526.js b/deps/v8/test/mjsunit/regress/asm/regress-617526.js
index b3e02fcfca..b3e02fcfca 100644
--- a/deps/v8/test/mjsunit/regress/regress-617526.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-617526.js
diff --git a/deps/v8/test/mjsunit/regress/regress-617529.js b/deps/v8/test/mjsunit/regress/asm/regress-617529.js
index 042fef1809..042fef1809 100644
--- a/deps/v8/test/mjsunit/regress/regress-617529.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-617529.js
diff --git a/deps/v8/test/mjsunit/regress/regress-618608.js b/deps/v8/test/mjsunit/regress/asm/regress-618608.js
index 33c5fbf188..33c5fbf188 100644
--- a/deps/v8/test/mjsunit/regress/regress-618608.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-618608.js
diff --git a/deps/v8/test/mjsunit/regress/regress-6196.js b/deps/v8/test/mjsunit/regress/asm/regress-6196.js
index 1c61b0fa9a..1c61b0fa9a 100644
--- a/deps/v8/test/mjsunit/regress/regress-6196.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-6196.js
diff --git a/deps/v8/test/mjsunit/regress/regress-6298.js b/deps/v8/test/mjsunit/regress/asm/regress-6298.js
index c3f4de3c2d..c3f4de3c2d 100644
--- a/deps/v8/test/mjsunit/regress/regress-6298.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-6298.js
diff --git a/deps/v8/test/mjsunit/regress/regress-6431.js b/deps/v8/test/mjsunit/regress/asm/regress-6431.js
index 7b99b3fa87..7b99b3fa87 100644
--- a/deps/v8/test/mjsunit/regress/regress-6431.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-6431.js
diff --git a/deps/v8/test/mjsunit/regress/regress-6700.js b/deps/v8/test/mjsunit/regress/asm/regress-6700.js
index c20cefd02c..c20cefd02c 100644
--- a/deps/v8/test/mjsunit/regress/regress-6700.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-6700.js
diff --git a/deps/v8/test/mjsunit/regress/regress-6838-1.js b/deps/v8/test/mjsunit/regress/asm/regress-6838-1.js
index bab6a194d3..bab6a194d3 100644
--- a/deps/v8/test/mjsunit/regress/regress-6838-1.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-6838-1.js
diff --git a/deps/v8/test/mjsunit/regress/regress-6838-2.js b/deps/v8/test/mjsunit/regress/asm/regress-6838-2.js
index 31b94b43c2..31b94b43c2 100644
--- a/deps/v8/test/mjsunit/regress/regress-6838-2.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-6838-2.js
diff --git a/deps/v8/test/mjsunit/regress/regress-6838-3.js b/deps/v8/test/mjsunit/regress/asm/regress-6838-3.js
index 639ffa5da7..639ffa5da7 100644
--- a/deps/v8/test/mjsunit/regress/regress-6838-3.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-6838-3.js
diff --git a/deps/v8/test/mjsunit/regress/regress-6838-4.js b/deps/v8/test/mjsunit/regress/asm/regress-6838-4.js
index 6c6f8e0f73..6c6f8e0f73 100644
--- a/deps/v8/test/mjsunit/regress/regress-6838-4.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-6838-4.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-775710.js b/deps/v8/test/mjsunit/regress/asm/regress-775710.js
index 5e6fb8c50b..5e6fb8c50b 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-775710.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-775710.js
diff --git a/deps/v8/test/mjsunit/regress/regress-7893.js b/deps/v8/test/mjsunit/regress/asm/regress-7893.js
index 5ed008e7cf..5ed008e7cf 100644
--- a/deps/v8/test/mjsunit/regress/regress-7893.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-7893.js
diff --git a/deps/v8/test/mjsunit/regress/regress-8377.js b/deps/v8/test/mjsunit/regress/asm/regress-8377.js
index 32d2eb74fb..32d2eb74fb 100644
--- a/deps/v8/test/mjsunit/regress/regress-8377.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-8377.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8505.js b/deps/v8/test/mjsunit/regress/asm/regress-8505.js
index c1becbe454..c1becbe454 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-8505.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-8505.js
diff --git a/deps/v8/test/mjsunit/regress/regress-9022.js b/deps/v8/test/mjsunit/regress/asm/regress-9022.js
index 7922c3996b..7922c3996b 100644
--- a/deps/v8/test/mjsunit/regress/regress-9022.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-9022.js
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1006592.js b/deps/v8/test/mjsunit/regress/asm/regress-crbug-1006592.js
index c051d0861a..c051d0861a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-1006592.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-crbug-1006592.js
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-714971.js b/deps/v8/test/mjsunit/regress/asm/regress-crbug-714971.js
index d72c7a0fad..d72c7a0fad 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-714971.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-crbug-714971.js
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-715455.js b/deps/v8/test/mjsunit/regress/asm/regress-crbug-715455.js
index 87b240227d..87b240227d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-715455.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-crbug-715455.js
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-719384.js b/deps/v8/test/mjsunit/regress/asm/regress-crbug-719384.js
index 8b6a8385b0..8b6a8385b0 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-719384.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-crbug-719384.js
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-721835.js b/deps/v8/test/mjsunit/regress/asm/regress-crbug-721835.js
index 80f99e6dd5..80f99e6dd5 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-721835.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-crbug-721835.js
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-722348.js b/deps/v8/test/mjsunit/regress/asm/regress-crbug-722348.js
index 6c99e70e43..6c99e70e43 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-722348.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-crbug-722348.js
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-759327.js b/deps/v8/test/mjsunit/regress/asm/regress-crbug-759327.js
index 4aed8a456a..4aed8a456a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-759327.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-crbug-759327.js
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-771428.js b/deps/v8/test/mjsunit/regress/asm/regress-crbug-771428.js
index 3bebfa102c..3bebfa102c 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-771428.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-crbug-771428.js
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-898974.js b/deps/v8/test/mjsunit/regress/asm/regress-crbug-898974.js
index 1b9b07ab74..1b9b07ab74 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-898974.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-crbug-898974.js
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-934138.js b/deps/v8/test/mjsunit/regress/asm/regress-crbug-934138.js
index 2d23486717..2d23486717 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-934138.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-crbug-934138.js
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-969368.js b/deps/v8/test/mjsunit/regress/asm/regress-crbug-969368.js
index cfc60a3279..cfc60a3279 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-969368.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-crbug-969368.js
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-976934.js b/deps/v8/test/mjsunit/regress/asm/regress-crbug-976934.js
index 4c31615933..4c31615933 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-976934.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-crbug-976934.js
diff --git a/deps/v8/test/mjsunit/regress/regress-wasm-crbug-599413.js b/deps/v8/test/mjsunit/regress/asm/regress-wasm-crbug-599413.js
index 113fc892cc..113fc892cc 100644
--- a/deps/v8/test/mjsunit/regress/regress-wasm-crbug-599413.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-wasm-crbug-599413.js
diff --git a/deps/v8/test/mjsunit/regress/regress-wasm-crbug-618602.js b/deps/v8/test/mjsunit/regress/asm/regress-wasm-crbug-618602.js
index aa5bca9d80..aa5bca9d80 100644
--- a/deps/v8/test/mjsunit/regress/regress-wasm-crbug-618602.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-wasm-crbug-618602.js
diff --git a/deps/v8/test/mjsunit/regress/async-generator-is-awaiting.js b/deps/v8/test/mjsunit/regress/async-generator-is-awaiting.js
new file mode 100644
index 0000000000..869c1861ed
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/async-generator-is-awaiting.js
@@ -0,0 +1,43 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Async generator builtins that suspend the generator set the is_awaiting bit
+// to 1 before awaiting. This is cleared when resumed. This tests that the bit
+// is set after the await operation successfully completes (i.e. returns the
+// Promise), since it can throw, and that thrown exception can be caught by
+// script. Otherwise the is_awaiting bit won't be cleared.
+
+// This makes `await new Promise(() => {})` throw.
+Object.defineProperty(Promise.prototype, 'constructor', {
+ get() { throw 42; }
+});
+
+// AsyncGeneratorAwait
+{
+ async function *f() {
+ try {
+ await new Promise(() => {});
+ } catch (e) {
+ }
+ }
+
+ f().next();
+}
+
+// AsyncGeneratorYield
+{
+ async function *f() {
+ try {
+ yield new Promise(() => {});
+ } catch (e) {
+ }
+ }
+
+ f().next();
+}
+
+// AsyncGeneratorReturn isn't affected because it's not possible, in script, to
+// catch an error thrown by a return resumption. It'll be caught by the
+// synthetic try-catch around the whole body of the async generator, which will
+// correctly reset the is_awaiting bit.
diff --git a/deps/v8/test/mjsunit/regress/regress-1034322.js b/deps/v8/test/mjsunit/regress/regress-1034322.js
new file mode 100644
index 0000000000..c81493fbfe
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1034322.js
@@ -0,0 +1,30 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --stack-size=103
+
+let ticks = 0;
+
+function v0() {
+ try { v1(); } catch {}
+ // This triggers the deopt that may overflow the stack.
+ try { undefined[null] = null; } catch {}
+}
+
+function v1() {
+ while (!v0()) {
+ // Trigger OSR early to get a crashing case asap.
+ if (ticks == 5) %OptimizeOsr();
+ // With the bug fixed, there's no easy way to trigger termination. Instead,
+ // run until we reach a certain number of ticks. The crash triggers locally
+ // at tick 7562, thus running until 20k ticks to be somewhat safe.
+ if (ticks >= 20000) exit(0);
+ ticks++;
+ }
+}
+
+%PrepareFunctionForOptimization(v0);
+%PrepareFunctionForOptimization(v1);
+
+v0();
diff --git a/deps/v8/test/mjsunit/regress/regress-1075514.js b/deps/v8/test/mjsunit/regress/regress-1075514.js
new file mode 100644
index 0000000000..ff0510c36a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1075514.js
@@ -0,0 +1,21 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const re = /$/;
+
+// The runtime path (Runtime::kRegExpExec).
+assertEquals(["a"], "a".split(re));
+assertEquals("", RegExp.input);
+
+// Runtime / compilation to generated code.
+assertEquals(["a"], "a".split(re));
+assertEquals("", RegExp.input);
+
+// Generated code.
+assertEquals(["a"], "a".split(re));
+assertEquals("", RegExp.input);
+
+// Once again just because we can.
+assertEquals(["a"], "a".split(re));
+assertEquals("", RegExp.input);
diff --git a/deps/v8/test/mjsunit/regress/regress-1163715.js b/deps/v8/test/mjsunit/regress/regress-1163715.js
new file mode 100644
index 0000000000..c0838c213a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1163715.js
@@ -0,0 +1,27 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turboprop --allow-natives-syntax
+
+let last_value;
+let throwFunc;
+
+function foo(count) {
+ let val = 1;
+ for (let i = 16; i < count; ++i) {
+ try {
+ throwFunc();
+ } catch (e) {
+ }
+ val *= 2;
+ last_value = val;
+ }
+}
+
+%PrepareFunctionForOptimization(foo);
+foo(20);
+foo(21);
+%OptimizeFunctionOnNextCall(foo);
+foo(47);
+assertEquals(2147483648, last_value);
diff --git a/deps/v8/test/mjsunit/regress/regress-1166138.js b/deps/v8/test/mjsunit/regress/regress-1166138.js
index f3e4bde83e..b4145d8ae9 100644
--- a/deps/v8/test/mjsunit/regress/regress-1166138.js
+++ b/deps/v8/test/mjsunit/regress/regress-1166138.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --no-enable-experimental-regexp-engine
+
let badregexp = "(?:" + " ".repeat(32768*2)+ ")*";
reg = RegExp(badregexp);
assertThrows(() => reg.test(), SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-1168435.js b/deps/v8/test/mjsunit/regress/regress-1168435.js
new file mode 100644
index 0000000000..9667f4d7fe
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1168435.js
@@ -0,0 +1,22 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --concurrent-inlining
+
+function bar() {
+ arr = new Array(4);
+ iter = arr[Symbol.iterator];
+ return iter;
+}
+
+function foo(a) {
+ iter = bar();
+ return iter.isPrototypeOf(iter);
+}
+
+%PrepareFunctionForOptimization(foo);
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-1170261.js b/deps/v8/test/mjsunit/regress/regress-1170261.js
new file mode 100644
index 0000000000..ef2f446429
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1170261.js
@@ -0,0 +1,25 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+function foo(unused1, unused2, bigint) {
+ const temp = -bigint;
+}
+
+function bar() {
+ const arr = Array();
+ const obj = Object();
+ arr.reduce(foo, 0)
+}
+
+%PrepareFunctionForOptimization(foo);
+foo(0, 0, 2316465375n);
+%OptimizeFunctionOnNextCall(foo);
+foo(0, 0, 2316465375n);
+
+%PrepareFunctionForOptimization(bar);
+bar();
+%OptimizeFunctionOnNextCall(bar);
+bar();
diff --git a/deps/v8/test/mjsunit/regress/regress-1172797.js b/deps/v8/test/mjsunit/regress/regress-1172797.js
new file mode 100644
index 0000000000..05d39a1b86
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1172797.js
@@ -0,0 +1,48 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turboprop --opt --no-always-opt
+
+
+var v_0 = {};
+function f_0(o, v) {
+ o.f = v;
+}
+
+function f_1() {
+ return v_0.f;
+}
+
+%PrepareFunctionForOptimization(f_0);
+f_0(v_0, 42);
+f_0(v_0, 42);
+%OptimizeFunctionOnNextCall(f_0);
+f_0(v_0, 42);
+
+// TP tier up
+%PrepareFunctionForOptimization(f_1);
+f_1();
+f_1();
+%OptimizeFunctionOnNextCall(f_1);
+f_1();
+// Now TF tier up
+%PrepareFunctionForOptimization(f_1);
+f_1();
+%TierupFunctionOnNextCall(f_1);
+f_1();
+
+assertOptimized(f_0);
+// TODO(mythria): Add an option to assert on the optimization tier and assert
+// f_1 is optimized with TurboFan.
+assertOptimized(f_1);
+// Store in f_0 should trigger a change to the constness of the field.
+f_0(v_0, 53);
+// f_0 does an eager deopt and lets the interpreter update the field constness.
+assertUnoptimized(f_0);
+if (!%IsTopTierTurboprop()) {
+ // f_1 has TurboFan code and should deopt because of dependency change.
+ assertUnoptimized(f_1);
+}
+assertEquals(v_0.f, 53);
+assertEquals(f_1(), 53);
diff --git a/deps/v8/test/mjsunit/regress/regress-1176318.js b/deps/v8/test/mjsunit/regress/regress-1176318.js
new file mode 100644
index 0000000000..8e3fc7ed30
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1176318.js
@@ -0,0 +1,59 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var p01;
+var p02;
+var p03;
+var p04;
+var p05;
+var p06;
+var p07;
+var p08;
+var p09;
+var p10;
+var p11;
+var p12;
+var p13;
+var p14;
+var p15;
+var p16;
+var p17;
+var p18;
+var p19;
+var p20;
+var p21;
+var p22;
+var p23;
+var p24;
+var p25;
+var p26;
+var p27;
+var p28;
+var p29;
+var p30;
+var p31;
+var p32;
+var p33;
+var p34;
+var p35;
+var p36;
+var p37;
+var p38;
+var p39;
+var p40;
+var p41;
+var p42;
+var p43;
+var p44;
+
+p = { get b() {} };
+for (x in p) {}
+p = this;
+
+function foo() {
+ p.bla = p[42];
+ p.__defineGetter__('bla', function() {});
+}
+foo();
+try { var q = {}(); } catch(_) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-1176504.js b/deps/v8/test/mjsunit/regress/regress-1176504.js
new file mode 100644
index 0000000000..0eaf27e8eb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1176504.js
@@ -0,0 +1,15 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-lazy-feedback-allocation
+
+function foo() {
+ 'use strict';
+ x = 42;
+}
+
+__proto__ = {x: 1};
+
+assertThrows(foo);
+assertThrows(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-1180012.js b/deps/v8/test/mjsunit/regress/regress-1180012.js
new file mode 100644
index 0000000000..89c0619736
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1180012.js
@@ -0,0 +1,16 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+var __v_10 = {};
+var __v_9 = [-1];
+function __f_7() {
+ (__v_10[65535] | 65535) / __v_9[2147483648];
+}
+%PrepareFunctionForOptimization(__f_7);
+__f_7();
+__f_7();
+%OptimizeFunctionOnNextCall(__f_7);
+__f_7();
diff --git a/deps/v8/test/mjsunit/regress/regress-1181246.js b/deps/v8/test/mjsunit/regress/regress-1181246.js
new file mode 100644
index 0000000000..b92cc0c4f4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1181246.js
@@ -0,0 +1,19 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+Object.defineProperty(String.prototype, "0", { __v_1: 1});
+var __f_2 = function() {
+ function __f_2() {
+ ''[0];
+ };
+ %PrepareFunctionForOptimization(__f_2);
+ return __f_2;
+}();
+%PrepareFunctionForOptimization(__f_2);
+__f_2();
+__f_2();
+%OptimizeFunctionOnNextCall(__f_2);
+__f_2();
diff --git a/deps/v8/test/mjsunit/regress/regress-2326.js b/deps/v8/test/mjsunit/regress/regress-2326.js
index d2edf2b164..9265ecb62d 100644
--- a/deps/v8/test/mjsunit/regress/regress-2326.js
+++ b/deps/v8/test/mjsunit/regress/regress-2326.js
@@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// This tests that we do not share optimized code across closures that
-// were optimized using OSR (for a particular OSR entry AST id) even if
+// This tests that we do not share optimized code across closures that were
+// optimized using OSR (for a particular OSR entry bytecode offset) even if
// caching of optimized code kicks in.
function makeClosure() {
diff --git a/deps/v8/test/mjsunit/regress/regress-5902.js b/deps/v8/test/mjsunit/regress/regress-5902.js
index 6054104570..4f053a773f 100644
--- a/deps/v8/test/mjsunit/regress/regress-5902.js
+++ b/deps/v8/test/mjsunit/regress/regress-5902.js
@@ -52,4 +52,5 @@ Object.getOwnPropertyNames(global).forEach(function(name) {
});
// There should be no dictionary mode builtin objects.
-assertEquals([], log);
+if (!%IsDictPropertyConstTrackingEnabled())
+ assertEquals([], log);
diff --git a/deps/v8/test/mjsunit/regress/regress-666046.js b/deps/v8/test/mjsunit/regress/regress-666046.js
index 5cdaa11ab5..a71970825d 100644
--- a/deps/v8/test/mjsunit/regress/regress-666046.js
+++ b/deps/v8/test/mjsunit/regress/regress-666046.js
@@ -33,7 +33,9 @@ var o = new A();
foo(o);
foo(o);
foo(o);
-assertTrue(%HasFastProperties(proto));
+assertEquals(!%IsDictPropertyConstTrackingEnabled(),
+ %HasFastProperties(proto));
+
// Contruct a double value that looks like a tagged pointer.
var buffer = new ArrayBuffer(8);
@@ -49,7 +51,8 @@ proto.a4 = {a: 0};
delete proto.a4;
// |proto| must sill be fast.
-assertTrue(%HasFastProperties(proto));
+assertEquals(!%IsDictPropertyConstTrackingEnabled(),
+ %HasFastProperties(proto));
// Add a double field instead of deleted a4 that looks like a tagged pointer.
proto.boom = boom;
diff --git a/deps/v8/test/mjsunit/regress/regress-7115.js b/deps/v8/test/mjsunit/regress/regress-7115.js
index 837c11e930..8bbb1ded20 100644
--- a/deps/v8/test/mjsunit/regress/regress-7115.js
+++ b/deps/v8/test/mjsunit/regress/regress-7115.js
@@ -7,13 +7,18 @@
function TestBuiltinSubclassing(Builtin) {
assertTrue(%HasFastProperties(Builtin));
assertTrue(%HasFastProperties(Builtin.prototype));
- assertTrue(%HasFastProperties(Builtin.prototype.__proto__));
+ assertEquals(!%IsDictPropertyConstTrackingEnabled(),
+ %HasFastProperties(Builtin.prototype.__proto__));
class SubClass extends Builtin {}
- assertTrue(%HasFastProperties(Builtin));
- assertTrue(%HasFastProperties(Builtin.prototype));
- assertTrue(%HasFastProperties(Builtin.prototype.__proto__));
+ assertEquals(!%IsDictPropertyConstTrackingEnabled(),
+ %HasFastProperties(Builtin));
+ assertEquals(!%IsDictPropertyConstTrackingEnabled(),
+ %HasFastProperties(Builtin.prototype));
+ assertEquals(!%IsDictPropertyConstTrackingEnabled(),
+ %HasFastProperties(Builtin.prototype.__proto__));
+
}
let TypedArray = Uint8Array.__proto__;
diff --git a/deps/v8/test/mjsunit/regress/regress-740694.js b/deps/v8/test/mjsunit/regress/regress-740694.js
index dbe6db916c..1bccd20272 100644
--- a/deps/v8/test/mjsunit/regress/regress-740694.js
+++ b/deps/v8/test/mjsunit/regress/regress-740694.js
@@ -18,5 +18,5 @@ var promise = __f_0();
promise.then(assertUnreachable,
err => { done = true; error = err });
%PerformMicrotaskCheckpoint();
-assertTrue(error.startsWith('d8: Error reading'));
+assertTrue(error.message.startsWith('d8: Error reading'));
assertTrue(done);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1158138.js b/deps/v8/test/mjsunit/regress/regress-crbug-1158138.js
new file mode 100644
index 0000000000..802be6eaf1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1158138.js
@@ -0,0 +1,11 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+let a = { foo: 4 };
+Object.seal(a);
+assertTrue(Object.getOwnPropertyDescriptor(a, 'foo').writable);
+Object.defineProperty(a, 'foo', { writable: false });
+assertFalse(Object.getOwnPropertyDescriptor(a, 'foo').writable);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1161847-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-1161847-1.js
new file mode 100644
index 0000000000..282d9b8787
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1161847-1.js
@@ -0,0 +1,19 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(first_run) {
+ let o = { x: 0 };
+ if (first_run) assertTrue(%HasOwnConstDataProperty(o, 'x'));
+ Object.defineProperty(o, 'x', { writable: false });
+ delete o.x;
+ o.x = 23;
+ if (first_run) assertFalse(%HasOwnConstDataProperty(o, 'x'));
+}
+%PrepareFunctionForOptimization(foo);
+foo(true);
+foo(false);
+%OptimizeFunctionOnNextCall(foo);
+foo(false);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1161847-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-1161847-2.js
new file mode 100644
index 0000000000..ec61fee068
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1161847-2.js
@@ -0,0 +1,19 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(first_run) {
+ let o = { x: 0 };
+ if (first_run) assertTrue(%HasOwnConstDataProperty(o, 'x'));
+ Object.defineProperty(o, 'x', { get() { return 1; }, configurable: true, enumerable: true });
+ delete o.x;
+ o.x = 23;
+ if (first_run) assertFalse(%HasOwnConstDataProperty(o, 'x'));
+}
+%PrepareFunctionForOptimization(foo);
+foo(true);
+foo(false);
+%OptimizeFunctionOnNextCall(foo);
+foo(false);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1162473.js b/deps/v8/test/mjsunit/regress/regress-crbug-1162473.js
new file mode 100644
index 0000000000..bea98388b1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1162473.js
@@ -0,0 +1,7 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const script = `__proto__ = Realm.global(Realm.create());`;
+const w = new Worker(script, {type : 'string'});
+w.postMessage('hi');
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1166095.js b/deps/v8/test/mjsunit/regress/regress-crbug-1166095.js
new file mode 100644
index 0000000000..19ff88cc15
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1166095.js
@@ -0,0 +1,13 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --trace-turbo-reduction
+
+function foo() {
+ const v11 = new Int8Array(150);
+ Object(v11,...v11,v11);
+}
+
+for (i = 0; i < 100; i++)
+ foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1167918.js b/deps/v8/test/mjsunit/regress/regress-crbug-1167918.js
new file mode 100644
index 0000000000..a27a7a4342
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1167918.js
@@ -0,0 +1,17 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class A {
+}
+class B extends A {
+ m() {
+ let o = {
+ m2() {
+ }
+ };
+ () => { super.x; }
+ }
+}
+let b = new B();
+b.m();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1167981.js b/deps/v8/test/mjsunit/regress/regress-crbug-1167981.js
new file mode 100644
index 0000000000..a6f81f1ff0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1167981.js
@@ -0,0 +1,12 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class A {
+ constructor() {
+ x => { super[y] = 55; };
+ class x extends Object() { constructor() {} };
+ new x();
+ }
+};
+assertThrows(() => {new A();});
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1167988.js b/deps/v8/test/mjsunit/regress/regress-crbug-1167988.js
new file mode 100644
index 0000000000..4571b36a86
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1167988.js
@@ -0,0 +1,20 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let o1 = {
+ [() => {}]() {
+ return super.m();
+ }
+};
+
+let o2 = {
+ get [() => {}]() {
+ return super.m();
+ }
+};
+
+let o3 = {
+ [() => {}]: 1,
+ m2() { super.x; }
+};
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1168055.js b/deps/v8/test/mjsunit/regress/regress-crbug-1168055.js
new file mode 100644
index 0000000000..b7ed4df8ea
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1168055.js
@@ -0,0 +1,11 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let arr1 = [{
+ set ['a'](x) {
+ super.x;
+ },
+ y: 1
+}];
+let arr2 = new Float32Array(arr1);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1171195.js b/deps/v8/test/mjsunit/regress/regress-crbug-1171195.js
new file mode 100644
index 0000000000..bae0473ece
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1171195.js
@@ -0,0 +1,160 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function OriginalRegressionTest() {
+ function lazy() {
+ class X {
+ static x = function() {
+ function f() { eval(); }
+ };
+ }
+ }
+ lazy();
+})();
+
+(function TestEvalInsideFunctionInsideInitializer() {
+ function lazy() {
+ class A {}
+ class B extends A {
+ x = function() {
+ eval('super.y');
+ };
+ }
+ return B;
+ }
+ let c = lazy();
+ let o = new c();
+ assertThrows(() => {o.x()});
+})();
+
+(function TestEvalInsideArrowFunctionInsideInitializer() {
+ let result;
+ function lazy() {
+ class A {}
+ A.prototype.y = 42;
+ class B extends A {
+ x = () => {
+ eval('result = super.y');
+ };
+ }
+ return B;
+ }
+ let c = lazy();
+ let o = new c();
+ o.x();
+ assertEquals(42, result);
+})();
+
+(function TestEvalInsideFunctionInsideMethod() {
+ class A {}
+ A.prototype.x = 42;
+ class B extends A {
+ m() {
+ function f() {
+ eval("super.x;");
+ }
+ return f;
+ }
+ }
+ let f = (new B()).m();
+ assertThrows(() => { f(); });
+})();
+
+// Same as the previous test, except for object literals.
+(function TestEvalInsideFunctionInsideObjectLiteralMethod() {
+ let o = {
+ m() {
+ function f() {
+ eval("super.x;");
+ }
+ return f;
+ }
+ };
+ let f = o.m();
+ assertThrows(() => { f(); });
+})();
+
+(function TestEvalInsideArrowFunctionInsideMethod() {
+ let result;
+ class A {}
+ A.prototype.x = 42;
+ class B extends A {
+ m() {
+ let f = () => {
+ eval("result = super.x;");
+ }
+ return f;
+ }
+ }
+ let o = new B();
+ o.m()();
+ assertEquals(42, result);
+})();
+
+(function TestEvalInsideArrowFunctionInsideObjectLiteralMethod() {
+ let result;
+ let o = {
+ __proto__: {'x': 42},
+ m() {
+ let f = () => {
+ eval("result = super.x;");
+ }
+ return f;
+ }
+ };
+ o.m()();
+ assertEquals(42, result);
+})();
+
+(function TestSkippingMethodWithEvalInsideInnerFunc() {
+ function lazy() {
+ class MyClass {
+ test_method() {
+ var var1;
+ function f1() { eval(''); }
+ function skippable() { }
+ }
+ }
+ var o = new MyClass(); return o.test_method;
+ }
+ lazy();
+})();
+
+(function TestSkippingMethod() {
+ function lazy() {
+ class A {}
+ class B extends A {
+ skip_me() { return super.bar; }
+ }
+ }
+ lazy();
+})();
+
+(function TestSkippingObjectLiteralMethod() {
+ function lazy() {
+ let o = {
+ skip_me() { return super.bar; }
+ };
+ }
+ lazy();
+})();
+
+(function TestSkippingMethodWithEval() {
+ function lazy() {
+ class A {}
+ class B extends A {
+ skip_me() { eval(''); }
+ }
+ }
+ lazy();
+})();
+
+(function TestSkippingObjectLiteralMethodWithEval() {
+ function lazy() {
+ let o = {
+ skip_me() { eval(''); }
+ };
+ }
+ lazy();
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1171600.js b/deps/v8/test/mjsunit/regress/regress-crbug-1171600.js
new file mode 100644
index 0000000000..16deaf6943
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1171600.js
@@ -0,0 +1,21 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function TestSpreadAfterMethodUsingSuper() {
+ let v = {
+ m() {
+ { super.x; };
+ },
+ ...[() => {}]
+ };
+})();
+
+(function TestSpreadAfterMethodUsingEval() {
+ let v = {
+ m() {
+ { eval(); };
+ },
+ ...[() => {}]
+ };
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1177058.js b/deps/v8/test/mjsunit/regress/regress-crbug-1177058.js
new file mode 100644
index 0000000000..d2745ac742
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1177058.js
@@ -0,0 +1,15 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function __f_8() {
+ Object.prototype.__defineGetter__(0, () => {
+ throw Error();
+ });
+})();
+
+function __f_9() {
+};
+assertThrows( () => { new Worker(__f_9, {
+ type: 'function',
+ arguments: [,]})});
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-605060.js b/deps/v8/test/mjsunit/regress/regress-crbug-605060.js
index d2dc79a310..e048dff115 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-605060.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-605060.js
@@ -7,4 +7,5 @@
Array.prototype.__defineGetter__('map', function(){});
Array.prototype.__defineGetter__('map', function(){});
Array.prototype.__defineGetter__('map', function(){});
-assertTrue(%HasFastProperties(Array.prototype));
+assertEquals(!%IsDictPropertyConstTrackingEnabled(),
+ %HasFastProperties(Array.prototype));
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-11360.js b/deps/v8/test/mjsunit/regress/regress-v8-11360.js
new file mode 100644
index 0000000000..e2ab8f8560
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-11360.js
@@ -0,0 +1,212 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function TestCompoundAssignmentToPrivateField() {
+ class C {
+ #foo = 1;
+ m() {
+ return this.#foo += 1;
+ }
+ }
+
+ assertEquals(2, (new C()).m());
+})();
+
+(function TestCompoundAssignmentToPrivateFieldWithOnlyGetter() {
+ class C {
+ get #foo() { return 1; }
+ m() {
+ return this.#foo += 1;
+ }
+ }
+
+ assertThrows(() => { (new C()).m(); });
+})();
+
+(function TestCompoundAssignmentToPrivateFieldWithOnlySetter() {
+ class C {
+ set #foo(a) { }
+ m() {
+ return this.#foo += 1;
+ }
+ }
+
+ assertThrows(() => { (new C()).m(); });
+})();
+
+(function TestCompoundAssignmentToPrivateFieldWithGetterAndSetter() {
+ class C {
+ get #foo() { return 1; }
+ set #foo(a) { }
+ m() {
+ return this.#foo += 1;
+ }
+ }
+
+ assertEquals(2, (new C()).m());
+})();
+
+(function TestCompoundAssignmentToPrivateMethod() {
+ class C {
+ m() {
+ return this.#pm += 1;
+ }
+ #pm() {}
+ }
+
+ assertThrows(() => { (new O()).m(); });
+})();
+
+(function TestCompoundAssignmentToStaticPrivateField() {
+ class C {
+ static #foo = 1;
+ m() {
+ return C.#foo += 1;
+ }
+ }
+
+ assertEquals(2, (new C()).m());
+})();
+
+(function TestCompoundAssignmentToStaticPrivateFieldWithOnlyGetter() {
+ class C {
+ static get #foo() { return 1; }
+ m() {
+ return C.#foo += 1;
+ }
+ }
+
+ assertThrows(() => { (new C()).m(); });
+})();
+
+(function TestCompoundAssignmentToStaticPrivateFieldWithOnlySetter() {
+ class C {
+ static set #foo(a) { }
+ m() {
+ return C.#foo += 1;
+ }
+ }
+
+ assertThrows(() => { (new C()).m(); });
+})();
+
+(function TestCompoundAssignmentToStaticPrivateFieldWithGetterAndSetter() {
+ class C {
+ static get #foo() { return 1; }
+ static set #foo(a) { }
+ m() {
+ return C.#foo += 1;
+ }
+ }
+
+ assertEquals(2, (new C()).m());
+})();
+
+(function TestCompoundAssignmentToStaticPrivateMethod() {
+ class C {
+ m() {
+ return C.#pm += 1;
+ }
+ static #pm() {}
+ }
+
+ assertThrows(() => { (new O()).m(); });
+})();
+
+// The following tests cover the above cases with brand check failures.
+
+(function TestBrandCheck_CompoundAssignmentToPrivateField() {
+ class C {
+ #foo = 1;
+ m() {
+ return this.#foo += 1;
+ }
+ }
+
+ assertThrows(() => { C.prototype.m.call({}); }, TypeError,
+ /Cannot read private member/);
+
+ // It's the same error we get from this case:
+ class C2 {
+ #foo = 1;
+ m() {
+ return this.#foo;
+ }
+ }
+
+ assertThrows(() => { C2.prototype.m.call({}); }, TypeError,
+ /Cannot read private member/);
+})();
+
+(function TestBrandCheck_CompoundAssignmentToPrivateFieldWithOnlyGetter() {
+ class C {
+ get #foo() { return 1; }
+ m() {
+ return this.#foo += 1;
+ }
+ }
+
+ assertThrows(() => { C.prototype.m.call({}); }, TypeError,
+ /Object must be an instance of class/);
+
+ // It's the same error we get from this case:
+ class C2 {
+ get #foo() { return 1; }
+ m() {
+ return this.#foo;
+ }
+ }
+
+ assertThrows(() => { C2.prototype.m.call({}); }, TypeError,
+ /Object must be an instance of class/);
+})();
+
+(function TestBrandCheck_CompoundAssignmentToPrivateFieldWithOnlySetter() {
+ class C {
+ set #foo(a) { }
+ m() {
+ return this.#foo += 1;
+ }
+ }
+
+ assertThrows(() => { C.prototype.m.call({}); }, TypeError,
+ /Object must be an instance of class/);
+})();
+
+(function TestBrandCheck_CompoundAssignmentToPrivateFieldWithGetterAndSetter() {
+ class C {
+ get #foo() { return 1; }
+ set #foo(a) { }
+ m() {
+ return this.#foo += 1;
+ }
+ }
+
+ assertThrows(() => { C.prototype.m.call({}); }, TypeError,
+ /Object must be an instance of class/);
+
+ // It's the same error we get from this case:
+ class C2 {
+ get #foo() { return 1; }
+ set #foo(a) { }
+ m() {
+ return this.#foo;
+ }
+ }
+
+ assertThrows(() => { C2.prototype.m.call({}); }, TypeError,
+ /Object must be an instance of class/);
+})();
+
+(function TestBrandCheck_CompoundAssignmentToPrivateMethod() {
+ class C {
+ m() {
+ return this.#pm += 1;
+ }
+ #pm() {}
+ }
+
+ assertThrows(() => { C.prototype.m.call({}); }, TypeError,
+ /Object must be an instance of class/);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-1034394.js b/deps/v8/test/mjsunit/regress/wasm/regress-1034394.js
index 99519d8ffe..99519d8ffe 100644
--- a/deps/v8/test/mjsunit/regress/regress-1034394.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1034394.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1054466.js b/deps/v8/test/mjsunit/regress/wasm/regress-1054466.js
index 2c35add9ad..2fbb82ff71 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1054466.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1054466.js
@@ -44,7 +44,7 @@ kExprI32Const, 0x83, 0x01, // i32.const
kSimdPrefix, kExprI32x4Splat, // i32x4.splat
kSimdPrefix, kExprI32x4Eq, // i32x4.eq
kSimdPrefix, kExprI32x4Eq, // i32x4.eq
-kSimdPrefix, kExprV8x16AnyTrue, // v8x16.any_true
+kSimdPrefix, kExprV128AnyTrue, // v128.any_true
kExprEnd, // end @64
]);
builder.addExport('main', 0);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1065599.js b/deps/v8/test/mjsunit/regress/wasm/regress-1065599.js
index 55833a76b6..7deceb5dea 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1065599.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1065599.js
@@ -17,7 +17,7 @@ builder.addFunction(undefined, 0 /* sig */).addBodyWithEnd([
kSimdPrefix, kExprI16x8Splat, // i16x8.splat
kExprMemorySize, 0x00, // memory.size
kSimdPrefix, kExprI16x8ShrS, 0x01, // i16x8.shr_s
- kSimdPrefix, kExprV8x16AnyTrue, // v8x16.any_true
+ kSimdPrefix, kExprV128AnyTrue, // v128.any_true
kExprMemorySize, 0x00, // memory.size
kExprI32RemS, // i32.rem_s
kExprEnd, // end @15
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1070078.js b/deps/v8/test/mjsunit/regress/wasm/regress-1070078.js
index 5301c5747d..51293755e4 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1070078.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1070078.js
@@ -30,7 +30,7 @@ builder.addFunction(undefined, 0 /* sig */).addBodyWithEnd([
0x04, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // i8x16.shuffle
kSimdPrefix, kExprI8x16LeU, // i8x16.le_u
- kSimdPrefix, kExprV8x16AnyTrue, // v8x16.any_true
+ kSimdPrefix, kExprV128AnyTrue, // v128.any_true
kExprMemoryGrow, 0x00, // memory.grow
kExprDrop,
kExprEnd, // end @233
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1081030.js b/deps/v8/test/mjsunit/regress/wasm/regress-1081030.js
index 7430c62128..afebaa7ca1 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1081030.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1081030.js
@@ -17,7 +17,7 @@ builder.addFunction(undefined, 0 /* sig */).addBodyWithEnd([
kExprF32Const, 0xf8, 0xf8, 0xf8, 0xf8,
kSimdPrefix, kExprF32x4Splat, // f32x4.splat
kSimdPrefix, kExprF32x4Min, 0x01, // f32x4.min
- kSimdPrefix, kExprV32x4AnyTrue, 0x01, // i32x4.any_true
+ kSimdPrefix, kExprV128AnyTrue, 0x01, // v128.any_true
kExprEnd, // end @16
]);
builder.addExport('main', 0);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-11335.js b/deps/v8/test/mjsunit/regress/wasm/regress-11335.js
new file mode 100644
index 0000000000..cb1c679faf
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-11335.js
@@ -0,0 +1,56 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --expose-gc --interrupt-budget=100
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+function makeFFI(func, t) {
+ var builder = new WasmModuleBuilder();
+
+ var sig_index = builder.addType(makeSig([t,t,t,t,t,t,t,t,t,t], [t]));
+ builder.addImport("m", "func", sig_index);
+ // Try to create a frame with lots of spilled values and parameters
+ // on the stack to try to catch GC bugs in the reference maps for
+ // the different parts of the stack.
+ builder.addFunction("main", sig_index)
+ .addBody([
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
+ kExprLocalGet, 2, // --
+ kExprLocalGet, 3, // --
+ kExprLocalGet, 4, // --
+ kExprLocalGet, 5, // --
+ kExprLocalGet, 6, // --
+ kExprLocalGet, 7, // --
+ kExprLocalGet, 8, // --
+ kExprLocalGet, 9, // --
+ kExprCallFunction, 0, // --
+ kExprDrop, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
+ kExprLocalGet, 2, // --
+ kExprLocalGet, 3, // --
+ kExprLocalGet, 4, // --
+ kExprLocalGet, 5, // --
+ kExprLocalGet, 6, // --
+ kExprLocalGet, 7, // --
+ kExprLocalGet, 8, // --
+ kExprLocalGet, 9, // --
+ kExprCallFunction, 0, // --
+ ]) // --
+ .exportFunc();
+
+ return builder.instantiate({m: {func: func}}).exports.main;
+}
+
+function print10(a, b, c, d, e, f, g, h, i) {
+ gc();
+}
+(function F64Test() {
+ var main = makeFFI(print10, kWasmF64);
+ for (var i = 1; i < 2e+80; i *= -1137) {
+ main(i - 1, i, i + 2, i + 3, i + 4, i + 5, i + 6, i + 7, i + 8);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1161555.js b/deps/v8/test/mjsunit/regress/wasm/regress-1161555.js
new file mode 100644
index 0000000000..186aa626dc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1161555.js
@@ -0,0 +1,38 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-simd --wasm-lazy-compilation
+
+// Test case copied from clusterfuzz; it exercises a bug in WasmCompileLazy
+// where we are not correctly pushing the full 128 bits of a SIMD register.
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const __v_0 = new WasmModuleBuilder();
+__v_0.addImportedMemory('m', 'imported_mem');
+__v_0.addFunction('main', makeSig([], [])).addBodyWithEnd([
+ kExprI32Const, 0, kSimdPrefix, kExprS128LoadMem, 0, 0, kExprCallFunction,
+ 0x01, kExprEnd
+]);
+__v_0.addFunction('function2', makeSig([kWasmS128], [])).addBodyWithEnd([
+ kExprI32Const, 17, kExprLocalGet, 0, kSimdPrefix, kExprS128StoreMem, 0, 0,
+ kExprI32Const, 9, kExprLocalGet, 0, kExprCallFunction, 0x02, kExprEnd
+]);
+__v_0.addFunction('function3', makeSig([kWasmI32, kWasmS128], []))
+ .addBodyWithEnd([
+ kExprI32Const, 32, kExprLocalGet, 1, kSimdPrefix, kExprS128StoreMem, 0, 0,
+ kExprEnd
+ ]);
+__v_0.addExport('main');
+var __v_1 = new WebAssembly.Memory({
+ initial: 1,
+});
+const __v_2 = __v_0.instantiate({m: {imported_mem: __v_1}});
+const __v_3 = new Uint8Array(__v_1.buffer);
+for (let __v_4 = 0; __v_4 < 16; __v_4++) {
+ __v_3[__v_4] = __v_4 * 2;
+}
+__v_2.exports.main();
+for (let __v_5 = 0; __v_5 < 16; __v_5++) {
+ assertEquals(__v_3[__v_5], __v_3[__v_5 + 32]);
+}
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1168116.js b/deps/v8/test/mjsunit/regress/wasm/regress-1168116.js
new file mode 100644
index 0000000000..380bc6d043
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1168116.js
@@ -0,0 +1,48 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addType(makeSig([kWasmF32, kWasmF32, kWasmI32, kWasmI32, kWasmI32, kWasmExternRef, kWasmI32, kWasmI32, kWasmI32, kWasmI32], [kWasmI64]));
+// Generate function 1 (out of 2).
+builder.addFunction(undefined, 0 /* sig */)
+ .addBodyWithEnd([
+// signature: l_ffiiiniiii
+// body:
+]);
+// Generate function 2 (out of 2).
+builder.addFunction(undefined, 0 /* sig */)
+ .addBodyWithEnd([
+// signature: l_ffiiiniiii
+// body:
+kExprLocalGet, 0x00, // local.get
+kExprLocalGet, 0x01, // local.get
+kExprLocalGet, 0x02, // local.get
+kExprLocalGet, 0x03, // local.get
+kExprI32Const, 0x05, // i32.const
+kExprLocalGet, 0x05, // local.get
+kExprLocalGet, 0x06, // local.get
+kExprLocalGet, 0x07, // local.get
+kExprI32Const, 0x5b, // i32.const
+kExprI32Const, 0x30, // i32.const
+kExprCallFunction, 0x01, // call function #1: l_ffiiiniiii
+kExprLocalGet, 0x00, // local.get
+kExprLocalGet, 0x01, // local.get
+kExprLocalGet, 0x02, // local.get
+kExprLocalGet, 0x03, // local.get
+kExprLocalGet, 0x07, // local.get
+kExprLocalGet, 0x05, // local.get
+kExprLocalGet, 0x06, // local.get
+kExprLocalGet, 0x07, // local.get
+kExprI32Const, 0x7f, // i32.const
+kExprI64DivS, // i64.div_s
+kExprF64Eq, // f64.eq
+kExprI32DivU, // i32.div_u
+kExprTableGet, 0x7f, // table.get
+kExprI64ShrS, // i64.shr_s
+]);
+assertThrows(function() { builder.instantiate(); }, WebAssembly.CompileError);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1171788.js b/deps/v8/test/mjsunit/regress/wasm/regress-1171788.js
new file mode 100644
index 0000000000..2ca8112327
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1171788.js
@@ -0,0 +1,46 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addType(makeSig(
+ [
+ kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmFuncRef, kWasmI32, kWasmI32,
+ kWasmI32, kWasmI32, kWasmI32
+ ],
+ [kWasmF64]));
+// Generate function 1 (out of 1).
+builder.addFunction(undefined, 0 /* sig */)
+ .addBodyWithEnd([
+// signature: d_iiiiniiiii
+// body:
+kExprLocalGet, 0x03, // local.get
+kExprLocalGet, 0x08, // local.get
+kExprLocalGet, 0x00, // local.get
+kExprI32Const, 0x01, // i32.const
+kExprLocalGet, 0x04, // local.get
+kExprLocalGet, 0x05, // local.get
+kExprLocalGet, 0x06, // local.get
+kExprLocalGet, 0x00, // local.get
+kExprLocalGet, 0x07, // local.get
+kExprLocalGet, 0x06, // local.get
+kExprCallFunction, 0x00, // call function #0: d_iiiiniiiii
+kExprLocalGet, 0x00, // local.get
+kExprLocalGet, 0x01, // local.get
+kExprLocalGet, 0x00, // local.get
+kExprLocalGet, 0x08, // local.get
+kExprLocalGet, 0x01, // local.get
+kExprLocalGet, 0x00, // local.get
+kExprLocalGet, 0x01, // local.get
+kExprLocalGet, 0x07, // local.get
+kExprLocalGet, 0x08, // local.get
+kExprLocalGet, 0x09, // local.get
+kExprCallFunction, 0x00, // call function #0: d_iiiiniiiii
+kExprUnreachable, // unreachable
+kExprEnd, // end @46
+]);
+assertThrows(function() { builder.instantiate(); }, WebAssembly.CompileError);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1179025.js b/deps/v8/test/mjsunit/regress/wasm/regress-1179025.js
new file mode 100644
index 0000000000..cf77cdc809
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1179025.js
@@ -0,0 +1,42 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(1, 1, false, true);
+builder.addType(makeSig([], []));
+builder.addType(makeSig([kWasmI64], [kWasmF32]));
+// Generate function 1 (out of 2).
+builder.addFunction(undefined, 0 /* sig */)
+ .addBodyWithEnd([
+// signature: v_v
+// body:
+kExprNop, // nop
+kExprEnd, // end @2
+]);
+// Generate function 2 (out of 2).
+builder.addFunction(undefined, 1 /* sig */)
+ .addLocals(kWasmI64, 1)
+ .addBodyWithEnd([
+// signature: f_l
+// body:
+kExprBlock, kWasmF32, // block @3 f32
+ kExprI32Const, 0x00, // i32.const
+ kExprI32Const, 0x01, // i32.const
+ kExprIf, kWasmI64, // if @9 i64
+ kExprI64Const, 0x00, // i64.const
+ kExprElse, // else @13
+ kExprUnreachable, // unreachable
+ kExprEnd, // end @15
+ kAtomicPrefix, kExprI64AtomicStore, 0x03, 0x04, // i64.atomic.store64
+ kExprF32Const, 0x00, 0x00, 0x00, 0x00, // f32.const
+ kExprEnd, // end @25
+kExprDrop, // drop
+kExprF32Const, 0x00, 0x00, 0x80, 0x51, // f32.const
+kExprEnd, // end @32
+]);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1179065.js b/deps/v8/test/mjsunit/regress/wasm/regress-1179065.js
new file mode 100644
index 0000000000..508494e920
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1179065.js
@@ -0,0 +1,21 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging --wasm-dynamic-tiering
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(1, 10);
+builder.addFunction('load', kSig_i_i).addBody([
+ // signature: i_i
+ // body:
+ kExprLocalGet, 0, // local.get
+ kExprI32LoadMem, 0, 0, // i32.load_mem
+]).exportFunc();
+const instance = builder.instantiate();
+// Call multiple times to trigger dynamic tiering.
+for (let i = 0; i < 20; ++i) {
+ instance.exports.load(1);
+}
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1179182.js b/deps/v8/test/mjsunit/regress/wasm/regress-1179182.js
new file mode 100644
index 0000000000..907cf563c9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1179182.js
@@ -0,0 +1,28 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(28, 32, false);
+builder.addFunction(undefined, kSig_i_v)
+ .addLocals(kWasmI32, 61)
+ .addBody([
+kExprI64Const, 0x0, // i64.const
+kExprI32Const, 0x0, // i32.const
+kExprIf, kWasmStmt, // if
+ kExprI32Const, 0x0, // i32.const
+ kExprI32LoadMem, 0x01, 0x23, // i32.load
+ kExprBrTable, 0x01, 0x00, 0x00, // br_table
+ kExprEnd, // end
+kExprI64SExtendI16, // i64.extend16_s
+kExprI32Const, 0x00, // i32.const
+kExprLocalGet, 0x00, // local.get
+kExprI32StoreMem16, 0x00, 0x10, // i32.store16
+kExprUnreachable, // unreachable
+]).exportAs('main');
+const instance = builder.instantiate();
+assertThrows(instance.exports.main, WebAssembly.RuntimeError, 'unreachable');
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1180690.js b/deps/v8/test/mjsunit/regress/wasm/regress-1180690.js
new file mode 100644
index 0000000000..c5e1016ad6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1180690.js
@@ -0,0 +1,29 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --wasm-test-streaming --wasm-lazy-compilation --wasm-lazy-validation
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+
+(function f1() {
+ const builder = new WasmModuleBuilder();
+ builder.addMemory(1, 1);
+ builder.addFunction('main', kSig_i_i).addBody([
+ kExprLocalGet, 0,
+ kExprI32LoadMem, 0, 0
+ ]).exportFunc();
+ const instance = builder.instantiate();
+ instance.exports.main();
+})();
+
+(function f2() {
+ const builder = new WasmModuleBuilder();
+ builder.addFunction('id', kSig_i_i).addBody([]).exportFunc();
+ const buffer = builder.toBuffer();
+ const instance = builder.instantiate();
+ try {
+ instance.exports.id();
+ } catch {}
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-5888.js b/deps/v8/test/mjsunit/regress/wasm/regress-5888.js
index 6481c79338..6481c79338 100644
--- a/deps/v8/test/mjsunit/regress/regress-5888.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-5888.js
diff --git a/deps/v8/test/mjsunit/regress/regress-5911.js b/deps/v8/test/mjsunit/regress/wasm/regress-5911.js
index 9d6d4ae5b8..9d6d4ae5b8 100644
--- a/deps/v8/test/mjsunit/regress/regress-5911.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-5911.js
diff --git a/deps/v8/test/mjsunit/regress/regress-813440.js b/deps/v8/test/mjsunit/regress/wasm/regress-813440.js
index f4df95daae..f4df95daae 100644
--- a/deps/v8/test/mjsunit/regress/regress-813440.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-813440.js
diff --git a/deps/v8/test/mjsunit/regress/regress-863810.js b/deps/v8/test/mjsunit/regress/wasm/regress-863810.js
index 841909a207..841909a207 100644
--- a/deps/v8/test/mjsunit/regress/regress-863810.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-863810.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8896.js b/deps/v8/test/mjsunit/regress/wasm/regress-8896.js
index 51f1b27188..ef953b9c64 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-8896.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8896.js
@@ -3,6 +3,8 @@
// found in the LICENSE file.
// Flags: --experimental-wasm-eh --allow-natives-syntax
+// Disable Liftoff so we can serialize the module.
+// Flags: --no-liftoff
load("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/regress/regress-8947.js b/deps/v8/test/mjsunit/regress/wasm/regress-8947.js
index 17507b1002..17507b1002 100644
--- a/deps/v8/test/mjsunit/regress/regress-8947.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8947.js
diff --git a/deps/v8/test/mjsunit/regress/regress-9209.js b/deps/v8/test/mjsunit/regress/wasm/regress-9209.js
index 92e3658ca7..92e3658ca7 100644
--- a/deps/v8/test/mjsunit/regress/regress-9209.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-9209.js
diff --git a/deps/v8/test/mjsunit/regress/regress-9832.js b/deps/v8/test/mjsunit/regress/wasm/regress-9832.js
index f8b40fd661..05b63b0984 100644
--- a/deps/v8/test/mjsunit/regress/regress-9832.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-9832.js
@@ -15,7 +15,6 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
kExprI32Add,
]).exportFunc();
builder.addFunction("main", kSig_i_i)
- .addLocals(kWasmExnRef, 1)
.addBody([
kExprTry, kWasmStmt,
kExprLocalGet, 0,
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1047368.js b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1047368.js
index 800cf61879..800cf61879 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-1047368.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1047368.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1168386.js b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1168386.js
new file mode 100644
index 0000000000..226c11fbd4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1168386.js
@@ -0,0 +1,26 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --interrupt-budget=100
+
+function __f_0(__v_8) {
+ var __v_9 = "mod_";
+ var __v_10 = eval(
+ 'function Module(stdlib, foreign, heap) {\n' +
+ ' "use asm";\n' +
+ ' function ' + __v_9 + '(dividend) {\n' +
+ ' dividend = dividend | 0;\n' +
+ ' return ((dividend | 0) % ' + __v_8 + ') | 0;\n'
+ + ' }\n' +
+ ' return { f: ' + __v_9 + '}\n'
+ + '}; Module');
+ return __v_10().f;
+}
+try {
+ const __v_5 = -1;
+ const __v_6 = __f_0(1);
+ for (var __v_7 = 0; __v_7 < 100; __v_7++) {
+ __v_7 % __v_5 | __v_6();
+ }
+} catch (e) {}
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1172912.js b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1172912.js
new file mode 100644
index 0000000000..22243e49bb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1172912.js
@@ -0,0 +1,49 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-reftypes --experimental-wasm-typed-funcref
+
+let raw = new Uint8Array([
+ 0x00, 0x61, 0x73, 0x6d, // wasm magic
+ 0x01, 0x00, 0x00, 0x00, // wasm version
+
+ 0x01, // section: types
+ 0x05, // section length
+ 0x01, // types count
+ 0x60, // function type
+ 0x00, // param count
+ 0x01, // return count
+ 0x7f, // i32
+
+ 0x03, // section: functions
+ 0x02, // section size: 2
+ 0x01, // function count: 1
+ 0x00, // sig index: 0
+
+ 0x07, // section: exports
+ 0x08, // section size
+ 0x01, // exports count
+ 0x04, // name length: 4
+ 0x6d, 0x61, 0x69, 0x6e, // name: "main"
+ 0x00, // export kind: function
+ 0x00, // export function index: 0
+
+ 0x0a, // section: code
+ 0x0d, // section length
+ 0x01, // functions count: 1
+ 0x0b, // body size
+ 0x00, // locals count
+ 0xd2, 0x00, // ref.func 0
+ 0xd1, // ref.is_null
+ 0x04, 0x40, // if [void]
+ 0x05, // else
+ 0x0b, // end
+ 0x41, 0x2a, // i32.const: 42
+ 0x0b, // end
+]);
+let buff = raw.buffer;
+let mod = new WebAssembly.Module(buff);
+let inst = new WebAssembly.Instance(mod);
+let result = inst.exports.main();
+assertEquals(42, result);
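The hand-encoded module above can also be sanity-checked before it is compiled. A minimal sketch, assuming the same `raw` bytes and the same experimental flags as the test: WebAssembly.validate() returns a boolean instead of throwing, which is handy when probing whether a byte sequence is accepted at all.

  // Sketch: validate the raw bytes before compiling them (assumes `raw` from above).
  const isValid = WebAssembly.validate(raw.buffer);
  print(isValid);  // expected to be true under the same flags the test passes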
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-746835.js b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-746835.js
index ab5e3549d4..ab5e3549d4 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-746835.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-746835.js
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-772056.js b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-772056.js
index d9fb4d51d2..d9fb4d51d2 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-772056.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-772056.js
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-816961.js b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-816961.js
index c1637ad2c5..c1637ad2c5 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-816961.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-816961.js
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-969498.js b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-969498.js
index 4dddcb3bd5..4dddcb3bd5 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-969498.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-969498.js
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-9106.js b/deps/v8/test/mjsunit/regress/wasm/regress-v8-9106.js
index f51c2e9498..f51c2e9498 100644
--- a/deps/v8/test/mjsunit/regress/regress-v8-9106.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-v8-9106.js
diff --git a/deps/v8/test/mjsunit/smi-mul-const.js b/deps/v8/test/mjsunit/smi-mul-const.js
index fd26c835c3..ea515404a9 100644
--- a/deps/v8/test/mjsunit/smi-mul-const.js
+++ b/deps/v8/test/mjsunit/smi-mul-const.js
@@ -50,6 +50,7 @@ function limit_range(a) {
// Limit the range of 'a' to enable no-overflow optimizations.
return Math.max(Math.min(a | 0, 10), -10);
}
+%EnsureFeedbackVectorForFunction(limit_range);
function mul_by_neg_127(a) { return limit_range(a) * -127; }
function mul_by_neg_128(a) { return limit_range(a) * -128; }
diff --git a/deps/v8/test/mjsunit/stack-traces-class-fields.js b/deps/v8/test/mjsunit/stack-traces-class-fields.js
index d40abbab9a..03edb81188 100644
--- a/deps/v8/test/mjsunit/stack-traces-class-fields.js
+++ b/deps/v8/test/mjsunit/stack-traces-class-fields.js
@@ -43,13 +43,13 @@ function testClassConstruction() {
// ReferenceError: FAIL is not defined
// at thrower
-// at <static_fields_initializer>
+// at <static_initializer>
// at testClassConstruction
// at testTrace
testTrace(
"during class construction",
testClassConstruction,
- ["thrower", "<static_fields_initializer>"],
+ ["thrower", "<static_initializer>"],
["anonymous"]
);
diff --git a/deps/v8/test/mjsunit/stack-traces-custom.js b/deps/v8/test/mjsunit/stack-traces-custom.js
index 0cb53faaec..8982548df4 100644
--- a/deps/v8/test/mjsunit/stack-traces-custom.js
+++ b/deps/v8/test/mjsunit/stack-traces-custom.js
@@ -2,28 +2,43 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --allow-natives-syntax
+
+function testMethodNames(o) {
+ try {
+ o.k = 42;
+ } catch (e) {
+ Error.prepareStackTrace = function(e, frames) { return frames; };
+ var frames = e.stack;
+ Error.prepareStackTrace = undefined;
+ assertEquals("f", frames[0].getMethodName());
+ assertEquals(null, frames[1].getMethodName());
+ assertEquals("h1", frames[2].getMethodName());
+ assertEquals("j", frames[3].getMethodName());
+ assertEquals("k", frames[4].getMethodName());
+ assertEquals("testMethodNames", frames[5].getMethodName());
+ }
+}
+
var o = {
f: function() { throw new Error(); },
- get j() { o.h(); },
+ get j() { o.h1(); },
set k(_) { o.j; },
};
-o.g1 = function() { o.f() }
-o.g2 = o.g1;
-o.h = function() { o.g1() }
-
-try {
- o.k = 42;
-} catch (e) {
- Error.prepareStackTrace = function(e, frames) { return frames; };
- var frames = e.stack;
- Error.prepareStackTrace = undefined;
- assertEquals("f", frames[0].getMethodName());
- assertEquals(null, frames[1].getMethodName());
- assertEquals("h", frames[2].getMethodName());
- assertEquals("j", frames[3].getMethodName());
- assertEquals("k", frames[4].getMethodName());
- assertEquals(null, frames[5].getMethodName());
-}
+const duplicate = function() { o.f() }
+o.g1 = duplicate;
+o.g2 = duplicate;
+o.h1 = function() { o.g1() }
+o.h2 = o.h1;
+
+// Test in dictionary mode first.
+assertFalse(%HasFastProperties(o));
+testMethodNames(o);
+
+// Same test but with fast mode object.
+o = %ToFastProperties(o);
+assertTrue(%HasFastProperties(o));
+testMethodNames(o);
function testMethodName(f, frameNumber, expectedName) {
try {
diff --git a/deps/v8/test/mjsunit/string-external-cached.js b/deps/v8/test/mjsunit/string-external-cached.js
index bfecd17314..dd8a99382b 100644
--- a/deps/v8/test/mjsunit/string-external-cached.js
+++ b/deps/v8/test/mjsunit/string-external-cached.js
@@ -53,7 +53,7 @@ function test() {
assertEquals('B', charat_str[i].charAt(3*16 + 11));
}
- charat_short = "0123456789ABC";
+ charat_short = "01234";
try { // String can only be externalized once
externalizeString(charat_short, true);
} catch (ex) { }
diff --git a/deps/v8/test/mjsunit/wasm/atomics-stress.js b/deps/v8/test/mjsunit/wasm/atomics-stress.js
index 9eb18050cb..19a9a0ccfb 100644
--- a/deps/v8/test/mjsunit/wasm/atomics-stress.js
+++ b/deps/v8/test/mjsunit/wasm/atomics-stress.js
@@ -299,25 +299,27 @@ function getSequence(start, end) {
}
function spawnWorkers() {
+ function workerCode() {
+ onmessage = function(msg) {
+ if (msg.module) {
+ let module = msg.module;
+ let mem = msg.mem;
+ this.instance = new WebAssembly.Instance(module, {m: {imported_mem: mem}});
+ postMessage({instantiated: true});
+ } else {
+ let address = msg.address;
+ let sequence = msg.sequence;
+ let index = msg.index;
+ let spin = msg.spin;
+ let result = instance.exports["worker" + index](address, sequence, spin);
+ postMessage({index: index, sequence: sequence, result: result});
+ }
+ }
+ }
+
let workers = [];
for (let i = 0; i < kNumberOfWorker; i++) {
- let worker = new Worker(
- `onmessage = function(msg) {
- if (msg.module) {
- let module = msg.module;
- let mem = msg.mem;
- this.instance = new WebAssembly.Instance(module, {m: {imported_mem: mem}});
- postMessage({instantiated: true});
- } else {
- let address = msg.address;
- let sequence = msg.sequence;
- let index = msg.index;
- let spin = msg.spin;
- let result = instance.exports["worker" + index](address, sequence, spin);
- postMessage({index: index, sequence: sequence, result: result});
- }
- }`,
- {type: 'string'});
+ let worker = new Worker(workerCode, {type: 'function'});
workers.push(worker);
}
return workers;
diff --git a/deps/v8/test/mjsunit/wasm/embenchen/box2d.js b/deps/v8/test/mjsunit/wasm/embenchen/box2d.js
index d8800e7758..fdca531bf6 100644
--- a/deps/v8/test/mjsunit/wasm/embenchen/box2d.js
+++ b/deps/v8/test/mjsunit/wasm/embenchen/box2d.js
@@ -1,5 +1,5 @@
// Modified embenchen to direct to asm-wasm.
-// Flags: --validate-asm --allow-natives-syntax
+// Flags: --validate-asm --allow-natives-syntax --wasm-loop-unrolling
var EXPECTED_OUTPUT =
/frame averages: .+ \+- .+, range: .+ to .+ \n/;
diff --git a/deps/v8/test/mjsunit/wasm/embenchen/corrections.js b/deps/v8/test/mjsunit/wasm/embenchen/corrections.js
index e8c46316b8..4e965699c0 100644
--- a/deps/v8/test/mjsunit/wasm/embenchen/corrections.js
+++ b/deps/v8/test/mjsunit/wasm/embenchen/corrections.js
@@ -1,5 +1,5 @@
// Modified embenchen to direct to asm-wasm.
-// Flags: --validate-asm --allow-natives-syntax
+// Flags: --validate-asm --allow-natives-syntax --wasm-loop-unrolling
var EXPECTED_OUTPUT = 'final: 40006013:58243.\n';
var Module = {
diff --git a/deps/v8/test/mjsunit/wasm/embenchen/lua_binarytrees.js b/deps/v8/test/mjsunit/wasm/embenchen/lua_binarytrees.js
index 17d52a33b7..09c0079cc6 100644
--- a/deps/v8/test/mjsunit/wasm/embenchen/lua_binarytrees.js
+++ b/deps/v8/test/mjsunit/wasm/embenchen/lua_binarytrees.js
@@ -1,5 +1,5 @@
// Modified embenchen to direct to asm-wasm.
-// Flags: --validate-asm --allow-natives-syntax
+// Flags: --validate-asm --allow-natives-syntax --wasm-loop-unrolling
var EXPECTED_OUTPUT =
'stretch tree of depth 10\t check: -1\n' +
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-global.js b/deps/v8/test/mjsunit/wasm/exceptions-global.js
deleted file mode 100644
index 136cee808a..0000000000
--- a/deps/v8/test/mjsunit/wasm/exceptions-global.js
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-wasm --experimental-wasm-eh --allow-natives-syntax
-
-// Note that this test does not pass --experimental-wasm-reftypes on purpose so
-// that we make sure the two flags can be controlled separately/independently.
-
-load("test/mjsunit/wasm/wasm-module-builder.js");
-
-// First we just test that "exnref" global variables are allowed.
-(function TestGlobalExnRefSupported() {
- print(arguments.callee.name);
- let builder = new WasmModuleBuilder();
- let g = builder.addGlobal(kWasmExnRef);
- builder.addFunction("push_and_drop_exnref", kSig_v_v)
- .addBody([
- kExprGlobalGet, g.index,
- kExprDrop,
- ]).exportFunc();
- let instance = builder.instantiate();
-
- assertDoesNotThrow(instance.exports.push_and_drop_exnref);
-})();
-
-// Test default value that global "exnref" variables are initialized with.
-(function TestGlobalExnRefDefaultValue() {
- print(arguments.callee.name);
- let builder = new WasmModuleBuilder();
- let g = builder.addGlobal(kWasmExnRef);
- builder.addFunction('push_and_return_exnref', kSig_e_v)
- .addBody([kExprGlobalGet, g.index])
- .exportFunc();
- let instance = builder.instantiate();
-
- assertEquals(null, instance.exports.push_and_return_exnref());
-})();
-
-// Test custom initialization index for a global "exnref" variable.
-(function TestGlobalExnRefInitIndex() {
- print(arguments.callee.name);
- let builder = new WasmModuleBuilder();
- let g1_index = builder.addImportedGlobal("m", "exn", kWasmExnRef);
- let g2 = builder.addGlobal(kWasmExnRef);
- g2.init_index = g1_index; // Initialize {g2} to equal {g1}.
- builder.addFunction('push_and_return_exnref', kSig_e_v)
- .addBody([kExprGlobalGet, g2.index])
- .exportFunc();
- let exception = { x: "my fancy exception" };
- let instance = builder.instantiate({ "m": { "exn": exception }});
-
- assertSame(exception, instance.exports.push_and_return_exnref());
-})();
diff --git a/deps/v8/test/mjsunit/wasm/exceptions.js b/deps/v8/test/mjsunit/wasm/exceptions.js
index 1b1f66f410..d7539119ab 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions.js
@@ -7,21 +7,6 @@
load("test/mjsunit/wasm/wasm-module-builder.js");
load("test/mjsunit/wasm/exceptions-utils.js");
-// First we just test that "exnref" local variables are allowed.
-(function TestLocalExnRef() {
- print(arguments.callee.name);
- let builder = new WasmModuleBuilder();
- builder.addFunction("push_and_drop_exnref", kSig_v_v)
- .addLocals(kWasmExnRef, 1)
- .addBody([
- kExprLocalGet, 0,
- kExprDrop,
- ]).exportFunc();
- let instance = builder.instantiate();
-
- assertDoesNotThrow(instance.exports.push_and_drop_exnref);
-})();
-
// The following method doesn't attempt to catch a raised exception.
(function TestThrowSimple() {
print(arguments.callee.name);
@@ -249,7 +234,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprCatch, except,
kExprEnd,
      // Calling through JS produces a wrapped exception which does not match
- // the br_on_exn.
+ // the catch.
kExprTry, kWasmStmt,
kExprCallFunction, imp,
kExprCatch, except,
diff --git a/deps/v8/test/mjsunit/wasm/externref.js b/deps/v8/test/mjsunit/wasm/externref.js
index 741f95955b..a954f273ae 100644
--- a/deps/v8/test/mjsunit/wasm/externref.js
+++ b/deps/v8/test/mjsunit/wasm/externref.js
@@ -206,15 +206,18 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
(function testExternRefLocalDefaultValue() {
print(arguments.callee.name);
- const builder = new WasmModuleBuilder();
- builder.addFunction('main', kSig_r_v)
- .addBody([kExprLocalGet, 0])
- .addLocals(kWasmExternRef, 1)
- .exportFunc();
+ const numLocals = 3;
+ for (let i = 0; i < numLocals; ++i) {
+ const builder = new WasmModuleBuilder();
+ builder.addFunction('main', kSig_r_v)
+ .addBody([kExprLocalGet, i])
+ .addLocals(kWasmExternRef, numLocals)
+ .exportFunc();
- const instance = builder.instantiate();
+ const instance = builder.instantiate();
- assertEquals(null, instance.exports.main());
+ assertEquals(null, instance.exports.main());
+ }
})();
(function testImplicitReturnNullAsExternRef() {
@@ -285,3 +288,48 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
instance.exports.main({hello: 4}, 5, {world: 6}, null, {bar: 7});
})();
+
+(function testGCInStackCheckUnalignedFrameSize() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+
+ const gc_sig = builder.addType(kSig_v_v);
+ const mysig = makeSig(
+ [
+ kWasmExternRef, kWasmI32, kWasmExternRef, kWasmExternRef, kWasmExternRef
+ ],
+ []);
+ const func_sig = builder.addType(mysig);
+ const triggerGC_index = builder.addImport('q', 'triggerGC', gc_sig);
+ const func_index = builder.addImport('q', 'func', func_sig);
+
+ const foo = builder.addFunction('foo', func_sig).addBody([
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
+ kExprLocalGet, 2, // --
+ kExprLocalGet, 3, // --
+ kExprLocalGet, 4, // --
+ kExprCallFunction, func_index
+ ]).addLocals(kWasmI32, 1);
+
+ builder.addFunction('main', func_sig)
+ .addBody([
+ kExprCallFunction, triggerGC_index, // --
+ kExprLocalGet, 0, // --
+ kExprLocalGet, 1, // --
+ kExprLocalGet, 2, // --
+ kExprLocalGet, 3, // --
+ kExprLocalGet, 4, // --
+ kExprCallFunction, foo.index
+ ])
+ .exportFunc();
+
+ const instance = builder.instantiate({
+ q: {
+ triggerGC: () => %ScheduleGCInStackCheck(),
+ func: (ref) => assertEquals(ref.hello, 4)
+ }
+ });
+
+ instance.exports.main({hello: 4}, 5, {world: 6}, null, {bar: 7});
+})();
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js b/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js
index 7940ab5f19..660ec08e90 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --stress-compaction
+// Flags: --expose-wasm --stress-compaction --wasm-loop-unrolling
load('test/mjsunit/wasm/wasm-module-builder.js');
diff --git a/deps/v8/test/mjsunit/wasm/grow-shared-memory.js b/deps/v8/test/mjsunit/wasm/grow-shared-memory.js
index b5c86cb5ef..2e3e42e8e5 100644
--- a/deps/v8/test/mjsunit/wasm/grow-shared-memory.js
+++ b/deps/v8/test/mjsunit/wasm/grow-shared-memory.js
@@ -36,14 +36,18 @@ let workerHelpers = assertTrue.toString() + assertIsWasmSharedMemory.toString();
(function TestPostMessageWithGrow() {
print(arguments.callee.name);
- let worker = new Worker(workerHelpers +
- `onmessage = function(obj) {
- assertIsWasmSharedMemory(obj.memory);
- assertTrue(1 === obj.memory.grow(1));
- assertTrue(obj.memory.buffer.byteLength === obj.expected_size);
- assertIsWasmSharedMemory(obj.memory);
- postMessage("OK");
- }`, {type: 'string'});
+ function workerCode(workerHelpers) {
+ eval(workerHelpers);
+ onmessage = function(obj) {
+ assertIsWasmSharedMemory(obj.memory);
+ assertTrue(1 === obj.memory.grow(1));
+ assertTrue(obj.memory.buffer.byteLength === obj.expected_size);
+ assertIsWasmSharedMemory(obj.memory);
+ postMessage("OK");
+ }
+ }
+ let worker = new Worker(workerCode,
+ {type: 'function', arguments: [workerHelpers]});
let memory = new WebAssembly.Memory({initial: 1, maximum: 5, shared: true});
let obj = {memory: memory, expected_size: 2 * kPageSize};
@@ -59,17 +63,21 @@ let workerHelpers = assertTrue.toString() + assertIsWasmSharedMemory.toString();
// operations are performed on the same memory object.
(function TestWorkersWithGrowEarlyWorkerTerminate() {
print(arguments.callee.name);
- let workerScript = workerHelpers +
- `onmessage = function(obj) {
+ function workerCode(workerHelpers) {
+ eval(workerHelpers);
+ onmessage = function(obj) {
assertIsWasmSharedMemory(obj.memory);
obj.memory.grow(1);
assertIsWasmSharedMemory(obj.memory);
assertTrue(obj.memory.buffer.byteLength === obj.expected_size);
postMessage("OK");
- };`;
+ };
+ }
- let workers = [new Worker(workerScript, {type: 'string'}),
- new Worker(workerScript, {type: 'string'})];
+ let workers = [new Worker(workerCode,
+ {type: 'function', arguments: [workerHelpers]}),
+ new Worker(workerCode,
+ {type: 'function', arguments: [workerHelpers]})];
let memory = new WebAssembly.Memory({initial: 1, maximum: 5, shared: true});
let expected_pages = 1;
for (let worker of workers) {
@@ -86,21 +94,24 @@ let workerHelpers = assertTrue.toString() + assertIsWasmSharedMemory.toString();
// PostMessage of Multiple memories and grow
(function TestGrowSharedWithMultipleMemories() {
print(arguments.callee.name);
- let workerScript = workerHelpers +
- `onmessage = function(obj) {
- let expected_size = 0;
- let kPageSize = 0x10000;
- for (let memory of obj.memories) {
- assertIsWasmSharedMemory(memory);
- assertTrue(expected_size === memory.grow(2));
- expected_size+=2;
- assertIsWasmSharedMemory(memory);
- assertTrue(memory.buffer.byteLength === expected_size * kPageSize);
- }
- postMessage("OK");
- };`;
+ function workerCode(workerHelpers) {
+ eval(workerHelpers);
+ onmessage = function(obj) {
+ let expected_size = 0;
+ let kPageSize = 0x10000;
+ for (let memory of obj.memories) {
+ assertIsWasmSharedMemory(memory);
+ assertTrue(expected_size === memory.grow(2));
+ expected_size+=2;
+ assertIsWasmSharedMemory(memory);
+ assertTrue(memory.buffer.byteLength === expected_size * kPageSize);
+ }
+ postMessage("OK");
+ };
+ }
- let worker = new Worker(workerScript, {type: 'string'});
+ let worker = new Worker(workerCode,
+ {type: 'function', arguments: [workerHelpers]});
let memories = [new WebAssembly.Memory({initial: 0, maximum: 2, shared: true}),
new WebAssembly.Memory({initial: 2, maximum: 10, shared: true}),
new WebAssembly.Memory({initial: 4, maximum: 12, shared: true})];
@@ -116,21 +127,25 @@ let workerHelpers = assertTrue.toString() + assertIsWasmSharedMemory.toString();
// SharedMemory Object shared between different instances
(function TestPostMessageJSAndWasmInterop() {
print(arguments.callee.name);
- let worker = new Worker(workerHelpers +
- `onmessage = function(obj) {
- let kPageSize = 0x10000;
- assertIsWasmSharedMemory(obj.memory);
- let instance = new WebAssembly.Instance(
- obj.module, {m: {memory: obj.memory}});
- assertTrue(5 === obj.memory.grow(10));
- assertIsWasmSharedMemory(obj.memory);
- assertTrue(obj.memory.buffer.byteLength === 15 * kPageSize);
- assertTrue(15 === instance.exports.grow(5));
- assertIsWasmSharedMemory(obj.memory);
- assertTrue(obj.memory.buffer.byteLength === 20 * kPageSize);
- postMessage("OK");
- }`, {type: 'string'});
+ function workerCode(workerHelpers) {
+ eval(workerHelpers);
+ onmessage = function(obj) {
+ let kPageSize = 0x10000;
+ assertIsWasmSharedMemory(obj.memory);
+ let instance = new WebAssembly.Instance(
+ obj.module, {m: {memory: obj.memory}});
+ assertTrue(5 === obj.memory.grow(10));
+ assertIsWasmSharedMemory(obj.memory);
+ assertTrue(obj.memory.buffer.byteLength === 15 * kPageSize);
+ assertTrue(15 === instance.exports.grow(5));
+ assertIsWasmSharedMemory(obj.memory);
+ assertTrue(obj.memory.buffer.byteLength === 20 * kPageSize);
+ postMessage("OK");
+ }
+ }
+ let worker = new Worker(workerCode,
+ {type: 'function', arguments: [workerHelpers]});
let memory = new WebAssembly.Memory({initial: 5, maximum: 50, shared: true});
var builder = new WasmModuleBuilder();
builder.addImportedMemory("m", "memory", 5, 100, "shared");
@@ -148,21 +163,25 @@ let workerHelpers = assertTrue.toString() + assertIsWasmSharedMemory.toString();
(function TestConsecutiveJSAndWasmSharedGrow() {
print(arguments.callee.name);
- let worker = new Worker(workerHelpers +
- `onmessage = function(obj) {
- let kPageSize = 0x10000;
- assertIsWasmSharedMemory(obj.memory);
- let instance = new WebAssembly.Instance(
- obj.module, {m: {memory: obj.memory}});
- assertTrue(5 === obj.memory.grow(10));
- assertIsWasmSharedMemory(obj.memory);
- assertTrue(obj.memory.buffer.byteLength === 15 * kPageSize);
- assertTrue(15 === instance.exports.grow(5));
- assertIsWasmSharedMemory(obj.memory);
- assertTrue(obj.memory.buffer.byteLength === 20 * kPageSize);
- postMessage("OK");
- }`, {type: 'string'});
+ function workerCode(workerHelpers) {
+ eval(workerHelpers);
+ onmessage = function(obj) {
+ let kPageSize = 0x10000;
+ assertIsWasmSharedMemory(obj.memory);
+ let instance = new WebAssembly.Instance(
+ obj.module, {m: {memory: obj.memory}});
+ assertTrue(5 === obj.memory.grow(10));
+ assertIsWasmSharedMemory(obj.memory);
+ assertTrue(obj.memory.buffer.byteLength === 15 * kPageSize);
+ assertTrue(15 === instance.exports.grow(5));
+ assertIsWasmSharedMemory(obj.memory);
+ assertTrue(obj.memory.buffer.byteLength === 20 * kPageSize);
+ postMessage("OK");
+ }
+ }
+ let worker = new Worker(workerCode,
+ {type: 'function', arguments: [workerHelpers]});
let memory = new WebAssembly.Memory({initial: 5, maximum: 50, shared: true});
var builder = new WasmModuleBuilder();
builder.addImportedMemory("m", "memory", 5, 100, "shared");
@@ -179,21 +198,25 @@ let workerHelpers = assertTrue.toString() + assertIsWasmSharedMemory.toString();
(function TestConsecutiveWasmSharedGrow() {
print(arguments.callee.name);
- let worker = new Worker(workerHelpers +
- `onmessage = function(obj) {
- let kPageSize = 0x10000;
- assertIsWasmSharedMemory(obj.memory);
- let instance = new WebAssembly.Instance(
- obj.module, {m: {memory: obj.memory}});
- assertTrue(5 === obj.memory.grow(10));
- assertIsWasmSharedMemory(obj.memory);
- assertTrue(obj.memory.buffer.byteLength === 15 * kPageSize);
- assertTrue(17 === instance.exports.grow_twice(2));
- assertIsWasmSharedMemory(obj.memory);
- assertTrue(obj.memory.buffer.byteLength === 19 * kPageSize);
- postMessage("OK");
- }`, {type: 'string'});
+ function workerCode(workerHelpers) {
+ eval(workerHelpers);
+ onmessage = function(obj) {
+ let kPageSize = 0x10000;
+ assertIsWasmSharedMemory(obj.memory);
+ let instance = new WebAssembly.Instance(
+ obj.module, {m: {memory: obj.memory}});
+ assertTrue(5 === obj.memory.grow(10));
+ assertIsWasmSharedMemory(obj.memory);
+ assertTrue(obj.memory.buffer.byteLength === 15 * kPageSize);
+ assertTrue(17 === instance.exports.grow_twice(2));
+ assertIsWasmSharedMemory(obj.memory);
+ assertTrue(obj.memory.buffer.byteLength === 19 * kPageSize);
+ postMessage("OK");
+ }
+ }
+ let worker = new Worker(workerCode,
+ {type: 'function', arguments: [workerHelpers]});
let memory = new WebAssembly.Memory({initial: 5, maximum: 50, shared: true});
var builder = new WasmModuleBuilder();
builder.addImportedMemory("m", "memory", 5, 100, "shared");
@@ -217,22 +240,26 @@ let workerHelpers = assertTrue.toString() + assertIsWasmSharedMemory.toString();
(function TestConsecutiveSharedGrowAndMemorySize() {
print(arguments.callee.name);
- let worker = new Worker(workerHelpers +
- `onmessage = function(obj) {
- let kPageSize = 0x10000;
- assertIsWasmSharedMemory(obj.memory);
- let instance = new WebAssembly.Instance(
- obj.module, {m: {memory: obj.memory}});
- assertTrue(5 === obj.memory.grow(10));
- assertTrue(15 === instance.exports.memory_size());
- assertIsWasmSharedMemory(obj.memory);
- assertTrue(obj.memory.buffer.byteLength === 15 * kPageSize);
- assertTrue(19 === instance.exports.grow_and_size(2));
- assertIsWasmSharedMemory(obj.memory);
- assertTrue(obj.memory.buffer.byteLength === 19 * kPageSize);
- postMessage("OK");
- }`, {type: 'string'});
+ function workerCode(workerHelpers) {
+ eval(workerHelpers);
+ onmessage = function(obj) {
+ let kPageSize = 0x10000;
+ assertIsWasmSharedMemory(obj.memory);
+ let instance = new WebAssembly.Instance(
+ obj.module, {m: {memory: obj.memory}});
+ assertTrue(5 === obj.memory.grow(10));
+ assertTrue(15 === instance.exports.memory_size());
+ assertIsWasmSharedMemory(obj.memory);
+ assertTrue(obj.memory.buffer.byteLength === 15 * kPageSize);
+ assertTrue(19 === instance.exports.grow_and_size(2));
+ assertIsWasmSharedMemory(obj.memory);
+ assertTrue(obj.memory.buffer.byteLength === 19 * kPageSize);
+ postMessage("OK");
+ }
+ }
+ let worker = new Worker(workerCode,
+ {type: 'function', arguments: [workerHelpers]});
let memory = new WebAssembly.Memory({initial: 5, maximum: 50, shared: true});
var builder = new WasmModuleBuilder();
builder.addImportedMemory("m", "memory", 5, 100, "shared");
@@ -266,32 +293,36 @@ let workerHelpers = assertTrue.toString() + assertIsWasmSharedMemory.toString();
// integrity checking and bounds checks testing are needed.
(function TestSpotCheckMemoryWithSharedGrow() {
print(arguments.callee.name);
- let worker = new Worker(workerHelpers +
- `onmessage = function(obj) {
- let kPageSize = 0x10000;
- assertIsWasmSharedMemory(obj.memory);
- let instance = new WebAssembly.Instance(
- obj.module, {m: {memory: obj.memory}});
- assertTrue(5 === obj.memory.grow(10));
- assertIsWasmSharedMemory(obj.memory);
- assertTrue(obj.memory.buffer.byteLength === 15 * kPageSize);
- // Store again, and verify that the previous stores are still reflected.
- instance.exports.atomic_store(15 * kPageSize - 4, 0xACED);
- assertTrue(0xACED === instance.exports.atomic_load(0));
- assertTrue(0xACED === instance.exports.atomic_load(5 * kPageSize - 4));
- assertTrue(0xACED === instance.exports.atomic_load(15 * kPageSize - 4));
- assertTrue(15 === instance.exports.grow(2));
- assertIsWasmSharedMemory(obj.memory);
- assertTrue(obj.memory.buffer.byteLength === 17 * kPageSize);
- // Validate previous writes.
- instance.exports.atomic_store(17 * kPageSize - 4, 0xACED);
- assertTrue(0xACED === instance.exports.atomic_load(0));
- assertTrue(0xACED === instance.exports.atomic_load(5 * kPageSize - 4));
- assertTrue(0xACED === instance.exports.atomic_load(15 * kPageSize - 4));
- assertTrue(0xACED === instance.exports.atomic_load(17 * kPageSize - 4));
- postMessage("OK");
- }`, {type: 'string'});
+ function workerCode(workerHelpers) {
+ eval(workerHelpers);
+ onmessage = function(obj) {
+ let kPageSize = 0x10000;
+ assertIsWasmSharedMemory(obj.memory);
+ let instance = new WebAssembly.Instance(
+ obj.module, {m: {memory: obj.memory}});
+ assertTrue(5 === obj.memory.grow(10));
+ assertIsWasmSharedMemory(obj.memory);
+ assertTrue(obj.memory.buffer.byteLength === 15 * kPageSize);
+ // Store again, and verify that the previous stores are still reflected.
+ instance.exports.atomic_store(15 * kPageSize - 4, 0xACED);
+ assertTrue(0xACED === instance.exports.atomic_load(0));
+ assertTrue(0xACED === instance.exports.atomic_load(5 * kPageSize - 4));
+ assertTrue(0xACED === instance.exports.atomic_load(15 * kPageSize - 4));
+ assertTrue(15 === instance.exports.grow(2));
+ assertIsWasmSharedMemory(obj.memory);
+ assertTrue(obj.memory.buffer.byteLength === 17 * kPageSize);
+ // Validate previous writes.
+ instance.exports.atomic_store(17 * kPageSize - 4, 0xACED);
+ assertTrue(0xACED === instance.exports.atomic_load(0));
+ assertTrue(0xACED === instance.exports.atomic_load(5 * kPageSize - 4));
+ assertTrue(0xACED === instance.exports.atomic_load(15 * kPageSize - 4));
+ assertTrue(0xACED === instance.exports.atomic_load(17 * kPageSize - 4));
+ postMessage("OK");
+ }
+ }
+ let worker = new Worker(workerCode,
+ {type: 'function', arguments: [workerHelpers]});
let memory = new WebAssembly.Memory({initial: 5, maximum: 50, shared: true});
var builder = new WasmModuleBuilder();
builder.addImportedMemory("m", "memory", 5, 100, "shared");
diff --git a/deps/v8/test/mjsunit/wasm/js-api.js b/deps/v8/test/mjsunit/wasm/js-api.js
index 1151be4489..ca0f5e74c1 100644
--- a/deps/v8/test/mjsunit/wasm/js-api.js
+++ b/deps/v8/test/mjsunit/wasm/js-api.js
@@ -580,7 +580,7 @@ assertTrue(isConstructor(Table));
assertThrows(
() => Table(), TypeError, /must be invoked with 'new'/);
assertThrows(
- () => new Table(1), TypeError, 'WebAssembly.Module(): Argument 0 must be a table descriptor');
+ () => new Table(1), TypeError, 'WebAssembly.Table(): Argument 0 must be a table descriptor');
assertThrows(
() => new Table({initial: 1, element: 1}), TypeError, /must be a WebAssembly reference type/);
assertThrows(
diff --git a/deps/v8/test/mjsunit/wasm/loop-unrolling.js b/deps/v8/test/mjsunit/wasm/loop-unrolling.js
new file mode 100644
index 0000000000..b0e125413f
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/loop-unrolling.js
@@ -0,0 +1,146 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-typed-funcref --experimental-wasm-eh
+// Flags: --wasm-loop-unrolling
+// Needed for exceptions-utils.js.
+// Flags: --allow-natives-syntax
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+load("test/mjsunit/wasm/exceptions-utils.js");
+
+// Test the interaction between multireturn and loop unrolling.
+(function MultiBlockResultTest() {
+ let builder = new WasmModuleBuilder();
+
+ builder.addFunction("main", kSig_i_i)
+ .addBody([
+ ...wasmI32Const(1),
+ kExprLet, kWasmStmt, 1, 1, kWasmI32,
+ kExprLoop, kWasmStmt,
+ ...wasmI32Const(10),
+ kExprLet, kWasmStmt, 1, 1, kWasmI32,
+ kExprLocalGet, 0,
+ kExprLocalGet, 1,
+ kExprI32Sub,
+ kExprLocalGet, 2,
+ kExprI32Add,
+ kExprReturn, // (second let) - (first let) + parameter
+ kExprEnd,
+ kExprEnd,
+ kExprEnd,
+ ...wasmI32Const(0)])
+ .exportAs("main");
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ assertEquals(instance.exports.main(100), 109);
+})();
+
+// Test the interaction between the eh proposal and loop unrolling.
+
+(function TestRethrowNested() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except1 = builder.addException(kSig_v_v);
+ let except2 = builder.addException(kSig_v_v);
+ builder.addFunction("rethrow_nested", kSig_i_i)
+ .addBody([
+ kExprLoop, kWasmI32,
+ kExprTry, kWasmI32,
+ kExprLoop, kWasmI32,
+ kExprThrow, except2,
+ kExprEnd,
+ kExprCatch, except2,
+ kExprTry, kWasmI32,
+ kExprThrow, except1,
+ kExprCatch, except1,
+ kExprLocalGet, 0,
+ kExprI32Const, 0,
+ kExprI32Eq,
+ kExprIf, kWasmStmt,
+ kExprLoop, kWasmStmt,
+ kExprRethrow, 2,
+ kExprEnd,
+ kExprEnd,
+ kExprLocalGet, 0,
+ kExprI32Const, 1,
+ kExprI32Eq,
+ kExprIf, kWasmStmt,
+ kExprLoop, kWasmStmt,
+ kExprRethrow, 3,
+ kExprEnd,
+ kExprEnd,
+ kExprI32Const, 23,
+ kExprEnd,
+ kExprEnd,
+ kExprEnd])
+ .exportFunc();
+ let instance = builder.instantiate();
+
+ assertWasmThrows(instance, except1, [],
+ () => instance.exports.rethrow_nested(0));
+ assertWasmThrows(instance, except2, [],
+ () => instance.exports.rethrow_nested(1));
+ assertEquals(23, instance.exports.rethrow_nested(2));
+})();
+
+(function TestThrow() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except1 = builder.addException(kSig_v_v);
+ builder.addFunction("throw", kSig_i_i)
+ .addBody([
+ kExprLoop, kWasmStmt,
+ kExprLocalGet, 0,
+ kExprI32Const, 10,
+ kExprI32GtS,
+ kExprIf, kWasmStmt,
+ kExprThrow, except1,
+ kExprElse,
+ kExprLocalGet, 0,
+ kExprI32Const, 1,
+ kExprI32Add,
+ kExprLocalSet, 0,
+ kExprBr, 1,
+ kExprEnd,
+ kExprEnd,
+ kExprLocalGet, 0
+ ])
+ .exportFunc();
+
+ let instance = builder.instantiate();
+ assertWasmThrows(instance, except1, [], ()=>instance.exports.throw(0));
+})();
+
+(function TestThrowCatch() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except1 = builder.addException(kSig_v_v);
+ builder.addFunction("throw_catch", kSig_i_i)
+ .addBody([
+ kExprLoop, kWasmI32,
+ kExprTry, kWasmI32,
+ kExprLocalGet, 0,
+ kExprI32Const, 10,
+ kExprI32GtS,
+ kExprIf, kWasmStmt,
+ kExprThrow, except1,
+ kExprElse,
+ kExprLocalGet, 0,
+ kExprI32Const, 1,
+ kExprI32Add,
+ kExprLocalSet, 0,
+ kExprBr, 2,
+ kExprEnd,
+ kExprLocalGet, 0,
+ kExprCatch, except1,
+ kExprLocalGet, 0,
+ kExprEnd,
+ kExprEnd])
+ .exportFunc();
+
+ let instance = builder.instantiate();
+ assertEquals(11, instance.exports.throw_catch(0));
+})();
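For the MultiBlockResultTest above, the inline comment "(second let) - (first let) + parameter" pins down the asserted value; a one-line check of that arithmetic outside the module:

  // 10 (inner let) - 1 (outer let) + 100 (parameter) == 109, matching the assertEquals above.
  print(10 - 1 + 100);  // 109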
diff --git a/deps/v8/test/mjsunit/wasm/memory64.js b/deps/v8/test/mjsunit/wasm/memory64.js
new file mode 100644
index 0000000000..e764635846
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/memory64.js
@@ -0,0 +1,83 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-memory64
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+// We use standard JavaScript doubles to represent bytes and offsets. They offer
+// enough precision (53 bits) for every allowed memory size.
+
+function BasicMemory64Tests(num_pages) {
+ const num_bytes = num_pages * kPageSize;
+ print(`Testing ${num_bytes} bytes (${num_pages} pages)`);
+
+ let builder = new WasmModuleBuilder();
+ builder.addMemory64(num_pages, num_pages, true);
+
+ builder.addFunction('load', makeSig([kWasmF64], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0, // local.get 0
+ kExprI64UConvertF64, // i64.uconvert_sat.f64
+ kExprI32LoadMem, 0, 0, // i32.load_mem align=1 offset=0
+ ])
+ .exportFunc();
+ builder.addFunction('store', makeSig([kWasmF64, kWasmI32], []))
+ .addBody([
+ kExprLocalGet, 0, // local.get 0
+ kExprI64UConvertF64, // i64.uconvert_sat.f64
+ kExprLocalGet, 1, // local.get 1
+ kExprI32StoreMem, 0, 0, // i32.store_mem align=1 offset=0
+ ])
+ .exportFunc();
+
+ let module = builder.instantiate();
+ let memory = module.exports.memory;
+ let load = module.exports.load;
+ let store = module.exports.store;
+
+ let array = new Int8Array(memory.buffer);
+ assertEquals(num_bytes, array.length);
+
+ assertEquals(0, load(num_bytes - 4));
+ assertThrows(() => load(num_bytes - 3));
+
+ store(num_bytes - 4, 0x12345678);
+ assertEquals(0x12345678, load(num_bytes - 4));
+
+ let kStoreOffset = 27;
+ store(kStoreOffset, 11);
+ assertEquals(11, load(kStoreOffset));
+
+ // Now check 100 random positions. All except for kStoreOffset should be zero.
+ for (let i = 0; i < 100; ++i) {
+ let position = Math.floor(Math.random() * num_bytes);
+ if (position == kStoreOffset) continue;
+ assertEquals(0, array[position]);
+ }
+}
+
+(function TestSmallMemory() {
+ print(arguments.callee.name);
+ BasicMemory64Tests(4);
+})();
+
+(function Test3GBMemory() {
+ print(arguments.callee.name);
+ let num_pages = 3 * 1024 * 1024 * 1024 / kPageSize;
+ // This test can fail if 3GB of memory cannot be allocated.
+ try {
+ BasicMemory64Tests(num_pages);
+ } catch (e) {
+ assertInstanceof(e, RangeError);
+ assertMatches(/Out of memory/, e.message);
+ }
+})();
+
+// TODO(clemensb): Allow for memories >4GB and enable this test.
+//(function Test5GBMemory() {
+// print(arguments.callee.name);
+// let num_pages = 5 * 1024 * 1024 * 1024 / kPageSize;
+// BasicMemory64Tests(num_pages);
+//})();
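The comment at the top of this file leans on the fact that a JavaScript double carries 53 bits of integer precision, which comfortably covers every memory size these tests exercise. A quick sanity check of that claim in plain JS:

  // Even 5GB (the size in the disabled Test5GBMemory above) stays well below 2^53 - 1.
  const fiveGB = 5 * 1024 * 1024 * 1024;      // bytes
  print(fiveGB <= Number.MAX_SAFE_INTEGER);   // true; MAX_SAFE_INTEGER is 2^53 - 1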
diff --git a/deps/v8/test/mjsunit/wasm/module-memory.js b/deps/v8/test/mjsunit/wasm/module-memory.js
index 0f870e7815..d5a4e7119f 100644
--- a/deps/v8/test/mjsunit/wasm/module-memory.js
+++ b/deps/v8/test/mjsunit/wasm/module-memory.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --expose-gc --stress-compaction --allow-natives-syntax
+// Flags: --expose-wasm --expose-gc --stress-compaction --allow-natives-syntax --wasm-loop-unrolling
load("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/wasm/multi-value.js b/deps/v8/test/mjsunit/wasm/multi-value.js
index ca7467b759..2b22482d4d 100644
--- a/deps/v8/test/mjsunit/wasm/multi-value.js
+++ b/deps/v8/test/mjsunit/wasm/multi-value.js
@@ -2,14 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-mv
+// Flags: --experimental-wasm-mv --wasm-loop-unrolling
load("test/mjsunit/wasm/wasm-module-builder.js");
(function MultiBlockResultTest() {
print("MultiBlockResultTest");
let builder = new WasmModuleBuilder();
- let sig_i_ii = builder.addType(kSig_i_ii);
let sig_ii_v = builder.addType(kSig_ii_v);
builder.addFunction("main", kSig_i_ii)
diff --git a/deps/v8/test/mjsunit/wasm/origin-trial-flags.js b/deps/v8/test/mjsunit/wasm/origin-trial-flags.js
deleted file mode 100644
index eae8ceb58c..0000000000
--- a/deps/v8/test/mjsunit/wasm/origin-trial-flags.js
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --noexperimental-wasm-threads --allow-natives-syntax
-
-load("test/mjsunit/wasm/wasm-module-builder.js");
-
-function instantiateModuleWithThreads() {
- // Build a WebAssembly module which uses threads-features.
- const builder = new WasmModuleBuilder();
- const shared = true;
- builder.addMemory(2, 10, false, shared);
- builder.addFunction('main', kSig_i_ii)
- .addBody([
- kExprLocalGet, 0, kExprLocalGet, 1, kAtomicPrefix, kExprI32AtomicAdd, 2,
- 0
- ])
- .exportFunc();
-
- return builder.instantiate();
-}
-
-// Disable WebAssembly threads initially.
-%SetWasmThreadsEnabled(false);
-assertThrows(instantiateModuleWithThreads, WebAssembly.CompileError);
-
-// Enable WebAssembly threads.
-%SetWasmThreadsEnabled(true);
-assertInstanceof(instantiateModuleWithThreads(), WebAssembly.Instance);
-
-// Disable WebAssembly threads.
-%SetWasmThreadsEnabled(false);
-assertThrows(instantiateModuleWithThreads, WebAssembly.CompileError);
diff --git a/deps/v8/test/mjsunit/wasm/shared-arraybuffer-worker-simple-gc.js b/deps/v8/test/mjsunit/wasm/shared-arraybuffer-worker-simple-gc.js
index a32e6f4d15..2b9cd86a5b 100644
--- a/deps/v8/test/mjsunit/wasm/shared-arraybuffer-worker-simple-gc.js
+++ b/deps/v8/test/mjsunit/wasm/shared-arraybuffer-worker-simple-gc.js
@@ -7,13 +7,14 @@
const kNumIterations = 10;
function NewWorker() {
- let script =
-`onmessage = (msg) => {
- if (msg.memory) postMessage("ack");
- if (msg.quit) postMessage("bye");
- gc();
-}`;
- return new Worker(script, {type: 'string'});
+ function workerCode() {
+ onmessage = (msg) => {
+ if (msg.memory) postMessage("ack");
+ if (msg.quit) postMessage("bye");
+ gc();
+ }
+ }
+ return new Worker(workerCode, {type: 'function'});
}
function PingWorker(worker, memory) {
diff --git a/deps/v8/test/mjsunit/wasm/shared-memory-worker-gc.js b/deps/v8/test/mjsunit/wasm/shared-memory-worker-gc.js
index 6afc6115f8..4a24e6de93 100644
--- a/deps/v8/test/mjsunit/wasm/shared-memory-worker-gc.js
+++ b/deps/v8/test/mjsunit/wasm/shared-memory-worker-gc.js
@@ -11,12 +11,15 @@ function AllocMemory(pages = 1, max = pages) {
}
(function RunTest() {
- let worker = new Worker(
-`onmessage =
- function(msg) {
- if (msg.memory) postMessage({memory : msg.memory});
- gc();
-}`, {type : 'string'});
+ function workerCode() {
+ onmessage =
+ function(msg) {
+ if (msg.memory) postMessage({memory : msg.memory});
+ gc();
+ }
+ }
+
+ let worker = new Worker(workerCode);
let time = performance.now();
diff --git a/deps/v8/test/mjsunit/wasm/stack.js b/deps/v8/test/mjsunit/wasm/stack.js
index 1f3b8146da..4f91f58fc7 100644
--- a/deps/v8/test/mjsunit/wasm/stack.js
+++ b/deps/v8/test/mjsunit/wasm/stack.js
@@ -88,11 +88,11 @@ Error.prepareStackTrace = function(error, frames) {
module.exports.main();
verifyStack(stack, [
- // isWasm function line pos file offset funcIndex
- [ false, "STACK", 38, 0, "stack.js"],
- [ true, "main", 0, 1, null, '0x86', 1],
- [ false, "testStackFrames", 88, 0, "stack.js"],
- [ false, null, 97, 0, "stack.js"]
+ // isWasm function line pos file offset funcIndex
+ [ false, "STACK", 38, 0, "stack.js"],
+ [ true, "main", 1, 0x86, null, '0x86', 1],
+ [ false, "testStackFrames", 88, 0, "stack.js"],
+ [ false, null, 97, 0, "stack.js"]
]);
})();
@@ -104,7 +104,7 @@ Error.prepareStackTrace = function(error, frames) {
assertContains("unreachable", e.message);
verifyStack(e.stack, [
// isWasm function line pos file offset funcIndex
- [ true, "exec_unreachable", 0, 1, null, '0x8b', 2],
+ [ true, "exec_unreachable", 1, 0x8b, null, '0x8b', 2],
[ false, "testWasmUnreachable", 101, 0, "stack.js"],
[ false, null, 112, 0, "stack.js"]
]);
@@ -118,11 +118,11 @@ Error.prepareStackTrace = function(error, frames) {
} catch (e) {
assertContains("out of bounds", e.message);
verifyStack(e.stack, [
- // isWasm function line pos file offset funcIndex
- [ true, "mem_out_of_bounds", 0, 3, null, '0x91', 3],
- [ true, "call_mem_out_of_bounds", 0, 1, null, '0x97', 4],
- [ false, "testWasmMemOutOfBounds", 116, 0, "stack.js"],
- [ false, null, 128, 0, "stack.js"]
+ // isWasm function line pos file offset funcIndex
+ [ true, "mem_out_of_bounds", 1, 0x91, null, '0x91', 3],
+ [ true, "call_mem_out_of_bounds", 1, 0x97, null, '0x97', 4],
+ [ false, "testWasmMemOutOfBounds", 116, 0, "stack.js"],
+ [ false, null, 128, 0, "stack.js"]
]);
}
})();
@@ -147,11 +147,11 @@ Error.prepareStackTrace = function(error, frames) {
assertEquals("Maximum call stack size exceeded", e.message, "trap reason");
assertTrue(e.stack.length >= 4, "expected at least 4 stack entries");
verifyStack(e.stack.splice(0, 4), [
- // isWasm function line pos file offset funcIndex
- [ true, "recursion", 0, 0, null, '0x34', 0],
- [ true, "recursion", 0, 3, null, '0x37', 0],
- [ true, "recursion", 0, 3, null, '0x37', 0],
- [ true, "recursion", 0, 3, null, '0x37', 0]
+ // isWasm function line pos file offset funcIndex
+ [ true, "recursion", 1, 0x34, null, '0x34', 0],
+ [ true, "recursion", 1, 0x37, null, '0x37', 0],
+ [ true, "recursion", 1, 0x37, null, '0x37', 0],
+ [ true, "recursion", 1, 0x37, null, '0x37', 0]
]);
}
})();
@@ -175,10 +175,10 @@ Error.prepareStackTrace = function(error, frames) {
assertEquals('unreachable', e.message, 'trap reason');
let hexOffset = '0x' + (unreachable_pos + 0x25).toString(16);
verifyStack(e.stack, [
- // isWasm function line pos file offset funcIndex
- [ true, 'main', 0, unreachable_pos + 1, null, hexOffset, 0],
- [ false, 'testBigOffset', 172, 0, 'stack.js'],
- [ false, null, 184, 0, 'stack.js']
+ // isWasm function line pos file offset funcIndex
+ [ true, 'main', 1, unreachable_pos + 0x25, null, hexOffset, 0],
+ [ false, 'testBigOffset', 172, 0, 'stack.js'],
+ [ false, null, 184, 0, 'stack.js']
]);
}
})();
diff --git a/deps/v8/test/mjsunit/wasm/tier-down-to-liftoff.js b/deps/v8/test/mjsunit/wasm/tier-down-to-liftoff.js
index cbd763ddcd..3098926367 100644
--- a/deps/v8/test/mjsunit/wasm/tier-down-to-liftoff.js
+++ b/deps/v8/test/mjsunit/wasm/tier-down-to-liftoff.js
@@ -39,7 +39,7 @@ function checkTieredUp(instance) {
}
function check(instance) {
- %WasmTierDownModule(instance);
+ %WasmTierDown();
checkTieredDown(instance);
for (let i = 0; i < num_functions; ++i) {
@@ -47,7 +47,7 @@ function check(instance) {
}
checkTieredDown(instance);
- %WasmTierUpModule(instance);
+ %WasmTierUp();
checkTieredUp(instance);
}
diff --git a/deps/v8/test/mjsunit/wasm/trap-location.js b/deps/v8/test/mjsunit/wasm/trap-location.js
index db5a9390fd..a34162ab8c 100644
--- a/deps/v8/test/mjsunit/wasm/trap-location.js
+++ b/deps/v8/test/mjsunit/wasm/trap-location.js
@@ -23,16 +23,16 @@ function testTrapLocations(instance, expected_stack_length) {
// function.
assertTrue(
e.stack[1].toString().startsWith(function_name), 'stack depth');
- assertEquals(0, e.stack[0].getLineNumber(), 'wasmFunctionIndex');
+ assertEquals(1, e.stack[0].getLineNumber(), 'wasmFunctionIndex');
assertEquals(position, e.stack[0].getPosition(), 'position');
}
}
// The actual tests:
- testWasmTrap(0, kTrapDivByZero, 14);
- testWasmTrap(1, kTrapMemOutOfBounds, 15);
- testWasmTrap(2, kTrapUnreachable, 28);
- testWasmTrap(3, kTrapTableOutOfBounds, 32);
+ testWasmTrap(0, kTrapDivByZero, 73);
+ testWasmTrap(1, kTrapMemOutOfBounds, 74);
+ testWasmTrap(2, kTrapUnreachable, 87);
+ testWasmTrap(3, kTrapTableOutOfBounds, 91);
}
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/wasm/type-reflection-with-exnref.js b/deps/v8/test/mjsunit/wasm/type-reflection-with-exnref.js
deleted file mode 100644
index df655f6ce7..0000000000
--- a/deps/v8/test/mjsunit/wasm/type-reflection-with-exnref.js
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-wasm-type-reflection --experimental-wasm-eh
-
-load('test/mjsunit/wasm/wasm-module-builder.js');
-
-(function TestGlobalType() {
- let global = new WebAssembly.Global({value: "exnref", mutable: true});
- let type = WebAssembly.Global.type(global);
- assertEquals("exnref", type.value);
- assertEquals(true, type.mutable);
- assertEquals(2, Object.getOwnPropertyNames(type).length);
-
- global = new WebAssembly.Global({value: "exnref"});
- type = WebAssembly.Global.type(global);
- assertEquals("exnref", type.value);
- assertEquals(false, type.mutable);
- assertEquals(2, Object.getOwnPropertyNames(type).length);
-})();
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
index 7b9500eb70..b5021a313c 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -76,10 +76,12 @@ let kWasmFunctionTypeForm = 0x60;
let kWasmStructTypeForm = 0x5f;
let kWasmArrayTypeForm = 0x5e;
-let kLimitsNoMaximum = 0
-let kLimitsHasMaximum = 1;
-let kLimitsSharedNoMaximum = 2;
-let kLimitsSharedHasMaximum = 3;
+let kLimitsNoMaximum = 0x00;
+let kLimitsWithMaximum = 0x01;
+let kLimitsSharedNoMaximum = 0x02;
+let kLimitsSharedWithMaximum = 0x03;
+let kLimitsMemory64NoMaximum = 0x04;
+let kLimitsMemory64WithMaximum = 0x05;
// Segment flags
let kActiveNoIndex = 0;
@@ -118,7 +120,6 @@ let kWasmRtt = 0x69;
function wasmRtt(index, depth) {
return {opcode: kWasmRtt, index: index, depth: depth};
}
-let kWasmExnRef = 0x68;
let kExternalFunction = 0;
let kExternalTable = 1;
@@ -168,17 +169,14 @@ let kSig_f_d = makeSig([kWasmF64], [kWasmF32]);
let kSig_d_d = makeSig([kWasmF64], [kWasmF64]);
let kSig_r_r = makeSig([kWasmExternRef], [kWasmExternRef]);
let kSig_a_a = makeSig([kWasmAnyFunc], [kWasmAnyFunc]);
-let kSig_e_e = makeSig([kWasmExnRef], [kWasmExnRef]);
let kSig_i_r = makeSig([kWasmExternRef], [kWasmI32]);
let kSig_v_r = makeSig([kWasmExternRef], []);
let kSig_v_a = makeSig([kWasmAnyFunc], []);
-let kSig_v_e = makeSig([kWasmExnRef], []);
let kSig_v_rr = makeSig([kWasmExternRef, kWasmExternRef], []);
let kSig_v_aa = makeSig([kWasmAnyFunc, kWasmAnyFunc], []);
let kSig_r_v = makeSig([], [kWasmExternRef]);
let kSig_a_v = makeSig([], [kWasmAnyFunc]);
let kSig_a_i = makeSig([kWasmI32], [kWasmAnyFunc]);
-let kSig_e_v = makeSig([], [kWasmExnRef]);
let kSig_s_i = makeSig([kWasmI32], [kWasmS128]);
let kSig_i_s = makeSig([kWasmS128], [kWasmI32]);
@@ -626,7 +624,7 @@ let kExprS128Xor = 0x51;
let kExprS128Select = 0x52;
let kExprI8x16Abs = 0x60;
let kExprI8x16Neg = 0x61;
-let kExprV8x16AnyTrue = 0x62;
+let kExprV128AnyTrue = 0x62;
let kExprV8x16AllTrue = 0x63;
let kExprI8x16SConvertI16x8 = 0x65;
let kExprI8x16UConvertI16x8 = 0x66;
@@ -646,7 +644,6 @@ let kExprI8x16MaxU = 0x79;
let kExprI8x16RoundingAverageU = 0x7b;
let kExprI16x8Abs = 0x80;
let kExprI16x8Neg = 0x81;
-let kExprV16x8AnyTrue = 0x82;
let kExprV16x8AllTrue = 0x83;
let kExprI16x8SConvertI32x4 = 0x85;
let kExprI16x8UConvertI32x4 = 0x86;
@@ -671,7 +668,6 @@ let kExprI16x8MaxU = 0x99;
let kExprI16x8RoundingAverageU = 0x9b;
let kExprI32x4Abs = 0xa0;
let kExprI32x4Neg = 0xa1;
-let kExprV32x4AnyTrue = 0xa2;
let kExprV32x4AllTrue = 0xa3;
let kExprI32x4SConvertI16x8Low = 0xa7;
let kExprI32x4SConvertI16x8High = 0xa8;
@@ -1025,7 +1021,24 @@ class WasmModuleBuilder {
}
addMemory(min, max, exported, shared) {
- this.memory = {min: min, max: max, exported: exported, shared: shared};
+ this.memory = {
+ min: min,
+ max: max,
+ exported: exported,
+ shared: shared || false,
+ is_memory64: false
+ };
+ return this;
+ }
+
+ addMemory64(min, max, exported) {
+ this.memory = {
+ min: min,
+ max: max,
+ exported: exported,
+ shared: false,
+ is_memory64: true
+ };
return this;
}
@@ -1364,12 +1377,22 @@ class WasmModuleBuilder {
binary.emit_section(kMemorySectionCode, section => {
section.emit_u8(1); // one memory entry
const has_max = wasm.memory.max !== undefined;
- const is_shared = wasm.memory.shared !== undefined;
- section.emit_u8(is_shared
- ? (has_max ? kLimitsSharedHasMaximum : kLimitsSharedNoMaximum)
- : (has_max ? kLimitsHasMaximum : kLimitsNoMaximum));
- section.emit_u32v(wasm.memory.min);
- if (has_max) section.emit_u32v(wasm.memory.max);
+ if (wasm.memory.is_memory64) {
+ assertFalse(
+ wasm.memory.shared, 'sharing memory64 is not supported (yet)');
+ section.emit_u8(
+ has_max ? kLimitsMemory64WithMaximum : kLimitsMemory64NoMaximum);
+ section.emit_u64v(wasm.memory.min);
+ if (has_max) section.emit_u64v(wasm.memory.max);
+ } else {
+ section.emit_u8(
+ wasm.memory.shared ?
+ (has_max ? kLimitsSharedWithMaximum :
+ kLimitsSharedNoMaximum) :
+ (has_max ? kLimitsWithMaximum : kLimitsNoMaximum));
+ section.emit_u32v(wasm.memory.min);
+ if (has_max) section.emit_u32v(wasm.memory.max);
+ }
});
}
@@ -1427,10 +1450,6 @@ class WasmModuleBuilder {
section.emit_u8(kWasmAnyFunc);
}
break;
- case kWasmExnRef:
- section.emit_u8(kExprRefNull);
- section.emit_u8(kWasmExnRef);
- break;
default:
if (global.function_index !== undefined) {
section.emit_u8(kExprRefFunc);
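A short usage sketch of the new memory64 path in the builder above, assuming wasm-module-builder.js is loaded: addMemory64 marks the memory as 64-bit, and the memory section is then emitted with the 0x04/0x05 limit flags and u64v-encoded bounds, per the emit logic in the hunk above.

  // Sketch: declare a 64-bit memory with a maximum. Per the code above, the section payload
  // is expected to be [0x01 /*count*/, 0x05 /*kLimitsMemory64WithMaximum*/, min, max] in LEB form.
  const b = new WasmModuleBuilder();
  b.addMemory64(1, 2, /*exported=*/ false);
  const bytes = b.toBuffer();  // contains the memory section encoded as described above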
diff --git a/deps/v8/test/mjsunit/wasm/worker-memory.js b/deps/v8/test/mjsunit/wasm/worker-memory.js
index bf5430f713..28a6924ea7 100644
--- a/deps/v8/test/mjsunit/wasm/worker-memory.js
+++ b/deps/v8/test/mjsunit/wasm/worker-memory.js
@@ -19,34 +19,39 @@
})();
// Can't use assert in a worker.
-let workerHelpers =
- `function assertTrue(value, msg) {
+function workerHelpersHelper() {
+ assertTrue = function(value, msg) {
if (!value) {
postMessage("Error: " + msg);
throw new Error("Exit"); // To stop testing.
}
}
- function assertIsWasmMemory(memory, expectedSize) {
- assertTrue(memory instanceof WebAssembly.Memory,
- "object is not a WebAssembly.Memory");
+ assertIsWasmMemory = function(memory, expectedSize) {
+ assertTrue(memory instanceof WebAssembly.Memory,
+ "object is not a WebAssembly.Memory");
- assertTrue(memory.buffer instanceof SharedArrayBuffer,
- "object.buffer is not a SharedArrayBuffer");
+ assertTrue(memory.buffer instanceof SharedArrayBuffer,
+ "object.buffer is not a SharedArrayBuffer");
- assertTrue(memory.buffer.byteLength == expectedSize,
- "object.buffer.byteLength is not " + expectedSize + " bytes");
- }
-`;
+ assertTrue(memory.buffer.byteLength == expectedSize,
+ "object.buffer.byteLength is not " + expectedSize + " bytes");
+ }
+}
+
+let workerHelpers = "(" + workerHelpersHelper.toString() + ")()";
(function TestPostMessageSharedMemory() {
- let workerScript = workerHelpers +
- `onmessage = function(memory) {
- assertIsWasmMemory(memory, 65536);
- postMessage("OK");
- };`;
+ function workerCode(workerHelpers) {
+ eval(workerHelpers);
+ onmessage = function(memory) {
+ assertIsWasmMemory(memory, 65536);
+ postMessage("OK");
+ };
+ }
- let worker = new Worker(workerScript, {type: 'string'});
+ let worker = new Worker(workerCode,
+ {type: 'function', arguments: [workerHelpers]});
let memory = new WebAssembly.Memory({initial: 1, maximum: 2, shared: true});
worker.postMessage(memory);
assertEquals("OK", worker.getMessage());
@@ -54,8 +59,9 @@ let workerHelpers =
})();
(function TestPostMessageComplexObjectWithSharedMemory() {
- let workerScript = workerHelpers +
- `onmessage = function(obj) {
+ function workerCode(workerHelpers) {
+ eval(workerHelpers);
+ onmessage = function(obj) {
assertIsWasmMemory(obj.memories[0], 65536);
assertIsWasmMemory(obj.memories[1], 65536);
assertTrue(obj.buffer instanceof SharedArrayBuffer,
@@ -65,9 +71,11 @@ let workerHelpers =
"buffers aren't equal");
assertTrue(obj.foo === 1, "foo is not 1");
postMessage("OK");
- };`;
+ };
+ }
- let worker = new Worker(workerScript, {type: 'string'});
+ let worker = new Worker(workerCode,
+ {type: 'function', arguments: [workerHelpers]});
let memory = new WebAssembly.Memory({initial: 1, maximum: 2, shared: true});
let obj = {memories: [memory, memory], buffer: memory.buffer, foo: 1};
worker.postMessage(obj);
@@ -76,14 +84,18 @@ let workerHelpers =
})();
(function TestTwoWorkers() {
- let workerScript = workerHelpers +
- `onmessage = function(memory) {
+ function workerCode(workerHelpers) {
+ eval(workerHelpers);
+ onmessage = function(memory) {
assertIsWasmMemory(memory, 65536);
postMessage("OK");
- };`;
+ };
+ }
- let workers = [new Worker(workerScript, {type: 'string'}),
- new Worker(workerScript, {type: 'string'})];
+ let workers = [new Worker(workerCode,
+ {type: 'function', arguments: [workerHelpers]}),
+ new Worker(workerCode,
+ {type: 'function', arguments: [workerHelpers]})];
let memory = new WebAssembly.Memory({initial: 1, maximum: 2, shared: true});
for (let worker of workers) {
worker.postMessage(memory);
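For reference, the function-typed d8 Worker pattern that the rewritten tests above share, sketched editorially from the hunks (identifiers mirror the test code; this block is not part of the diff):

// The worker body is a plain function; shared helpers travel as stringified
// source that the worker eval()s before installing its onmessage handler.
function workerCode(workerHelpers) {
  eval(workerHelpers);  // defines assertTrue and assertIsWasmMemory
  onmessage = function(memory) {
    assertIsWasmMemory(memory, 65536);
    postMessage("OK");
  };
}
let worker = new Worker(workerCode, {type: 'function', arguments: [workerHelpers]});
worker.postMessage(new WebAssembly.Memory({initial: 1, maximum: 2, shared: true}));
assertEquals("OK", worker.getMessage());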
diff --git a/deps/v8/test/mjsunit/wasm/worker-module.js b/deps/v8/test/mjsunit/wasm/worker-module.js
index ac0be10bfc..4dbe887d95 100644
--- a/deps/v8/test/mjsunit/wasm/worker-module.js
+++ b/deps/v8/test/mjsunit/wasm/worker-module.js
@@ -12,7 +12,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
let module = builder.toModule();
- let workerScript = `
+ function workerCode() {
onmessage = function(module) {
try {
let instance = new WebAssembly.Instance(module);
@@ -22,9 +22,9 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
postMessage('ERROR: ' + e);
}
}
- `;
+ }
- let worker = new Worker(workerScript, {type: 'string'});
+ let worker = new Worker(workerCode, {type: 'function'});
worker.postMessage(module);
assertEquals(42, worker.getMessage());
worker.terminate();
diff --git a/deps/v8/test/mjsunit/worker-ping-test.js b/deps/v8/test/mjsunit/worker-ping-test.js
index 5c50b7d13b..6919665fa0 100644
--- a/deps/v8/test/mjsunit/worker-ping-test.js
+++ b/deps/v8/test/mjsunit/worker-ping-test.js
@@ -44,53 +44,63 @@ function RunWorkerPingTest(config) {
// Every {config.allocInterval}, a worker creates a new thing by
// {config.AllocThing}.
- let script =
-`const kNumThings = ${config.numThings};
- const kAllocInterval = ${config.allocInterval};
- let index = 0;
- let total = 0;
- let id = 0;
- let things = new Array(kNumThings);
- for (let i = 0; i < kNumThings; i++) {
- things[i] = TryAllocThing();
- }
+ function workerCode(config, AllocThing, BeforeReceive) {
+ eval(AllocThing);
+ eval(BeforeReceive);
+ const kNumThings = config.numThings;
+ const kAllocInterval = config.allocInterval;
+ let index = 0;
+ let total = 0;
+ let id = 0;
+ let things = new Array(kNumThings);
+ for (let i = 0; i < kNumThings; i++) {
+ things[i] = TryAllocThing();
+ }
- function TryAllocThing() {
- try {
- let thing = AllocThing(id++);
- ${config.traceAlloc ? "print(\"alloc success\");" : ""}
+ function TryAllocThing() {
+ try {
+ let thing = AllocThing(id++);
+ if (config.traceAlloc) {
+ print("alloc success");
+ }
return thing;
} catch(e) {
- ${config.abortOnFail ? "postMessage({error: e.toString()}); throw e;" : "" }
- ${config.traceAlloc ? "print(\"alloc fail: \" + e);" : ""}
+ if (config.abortOnFail) {
+ postMessage({error: e.toString()}); throw e;
+ }
+ if (config.traceAlloc) {
+ print("alloc fail: " + e);
+ }
}
- }
+ }
- onmessage = function(msg) {
- BeforeReceive(msg);
- if (msg.thing !== undefined) {
- let reply = things[index];
- if ((total % kAllocInterval) == 0) {
- reply = TryAllocThing();
+ onmessage = function(msg) {
+ BeforeReceive(msg);
+ if (msg.thing !== undefined) {
+ let reply = things[index];
+ if ((total % kAllocInterval) == 0) {
+ reply = TryAllocThing();
+ }
+ things[index] = msg.thing;
+ postMessage({thing : reply});
+ index = (index + 1) % kNumThings;
+ total++;
}
- things[index] = msg.thing;
- postMessage({thing : reply});
- index = (index + 1) % kNumThings;
- total++;
}
}
- ${config.AllocThing.toString()}
- ${beforeReceive.toString()}
- `;
if (config.traceScript) {
print("========== Worker script ==========");
- print(script);
+ print(workerCode.toString());
print("===================================");
}
+ let arguments = [config,
+ config.AllocThing.toString(),
+ beforeReceive.toString()];
for (let i = 0; i < config.numWorkers; i++) {
- let worker = new Worker(script, {type : 'string'});
+ let worker = new Worker(workerCode, {type: 'function',
+ arguments: arguments});
workers.push(worker);
}
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 45c9a9745a..f25a39daaf 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -1068,4 +1068,10 @@
'*': [SKIP], # only relevant for mjsunit tests.
}],
+################################################################################
+['variant == stress_concurrent_inlining', {
+ # Timeout during TurboFan optimization.
+ 'ecma/FunctionObjects/15.3.1.1-3': [SKIP],
+}],
+
]
diff --git a/deps/v8/test/test262/OWNERS b/deps/v8/test/test262/OWNERS
index 56c71e23b1..b634e10b6f 100644
--- a/deps/v8/test/test262/OWNERS
+++ b/deps/v8/test/test262/OWNERS
@@ -1,3 +1,2 @@
adamk@chromium.org
-gsathya@chromium.org
syg@chromium.org
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index d5a49570ec..d94b2caa98 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -79,7 +79,6 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=4895
'built-ins/TypedArrayConstructors/internals/DefineOwnProperty/tonumber-value-detached-buffer': [FAIL],
'built-ins/TypedArrayConstructors/internals/DefineOwnProperty/BigInt/tonumber-value-detached-buffer': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/GetOwnProperty/BigInt/index-prop-desc': [FAIL],
# Some TypedArray methods throw due to the same bug, from Get
'built-ins/TypedArray/prototype/every/callbackfn-detachbuffer': [FAIL],
'built-ins/TypedArray/prototype/every/BigInt/callbackfn-detachbuffer': [FAIL],
@@ -581,6 +580,13 @@
'built-ins/String/prototype/at/*': [FAIL],
'built-ins/TypedArray/prototype/at/*': [FAIL],
+ # Temporarily disabled until upstream tests are changed to use /d
+ 'built-ins/RegExp/match-indices/*': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=11411
+ 'intl402/DateTimeFormat/prototype/formatRange/date-same-returns-single-date': [FAIL],
+ 'intl402/DateTimeFormat/prototype/formatRangeToParts/date-same-returns-single-date': [FAIL],
+
######################## NEEDS INVESTIGATION ###########################
# https://bugs.chromium.org/p/v8/issues/detail?id=7833
@@ -730,6 +736,8 @@
'built-ins/ArrayBuffer/length-is-too-large-throws': [SKIP],
'built-ins/SharedArrayBuffer/allocation-limit': [SKIP],
'built-ins/SharedArrayBuffer/length-is-too-large-throws': [SKIP],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=11438
+ 'intl402/DateTimeFormat/timezone-invalid' : [SKIP],
}], # asan == True or msan == True or tsan == True
['system == android', {
diff --git a/deps/v8/test/torque/test-torque.tq b/deps/v8/test/torque/test-torque.tq
index f5431fbc9d..1e11465f5a 100644
--- a/deps/v8/test/torque/test-torque.tq
+++ b/deps/v8/test/torque/test-torque.tq
@@ -711,9 +711,6 @@ macro TestFrame1(implicit context: Context)() {
case (_f: StandardFrame): {
unreachable;
}
- case (_f: ArgumentsAdaptorFrame): {
- unreachable;
- }
case (_f: StubFrame): {
}
}
@@ -1353,4 +1350,64 @@ macro TestOffHeapSlice(ptr: RawPtr<char8>, length: intptr) {
check(*onHeapSlice.AtIndex(i) == *offHeapSlice.AtIndex(i));
}
}
+
+struct TwoValues {
+ a: Smi;
+ b: Map;
+}
+
+builtin ReturnTwoValues(implicit context: Context)(
+ value: Smi, obj: HeapObject): TwoValues {
+ return TwoValues{a: value + 1, b: obj.map};
+}
+
+@export
+macro TestCallMultiReturnBuiltin(implicit context: Context)() {
+ const result = ReturnTwoValues(444, FromConstexpr<String>('hi'));
+ check(result.a == 445);
+ check(result.b == FromConstexpr<String>('hi').map);
+}
+
+@export
+macro TestRunLazyTwice(lazySmi: Lazy<Smi>): Smi {
+ const firstResult = RunLazy(lazySmi);
+ const secondResult = RunLazy(lazySmi);
+ return firstResult + secondResult;
+}
+
+macro GetLazySmi(): Smi {
+ return 3;
+}
+
+macro AddTwoSmiValues(a: Smi, b: Smi): Smi {
+ return a + b;
+}
+
+macro AddSmiAndConstexprValues(a: Smi, b: constexpr int31): Smi {
+ return a + b;
+}
+
+@export
+macro TestCreateLazyNodeFromTorque() {
+ const lazy = %MakeLazy<Smi>('GetLazySmi');
+ const result = TestRunLazyTwice(lazy);
+ check(result == 6);
+
+ // The macro can also be referred to using namespace qualifications.
+ const lazy2 = %MakeLazy<Smi>('test::GetLazySmi');
+ const result2 = TestRunLazyTwice(lazy2);
+ check(result2 == 6);
+
+ // We can save params to the macro. The most common usage is likely a
+ // single-arg macro that just returns the arg, but we can use any number of
+ // params.
+ const lazy3 = %MakeLazy<Smi>('AddTwoSmiValues', 5, 6);
+ const result3 = TestRunLazyTwice(lazy3);
+ check(result3 == 22);
+
+ // It's okay if some of the params are constexpr and some aren't.
+ const lazy4 = %MakeLazy<Smi>('AddSmiAndConstexprValues', 7, 8);
+ const result4 = TestRunLazyTwice(lazy4);
+ check(result4 == 30);
+}
}
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index d98c5b82c1..1940dfa77e 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -10,6 +10,7 @@ if (is_fuchsia) {
cr_fuchsia_package("v8_unittests_pkg") {
testonly = true
binary = ":unittests"
+ manifest = "//build/config/fuchsia/tests-with-exec.cmx"
package_name_override = "v8_unittests"
}
@@ -60,6 +61,9 @@ v8_source_set("v8_cppgc_shared_unittests_sources") {
if (cppgc_is_standalone) {
v8_executable("cppgc_unittests") {
testonly = true
+ if (v8_current_cpu == "riscv64") {
+ libs = [ "atomic" ]
+ }
configs = [
"../..:external_config",
@@ -96,6 +100,7 @@ v8_source_set("cppgc_unittests_sources") {
"heap/cppgc/heap-growing-unittest.cc",
"heap/cppgc/heap-object-header-unittest.cc",
"heap/cppgc/heap-page-unittest.cc",
+ "heap/cppgc/heap-statistics-collector-unittest.cc",
"heap/cppgc/heap-unittest.cc",
"heap/cppgc/incremental-marking-schedule-unittest.cc",
"heap/cppgc/logging-unittest.cc",
@@ -103,8 +108,10 @@ v8_source_set("cppgc_unittests_sources") {
"heap/cppgc/marking-verifier-unittest.cc",
"heap/cppgc/marking-visitor-unittest.cc",
"heap/cppgc/member-unittest.cc",
+ "heap/cppgc/metric-recorder-unittest.cc",
"heap/cppgc/minor-gc-unittest.cc",
"heap/cppgc/name-trait-unittest.cc",
+ "heap/cppgc/object-size-trait-unittest.cc",
"heap/cppgc/object-start-bitmap-unittest.cc",
"heap/cppgc/page-memory-unittest.cc",
"heap/cppgc/persistent-family-unittest.cc",
@@ -116,6 +123,7 @@ v8_source_set("cppgc_unittests_sources") {
"heap/cppgc/sweeper-unittest.cc",
"heap/cppgc/test-platform.cc",
"heap/cppgc/test-platform.h",
+ "heap/cppgc/testing-unittest.cc",
"heap/cppgc/tests.cc",
"heap/cppgc/tests.h",
"heap/cppgc/visitor-unittest.cc",
@@ -134,8 +142,13 @@ v8_source_set("cppgc_unittests_sources") {
"//testing/gtest",
]
- if (!cppgc_is_standalone) {
- deps += [ "../..:v8_tracing" ]
+ if (cppgc_is_standalone) {
+ deps += [ "../..:cppgc_for_testing" ]
+ } else {
+ deps += [
+ "../..:v8_for_testing",
+ "../..:v8_tracing",
+ ]
}
}
@@ -190,8 +203,6 @@ v8_source_set("unittests_sources") {
"api/remote-object-unittest.cc",
"api/resource-constraints-unittest.cc",
"api/v8-object-unittest.cc",
- "asmjs/asm-scanner-unittest.cc",
- "asmjs/asm-types-unittest.cc",
"base/address-region-unittest.cc",
"base/atomic-utils-unittest.cc",
"base/bits-unittest.cc",
@@ -294,6 +305,7 @@ v8_source_set("unittests_sources") {
"heap/gc-tracer-unittest.cc",
"heap/heap-controller-unittest.cc",
"heap/heap-unittest.cc",
+ "heap/heap-utils.cc",
"heap/heap-utils.h",
"heap/index-generator-unittest.cc",
"heap/item-parallel-job-unittest.cc",
@@ -389,6 +401,13 @@ v8_source_set("unittests_sources") {
"zone/zone-unittest.cc",
]
+ if (v8_enable_webassembly) {
+ sources += [
+ "asmjs/asm-scanner-unittest.cc",
+ "asmjs/asm-types-unittest.cc",
+ ]
+ }
+
if (v8_enable_wasm_gdb_remote_debugging) {
sources += [ "wasm/wasm-gdbserver-unittest.cc" ]
}
@@ -422,6 +441,11 @@ v8_source_set("unittests_sources") {
"assembler/turbo-assembler-mips64-unittest.cc",
"compiler/mips64/instruction-selector-mips64-unittest.cc",
]
+ } else if (v8_current_cpu == "riscv64") {
+ sources += [
+ "assembler/turbo-assembler-riscv64-unittest.cc",
+ "compiler/riscv64/instruction-selector-riscv64-unittest.cc",
+ ]
} else if (v8_current_cpu == "x64") {
sources += [
"assembler/turbo-assembler-x64-unittest.cc",
@@ -459,6 +483,7 @@ v8_source_set("unittests_sources") {
"../..:v8_for_testing",
"../..:v8_libbase",
"../..:v8_libplatform",
+ "../..:v8_shared_internal_headers",
"../..:v8_wrappers",
"../..:wasm_test_common",
"../../third_party/inspector_protocol:crdtp_test",
diff --git a/deps/v8/test/unittests/api/remote-object-unittest.cc b/deps/v8/test/unittests/api/remote-object-unittest.cc
index a73db835a4..5b350365c4 100644
--- a/deps/v8/test/unittests/api/remote-object-unittest.cc
+++ b/deps/v8/test/unittests/api/remote-object-unittest.cc
@@ -39,7 +39,7 @@ TEST_F(RemoteObjectTest, CreationContextOfRemoteContext) {
Local<Object> remote_context =
Context::NewRemoteContext(isolate(), global_template).ToLocalChecked();
- EXPECT_TRUE(remote_context->CreationContext().IsEmpty());
+ EXPECT_TRUE(remote_context->GetCreationContext().IsEmpty());
}
TEST_F(RemoteObjectTest, CreationContextOfRemoteObject) {
@@ -51,7 +51,7 @@ TEST_F(RemoteObjectTest, CreationContextOfRemoteObject) {
Local<Object> remote_object =
constructor_template->NewRemoteInstance().ToLocalChecked();
- EXPECT_TRUE(remote_object->CreationContext().IsEmpty());
+ EXPECT_TRUE(remote_object->GetCreationContext().IsEmpty());
}
TEST_F(RemoteObjectTest, RemoteContextInstanceChecks) {
diff --git a/deps/v8/test/unittests/api/v8-object-unittest.cc b/deps/v8/test/unittests/api/v8-object-unittest.cc
index a3c0c2574c..9ebdb12fa7 100644
--- a/deps/v8/test/unittests/api/v8-object-unittest.cc
+++ b/deps/v8/test/unittests/api/v8-object-unittest.cc
@@ -77,8 +77,9 @@ TEST_F(LapContextTest, CurrentContextInLazyAccessorOnPrototype) {
Local<Object> object =
interface_for_receiver->NewInstance(receiver_context).ToLocalChecked();
object->SetPrototype(caller_context, prototype).ToChecked();
- EXPECT_EQ(receiver_context, object->CreationContext());
- EXPECT_EQ(prototype_context, prototype->CreationContext());
+ EXPECT_EQ(receiver_context, object->GetCreationContext().ToLocalChecked());
+ EXPECT_EQ(prototype_context,
+ prototype->GetCreationContext().ToLocalChecked());
EXPECT_EQ(0, call_count);
object->Get(caller_context, property_key).ToLocalChecked();
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-riscv64-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-riscv64-unittest.cc
new file mode 100644
index 0000000000..6084d36dfc
--- /dev/null
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-riscv64-unittest.cc
@@ -0,0 +1,64 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/riscv64/assembler-riscv64-inl.h"
+#include "src/execution/simulator.h"
+#include "test/common/assembler-tester.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ tasm.
+
+// Test the RISC-V 64 assembler by compiling some simple functions into
+// a buffer and executing them. These tests do not initialize the
+// V8 library, create a context, or use any V8 objects.
+
+class TurboAssemblerTest : public TestWithIsolate {};
+
+TEST_F(TurboAssemblerTest, TestHardAbort) {
+ auto buffer = AllocateAssemblerBuffer();
+ TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
+ __ set_root_array_available(false);
+ __ set_abort_hard(true);
+ __ Abort(AbortReason::kNoReason);
+
+ CodeDesc desc;
+ tasm.GetCode(nullptr, &desc);
+ buffer->MakeExecutable();
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
+}
+
+TEST_F(TurboAssemblerTest, TestCheck) {
+ auto buffer = AllocateAssemblerBuffer();
+ TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
+ __ set_root_array_available(false);
+ __ set_abort_hard(true);
+
+ // Fail if the first parameter (in {a0}) is 17.
+ __ Check(Condition::ne, AbortReason::kNoReason, a0, Operand(17));
+ __ Ret();
+
+ CodeDesc desc;
+ tasm.GetCode(nullptr, &desc);
+ buffer->MakeExecutable();
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
+
+ f.Call(0);
+ f.Call(18);
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason");
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/codegen/source-position-table-unittest.cc b/deps/v8/test/unittests/codegen/source-position-table-unittest.cc
index edf554a8ea..6f966255f8 100644
--- a/deps/v8/test/unittests/codegen/source-position-table-unittest.cc
+++ b/deps/v8/test/unittests/codegen/source-position-table-unittest.cc
@@ -61,7 +61,7 @@ TEST_F(SourcePositionTableTest, EncodeExpression) {
CHECK(!builder()->ToSourcePositionTable(isolate()).is_null());
}
-TEST_F(SourcePositionTableTest, EncodeAscending) {
+TEST_F(SourcePositionTableTest, EncodeAscendingPositive) {
int code_offset = 0;
int source_position = 0;
for (size_t i = 0; i < arraysize(offsets); i++) {
@@ -74,7 +74,13 @@ TEST_F(SourcePositionTableTest, EncodeAscending) {
}
}
- // Also test negative offsets for source positions:
+ CHECK(!builder()->ToSourcePositionTable(isolate()).is_null());
+}
+
+TEST_F(SourcePositionTableTest, EncodeAscendingNegative) {
+ int code_offset = 0;
+ // Start with a big source position, then decrement it.
+ int source_position = 1 << 26;
for (size_t i = 0; i < arraysize(offsets); i++) {
code_offset += offsets[i];
source_position -= offsets[i];
diff --git a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
index 0ee9aad0d4..0e323f6afa 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
@@ -76,8 +76,8 @@ TEST_F(OptimizingCompileDispatcherTest, NonBlockingFlush) {
Handle<JSFunction> fun =
RunJS<JSFunction>("function f() { function g() {}; return g;}; f();");
IsCompiledScope is_compiled_scope;
- ASSERT_TRUE(
- Compiler::Compile(fun, Compiler::CLEAR_EXCEPTION, &is_compiled_scope));
+ ASSERT_TRUE(Compiler::Compile(i_isolate(), fun, Compiler::CLEAR_EXCEPTION,
+ &is_compiled_scope));
BlockingCompilationJob* job = new BlockingCompilationJob(i_isolate(), fun);
OptimizingCompileDispatcher dispatcher(i_isolate());
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
index f7be8bbfb7..0c0214ce43 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
@@ -149,7 +149,7 @@ const FrameStateFunctionInfo*
InstructionSelectorTest::StreamBuilder::GetFrameStateFunctionInfo(
int parameter_count, int local_count) {
return common()->CreateFrameStateFunctionInfo(
- FrameStateType::kInterpretedFunction, parameter_count, local_count,
+ FrameStateType::kUnoptimizedFunction, parameter_count, local_count,
Handle<SharedFunctionInfo>());
}
@@ -333,7 +333,7 @@ TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
StreamBuilder m(this, MachineType::AnyTagged(), MachineType::AnyTagged(),
MachineType::AnyTagged(), MachineType::AnyTagged());
- BailoutId bailout_id(42);
+ BytecodeOffset bailout_id(42);
Node* function_node = m.Parameter(0);
Node* receiver = m.Parameter(1);
@@ -361,7 +361,7 @@ TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
m.common()->FrameState(bailout_id, OutputFrameStateCombine::PokeAt(0),
m.GetFrameStateFunctionInfo(1, 0)),
parameters, locals, stack, context_sentinel, function_node,
- m.UndefinedConstant());
+ m.graph()->start());
// Build the call.
Node* nodes[] = {function_node, receiver, m.UndefinedConstant(),
@@ -389,7 +389,7 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeopt) {
StreamBuilder m(this, MachineType::AnyTagged(), MachineType::AnyTagged(),
MachineType::AnyTagged(), MachineType::AnyTagged());
- BailoutId bailout_id_before(42);
+ BytecodeOffset bailout_id_before(42);
// Some arguments for the call node.
Node* function_node = m.Parameter(0);
@@ -421,7 +421,7 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeopt) {
OutputFrameStateCombine::PokeAt(0),
m.GetFrameStateFunctionInfo(1, 1)),
parameters, locals, stack, context_sentinel, function_node,
- m.UndefinedConstant());
+ m.graph()->start());
// Build the call.
Node* stub_code = m.HeapConstant(callable.code());
@@ -481,8 +481,8 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) {
StreamBuilder m(this, MachineType::AnyTagged(), MachineType::AnyTagged(),
MachineType::AnyTagged(), MachineType::AnyTagged());
- BailoutId bailout_id_before(42);
- BailoutId bailout_id_parent(62);
+ BytecodeOffset bailout_id_before(42);
+ BytecodeOffset bailout_id_parent(62);
// Some arguments for the call node.
Node* function_node = m.Parameter(0);
@@ -512,7 +512,7 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) {
m.common()->FrameState(bailout_id_parent,
OutputFrameStateCombine::Ignore(),
m.GetFrameStateFunctionInfo(1, 1)),
- parameters, locals, stack, context, function_node, m.UndefinedConstant());
+ parameters, locals, stack, context, function_node, m.graph()->start());
Node* parameters2 = m.AddNode(
m.common()->TypedStateValues(&int32_type, SparseInputMask::Dense()),
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc
index 01ec8a0fe8..c66685b710 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc
@@ -361,6 +361,9 @@ InstructionOperand InstructionSequenceTest::ConvertInputOp(TestOperand op) {
case kSlot:
return Unallocated(op, UnallocatedOperand::MUST_HAVE_SLOT,
UnallocatedOperand::USED_AT_START);
+ case kDeoptArg:
+ return Unallocated(op, UnallocatedOperand::REGISTER_OR_SLOT,
+ UnallocatedOperand::USED_AT_END);
case kFixedRegister: {
MachineRepresentation rep = GetCanonicalRep(op);
CHECK(0 <= op.value_ && op.value_ < GetNumRegs(rep));
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h
index 763c63bfd9..988bc37281 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h
+++ b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h
@@ -51,7 +51,8 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
kNone,
kConstant,
kUnique,
- kUniqueRegister
+ kUniqueRegister,
+ kDeoptArg
};
struct TestOperand {
@@ -104,6 +105,10 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
return TestOperand(kConstant, index);
}
+ static TestOperand DeoptArg(VReg vreg) {
+ return TestOperand(kDeoptArg, vreg);
+ }
+
static TestOperand Use(VReg vreg) { return TestOperand(kNone, vreg); }
static TestOperand Use() { return Use(VReg()); }
diff --git a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
index 2c89252d24..97ddd8ee52 100644
--- a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
@@ -61,7 +61,7 @@ class BytecodeAnalysisTest : public TestWithIsolateAndZone {
Handle<BytecodeArray> bytecode,
const std::vector<std::pair<std::string, std::string>>&
expected_liveness) {
- BytecodeAnalysis analysis(bytecode, zone(), BailoutId::None(), true);
+ BytecodeAnalysis analysis(bytecode, zone(), BytecodeOffset::None(), true);
interpreter::BytecodeArrayIterator iterator(bytecode);
for (auto liveness : expected_liveness) {
@@ -93,8 +93,6 @@ TEST_F(BytecodeAnalysisTest, EmptyBlock) {
interpreter::BytecodeArrayBuilder builder(zone(), 3, 3);
std::vector<std::pair<std::string, std::string>> expected_liveness;
- interpreter::Register reg_0(0);
-
builder.Return();
expected_liveness.emplace_back("...L", "....");
@@ -229,7 +227,6 @@ TEST_F(BytecodeAnalysisTest, SimpleLoop) {
std::vector<std::pair<std::string, std::string>> expected_liveness;
interpreter::Register reg_0(0);
- interpreter::Register reg_1(1);
interpreter::Register reg_2(2);
// Kill r0.
diff --git a/deps/v8/test/unittests/compiler/decompression-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/decompression-optimizer-unittest.cc
index 5fe00ac5db..3e17b3cb7b 100644
--- a/deps/v8/test/unittests/compiler/decompression-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/decompression-optimizer-unittest.cc
@@ -252,7 +252,7 @@ TEST_F(DecompressionOptimizerTest, TypedStateValues) {
Node* constant_2 =
graph()->NewNode(common()->HeapConstant(heap_constants[j]));
graph()->SetEnd(graph()->NewNode(
- common()->FrameState(BailoutId::None(),
+ common()->FrameState(BytecodeOffset::None(),
OutputFrameStateCombine::Ignore(), nullptr),
typed_state_values, typed_state_values, typed_state_values,
constant_2, UndefinedConstant(), graph()->start()));
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.cc b/deps/v8/test/unittests/compiler/graph-unittest.cc
index 9f07449733..4a197d557d 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-unittest.cc
@@ -93,11 +93,11 @@ Node* GraphTest::EmptyFrameState() {
graph()->NewNode(common()->StateValues(0, SparseInputMask::Dense()));
FrameStateFunctionInfo const* function_info =
common()->CreateFrameStateFunctionInfo(
- FrameStateType::kInterpretedFunction, 0, 0,
+ FrameStateType::kUnoptimizedFunction, 0, 0,
Handle<SharedFunctionInfo>());
return graph()->NewNode(
- common()->FrameState(BailoutId::None(), OutputFrameStateCombine::Ignore(),
- function_info),
+ common()->FrameState(BytecodeOffset::None(),
+ OutputFrameStateCombine::Ignore(), function_info),
state_values, state_values, state_values, NumberConstant(0),
UndefinedConstant(), graph()->start());
}
diff --git a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
index a10f246491..de1271bc4c 100644
--- a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
@@ -54,9 +54,9 @@ class JSCreateLoweringTest : public TypedGraphTest {
graph()->NewNode(common()->StateValues(0, SparseInputMask::Dense()));
return graph()->NewNode(
common()->FrameState(
- BailoutId::None(), OutputFrameStateCombine::Ignore(),
+ BytecodeOffset::None(), OutputFrameStateCombine::Ignore(),
common()->CreateFrameStateFunctionInfo(
- FrameStateType::kInterpretedFunction, 1, 0, shared)),
+ FrameStateType::kUnoptimizedFunction, 1, 0, shared)),
state_values, state_values, state_values, NumberConstant(0),
UndefinedConstant(), outer_frame_state);
}
diff --git a/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc b/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
index 2fb91ae1b4..ceed584d85 100644
--- a/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
+++ b/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
@@ -29,16 +29,26 @@ class LinkageTailCall : public TestWithZone {
DCHECK(arraysize(kMachineTypes) >=
locations->return_count() + locations->parameter_count());
USE(kMachineTypes);
+ size_t stack_arguments = 0;
+ for (size_t i = 0; i < locations->parameter_count(); ++i) {
+ if (locations->GetParam(i).IsCallerFrameSlot()) stack_arguments++;
+ }
+ size_t stack_returns = 0;
+ for (size_t i = 0; i < locations->return_count(); ++i) {
+ if (locations->GetReturn(i).IsCallerFrameSlot()) stack_returns++;
+ }
return zone()->New<CallDescriptor>(
CallDescriptor::kCallCodeObject, MachineType::AnyTagged(),
LinkageLocation::ForAnyRegister(MachineType::Pointer()),
- locations, // location_sig
- 0, // js_parameter_count
+ locations, // location_sig
+ stack_arguments,
Operator::kNoProperties, // properties
0, // callee-saved
0, // callee-saved fp
CallDescriptor::kNoFlags, // flags,
- "");
+ "", StackArgumentOrder::kDefault,
+ 0, // allocatable_registers
+ stack_returns);
}
LinkageLocation StackLocation(int loc) {
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index 358771f6d4..d54e927e83 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -2427,18 +2427,19 @@ TEST_F(MachineOperatorReducerTest, Float64Atan2WithConstant) {
TEST_F(MachineOperatorReducerTest, Float64Atan2WithNaN) {
Node* const p0 = Parameter(0);
- Node* const nan = Float64Constant(std::numeric_limits<double>::quiet_NaN());
+ const double nan = std::numeric_limits<double>::quiet_NaN();
+ Node* const nan_node = Float64Constant(nan);
{
Reduction const r =
- Reduce(graph()->NewNode(machine()->Float64Atan2(), p0, nan));
+ Reduce(graph()->NewNode(machine()->Float64Atan2(), p0, nan_node));
ASSERT_TRUE(r.Changed());
- EXPECT_EQ(nan, r.replacement());
+ EXPECT_THAT(r.replacement(), IsFloat64Constant(NanSensitiveDoubleEq(nan)));
}
{
Reduction const r =
- Reduce(graph()->NewNode(machine()->Float64Atan2(), nan, p0));
+ Reduce(graph()->NewNode(machine()->Float64Atan2(), nan_node, p0));
ASSERT_TRUE(r.Changed());
- EXPECT_EQ(nan, r.replacement());
+ EXPECT_THAT(r.replacement(), IsFloat64Constant(NanSensitiveDoubleEq(nan)));
}
}
diff --git a/deps/v8/test/unittests/compiler/regalloc/mid-tier-register-allocator-unittest.cc b/deps/v8/test/unittests/compiler/regalloc/mid-tier-register-allocator-unittest.cc
index 7d20ec2ad4..4218f66180 100644
--- a/deps/v8/test/unittests/compiler/regalloc/mid-tier-register-allocator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/regalloc/mid-tier-register-allocator-unittest.cc
@@ -629,6 +629,32 @@ TEST_F(MidTierRegisterAllocatorTest, RegressionLoadConstantBeforeSpill) {
Allocate();
}
+TEST_F(MidTierRegisterAllocatorTest, RegressionSpillDeoptInputIfUsedAtEnd) {
+ StartBlock();
+ VReg in1 = Define(Reg(1));
+ VReg out1 = EmitOI(Same(), Reg(in1), DeoptArg(in1));
+ Return(out1);
+ EndBlock(Last());
+
+ Allocate();
+
+ const int instr_index = 1;
+ Instruction* instr = sequence()->InstructionAt(instr_index);
+ EXPECT_FALSE(instr->InputAt(0)->EqualsCanonicalized(*instr->InputAt(1)));
+}
+
+TEST_F(MidTierRegisterAllocatorTest, RegressionConstantInSlotOperands) {
+ StartBlock();
+ auto const_var1 = DefineConstant(1);
+ auto const_var2 = DefineConstant(2);
+ EmitOI(Reg(), Slot(const_var1));
+ VReg out = EmitOI(Same(), Slot(const_var2));
+ Return(out);
+ EndBlock(Last());
+
+ Allocate();
+}
+
TEST_F(MidTierRegisterAllocatorTest, DiamondWithCallFirstBlock) {
StartBlock();
auto x = EmitOI(Reg(0));
diff --git a/deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc b/deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc
new file mode 100644
index 0000000000..9df179139e
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc
@@ -0,0 +1,1589 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/objects-inl.h"
+#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+template <typename T>
+struct MachInst {
+ T constructor;
+ const char* constructor_name;
+ ArchOpcode arch_opcode;
+ MachineType machine_type;
+};
+
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const MachInst<T>& mi) {
+ return os << mi.constructor_name;
+}
+
+using MachInst1 = MachInst<Node* (RawMachineAssembler::*)(Node*)>;
+using MachInst2 = MachInst<Node* (RawMachineAssembler::*)(Node*, Node*)>;
+
+// To avoid duplicated code, the IntCmp helper structure is created. It
+// contains a MachInst2 with two nodes plus an expected_size, because
+// different cmp instructions have different sizes.
+struct IntCmp {
+ MachInst2 mi;
+ uint32_t expected_size;
+};
+
+struct FPCmp {
+ MachInst2 mi;
+ FlagsCondition cond;
+};
+
+const FPCmp kFPCmpInstructions[] = {
+ {{&RawMachineAssembler::Float64Equal, "Float64Equal", kRiscvCmpD,
+ MachineType::Float64()},
+ kEqual},
+ {{&RawMachineAssembler::Float64LessThan, "Float64LessThan", kRiscvCmpD,
+ MachineType::Float64()},
+ kUnsignedLessThan},
+ {{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
+ kRiscvCmpD, MachineType::Float64()},
+ kUnsignedLessThanOrEqual},
+ {{&RawMachineAssembler::Float64GreaterThan, "Float64GreaterThan",
+ kRiscvCmpD, MachineType::Float64()},
+ kUnsignedLessThan},
+ {{&RawMachineAssembler::Float64GreaterThanOrEqual,
+ "Float64GreaterThanOrEqual", kRiscvCmpD, MachineType::Float64()},
+ kUnsignedLessThanOrEqual}};
+
+struct Conversion {
+ // The machine_type field in MachInst1 represents the destination type.
+ MachInst1 mi;
+ MachineType src_machine_type;
+};
+
+// ----------------------------------------------------------------------------
+// Logical instructions.
+// ----------------------------------------------------------------------------
+
+const MachInst2 kLogicalInstructions[] = {
+ {&RawMachineAssembler::Word32And, "Word32And", kRiscvAnd32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64And, "Word64And", kRiscvAnd,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kRiscvOr32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Or, "Word64Or", kRiscvOr,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kRiscvXor32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Xor, "Word64Xor", kRiscvXor,
+ MachineType::Int64()}};
+
+// ----------------------------------------------------------------------------
+// Shift instructions.
+// ----------------------------------------------------------------------------
+
+const MachInst2 kShiftInstructions[] = {
+ {&RawMachineAssembler::Word32Shl, "Word32Shl", kRiscvShl32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Shl, "Word64Shl", kRiscvShl64,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Shr, "Word32Shr", kRiscvShr32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Shr, "Word64Shr", kRiscvShr64,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Sar, "Word32Sar", kRiscvSar32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Sar, "Word64Sar", kRiscvSar64,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Ror, "Word32Ror", kRiscvRor32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Ror, "Word64Ror", kRiscvRor64,
+ MachineType::Int64()}};
+
+// ----------------------------------------------------------------------------
+// MUL/DIV instructions.
+// ----------------------------------------------------------------------------
+
+const MachInst2 kMulDivInstructions[] = {
+ {&RawMachineAssembler::Int32Mul, "Int32Mul", kRiscvMul32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32Div, "Int32Div", kRiscvDiv32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Uint32Div, "Uint32Div", kRiscvDivU32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Int64Mul, "Int64Mul", kRiscvMul64,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Int64Div, "Int64Div", kRiscvDiv64,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Uint64Div, "Uint64Div", kRiscvDivU64,
+ MachineType::Uint64()},
+ {&RawMachineAssembler::Float64Mul, "Float64Mul", kRiscvMulD,
+ MachineType::Float64()},
+ {&RawMachineAssembler::Float64Div, "Float64Div", kRiscvDivD,
+ MachineType::Float64()}};
+
+// ----------------------------------------------------------------------------
+// MOD instructions.
+// ----------------------------------------------------------------------------
+
+const MachInst2 kModInstructions[] = {
+ {&RawMachineAssembler::Int32Mod, "Int32Mod", kRiscvMod32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Uint32Mod, "Uint32Mod", kRiscvModU32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Float64Mod, "Float64Mod", kRiscvModD,
+ MachineType::Float64()}};
+
+// ----------------------------------------------------------------------------
+// Arithmetic FPU instructions.
+// ----------------------------------------------------------------------------
+
+const MachInst2 kFPArithInstructions[] = {
+ {&RawMachineAssembler::Float64Add, "Float64Add", kRiscvAddD,
+ MachineType::Float64()},
+ {&RawMachineAssembler::Float64Sub, "Float64Sub", kRiscvSubD,
+ MachineType::Float64()}};
+
+// ----------------------------------------------------------------------------
+// IntArithTest instructions, two nodes.
+// ----------------------------------------------------------------------------
+
+const MachInst2 kAddSubInstructions[] = {
+ {&RawMachineAssembler::Int32Add, "Int32Add", kRiscvAdd32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int64Add, "Int64Add", kRiscvAdd64,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Int32Sub, "Int32Sub", kRiscvSub32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int64Sub, "Int64Sub", kRiscvSub64,
+ MachineType::Int64()}};
+
+// ----------------------------------------------------------------------------
+// IntArithTest instructions, one node.
+// ----------------------------------------------------------------------------
+
+const MachInst1 kAddSubOneInstructions[] = {
+ {&RawMachineAssembler::Int32Neg, "Int32Neg", kRiscvSub32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int64Neg, "Int64Neg", kRiscvSub64,
+ MachineType::Int64()}};
+
+// ----------------------------------------------------------------------------
+// Arithmetic compare instructions.
+// ----------------------------------------------------------------------------
+
+const IntCmp kCmpInstructions[] = {
+ {{&RawMachineAssembler::WordEqual, "WordEqual", kRiscvCmp,
+ MachineType::Int64()},
+ 1U},
+ {{&RawMachineAssembler::WordNotEqual, "WordNotEqual", kRiscvCmp,
+ MachineType::Int64()},
+ 1U},
+ {{&RawMachineAssembler::Word32Equal, "Word32Equal", kRiscvCmp,
+ MachineType::Int32()},
+ 1U},
+ {{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kRiscvCmp,
+ MachineType::Int32()},
+ 1U},
+ {{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kRiscvCmp,
+ MachineType::Int32()},
+ 1U},
+ {{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
+ kRiscvCmp, MachineType::Int32()},
+ 1U},
+ {{&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kRiscvCmp,
+ MachineType::Int32()},
+ 1U},
+ {{&RawMachineAssembler::Int32GreaterThanOrEqual, "Int32GreaterThanOrEqual",
+ kRiscvCmp, MachineType::Int32()},
+ 1U},
+ {{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kRiscvCmp,
+ MachineType::Uint32()},
+ 1U},
+ {{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
+ kRiscvCmp, MachineType::Uint32()},
+ 1U}};
+
+// ----------------------------------------------------------------------------
+// Conversion instructions.
+// ----------------------------------------------------------------------------
+
+const Conversion kConversionInstructions[] = {
+ // Conversion instructions are related to machine_operator.h:
+ // FPU conversions:
+ // Convert representation of integers between float64 and int32/uint32.
+ // The precise rounding mode and handling of out of range inputs are *not*
+ // defined for these operators, since they are intended only for use with
+ // integers.
+ // mips instructions:
+ // mtc1, cvt.d.w
+ {{&RawMachineAssembler::ChangeInt32ToFloat64, "ChangeInt32ToFloat64",
+ kRiscvCvtDW, MachineType::Float64()},
+ MachineType::Int32()},
+
+ // mips instructions:
+ // cvt.d.uw
+ {{&RawMachineAssembler::ChangeUint32ToFloat64, "ChangeUint32ToFloat64",
+ kRiscvCvtDUw, MachineType::Float64()},
+ MachineType::Int32()},
+
+ // mips instructions:
+  // mfc1, trunc double to word; for more details, see the mips macro
+  // assembler and mips assembler files.
+ {{&RawMachineAssembler::ChangeFloat64ToInt32, "ChangeFloat64ToInt32",
+ kRiscvTruncWD, MachineType::Float64()},
+ MachineType::Int32()},
+
+ // mips instructions:
+  // trunc double to unsigned word; for more details, see the mips macro
+  // assembler and mips assembler files.
+ {{&RawMachineAssembler::ChangeFloat64ToUint32, "ChangeFloat64ToUint32",
+ kRiscvTruncUwD, MachineType::Float64()},
+ MachineType::Int32()}};
+
+const Conversion kFloat64RoundInstructions[] = {
+ {{&RawMachineAssembler::Float64RoundUp, "Float64RoundUp", kRiscvCeilWD,
+ MachineType::Int32()},
+ MachineType::Float64()},
+ {{&RawMachineAssembler::Float64RoundDown, "Float64RoundDown", kRiscvFloorWD,
+ MachineType::Int32()},
+ MachineType::Float64()},
+ {{&RawMachineAssembler::Float64RoundTiesEven, "Float64RoundTiesEven",
+ kRiscvRoundWD, MachineType::Int32()},
+ MachineType::Float64()},
+ {{&RawMachineAssembler::Float64RoundTruncate, "Float64RoundTruncate",
+ kRiscvTruncWD, MachineType::Int32()},
+ MachineType::Float64()}};
+
+const Conversion kFloat32RoundInstructions[] = {
+ {{&RawMachineAssembler::Float32RoundUp, "Float32RoundUp", kRiscvCeilWS,
+ MachineType::Int32()},
+ MachineType::Float32()},
+ {{&RawMachineAssembler::Float32RoundDown, "Float32RoundDown", kRiscvFloorWS,
+ MachineType::Int32()},
+ MachineType::Float32()},
+ {{&RawMachineAssembler::Float32RoundTiesEven, "Float32RoundTiesEven",
+ kRiscvRoundWS, MachineType::Int32()},
+ MachineType::Float32()},
+ {{&RawMachineAssembler::Float32RoundTruncate, "Float32RoundTruncate",
+ kRiscvTruncWS, MachineType::Int32()},
+ MachineType::Float32()}};
+
+// RISC-V 64 instructions that clear the top 32 bits of the destination.
+const MachInst2 kCanElideChangeUint32ToUint64[] = {
+ {&RawMachineAssembler::Uint32Div, "Uint32Div", kRiscvDivU32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Uint32Mod, "Uint32Mod", kRiscvModU32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Uint32MulHigh, "Uint32MulHigh", kRiscvMulHighU32,
+ MachineType::Uint32()}};
+
+} // namespace
+
+using InstructionSelectorFPCmpTest = InstructionSelectorTestWithParam<FPCmp>;
+
+TEST_P(InstructionSelectorFPCmpTest, Parameter) {
+ const FPCmp cmp = GetParam();
+ StreamBuilder m(this, MachineType::Int32(), cmp.mi.machine_type,
+ cmp.mi.machine_type);
+ m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
+ ::testing::ValuesIn(kFPCmpInstructions));
+
+// ----------------------------------------------------------------------------
+// Arithmetic compare instructions integers
+// ----------------------------------------------------------------------------
+using InstructionSelectorCmpTest = InstructionSelectorTestWithParam<IntCmp>;
+
+TEST_P(InstructionSelectorCmpTest, Parameter) {
+ const IntCmp cmp = GetParam();
+ const MachineType type = cmp.mi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+
+ if (FLAG_debug_code &&
+ type.representation() == MachineRepresentation::kWord32) {
+ ASSERT_EQ(6U, s.size());
+
+ EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+
+ EXPECT_EQ(kRiscvShl64, s[1]->arch_opcode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+
+ EXPECT_EQ(kRiscvShl64, s[2]->arch_opcode());
+ EXPECT_EQ(2U, s[2]->InputCount());
+ EXPECT_EQ(1U, s[2]->OutputCount());
+
+ EXPECT_EQ(cmp.mi.arch_opcode, s[3]->arch_opcode());
+ EXPECT_EQ(2U, s[3]->InputCount());
+ EXPECT_EQ(1U, s[3]->OutputCount());
+
+ EXPECT_EQ(kRiscvAssertEqual, s[4]->arch_opcode());
+ EXPECT_EQ(3U, s[4]->InputCount());
+ EXPECT_EQ(0U, s[4]->OutputCount());
+
+ EXPECT_EQ(cmp.mi.arch_opcode, s[5]->arch_opcode());
+ EXPECT_EQ(2U, s[5]->InputCount());
+ EXPECT_EQ(1U, s[5]->OutputCount());
+ } else {
+ ASSERT_EQ(cmp.expected_size, s.size());
+ EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorCmpTest,
+ ::testing::ValuesIn(kCmpInstructions));
+
+// ----------------------------------------------------------------------------
+// Shift instructions.
+// ----------------------------------------------------------------------------
+using InstructionSelectorShiftTest =
+ InstructionSelectorTestWithParam<MachInst2>;
+
+TEST_P(InstructionSelectorShiftTest, Immediate) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ TRACED_FORRANGE(int32_t, imm, 0,
+ ((1 << ElementSizeLog2Of(type.representation())) * 8) - 1) {
+ StreamBuilder m(this, type, type);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
+ ::testing::ValuesIn(kShiftInstructions));
+
+// ----------------------------------------------------------------------------
+// Logical instructions.
+// ----------------------------------------------------------------------------
+using InstructionSelectorLogicalTest =
+ InstructionSelectorTestWithParam<MachInst2>;
+
+TEST_P(InstructionSelectorLogicalTest, Parameter) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorLogicalTest,
+ ::testing::ValuesIn(kLogicalInstructions));
+
+TEST_F(InstructionSelectorTest, Word64XorMinusOneWithParameter) {
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Xor(m.Parameter(0), m.Int64Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvNor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Xor(m.Int64Constant(-1), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvNor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word32XorMinusOneWithParameter) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Parameter(0), m.Int32Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvNor32, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Int32Constant(-1), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvNor32, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word64XorMinusOneWithWord64Or) {
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Xor(m.Word64Or(m.Parameter(0), m.Parameter(0)),
+ m.Int64Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvNor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Xor(m.Int64Constant(-1),
+ m.Word64Or(m.Parameter(0), m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvNor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word32XorMinusOneWithWord32Or) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Word32Or(m.Parameter(0), m.Parameter(0)),
+ m.Int32Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvNor32, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Int32Constant(-1),
+ m.Word32Or(m.Parameter(0), m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvNor32, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word32ShlWithWord32And) {
+ TRACED_FORRANGE(int32_t, shift, 0, 30) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word32Shl(m.Word32And(p0, m.Int32Constant((1 << (31 - shift)) - 1)),
+ m.Int32Constant(shift + 1));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvShl32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word64ShlWithWord64And) {
+ TRACED_FORRANGE(int32_t, shift, 0, 62) {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word64Shl(m.Word64And(p0, m.Int64Constant((1L << (63 - shift)) - 1)),
+ m.Int64Constant(shift + 1));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvShl64, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word32SarWithWord32Shl) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(24)), m.Int32Constant(24));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvSignExtendByte, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(16)), m.Int32Constant(16));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvSignExtendShort, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(32)), m.Int32Constant(32));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvShl32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+}
+
+// ----------------------------------------------------------------------------
+// MUL/DIV instructions.
+// ----------------------------------------------------------------------------
+using InstructionSelectorMulDivTest =
+ InstructionSelectorTestWithParam<MachInst2>;
+
+TEST_P(InstructionSelectorMulDivTest, Parameter) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorMulDivTest,
+ ::testing::ValuesIn(kMulDivInstructions));
+
+// ----------------------------------------------------------------------------
+// MOD instructions.
+// ----------------------------------------------------------------------------
+using InstructionSelectorModTest = InstructionSelectorTestWithParam<MachInst2>;
+
+TEST_P(InstructionSelectorModTest, Parameter) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorModTest,
+ ::testing::ValuesIn(kModInstructions));
+
+// ----------------------------------------------------------------------------
+// Floating point instructions.
+// ----------------------------------------------------------------------------
+using InstructionSelectorFPArithTest =
+ InstructionSelectorTestWithParam<MachInst2>;
+
+TEST_P(InstructionSelectorFPArithTest, Parameter) {
+ const MachInst2 fpa = GetParam();
+ StreamBuilder m(this, fpa.machine_type, fpa.machine_type, fpa.machine_type);
+ m.Return((m.*fpa.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(fpa.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorFPArithTest,
+ ::testing::ValuesIn(kFPArithInstructions));
+// ----------------------------------------------------------------------------
+// Integer arithmetic
+// ----------------------------------------------------------------------------
+using InstructionSelectorIntArithTwoTest =
+ InstructionSelectorTestWithParam<MachInst2>;
+
+TEST_P(InstructionSelectorIntArithTwoTest, Parameter) {
+ const MachInst2 intpa = GetParam();
+ StreamBuilder m(this, intpa.machine_type, intpa.machine_type,
+ intpa.machine_type);
+ m.Return((m.*intpa.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(intpa.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorIntArithTwoTest,
+ ::testing::ValuesIn(kAddSubInstructions));
+
+// ----------------------------------------------------------------------------
+// One node.
+// ----------------------------------------------------------------------------
+
+using InstructionSelectorIntArithOneTest =
+ InstructionSelectorTestWithParam<MachInst1>;
+
+TEST_P(InstructionSelectorIntArithOneTest, Parameter) {
+ const MachInst1 intpa = GetParam();
+ StreamBuilder m(this, intpa.machine_type, intpa.machine_type,
+ intpa.machine_type);
+ m.Return((m.*intpa.constructor)(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(intpa.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorIntArithOneTest,
+ ::testing::ValuesIn(kAddSubOneInstructions));
+// ----------------------------------------------------------------------------
+// Conversions.
+// ----------------------------------------------------------------------------
+using InstructionSelectorConversionTest =
+ InstructionSelectorTestWithParam<Conversion>;
+
+TEST_P(InstructionSelectorConversionTest, Parameter) {
+ const Conversion conv = GetParam();
+ StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
+ m.Return((m.*conv.mi.constructor)(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorConversionTest,
+ ::testing::ValuesIn(kConversionInstructions));
+
+TEST_F(InstructionSelectorTest, ChangesFromToSmi) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.TruncateInt64ToInt32(
+ m.Word64Sar(m.Parameter(0), m.Int32Constant(32))));
+ Stream s = m.Build();
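+    // Untagging a Smi-style value (truncating an arithmetic shift right by
+    // 32) is expected to collapse into a single 64-bit shift.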
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvSar64, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(
+ m.Word64Shl(m.ChangeInt32ToInt64(m.Parameter(0)), m.Int32Constant(32)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvShl64, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+using CombineChangeFloat64ToInt32WithRoundFloat64 =
+ InstructionSelectorTestWithParam<Conversion>;
+
+TEST_P(CombineChangeFloat64ToInt32WithRoundFloat64, Parameter) {
+ {
+ const Conversion conv = GetParam();
+ StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
+ m.Return(m.ChangeFloat64ToInt32((m.*conv.mi.constructor)(m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ CombineChangeFloat64ToInt32WithRoundFloat64,
+ ::testing::ValuesIn(kFloat64RoundInstructions));
+
+using CombineChangeFloat32ToInt32WithRoundFloat32 =
+ InstructionSelectorTestWithParam<Conversion>;
+
+TEST_P(CombineChangeFloat32ToInt32WithRoundFloat32, Parameter) {
+ {
+ const Conversion conv = GetParam();
+ StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
+ m.Return(m.ChangeFloat64ToInt32(
+ m.ChangeFloat32ToFloat64((m.*conv.mi.constructor)(m.Parameter(0)))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ CombineChangeFloat32ToInt32WithRoundFloat32,
+ ::testing::ValuesIn(kFloat32RoundInstructions));
+
+TEST_F(InstructionSelectorTest, ChangeFloat64ToInt32OfChangeFloat32ToFloat64) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float32());
+ m.Return(m.ChangeFloat64ToInt32(m.ChangeFloat32ToFloat64(m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvTruncWS, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_F(InstructionSelectorTest,
+ TruncateFloat64ToFloat32OfChangeInt32ToFloat64) {
+ {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Int32());
+ m.Return(
+ m.TruncateFloat64ToFloat32(m.ChangeInt32ToFloat64(m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvCvtSW, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_F(InstructionSelectorTest, CombineShiftsWithMul) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Int32Mul(m.Word64Sar(m.Parameter(0), m.Int32Constant(32)),
+ m.Word64Sar(m.Parameter(0), m.Int32Constant(32))));
+ Stream s = m.Build();
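+    // Multiplying the upper 32-bit halves of 64-bit values is expected to be
+    // selected as a single 64-bit multiply-high instruction.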
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvMulHigh64, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_F(InstructionSelectorTest, CombineShiftsWithDivMod) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Int32Div(m.Word64Sar(m.Parameter(0), m.Int32Constant(32)),
+ m.Word64Sar(m.Parameter(0), m.Int32Constant(32))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvDiv64, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Int32Mod(m.Word64Sar(m.Parameter(0), m.Int32Constant(32)),
+ m.Word64Sar(m.Parameter(0), m.Int32Constant(32))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvMod64, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_F(InstructionSelectorTest, ChangeInt32ToInt64AfterLoad) {
+ // For each case, test that the conversion is merged into the load
+ // operation.
+ // ChangeInt32ToInt64(Load_Uint8) -> Lbu
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Uint8(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kRiscvLbu, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Int8) -> Lb
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Int8(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kRiscvLb, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Uint16) -> Lhu
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Uint16(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kRiscvLhu, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Int16) -> Lh
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Int16(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kRiscvLh, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Uint32) -> Lw
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Uint32(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kRiscvLw, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Int32) -> Lw
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Int32(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kRiscvLw, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+}
+
+using InstructionSelectorElidedChangeUint32ToUint64Test =
+ InstructionSelectorTestWithParam<MachInst2>;
+
+TEST_P(InstructionSelectorElidedChangeUint32ToUint64Test, Parameter) {
+ const MachInst2 binop = GetParam();
+ StreamBuilder m(this, MachineType::Uint64(), binop.machine_type,
+ binop.machine_type);
+ m.Return(m.ChangeUint32ToUint64(
+ (m.*binop.constructor)(m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+  // Make sure the `ChangeUint32ToUint64` node is turned into two ops
+  // (slli by 32 and srli by 32).
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(binop.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorElidedChangeUint32ToUint64Test,
+ ::testing::ValuesIn(kCanElideChangeUint32ToUint64));
+
+TEST_F(InstructionSelectorTest, ChangeUint32ToUint64AfterLoad) {
+  // For each case, make sure the `ChangeUint32ToUint64` node is turned into a
+  // no-op.
+
+ // Lbu
+ {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeUint32ToUint64(
+ m.Load(MachineType::Uint8(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kRiscvAdd64, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kRiscvLbu, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+ // Lhu
+ {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeUint32ToUint64(
+ m.Load(MachineType::Uint16(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kRiscvAdd64, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kRiscvLhu, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+ // Lwu
+ {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeUint32ToUint64(
+ m.Load(MachineType::Uint32(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kRiscvAdd64, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kRiscvLwu, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Loads and stores.
+// ----------------------------------------------------------------------------
+
+namespace {
+
+struct MemoryAccess {
+ MachineType type;
+ ArchOpcode load_opcode;
+ ArchOpcode store_opcode;
+};
+
+static const MemoryAccess kMemoryAccesses[] = {
+ {MachineType::Int8(), kRiscvLb, kRiscvSb},
+ {MachineType::Uint8(), kRiscvLbu, kRiscvSb},
+ {MachineType::Int16(), kRiscvLh, kRiscvSh},
+ {MachineType::Uint16(), kRiscvLhu, kRiscvSh},
+ {MachineType::Int32(), kRiscvLw, kRiscvSw},
+ {MachineType::Float32(), kRiscvLoadFloat, kRiscvStoreFloat},
+ {MachineType::Float64(), kRiscvLoadDouble, kRiscvStoreDouble},
+ {MachineType::Int64(), kRiscvLd, kRiscvSd}};
+
+struct MemoryAccessImm {
+ MachineType type;
+ ArchOpcode load_opcode;
+ ArchOpcode store_opcode;
+ bool (InstructionSelectorTest::Stream::*val_predicate)(
+ const InstructionOperand*) const;
+ const int32_t immediates[40];
+};
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccessImm& acc) {
+ return os << acc.type;
+}
+
+struct MemoryAccessImm1 {
+ MachineType type;
+ ArchOpcode load_opcode;
+ ArchOpcode store_opcode;
+ bool (InstructionSelectorTest::Stream::*val_predicate)(
+ const InstructionOperand*) const;
+ const int32_t immediates[5];
+};
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccessImm1& acc) {
+ return os << acc.type;
+}
+
+struct MemoryAccessImm2 {
+ MachineType type;
+ ArchOpcode store_opcode;
+ ArchOpcode store_opcode_unaligned;
+ bool (InstructionSelectorTest::Stream::*val_predicate)(
+ const InstructionOperand*) const;
+ const int32_t immediates[40];
+};
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccessImm2& acc) {
+ return os << acc.type;
+}
+
+// ----------------------------------------------------------------------------
+// Loads and stores immediate values
+// ----------------------------------------------------------------------------
+
+const MemoryAccessImm kMemoryAccessesImm[] = {
+ {MachineType::Int8(),
+ kRiscvLb,
+ kRiscvSb,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Uint8(),
+ kRiscvLbu,
+ kRiscvSb,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Int16(),
+ kRiscvLh,
+ kRiscvSh,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Uint16(),
+ kRiscvLhu,
+ kRiscvSh,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Int32(),
+ kRiscvLw,
+ kRiscvSw,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Float32(),
+ kRiscvLoadFloat,
+ kRiscvStoreFloat,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Float64(),
+ kRiscvLoadDouble,
+ kRiscvStoreDouble,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Int64(),
+ kRiscvLd,
+ kRiscvSd,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}};
+
+const MemoryAccessImm1 kMemoryAccessImmMoreThan16bit[] = {
+ {MachineType::Int8(),
+ kRiscvLb,
+ kRiscvSb,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-65000, -55000, 32777, 55000, 65000}},
+ {MachineType::Uint8(),
+ kRiscvLbu,
+ kRiscvSb,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-65000, -55000, 32777, 55000, 65000}},
+ {MachineType::Int16(),
+ kRiscvLh,
+ kRiscvSh,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-65000, -55000, 32777, 55000, 65000}},
+ {MachineType::Uint16(),
+ kRiscvLhu,
+ kRiscvSh,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-65000, -55000, 32777, 55000, 65000}},
+ {MachineType::Int32(),
+ kRiscvLw,
+ kRiscvSw,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-65000, -55000, 32777, 55000, 65000}},
+ {MachineType::Float32(),
+ kRiscvLoadFloat,
+ kRiscvStoreFloat,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-65000, -55000, 32777, 55000, 65000}},
+ {MachineType::Float64(),
+ kRiscvLoadDouble,
+ kRiscvStoreDouble,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-65000, -55000, 32777, 55000, 65000}},
+ {MachineType::Int64(),
+ kRiscvLd,
+ kRiscvSd,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-65000, -55000, 32777, 55000, 65000}}};
+
+const MemoryAccessImm2 kMemoryAccessesImmUnaligned[] = {
+ {MachineType::Int16(),
+ kRiscvUsh,
+ kRiscvSh,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Int32(),
+ kRiscvUsw,
+ kRiscvSw,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Int64(),
+ kRiscvUsd,
+ kRiscvSd,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Float32(),
+ kRiscvUStoreFloat,
+ kRiscvStoreFloat,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Float64(),
+ kRiscvUStoreDouble,
+ kRiscvStoreDouble,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}};
+
+} // namespace
+
+using InstructionSelectorMemoryAccessTest =
+ InstructionSelectorTestWithParam<MemoryAccess>;
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
+ const MemoryAccess memacc = GetParam();
+ StreamBuilder m(this, memacc.type, MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.Load(memacc.type, m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+}
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
+ const MemoryAccess memacc = GetParam();
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Int32(), memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0), m.Parameter(1),
+ kNoWriteBarrier);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessTest,
+ ::testing::ValuesIn(kMemoryAccesses));
+
+// ----------------------------------------------------------------------------
+// Load immediate.
+// ----------------------------------------------------------------------------
+
+using InstructionSelectorMemoryAccessImmTest =
+ InstructionSelectorTestWithParam<MemoryAccessImm>;
+
+TEST_P(InstructionSelectorMemoryAccessImmTest, LoadWithImmediateIndex) {
+ const MemoryAccessImm memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, memacc.type, MachineType::Pointer());
+ m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Store immediate.
+// ----------------------------------------------------------------------------
+
+TEST_P(InstructionSelectorMemoryAccessImmTest, StoreWithImmediateIndex) {
+ const MemoryAccessImm memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
+
+TEST_P(InstructionSelectorMemoryAccessImmTest, StoreZero) {
+ const MemoryAccessImm memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer());
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Int32Constant(0), kNoWriteBarrier);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
+ EXPECT_EQ(0, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessImmTest,
+ ::testing::ValuesIn(kMemoryAccessesImm));
+
+using InstructionSelectorMemoryAccessUnalignedImmTest =
+ InstructionSelectorTestWithParam<MemoryAccessImm2>;
+
+TEST_P(InstructionSelectorMemoryAccessUnalignedImmTest, StoreZero) {
+ const MemoryAccessImm2 memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer());
+ bool unaligned_store_supported =
+ m.machine()->UnalignedStoreSupported(memacc.type.representation());
+ m.UnalignedStore(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Int32Constant(0));
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
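+    // If the offset fits into a signed 12-bit immediate, the store is the
+    // only instruction emitted; otherwise an extra instruction materializes
+    // the offset first and the store ends up at position 1 with a zero
+    // immediate.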
+ uint32_t i = is_int12(index) ? 0 : 1;
+ ASSERT_EQ(i + 1, s.size());
+ EXPECT_EQ(unaligned_store_supported ? memacc.store_opcode_unaligned
+ : memacc.store_opcode,
+ s[i]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[i]->addressing_mode());
+ ASSERT_EQ(3U, s[i]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[i]->InputAt(1)->kind());
+ EXPECT_EQ(i == 0 ? index : 0, s.ToInt32(s[i]->InputAt(1)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[i]->InputAt(2)->kind());
+ EXPECT_EQ(0, s.ToInt64(s[i]->InputAt(2)));
+ EXPECT_EQ(0U, s[i]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessUnalignedImmTest,
+ ::testing::ValuesIn(kMemoryAccessesImmUnaligned));
+
+// ----------------------------------------------------------------------------
+// Load/store offsets more than 16 bits.
+// ----------------------------------------------------------------------------
+
+using InstructionSelectorMemoryAccessImmMoreThan16bitTest =
+ InstructionSelectorTestWithParam<MemoryAccessImm1>;
+
+TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
+ LoadWithImmediateIndex) {
+ const MemoryAccessImm1 memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, memacc.type, MachineType::Pointer());
+ m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
+ StoreWithImmediateIndex) {
+ const MemoryAccessImm1 memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessImmMoreThan16bitTest,
+ ::testing::ValuesIn(kMemoryAccessImmMoreThan16bit));
+
+// ----------------------------------------------------------------------------
+// kRiscvCmp with zero testing.
+// ----------------------------------------------------------------------------
+
+TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvCmp, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvCmp, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Equal(m.Parameter(0), m.Int64Constant(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvCmp, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Equal(m.Int32Constant(0), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvCmp, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word32Clz) {
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32Clz(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvClz32, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Word64Clz) {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Uint64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word64Clz(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvClz64, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Float32Abs) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvAbsS, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Float64Abs) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvAbsD, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Float64Max) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Float64Max(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvFloat64Max, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Float64Min) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Float64Min(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvFloat64Min, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, LoadAndShiftRight) {
+ {
+ int32_t immediates[] = {-256, -255, -3, -2, -1, 0, 1,
+ 2, 3, 255, 256, 260, 4096, 4100,
+ 8192, 8196, 3276, 3280, 16376, 16380};
+ TRACED_FOREACH(int32_t, index, immediates) {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer());
+ Node* const load =
+ m.Load(MachineType::Uint64(), m.Parameter(0), m.Int32Constant(index));
+ Node* const sar = m.Word64Sar(load, m.Int32Constant(32));
+ // Make sure we don't fold the shift into the following add:
+ m.Return(m.Int64Add(sar, m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
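+      // The 64-bit load combined with the arithmetic shift right by 32 is
+      // expected to be narrowed to a 32-bit load of the upper word, which
+      // lives at offset index + 4 on little-endian targets.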
+ EXPECT_EQ(kRiscvLw, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ EXPECT_EQ(index + 4, s.ToInt32(s[0]->InputAt(1)));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+#endif
+
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word32ReverseBytes) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32ReverseBytes(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ // EXPECT_EQ(kRiscvByteSwap32, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word64ReverseBytes) {
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64ReverseBytes(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kRiscvByteSwap64, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/heap/cppgc/concurrent-marking-unittest.cc b/deps/v8/test/unittests/heap/cppgc/concurrent-marking-unittest.cc
index 4da9870221..c4aea68f15 100644
--- a/deps/v8/test/unittests/heap/cppgc/concurrent-marking-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/concurrent-marking-unittest.cc
@@ -37,7 +37,7 @@ class ConcurrentMarkingTest : public testing::TestWithHeap {
Heap* heap = Heap::From(GetHeap());
heap->DisableHeapGrowingForTesting();
heap->StartIncrementalGarbageCollection(ConcurrentPreciseConfig);
- heap->marker()->DisableIncrementalMarkingForTesting();
+ heap->marker()->SetMainThreadMarkingDisabledForTesting(true);
}
bool SingleStep(Config::StackState stack_state) {
@@ -52,7 +52,9 @@ class ConcurrentMarkingTest : public testing::TestWithHeap {
}
void FinishGC() {
- Heap::From(GetHeap())->FinalizeIncrementalGarbageCollectionIfRunning(
+ Heap* heap = Heap::From(GetHeap());
+ heap->marker()->SetMainThreadMarkingDisabledForTesting(false);
+ heap->FinalizeIncrementalGarbageCollectionIfRunning(
ConcurrentPreciseConfig);
}
};
diff --git a/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc b/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
index 1172eedb86..33adc71ca6 100644
--- a/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
@@ -26,11 +26,28 @@ class EphemeronHolder : public GarbageCollected<GCed> {
EphemeronHolder(GCed* key, GCed* value) : ephemeron_pair_(key, value) {}
void Trace(cppgc::Visitor* visitor) const { visitor->Trace(ephemeron_pair_); }
+ const EphemeronPair<GCed, GCed>& ephemeron_pair() const {
+ return ephemeron_pair_;
+ }
+
+ private:
+ EphemeronPair<GCed, GCed> ephemeron_pair_;
+};
+
+class EphemeronHolderTraceEphemeron
+ : public GarbageCollected<EphemeronHolderTraceEphemeron> {
+ public:
+ EphemeronHolderTraceEphemeron(GCed* key, GCed* value)
+ : ephemeron_pair_(key, value) {}
+ void Trace(cppgc::Visitor* visitor) const {
+ visitor->TraceEphemeron(ephemeron_pair_.key, &ephemeron_pair_.value);
+ }
+
private:
EphemeronPair<GCed, GCed> ephemeron_pair_;
};
-class EhpemeronPairTest : public testing::TestWithHeap {
+class EphemeronPairTest : public testing::TestWithHeap {
using MarkingConfig = Marker::MarkingConfig;
static constexpr Marker::MarkingConfig IncrementalPreciseMarkingConfig = {
@@ -69,11 +86,11 @@ class EhpemeronPairTest : public testing::TestWithHeap {
// static
constexpr Marker::MarkingConfig
- EhpemeronPairTest::IncrementalPreciseMarkingConfig;
+ EphemeronPairTest::IncrementalPreciseMarkingConfig;
} // namespace
-TEST_F(EhpemeronPairTest, ValueMarkedWhenKeyIsMarked) {
+TEST_F(EphemeronPairTest, ValueMarkedWhenKeyIsMarked) {
GCed* key = MakeGarbageCollected<GCed>(GetAllocationHandle());
GCed* value = MakeGarbageCollected<GCed>(GetAllocationHandle());
Persistent<EphemeronHolder> holder =
@@ -84,7 +101,7 @@ TEST_F(EhpemeronPairTest, ValueMarkedWhenKeyIsMarked) {
EXPECT_TRUE(HeapObjectHeader::FromPayload(value).IsMarked());
}
-TEST_F(EhpemeronPairTest, ValueNotMarkedWhenKeyIsNotMarked) {
+TEST_F(EphemeronPairTest, ValueNotMarkedWhenKeyIsNotMarked) {
GCed* key = MakeGarbageCollected<GCed>(GetAllocationHandle());
GCed* value = MakeGarbageCollected<GCed>(GetAllocationHandle());
Persistent<EphemeronHolder> holder =
@@ -95,7 +112,7 @@ TEST_F(EhpemeronPairTest, ValueNotMarkedWhenKeyIsNotMarked) {
EXPECT_FALSE(HeapObjectHeader::FromPayload(value).IsMarked());
}
-TEST_F(EhpemeronPairTest, ValueNotMarkedBeforeKey) {
+TEST_F(EphemeronPairTest, ValueNotMarkedBeforeKey) {
GCed* key = MakeGarbageCollected<GCed>(GetAllocationHandle());
GCed* value = MakeGarbageCollected<GCed>(GetAllocationHandle());
Persistent<EphemeronHolder> holder =
@@ -108,5 +125,48 @@ TEST_F(EhpemeronPairTest, ValueNotMarkedBeforeKey) {
EXPECT_TRUE(HeapObjectHeader::FromPayload(value).IsMarked());
}
+TEST_F(EphemeronPairTest, TraceEphemeronDispatch) {
+ GCed* key = MakeGarbageCollected<GCed>(GetAllocationHandle());
+ GCed* value = MakeGarbageCollected<GCed>(GetAllocationHandle());
+ Persistent<EphemeronHolderTraceEphemeron> holder =
+ MakeGarbageCollected<EphemeronHolderTraceEphemeron>(GetAllocationHandle(),
+ key, value);
+ HeapObjectHeader::FromPayload(key).TryMarkAtomic();
+ InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get());
+ FinishMarking();
+ EXPECT_TRUE(HeapObjectHeader::FromPayload(value).IsMarked());
+}
+
+TEST_F(EphemeronPairTest, EmptyValue) {
+ GCed* key = MakeGarbageCollected<GCed>(GetAllocationHandle());
+ Persistent<EphemeronHolderTraceEphemeron> holder =
+ MakeGarbageCollected<EphemeronHolderTraceEphemeron>(GetAllocationHandle(),
+ key, nullptr);
+ HeapObjectHeader::FromPayload(key).TryMarkAtomic();
+ InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get());
+ FinishMarking();
+}
+
+TEST_F(EphemeronPairTest, EmptyKey) {
+ GCed* value = MakeGarbageCollected<GCed>(GetAllocationHandle());
+ Persistent<EphemeronHolderTraceEphemeron> holder =
+ MakeGarbageCollected<EphemeronHolderTraceEphemeron>(GetAllocationHandle(),
+ nullptr, value);
+ InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get());
+ FinishMarking();
+ // Key is not alive and value should thus not be held alive.
+ EXPECT_FALSE(HeapObjectHeader::FromPayload(value).IsMarked());
+}
+
+using EphemeronPairGCTest = testing::TestWithHeap;
+
+TEST_F(EphemeronPairGCTest, EphemeronPairValueIsCleared) {
+ GCed* value = MakeGarbageCollected<GCed>(GetAllocationHandle());
+ Persistent<EphemeronHolder> holder = MakeGarbageCollected<EphemeronHolder>(
+ GetAllocationHandle(), nullptr, value);
+ PreciseGC();
+ EXPECT_EQ(nullptr, holder->ephemeron_pair().value.Get());
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/garbage-collected-unittest.cc b/deps/v8/test/unittests/heap/cppgc/garbage-collected-unittest.cc
index e6e0ce71c8..7155aa6914 100644
--- a/deps/v8/test/unittests/heap/cppgc/garbage-collected-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/garbage-collected-unittest.cc
@@ -44,23 +44,43 @@ class GarbageCollectedTestWithHeap
} // namespace
TEST(GarbageCollectedTest, GarbageCollectedTrait) {
- STATIC_ASSERT(!IsGarbageCollectedType<int>::value);
- STATIC_ASSERT(!IsGarbageCollectedType<NotGCed>::value);
- STATIC_ASSERT(IsGarbageCollectedType<GCed>::value);
- STATIC_ASSERT(IsGarbageCollectedType<Mixin>::value);
- STATIC_ASSERT(IsGarbageCollectedType<GCedWithMixin>::value);
- STATIC_ASSERT(IsGarbageCollectedType<MergedMixins>::value);
- STATIC_ASSERT(IsGarbageCollectedType<GCWithMergedMixins>::value);
+ STATIC_ASSERT(!IsGarbageCollectedTypeV<int>);
+ STATIC_ASSERT(!IsGarbageCollectedTypeV<NotGCed>);
+ STATIC_ASSERT(IsGarbageCollectedTypeV<GCed>);
+ STATIC_ASSERT(!IsGarbageCollectedTypeV<Mixin>);
+ STATIC_ASSERT(IsGarbageCollectedTypeV<GCedWithMixin>);
+ STATIC_ASSERT(!IsGarbageCollectedTypeV<MergedMixins>);
+ STATIC_ASSERT(IsGarbageCollectedTypeV<GCWithMergedMixins>);
}
TEST(GarbageCollectedTest, GarbageCollectedMixinTrait) {
- STATIC_ASSERT(!IsGarbageCollectedMixinType<int>::value);
- STATIC_ASSERT(!IsGarbageCollectedMixinType<GCed>::value);
- STATIC_ASSERT(!IsGarbageCollectedMixinType<NotGCed>::value);
- STATIC_ASSERT(IsGarbageCollectedMixinType<Mixin>::value);
- STATIC_ASSERT(IsGarbageCollectedMixinType<GCedWithMixin>::value);
- STATIC_ASSERT(IsGarbageCollectedMixinType<MergedMixins>::value);
- STATIC_ASSERT(IsGarbageCollectedMixinType<GCWithMergedMixins>::value);
+ STATIC_ASSERT(!IsGarbageCollectedMixinTypeV<int>);
+ STATIC_ASSERT(!IsGarbageCollectedMixinTypeV<GCed>);
+ STATIC_ASSERT(!IsGarbageCollectedMixinTypeV<NotGCed>);
+ STATIC_ASSERT(IsGarbageCollectedMixinTypeV<Mixin>);
+ STATIC_ASSERT(!IsGarbageCollectedMixinTypeV<GCedWithMixin>);
+ STATIC_ASSERT(IsGarbageCollectedMixinTypeV<MergedMixins>);
+ STATIC_ASSERT(!IsGarbageCollectedMixinTypeV<GCWithMergedMixins>);
+}
+
+TEST(GarbageCollectedTest, GarbageCollectedOrMixinTrait) {
+ STATIC_ASSERT(!IsGarbageCollectedOrMixinTypeV<int>);
+ STATIC_ASSERT(IsGarbageCollectedOrMixinTypeV<GCed>);
+ STATIC_ASSERT(!IsGarbageCollectedOrMixinTypeV<NotGCed>);
+ STATIC_ASSERT(IsGarbageCollectedOrMixinTypeV<Mixin>);
+ STATIC_ASSERT(IsGarbageCollectedOrMixinTypeV<GCedWithMixin>);
+ STATIC_ASSERT(IsGarbageCollectedOrMixinTypeV<MergedMixins>);
+ STATIC_ASSERT(IsGarbageCollectedOrMixinTypeV<GCWithMergedMixins>);
+}
+
+TEST(GarbageCollectedTest, GarbageCollectedWithMixinTrait) {
+ STATIC_ASSERT(!IsGarbageCollectedWithMixinTypeV<int>);
+ STATIC_ASSERT(!IsGarbageCollectedWithMixinTypeV<GCed>);
+ STATIC_ASSERT(!IsGarbageCollectedWithMixinTypeV<NotGCed>);
+ STATIC_ASSERT(!IsGarbageCollectedWithMixinTypeV<Mixin>);
+ STATIC_ASSERT(IsGarbageCollectedWithMixinTypeV<GCedWithMixin>);
+ STATIC_ASSERT(!IsGarbageCollectedWithMixinTypeV<MergedMixins>);
+ STATIC_ASSERT(IsGarbageCollectedWithMixinTypeV<GCWithMergedMixins>);
}
TEST_F(GarbageCollectedTestWithHeap, GetObjectStartReturnsCurrentAddress) {
diff --git a/deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc b/deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc
index 9c48621e10..3d951dc6cf 100644
--- a/deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc
@@ -23,7 +23,7 @@ constexpr GCInfo GetEmptyGCInfo() { return {nullptr, nullptr, nullptr, false}; }
TEST(GCInfoTableTest, InitialEmpty) {
v8::base::PageAllocator page_allocator;
GCInfoTable table(&page_allocator);
- EXPECT_EQ(GCInfoTable::kMinIndex, table.NumberOfGCInfosForTesting());
+ EXPECT_EQ(GCInfoTable::kMinIndex, table.NumberOfGCInfos());
}
TEST(GCInfoTableTest, ResizeToMaxIndex) {
diff --git a/deps/v8/test/unittests/heap/cppgc/heap-growing-unittest.cc b/deps/v8/test/unittests/heap/cppgc/heap-growing-unittest.cc
index 39d6cb3b94..17ab9680c9 100644
--- a/deps/v8/test/unittests/heap/cppgc/heap-growing-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/heap-growing-unittest.cc
@@ -60,7 +60,8 @@ void FakeAllocate(StatsCollector* stats_collector, size_t bytes) {
} // namespace
TEST(HeapGrowingTest, ConservativeGCInvoked) {
- StatsCollector stats_collector;
+ StatsCollector stats_collector(nullptr /* metric_recorder */,
+ nullptr /* platform */);
MockGarbageCollector gc;
cppgc::Heap::ResourceConstraints constraints;
// Force GC at the first update.
@@ -73,7 +74,8 @@ TEST(HeapGrowingTest, ConservativeGCInvoked) {
}
TEST(HeapGrowingTest, InitialHeapSize) {
- StatsCollector stats_collector;
+ StatsCollector stats_collector(nullptr /* metric_recorder */,
+ nullptr /* platform */);
MockGarbageCollector gc;
cppgc::Heap::ResourceConstraints constraints;
// Use larger size to avoid running into small heap optimizations.
@@ -90,7 +92,8 @@ TEST(HeapGrowingTest, InitialHeapSize) {
TEST(HeapGrowingTest, ConstantGrowingFactor) {
// Use larger size to avoid running into small heap optimizations.
constexpr size_t kObjectSize = 10 * HeapGrowing::kMinLimitIncrease;
- StatsCollector stats_collector;
+ StatsCollector stats_collector(nullptr /* metric_recorder */,
+ nullptr /* platform */);
FakeGarbageCollector gc(&stats_collector);
cppgc::Heap::ResourceConstraints constraints;
// Force GC at the first update.
@@ -108,7 +111,8 @@ TEST(HeapGrowingTest, ConstantGrowingFactor) {
TEST(HeapGrowingTest, SmallHeapGrowing) {
// Larger constant to avoid running into special handling for smaller heaps.
constexpr size_t kLargeAllocation = 100 * kMB;
- StatsCollector stats_collector;
+ StatsCollector stats_collector(nullptr /* metric_recorder */,
+ nullptr /* platform */);
FakeGarbageCollector gc(&stats_collector);
cppgc::Heap::ResourceConstraints constraints;
// Force GC at the first update.
@@ -124,7 +128,8 @@ TEST(HeapGrowingTest, SmallHeapGrowing) {
}
TEST(HeapGrowingTest, IncrementalGCStarted) {
- StatsCollector stats_collector;
+ StatsCollector stats_collector(nullptr /* metric_recorder */,
+ nullptr /* platform */);
MockGarbageCollector gc;
cppgc::Heap::ResourceConstraints constraints;
HeapGrowing growing(&gc, &stats_collector, constraints,
@@ -137,7 +142,8 @@ TEST(HeapGrowingTest, IncrementalGCStarted) {
}
TEST(HeapGrowingTest, IncrementalGCFinalized) {
- StatsCollector stats_collector;
+ StatsCollector stats_collector(nullptr /* metric_recorder */,
+ nullptr /* platform */);
MockGarbageCollector gc;
cppgc::Heap::ResourceConstraints constraints;
HeapGrowing growing(&gc, &stats_collector, constraints,
diff --git a/deps/v8/test/unittests/heap/cppgc/heap-statistics-collector-unittest.cc b/deps/v8/test/unittests/heap/cppgc/heap-statistics-collector-unittest.cc
new file mode 100644
index 0000000000..b0819f30cb
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/heap-statistics-collector-unittest.cc
@@ -0,0 +1,130 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/heap-statistics-collector.h"
+
+#include "include/cppgc/heap-statistics.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/globals.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+class HeapStatisticsCollectorTest : public testing::TestWithHeap {};
+
+TEST_F(HeapStatisticsCollectorTest, EmptyHeapBriefStatistics) {
+ HeapStatistics brief_stats = Heap::From(GetHeap())->CollectStatistics(
+ HeapStatistics::DetailLevel::kBrief);
+ EXPECT_EQ(HeapStatistics::DetailLevel::kBrief, brief_stats.detail_level);
+ EXPECT_EQ(0u, brief_stats.used_size_bytes);
+  EXPECT_EQ(0u, brief_stats.physical_size_bytes);
+ EXPECT_TRUE(brief_stats.space_stats.empty());
+}
+
+TEST_F(HeapStatisticsCollectorTest, EmptyHeapDetailedStatistics) {
+ HeapStatistics detailed_stats = Heap::From(GetHeap())->CollectStatistics(
+ HeapStatistics::DetailLevel::kDetailed);
+ EXPECT_EQ(HeapStatistics::DetailLevel::kDetailed,
+ detailed_stats.detail_level);
+ EXPECT_EQ(0u, detailed_stats.used_size_bytes);
+  EXPECT_EQ(0u, detailed_stats.physical_size_bytes);
+ EXPECT_EQ(RawHeap::kNumberOfRegularSpaces, detailed_stats.space_stats.size());
+ for (HeapStatistics::SpaceStatistics& space_stats :
+ detailed_stats.space_stats) {
+ EXPECT_EQ(0u, space_stats.used_size_bytes);
+    EXPECT_EQ(0u, space_stats.physical_size_bytes);
+ EXPECT_TRUE(space_stats.page_stats.empty());
+ if (space_stats.name == "LargePageSpace") {
+ // Large page space has no free list.
+ EXPECT_TRUE(space_stats.free_list_stats.bucket_size.empty());
+ EXPECT_TRUE(space_stats.free_list_stats.free_count.empty());
+ EXPECT_TRUE(space_stats.free_list_stats.free_size.empty());
+ } else {
+ EXPECT_EQ(kPageSizeLog2, space_stats.free_list_stats.bucket_size.size());
+ EXPECT_EQ(kPageSizeLog2, space_stats.free_list_stats.free_count.size());
+ EXPECT_EQ(kPageSizeLog2, space_stats.free_list_stats.free_size.size());
+ }
+ }
+}
+
+namespace {
+template <size_t Size>
+class GCed : public GarbageCollected<GCed<Size>> {
+ public:
+ void Trace(Visitor*) const {}
+
+ private:
+ char array_[Size];
+};
+} // namespace
+
+TEST_F(HeapStatisticsCollectorTest, NonEmptyNormalPage) {
+ MakeGarbageCollected<GCed<1>>(GetHeap()->GetAllocationHandle());
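+  // The expected used size covers the 1-byte payload plus the object header,
+  // rounded up to the allocation granularity.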
+ static constexpr size_t used_size =
+ RoundUp<kAllocationGranularity>(1 + sizeof(HeapObjectHeader));
+ HeapStatistics detailed_stats = Heap::From(GetHeap())->CollectStatistics(
+ HeapStatistics::DetailLevel::kDetailed);
+ EXPECT_EQ(HeapStatistics::DetailLevel::kDetailed,
+ detailed_stats.detail_level);
+ EXPECT_EQ(kPageSize, detailed_stats.physical_size_bytes);
+ EXPECT_EQ(used_size, detailed_stats.used_size_bytes);
+ EXPECT_EQ(RawHeap::kNumberOfRegularSpaces, detailed_stats.space_stats.size());
+ bool found_non_empty_space = false;
+ for (const HeapStatistics::SpaceStatistics& space_stats :
+ detailed_stats.space_stats) {
+ if (space_stats.page_stats.empty()) {
+ EXPECT_EQ(0u, space_stats.physical_size_bytes);
+ EXPECT_EQ(0u, space_stats.used_size_bytes);
+ continue;
+ }
+ EXPECT_NE("LargePageSpace", space_stats.name);
+ EXPECT_FALSE(found_non_empty_space);
+ found_non_empty_space = true;
+ EXPECT_EQ(kPageSize, space_stats.physical_size_bytes);
+ EXPECT_EQ(used_size, space_stats.used_size_bytes);
+ EXPECT_EQ(1u, space_stats.page_stats.size());
+ EXPECT_EQ(kPageSize, space_stats.page_stats.back().physical_size_bytes);
+ EXPECT_EQ(used_size, space_stats.page_stats.back().used_size_bytes);
+ }
+ EXPECT_TRUE(found_non_empty_space);
+}
+
+TEST_F(HeapStatisticsCollectorTest, NonEmptyLargePage) {
+ MakeGarbageCollected<GCed<kLargeObjectSizeThreshold>>(
+ GetHeap()->GetAllocationHandle());
+ static constexpr size_t used_size = RoundUp<kAllocationGranularity>(
+ kLargeObjectSizeThreshold + sizeof(HeapObjectHeader));
+ static constexpr size_t physical_size =
+ RoundUp<kAllocationGranularity>(used_size + sizeof(LargePage));
+ HeapStatistics detailed_stats = Heap::From(GetHeap())->CollectStatistics(
+ HeapStatistics::DetailLevel::kDetailed);
+ EXPECT_EQ(HeapStatistics::DetailLevel::kDetailed,
+ detailed_stats.detail_level);
+ EXPECT_EQ(physical_size, detailed_stats.physical_size_bytes);
+ EXPECT_EQ(used_size, detailed_stats.used_size_bytes);
+ EXPECT_EQ(RawHeap::kNumberOfRegularSpaces, detailed_stats.space_stats.size());
+ bool found_non_empty_space = false;
+ for (const HeapStatistics::SpaceStatistics& space_stats :
+ detailed_stats.space_stats) {
+ if (space_stats.page_stats.empty()) {
+ EXPECT_EQ(0u, space_stats.physical_size_bytes);
+ EXPECT_EQ(0u, space_stats.used_size_bytes);
+ continue;
+ }
+ EXPECT_EQ("LargePageSpace", space_stats.name);
+ EXPECT_FALSE(found_non_empty_space);
+ found_non_empty_space = true;
+ EXPECT_EQ(physical_size, space_stats.physical_size_bytes);
+ EXPECT_EQ(used_size, space_stats.used_size_bytes);
+ EXPECT_EQ(1u, space_stats.page_stats.size());
+ EXPECT_EQ(physical_size, space_stats.page_stats.back().physical_size_bytes);
+ EXPECT_EQ(used_size, space_stats.page_stats.back().used_size_bytes);
+ }
+ EXPECT_TRUE(found_non_empty_space);
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/heap-unittest.cc b/deps/v8/test/unittests/heap/cppgc/heap-unittest.cc
index 694d031dda..f664afdd17 100644
--- a/deps/v8/test/unittests/heap/cppgc/heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/heap-unittest.cc
@@ -9,6 +9,9 @@
#include <numeric>
#include "include/cppgc/allocation.h"
+#include "include/cppgc/heap-consistency.h"
+#include "include/cppgc/persistent.h"
+#include "include/cppgc/prefinalizer.h"
#include "src/heap/cppgc/globals.h"
#include "test/unittests/heap/cppgc/tests.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -30,6 +33,8 @@ class GCHeapTest : public testing::TestWithHeap {
}
};
+class GCHeapDeathTest : public GCHeapTest {};
+
class Foo : public GarbageCollected<Foo> {
public:
static size_t destructor_callcount;
@@ -92,7 +97,7 @@ TEST_F(GCHeapTest, ObjectPayloadSize) {
Heap::From(GetHeap())->CollectGarbage(
GarbageCollector::Config::ConservativeAtomicConfig());
- Heap::NoGCScope no_gc_scope(*Heap::From(GetHeap()));
+ subtle::NoGarbageCollectionScope no_gc(*Heap::From(GetHeap()));
for (size_t k = 0; k < kNumberOfObjectsPerArena; ++k) {
MakeGarbageCollected<GCed<kObjectSizes[0]>>(GetAllocationHandle());
@@ -153,5 +158,169 @@ TEST_F(GCHeapTest, AllocatedSizeDependOnAdditionalBytes) {
HeapObjectHeader::FromPayload(object_with_more_bytes).GetSize());
}
+TEST_F(GCHeapTest, Epoch) {
+ const size_t epoch_before = internal::Heap::From(GetHeap())->epoch();
+ PreciseGC();
+ const size_t epoch_after_gc = internal::Heap::From(GetHeap())->epoch();
+ EXPECT_EQ(epoch_after_gc, epoch_before + 1);
+}
+
+TEST_F(GCHeapTest, NoGarbageCollectionScope) {
+ const size_t epoch_before = internal::Heap::From(GetHeap())->epoch();
+ {
+ subtle::NoGarbageCollectionScope scope(GetHeap()->GetHeapHandle());
+ PreciseGC();
+ }
+ const size_t epoch_after_gc = internal::Heap::From(GetHeap())->epoch();
+ EXPECT_EQ(epoch_after_gc, epoch_before);
+}
+
+TEST_F(GCHeapTest, IsGarbageCollectionAllowed) {
+ EXPECT_TRUE(
+ subtle::DisallowGarbageCollectionScope::IsGarbageCollectionAllowed(
+ GetHeap()->GetHeapHandle()));
+ {
+ subtle::DisallowGarbageCollectionScope disallow_gc(*Heap::From(GetHeap()));
+ EXPECT_FALSE(
+ subtle::DisallowGarbageCollectionScope::IsGarbageCollectionAllowed(
+ GetHeap()->GetHeapHandle()));
+ }
+}
+
+TEST_F(GCHeapTest, IsMarking) {
+ GarbageCollector::Config config = GarbageCollector::Config::
+ PreciseIncrementalMarkingConcurrentSweepingConfig();
+ auto* heap = Heap::From(GetHeap());
+ EXPECT_FALSE(subtle::HeapState::IsMarking(*heap));
+ heap->StartIncrementalGarbageCollection(config);
+ EXPECT_TRUE(subtle::HeapState::IsMarking(*heap));
+ heap->FinalizeIncrementalGarbageCollectionIfRunning(config);
+ EXPECT_FALSE(subtle::HeapState::IsMarking(*heap));
+ heap->AsBase().sweeper().FinishIfRunning();
+ EXPECT_FALSE(subtle::HeapState::IsMarking(*heap));
+}
+
+TEST_F(GCHeapTest, IsSweeping) {
+ GarbageCollector::Config config = GarbageCollector::Config::
+ PreciseIncrementalMarkingConcurrentSweepingConfig();
+ auto* heap = Heap::From(GetHeap());
+ EXPECT_FALSE(subtle::HeapState::IsSweeping(*heap));
+ heap->StartIncrementalGarbageCollection(config);
+ EXPECT_FALSE(subtle::HeapState::IsSweeping(*heap));
+ heap->FinalizeIncrementalGarbageCollectionIfRunning(config);
+ EXPECT_TRUE(subtle::HeapState::IsSweeping(*heap));
+ heap->AsBase().sweeper().FinishIfRunning();
+ EXPECT_FALSE(subtle::HeapState::IsSweeping(*heap));
+}
+
+namespace {
+
+class ExpectAtomicPause final : public GarbageCollected<ExpectAtomicPause> {
+ CPPGC_USING_PRE_FINALIZER(ExpectAtomicPause, PreFinalizer);
+
+ public:
+ explicit ExpectAtomicPause(HeapHandle& handle) : handle_(handle) {}
+ ~ExpectAtomicPause() {
+ EXPECT_TRUE(subtle::HeapState::IsInAtomicPause(handle_));
+ }
+ void PreFinalizer() {
+ EXPECT_TRUE(subtle::HeapState::IsInAtomicPause(handle_));
+ }
+ void Trace(Visitor*) const {}
+
+ private:
+ HeapHandle& handle_;
+};
+
+} // namespace
+
+TEST_F(GCHeapTest, IsInAtomicPause) {
+ GarbageCollector::Config config =
+ GarbageCollector::Config::PreciseIncrementalConfig();
+ auto* heap = Heap::From(GetHeap());
+ MakeGarbageCollected<ExpectAtomicPause>(heap->object_allocator(), *heap);
+ EXPECT_FALSE(subtle::HeapState::IsInAtomicPause(*heap));
+ heap->StartIncrementalGarbageCollection(config);
+ EXPECT_FALSE(subtle::HeapState::IsInAtomicPause(*heap));
+ heap->FinalizeIncrementalGarbageCollectionIfRunning(config);
+ EXPECT_FALSE(subtle::HeapState::IsInAtomicPause(*heap));
+ heap->AsBase().sweeper().FinishIfRunning();
+ EXPECT_FALSE(subtle::HeapState::IsInAtomicPause(*heap));
+}
+
+TEST_F(GCHeapTest, TerminateEmptyHeap) { Heap::From(GetHeap())->Terminate(); }
+
+TEST_F(GCHeapTest, TerminateClearsPersistent) {
+ Persistent<Foo> foo = MakeGarbageCollected<Foo>(GetAllocationHandle());
+ EXPECT_TRUE(foo.Get());
+ Heap::From(GetHeap())->Terminate();
+ EXPECT_FALSE(foo.Get());
+}
+
+TEST_F(GCHeapTest, TerminateInvokesDestructor) {
+ Persistent<Foo> foo = MakeGarbageCollected<Foo>(GetAllocationHandle());
+ EXPECT_EQ(0u, Foo::destructor_callcount);
+ Heap::From(GetHeap())->Terminate();
+ EXPECT_EQ(1u, Foo::destructor_callcount);
+}
+
+namespace {
+
+class Cloner final : public GarbageCollected<Cloner> {
+ public:
+ static size_t destructor_count;
+
+ Cloner(cppgc::AllocationHandle& handle, size_t count)
+ : handle_(handle), count_(count) {}
+
+ ~Cloner() {
+ EXPECT_FALSE(new_instance_);
+ destructor_count++;
+ if (count_) {
+ new_instance_ =
+ MakeGarbageCollected<Cloner>(handle_, handle_, count_ - 1);
+ }
+ }
+
+ void Trace(Visitor*) const {}
+
+ private:
+ static Persistent<Cloner> new_instance_;
+
+ cppgc::AllocationHandle& handle_;
+ size_t count_;
+};
+
+Persistent<Cloner> Cloner::new_instance_;
+size_t Cloner::destructor_count;
+
+} // namespace
+
+TEST_F(GCHeapTest, TerminateReclaimsNewState) {
+ Persistent<Cloner> cloner = MakeGarbageCollected<Cloner>(
+ GetAllocationHandle(), GetAllocationHandle(), 1);
+ Cloner::destructor_count = 0;
+ EXPECT_TRUE(cloner.Get());
+ Heap::From(GetHeap())->Terminate();
+ EXPECT_FALSE(cloner.Get());
+ EXPECT_EQ(2u, Cloner::destructor_count);
+}
+
+TEST_F(GCHeapDeathTest, TerminateProhibitsAllocation) {
+ Heap::From(GetHeap())->Terminate();
+ EXPECT_DEATH_IF_SUPPORTED(MakeGarbageCollected<Foo>(GetAllocationHandle()),
+ "");
+}
+
+TEST_F(GCHeapDeathTest, LargeChainOfNewStates) {
+ Persistent<Cloner> cloner = MakeGarbageCollected<Cloner>(
+ GetAllocationHandle(), GetAllocationHandle(), 1000);
+ Cloner::destructor_count = 0;
+ EXPECT_TRUE(cloner.Get());
+ // Terminate() requires destructors to stop creating new state within a few
+ // garbage collections.
+ EXPECT_DEATH_IF_SUPPORTED(Heap::From(GetHeap())->Terminate(), "");
+}
+
} // namespace internal
} // namespace cppgc
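The tests above exercise the new public cppgc::subtle scopes that replace the internal Heap::NoGCScope, together with the subtle::HeapState queries. A minimal usage sketch, assuming an embedder that already holds a cppgc::HeapHandle (the RebuildWithoutGC name is illustrative only):

    // Sketch: keep garbage collection out of a critical region.
    void RebuildWithoutGC(cppgc::HeapHandle& handle) {
      // GCs requested while this scope is alive are not executed; the Epoch and
      // NoGarbageCollectionScope tests above check this via the heap epoch.
      // DisallowGarbageCollectionScope would instead treat a GC here as a
      // programming error, observable through
      // DisallowGarbageCollectionScope::IsGarbageCollectionAllowed(handle).
      cppgc::subtle::NoGarbageCollectionScope no_gc(handle);
      if (!cppgc::subtle::HeapState::IsMarking(handle) &&
          !cppgc::subtle::HeapState::IsSweeping(handle)) {
        // ... mutate managed objects without observing a GC phase change ...
      }
    }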
diff --git a/deps/v8/test/unittests/heap/cppgc/incremental-marking-schedule-unittest.cc b/deps/v8/test/unittests/heap/cppgc/incremental-marking-schedule-unittest.cc
index 0c811668e8..a98ca6ac30 100644
--- a/deps/v8/test/unittests/heap/cppgc/incremental-marking-schedule-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/incremental-marking-schedule-unittest.cc
@@ -38,7 +38,7 @@ TEST_F(IncrementalMarkingScheduleTest, NoTimePassedReturnsMinimumDuration) {
IncrementalMarkingSchedule schedule;
schedule.NotifyIncrementalMarkingStart();
// Add incrementally marked bytes to tell oracle this is not the first step.
- schedule.UpdateIncrementalMarkedBytes(
+ schedule.UpdateMutatorThreadMarkedBytes(
IncrementalMarkingSchedule::kMinimumMarkedBytesPerIncrementalStep);
schedule.SetElapsedTimeForTesting(0);
EXPECT_EQ(IncrementalMarkingSchedule::kMinimumMarkedBytesPerIncrementalStep,
@@ -50,7 +50,7 @@ TEST_F(IncrementalMarkingScheduleTest, OracleDoesntExccedMaximumStepDuration) {
schedule.NotifyIncrementalMarkingStart();
// Add incrementally marked bytes to tell oracle this is not the first step.
static constexpr size_t kMarkedBytes = 1;
- schedule.UpdateIncrementalMarkedBytes(kMarkedBytes);
+ schedule.UpdateMutatorThreadMarkedBytes(kMarkedBytes);
schedule.SetElapsedTimeForTesting(
IncrementalMarkingSchedule::kEstimatedMarkingTimeMs);
EXPECT_EQ(kObjectSize - kMarkedBytes,
@@ -61,7 +61,7 @@ TEST_F(IncrementalMarkingScheduleTest, AheadOfScheduleReturnsMinimumDuration) {
IncrementalMarkingSchedule schedule;
schedule.NotifyIncrementalMarkingStart();
// Add incrementally marked bytes to tell oracle this is not the first step.
- schedule.UpdateIncrementalMarkedBytes(
+ schedule.UpdateMutatorThreadMarkedBytes(
IncrementalMarkingSchedule::kMinimumMarkedBytesPerIncrementalStep);
schedule.AddConcurrentlyMarkedBytes(0.6 * kObjectSize);
schedule.SetElapsedTimeForTesting(
@@ -73,7 +73,7 @@ TEST_F(IncrementalMarkingScheduleTest, AheadOfScheduleReturnsMinimumDuration) {
TEST_F(IncrementalMarkingScheduleTest, BehindScheduleReturnsCorrectDuration) {
IncrementalMarkingSchedule schedule;
schedule.NotifyIncrementalMarkingStart();
- schedule.UpdateIncrementalMarkedBytes(0.1 * kObjectSize);
+ schedule.UpdateMutatorThreadMarkedBytes(0.1 * kObjectSize);
schedule.AddConcurrentlyMarkedBytes(0.25 * kObjectSize);
schedule.SetElapsedTimeForTesting(
0.5 * IncrementalMarkingSchedule::kEstimatedMarkingTimeMs);
diff --git a/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc b/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc
index 5bb2814705..eeb4b74b6d 100644
--- a/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc
@@ -294,26 +294,26 @@ class IncrementalMarkingTest : public testing::TestWithHeap {
}
void FinishMarking() {
- marker_->FinishMarking(MarkingConfig::StackState::kMayContainHeapPointers);
+ GetMarkerRef()->FinishMarking(
+ MarkingConfig::StackState::kMayContainHeapPointers);
// Pretend to finish sweeping as StatsCollector verifies that Notify*
// methods are called in the right order.
+ GetMarkerRef().reset();
Heap::From(GetHeap())->stats_collector()->NotifySweepingCompleted();
}
void InitializeMarker(HeapBase& heap, cppgc::Platform* platform,
MarkingConfig config) {
- marker_ =
+ GetMarkerRef() =
MarkerFactory::CreateAndStartMarking<Marker>(heap, platform, config);
}
- Marker* marker() const { return marker_.get(); }
+ MarkerBase* marker() const { return GetMarkerRef().get(); }
private:
bool SingleStep(MarkingConfig::StackState stack_state) {
- return marker_->IncrementalMarkingStepForTesting(stack_state);
+ return GetMarkerRef()->IncrementalMarkingStepForTesting(stack_state);
}
-
- std::unique_ptr<Marker> marker_;
};
constexpr IncrementalMarkingTest::MarkingConfig
@@ -348,9 +348,8 @@ TEST_F(IncrementalMarkingTest,
InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get(),
IncrementalPreciseMarkingConfig);
root->SetChild(MakeGarbageCollected<GCed>(GetAllocationHandle()));
- HeapObjectHeader& header = HeapObjectHeader::FromPayload(root->child());
- EXPECT_FALSE(header.IsMarked());
FinishSteps(MarkingConfig::StackState::kNoHeapPointers);
+ HeapObjectHeader& header = HeapObjectHeader::FromPayload(root->child());
EXPECT_TRUE(header.IsMarked());
FinishMarking();
}
diff --git a/deps/v8/test/unittests/heap/cppgc/member-unittest.cc b/deps/v8/test/unittests/heap/cppgc/member-unittest.cc
index 6d2161112b..67c51f10bd 100644
--- a/deps/v8/test/unittests/heap/cppgc/member-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/member-unittest.cc
@@ -98,6 +98,12 @@ void EmptyTest() {
EXPECT_EQ(nullptr, empty.Get());
EXPECT_EQ(nullptr, empty.Release());
}
+ {
+ // Move-constructs empty from another Member that is created from nullptr.
+ MemberType<const GCed> empty = nullptr;
+ EXPECT_EQ(nullptr, empty.Get());
+ EXPECT_EQ(nullptr, empty.Release());
+ }
}
TEST_F(MemberTest, Empty) {
diff --git a/deps/v8/test/unittests/heap/cppgc/metric-recorder-unittest.cc b/deps/v8/test/unittests/heap/cppgc/metric-recorder-unittest.cc
new file mode 100644
index 0000000000..bd5200f939
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/metric-recorder-unittest.cc
@@ -0,0 +1,324 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/metric-recorder.h"
+
+#include "src/heap/cppgc/stats-collector.h"
+#include "test/unittests/heap/cppgc/tests.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+class MetricRecorderImpl final : public MetricRecorder {
+ public:
+ void AddMainThreadEvent(const CppGCFullCycle& event) final {
+ CppGCFullCycle_event = event;
+ CppGCFullCycle_callcount++;
+ }
+ void AddMainThreadEvent(const CppGCMainThreadIncrementalMark& event) final {
+ CppGCMainThreadIncrementalMark_event = event;
+ CppGCMainThreadIncrementalMark_callcount++;
+ }
+ void AddMainThreadEvent(const CppGCMainThreadIncrementalSweep& event) final {
+ CppGCMainThreadIncrementalSweep_event = event;
+ CppGCMainThreadIncrementalSweep_callcount++;
+ }
+
+ static size_t CppGCFullCycle_callcount;
+ static CppGCFullCycle CppGCFullCycle_event;
+ static size_t CppGCMainThreadIncrementalMark_callcount;
+ static CppGCMainThreadIncrementalMark CppGCMainThreadIncrementalMark_event;
+ static size_t CppGCMainThreadIncrementalSweep_callcount;
+ static CppGCMainThreadIncrementalSweep CppGCMainThreadIncrementalSweep_event;
+};
+
+// static
+size_t MetricRecorderImpl::CppGCFullCycle_callcount = 0u;
+MetricRecorderImpl::CppGCFullCycle MetricRecorderImpl::CppGCFullCycle_event;
+size_t MetricRecorderImpl::CppGCMainThreadIncrementalMark_callcount = 0u;
+MetricRecorderImpl::CppGCMainThreadIncrementalMark
+ MetricRecorderImpl::CppGCMainThreadIncrementalMark_event;
+size_t MetricRecorderImpl::CppGCMainThreadIncrementalSweep_callcount = 0u;
+MetricRecorderImpl::CppGCMainThreadIncrementalSweep
+ MetricRecorderImpl::CppGCMainThreadIncrementalSweep_event;
+
+class MetricRecorderTest : public testing::TestWithHeap {
+ public:
+ MetricRecorderTest() : stats(Heap::From(GetHeap())->stats_collector()) {
+ stats->SetMetricRecorderForTesting(std::make_unique<MetricRecorderImpl>());
+ }
+
+ void StartGC() {
+ stats->NotifyMarkingStarted(
+ GarbageCollector::Config::CollectionType::kMajor,
+ GarbageCollector::Config::IsForcedGC::kNotForced);
+ }
+ void EndGC(size_t marked_bytes) {
+ stats->NotifyMarkingCompleted(marked_bytes);
+ stats->NotifySweepingCompleted();
+ }
+
+ StatsCollector* stats;
+};
+} // namespace
+
+TEST_F(MetricRecorderTest, IncrementalScopesReportedImmediately) {
+ MetricRecorderImpl::CppGCFullCycle_callcount = 0u;
+ MetricRecorderImpl::CppGCMainThreadIncrementalMark_callcount = 0u;
+ MetricRecorderImpl::CppGCMainThreadIncrementalSweep_callcount = 0u;
+ StartGC();
+ {
+ EXPECT_EQ(0u, MetricRecorderImpl::CppGCMainThreadIncrementalMark_callcount);
+ {
+ StatsCollector::EnabledScope scope(
+ Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kIncrementalMark);
+ scope.DecreaseStartTimeForTesting(
+ v8::base::TimeDelta::FromMilliseconds(1));
+ }
+ EXPECT_EQ(1u, MetricRecorderImpl::CppGCMainThreadIncrementalMark_callcount);
+ EXPECT_LT(
+ 0u,
+ MetricRecorderImpl::CppGCMainThreadIncrementalMark_event.duration_us);
+ }
+ {
+ EXPECT_EQ(0u,
+ MetricRecorderImpl::CppGCMainThreadIncrementalSweep_callcount);
+ {
+ StatsCollector::EnabledScope scope(
+ Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kIncrementalSweep);
+ scope.DecreaseStartTimeForTesting(
+ v8::base::TimeDelta::FromMilliseconds(1));
+ }
+ EXPECT_EQ(1u,
+ MetricRecorderImpl::CppGCMainThreadIncrementalSweep_callcount);
+ EXPECT_LT(
+ 0u,
+ MetricRecorderImpl::CppGCMainThreadIncrementalSweep_event.duration_us);
+ }
+ EXPECT_EQ(0u, MetricRecorderImpl::CppGCFullCycle_callcount);
+ EndGC(0);
+}
+
+TEST_F(MetricRecorderTest, NonIncrementlaScopesNotReportedImmediately) {
+ MetricRecorderImpl::CppGCFullCycle_callcount = 0u;
+ MetricRecorderImpl::CppGCMainThreadIncrementalMark_callcount = 0u;
+ MetricRecorderImpl::CppGCMainThreadIncrementalSweep_callcount = 0u;
+ StartGC();
+ {
+ StatsCollector::EnabledScope scope(Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kAtomicMark);
+ }
+ {
+ StatsCollector::EnabledScope scope(Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kAtomicWeak);
+ }
+ {
+ StatsCollector::EnabledScope scope(Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kAtomicCompact);
+ }
+ {
+ StatsCollector::EnabledScope scope(Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kAtomicSweep);
+ }
+ {
+ StatsCollector::EnabledConcurrentScope scope(
+ Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kConcurrentMark);
+ }
+ {
+ StatsCollector::EnabledConcurrentScope scope(
+ Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kConcurrentSweep);
+ }
+ EXPECT_EQ(0u, MetricRecorderImpl::CppGCMainThreadIncrementalMark_callcount);
+ EXPECT_EQ(0u, MetricRecorderImpl::CppGCMainThreadIncrementalSweep_callcount);
+ EXPECT_EQ(0u, MetricRecorderImpl::CppGCFullCycle_callcount);
+ EndGC(0);
+}
+
+TEST_F(MetricRecorderTest, CycleEndMetricsReportedOnGcEnd) {
+ MetricRecorderImpl::CppGCFullCycle_callcount = 0u;
+ MetricRecorderImpl::CppGCMainThreadIncrementalMark_callcount = 0u;
+ MetricRecorderImpl::CppGCMainThreadIncrementalSweep_callcount = 0u;
+ StartGC();
+ EndGC(0);
+ EXPECT_EQ(0u, MetricRecorderImpl::CppGCMainThreadIncrementalMark_callcount);
+ EXPECT_EQ(0u, MetricRecorderImpl::CppGCMainThreadIncrementalSweep_callcount);
+ EXPECT_EQ(1u, MetricRecorderImpl::CppGCFullCycle_callcount);
+}
+
+TEST_F(MetricRecorderTest, CycleEndHistogramReportsCorrectValues) {
+ StartGC();
+ EndGC(1000);
+ StartGC();
+ {
+ StatsCollector::EnabledScope scope(Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kIncrementalMark);
+ scope.DecreaseStartTimeForTesting(
+ v8::base::TimeDelta::FromMilliseconds(10));
+ }
+ {
+ StatsCollector::EnabledScope scope(Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kIncrementalSweep);
+ scope.DecreaseStartTimeForTesting(
+ v8::base::TimeDelta::FromMilliseconds(20));
+ }
+ {
+ StatsCollector::EnabledScope scope(Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kAtomicMark);
+ scope.DecreaseStartTimeForTesting(
+ v8::base::TimeDelta::FromMilliseconds(30));
+ }
+ {
+ StatsCollector::EnabledScope scope(Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kAtomicWeak);
+ scope.DecreaseStartTimeForTesting(
+ v8::base::TimeDelta::FromMilliseconds(50));
+ }
+ {
+ StatsCollector::EnabledScope scope(Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kAtomicCompact);
+ scope.DecreaseStartTimeForTesting(
+ v8::base::TimeDelta::FromMilliseconds(60));
+ }
+ {
+ StatsCollector::EnabledScope scope(Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kAtomicSweep);
+ scope.DecreaseStartTimeForTesting(
+ v8::base::TimeDelta::FromMilliseconds(70));
+ }
+ {
+ StatsCollector::EnabledConcurrentScope scope(
+ Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kConcurrentMark);
+ scope.DecreaseStartTimeForTesting(
+ v8::base::TimeDelta::FromMilliseconds(80));
+ }
+ {
+ StatsCollector::EnabledConcurrentScope scope(
+ Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kConcurrentSweep);
+ scope.DecreaseStartTimeForTesting(
+ v8::base::TimeDelta::FromMilliseconds(100));
+ }
+ EndGC(300);
+ // Check durations.
+ static constexpr int64_t kDurationComparisonTolerance = 500;
+ EXPECT_LT(std::abs(MetricRecorderImpl::CppGCFullCycle_event
+ .main_thread_incremental.mark_duration_us -
+ 10000),
+ kDurationComparisonTolerance);
+ EXPECT_LT(std::abs(MetricRecorderImpl::CppGCFullCycle_event
+ .main_thread_incremental.sweep_duration_us -
+ 20000),
+ kDurationComparisonTolerance);
+ EXPECT_LT(std::abs(MetricRecorderImpl::CppGCFullCycle_event.main_thread_atomic
+ .mark_duration_us -
+ 30000),
+ kDurationComparisonTolerance);
+ EXPECT_LT(std::abs(MetricRecorderImpl::CppGCFullCycle_event.main_thread_atomic
+ .weak_duration_us -
+ 50000),
+ kDurationComparisonTolerance);
+ EXPECT_LT(std::abs(MetricRecorderImpl::CppGCFullCycle_event.main_thread_atomic
+ .compact_duration_us -
+ 60000),
+ kDurationComparisonTolerance);
+ EXPECT_LT(std::abs(MetricRecorderImpl::CppGCFullCycle_event.main_thread_atomic
+ .sweep_duration_us -
+ 70000),
+ kDurationComparisonTolerance);
+ EXPECT_LT(std::abs(MetricRecorderImpl::CppGCFullCycle_event.main_thread
+ .mark_duration_us -
+ 40000),
+ kDurationComparisonTolerance);
+ EXPECT_LT(std::abs(MetricRecorderImpl::CppGCFullCycle_event.main_thread
+ .weak_duration_us -
+ 50000),
+ kDurationComparisonTolerance);
+ EXPECT_LT(std::abs(MetricRecorderImpl::CppGCFullCycle_event.main_thread
+ .compact_duration_us -
+ 60000),
+ kDurationComparisonTolerance);
+ EXPECT_LT(std::abs(MetricRecorderImpl::CppGCFullCycle_event.main_thread
+ .sweep_duration_us -
+ 90000),
+ kDurationComparisonTolerance);
+ EXPECT_LT(
+ std::abs(MetricRecorderImpl::CppGCFullCycle_event.total.mark_duration_us -
+ 120000),
+ kDurationComparisonTolerance);
+ EXPECT_LT(
+ std::abs(MetricRecorderImpl::CppGCFullCycle_event.total.weak_duration_us -
+ 50000),
+ kDurationComparisonTolerance);
+ EXPECT_LT(
+ std::abs(
+ MetricRecorderImpl::CppGCFullCycle_event.total.compact_duration_us -
+ 60000),
+ kDurationComparisonTolerance);
+ EXPECT_LT(
+ std::abs(
+ MetricRecorderImpl::CppGCFullCycle_event.total.sweep_duration_us -
+ 190000),
+ kDurationComparisonTolerance);
+ // Check collection rate and efficiency.
+ EXPECT_DOUBLE_EQ(
+ 0.3, MetricRecorderImpl::CppGCFullCycle_event.collection_rate_in_percent);
+ static constexpr double kEfficiencyComparisonTolerance = 0.00001;
+ EXPECT_LT(
+ std::abs(
+ MetricRecorderImpl::CppGCFullCycle_event.efficiency_in_bytes_per_us -
+ (700.0 / (120000 + 50000 + 60000 + 190000))),
+ kEfficiencyComparisonTolerance);
+ EXPECT_LT(std::abs(MetricRecorderImpl::CppGCFullCycle_event
+ .main_thread_efficiency_in_bytes_per_us -
+ (700.0 / (40000 + 50000 + 60000 + 90000))),
+ kEfficiencyComparisonTolerance);
+}
+
+TEST_F(MetricRecorderTest, ObjectSizeMetricsNoAllocations) {
+ // Populate previous event.
+ StartGC();
+ EndGC(1000);
+ // Populate current event.
+ StartGC();
+ EndGC(800);
+ EXPECT_EQ(1000u,
+ MetricRecorderImpl::CppGCFullCycle_event.objects.before_bytes);
+ EXPECT_EQ(800u, MetricRecorderImpl::CppGCFullCycle_event.objects.after_bytes);
+ EXPECT_EQ(200u, MetricRecorderImpl::CppGCFullCycle_event.objects.freed_bytes);
+ EXPECT_EQ(0u, MetricRecorderImpl::CppGCFullCycle_event.memory.before_bytes);
+ EXPECT_EQ(0u, MetricRecorderImpl::CppGCFullCycle_event.memory.after_bytes);
+ EXPECT_EQ(0u, MetricRecorderImpl::CppGCFullCycle_event.memory.freed_bytes);
+}
+
+TEST_F(MetricRecorderTest, ObjectSizeMetricsWithAllocations) {
+ // Populate previous event.
+ StartGC();
+ EndGC(1000);
+ // Populate current event.
+ StartGC();
+ stats->NotifyAllocation(300);
+ stats->NotifyAllocatedMemory(1400);
+ stats->NotifyFreedMemory(700);
+ stats->NotifyMarkingCompleted(800);
+ stats->NotifyAllocation(150);
+ stats->NotifyAllocatedMemory(1000);
+ stats->NotifyFreedMemory(400);
+ stats->NotifySweepingCompleted();
+ EXPECT_EQ(1300u,
+ MetricRecorderImpl::CppGCFullCycle_event.objects.before_bytes);
+ EXPECT_EQ(800, MetricRecorderImpl::CppGCFullCycle_event.objects.after_bytes);
+ EXPECT_EQ(500u, MetricRecorderImpl::CppGCFullCycle_event.objects.freed_bytes);
+ EXPECT_EQ(700u, MetricRecorderImpl::CppGCFullCycle_event.memory.before_bytes);
+ EXPECT_EQ(300u, MetricRecorderImpl::CppGCFullCycle_event.memory.after_bytes);
+ EXPECT_EQ(400u, MetricRecorderImpl::CppGCFullCycle_event.memory.freed_bytes);
+}
+
+} // namespace internal
+} // namespace cppgc
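The new metric-recorder-unittest.cc above drives MetricRecorder through the StatsCollector. As a rough sketch of a recorder an embedder-side component might install (field names follow the assertions above; the printf logging and the LoggingMetricRecorder name are assumptions, not part of the patch):

    #include <cstdio>

    #include "src/heap/cppgc/metric-recorder.h"

    class LoggingMetricRecorder final : public cppgc::internal::MetricRecorder {
     public:
      void AddMainThreadEvent(const CppGCFullCycle& event) final {
        // Delivered once per cycle when sweeping completes.
        std::printf("cppgc cycle: %lld us total marking, %zu bytes freed\n",
                    static_cast<long long>(event.total.mark_duration_us),
                    static_cast<size_t>(event.objects.freed_bytes));
      }
      void AddMainThreadEvent(const CppGCMainThreadIncrementalMark& event) final {
        // Delivered immediately when an incremental marking scope closes.
        std::printf("cppgc incremental mark: %lld us\n",
                    static_cast<long long>(event.duration_us));
      }
      void AddMainThreadEvent(const CppGCMainThreadIncrementalSweep& event) final {
        std::printf("cppgc incremental sweep: %lld us\n",
                    static_cast<long long>(event.duration_us));
      }
    };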
diff --git a/deps/v8/test/unittests/heap/cppgc/minor-gc-unittest.cc b/deps/v8/test/unittests/heap/cppgc/minor-gc-unittest.cc
index a023a37f0d..eac88b498a 100644
--- a/deps/v8/test/unittests/heap/cppgc/minor-gc-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/minor-gc-unittest.cc
@@ -89,7 +89,7 @@ TYPED_TEST(MinorGCTestForType, MinorCollection) {
EXPECT_EQ(1u, TestFixture::DestructedObjects());
{
- Heap::NoGCScope no_gc_scope_(*Heap::From(this->GetHeap()));
+ subtle::NoGarbageCollectionScope no_gc_scope(*Heap::From(this->GetHeap()));
Type* prev = nullptr;
for (size_t i = 0; i < 64; ++i) {
@@ -144,7 +144,7 @@ void InterGenerationalPointerTest(MinorGCTest* test, cppgc::Heap* heap) {
Type2* young = nullptr;
{
- Heap::NoGCScope no_gc_scope_(*Heap::From(heap));
+ subtle::NoGarbageCollectionScope no_gc_scope(*Heap::From(heap));
// Allocate young objects.
for (size_t i = 0; i < 64; ++i) {
diff --git a/deps/v8/test/unittests/heap/cppgc/object-size-trait-unittest.cc b/deps/v8/test/unittests/heap/cppgc/object-size-trait-unittest.cc
new file mode 100644
index 0000000000..d43513d9da
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/object-size-trait-unittest.cc
@@ -0,0 +1,51 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/object-size-trait.h"
+
+#include "include/cppgc/allocation.h"
+#include "include/cppgc/garbage-collected.h"
+#include "src/heap/cppgc/heap.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+class ObjectSizeTraitTest : public testing::TestWithHeap {};
+
+class GCed : public GarbageCollected<GCed> {
+ public:
+ void Trace(Visitor*) const {}
+};
+
+class NotGCed {};
+class Mixin : public GarbageCollectedMixin {};
+class UnmanagedMixinWithDouble {
+ protected:
+ virtual void ForceVTable() {}
+};
+class GCedWithMixin : public GarbageCollected<GCedWithMixin>,
+ public UnmanagedMixinWithDouble,
+ public Mixin {};
+
+} // namespace
+
+TEST_F(ObjectSizeTraitTest, GarbageCollected) {
+ auto* obj = cppgc::MakeGarbageCollected<GCed>(GetAllocationHandle());
+ EXPECT_GE(subtle::ObjectSizeTrait<GCed>::GetSize(*obj), sizeof(GCed));
+}
+
+TEST_F(ObjectSizeTraitTest, GarbageCollectedMixin) {
+ auto* obj = cppgc::MakeGarbageCollected<GCedWithMixin>(GetAllocationHandle());
+ Mixin& mixin = static_cast<Mixin&>(*obj);
+ EXPECT_NE(static_cast<void*>(&mixin), obj);
+ EXPECT_GE(subtle::ObjectSizeTrait<Mixin>::GetSize(mixin),
+ sizeof(GCedWithMixin));
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/persistent-family-unittest.cc b/deps/v8/test/unittests/heap/cppgc/persistent-family-unittest.cc
index ae6ee23625..65c3e897ee 100644
--- a/deps/v8/test/unittests/heap/cppgc/persistent-family-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/persistent-family-unittest.cc
@@ -135,7 +135,7 @@ void NullStateCtor(cppgc::Heap* heap) {
}
{
// Runtime null must not allocate an associated node.
- PersistentType<GCed> empty = static_cast<GCed*>(0);
+ PersistentType<GCed> empty = static_cast<GCed*>(nullptr);
EXPECT_EQ(nullptr, empty.Get());
EXPECT_EQ(nullptr, empty.Release());
EXPECT_EQ(0u, GetRegion<Persistent>(heap).NodesInUse());
@@ -167,6 +167,12 @@ void RawCtor(cppgc::Heap* heap) {
EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
}
EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ {
+ PersistentType<const GCed> p = gced;
+ EXPECT_EQ(gced, p.Get());
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
}
TEST_F(PersistentTest, RawCtor) {
@@ -631,6 +637,52 @@ TEST_F(PersistentTest, HeterogeneousConversion) {
HeterogeneousConversion<WeakPersistent, Persistent>(heap);
}
+namespace {
+
+class Parent : public GarbageCollected<Parent> {
+ public:
+ virtual void Trace(Visitor*) const {}
+ void ParentFoo() { /* Dummy method to trigger vtable check on UBSan. */
+ }
+};
+class Child : public Parent {
+ public:
+ void ChildFoo() { /* Dummy method to trigger vtable check on UBSan. */
+ }
+};
+
+template <template <typename> class PersistentType>
+void ImplicitUpcast(cppgc::Heap* heap) {
+ PersistentType<Child> child;
+ PersistentType<Parent> parent = child;
+}
+
+template <template <typename> class PersistentType>
+void ExplicitDowncast(cppgc::Heap* heap) {
+ PersistentType<Parent> parent{
+ MakeGarbageCollected<Child>(heap->GetAllocationHandle())};
+ PersistentType<Child> child = parent.template To<Child>();
+ child->ChildFoo();
+}
+
+} // namespace
+
+TEST_F(PersistentTest, ImplicitUpcast) {
+ auto* heap = GetHeap();
+ ImplicitUpcast<Persistent>(heap);
+ ImplicitUpcast<WeakPersistent>(heap);
+ ImplicitUpcast<subtle::CrossThreadPersistent>(heap);
+ ImplicitUpcast<subtle::WeakCrossThreadPersistent>(heap);
+}
+
+TEST_F(PersistentTest, ExplicitDowncast) {
+ auto* heap = GetHeap();
+ ExplicitDowncast<Persistent>(heap);
+ ExplicitDowncast<WeakPersistent>(heap);
+ ExplicitDowncast<subtle::CrossThreadPersistent>(heap);
+ ExplicitDowncast<subtle::WeakCrossThreadPersistent>(heap);
+}
+
TEST_F(PersistentTest, TraceStrong) {
auto* heap = GetHeap();
static constexpr size_t kItems = 512;
@@ -858,5 +910,23 @@ TEST_F(PersistentTest, PersistentTraceLocation) {
}
}
+namespace {
+class IncompleteType;
+} // namespace
+
+TEST_F(PersistentTest, EmptyPersistentConstructDestructWithoutCompleteType) {
+ // Test ensures that empty constructor and destructor compile without having
+ // a complete type available.
+ Persistent<IncompleteType> p1;
+ WeakPersistent<IncompleteType> p2;
+ subtle::CrossThreadPersistent<IncompleteType> p3;
+ subtle::WeakCrossThreadPersistent<IncompleteType> p4;
+}
+
+TEST_F(PersistentTest, Lock) {
+ subtle::WeakCrossThreadPersistent<GCed> weak;
+ auto strong = weak.Lock();
+}
+
} // namespace internal
} // namespace cppgc
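The ImplicitUpcast, ExplicitDowncast, and Lock tests added above cover the conversions available on the persistent family. A short sketch under the same assumptions (Parent and Child stand in for the GarbageCollected fixtures defined in the test):

    void PersistentConversions(cppgc::AllocationHandle& handle) {
      cppgc::Persistent<Child> child =
          cppgc::MakeGarbageCollected<Child>(handle);
      // Upcasts are implicit for all persistent handle kinds.
      cppgc::Persistent<Parent> parent = child;
      // Downcasts are explicit and go through To<>().
      cppgc::Persistent<Child> again = parent.To<Child>();
      // Weak cross-thread handles are promoted to strong ones via Lock();
      // an empty weak handle simply yields an empty strong handle.
      cppgc::subtle::WeakCrossThreadPersistent<Child> weak;
      auto strong = weak.Lock();
    }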
diff --git a/deps/v8/test/unittests/heap/cppgc/prefinalizer-unittest.cc b/deps/v8/test/unittests/heap/cppgc/prefinalizer-unittest.cc
index de24e591d3..451fce8fd6 100644
--- a/deps/v8/test/unittests/heap/cppgc/prefinalizer-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/prefinalizer-unittest.cc
@@ -171,6 +171,77 @@ TEST_F(PrefinalizerTest, PrefinalizerInvocationPreservesOrder) {
namespace {
+class LinkedNode final : public GarbageCollected<LinkedNode> {
+ public:
+ explicit LinkedNode(LinkedNode* next) : next_(next) {}
+
+ void Trace(Visitor* visitor) const { visitor->Trace(next_); }
+
+ LinkedNode* next() const { return next_; }
+
+ void RemoveNext() {
+ CHECK(next_);
+ next_ = next_->next_;
+ }
+
+ private:
+ Member<LinkedNode> next_;
+};
+
+class MutatingPrefinalizer final
+ : public GarbageCollected<MutatingPrefinalizer> {
+ CPPGC_USING_PRE_FINALIZER(MutatingPrefinalizer, PreFinalizer);
+
+ public:
+ void PreFinalizer() {
+ // Pre-finalizers are generally used to mutate the object graph. The API
+ // does not allow distinguishing between live and dead objects. It is
+ // generally safe to re-write the dead *or* the live object graph. Adding
+ // a dead object to the live graph must not happen.
+ //
+ // RemoveNext() must not trigger a write barrier. In the case all LinkedNode
+ // objects die at the same time, the graph is mutated with a dead object.
+ // This is only safe when the dead object is added to a dead subgraph.
+ parent_node_->RemoveNext();
+ }
+
+ explicit MutatingPrefinalizer(LinkedNode* parent) : parent_node_(parent) {}
+
+ void Trace(Visitor* visitor) const { visitor->Trace(parent_node_); }
+
+ private:
+ Member<LinkedNode> parent_node_;
+};
+
+} // namespace
+
+TEST_F(PrefinalizerTest, PrefinalizerCanRewireGraphWithLiveObjects) {
+ Persistent<LinkedNode> root{MakeGarbageCollected<LinkedNode>(
+ GetAllocationHandle(),
+ MakeGarbageCollected<LinkedNode>(
+ GetAllocationHandle(),
+ MakeGarbageCollected<LinkedNode>(GetAllocationHandle(), nullptr)))};
+ CHECK(root->next());
+ MakeGarbageCollected<MutatingPrefinalizer>(GetAllocationHandle(), root.Get());
+ PreciseGC();
+}
+
+TEST_F(PrefinalizerTest, PrefinalizerCanRewireGraphWithDeadObjects) {
+ Persistent<LinkedNode> root{MakeGarbageCollected<LinkedNode>(
+ GetAllocationHandle(),
+ MakeGarbageCollected<LinkedNode>(
+ GetAllocationHandle(),
+ MakeGarbageCollected<LinkedNode>(GetAllocationHandle(), nullptr)))};
+ CHECK(root->next());
+ MakeGarbageCollected<MutatingPrefinalizer>(GetAllocationHandle(), root.Get());
+ // All LinkedNode objects will die in the following GC. The pre-finalizer may
+ // still operate on them but must not attach them to a live object.
+ root.Clear();
+ PreciseGC();
+}
+
+namespace {
+
class AllocatingPrefinalizer : public GarbageCollected<AllocatingPrefinalizer> {
CPPGC_USING_PRE_FINALIZER(AllocatingPrefinalizer, PreFinalizer);
diff --git a/deps/v8/test/unittests/heap/cppgc/run-all-unittests.cc b/deps/v8/test/unittests/heap/cppgc/run-all-unittests.cc
index cdc862e309..dc30e750cd 100644
--- a/deps/v8/test/unittests/heap/cppgc/run-all-unittests.cc
+++ b/deps/v8/test/unittests/heap/cppgc/run-all-unittests.cc
@@ -2,8 +2,30 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/cppgc/platform.h"
+#include "test/unittests/heap/cppgc/test-platform.h"
#include "testing/gmock/include/gmock/gmock.h"
+namespace {
+
+class DefaultPlatformEnvironment final : public ::testing::Environment {
+ public:
+ DefaultPlatformEnvironment() = default;
+
+ void SetUp() override {
+ platform_ =
+ std::make_unique<cppgc::internal::testing::TestPlatform>(nullptr);
+ cppgc::InitializeProcess(platform_->GetPageAllocator());
+ }
+
+ void TearDown() override { cppgc::ShutdownProcess(); }
+
+ private:
+ std::shared_ptr<cppgc::internal::testing::TestPlatform> platform_;
+};
+
+} // namespace
+
int main(int argc, char** argv) {
// Don't catch SEH exceptions and continue as the following tests might hang
// in a broken environment on Windows.
@@ -13,5 +35,6 @@ int main(int argc, char** argv) {
testing::FLAGS_gtest_death_test_style = "threadsafe";
testing::InitGoogleMock(&argc, argv);
+ testing::AddGlobalTestEnvironment(new DefaultPlatformEnvironment);
return RUN_ALL_TESTS();
}
diff --git a/deps/v8/test/unittests/heap/cppgc/stats-collector-scopes-unittest.cc b/deps/v8/test/unittests/heap/cppgc/stats-collector-scopes-unittest.cc
index e9640ecbb6..9e23cb5681 100644
--- a/deps/v8/test/unittests/heap/cppgc/stats-collector-scopes-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/stats-collector-scopes-unittest.cc
@@ -109,7 +109,8 @@ TEST_F(CppgcTracingScopesTest, DisabledScope) {
ResetDelegatingTracingController();
{
StatsCollector::DisabledScope scope(
- *Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist);
+ Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kMarkProcessMarkingWorklist);
}
EXPECT_EQ(0u, DelegatingTracingControllerImpl::AddTraceEvent_callcount);
EndGC();
@@ -121,7 +122,8 @@ TEST_F(CppgcTracingScopesTest, EnabledScope) {
ResetDelegatingTracingController("CppGC.MarkProcessMarkingWorklist");
{
StatsCollector::EnabledScope scope(
- *Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist);
+ Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kMarkProcessMarkingWorklist);
}
EXPECT_EQ(2u, DelegatingTracingControllerImpl::AddTraceEvent_callcount);
EndGC();
@@ -131,7 +133,7 @@ TEST_F(CppgcTracingScopesTest, EnabledScope) {
ResetDelegatingTracingController("CppGC.MarkProcessWriteBarrierWorklist");
{
StatsCollector::EnabledScope scope(
- *Heap::From(GetHeap()),
+ Heap::From(GetHeap())->stats_collector(),
StatsCollector::kMarkProcessWriteBarrierWorklist);
}
EXPECT_EQ(2u, DelegatingTracingControllerImpl::AddTraceEvent_callcount);
@@ -146,7 +148,8 @@ TEST_F(CppgcTracingScopesTest, EnabledScopeWithArgs) {
ResetDelegatingTracingController();
{
StatsCollector::EnabledScope scope(
- *Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist);
+ Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kMarkProcessMarkingWorklist);
}
EXPECT_EQ(2, DelegatingTracingControllerImpl::stored_num_args);
EndGC();
@@ -156,8 +159,8 @@ TEST_F(CppgcTracingScopesTest, EnabledScopeWithArgs) {
ResetDelegatingTracingController();
{
StatsCollector::EnabledScope scope(
- *Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist,
- "arg1", 1);
+ Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kMarkProcessMarkingWorklist, "arg1", 1);
}
EXPECT_EQ(3, DelegatingTracingControllerImpl::stored_num_args);
EndGC();
@@ -167,8 +170,8 @@ TEST_F(CppgcTracingScopesTest, EnabledScopeWithArgs) {
ResetDelegatingTracingController();
{
StatsCollector::EnabledScope scope(
- *Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist,
- "arg1", 1, "arg2", 2);
+ Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kMarkProcessMarkingWorklist, "arg1", 1, "arg2", 2);
}
EXPECT_EQ(4, DelegatingTracingControllerImpl::stored_num_args);
EndGC();
@@ -181,8 +184,9 @@ TEST_F(CppgcTracingScopesTest, CheckScopeArgs) {
ResetDelegatingTracingController();
{
StatsCollector::EnabledScope scope(
- *Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist,
- "uint_arg", 13u, "bool_arg", false);
+ Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kMarkProcessMarkingWorklist, "uint_arg", 13u,
+ "bool_arg", false);
}
FindArgument("uint_arg", TRACE_VALUE_TYPE_UINT, 13);
FindArgument("bool_arg", TRACE_VALUE_TYPE_BOOL, false);
@@ -193,8 +197,9 @@ TEST_F(CppgcTracingScopesTest, CheckScopeArgs) {
ResetDelegatingTracingController();
{
StatsCollector::EnabledScope scope(
- *Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist,
- "neg_int_arg", -5, "pos_int_arg", 7);
+ Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kMarkProcessMarkingWorklist, "neg_int_arg", -5,
+ "pos_int_arg", 7);
}
FindArgument("neg_int_arg", TRACE_VALUE_TYPE_INT, -5);
FindArgument("pos_int_arg", TRACE_VALUE_TYPE_INT, 7);
@@ -207,8 +212,9 @@ TEST_F(CppgcTracingScopesTest, CheckScopeArgs) {
const char* string_value = "test";
{
StatsCollector::EnabledScope scope(
- *Heap::From(GetHeap()), StatsCollector::kMarkProcessMarkingWorklist,
- "string_arg", string_value, "double_arg", double_value);
+ Heap::From(GetHeap())->stats_collector(),
+ StatsCollector::kMarkProcessMarkingWorklist, "string_arg",
+ string_value, "double_arg", double_value);
}
FindArgument("string_arg", TRACE_VALUE_TYPE_STRING,
reinterpret_cast<uint64_t>(string_value));
@@ -227,16 +233,17 @@ TEST_F(CppgcTracingScopesTest, InitalScopesAreZero) {
stats_collector->NotifySweepingCompleted();
const StatsCollector::Event& event =
stats_collector->GetPreviousEventForTesting();
- for (int i = 0; i < StatsCollector::kNumScopeIds; ++i) {
+ for (int i = 0; i < StatsCollector::kNumHistogramScopeIds; ++i) {
EXPECT_TRUE(event.scope_data[i].IsZero());
}
- for (int i = 0; i < StatsCollector::kNumConcurrentScopeIds; ++i) {
+ for (int i = 0; i < StatsCollector::kNumHistogramConcurrentScopeIds; ++i) {
EXPECT_EQ(0, event.concurrent_scope_data[i]);
}
}
TEST_F(CppgcTracingScopesTest, TestIndividualScopes) {
- for (int scope_id = 0; scope_id < StatsCollector::kNumScopeIds; ++scope_id) {
+ for (int scope_id = 0; scope_id < StatsCollector::kNumHistogramScopeIds;
+ ++scope_id) {
StatsCollector* stats_collector = Heap::From(GetHeap())->stats_collector();
stats_collector->NotifyMarkingStarted(
GarbageCollector::Config::CollectionType::kMajor,
@@ -244,7 +251,7 @@ TEST_F(CppgcTracingScopesTest, TestIndividualScopes) {
DelegatingTracingControllerImpl::check_expectations = false;
{
StatsCollector::EnabledScope scope(
- *Heap::From(GetHeap()),
+ Heap::From(GetHeap())->stats_collector(),
static_cast<StatsCollector::ScopeId>(scope_id));
v8::base::TimeTicks time = v8::base::TimeTicks::Now();
while (time == v8::base::TimeTicks::Now()) {
@@ -255,21 +262,21 @@ TEST_F(CppgcTracingScopesTest, TestIndividualScopes) {
stats_collector->NotifySweepingCompleted();
const StatsCollector::Event& event =
stats_collector->GetPreviousEventForTesting();
- for (int i = 0; i < StatsCollector::kNumScopeIds; ++i) {
+ for (int i = 0; i < StatsCollector::kNumHistogramScopeIds; ++i) {
if (i == scope_id)
EXPECT_LT(v8::base::TimeDelta(), event.scope_data[i]);
else
EXPECT_TRUE(event.scope_data[i].IsZero());
}
- for (int i = 0; i < StatsCollector::kNumConcurrentScopeIds; ++i) {
+ for (int i = 0; i < StatsCollector::kNumHistogramConcurrentScopeIds; ++i) {
EXPECT_EQ(0, event.concurrent_scope_data[i]);
}
}
}
TEST_F(CppgcTracingScopesTest, TestIndividualConcurrentScopes) {
- for (int scope_id = 0; scope_id < StatsCollector::kNumConcurrentScopeIds;
- ++scope_id) {
+ for (int scope_id = 0;
+ scope_id < StatsCollector::kNumHistogramConcurrentScopeIds; ++scope_id) {
StatsCollector* stats_collector = Heap::From(GetHeap())->stats_collector();
stats_collector->NotifyMarkingStarted(
GarbageCollector::Config::CollectionType::kMajor,
@@ -277,7 +284,7 @@ TEST_F(CppgcTracingScopesTest, TestIndividualConcurrentScopes) {
DelegatingTracingControllerImpl::check_expectations = false;
{
StatsCollector::EnabledConcurrentScope scope(
- *Heap::From(GetHeap()),
+ Heap::From(GetHeap())->stats_collector(),
static_cast<StatsCollector::ConcurrentScopeId>(scope_id));
v8::base::TimeTicks time = v8::base::TimeTicks::Now();
while (time == v8::base::TimeTicks::Now()) {
@@ -288,10 +295,10 @@ TEST_F(CppgcTracingScopesTest, TestIndividualConcurrentScopes) {
stats_collector->NotifySweepingCompleted();
const StatsCollector::Event& event =
stats_collector->GetPreviousEventForTesting();
- for (int i = 0; i < StatsCollector::kNumScopeIds; ++i) {
+ for (int i = 0; i < StatsCollector::kNumHistogramScopeIds; ++i) {
EXPECT_TRUE(event.scope_data[i].IsZero());
}
- for (int i = 0; i < StatsCollector::kNumConcurrentScopeIds; ++i) {
+ for (int i = 0; i < StatsCollector::kNumHistogramConcurrentScopeIds; ++i) {
if (i == scope_id)
EXPECT_LT(0, event.concurrent_scope_data[i]);
else
diff --git a/deps/v8/test/unittests/heap/cppgc/stats-collector-unittest.cc b/deps/v8/test/unittests/heap/cppgc/stats-collector-unittest.cc
index 154ac3cfc6..5c8044db7e 100644
--- a/deps/v8/test/unittests/heap/cppgc/stats-collector-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/stats-collector-unittest.cc
@@ -18,6 +18,9 @@ constexpr size_t kMinReportedSize = StatsCollector::kAllocationThresholdBytes;
class StatsCollectorTest : public ::testing::Test {
public:
+ StatsCollectorTest()
+ : stats(nullptr /* metric_recorder */, nullptr /* platform */) {}
+
void FakeAllocate(size_t bytes) {
stats.NotifyAllocation(bytes);
stats.NotifySafePointForConservativeCollection();
@@ -114,6 +117,8 @@ class MockAllocationObserver : public StatsCollector::AllocationObserver {
MOCK_METHOD(void, AllocatedObjectSizeIncreased, (size_t), (override));
MOCK_METHOD(void, AllocatedObjectSizeDecreased, (size_t), (override));
MOCK_METHOD(void, ResetAllocatedObjectSize, (size_t), (override));
+ MOCK_METHOD(void, AllocatedSizeIncreased, (size_t), (override));
+ MOCK_METHOD(void, AllocatedSizeDecreased, (size_t), (override));
};
TEST_F(StatsCollectorTest, RegisterUnregisterObserver) {
@@ -153,6 +158,18 @@ TEST_F(StatsCollectorTest, ObserveResetAllocatedObjectSize) {
stats.UnregisterObserver(&observer);
}
+TEST_F(StatsCollectorTest, ObserveAllocatedMemoryIncreaseAndDecrease) {
+ MockAllocationObserver observer;
+ stats.RegisterObserver(&observer);
+ static constexpr size_t kAllocatedMemorySize = 4096;
+ EXPECT_CALL(observer, AllocatedSizeIncreased(kAllocatedMemorySize));
+ stats.NotifyAllocatedMemory(kAllocatedMemorySize);
+ static constexpr size_t kFreedMemorySize = 2048;
+ EXPECT_CALL(observer, AllocatedSizeDecreased(kFreedMemorySize));
+ stats.NotifyFreedMemory(kFreedMemorySize);
+ stats.UnregisterObserver(&observer);
+}
+
namespace {
class AllocationObserverTriggeringGC final
diff --git a/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc b/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc
index 09384cf90b..94c3479d3a 100644
--- a/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc
@@ -269,5 +269,62 @@ TEST_F(SweeperTest, UnmarkObjects) {
#endif
}
+TEST_F(SweeperTest, LazySweepingDuringAllocation) {
+ using GCedObject = GCed<256>;
+ static const size_t kObjectsPerPage =
+ NormalPage::PayloadSize() /
+ (sizeof(GCedObject) + sizeof(HeapObjectHeader));
+ // This test expects each page to contain at least 2 objects.
+ DCHECK_LT(2u, kObjectsPerPage);
+ PreciseGC();
+ std::vector<Persistent<GCedObject>> first_page;
+ first_page.push_back(MakeGarbageCollected<GCedObject>(GetAllocationHandle()));
+ GCedObject* expected_address_on_first_page =
+ MakeGarbageCollected<GCedObject>(GetAllocationHandle());
+ for (size_t i = 2; i < kObjectsPerPage; ++i) {
+ first_page.push_back(
+ MakeGarbageCollected<GCedObject>(GetAllocationHandle()));
+ }
+ std::vector<Persistent<GCedObject>> second_page;
+ second_page.push_back(
+ MakeGarbageCollected<GCedObject>(GetAllocationHandle()));
+ GCedObject* expected_address_on_second_page =
+ MakeGarbageCollected<GCedObject>(GetAllocationHandle());
+ for (size_t i = 2; i < kObjectsPerPage; ++i) {
+ second_page.push_back(
+ MakeGarbageCollected<GCedObject>(GetAllocationHandle()));
+ }
+ testing::TestPlatform::DisableBackgroundTasksScope no_concurrent_sweep_scope(
+ GetPlatformHandle().get());
+ g_destructor_callcount = 0;
+ static constexpr Heap::Config config = {
+ Heap::Config::CollectionType::kMajor,
+ Heap::Config::StackState::kNoHeapPointers,
+ Heap::Config::MarkingType::kAtomic,
+ Heap::Config::SweepingType::kIncrementalAndConcurrent};
+ Heap::From(GetHeap())->CollectGarbage(config);
+ // Incremental sweeping is active and the space should have two pages with
+ // no room for an additional GCedObject. Allocating a new GCedObject should
+ // trigger sweeping. All objects other than the 2nd object on each page are
+ // marked. Lazy sweeping on allocation should reclaim the object on one of
+ // the pages and reuse its memory. The object on the other page should remain
+ // un-reclaimed. To confirm: the new object will be allocated at one of the
+ // expected addresses and the GCedObject destructor is only called once.
+ GCedObject* new_object1 =
+ MakeGarbageCollected<GCedObject>(GetAllocationHandle());
+ EXPECT_EQ(1u, g_destructor_callcount);
+ EXPECT_TRUE((new_object1 == expected_address_on_first_page) ||
+ (new_object1 == expected_address_on_second_page));
+ // Allocating again should reclaim the other unmarked object and reuse its
+ // memory. The destructor will be called again and the new object will be
+ // allocated at one of the expected addresses but not the same one as before.
+ GCedObject* new_object2 =
+ MakeGarbageCollected<GCedObject>(GetAllocationHandle());
+ EXPECT_EQ(2u, g_destructor_callcount);
+ EXPECT_TRUE((new_object2 == expected_address_on_first_page) ||
+ (new_object2 == expected_address_on_second_page));
+ EXPECT_NE(new_object1, new_object2);
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/testing-unittest.cc b/deps/v8/test/unittests/heap/cppgc/testing-unittest.cc
new file mode 100644
index 0000000000..1aa9bd15bb
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/testing-unittest.cc
@@ -0,0 +1,55 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/testing.h"
+
+#include "include/cppgc/allocation.h"
+#include "include/cppgc/garbage-collected.h"
+#include "include/cppgc/persistent.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+class TestingTest : public testing::TestWithHeap {};
+
+class GCed : public GarbageCollected<GCed> {
+ public:
+ void Trace(Visitor*) const {}
+};
+} // namespace
+
+TEST_F(TestingTest, OverrideEmbeddertackStateScope) {
+ {
+ auto* gced = MakeGarbageCollected<GCed>(GetHeap()->GetAllocationHandle());
+ WeakPersistent<GCed> weak{gced};
+ internal::Heap::From(GetHeap())->CollectGarbage(
+ Heap::Config::PreciseAtomicConfig());
+ EXPECT_FALSE(weak);
+ }
+ {
+ auto* gced = MakeGarbageCollected<GCed>(GetHeap()->GetAllocationHandle());
+ WeakPersistent<GCed> weak{gced};
+ cppgc::testing::OverrideEmbedderStackStateScope override_stack(
+ GetHeap()->GetHeapHandle(),
+ EmbedderStackState::kMayContainHeapPointers);
+ internal::Heap::From(GetHeap())->CollectGarbage(
+ Heap::Config::PreciseAtomicConfig());
+ EXPECT_TRUE(weak);
+ }
+ {
+ auto* gced = MakeGarbageCollected<GCed>(GetHeap()->GetAllocationHandle());
+ WeakPersistent<GCed> weak{gced};
+ cppgc::testing::OverrideEmbedderStackStateScope override_stack(
+ GetHeap()->GetHeapHandle(), EmbedderStackState::kNoHeapPointers);
+ internal::Heap::From(GetHeap())->CollectGarbage(
+ Heap::Config::ConservativeAtomicConfig());
+ EXPECT_FALSE(weak);
+ }
+}
+
+} // namespace internal
+} // namespace cppgc
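The new testing-unittest.cc exercises cppgc::testing::OverrideEmbedderStackStateScope from include/cppgc/testing.h. A hedged sketch of how a harness might force precise collections (ForcePreciseGCForTesting is a hypothetical helper name):

    #include "include/cppgc/testing.h"

    void ForcePreciseGCForTesting(cppgc::HeapHandle& handle) {
      // Pretend the native stack holds no pointers into the cppgc heap, so a
      // conservative collection behaves like a precise one for this call.
      cppgc::testing::OverrideEmbedderStackStateScope no_stack_refs(
          handle, cppgc::EmbedderStackState::kNoHeapPointers);
      // ... trigger a garbage collection through the embedder's usual path ...
    }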
diff --git a/deps/v8/test/unittests/heap/cppgc/tests.cc b/deps/v8/test/unittests/heap/cppgc/tests.cc
index a170ecd879..b2bed85f1d 100644
--- a/deps/v8/test/unittests/heap/cppgc/tests.cc
+++ b/deps/v8/test/unittests/heap/cppgc/tests.cc
@@ -20,12 +20,10 @@ std::shared_ptr<TestPlatform> TestWithPlatform::platform_;
void TestWithPlatform::SetUpTestSuite() {
platform_ = std::make_unique<TestPlatform>(
std::make_unique<DelegatingTracingController>());
- cppgc::InitializeProcess(platform_->GetPageAllocator());
}
// static
void TestWithPlatform::TearDownTestSuite() {
- cppgc::ShutdownProcess();
platform_.reset();
}
@@ -38,7 +36,7 @@ void TestWithHeap::ResetLinearAllocationBuffers() {
}
TestSupportingAllocationOnly::TestSupportingAllocationOnly()
- : no_gc_scope_(*internal::Heap::From(GetHeap())) {}
+ : no_gc_scope_(GetHeap()->GetHeapHandle()) {}
} // namespace testing
} // namespace internal
diff --git a/deps/v8/test/unittests/heap/cppgc/tests.h b/deps/v8/test/unittests/heap/cppgc/tests.h
index fba3ab92e9..c091c7f6ec 100644
--- a/deps/v8/test/unittests/heap/cppgc/tests.h
+++ b/deps/v8/test/unittests/heap/cppgc/tests.h
@@ -5,6 +5,7 @@
#ifndef V8_UNITTESTS_HEAP_CPPGC_TESTS_H_
#define V8_UNITTESTS_HEAP_CPPGC_TESTS_H_
+#include "include/cppgc/heap-consistency.h"
#include "include/cppgc/heap.h"
#include "include/cppgc/platform.h"
#include "src/heap/cppgc/heap.h"
@@ -85,6 +86,10 @@ class TestWithHeap : public TestWithPlatform {
return Heap::From(GetHeap())->marker_;
}
+ const std::unique_ptr<MarkerBase>& GetMarkerRef() const {
+ return Heap::From(GetHeap())->marker_;
+ }
+
void ResetLinearAllocationBuffers();
private:
@@ -101,7 +106,7 @@ class TestSupportingAllocationOnly : public TestWithHeap {
TestSupportingAllocationOnly();
private:
- Heap::NoGCScope no_gc_scope_;
+ subtle::NoGarbageCollectionScope no_gc_scope_;
};
} // namespace testing
diff --git a/deps/v8/test/unittests/heap/cppgc/write-barrier-unittest.cc b/deps/v8/test/unittests/heap/cppgc/write-barrier-unittest.cc
index c179635c8a..ba340743ea 100644
--- a/deps/v8/test/unittests/heap/cppgc/write-barrier-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/write-barrier-unittest.cc
@@ -175,7 +175,7 @@ class NoWriteBarrierTest : public testing::TestWithHeap {};
TEST_F(WriteBarrierTest, EnableDisableIncrementalMarking) {
{
IncrementalMarkingScope scope(marker());
- EXPECT_TRUE(ProcessHeap::IsAnyIncrementalOrConcurrentMarking());
+ EXPECT_TRUE(WriteBarrier::IsAnyIncrementalOrConcurrentMarking());
}
}
@@ -415,10 +415,13 @@ TEST_F(WriteBarrierTest, DijkstraWriteBarrierRangeTriggersWhenMarkingIsOn) {
EXPECT_FALSE(object1->IsMarked());
WriteBarrierParams params;
EXPECT_EQ(WriteBarrierType::kMarking,
- HeapConsistency::GetWriteBarrierType(object2->objects, params));
+ HeapConsistency::GetWriteBarrierType(
+ object2->objects, params, [this]() -> HeapHandle& {
+ return GetHeap()->GetHeapHandle();
+ }));
HeapConsistency::DijkstraWriteBarrierRange(
- params, GetHeap()->GetHeapHandle(), object2->objects,
- sizeof(InlinedObject), 4, TraceTrait<InlinedObject>::Trace);
+ params, object2->objects, sizeof(InlinedObject), 4,
+ TraceTrait<InlinedObject>::Trace);
EXPECT_TRUE(object1->IsMarked());
}
}
@@ -432,10 +435,13 @@ TEST_F(WriteBarrierTest, DijkstraWriteBarrierRangeBailoutIfMarked) {
ExpectNoWriteBarrierFires scope(marker(), {object1});
WriteBarrierParams params;
EXPECT_EQ(WriteBarrierType::kMarking,
- HeapConsistency::GetWriteBarrierType(object2->objects, params));
+ HeapConsistency::GetWriteBarrierType(
+ object2->objects, params, [this]() -> HeapHandle& {
+ return GetHeap()->GetHeapHandle();
+ }));
HeapConsistency::DijkstraWriteBarrierRange(
- params, GetHeap()->GetHeapHandle(), object2->objects,
- sizeof(InlinedObject), 4, TraceTrait<InlinedObject>::Trace);
+ params, object2->objects, sizeof(InlinedObject), 4,
+ TraceTrait<InlinedObject>::Trace);
}
}
@@ -447,8 +453,8 @@ TEST_F(WriteBarrierTest, SteeleWriteBarrierTriggersWhenMarkingIsOn) {
EXPECT_TRUE(HeapObjectHeader::FromPayload(object1).TryMarkAtomic());
WriteBarrierParams params;
EXPECT_EQ(WriteBarrierType::kMarking,
- HeapConsistency::GetWriteBarrierType(object2->next_ref().Get(),
- params));
+ HeapConsistency::GetWriteBarrierType(
+ &object2->next_ref(), object2->next_ref().Get(), params));
HeapConsistency::SteeleWriteBarrier(params, object2->next_ref().Get());
}
}
@@ -460,8 +466,8 @@ TEST_F(WriteBarrierTest, SteeleWriteBarrierBailoutIfNotMarked) {
ExpectNoWriteBarrierFires scope(marker(), {object1});
WriteBarrierParams params;
EXPECT_EQ(WriteBarrierType::kMarking,
- HeapConsistency::GetWriteBarrierType(object2->next_ref().Get(),
- params));
+ HeapConsistency::GetWriteBarrierType(
+ &object2->next_ref(), object2->next_ref().Get(), params));
HeapConsistency::SteeleWriteBarrier(params, object2->next_ref().Get());
}
}
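The write-barrier-unittest changes above reflect the reworked HeapConsistency API: the range variant of GetWriteBarrierType now takes a callback that lazily resolves the HeapHandle, and DijkstraWriteBarrierRange no longer takes the handle at all. A sketch under those assumptions (ElementType is a placeholder for an inlined, traceable element type such as InlinedObject in the test):

    using HeapConsistency = cppgc::subtle::HeapConsistency;

    template <typename ElementType>
    void WriteElementRange(cppgc::HeapHandle& handle, ElementType* first,
                           size_t count) {
      HeapConsistency::WriteBarrierParams params;
      // The heap is only resolved if a barrier is actually needed.
      if (HeapConsistency::GetWriteBarrierType(
              first, params,
              [&handle]() -> cppgc::HeapHandle& { return handle; }) ==
          HeapConsistency::WriteBarrierType::kMarking) {
        HeapConsistency::DijkstraWriteBarrierRange(
            params, first, sizeof(ElementType), count,
            cppgc::TraceTrait<ElementType>::Trace);
      }
    }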
diff --git a/deps/v8/test/unittests/heap/heap-utils.cc b/deps/v8/test/unittests/heap/heap-utils.cc
new file mode 100644
index 0000000000..66ad8d98c8
--- /dev/null
+++ b/deps/v8/test/unittests/heap/heap-utils.cc
@@ -0,0 +1,44 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/heap/heap-utils.h"
+
+#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact.h"
+#include "src/heap/safepoint.h"
+
+namespace v8 {
+namespace internal {
+
+void HeapInternalsBase::SimulateIncrementalMarking(Heap* heap,
+ bool force_completion) {
+ constexpr double kStepSizeInMs = 100;
+ CHECK(FLAG_incremental_marking);
+ i::IncrementalMarking* marking = heap->incremental_marking();
+ i::MarkCompactCollector* collector = heap->mark_compact_collector();
+ if (collector->sweeping_in_progress()) {
+ SafepointScope scope(heap);
+ collector->EnsureSweepingCompleted();
+ }
+ CHECK(marking->IsMarking() || marking->IsStopped() || marking->IsComplete());
+ if (marking->IsStopped()) {
+ heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
+ i::GarbageCollectionReason::kTesting);
+ }
+ CHECK(marking->IsMarking() || marking->IsComplete());
+ if (!force_completion) return;
+
+ while (!marking->IsComplete()) {
+ marking->Step(kStepSizeInMs, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ i::StepOrigin::kV8);
+ if (marking->IsReadyToOverApproximateWeakClosure()) {
+ SafepointScope scope(heap);
+ marking->FinalizeIncrementally();
+ }
+ }
+ CHECK(marking->IsComplete());
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/heap/heap-utils.h b/deps/v8/test/unittests/heap/heap-utils.h
index 7474370aad..2cd123c827 100644
--- a/deps/v8/test/unittests/heap/heap-utils.h
+++ b/deps/v8/test/unittests/heap/heap-utils.h
@@ -13,8 +13,13 @@
namespace v8 {
namespace internal {
+class HeapInternalsBase {
+ protected:
+ void SimulateIncrementalMarking(Heap* heap, bool force_completion);
+};
+
template <typename TMixin>
-class WithHeapInternals : public TMixin {
+class WithHeapInternals : public TMixin, HeapInternalsBase {
public:
WithHeapInternals() = default;
WithHeapInternals(const WithHeapInternals&) = delete;
@@ -25,6 +30,11 @@ class WithHeapInternals : public TMixin {
}
Heap* heap() const { return this->i_isolate()->heap(); }
+
+ void SimulateIncrementalMarking(bool force_completion = true) {
+ return HeapInternalsBase::SimulateIncrementalMarking(heap(),
+ force_completion);
+ }
};
using TestWithHeapInternals = //
diff --git a/deps/v8/test/unittests/heap/local-factory-unittest.cc b/deps/v8/test/unittests/heap/local-factory-unittest.cc
index a41ce1a3e1..bd8e5db0a7 100644
--- a/deps/v8/test/unittests/heap/local-factory-unittest.cc
+++ b/deps/v8/test/unittests/heap/local-factory-unittest.cc
@@ -63,7 +63,6 @@ class LocalFactoryTest : public TestWithIsolateAndZone {
REPLMode::kNo),
&state_),
local_isolate_(isolate()->main_thread_local_isolate()) {
- FLAG_concurrent_allocation = true;
}
FunctionLiteral* ParseProgram(const char* source) {
diff --git a/deps/v8/test/unittests/heap/local-heap-unittest.cc b/deps/v8/test/unittests/heap/local-heap-unittest.cc
index f08e6e03ff..919578f2fb 100644
--- a/deps/v8/test/unittests/heap/local-heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/local-heap-unittest.cc
@@ -3,7 +3,11 @@
// found in the LICENSE file.
#include "src/heap/local-heap.h"
+
+#include "src/base/platform/condition-variable.h"
+#include "src/base/platform/mutex.h"
#include "src/heap/heap.h"
+#include "src/heap/parked-scope.h"
#include "src/heap/safepoint.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -72,5 +76,114 @@ TEST_F(LocalHeapTest, CurrentBackground) {
CHECK_NULL(LocalHeap::Current());
}
+namespace {
+
+class GCEpilogue {
+ public:
+ static void Callback(void* data) {
+ reinterpret_cast<GCEpilogue*>(data)->was_invoked_ = true;
+ }
+
+ void NotifyStarted() {
+ base::LockGuard<base::Mutex> lock_guard(&mutex_);
+ started_ = true;
+ cv_.NotifyOne();
+ }
+
+ void WaitUntilStarted() {
+ base::LockGuard<base::Mutex> lock_guard(&mutex_);
+ while (!started_) {
+ cv_.Wait(&mutex_);
+ }
+ }
+ void RequestStop() {
+ base::LockGuard<base::Mutex> lock_guard(&mutex_);
+ stop_requested_ = true;
+ }
+
+ bool StopRequested() {
+ base::LockGuard<base::Mutex> lock_guard(&mutex_);
+ return stop_requested_;
+ }
+
+ bool WasInvoked() { return was_invoked_; }
+
+ private:
+ bool was_invoked_ = false;
+ bool started_ = false;
+ bool stop_requested_ = false;
+ base::Mutex mutex_;
+ base::ConditionVariable cv_;
+};
+
+class BackgroundThreadForGCEpilogue final : public v8::base::Thread {
+ public:
+ explicit BackgroundThreadForGCEpilogue(Heap* heap, bool parked,
+ GCEpilogue* epilogue)
+ : v8::base::Thread(base::Thread::Options("BackgroundThread")),
+ heap_(heap),
+ parked_(parked),
+ epilogue_(epilogue) {}
+
+ void Run() override {
+ LocalHeap lh(heap_, ThreadKind::kBackground);
+ base::Optional<UnparkedScope> unparked_scope;
+ if (!parked_) {
+ unparked_scope.emplace(&lh);
+ }
+ {
+ base::Optional<UnparkedScope> unparked_scope;
+ if (parked_) unparked_scope.emplace(&lh);
+ lh.AddGCEpilogueCallback(&GCEpilogue::Callback, epilogue_);
+ }
+ epilogue_->NotifyStarted();
+ while (!epilogue_->StopRequested()) {
+ lh.Safepoint();
+ }
+ {
+ base::Optional<UnparkedScope> unparked_scope;
+ if (parked_) unparked_scope.emplace(&lh);
+ lh.RemoveGCEpilogueCallback(&GCEpilogue::Callback, epilogue_);
+ }
+ }
+
+ Heap* heap_;
+ bool parked_;
+ GCEpilogue* epilogue_;
+};
+
+} // anonymous namespace
+
+TEST_F(LocalHeapTest, GCEpilogue) {
+ Heap* heap = i_isolate()->heap();
+ LocalHeap lh(heap, ThreadKind::kMain);
+ std::array<GCEpilogue, 3> epilogue;
+ {
+ UnparkedScope unparked(&lh);
+ lh.AddGCEpilogueCallback(&GCEpilogue::Callback, &epilogue[0]);
+ }
+ auto thread1 =
+ std::make_unique<BackgroundThreadForGCEpilogue>(heap, true, &epilogue[1]);
+ auto thread2 = std::make_unique<BackgroundThreadForGCEpilogue>(heap, false,
+ &epilogue[2]);
+ CHECK(thread1->Start());
+ CHECK(thread2->Start());
+ epilogue[1].WaitUntilStarted();
+ epilogue[2].WaitUntilStarted();
+ heap->PreciseCollectAllGarbage(Heap::kNoGCFlags,
+ GarbageCollectionReason::kTesting);
+ epilogue[1].RequestStop();
+ epilogue[2].RequestStop();
+ thread1->Join();
+ thread2->Join();
+ {
+ UnparkedScope unparked(&lh);
+ lh.RemoveGCEpilogueCallback(&GCEpilogue::Callback, &epilogue[0]);
+ }
+ for (auto& e : epilogue) {
+ CHECK(e.WasInvoked());
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/safepoint-unittest.cc b/deps/v8/test/unittests/heap/safepoint-unittest.cc
index 0846e0de6f..8cd21c1bed 100644
--- a/deps/v8/test/unittests/heap/safepoint-unittest.cc
+++ b/deps/v8/test/unittests/heap/safepoint-unittest.cc
@@ -15,16 +15,9 @@
namespace v8 {
namespace internal {
-void EnsureFlagLocalHeapsEnabled() {
- // Avoid data race in concurrent thread by only setting the flag to true if
- // not already enabled.
- if (!FLAG_local_heaps) FLAG_local_heaps = true;
-}
-
using SafepointTest = TestWithIsolate;
TEST_F(SafepointTest, ReachSafepointWithoutLocalHeaps) {
- EnsureFlagLocalHeapsEnabled();
Heap* heap = i_isolate()->heap();
bool run = false;
{
@@ -54,7 +47,6 @@ class ParkedThread final : public v8::base::Thread {
};
TEST_F(SafepointTest, StopParkedThreads) {
- EnsureFlagLocalHeapsEnabled();
Heap* heap = i_isolate()->heap();
int safepoints = 0;
@@ -114,7 +106,6 @@ class RunningThread final : public v8::base::Thread {
};
TEST_F(SafepointTest, StopRunningThreads) {
- EnsureFlagLocalHeapsEnabled();
Heap* heap = i_isolate()->heap();
const int kThreads = 10;
diff --git a/deps/v8/test/unittests/heap/unified-heap-snapshot-unittest.cc b/deps/v8/test/unittests/heap/unified-heap-snapshot-unittest.cc
index c06e85a69e..848def9e21 100644
--- a/deps/v8/test/unittests/heap/unified-heap-snapshot-unittest.cc
+++ b/deps/v8/test/unittests/heap/unified-heap-snapshot-unittest.cc
@@ -265,6 +265,7 @@ namespace {
class GCedWithJSRef : public cppgc::GarbageCollected<GCedWithJSRef> {
public:
+ static uint16_t kWrappableType;
static constexpr const char kExpectedName[] =
"v8::internal::(anonymous namespace)::GCedWithJSRef";
@@ -285,8 +286,12 @@ class GCedWithJSRef : public cppgc::GarbageCollected<GCedWithJSRef> {
private:
TracedReference<v8::Object> v8_object_;
};
+
constexpr const char GCedWithJSRef::kExpectedName[];
+// static
+uint16_t GCedWithJSRef::kWrappableType = WrapperHelper::kTracedEmbedderId;
+
class V8_NODISCARD JsTestingScope {
public:
explicit JsTestingScope(v8::Isolate* isolate)
@@ -311,7 +316,8 @@ cppgc::Persistent<GCedWithJSRef> SetupWrapperWrappablePair(
cppgc::Persistent<GCedWithJSRef> gc_w_js_ref =
cppgc::MakeGarbageCollected<GCedWithJSRef>(allocation_handle);
v8::Local<v8::Object> wrapper_object = WrapperHelper::CreateWrapper(
- testing_scope.context(), gc_w_js_ref.Get(), name);
+ testing_scope.context(), &GCedWithJSRef::kWrappableType,
+ gc_w_js_ref.Get(), name);
gc_w_js_ref->SetV8Object(testing_scope.isolate(), wrapper_object);
return std::move(gc_w_js_ref);
}
@@ -356,7 +362,7 @@ TEST_F(UnifiedHeapSnapshotTest, MergedWrapperNode) {
testing_scope, allocation_handle(), "MergedObject");
gc_w_js_ref->SetWrapperClassId(1); // Any class id will do.
v8::Local<v8::Object> next_object = WrapperHelper::CreateWrapper(
- testing_scope.context(), nullptr, "NextObject");
+ testing_scope.context(), nullptr, nullptr, "NextObject");
v8::Local<v8::Object> wrapper_object =
gc_w_js_ref->wrapper().Get(v8_isolate());
// Chain another object to `wrapper_object`. Since `wrapper_object` should be
diff --git a/deps/v8/test/unittests/heap/unified-heap-unittest.cc b/deps/v8/test/unittests/heap/unified-heap-unittest.cc
index ca72a80c64..597cbcf2cf 100644
--- a/deps/v8/test/unittests/heap/unified-heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/unified-heap-unittest.cc
@@ -4,9 +4,13 @@
#include "include/cppgc/allocation.h"
#include "include/cppgc/garbage-collected.h"
+#include "include/cppgc/persistent.h"
#include "include/cppgc/platform.h"
+#include "include/v8-cppgc.h"
+#include "include/v8.h"
#include "src/api/api-inl.h"
#include "src/heap/cppgc-js/cpp-heap.h"
+#include "src/heap/cppgc/sweeper.h"
#include "src/objects/objects-inl.h"
#include "test/unittests/heap/heap-utils.h"
#include "test/unittests/heap/unified-heap-utils.h"
@@ -22,11 +26,22 @@ class Wrappable final : public cppgc::GarbageCollected<Wrappable> {
~Wrappable() { destructor_callcount++; }
- void Trace(cppgc::Visitor* visitor) const {}
+ void Trace(cppgc::Visitor* visitor) const { visitor->Trace(wrapper_); }
+
+ void SetWrapper(v8::Isolate* isolate, v8::Local<v8::Object> wrapper) {
+ wrapper_.Reset(isolate, wrapper);
+ }
+
+ TracedReference<v8::Object>& wrapper() { return wrapper_; }
+
+ private:
+ TracedReference<v8::Object> wrapper_;
};
size_t Wrappable::destructor_callcount = 0;
+using UnifiedHeapDetachedTest = TestWithHeapInternals;
+
} // namespace
TEST_F(UnifiedHeapTest, OnlyGC) { CollectGarbageWithEmbedderStack(); }
@@ -35,18 +50,104 @@ TEST_F(UnifiedHeapTest, FindingV8ToBlinkReference) {
v8::HandleScope scope(v8_isolate());
v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
v8::Context::Scope context_scope(context);
+ uint16_t wrappable_type = WrapperHelper::kTracedEmbedderId;
v8::Local<v8::Object> api_object = WrapperHelper::CreateWrapper(
- context, cppgc::MakeGarbageCollected<Wrappable>(allocation_handle()));
+ context, &wrappable_type,
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle()));
+ Wrappable::destructor_callcount = 0;
EXPECT_FALSE(api_object.IsEmpty());
EXPECT_EQ(0u, Wrappable::destructor_callcount);
- CollectGarbageWithoutEmbedderStack();
+ CollectGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
EXPECT_EQ(0u, Wrappable::destructor_callcount);
WrapperHelper::ResetWrappableConnection(api_object);
- CollectGarbageWithoutEmbedderStack();
- // Calling CollectGarbage twice to force the first GC to finish sweeping.
- CollectGarbageWithoutEmbedderStack();
+ CollectGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
EXPECT_EQ(1u, Wrappable::destructor_callcount);
}
+TEST_F(UnifiedHeapTest, WriteBarrierV8ToCppReference) {
+ v8::HandleScope scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+ void* wrappable = cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+ v8::Local<v8::Object> api_object =
+ WrapperHelper::CreateWrapper(context, nullptr, nullptr);
+ Wrappable::destructor_callcount = 0;
+ WrapperHelper::ResetWrappableConnection(api_object);
+ SimulateIncrementalMarking();
+ {
+ // The following snippet shows the embedder code for implementing a GC-safe
+ // setter for JS to C++ references.
+ WrapperHelper::SetWrappableConnection(api_object, wrappable, wrappable);
+ JSHeapConsistency::WriteBarrierParams params;
+ auto barrier_type = JSHeapConsistency::GetWriteBarrierType(
+ api_object, 1, wrappable, params,
+ [this]() -> cppgc::HeapHandle& { return cpp_heap().GetHeapHandle(); });
+ EXPECT_EQ(JSHeapConsistency::WriteBarrierType::kMarking, barrier_type);
+ JSHeapConsistency::DijkstraMarkingBarrier(
+ params, cpp_heap().GetHeapHandle(), wrappable);
+ }
+ CollectGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ EXPECT_EQ(0u, Wrappable::destructor_callcount);
+}
+
+TEST_F(UnifiedHeapTest, WriteBarrierCppToV8Reference) {
+ v8::HandleScope scope(v8_isolate());
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+ cppgc::Persistent<Wrappable> wrappable =
+ cppgc::MakeGarbageCollected<Wrappable>(allocation_handle());
+ Wrappable::destructor_callcount = 0;
+ SimulateIncrementalMarking();
+ // Pick a sentinel to compare against.
+ void* kMagicAddress = &Wrappable::destructor_callcount;
+ {
+ // The following snippet shows the embedder code for implementing a GC-safe
+ // setter for C++ to JS references.
+ v8::HandleScope nested_scope(v8_isolate());
+ v8::Local<v8::Object> api_object =
+ WrapperHelper::CreateWrapper(context, nullptr, nullptr);
+ // Setting only one field to avoid treating this as wrappable backref, see
+ // `LocalEmbedderHeapTracer::ExtractWrapperInfo`.
+ api_object->SetAlignedPointerInInternalField(1, kMagicAddress);
+ wrappable->SetWrapper(v8_isolate(), api_object);
+ JSHeapConsistency::WriteBarrierParams params;
+ auto barrier_type = JSHeapConsistency::GetWriteBarrierType(
+ wrappable->wrapper(), params,
+ [this]() -> cppgc::HeapHandle& { return cpp_heap().GetHeapHandle(); });
+ EXPECT_EQ(JSHeapConsistency::WriteBarrierType::kMarking, barrier_type);
+ JSHeapConsistency::DijkstraMarkingBarrier(
+ params, cpp_heap().GetHeapHandle(), wrappable->wrapper());
+ }
+ CollectGarbageWithoutEmbedderStack(cppgc::Heap::SweepingType::kAtomic);
+ EXPECT_EQ(0u, Wrappable::destructor_callcount);
+ EXPECT_EQ(kMagicAddress,
+ wrappable->wrapper()->GetAlignedPointerFromInternalField(1));
+}
+
+TEST_F(UnifiedHeapDetachedTest, AllocationBeforeConfigureHeap) {
+ auto heap = v8::CppHeap::Create(
+ V8::GetCurrentPlatform(),
+ CppHeapCreateParams{{}, WrapperHelper::DefaultWrapperDescriptor()});
+ auto* object =
+ cppgc::MakeGarbageCollected<Wrappable>(heap->GetAllocationHandle());
+ cppgc::WeakPersistent<Wrappable> weak_holder{object};
+
+ auto& js_heap = *isolate()->heap();
+ js_heap.AttachCppHeap(heap.get());
+ auto& cpp_heap = *CppHeap::From(isolate()->heap()->cpp_heap());
+ {
+ CollectGarbage(OLD_SPACE);
+ cpp_heap.AsBase().sweeper().FinishIfRunning();
+ EXPECT_TRUE(weak_holder);
+ }
+ {
+ js_heap.SetEmbedderStackStateForNextFinalization(
+ EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
+ CollectGarbage(OLD_SPACE);
+ cpp_heap.AsBase().sweeper().FinishIfRunning();
+ EXPECT_FALSE(weak_holder);
+ }
+}
+
} // namespace internal
} // namespace v8
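The two write-barrier tests above follow the same embedder pattern. Reduced to its core, and using only the JSHeapConsistency calls shown in the tests (the helper below is an illustrative sketch, not part of the patch), a GC-safe JS-to-C++ setter looks like this:

// Illustrative sketch of a GC-safe setter for a JS -> C++ reference.
void SetWrappable(v8::Local<v8::Object> api_object, void* wrappable,
                  cppgc::HeapHandle& heap_handle) {
  // Store the reference first (here via the test helper; an embedder would
  // write its own internal fields).
  WrapperHelper::SetWrappableConnection(api_object, wrappable, wrappable);
  // Then emit the barrier if incremental marking is running.
  JSHeapConsistency::WriteBarrierParams params;
  auto barrier_type = JSHeapConsistency::GetWriteBarrierType(
      api_object, 1, wrappable, params,
      [&heap_handle]() -> cppgc::HeapHandle& { return heap_handle; });
  if (barrier_type == JSHeapConsistency::WriteBarrierType::kMarking) {
    JSHeapConsistency::DijkstraMarkingBarrier(params, heap_handle, wrappable);
  }
}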
diff --git a/deps/v8/test/unittests/heap/unified-heap-utils.cc b/deps/v8/test/unittests/heap/unified-heap-utils.cc
index 03ca79f21f..631695b7a5 100644
--- a/deps/v8/test/unittests/heap/unified-heap-utils.cc
+++ b/deps/v8/test/unittests/heap/unified-heap-utils.cc
@@ -15,25 +15,30 @@ namespace v8 {
namespace internal {
UnifiedHeapTest::UnifiedHeapTest()
- : saved_incremental_marking_wrappers_(FLAG_incremental_marking_wrappers) {
- FLAG_incremental_marking_wrappers = false;
- isolate()->heap()->ConfigureCppHeap(std::make_unique<CppHeapCreateParams>());
+ : cpp_heap_(v8::CppHeap::Create(
+ V8::GetCurrentPlatform(),
+ CppHeapCreateParams{{}, WrapperHelper::DefaultWrapperDescriptor()})) {
+ isolate()->heap()->AttachCppHeap(cpp_heap_.get());
}
-UnifiedHeapTest::~UnifiedHeapTest() {
- FLAG_incremental_marking_wrappers = saved_incremental_marking_wrappers_;
-}
-
-void UnifiedHeapTest::CollectGarbageWithEmbedderStack() {
+void UnifiedHeapTest::CollectGarbageWithEmbedderStack(
+ cppgc::Heap::SweepingType sweeping_type) {
heap()->SetEmbedderStackStateForNextFinalization(
EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
CollectGarbage(OLD_SPACE);
+ if (sweeping_type == cppgc::Heap::SweepingType::kAtomic) {
+ cpp_heap().AsBase().sweeper().FinishIfRunning();
+ }
}
-void UnifiedHeapTest::CollectGarbageWithoutEmbedderStack() {
+void UnifiedHeapTest::CollectGarbageWithoutEmbedderStack(
+ cppgc::Heap::SweepingType sweeping_type) {
heap()->SetEmbedderStackStateForNextFinalization(
EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
CollectGarbage(OLD_SPACE);
+ if (sweeping_type == cppgc::Heap::SweepingType::kAtomic) {
+ cpp_heap().AsBase().sweeper().FinishIfRunning();
+ }
}
CppHeap& UnifiedHeapTest::cpp_heap() const {
@@ -46,8 +51,8 @@ cppgc::AllocationHandle& UnifiedHeapTest::allocation_handle() {
// static
v8::Local<v8::Object> WrapperHelper::CreateWrapper(
- v8::Local<v8::Context> context, void* wrappable_object,
- const char* class_name) {
+ v8::Local<v8::Context> context, void* wrappable_type,
+ void* wrappable_object, const char* class_name) {
v8::EscapableHandleScope scope(context->GetIsolate());
v8::Local<v8::FunctionTemplate> function_t =
v8::FunctionTemplate::New(context->GetIsolate());
@@ -62,8 +67,7 @@ v8::Local<v8::Object> WrapperHelper::CreateWrapper(
function_t->GetFunction(context).ToLocalChecked();
v8::Local<v8::Object> instance =
function->NewInstance(context).ToLocalChecked();
- instance->SetAlignedPointerInInternalField(0, wrappable_object);
- instance->SetAlignedPointerInInternalField(1, wrappable_object);
+ SetWrappableConnection(instance, wrappable_type, wrappable_object);
CHECK(!instance.IsEmpty());
i::Handle<i::JSReceiver> js_obj = v8::Utils::OpenHandle(*instance);
CHECK_EQ(i::JS_API_OBJECT_TYPE, js_obj->map().instance_type());
@@ -72,8 +76,19 @@ v8::Local<v8::Object> WrapperHelper::CreateWrapper(
// static
void WrapperHelper::ResetWrappableConnection(v8::Local<v8::Object> api_object) {
- api_object->SetAlignedPointerInInternalField(0, nullptr);
- api_object->SetAlignedPointerInInternalField(1, nullptr);
+ api_object->SetAlignedPointerInInternalField(kWrappableTypeEmbedderIndex,
+ nullptr);
+ api_object->SetAlignedPointerInInternalField(kWrappableInstanceEmbedderIndex,
+ nullptr);
+}
+
+// static
+void WrapperHelper::SetWrappableConnection(v8::Local<v8::Object> api_object,
+ void* type, void* instance) {
+ api_object->SetAlignedPointerInInternalField(kWrappableTypeEmbedderIndex,
+ type);
+ api_object->SetAlignedPointerInInternalField(kWrappableInstanceEmbedderIndex,
+ instance);
}
} // namespace internal
diff --git a/deps/v8/test/unittests/heap/unified-heap-utils.h b/deps/v8/test/unittests/heap/unified-heap-utils.h
index 6c37f03163..8df67aa7a4 100644
--- a/deps/v8/test/unittests/heap/unified-heap-utils.h
+++ b/deps/v8/test/unittests/heap/unified-heap-utils.h
@@ -6,10 +6,14 @@
#define V8_UNITTESTS_HEAP_UNIFIED_HEAP_UTILS_H_
#include "include/cppgc/heap.h"
+#include "include/v8-cppgc.h"
#include "include/v8.h"
#include "test/unittests/heap/heap-utils.h"
namespace v8 {
+
+class CppHeap;
+
namespace internal {
class CppHeap;
@@ -17,30 +21,50 @@ class CppHeap;
class UnifiedHeapTest : public TestWithHeapInternals {
public:
UnifiedHeapTest();
- ~UnifiedHeapTest() override;
+ ~UnifiedHeapTest() override = default;
- void CollectGarbageWithEmbedderStack();
- void CollectGarbageWithoutEmbedderStack();
+ void CollectGarbageWithEmbedderStack(cppgc::Heap::SweepingType sweeping_type =
+ cppgc::Heap::SweepingType::kAtomic);
+ void CollectGarbageWithoutEmbedderStack(
+ cppgc::Heap::SweepingType sweeping_type =
+ cppgc::Heap::SweepingType::kAtomic);
CppHeap& cpp_heap() const;
cppgc::AllocationHandle& allocation_handle();
private:
- bool saved_incremental_marking_wrappers_;
+ std::unique_ptr<v8::CppHeap> cpp_heap_;
};
class WrapperHelper {
public:
+ static constexpr size_t kWrappableTypeEmbedderIndex = 0;
+ static constexpr size_t kWrappableInstanceEmbedderIndex = 1;
+ // Id that identifies types that should be traced.
+ static constexpr uint16_t kTracedEmbedderId = uint16_t{0xA50F};
+
+ static constexpr WrapperDescriptor DefaultWrapperDescriptor() {
+ return WrapperDescriptor(kWrappableTypeEmbedderIndex,
+ kWrappableInstanceEmbedderIndex,
+ kTracedEmbedderId);
+ }
+
// Sets up a V8 API object so that it points back to a C++ object. The setup
// used is recognized by the GC and references will be followed for liveness
// analysis (marking) as well as tooling (snapshot).
static v8::Local<v8::Object> CreateWrapper(v8::Local<v8::Context> context,
+ void* wrappable_type,
void* wrappable_object,
const char* class_name = "");
// Resets the connection of a wrapper (JS) to its wrappable (C++), meaning
// that the wrappable object is no longer kept alive by the wrapper object.
static void ResetWrappableConnection(v8::Local<v8::Object> api_object);
+
+ // Sets up the connection of a wrapper (JS) to its wrappable (C++). Does not
+ // emit any possibly needed write barrier.
+ static void SetWrappableConnection(v8::Local<v8::Object> api_object, void*,
+ void*);
};
} // namespace internal
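The constants added to WrapperHelper spell out the wrapper layout these tests rely on: internal field 0 identifies the wrappable type (in the tests, a pointer to a uint16_t equal to kTracedEmbedderId), and internal field 1 points to the wrappable instance. A hedged sketch of the corresponding heap setup, using only names visible in this patch; the argument roles noted in comments are assumptions:

// Illustrative sketch mirroring UnifiedHeapTest's constructor.
void AttachTestCppHeap(v8::internal::Isolate* i_isolate) {
  auto cpp_heap = v8::CppHeap::Create(
      V8::GetCurrentPlatform(),
      CppHeapCreateParams{
          {},  // no custom spaces
          WrapperDescriptor(/*type field=*/0, /*instance field=*/1,
                            /*embedder id=*/0xA50F)});
  i_isolate->heap()->AttachCppHeap(cpp_heap.get());
}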
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index 9da64339d5..d2beba0fbc 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -84,6 +84,12 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.StoreAccumulatorInRegister(reg)
.LoadNull();
+ // The above had a lot of Star0, but we must also emit the rest of
+ // the short-star codes.
+ for (int i = 1; i < 16; ++i) {
+ builder.StoreAccumulatorInRegister(Register(i));
+ }
+
// Emit register-register transfer.
builder.MoveRegister(reg, other);
builder.MoveRegister(reg, wide);
@@ -446,7 +452,13 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
builder.Return();
// Generate BytecodeArray.
- scope.SetScriptScopeInfo(factory->NewScopeInfo(1));
+ Handle<ScopeInfo> scope_info =
+ factory->NewScopeInfo(ScopeInfo::kVariablePartIndex);
+ scope_info->set_flags(0);
+ scope_info->set_context_local_count(0);
+ scope_info->set_parameter_count(0);
+ scope.SetScriptScopeInfo(scope_info);
+
ast_factory.Internalize(isolate());
Handle<BytecodeArray> the_array = builder.ToBytecodeArray(isolate());
CHECK_EQ(the_array->frame_size(),
@@ -694,8 +706,6 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
BytecodeArrayBuilder builder(zone(), 1, 1);
- Register reg(0);
-
BytecodeLabel end;
builder.JumpIfNull(&end);
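The register-count bumps and the new loop above exist because Star now has dedicated short forms: the iterator tests below assert that stores to registers r0-r15 emit the operand-free Star0-Star15 bytecodes, while r16 and up still emit the generic Star with a register operand. A hedged sketch of the distinction, using the builder API already shown in these tests:

// Illustrative only: which bytecode a register store emits after this change.
BytecodeArrayBuilder builder(zone(), 3, 17, &feedback_spec);
builder.LoadLiteral(Smi::FromInt(1))
    .StoreAccumulatorInRegister(Register(0))    // emits Bytecode::kStar0
    .StoreAccumulatorInRegister(Register(15))   // emits Bytecode::kStar15
    .StoreAccumulatorInRegister(Register(16));  // emits Bytecode::kStar r16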
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index 5772b802c0..ea60664bea 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -26,7 +26,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
// Use a builder to create an array containing multiple bytecodes
// with 0, 1 and 2 operands.
FeedbackVectorSpec feedback_spec(zone());
- BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
+ BytecodeArrayBuilder builder(zone(), 3, 17, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
HashSeed(isolate()));
double heap_num_0 = 2.718;
@@ -35,7 +35,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
Smi smi_0 = Smi::FromInt(64);
Smi smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
- Register reg_1(1);
+ Register reg_16(16); // Something not eligible for short Star.
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
@@ -55,11 +55,11 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
.LoadLiteral(smi_0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_1)
- .StoreAccumulatorInRegister(reg_1)
+ .StoreAccumulatorInRegister(reg_16)
.LoadAccumulatorWithRegister(reg_0)
.BinaryOperation(Token::Value::ADD, reg_0, 2)
- .StoreAccumulatorInRegister(reg_1)
- .LoadNamedProperty(reg_1, name, load_feedback_slot)
+ .StoreAccumulatorInRegister(reg_16)
+ .LoadNamedProperty(reg_16, name, load_feedback_slot)
.BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
@@ -85,13 +85,11 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
iterator.Advance();
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar0);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
CHECK(!iterator.done());
- offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
iterator.Advance();
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
@@ -103,13 +101,11 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
iterator.Advance();
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar0);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
CHECK(!iterator.done());
- offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
iterator.Advance();
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaZero);
@@ -119,13 +115,11 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle);
iterator.Advance();
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar0);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
CHECK(!iterator.done());
- offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
iterator.Advance();
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
@@ -136,13 +130,11 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
iterator.Advance();
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar0);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
CHECK(!iterator.done());
- offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
iterator.Advance();
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
@@ -157,7 +149,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
+ EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_16.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
@@ -183,7 +175,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
+ EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_16.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
@@ -192,7 +184,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaNamedProperty);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
+ EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_16.index());
EXPECT_EQ(iterator.GetIndexOperand(1), name_index);
EXPECT_EQ(iterator.GetIndexOperand(2), load_feedback_slot);
CHECK(!iterator.done());
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
index ecdf6757fb..c2eda59d07 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
@@ -244,7 +244,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
// Use a builder to create an array containing multiple bytecodes
// with 0, 1 and 2 operands.
FeedbackVectorSpec feedback_spec(zone());
- BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
+ BytecodeArrayBuilder builder(zone(), 3, 17, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
HashSeed(isolate()));
double heap_num_0 = 2.718;
@@ -253,7 +253,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
Smi smi_0 = Smi::FromInt(64);
Smi smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
- Register reg_1(1);
+ Register reg_16(16); // Something not eligible for short Star.
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
@@ -270,11 +270,11 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
.LoadLiteral(smi_0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_1)
- .StoreAccumulatorInRegister(reg_1)
+ .StoreAccumulatorInRegister(reg_16)
.LoadAccumulatorWithRegister(reg_0)
.BinaryOperation(Token::Value::ADD, reg_0, 2)
- .StoreAccumulatorInRegister(reg_1)
- .LoadNamedProperty(reg_1, name, feedback_slot)
+ .StoreAccumulatorInRegister(reg_16)
+ .LoadNamedProperty(reg_16, name, feedback_slot)
.BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
@@ -292,13 +292,13 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
iterator.GoToIndex(11);
offset = Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
- offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
- offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle);
- offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
- offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kQuadruple) +
kPrefixByteSize;
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
@@ -314,7 +314,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
iterator.GoToIndex(2);
offset = Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
- offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
EXPECT_EQ(iterator.current_index(), 2);
@@ -326,13 +326,13 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
iterator.GoToIndex(16);
offset = Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
- offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
- offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle);
- offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
- offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kQuadruple) +
kPrefixByteSize;
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
@@ -364,7 +364,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
EXPECT_EQ(iterator.current_index(), 13);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
+ EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_16.index());
EXPECT_EQ(iterator.GetIndexOperand(1), name_index);
EXPECT_EQ(iterator.GetIndexOperand(2), feedback_slot);
ASSERT_TRUE(iterator.IsValid());
@@ -383,13 +383,13 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
iterator.GoToIndex(20);
offset = Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
- offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
- offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle);
- offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
- offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kQuadruple) +
kPrefixByteSize;
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
@@ -422,7 +422,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
// Use a builder to create an array containing multiple bytecodes
// with 0, 1 and 2 operands.
FeedbackVectorSpec feedback_spec(zone());
- BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
+ BytecodeArrayBuilder builder(zone(), 3, 17, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
HashSeed(isolate()));
double heap_num_0 = 2.718;
@@ -431,7 +431,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
Smi smi_0 = Smi::FromInt(64);
Smi smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
- Register reg_1(1);
+ Register reg_16(16); // Something not eligible for short Star.
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
@@ -448,11 +448,11 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
.LoadLiteral(smi_0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_1)
- .StoreAccumulatorInRegister(reg_1)
+ .StoreAccumulatorInRegister(reg_16)
.LoadAccumulatorWithRegister(reg_0)
.BinaryOperation(Token::Value::ADD, reg_0, 2)
- .StoreAccumulatorInRegister(reg_1)
- .LoadNamedProperty(reg_1, name, feedback_slot)
+ .StoreAccumulatorInRegister(reg_16)
+ .LoadNamedProperty(reg_16, name, feedback_slot)
.BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
@@ -478,14 +478,12 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
++iterator;
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar0);
EXPECT_EQ(iterator.current_index(), 1);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
- offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
@@ -498,14 +496,12 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
++iterator;
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar0);
EXPECT_EQ(iterator.current_index(), 3);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
- offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaZero);
@@ -516,14 +512,12 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle);
++iterator;
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar0);
EXPECT_EQ(iterator.current_index(), 5);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
- offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
@@ -535,14 +529,12 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
++iterator;
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar0);
EXPECT_EQ(iterator.current_index(), 7);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
- offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
@@ -559,7 +551,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_index(), 9);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
+ EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_16.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
@@ -588,7 +580,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_index(), 12);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
+ EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_16.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
@@ -598,7 +590,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_index(), 13);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
+ EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_16.index());
EXPECT_EQ(iterator.GetIndexOperand(1), name_index);
EXPECT_EQ(iterator.GetIndexOperand(2), feedback_slot);
ASSERT_TRUE(iterator.IsValid());
@@ -683,7 +675,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
// Use a builder to create an array containing multiple bytecodes
// with 0, 1 and 2 operands.
FeedbackVectorSpec feedback_spec(zone());
- BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
+ BytecodeArrayBuilder builder(zone(), 3, 17, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
HashSeed(isolate()));
double heap_num_0 = 2.718;
@@ -692,7 +684,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
Smi smi_0 = Smi::FromInt(64);
Smi smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
- Register reg_1(1);
+ Register reg_16(16); // Something not eligible for short Star.
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
@@ -709,11 +701,11 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
.LoadLiteral(smi_0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_1)
- .StoreAccumulatorInRegister(reg_1)
+ .StoreAccumulatorInRegister(reg_16)
.LoadAccumulatorWithRegister(reg_0)
.BinaryOperation(Token::Value::ADD, reg_0, 2)
- .StoreAccumulatorInRegister(reg_1)
- .LoadNamedProperty(reg_1, name, feedback_slot)
+ .StoreAccumulatorInRegister(reg_16)
+ .LoadNamedProperty(reg_16, name, feedback_slot)
.BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
@@ -809,7 +801,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
EXPECT_EQ(iterator.current_index(), 13);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
+ EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_16.index());
EXPECT_EQ(iterator.GetIndexOperand(1), name_index);
EXPECT_EQ(iterator.GetIndexOperand(2), feedback_slot);
ASSERT_TRUE(iterator.IsValid());
@@ -820,7 +812,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
EXPECT_EQ(iterator.current_index(), 12);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
+ EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_16.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
--iterator;
@@ -849,7 +841,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
EXPECT_EQ(iterator.current_index(), 9);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
+ EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_16.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
--iterator;
@@ -864,13 +856,11 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
ASSERT_TRUE(iterator.IsValid());
--iterator;
- offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ offset -= Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar0);
EXPECT_EQ(iterator.current_index(), 7);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
--iterator;
@@ -883,13 +873,11 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
ASSERT_TRUE(iterator.IsValid());
--iterator;
- offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ offset -= Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar0);
EXPECT_EQ(iterator.current_index(), 5);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
--iterator;
@@ -901,13 +889,11 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
ASSERT_TRUE(iterator.IsValid());
--iterator;
- offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ offset -= Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar0);
EXPECT_EQ(iterator.current_index(), 3);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
--iterator;
@@ -921,13 +907,11 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
ASSERT_TRUE(iterator.IsValid());
--iterator;
- offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
- EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ offset -= Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
+ EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar0);
EXPECT_EQ(iterator.current_index(), 1);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
--iterator;
diff --git a/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
index 4604560429..7c20e69b3e 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
@@ -81,7 +81,8 @@ TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForJump) {
Register temp = NewTemporary();
optimizer()->DoStar(temp);
CHECK_EQ(write_count(), 0u);
- optimizer()->PrepareForBytecode<Bytecode::kJump, AccumulatorUse::kNone>();
+ optimizer()
+ ->PrepareForBytecode<Bytecode::kJump, ImplicitRegisterUse::kNone>();
CHECK_EQ(write_count(), 1u);
CHECK_EQ(output()->at(0).bytecode, Bytecode::kStar);
CHECK_EQ(output()->at(0).output.index(), temp.index());
@@ -98,19 +99,25 @@ TEST_F(BytecodeRegisterOptimizerTest, TemporaryNotEmitted) {
optimizer()->DoStar(temp);
ReleaseTemporaries(temp);
CHECK_EQ(write_count(), 0u);
- optimizer()->PrepareForBytecode<Bytecode::kReturn, AccumulatorUse::kRead>();
+ optimizer()
+ ->PrepareForBytecode<Bytecode::kReturn,
+ ImplicitRegisterUse::kReadAccumulator>();
CHECK_EQ(output()->at(0).bytecode, Bytecode::kLdar);
CHECK_EQ(output()->at(0).input.index(), parameter.index());
}
TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterUsed) {
Initialize(3, 1);
- optimizer()->PrepareForBytecode<Bytecode::kLdaSmi, AccumulatorUse::kWrite>();
+ optimizer()
+ ->PrepareForBytecode<Bytecode::kLdaSmi,
+ ImplicitRegisterUse::kWriteAccumulator>();
Register temp0 = NewTemporary();
Register temp1 = NewTemporary();
optimizer()->DoStar(temp1);
CHECK_EQ(write_count(), 0u);
- optimizer()->PrepareForBytecode<Bytecode::kLdaSmi, AccumulatorUse::kWrite>();
+ optimizer()
+ ->PrepareForBytecode<Bytecode::kLdaSmi,
+ ImplicitRegisterUse::kWriteAccumulator>();
CHECK_EQ(write_count(), 1u);
CHECK_EQ(output()->at(0).bytecode, Bytecode::kStar);
CHECK_EQ(output()->at(0).output.index(), temp1.index());
@@ -120,7 +127,9 @@ TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterUsed) {
CHECK_EQ(write_count(), 1u);
optimizer()->DoLdar(temp0);
CHECK_EQ(write_count(), 1u);
- optimizer()->PrepareForBytecode<Bytecode::kReturn, AccumulatorUse::kRead>();
+ optimizer()
+ ->PrepareForBytecode<Bytecode::kReturn,
+ ImplicitRegisterUse::kReadAccumulator>();
CHECK_EQ(write_count(), 2u);
CHECK_EQ(output()->at(1).bytecode, Bytecode::kLdar);
CHECK_EQ(output()->at(1).input.index(), temp1.index());
@@ -128,7 +137,9 @@ TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterUsed) {
TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterNotFlushed) {
Initialize(3, 1);
- optimizer()->PrepareForBytecode<Bytecode::kLdaSmi, AccumulatorUse::kWrite>();
+ optimizer()
+ ->PrepareForBytecode<Bytecode::kLdaSmi,
+ ImplicitRegisterUse::kWriteAccumulator>();
Register temp0 = NewTemporary();
Register temp1 = NewTemporary();
optimizer()->DoStar(temp0);
@@ -154,7 +165,9 @@ TEST_F(BytecodeRegisterOptimizerTest, StoresToLocalsImmediate) {
CHECK_EQ(output()->at(0).input.index(), parameter.index());
CHECK_EQ(output()->at(0).output.index(), local.index());
- optimizer()->PrepareForBytecode<Bytecode::kReturn, AccumulatorUse::kRead>();
+ optimizer()
+ ->PrepareForBytecode<Bytecode::kReturn,
+ ImplicitRegisterUse::kReadAccumulator>();
CHECK_EQ(write_count(), 2u);
CHECK_EQ(output()->at(1).bytecode, Bytecode::kLdar);
CHECK_EQ(output()->at(1).input.index(), local.index());
@@ -183,13 +196,16 @@ TEST_F(BytecodeRegisterOptimizerTest, RangeOfTemporariesMaterializedForInput) {
Register parameter = Register::FromParameterIndex(1, 3);
Register temp0 = NewTemporary();
Register temp1 = NewTemporary();
- optimizer()->PrepareForBytecode<Bytecode::kLdaSmi, AccumulatorUse::kWrite>();
+ optimizer()
+ ->PrepareForBytecode<Bytecode::kLdaSmi,
+ ImplicitRegisterUse::kWriteAccumulator>();
optimizer()->DoStar(temp0);
optimizer()->DoMov(parameter, temp1);
CHECK_EQ(write_count(), 0u);
optimizer()
- ->PrepareForBytecode<Bytecode::kCallJSRuntime, AccumulatorUse::kWrite>();
+ ->PrepareForBytecode<Bytecode::kCallJSRuntime,
+ ImplicitRegisterUse::kWriteAccumulator>();
RegisterList reg_list = optimizer()->GetInputRegisterList(
BytecodeUtils::NewRegisterList(temp0.index(), 2));
CHECK_EQ(temp0.index(), reg_list.first_register().index());
diff --git a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
index f390631e9f..1aadb5a6c9 100644
--- a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
@@ -333,30 +333,33 @@ TEST(OperandScale, PrefixesRequired) {
Bytecode::kExtraWide);
}
-TEST(AccumulatorUse, LogicalOperators) {
- CHECK_EQ(AccumulatorUse::kNone | AccumulatorUse::kRead,
- AccumulatorUse::kRead);
- CHECK_EQ(AccumulatorUse::kRead | AccumulatorUse::kWrite,
- AccumulatorUse::kReadWrite);
- CHECK_EQ(AccumulatorUse::kRead & AccumulatorUse::kReadWrite,
- AccumulatorUse::kRead);
- CHECK_EQ(AccumulatorUse::kRead & AccumulatorUse::kWrite,
- AccumulatorUse::kNone);
+TEST(ImplicitRegisterUse, LogicalOperators) {
+ CHECK_EQ(ImplicitRegisterUse::kNone | ImplicitRegisterUse::kReadAccumulator,
+ ImplicitRegisterUse::kReadAccumulator);
+ CHECK_EQ(ImplicitRegisterUse::kReadAccumulator |
+ ImplicitRegisterUse::kWriteAccumulator,
+ ImplicitRegisterUse::kReadWriteAccumulator);
+ CHECK_EQ(ImplicitRegisterUse::kReadAccumulator &
+ ImplicitRegisterUse::kReadWriteAccumulator,
+ ImplicitRegisterUse::kReadAccumulator);
+ CHECK_EQ(ImplicitRegisterUse::kReadAccumulator &
+ ImplicitRegisterUse::kWriteAccumulator,
+ ImplicitRegisterUse::kNone);
}
-TEST(AccumulatorUse, SampleBytecodes) {
+TEST(ImplicitRegisterUse, SampleBytecodes) {
CHECK(Bytecodes::ReadsAccumulator(Bytecode::kStar));
CHECK(!Bytecodes::WritesAccumulator(Bytecode::kStar));
- CHECK_EQ(Bytecodes::GetAccumulatorUse(Bytecode::kStar),
- AccumulatorUse::kRead);
+ CHECK_EQ(Bytecodes::GetImplicitRegisterUse(Bytecode::kStar),
+ ImplicitRegisterUse::kReadAccumulator);
CHECK(!Bytecodes::ReadsAccumulator(Bytecode::kLdar));
CHECK(Bytecodes::WritesAccumulator(Bytecode::kLdar));
- CHECK_EQ(Bytecodes::GetAccumulatorUse(Bytecode::kLdar),
- AccumulatorUse::kWrite);
+ CHECK_EQ(Bytecodes::GetImplicitRegisterUse(Bytecode::kLdar),
+ ImplicitRegisterUse::kWriteAccumulator);
CHECK(Bytecodes::ReadsAccumulator(Bytecode::kAdd));
CHECK(Bytecodes::WritesAccumulator(Bytecode::kAdd));
- CHECK_EQ(Bytecodes::GetAccumulatorUse(Bytecode::kAdd),
- AccumulatorUse::kReadWrite);
+ CHECK_EQ(Bytecodes::GetImplicitRegisterUse(Bytecode::kAdd),
+ ImplicitRegisterUse::kReadWriteAccumulator);
}
} // namespace interpreter
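The AccumulatorUse-to-ImplicitRegisterUse rename reflects that, with short-star bytecodes, a bytecode's implicit effects now include register writes and not just accumulator reads and writes (see the WritesImplicitRegister check in the interpreter-assembler test below). The flag algebra the test above relies on, restated as a sketch; the enumerator values themselves live in src/interpreter and are not shown here:

// Illustrative restatement of the bit-flag relationships checked above.
ImplicitRegisterUse use = ImplicitRegisterUse::kReadAccumulator |
                          ImplicitRegisterUse::kWriteAccumulator;
CHECK_EQ(ImplicitRegisterUse::kReadWriteAccumulator, use);
CHECK_EQ(ImplicitRegisterUse::kReadAccumulator,
         use & ImplicitRegisterUse::kReadAccumulator);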
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index 6ec3999968..735ecf4d2f 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -48,6 +48,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::
if (Bytecodes::WritesAccumulator(bytecode())) {
SetAccumulator(NullConstant());
}
+ if (Bytecodes::WritesImplicitRegister(bytecode())) {
+ StoreRegisterForShortStar(NullConstant(), IntPtrConstant(2));
+ }
}
Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoad(
diff --git a/deps/v8/test/unittests/logging/counters-unittest.cc b/deps/v8/test/unittests/logging/counters-unittest.cc
index c0ab18343b..468ca4fc4e 100644
--- a/deps/v8/test/unittests/logging/counters-unittest.cc
+++ b/deps/v8/test/unittests/logging/counters-unittest.cc
@@ -813,6 +813,20 @@ TEST_F(RuntimeCallStatsTest, ApiGetter) {
EXPECT_EQ(kCustomCallbackTime * 4013, counter2()->time().InMicroseconds());
}
+TEST_F(RuntimeCallStatsTest, GarbageCollection) {
+ FLAG_expose_gc = true;
+ v8::Isolate* isolate = v8_isolate();
+ RunJS(
+ "let root = [];"
+ "for (let i = 0; i < 10; i++) root.push((new Array(1000)).fill(0));"
+ "root.push((new Array(1000000)).fill(0));"
+ "((new Array(1000000)).fill(0));");
+ isolate->RequestGarbageCollectionForTesting(
+ v8::Isolate::kFullGarbageCollection);
+ isolate->RequestGarbageCollectionForTesting(
+ v8::Isolate::kFullGarbageCollection);
+}
+
TEST_F(SnapshotNativeCounterTest, StringAddNative) {
RunJS("let s = 'hello, ' + 'world!'");
diff --git a/deps/v8/test/unittests/objects/object-unittest.cc b/deps/v8/test/unittests/objects/object-unittest.cc
index e5aea4c682..eb666ebca8 100644
--- a/deps/v8/test/unittests/objects/object-unittest.cc
+++ b/deps/v8/test/unittests/objects/object-unittest.cc
@@ -57,7 +57,7 @@ TEST(Object, InstanceTypeList) {
TEST(Object, InstanceTypeListOrder) {
int current = 0;
- int last = -1;
+ int prev = -1;
InstanceType current_type = static_cast<InstanceType>(current);
EXPECT_EQ(current_type, InstanceType::FIRST_TYPE);
EXPECT_EQ(current_type, InstanceType::INTERNALIZED_STRING_TYPE);
@@ -65,12 +65,12 @@ TEST(Object, InstanceTypeListOrder) {
current_type = InstanceType::type; \
current = static_cast<int>(current_type); \
if (current > static_cast<int>(LAST_NAME_TYPE)) { \
- EXPECT_LE(last + 1, current); \
+ EXPECT_LE(prev + 1, current); \
} \
- EXPECT_LT(last, current) << " INSTANCE_TYPE_LIST is not ordered: " \
- << "last = " << static_cast<InstanceType>(last) \
+ EXPECT_LT(prev, current) << " INSTANCE_TYPE_LIST is not ordered: " \
+ << "last = " << static_cast<InstanceType>(prev) \
<< " vs. current = " << current_type; \
- last = current;
+ prev = current;
// Only test hand-written portion of instance type list. The generated portion
// doesn't run the same risk of getting out of order, and it does emit type
@@ -83,17 +83,17 @@ TEST(Object, InstanceTypeListOrder) {
TEST(Object, StructListOrder) {
int current = static_cast<int>(InstanceType::FIRST_STRUCT_TYPE);
- int last = current - 1;
- ASSERT_LT(0, last);
+ int prev = current - 1;
+ ASSERT_LT(0, prev);
InstanceType current_type = static_cast<InstanceType>(current);
#define TEST_STRUCT(TYPE, class, name) \
current_type = InstanceType::TYPE; \
current = static_cast<int>(current_type); \
- EXPECT_LE(last + 1, current) \
+ EXPECT_LE(prev + 1, current) \
<< " STRUCT_LIST is not ordered: " \
- << " last = " << static_cast<InstanceType>(last) \
+ << " last = " << static_cast<InstanceType>(prev) \
<< " vs. current = " << current_type; \
- last = current;
+ prev = current;
// Only test the _BASE portion (the hand-coded part). Note that the values are
// not necessarily consecutive because some Structs that need special
diff --git a/deps/v8/test/unittests/objects/osr-optimized-code-cache-unittest.cc b/deps/v8/test/unittests/objects/osr-optimized-code-cache-unittest.cc
index 225048de63..e3b783089c 100644
--- a/deps/v8/test/unittests/objects/osr-optimized-code-cache-unittest.cc
+++ b/deps/v8/test/unittests/objects/osr-optimized-code-cache-unittest.cc
@@ -51,7 +51,7 @@ TEST_F(TestWithNativeContext, AddCodeToEmptyCache) {
Handle<NativeContext> native_context(function->native_context(), isolate);
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
Handle<Code> code(function->code(), isolate);
- BailoutId bailout_id(1);
+ BytecodeOffset bailout_id(1);
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
bailout_id);
@@ -88,14 +88,14 @@ TEST_F(TestWithNativeContext, GrowCodeCache) {
int bailout_id = 0;
for (bailout_id = 0; bailout_id < kInitialEntries; bailout_id++) {
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
- BailoutId(bailout_id));
+ BytecodeOffset(bailout_id));
}
Handle<OSROptimizedCodeCache> osr_cache(
native_context->GetOSROptimizedCodeCache(), isolate);
EXPECT_EQ(osr_cache->length(), kInitialLength);
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
- BailoutId(bailout_id));
+ BytecodeOffset(bailout_id));
osr_cache = Handle<OSROptimizedCodeCache>(
native_context->GetOSROptimizedCodeCache(), isolate);
EXPECT_EQ(osr_cache->length(), kInitialLength * 2);
@@ -131,7 +131,7 @@ TEST_F(TestWithNativeContext, FindCachedEntry) {
int bailout_id = 0;
for (bailout_id = 0; bailout_id < kInitialEntries; bailout_id++) {
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
- BailoutId(bailout_id));
+ BytecodeOffset(bailout_id));
}
i::ScopedVector<char> source1(1024);
@@ -140,24 +140,25 @@ TEST_F(TestWithNativeContext, FindCachedEntry) {
Handle<SharedFunctionInfo> shared1(function1->shared(), isolate);
Handle<Code> code1(function1->code(), isolate);
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared1, code1,
- BailoutId(bailout_id));
+ BytecodeOffset(bailout_id));
Handle<OSROptimizedCodeCache> osr_cache(
native_context->GetOSROptimizedCodeCache(), isolate);
- EXPECT_EQ(osr_cache->GetOptimizedCode(shared, BailoutId(0), isolate), *code);
+ EXPECT_EQ(osr_cache->GetOptimizedCode(shared, BytecodeOffset(0), isolate),
+ *code);
EXPECT_EQ(
- osr_cache->GetOptimizedCode(shared1, BailoutId(bailout_id), isolate),
+ osr_cache->GetOptimizedCode(shared1, BytecodeOffset(bailout_id), isolate),
*code1);
RunJS("%DeoptimizeFunction(f1)");
EXPECT_TRUE(
- osr_cache->GetOptimizedCode(shared1, BailoutId(bailout_id), isolate)
+ osr_cache->GetOptimizedCode(shared1, BytecodeOffset(bailout_id), isolate)
.is_null());
osr_cache->Set(OSROptimizedCodeCache::kCachedCodeOffset,
HeapObjectReference::ClearedValue(isolate));
- EXPECT_TRUE(
- osr_cache->GetOptimizedCode(shared, BailoutId(0), isolate).is_null());
+ EXPECT_TRUE(osr_cache->GetOptimizedCode(shared, BytecodeOffset(0), isolate)
+ .is_null());
}
TEST_F(TestWithNativeContext, MaxCapacityCache) {
@@ -177,7 +178,7 @@ TEST_F(TestWithNativeContext, MaxCapacityCache) {
// Add max_capacity - 1 entries.
for (bailout_id = 0; bailout_id < kMaxEntries - 1; bailout_id++) {
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
- BailoutId(bailout_id));
+ BytecodeOffset(bailout_id));
}
Handle<OSROptimizedCodeCache> osr_cache(
native_context->GetOSROptimizedCodeCache(), isolate);
@@ -190,7 +191,7 @@ TEST_F(TestWithNativeContext, MaxCapacityCache) {
Handle<SharedFunctionInfo> shared1(function1->shared(), isolate);
Handle<Code> code1(function1->code(), isolate);
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared1, code1,
- BailoutId(bailout_id));
+ BytecodeOffset(bailout_id));
osr_cache = Handle<OSROptimizedCodeCache>(
native_context->GetOSROptimizedCodeCache(), isolate);
EXPECT_EQ(osr_cache->length(), kMaxLength);
@@ -215,7 +216,7 @@ TEST_F(TestWithNativeContext, MaxCapacityCache) {
Handle<Code> code2(function2->code(), isolate);
bailout_id++;
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared2, code2,
- BailoutId(bailout_id));
+ BytecodeOffset(bailout_id));
osr_cache = Handle<OSROptimizedCodeCache>(
native_context->GetOSROptimizedCodeCache(), isolate);
EXPECT_EQ(osr_cache->length(), kMaxLength);
@@ -249,7 +250,7 @@ TEST_F(TestWithNativeContext, ReuseClearedEntry) {
int bailout_id = 0;
for (bailout_id = 0; bailout_id < num_entries; bailout_id++) {
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
- BailoutId(bailout_id));
+ BytecodeOffset(bailout_id));
}
Handle<OSROptimizedCodeCache> osr_cache(
native_context->GetOSROptimizedCodeCache(), isolate);
@@ -268,7 +269,7 @@ TEST_F(TestWithNativeContext, ReuseClearedEntry) {
Handle<SharedFunctionInfo> shared1(function1->shared(), isolate);
Handle<Code> code1(function1->code(), isolate);
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared1, code1,
- BailoutId(bailout_id));
+ BytecodeOffset(bailout_id));
osr_cache = Handle<OSROptimizedCodeCache>(
native_context->GetOSROptimizedCodeCache(), isolate);
EXPECT_EQ(osr_cache->length(), expected_length);
@@ -292,7 +293,7 @@ TEST_F(TestWithNativeContext, ReuseClearedEntry) {
Handle<Code> code2(function2->code(), isolate);
bailout_id++;
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared2, code2,
- BailoutId(bailout_id));
+ BytecodeOffset(bailout_id));
osr_cache = Handle<OSROptimizedCodeCache>(
native_context->GetOSROptimizedCodeCache(), isolate);
EXPECT_EQ(osr_cache->length(), expected_length);
@@ -335,10 +336,10 @@ TEST_F(TestWithNativeContext, EvictDeoptedEntriesNoCompact) {
for (bailout_id = 0; bailout_id < num_entries; bailout_id++) {
if (bailout_id == deopt_id1 || bailout_id == deopt_id2) {
OSROptimizedCodeCache::AddOptimizedCode(
- native_context, deopt_shared, deopt_code, BailoutId(bailout_id));
+ native_context, deopt_shared, deopt_code, BytecodeOffset(bailout_id));
} else {
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
- BailoutId(bailout_id));
+ BytecodeOffset(bailout_id));
}
}
Handle<OSROptimizedCodeCache> osr_cache(
@@ -392,10 +393,10 @@ TEST_F(TestWithNativeContext, EvictDeoptedEntriesCompact) {
for (bailout_id = 0; bailout_id < num_entries; bailout_id++) {
if (bailout_id % 2 == 0) {
OSROptimizedCodeCache::AddOptimizedCode(
- native_context, deopt_shared, deopt_code, BailoutId(bailout_id));
+ native_context, deopt_shared, deopt_code, BytecodeOffset(bailout_id));
} else {
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
- BailoutId(bailout_id));
+ BytecodeOffset(bailout_id));
}
}
Handle<OSROptimizedCodeCache> osr_cache(
diff --git a/deps/v8/test/unittests/objects/value-serializer-unittest.cc b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
index 82afd6fd1f..afefdc1f45 100644
--- a/deps/v8/test/unittests/objects/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
@@ -1511,6 +1511,25 @@ TEST_F(ValueSerializerTest, DecodeLinearRegExp) {
i::FLAG_enable_experimental_regexp_engine = flag_was_enabled;
}
+TEST_F(ValueSerializerTest, DecodeHasIndicesRegExp) {
+ bool flag_was_enabled = i::FLAG_harmony_regexp_match_indices;
+
+ // The last byte encodes the regexp flags.
+ std::vector<uint8_t> regexp_encoding = {0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03,
+ 0x66, 0x6F, 0x6F, 0xAD, 0x01};
+
+ i::FLAG_harmony_regexp_match_indices = true;
+ Local<Value> value = DecodeTest(regexp_encoding);
+ ASSERT_TRUE(value->IsRegExp());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === RegExp.prototype");
+ ExpectScriptTrue("result.toString() === '/foo/dgmsy'");
+
+ i::FLAG_harmony_regexp_match_indices = false;
+ InvalidDecodeTest(regexp_encoding);
+
+ i::FLAG_harmony_regexp_match_indices = flag_was_enabled;
+}
+
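The expected source '/foo/dgmsy' corresponds to the trailing flag varint 0xAD 0x01 (decimal 173). A minimal standalone sketch of that decoding, assuming single-bit flag positions (hasIndices = 1 << 7, dotAll = 1 << 5, sticky = 1 << 3, multiline = 1 << 2, global = 1 << 0 are assumptions for illustration, not taken from this patch):

    #include <cstdint>
    #include <string>

    // Turns an assumed RegExp flag bitfield into its flag letters.
    std::string DecodeRegExpFlags(uint32_t bits) {
      std::string flags;
      if (bits & (1u << 7)) flags += 'd';  // hasIndices
      if (bits & (1u << 0)) flags += 'g';  // global
      if (bits & (1u << 2)) flags += 'm';  // multiline
      if (bits & (1u << 5)) flags += 's';  // dotAll
      if (bits & (1u << 3)) flags += 'y';  // sticky
      return flags;
    }

    // DecodeRegExpFlags(173) == "dgmsy", matching the test expectation above.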
TEST_F(ValueSerializerTest, RoundTripMap) {
Local<Value> value = RoundTripTest("var m = new Map(); m.set(42, 'foo'); m;");
ASSERT_TRUE(value->IsMap());
@@ -2461,6 +2480,7 @@ TEST_F(ValueSerializerTestWithHostArrayBufferView, RoundTripUint8ArrayInput) {
ExpectScriptTrue("result.a === result.b");
}
+#if V8_ENABLE_WEBASSEMBLY
// It's expected that WebAssembly has more exhaustive tests elsewhere; this
// mostly checks that the logic to embed it in structured clone serialization
// works correctly.
@@ -2712,6 +2732,7 @@ TEST_F(ValueSerializerTestWithWasm, ComplexObjectWithManyTransfer) {
VerifyComplexObject(value);
ExpectScriptTrue("result.mod1 != result.mod2");
}
+#endif // V8_ENABLE_WEBASSEMBLY
class ValueSerializerTestWithLimitedMemory : public ValueSerializerTest {
protected:
diff --git a/deps/v8/test/unittests/run-all-unittests.cc b/deps/v8/test/unittests/run-all-unittests.cc
index 2511cdd130..5ef3fe3afe 100644
--- a/deps/v8/test/unittests/run-all-unittests.cc
+++ b/deps/v8/test/unittests/run-all-unittests.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/cppgc/platform.h"
#include "include/libplatform/libplatform.h"
#include "include/v8.h"
#include "src/base/compiler-specific.h"
@@ -18,6 +19,7 @@ class DefaultPlatformEnvironment final : public ::testing::Environment {
0, v8::platform::IdleTaskSupport::kEnabled);
ASSERT_TRUE(platform_.get() != nullptr);
v8::V8::InitializePlatform(platform_.get());
+ cppgc::InitializeProcess(platform_->GetPageAllocator());
ASSERT_TRUE(v8::V8::Initialize());
}
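cppgc::InitializeProcess needs a matching process-level shutdown once the embedder is done with garbage-collected heaps. A minimal sketch of the pairing, assuming the public API in include/cppgc/platform.h (the teardown side and its ordering are assumptions; the hunk above only shows setup):

    #include <memory>

    #include "include/cppgc/platform.h"
    #include "include/libplatform/libplatform.h"
    #include "include/v8.h"

    void RunWithCppgcHeap() {
      std::unique_ptr<v8::Platform> platform =
          v8::platform::NewDefaultPlatform();
      v8::V8::InitializePlatform(platform.get());
      cppgc::InitializeProcess(platform->GetPageAllocator());
      v8::V8::Initialize();

      // ... create isolates / cppgc heaps and run work here ...

      v8::V8::Dispose();
      cppgc::ShutdownProcess();  // assumed counterpart to InitializeProcess
      v8::V8::ShutdownPlatform();
    }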
diff --git a/deps/v8/test/unittests/torque/torque-unittest.cc b/deps/v8/test/unittests/torque/torque-unittest.cc
index ca62119e56..d2362e6e03 100644
--- a/deps/v8/test/unittests/torque/torque-unittest.cc
+++ b/deps/v8/test/unittests/torque/torque-unittest.cc
@@ -89,6 +89,7 @@ type SmiTagged<T : type extends uint31> extends Smi;
type String extends HeapObject;
type HeapNumber extends HeapObject;
type FixedArrayBase extends HeapObject;
+type Lazy<T: type>;
struct float64_or_hole {
is_hole: bool;
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index f799bb9eae..9acc942ea5 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -4,6 +4,12 @@
[
##############################################################################
+[ALWAYS, {
+ # https://crbug.com/v8/11413
+ 'RuntimeCallStatsTest.GarbageCollection': [PASS, ['verify_csa', SKIP]],
+}], # ALWAYS
+
+##############################################################################
['system == macos and asan', {
# BUG(820416).
'BitsDeathTest*': [SKIP],
@@ -39,11 +45,11 @@
}], # system == macos and arch == arm64 and not simulator_run
##############################################################################
-['lite_mode or variant == jitless', {
- # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+# TODO(v8:7777): Change this once wasm is supported in jitless mode.
+['not has_webassembly or variant == jitless', {
'ValueSerializerTestWithSharedArrayBufferClone.RoundTripWebAssemblyMemory': [SKIP],
'ValueSerializerTestWithWasm.*': [SKIP],
-}], # lite_mode or variant == jitless
+}], # not has_webassembly or variant == jitless
##############################################################################
['system == aix', {
@@ -76,10 +82,16 @@
['not pointer_compression', {
# Tests are irrelevant without pointer compression
'DecompressionOptimizerTest.*': [SKIP],
-}], # not pointer_compression
+}], # not pointer_compression
################################################################################
['variant == stress_snapshot', {
'*': [SKIP], # only relevant for mjsunit tests.
}],
+
+################################################################################
+['is_clang == False and arch == riscv64', {
+  'LoggingTest.SourceLocation': [SKIP],  # issue-174
+}],
+
]
diff --git a/deps/v8/test/unittests/wasm/control-transfer-unittest.cc b/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
index 0d3ca9ad56..cd03cedeff 100644
--- a/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
+++ b/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
@@ -72,9 +72,9 @@ class ControlTransferTest : public TestWithZone {
// Check all control targets in the map.
for (auto& expected_transfer : expected_transfers) {
pc_t pc = expected_transfer.pc;
- EXPECT_TRUE(map.count(pc) > 0) << "expected control target @" << pc;
- if (!map.count(pc)) continue;
- auto& entry = map[pc];
+ EXPECT_TRUE(map.map.count(pc) > 0) << "expected control target @" << pc;
+ if (!map.map.count(pc)) continue;
+ auto& entry = map.map[pc];
EXPECT_THAT(entry, MakeMatcher(new ControlTransferMatcher(
pc, expected_transfer)));
}
@@ -97,7 +97,7 @@ class ControlTransferTest : public TestWithZone {
}
}
if (found) continue;
- EXPECT_TRUE(map.count(pc) == 0) << "expected no control @ +" << pc;
+ EXPECT_TRUE(map.map.count(pc) == 0) << "expected no control @ +" << pc;
}
}
};
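The switch from map.count(pc) to map.map.count(pc) implies the control-transfer analysis now returns a struct that wraps the per-pc branch map rather than the bare map itself. A rough sketch of the assumed shape (member and field names are hypothetical, for illustration only):

    #include <cstddef>
    #include <cstdint>
    #include <map>

    using pc_t = size_t;

    // Hypothetical per-branch entry; the real field layout lives in the decoder.
    struct ControlTransferEntry {
      uint32_t pc_diff;
      uint32_t sp_diff;
      uint32_t target_arity;
    };

    // Assumed wrapper: the result exposes its entries through a nested 'map'
    // member, which is why the test now spells map.map.count(pc).
    struct ControlTransfers {
      std::map<pc_t, ControlTransferEntry> map;
    };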
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index b445cedcd8..124d3bb1b6 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -2882,7 +2882,7 @@ TEST_F(FunctionBodyDecoderTest, TryCatch) {
ExpectFailure(sigs.v_v(), {WASM_TRY_OP, kExprEnd}, kAppendEnd,
"missing catch or catch-all in try");
ExpectFailure(sigs.v_v(), {kExprCatch, ex, kExprEnd}, kAppendEnd,
- "catch does not match any try");
+ "catch does not match a try");
}
TEST_F(FunctionBodyDecoderTest, TryUnwind) {
@@ -3205,9 +3205,6 @@ TEST_F(FunctionBodyDecoderTest, MemoryInit) {
builder.InitializeMemory();
builder.SetDataSegmentCount(1);
- ExpectFailure(sigs.v_v(),
- {WASM_MEMORY_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
- WASM_FEATURE_SCOPE(bulk_memory);
ExpectValidates(sigs.v_v(),
{WASM_MEMORY_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
ExpectFailure(sigs.v_v(),
@@ -3218,7 +3215,6 @@ TEST_F(FunctionBodyDecoderTest, MemoryInitInvalid) {
builder.InitializeMemory();
builder.SetDataSegmentCount(1);
- WASM_FEATURE_SCOPE(bulk_memory);
byte code[] = {WASM_MEMORY_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO),
WASM_END};
for (size_t i = 0; i <= arraysize(code); ++i) {
@@ -3230,8 +3226,6 @@ TEST_F(FunctionBodyDecoderTest, DataDrop) {
builder.InitializeMemory();
builder.SetDataSegmentCount(1);
- ExpectFailure(sigs.v_v(), {WASM_DATA_DROP(0)});
- WASM_FEATURE_SCOPE(bulk_memory);
ExpectValidates(sigs.v_v(), {WASM_DATA_DROP(0)});
ExpectFailure(sigs.v_v(), {WASM_DATA_DROP(1)});
}
@@ -3240,7 +3234,6 @@ TEST_F(FunctionBodyDecoderTest, DataSegmentIndexUnsigned) {
builder.InitializeMemory();
builder.SetDataSegmentCount(65);
- WASM_FEATURE_SCOPE(bulk_memory);
// Make sure that the index is interpreted as an unsigned number; 64 is
// interpreted as -64 when decoded as a signed LEB.
ExpectValidates(sigs.v_v(),
@@ -3251,9 +3244,6 @@ TEST_F(FunctionBodyDecoderTest, DataSegmentIndexUnsigned) {
TEST_F(FunctionBodyDecoderTest, MemoryCopy) {
builder.InitializeMemory();
- ExpectFailure(sigs.v_v(),
- {WASM_MEMORY_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
- WASM_FEATURE_SCOPE(bulk_memory);
ExpectValidates(sigs.v_v(),
{WASM_MEMORY_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
}
@@ -3261,15 +3251,11 @@ TEST_F(FunctionBodyDecoderTest, MemoryCopy) {
TEST_F(FunctionBodyDecoderTest, MemoryFill) {
builder.InitializeMemory();
- ExpectFailure(sigs.v_v(),
- {WASM_MEMORY_FILL(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
- WASM_FEATURE_SCOPE(bulk_memory);
ExpectValidates(sigs.v_v(),
{WASM_MEMORY_FILL(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
}
TEST_F(FunctionBodyDecoderTest, BulkMemoryOpsWithoutMemory) {
- WASM_FEATURE_SCOPE(bulk_memory);
ExpectFailure(sigs.v_v(),
{WASM_MEMORY_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
ExpectFailure(sigs.v_v(),
@@ -3282,9 +3268,6 @@ TEST_F(FunctionBodyDecoderTest, TableInit) {
builder.InitializeTable(wasm::kWasmFuncRef);
builder.AddPassiveElementSegment(wasm::kWasmFuncRef);
- ExpectFailure(sigs.v_v(),
- {WASM_TABLE_INIT(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
- WASM_FEATURE_SCOPE(bulk_memory);
ExpectValidates(sigs.v_v(),
{WASM_TABLE_INIT(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
ExpectFailure(sigs.v_v(),
@@ -3296,7 +3279,6 @@ TEST_F(FunctionBodyDecoderTest, TableInitWrongType) {
uint32_t element_index =
builder.AddPassiveElementSegment(wasm::kWasmExternRef);
- WASM_FEATURE_SCOPE(bulk_memory);
WASM_FEATURE_SCOPE(reftypes);
ExpectFailure(sigs.v_v(), {WASM_TABLE_INIT(table_index, element_index,
WASM_ZERO, WASM_ZERO, WASM_ZERO)});
@@ -3306,7 +3288,6 @@ TEST_F(FunctionBodyDecoderTest, TableInitInvalid) {
builder.InitializeTable(wasm::kWasmFuncRef);
builder.AddPassiveElementSegment(wasm::kWasmFuncRef);
- WASM_FEATURE_SCOPE(bulk_memory);
byte code[] = {WASM_TABLE_INIT(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO),
WASM_END};
for (size_t i = 0; i <= arraysize(code); ++i) {
@@ -3318,8 +3299,6 @@ TEST_F(FunctionBodyDecoderTest, ElemDrop) {
builder.InitializeTable(wasm::kWasmFuncRef);
builder.AddPassiveElementSegment(wasm::kWasmFuncRef);
- ExpectFailure(sigs.v_v(), {WASM_ELEM_DROP(0)});
- WASM_FEATURE_SCOPE(bulk_memory);
ExpectValidates(sigs.v_v(), {WASM_ELEM_DROP(0)});
ExpectFailure(sigs.v_v(), {WASM_ELEM_DROP(1)});
}
@@ -3328,7 +3307,6 @@ TEST_F(FunctionBodyDecoderTest, TableInitDeclarativeElem) {
builder.InitializeTable(wasm::kWasmFuncRef);
builder.AddDeclarativeElementSegment();
- WASM_FEATURE_SCOPE(bulk_memory);
WASM_FEATURE_SCOPE(reftypes);
byte code[] = {WASM_TABLE_INIT(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO),
WASM_END};
@@ -3341,8 +3319,6 @@ TEST_F(FunctionBodyDecoderTest, DeclarativeElemDrop) {
builder.InitializeTable(wasm::kWasmFuncRef);
builder.AddDeclarativeElementSegment();
- ExpectFailure(sigs.v_v(), {WASM_ELEM_DROP(0)});
- WASM_FEATURE_SCOPE(bulk_memory);
WASM_FEATURE_SCOPE(reftypes);
ExpectValidates(sigs.v_v(), {WASM_ELEM_DROP(0)});
ExpectFailure(sigs.v_v(), {WASM_ELEM_DROP(1)});
@@ -3353,7 +3329,6 @@ TEST_F(FunctionBodyDecoderTest, RefFuncDeclared) {
byte function_index = builder.AddFunction(sigs.v_i());
ExpectFailure(sigs.a_v(), {WASM_REF_FUNC(function_index)});
- WASM_FEATURE_SCOPE(bulk_memory);
WASM_FEATURE_SCOPE(reftypes);
ExpectValidates(sigs.a_v(), {WASM_REF_FUNC(function_index)});
}
@@ -3362,7 +3337,6 @@ TEST_F(FunctionBodyDecoderTest, RefFuncUndeclared) {
builder.InitializeTable(wasm::kWasmStmt);
byte function_index = builder.AddFunction(sigs.v_i(), false);
- WASM_FEATURE_SCOPE(bulk_memory);
WASM_FEATURE_SCOPE(reftypes);
ExpectFailure(sigs.a_v(), {WASM_REF_FUNC(function_index)});
}
@@ -3373,7 +3347,6 @@ TEST_F(FunctionBodyDecoderTest, ElemSegmentIndexUnsigned) {
builder.AddPassiveElementSegment(wasm::kWasmFuncRef);
}
- WASM_FEATURE_SCOPE(bulk_memory);
// Make sure that the index is interpreted as an unsigned number; 64 is
// interpreted as -64 when decoded as a signed LEB.
ExpectValidates(sigs.v_v(),
@@ -3384,9 +3357,6 @@ TEST_F(FunctionBodyDecoderTest, ElemSegmentIndexUnsigned) {
TEST_F(FunctionBodyDecoderTest, TableCopy) {
builder.InitializeTable(wasm::kWasmStmt);
- ExpectFailure(sigs.v_v(),
- {WASM_TABLE_COPY(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
- WASM_FEATURE_SCOPE(bulk_memory);
ExpectValidates(sigs.v_v(),
{WASM_TABLE_COPY(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
}
@@ -3395,7 +3365,6 @@ TEST_F(FunctionBodyDecoderTest, TableCopyWrongType) {
uint32_t dst_table_index = builder.InitializeTable(wasm::kWasmFuncRef);
uint32_t src_table_index = builder.InitializeTable(wasm::kWasmExternRef);
- WASM_FEATURE_SCOPE(bulk_memory);
WASM_FEATURE_SCOPE(reftypes);
ExpectFailure(sigs.v_v(), {WASM_TABLE_COPY(dst_table_index, src_table_index,
WASM_ZERO, WASM_ZERO, WASM_ZERO)});
@@ -3472,18 +3441,14 @@ TEST_F(FunctionBodyDecoderTest, TableOpsWithoutTable) {
{WASM_TABLE_FILL(0, WASM_ONE, WASM_REF_NULL(kExternRefCode),
WASM_ONE)});
}
- {
- WASM_FEATURE_SCOPE(bulk_memory);
- builder.AddPassiveElementSegment(wasm::kWasmFuncRef);
- ExpectFailure(sigs.v_v(),
- {WASM_TABLE_INIT(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
- ExpectFailure(sigs.v_v(),
- {WASM_TABLE_COPY(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
- }
+ builder.AddPassiveElementSegment(wasm::kWasmFuncRef);
+ ExpectFailure(sigs.v_v(),
+ {WASM_TABLE_INIT(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ ExpectFailure(sigs.v_v(),
+ {WASM_TABLE_COPY(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
}
TEST_F(FunctionBodyDecoderTest, TableCopyMultiTable) {
- WASM_FEATURE_SCOPE(bulk_memory);
WASM_FEATURE_SCOPE(reftypes);
{
TestModuleBuilder builder;
@@ -3533,7 +3498,6 @@ TEST_F(FunctionBodyDecoderTest, TableCopyMultiTable) {
}
TEST_F(FunctionBodyDecoderTest, TableInitMultiTable) {
- WASM_FEATURE_SCOPE(bulk_memory);
WASM_FEATURE_SCOPE(reftypes);
{
TestModuleBuilder builder;
@@ -3672,11 +3636,9 @@ TEST_F(FunctionBodyDecoderTest, RefEq) {
kWasmF32,
kWasmF64,
kWasmS128,
- kWasmExnRef,
kWasmExternRef,
kWasmFuncRef,
kWasmAnyRef,
- ValueType::Ref(HeapType::kExn, kNonNullable),
ValueType::Ref(HeapType::kExtern, kNonNullable),
ValueType::Ref(HeapType::kFunc, kNonNullable)};
@@ -3713,8 +3675,8 @@ TEST_F(FunctionBodyDecoderTest, RefAsNonNull) {
byte struct_type_index = builder.AddStruct({F(kWasmI32, true)});
byte array_type_index = builder.AddArray(kWasmI32, true);
uint32_t heap_types[] = {
- struct_type_index, array_type_index, HeapType::kExn, HeapType::kFunc,
- HeapType::kEq, HeapType::kExtern, HeapType::kI31, HeapType::kAny};
+ struct_type_index, array_type_index, HeapType::kFunc, HeapType::kEq,
+ HeapType::kExtern, HeapType::kI31, HeapType::kAny};
ValueType non_compatible_types[] = {kWasmI32, kWasmI64, kWasmF32, kWasmF64,
kWasmS128};
@@ -3755,8 +3717,8 @@ TEST_F(FunctionBodyDecoderTest, RefNull) {
byte struct_type_index = builder.AddStruct({F(kWasmI32, true)});
byte array_type_index = builder.AddArray(kWasmI32, true);
uint32_t type_reprs[] = {
- struct_type_index, array_type_index, HeapType::kExn, HeapType::kFunc,
- HeapType::kEq, HeapType::kExtern, HeapType::kI31, HeapType::kAny};
+ struct_type_index, array_type_index, HeapType::kFunc, HeapType::kEq,
+ HeapType::kExtern, HeapType::kI31, HeapType::kAny};
// It works with heap types.
for (uint32_t type_repr : type_reprs) {
const ValueType type = ValueType::Ref(type_repr, kNullable);
@@ -3785,8 +3747,8 @@ TEST_F(FunctionBodyDecoderTest, RefIsNull) {
byte struct_type_index = builder.AddStruct({F(kWasmI32, true)});
byte array_type_index = builder.AddArray(kWasmI32, true);
uint32_t heap_types[] = {
- struct_type_index, array_type_index, HeapType::kExn, HeapType::kFunc,
- HeapType::kEq, HeapType::kExtern, HeapType::kI31, HeapType::kAny};
+ struct_type_index, array_type_index, HeapType::kFunc, HeapType::kEq,
+ HeapType::kExtern, HeapType::kI31, HeapType::kAny};
for (uint32_t heap_type : heap_types) {
const ValueType types[] = {kWasmI32, ValueType::Ref(heap_type, kNullable)};
@@ -3884,7 +3846,7 @@ TEST_F(FunctionBodyDecoderTest, GCStruct) {
kExprDrop},
kAppendEnd,
"struct.new_with_rtt[1] expected rtt for type 0, found "
- "rtt.canon of type (rtt 1 1)");
+ "rtt.canon of type (rtt 0 1)");
// Out-of-bounds index.
ExpectFailure(sigs.v_v(),
{WASM_STRUCT_NEW_WITH_RTT(42, WASM_I32V(0),
@@ -4017,7 +3979,7 @@ TEST_F(FunctionBodyDecoderTest, GCArray) {
WASM_RTT_CANON(struct_type_index))},
kAppendEnd,
"array.new_with_rtt[2] expected rtt for type 0, found "
- "rtt.canon of type (rtt 1 1)");
+ "rtt.canon of type (rtt 0 1)");
// Wrong type index.
ExpectFailure(
sigs.v_v(),
@@ -4207,21 +4169,16 @@ TEST_F(FunctionBodyDecoderTest, RttCanon) {
uint8_t array_type_index = builder.AddArray(kWasmI32, true);
uint8_t struct_type_index = builder.AddStruct({F(kWasmI64, true)});
- for (HeapType::Representation heap :
- {HeapType::kExtern, HeapType::kEq, HeapType::kExn, HeapType::kI31,
- HeapType::kAny, static_cast<HeapType::Representation>(array_type_index),
- static_cast<HeapType::Representation>(struct_type_index)}) {
- ValueType rtt1 =
- ValueType::Rtt(HeapType(heap), heap == HeapType::kAny ? 0 : 1);
+ for (uint32_t type_index : {array_type_index, struct_type_index}) {
+ ValueType rtt1 = ValueType::Rtt(type_index, 0);
FunctionSig sig1(1, 0, &rtt1);
- ExpectValidates(&sig1, {WASM_RTT_CANON(rtt1.heap_type().code() & 0x7F)});
+ ExpectValidates(&sig1, {WASM_RTT_CANON(type_index)});
// rtt.canon should fail for incorrect depth.
- ValueType rtt2 =
- ValueType::Rtt(HeapType(heap), heap == HeapType::kAny ? 1 : 2);
+ ValueType rtt2 = ValueType::Rtt(type_index, 1);
FunctionSig sig2(1, 0, &rtt2);
- ExpectFailure(&sig2, {WASM_RTT_CANON(rtt2.heap_type().code() & 0x7F)},
- kAppendEnd, "type error in merge[0]");
+ ExpectFailure(&sig2, {WASM_RTT_CANON(type_index)}, kAppendEnd,
+ "type error in merge[0]");
}
}
@@ -4238,82 +4195,33 @@ TEST_F(FunctionBodyDecoderTest, RttSub) {
uint8_t sub_struct_type_index =
builder.AddStruct({F(kWasmI16, true), F(kWasmI32, false)});
- {
- // Can build an rtt.sub with self type for a generic heap type.
- ValueType type = ValueType::Rtt(HeapType::kFunc, 2);
- FunctionSig sig(1, 0, &type);
- ExpectValidates(&sig,
- {WASM_RTT_SUB(kFuncRefCode, WASM_RTT_CANON(kFuncRefCode))});
- }
-
- {
- // Can build an rtt.sub from a generic type with itself.
- ValueType type = ValueType::Rtt(HeapType::kAny, 1);
- FunctionSig sig(1, 0, &type);
- ExpectValidates(&sig,
- {WASM_RTT_SUB(kAnyRefCode, WASM_RTT_CANON(kAnyRefCode))});
- }
-
- // Can build an rtt.sub between related generic types.
- {
- ValueType type = ValueType::Rtt(HeapType::kFunc, 1);
- FunctionSig sig(1, 0, &type);
- ExpectValidates(&sig,
- {WASM_RTT_SUB(kFuncRefCode, WASM_RTT_CANON(kAnyRefCode))});
- }
- {
- ValueType type = ValueType::Rtt(HeapType::kEq, 1);
- FunctionSig sig(1, 0, &type);
- ExpectValidates(&sig,
- {WASM_RTT_SUB(kEqRefCode, WASM_RTT_CANON(kAnyRefCode))});
- }
- {
- ValueType type = ValueType::Rtt(HeapType::kI31, 2);
- FunctionSig sig(1, 0, &type);
- ExpectValidates(&sig,
- {WASM_RTT_SUB(kI31RefCode, WASM_RTT_CANON(kEqRefCode))});
- }
-
- // Cannot build an rtt.sub between unrelated generic types.
- {
- ValueType type = ValueType::Rtt(HeapType::kFunc, 2);
- FunctionSig sig(1, 0, &type);
- ExpectFailure(
- &sig, {WASM_RTT_SUB(kFuncRefCode, WASM_RTT_CANON(kI31RefCode))},
- kAppendEnd, "rtt.sub[0] expected rtt for a supertype of type func");
- }
-
// Trivial type error.
ExpectFailure(
- sigs.v_v(), {WASM_RTT_SUB(kFuncRefCode, WASM_I32V(42)), kExprDrop},
- kAppendEnd, "rtt.sub[0] expected rtt for a supertype of type func");
+ sigs.v_v(), {WASM_RTT_SUB(array_type_index, WASM_I32V(42)), kExprDrop},
+ kAppendEnd, "rtt.sub[0] expected rtt for a supertype of type 0");
{
- ValueType type = ValueType::Rtt(array_type_index, 2);
+ ValueType type = ValueType::Rtt(array_type_index, 1);
FunctionSig sig(1, 0, &type);
// Can build an rtt.sub with self type for an array type.
ExpectValidates(&sig, {WASM_RTT_SUB(array_type_index,
WASM_RTT_CANON(array_type_index))});
- // Can build an rtt.sub for an array from eqref.
- ExpectValidates(
- &sig, {WASM_RTT_SUB(array_type_index, WASM_RTT_CANON(kEqRefCode))});
// Fails when argument to rtt.sub is not a supertype.
- ExpectFailure(
- sigs.v_v(),
- {WASM_RTT_SUB(kEqRefCode, WASM_RTT_CANON(array_type_index)), kExprDrop},
- kAppendEnd, "rtt.sub[0] expected rtt for a supertype of type eq");
+ ExpectFailure(sigs.v_v(),
+ {WASM_RTT_SUB(super_struct_type_index,
+ WASM_RTT_CANON(array_type_index)),
+ kExprDrop},
+ kAppendEnd,
+ "rtt.sub[0] expected rtt for a supertype of type 1");
}
{
- ValueType type = ValueType::Rtt(super_struct_type_index, 2);
+ ValueType type = ValueType::Rtt(super_struct_type_index, 1);
FunctionSig sig(1, 0, &type);
// Can build an rtt.sub with self type for a struct type.
ExpectValidates(&sig,
{WASM_RTT_SUB(super_struct_type_index,
WASM_RTT_CANON(super_struct_type_index))});
- // Can build an rtt.sub for a struct from eqref.
- ExpectValidates(&sig, {WASM_RTT_SUB(super_struct_type_index,
- WASM_RTT_CANON(kEqRefCode))});
// Fails when argument to rtt.sub is not a supertype.
ExpectFailure(sigs.v_v(),
{WASM_RTT_SUB(super_struct_type_index,
@@ -4329,7 +4237,7 @@ TEST_F(FunctionBodyDecoderTest, RttSub) {
{
    // Can build an rtt from a struct supertype.
- ValueType type = ValueType::Rtt(sub_struct_type_index, 2);
+ ValueType type = ValueType::Rtt(sub_struct_type_index, 1);
FunctionSig sig(1, 0, &type);
ExpectValidates(&sig,
{WASM_RTT_SUB(sub_struct_type_index,
@@ -4355,93 +4263,96 @@ TEST_F(FunctionBodyDecoderTest, RefTestCast) {
static_cast<HeapType::Representation>(
builder.AddStruct({F(kWasmI16, true), F(kWasmI32, false)}));
- // Passing/failing tests due to static subtyping.
- std::pair<HeapType::Representation, HeapType::Representation> valid_pairs[] =
- {{HeapType::kAny, HeapType::kI31}, {HeapType::kAny, HeapType::kFunc},
- {HeapType::kAny, array_heap}, {HeapType::kAny, super_struct_heap},
- {HeapType::kEq, HeapType::kI31}, {HeapType::kFunc, HeapType::kFunc},
- {HeapType::kEq, array_heap}, {HeapType::kEq, super_struct_heap},
- {super_struct_heap, sub_struct_heap}};
-
- for (auto pair : valid_pairs) {
- HeapType from_heap = HeapType(pair.first);
- HeapType to_heap = HeapType(pair.second);
- ValueType test_reps[] = {kWasmI32, ValueType::Ref(from_heap, kNullable)};
- FunctionSig test_sig(1, 1, test_reps);
- ValueType cast_reps[] = {ValueType::Ref(to_heap, kNonNullable),
- ValueType::Ref(from_heap, kNullable)};
- FunctionSig cast_sig(1, 1, cast_reps);
- ExpectValidates(&test_sig,
- {WASM_REF_TEST(WASM_HEAP_TYPE(from_heap),
- WASM_HEAP_TYPE(to_heap), WASM_LOCAL_GET(0),
- WASM_RTT_CANON(WASM_HEAP_TYPE(to_heap)))});
- ExpectValidates(&cast_sig,
- {WASM_REF_CAST(WASM_HEAP_TYPE(from_heap),
- WASM_HEAP_TYPE(to_heap), WASM_LOCAL_GET(0),
- WASM_RTT_CANON(WASM_HEAP_TYPE(to_heap)))});
- }
+ HeapType::Representation func_heap_1 =
+ static_cast<HeapType::Representation>(builder.AddSignature(sigs.i_i()));
- std::pair<HeapType::Representation, HeapType::Representation>
- invalid_pairs[] = {
- {array_heap, HeapType::kAny}, {HeapType::kEq, HeapType::kAny},
- {HeapType::kI31, HeapType::kEq}, {array_heap, super_struct_heap},
- {array_heap, HeapType::kEq}, {HeapType::kExtern, HeapType::kExn}};
+ HeapType::Representation func_heap_2 =
+ static_cast<HeapType::Representation>(builder.AddSignature(sigs.i_v()));
+
+ // Passing/failing tests due to static subtyping.
+ std::tuple<HeapType::Representation, HeapType::Representation, bool> tests[] =
+ {std::make_tuple(HeapType::kData, array_heap, true),
+ std::make_tuple(HeapType::kData, super_struct_heap, true),
+ std::make_tuple(HeapType::kFunc, func_heap_1, true),
+ std::make_tuple(func_heap_1, func_heap_1, true),
+ std::make_tuple(func_heap_1, func_heap_2, false),
+ std::make_tuple(super_struct_heap, sub_struct_heap, true),
+ std::make_tuple(sub_struct_heap, super_struct_heap, false),
+ std::make_tuple(sub_struct_heap, array_heap, false),
+ std::make_tuple(HeapType::kFunc, array_heap, false)};
+
+ for (auto test : tests) {
+ HeapType from_heap = HeapType(std::get<0>(test));
+ HeapType to_heap = HeapType(std::get<1>(test));
+ bool should_pass = std::get<2>(test);
- for (auto pair : invalid_pairs) {
- HeapType from_heap = HeapType(pair.first);
- HeapType to_heap = HeapType(pair.second);
ValueType test_reps[] = {kWasmI32, ValueType::Ref(from_heap, kNullable)};
FunctionSig test_sig(1, 1, test_reps);
- ValueType cast_reps[] = {ValueType::Ref(to_heap, kNonNullable),
- ValueType::Ref(from_heap, kNullable)};
- FunctionSig cast_sig(1, 1, cast_reps);
- ExpectFailure(&test_sig,
- {WASM_REF_TEST(WASM_HEAP_TYPE(from_heap),
- WASM_HEAP_TYPE(to_heap), WASM_LOCAL_GET(0),
- WASM_RTT_CANON(WASM_HEAP_TYPE(to_heap)))},
- kAppendEnd, "is not a subtype of immediate object type");
- ExpectFailure(&cast_sig,
- {WASM_REF_CAST(WASM_HEAP_TYPE(from_heap),
- WASM_HEAP_TYPE(to_heap), WASM_LOCAL_GET(0),
- WASM_RTT_CANON(WASM_HEAP_TYPE(to_heap)))},
- kAppendEnd, "is not a subtype of immediate object type");
+
+ ValueType cast_reps_with_depth[] = {ValueType::Ref(to_heap, kNullable),
+ ValueType::Ref(from_heap, kNullable)};
+ FunctionSig cast_sig_with_depth(1, 1, cast_reps_with_depth);
+
+ ValueType cast_reps[] = {ValueType::Ref(to_heap, kNullable),
+ ValueType::Ref(from_heap, kNullable),
+ ValueType::Rtt(to_heap.ref_index())};
+ FunctionSig cast_sig(1, 2, cast_reps);
+
+ if (should_pass) {
+ ExpectValidates(&test_sig,
+ {WASM_REF_TEST(WASM_LOCAL_GET(0),
+ WASM_RTT_CANON(WASM_HEAP_TYPE(to_heap)))});
+ ExpectValidates(&cast_sig_with_depth,
+ {WASM_REF_CAST(WASM_LOCAL_GET(0),
+ WASM_RTT_CANON(WASM_HEAP_TYPE(to_heap)))});
+ ExpectValidates(&cast_sig,
+ {WASM_REF_CAST(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
+ } else {
+ std::string error_message = "[0] expected supertype of type " +
+ std::to_string(to_heap.ref_index()) +
+ ", found local.get of type " +
+ test_reps[1].name();
+ ExpectFailure(&test_sig,
+ {WASM_REF_TEST(WASM_LOCAL_GET(0),
+ WASM_RTT_CANON(WASM_HEAP_TYPE(to_heap)))},
+ kAppendEnd, ("ref.test" + error_message).c_str());
+ ExpectFailure(&cast_sig_with_depth,
+ {WASM_REF_CAST(WASM_LOCAL_GET(0),
+ WASM_RTT_CANON(WASM_HEAP_TYPE(to_heap)))},
+ kAppendEnd, ("ref.cast" + error_message).c_str());
+ ExpectFailure(&cast_sig,
+ {WASM_REF_CAST(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))},
+ kAppendEnd, ("ref.cast" + error_message).c_str());
+ }
}
// Trivial type error.
- ExpectFailure(sigs.v_v(),
- {WASM_REF_TEST(kEqRefCode, kI31RefCode, WASM_I32V(1),
- WASM_RTT_CANON(kI31RefCode)),
- kExprDrop},
- kAppendEnd,
- "ref.test[0] expected type eqref, found i32.const of type i32");
- ExpectFailure(sigs.v_v(),
- {WASM_REF_CAST(kEqRefCode, kI31RefCode, WASM_I32V(1),
- WASM_RTT_CANON(kI31RefCode)),
- kExprDrop},
- kAppendEnd,
- "ref.cast[0] expected type eqref, found i32.const of type i32");
+ ExpectFailure(
+ sigs.v_v(),
+ {WASM_REF_TEST(WASM_I32V(1), WASM_RTT_CANON(array_heap)), kExprDrop},
+ kAppendEnd,
+ "ref.test[0] expected subtype of (ref null func) or (ref null data), "
+ "found i32.const of type i32");
+ ExpectFailure(
+ sigs.v_v(),
+ {WASM_REF_CAST(WASM_I32V(1), WASM_RTT_CANON(array_heap)), kExprDrop},
+ kAppendEnd,
+ "ref.cast[0] expected subtype of (ref null func) or (ref null data), "
+ "found i32.const of type i32");
- // Mismached object heap immediate.
- {
- ValueType arg_type = ValueType::Ref(HeapType::kEq, kNonNullable);
- FunctionSig sig(0, 1, &arg_type);
- ExpectFailure(
- &sig,
- {WASM_REF_TEST(kEqRefCode, static_cast<byte>(array_heap),
- WASM_LOCAL_GET(0), WASM_RTT_CANON(kI31RefCode)),
- kExprDrop},
- kAppendEnd,
- "ref.test[1] expected rtt for type 0, found rtt.canon of type (rtt 1 "
- "i31)");
- ExpectFailure(
- &sig,
- {WASM_REF_CAST(kEqRefCode, static_cast<byte>(array_heap),
- WASM_LOCAL_GET(0), WASM_RTT_CANON(kI31RefCode)),
- kExprDrop},
- kAppendEnd,
- "ref.cast[1] expected rtt for type 0, found rtt.canon of type (rtt 1 "
- "i31)");
- }
+ // Trivial type error.
+ ExpectFailure(
+ sigs.v_v(),
+ {WASM_REF_TEST(WASM_I32V(1), WASM_RTT_CANON(array_heap)), kExprDrop},
+ kAppendEnd,
+ "ref.test[0] expected subtype of (ref null func) or (ref null data), "
+ "found i32.const of type i32");
+ ExpectFailure(
+ sigs.v_v(),
+ {WASM_REF_CAST(WASM_I32V(1), WASM_RTT_CANON(array_heap)), kExprDrop},
+ kAppendEnd,
+ "ref.cast[0] expected subtype of (ref null func) or (ref null data), "
+ "found i32.const of type i32");
}
// This tests that num_locals_ in decoder remains consistent, even if we fail
@@ -4931,19 +4842,6 @@ TEST_F(LocalDeclDecoderTest, UseEncoder) {
pos = ExpectRun(map, pos, kWasmI64, 212);
}
-TEST_F(LocalDeclDecoderTest, ExnRef) {
- WASM_FEATURE_SCOPE(eh);
- ValueType type = kWasmExnRef;
- const byte data[] = {1, 1, static_cast<byte>(type.value_type_code())};
- BodyLocalDecls decls(zone());
- bool result = DecodeLocalDecls(&decls, data, data + sizeof(data));
- EXPECT_TRUE(result);
- EXPECT_EQ(1u, decls.type_list.size());
-
- TypesOfLocals map = decls.type_list;
- EXPECT_EQ(type, map[0]);
-}
-
TEST_F(LocalDeclDecoderTest, InvalidTypeIndex) {
WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
@@ -5092,6 +4990,16 @@ TEST_P(FunctionBodyDecoderTestOnBothMemoryTypes, 64BitOffset) {
#undef ZERO_FOR_TYPE
}
+TEST_P(FunctionBodyDecoderTestOnBothMemoryTypes, MemorySize) {
+ builder.InitializeMemory(GetParam());
+ // memory.size returns i32 on memory32.
+ Validate(!is_memory64(), sigs.v_v(),
+ {WASM_MEMORY_SIZE, kExprI32Eqz, kExprDrop});
+ // memory.size returns i64 on memory64.
+ Validate(is_memory64(), sigs.v_v(),
+ {WASM_MEMORY_SIZE, kExprI64Eqz, kExprDrop});
+}
+
#undef B1
#undef B2
#undef B3
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 15fcff6852..f721dc33d3 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -225,6 +225,13 @@ TEST_F(WasmModuleVerifyTest, WrongVersion) {
}
}
+TEST_F(WasmModuleVerifyTest, WrongSection) {
+ constexpr byte kInvalidSection = 0x1c;
+ const byte data[] = {kInvalidSection, 0};
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_FALSE(result.ok());
+}
+
TEST_F(WasmModuleVerifyTest, DecodeEmpty) {
ModuleResult result = DecodeModule(nullptr, nullptr);
EXPECT_TRUE(result.ok());
@@ -280,7 +287,6 @@ TEST_F(WasmModuleVerifyTest, S128Global) {
TEST_F(WasmModuleVerifyTest, ExternRefGlobal) {
WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(bulk_memory);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
TYPE_SECTION_ONE_SIG_VOID_VOID,
@@ -329,7 +335,6 @@ TEST_F(WasmModuleVerifyTest, ExternRefGlobal) {
TEST_F(WasmModuleVerifyTest, FuncRefGlobal) {
WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(bulk_memory);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
TYPE_SECTION_ONE_SIG_VOID_VOID,
@@ -562,15 +567,16 @@ TEST_F(WasmModuleVerifyTest, GlobalInitializer) {
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
static const byte referencing_undefined_global_nested[] = {
- SECTION(Global, ENTRY_COUNT(2), // --
- WASM_RTT(2, kFuncRefCode), // type
- 0, // mutable
- WASM_RTT_SUB(kFuncRefCode, // init value
- WASM_GLOBAL_GET(1)), // --
- kExprEnd, // --
- WASM_RTT(1, kFuncRefCode), // type
- 0, // mutable
- WASM_RTT_CANON(kFuncRefCode), kExprEnd) // init value
+ SECTION(Type, ENTRY_COUNT(1), WASM_ARRAY_DEF(kI32Code, true)),
+ SECTION(Global, ENTRY_COUNT(2), // --
+ WASM_RTT_WITH_DEPTH(1, 0), // type
+ 0, // mutable
+ WASM_RTT_SUB(0, // init value
+ WASM_GLOBAL_GET(1)), // --
+ kExprEnd, // --
+ WASM_RTT_WITH_DEPTH(0, 0), // type
+ 0, // mutable
+ WASM_RTT_CANON(0), kExprEnd) // init value
};
EXPECT_FAILURE_WITH_MSG(referencing_undefined_global_nested,
"global #1 is not defined yet");
@@ -783,17 +789,6 @@ TEST_F(WasmModuleVerifyTest, RefNullGlobalInvalid2) {
"of type definitions supported by V8");
}
-TEST_F(WasmModuleVerifyTest, RttCanonGlobalGeneric) {
- WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(typed_funcref);
- WASM_FEATURE_SCOPE(gc);
- static const byte data[] = {SECTION(Global, ENTRY_COUNT(1),
- WASM_RTT(1, kFuncRefCode), 1,
- WASM_RTT_CANON(kFuncRefCode), kExprEnd)};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
- EXPECT_OK(result);
-}
-
TEST_F(WasmModuleVerifyTest, RttCanonGlobalStruct) {
WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
@@ -801,8 +796,8 @@ TEST_F(WasmModuleVerifyTest, RttCanonGlobalStruct) {
static const byte data[] = {
SECTION(Type, ENTRY_COUNT(1),
WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true))),
- SECTION(Global, ENTRY_COUNT(1), WASM_RTT(1, 0), 0, WASM_RTT_CANON(0),
- kExprEnd)};
+ SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(0, 0), 0,
+ WASM_RTT_CANON(0), kExprEnd)};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
}
@@ -811,13 +806,15 @@ TEST_F(WasmModuleVerifyTest, RttCanonGlobalTypeError) {
WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- static const byte data[] = {SECTION(Global, ENTRY_COUNT(1),
- WASM_RTT(1, kExternRefCode), 1,
- WASM_RTT_CANON(kFuncRefCode), kExprEnd)};
+ static const byte data[] = {
+ SECTION(Type, ENTRY_COUNT(1),
+ WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true))),
+ SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(1, 0), 1,
+ WASM_RTT_CANON(0), kExprEnd)};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_NOT_OK(result,
- "type error in init expression, expected (rtt 1 extern), got "
- "(rtt 1 func)");
+ "type error in init expression, expected (rtt 1 0), got "
+ "(rtt 0 0)");
}
TEST_F(WasmModuleVerifyTest, GlobalRttSubOfCanon) {
@@ -825,11 +822,14 @@ TEST_F(WasmModuleVerifyTest, GlobalRttSubOfCanon) {
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
static const byte data[] = {
- SECTION(Global, ENTRY_COUNT(1), WASM_RTT(2, kI31RefCode), 1,
- WASM_RTT_SUB(kI31RefCode, WASM_RTT_CANON(kEqRefCode)), kExprEnd)};
+ SECTION(Type, ENTRY_COUNT(2),
+ WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true)),
+ WASM_STRUCT_DEF(FIELD_COUNT(2), STRUCT_FIELD(kI32Code, true),
+ STRUCT_FIELD(kI32Code, true))),
+ SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(1, 1), 1,
+ WASM_RTT_SUB(1, WASM_RTT_CANON(0)), kExprEnd)};
ModuleResult result = DecodeModule(data, data + sizeof(data));
- WasmInitExpr expected = WasmInitExpr::RttSub(
- HeapType::kI31, WasmInitExpr::RttCanon(HeapType::kEq));
+ WasmInitExpr expected = WasmInitExpr::RttSub(1, WasmInitExpr::RttCanon(0));
EXPECT_OK(result);
EXPECT_EQ(result.value()->globals.front().init, expected);
}
@@ -838,15 +838,16 @@ TEST_F(WasmModuleVerifyTest, GlobalRttSubOfSubOfCanon) {
WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- static const byte data[] = {SECTION(
- Global, ENTRY_COUNT(1), WASM_RTT(3, kEqRefCode), 1,
- WASM_RTT_SUB(kEqRefCode,
- WASM_RTT_SUB(kEqRefCode, WASM_RTT_CANON(kEqRefCode))),
- kExprEnd)};
+ static const byte data[] = {
+ SECTION(Type, ENTRY_COUNT(2),
+ WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true)),
+ WASM_STRUCT_DEF(FIELD_COUNT(2), STRUCT_FIELD(kI32Code, true),
+ STRUCT_FIELD(kI32Code, true))),
+ SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(2, 1), 1,
+ WASM_RTT_SUB(1, WASM_RTT_SUB(1, WASM_RTT_CANON(0))), kExprEnd)};
ModuleResult result = DecodeModule(data, data + sizeof(data));
WasmInitExpr expected = WasmInitExpr::RttSub(
- HeapType::kEq, WasmInitExpr::RttSub(
- HeapType::kEq, WasmInitExpr::RttCanon(HeapType::kEq)));
+ 1, WasmInitExpr::RttSub(1, WasmInitExpr::RttCanon(0)));
EXPECT_OK(result);
EXPECT_EQ(result.value()->globals.front().init, expected);
}
@@ -856,18 +857,21 @@ TEST_F(WasmModuleVerifyTest, GlobalRttSubOfGlobal) {
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
static const byte data[] = {
- SECTION(Import, // section header
- ENTRY_COUNT(1), // number of imports
- ADD_COUNT('m'), // module name
- ADD_COUNT('f'), // global name
- kExternalGlobal, // import kind
- WASM_RTT(1, kEqRefCode), // type
- 0), // mutability
- SECTION(Global, ENTRY_COUNT(1), WASM_RTT(2, kI31RefCode), 1,
- WASM_RTT_SUB(kI31RefCode, WASM_GLOBAL_GET(0)), kExprEnd)};
+ SECTION(Type, ENTRY_COUNT(2),
+ WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true)),
+ WASM_STRUCT_DEF(FIELD_COUNT(2), STRUCT_FIELD(kI32Code, true),
+ STRUCT_FIELD(kI32Code, true))),
+ SECTION(Import, // section header
+ ENTRY_COUNT(1), // number of imports
+ ADD_COUNT('m'), // module name
+ ADD_COUNT('f'), // global name
+ kExternalGlobal, // import kind
+ WASM_RTT_WITH_DEPTH(0, 0), // type
+ 0), // mutability
+ SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(1, 1), 1,
+ WASM_RTT_SUB(1, WASM_GLOBAL_GET(0)), kExprEnd)};
ModuleResult result = DecodeModule(data, data + sizeof(data));
- WasmInitExpr expected =
- WasmInitExpr::RttSub(HeapType::kI31, WasmInitExpr::GlobalGet(0));
+ WasmInitExpr expected = WasmInitExpr::RttSub(1, WasmInitExpr::GlobalGet(0));
EXPECT_OK(result);
EXPECT_EQ(result.value()->globals[1].init, expected);
}
@@ -877,6 +881,8 @@ TEST_F(WasmModuleVerifyTest, GlobalRttSubOfGlobalTypeError) {
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
static const byte data[] = {
+ SECTION(Type, ENTRY_COUNT(1),
+ WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true))),
SECTION(Import, // section header
ENTRY_COUNT(1), // number of imports
ADD_COUNT('m'), // module name
@@ -884,8 +890,8 @@ TEST_F(WasmModuleVerifyTest, GlobalRttSubOfGlobalTypeError) {
kExternalGlobal, // import kind
kI32Code, // type
0), // mutability
- SECTION(Global, ENTRY_COUNT(1), WASM_RTT(2, kExternRefCode), 1,
- WASM_RTT_SUB(kExternRefCode, WASM_GLOBAL_GET(0)), kExprEnd)};
+ SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(1, 0), 1,
+ WASM_RTT_SUB(0, WASM_GLOBAL_GET(0)), kExprEnd)};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_NOT_OK(result, "rtt.sub requires a supertype rtt on stack");
}
@@ -894,9 +900,12 @@ TEST_F(WasmModuleVerifyTest, GlobalRttSubIllegalParent) {
WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- static const byte data[] = {SECTION(
- Global, ENTRY_COUNT(1), WASM_RTT(2, kEqRefCode), 1,
- WASM_RTT_SUB(kEqRefCode, WASM_RTT_CANON(kExternRefCode)), kExprEnd)};
+ static const byte data[] = {
+ SECTION(Type, ENTRY_COUNT(2),
+ WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true)),
+ WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kF32Code, true))),
+ SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(1, 1), 1,
+ WASM_RTT_SUB(1, WASM_RTT_CANON(0)), kExprEnd)};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_NOT_OK(result, "rtt.sub requires a supertype rtt on stack");
}
@@ -905,13 +914,15 @@ TEST_F(WasmModuleVerifyTest, RttSubGlobalTypeError) {
WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- static const byte data[] = {SECTION(
- Global, ENTRY_COUNT(1), WASM_RTT(1 /* Should be 2 */, kI31RefCode), 1,
- WASM_RTT_SUB(kI31RefCode, WASM_RTT_CANON(kEqRefCode)), kExprEnd)};
+ static const byte data[] = {
+ SECTION(Type, ENTRY_COUNT(1),
+ WASM_STRUCT_DEF(FIELD_COUNT(1), STRUCT_FIELD(kI32Code, true))),
+ SECTION(Global, ENTRY_COUNT(1), WASM_RTT_WITH_DEPTH(0, 0), 1,
+ WASM_RTT_SUB(0, WASM_RTT_CANON(0)), kExprEnd)};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_NOT_OK(result,
- "type error in init expression, expected (rtt 1 i31), got "
- "(rtt 2 i31)");
+ "type error in init expression, expected (rtt 0 0), got "
+ "(rtt 1 0)");
}
TEST_F(WasmModuleVerifyTest, EmptyStruct) {
@@ -1628,7 +1639,6 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMultipleTables) {
// Test that if we have multiple tables, in the element section we can target
// and initialize all tables.
WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(bulk_memory);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
TYPE_SECTION_ONE_SIG_VOID_VOID,
@@ -1661,7 +1671,6 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMixedTables) {
// Test that if we have multiple tables, both imported and module-defined, in
// the element section we can target and initialize all tables.
WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(bulk_memory);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
TYPE_SECTION_ONE_SIG_VOID_VOID,
@@ -1719,7 +1728,6 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMultipleTablesArbitraryOrder) {
    // Test that the order in which tables are targeted in the element section
// can be arbitrary.
WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(bulk_memory);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
TYPE_SECTION_ONE_SIG_VOID_VOID,
@@ -1756,7 +1764,6 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMixedTablesArbitraryOrder) {
    // Test that the order in which tables are targeted in the element section can
// be arbitrary. In this test, tables can be both imported and module-defined.
WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(bulk_memory);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
TYPE_SECTION_ONE_SIG_VOID_VOID,
@@ -1812,7 +1819,6 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMixedTablesArbitraryOrder) {
TEST_F(WasmModuleVerifyTest, ElementSectionInitExternRefTableWithFuncRef) {
WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(bulk_memory);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
TYPE_SECTION_ONE_SIG_VOID_VOID,
@@ -1847,7 +1853,6 @@ TEST_F(WasmModuleVerifyTest, ElementSectionDontInitExternRefImportedTable) {
// Test that imported tables of type ExternRef cannot be initialized in the
// elements section.
WASM_FEATURE_SCOPE(reftypes);
- WASM_FEATURE_SCOPE(bulk_memory);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
TYPE_SECTION_ONE_SIG_VOID_VOID,
@@ -1977,7 +1982,7 @@ TEST_F(WasmModuleVerifyTest, IllegalTableTypes) {
{kOptRefCode, 1},
{kOptRefCode, kI31RefCode},
{kI31RefCode},
- {kRttCode, 2, kFuncRefCode}};
+ {kRttWithDepthCode, 2, 0}};
for (Vec type : table_types) {
Vec data = {
@@ -1994,9 +1999,10 @@ TEST_F(WasmModuleVerifyTest, IllegalTableTypes) {
auto result = DecodeModule(data.data(), data.data() + data.size());
- EXPECT_NOT_OK(result,
- "Currently, only nullable exnref, externref, and "
- "function references are allowed as table types");
+ EXPECT_NOT_OK(
+ result,
+ "Currently, only externref and function references are allowed "
+ "as table types");
}
}
@@ -3016,8 +3022,6 @@ TEST_F(WasmModuleVerifyTest, PassiveDataSegment) {
// data segments --------------------------------------------------------
SECTION(Data, ENTRY_COUNT(1), PASSIVE, ADD_COUNT('h', 'i')),
};
- EXPECT_FAILURE(data);
- WASM_FEATURE_SCOPE(bulk_memory);
EXPECT_VERIFIES(data);
EXPECT_OFF_END_FAILURE(data, arraysize(data) - 5);
}
@@ -3036,8 +3040,6 @@ TEST_F(WasmModuleVerifyTest, ActiveElementSegmentWithElements) {
REF_FUNC_ELEMENT(0), REF_FUNC_ELEMENT(0), REF_NULL_ELEMENT),
// code ------------------------------------------------------------------
ONE_EMPTY_BODY};
- EXPECT_FAILURE(data);
- WASM_FEATURE_SCOPE(bulk_memory);
EXPECT_VERIFIES(data);
EXPECT_OFF_END_FAILURE(data, arraysize(data) - 5);
}
@@ -3056,8 +3058,6 @@ TEST_F(WasmModuleVerifyTest, PassiveElementSegment) {
REF_NULL_ELEMENT),
// code ------------------------------------------------------------------
ONE_EMPTY_BODY};
- EXPECT_FAILURE(data);
- WASM_FEATURE_SCOPE(bulk_memory);
EXPECT_VERIFIES(data);
EXPECT_OFF_END_FAILURE(data, arraysize(data) - 5);
}
@@ -3075,7 +3075,6 @@ TEST_F(WasmModuleVerifyTest, PassiveElementSegmentExternRef) {
U32V_1(0)),
// code ------------------------------------------------------------------
ONE_EMPTY_BODY};
- WASM_FEATURE_SCOPE(bulk_memory);
EXPECT_FAILURE(data);
}
@@ -3092,8 +3091,6 @@ TEST_F(WasmModuleVerifyTest, PassiveElementSegmentWithIndices) {
ENTRY_COUNT(3), U32V_1(0), U32V_1(0), U32V_1(0)),
// code ------------------------------------------------------------------
ONE_EMPTY_BODY};
- EXPECT_FAILURE(data);
- WASM_FEATURE_SCOPE(bulk_memory);
EXPECT_VERIFIES(data);
EXPECT_OFF_END_FAILURE(data, arraysize(data) - 5);
}
@@ -3113,14 +3110,11 @@ TEST_F(WasmModuleVerifyTest, DeclarativeElementSegmentFuncRef) {
// code ------------------------------------------------------------------
ONE_EMPTY_BODY};
EXPECT_FAILURE(data);
- WASM_FEATURE_SCOPE(bulk_memory);
- EXPECT_FAILURE(data);
WASM_FEATURE_SCOPE(reftypes);
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, DeclarativeElementSegmentWithInvalidIndex) {
- WASM_FEATURE_SCOPE(bulk_memory);
WASM_FEATURE_SCOPE(reftypes);
static const byte data[] = {
// sig#0 -----------------------------------------------------------------
@@ -3144,15 +3138,12 @@ TEST_F(WasmModuleVerifyTest, DataCountSectionCorrectPlacement) {
static const byte data[] = {SECTION(Element, ENTRY_COUNT(0)),
SECTION(DataCount, ENTRY_COUNT(0)),
SECTION(Code, ENTRY_COUNT(0))};
- EXPECT_FAILURE(data);
- WASM_FEATURE_SCOPE(bulk_memory);
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, DataCountSectionAfterCode) {
static const byte data[] = {SECTION(Code, ENTRY_COUNT(0)),
SECTION(DataCount, ENTRY_COUNT(0))};
- WASM_FEATURE_SCOPE(bulk_memory);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_NOT_OK(result,
"The DataCount section must appear before the Code section");
@@ -3161,7 +3152,6 @@ TEST_F(WasmModuleVerifyTest, DataCountSectionAfterCode) {
TEST_F(WasmModuleVerifyTest, DataCountSectionBeforeElement) {
static const byte data[] = {SECTION(DataCount, ENTRY_COUNT(0)),
SECTION(Element, ENTRY_COUNT(0))};
- WASM_FEATURE_SCOPE(bulk_memory);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_NOT_OK(result, "unexpected section <Element>");
}
@@ -3178,7 +3168,6 @@ TEST_F(WasmModuleVerifyTest, DataCountSectionAfterStartBeforeElement) {
SECTION(DataCount, ENTRY_COUNT(0)), // DataCount section.
SECTION(Element, ENTRY_COUNT(0)) // Element section.
};
- WASM_FEATURE_SCOPE(bulk_memory);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_NOT_OK(result, "unexpected section <Element>");
}
@@ -3186,7 +3175,6 @@ TEST_F(WasmModuleVerifyTest, DataCountSectionAfterStartBeforeElement) {
TEST_F(WasmModuleVerifyTest, MultipleDataCountSections) {
static const byte data[] = {SECTION(DataCount, ENTRY_COUNT(0)),
SECTION(DataCount, ENTRY_COUNT(0))};
- WASM_FEATURE_SCOPE(bulk_memory);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_NOT_OK(result, "Multiple DataCount sections not allowed");
}
@@ -3198,8 +3186,6 @@ TEST_F(WasmModuleVerifyTest, DataCountSegmentCountMatch) {
SECTION(Data, ENTRY_COUNT(1), LINEAR_MEMORY_INDEX_0, // Data section.
WASM_INIT_EXPR_I32V_1(12), ADD_COUNT('h', 'i'))};
- EXPECT_FAILURE(data);
- WASM_FEATURE_SCOPE(bulk_memory);
EXPECT_VERIFIES(data);
}
@@ -3208,7 +3194,6 @@ TEST_F(WasmModuleVerifyTest, DataCountSegmentCount_greater) {
SECTION(Memory, ENTRY_COUNT(1), 0, 1), // Memory section.
SECTION(DataCount, ENTRY_COUNT(3)), // DataCount section.
SECTION(Data, ENTRY_COUNT(0))}; // Data section.
- WASM_FEATURE_SCOPE(bulk_memory);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_NOT_OK(result, "data segments count 0 mismatch (3 expected)");
}
@@ -3219,7 +3204,6 @@ TEST_F(WasmModuleVerifyTest, DataCountSegmentCount_less) {
SECTION(DataCount, ENTRY_COUNT(0)), // DataCount section.
SECTION(Data, ENTRY_COUNT(1), LINEAR_MEMORY_INDEX_0, // Data section.
WASM_INIT_EXPR_I32V_1(12), ADD_COUNT('a', 'b', 'c'))};
- WASM_FEATURE_SCOPE(bulk_memory);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_NOT_OK(result, "data segments count 1 mismatch (0 expected)");
}
@@ -3227,7 +3211,6 @@ TEST_F(WasmModuleVerifyTest, DataCountSegmentCount_less) {
TEST_F(WasmModuleVerifyTest, DataCountSegmentCount_omitted) {
static const byte data[] = {SECTION(Memory, ENTRY_COUNT(1), 0, 1),
SECTION(DataCount, ENTRY_COUNT(1))};
- WASM_FEATURE_SCOPE(bulk_memory);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_NOT_OK(result, "data segments count 0 mismatch (1 expected)");
}
@@ -3328,8 +3311,6 @@ TEST_F(WasmModuleVerifyTest, Memory64DataSegment) {
#undef NOP_BODY
#undef SIG_ENTRY_i_i
#undef UNKNOWN_SECTION
-#undef COUNT_ARGS
-#undef CHECK_LEB1
#undef ADD_COUNT
#undef SECTION
#undef TYPE_SECTION
diff --git a/deps/v8/test/unittests/wasm/subtyping-unittest.cc b/deps/v8/test/unittests/wasm/subtyping-unittest.cc
index ec6e30ac75..d06b4d675b 100644
--- a/deps/v8/test/unittests/wasm/subtyping-unittest.cc
+++ b/deps/v8/test/unittests/wasm/subtyping-unittest.cc
@@ -58,94 +58,112 @@ TEST_F(WasmSubtypingTest, Subtyping) {
ValueType numeric_types[] = {kWasmI32, kWasmI64, kWasmF32, kWasmF64,
kWasmS128};
ValueType ref_types[] = {
- kWasmExternRef, kWasmFuncRef, kWasmExnRef, kWasmEqRef, kWasmI31Ref,
- kWasmAnyRef, optRef(0), ref(0), optRef(2), ref(2)};
+ kWasmExternRef, kWasmFuncRef, kWasmEqRef, kWasmI31Ref, kWasmDataRef,
+ kWasmAnyRef, optRef(0), ref(0), optRef(2), ref(2)};
// Type judgements across modules should work the same as within one module.
for (WasmModule* module : {module1, module2}) {
// Value types are unrelated, except if they are equal.
for (ValueType subtype : numeric_types) {
for (ValueType supertype : numeric_types) {
- CHECK_EQ(IsSubtypeOf(subtype, supertype, module1, module),
- subtype == supertype);
+ EXPECT_EQ(IsSubtypeOf(subtype, supertype, module1, module),
+ subtype == supertype);
}
}
// Value types are unrelated with reference types.
for (ValueType value_type : numeric_types) {
for (ValueType ref_type : ref_types) {
- CHECK(!IsSubtypeOf(value_type, ref_type, module1, module));
- CHECK(!IsSubtypeOf(ref_type, value_type, module1, module));
+ EXPECT_TRUE(!IsSubtypeOf(value_type, ref_type, module1, module));
+ EXPECT_TRUE(!IsSubtypeOf(ref_type, value_type, module1, module));
}
}
for (ValueType ref_type : ref_types) {
- // Concrete reference types and i31ref are subtypes of eqref,
- // exnref/externref/funcref/anyref are not.
- CHECK_EQ(IsSubtypeOf(ref_type, kWasmEqRef, module1, module),
- ref_type != kWasmFuncRef && ref_type != kWasmExternRef &&
- ref_type != kWasmExnRef && ref_type != kWasmAnyRef);
+ // Concrete reference types, i31ref and dataref are subtypes of eqref,
+ // externref/funcref/anyref are not.
+ EXPECT_EQ(IsSubtypeOf(ref_type, kWasmEqRef, module1, module),
+ ref_type != kWasmFuncRef && ref_type != kWasmExternRef &&
+ ref_type != kWasmAnyRef);
+ // Non-nullable struct/array types are subtypes of dataref.
+ EXPECT_EQ(IsSubtypeOf(ref_type, kWasmDataRef, module1, module),
+ ref_type == kWasmDataRef ||
+ (ref_type.kind() == kRef && ref_type.has_index()));
// Each reference type is a subtype of itself.
- CHECK(IsSubtypeOf(ref_type, ref_type, module1, module));
+ EXPECT_TRUE(IsSubtypeOf(ref_type, ref_type, module1, module));
// Each reference type is a subtype of anyref.
- CHECK(IsSubtypeOf(ref_type, kWasmAnyRef, module1, module));
+ EXPECT_TRUE(IsSubtypeOf(ref_type, kWasmAnyRef, module1, module));
// Only anyref is a subtype of anyref.
- CHECK_EQ(IsSubtypeOf(kWasmAnyRef, ref_type, module1, module),
- ref_type == kWasmAnyRef);
+ EXPECT_EQ(IsSubtypeOf(kWasmAnyRef, ref_type, module1, module),
+ ref_type == kWasmAnyRef);
}
// The rest of ref. types are unrelated.
- for (ValueType type_1 :
- {kWasmExternRef, kWasmFuncRef, kWasmExnRef, kWasmI31Ref}) {
- for (ValueType type_2 :
- {kWasmExternRef, kWasmFuncRef, kWasmExnRef, kWasmI31Ref}) {
- CHECK_EQ(IsSubtypeOf(type_1, type_2, module1, module),
- type_1 == type_2);
+ for (ValueType type_1 : {kWasmExternRef, kWasmFuncRef, kWasmI31Ref}) {
+ for (ValueType type_2 : {kWasmExternRef, kWasmFuncRef, kWasmI31Ref}) {
+ EXPECT_EQ(IsSubtypeOf(type_1, type_2, module1, module),
+ type_1 == type_2);
}
}
// Unrelated refs are unrelated.
- CHECK(!IsSubtypeOf(ref(0), ref(2), module1, module));
- CHECK(!IsSubtypeOf(optRef(3), optRef(1), module1, module));
+ EXPECT_TRUE(!IsSubtypeOf(ref(0), ref(2), module1, module));
+ EXPECT_TRUE(!IsSubtypeOf(optRef(3), optRef(1), module1, module));
// ref is a subtype of optref for the same struct/array.
- CHECK(IsSubtypeOf(ref(0), optRef(0), module1, module));
- CHECK(IsSubtypeOf(ref(2), optRef(2), module1, module));
+ EXPECT_TRUE(IsSubtypeOf(ref(0), optRef(0), module1, module));
+ EXPECT_TRUE(IsSubtypeOf(ref(2), optRef(2), module1, module));
// optref is not a subtype of ref for the same struct/array.
- CHECK(!IsSubtypeOf(optRef(0), ref(0), module1, module));
- CHECK(!IsSubtypeOf(optRef(2), ref(2), module1, module));
+ EXPECT_TRUE(!IsSubtypeOf(optRef(0), ref(0), module1, module));
+ EXPECT_TRUE(!IsSubtypeOf(optRef(2), ref(2), module1, module));
// ref is a subtype of optref if the same is true for the underlying
// structs/arrays.
- CHECK(IsSubtypeOf(ref(3), optRef(2), module1, module));
+ EXPECT_TRUE(IsSubtypeOf(ref(3), optRef(2), module1, module));
// Prefix subtyping for structs.
- CHECK(IsSubtypeOf(optRef(4), optRef(0), module1, module));
+ EXPECT_TRUE(IsSubtypeOf(optRef(4), optRef(0), module1, module));
// Mutable fields are invariant.
- CHECK(!IsSubtypeOf(ref(0), ref(5), module1, module));
+ EXPECT_TRUE(!IsSubtypeOf(ref(0), ref(5), module1, module));
// Immutable fields are covariant.
- CHECK(IsSubtypeOf(ref(1), ref(0), module1, module));
+ EXPECT_TRUE(IsSubtypeOf(ref(1), ref(0), module1, module));
// Prefix subtyping + immutable field covariance for structs.
- CHECK(IsSubtypeOf(optRef(4), optRef(1), module1, module));
+ EXPECT_TRUE(IsSubtypeOf(optRef(4), optRef(1), module1, module));
// No subtyping between mutable/immutable fields.
- CHECK(!IsSubtypeOf(ref(7), ref(6), module1, module));
- CHECK(!IsSubtypeOf(ref(6), ref(7), module1, module));
+ EXPECT_TRUE(!IsSubtypeOf(ref(7), ref(6), module1, module));
+ EXPECT_TRUE(!IsSubtypeOf(ref(6), ref(7), module1, module));
// Recursive types.
- CHECK(IsSubtypeOf(ref(9), ref(8), module1, module));
+ EXPECT_TRUE(IsSubtypeOf(ref(9), ref(8), module1, module));
// Identical rtts are subtypes of each other.
- CHECK(IsSubtypeOf(ValueType::Rtt(5, 3), ValueType::Rtt(5, 3), module1,
- module2));
- CHECK(IsSubtypeOf(ValueType::Rtt(HeapType::kExn, 3),
- ValueType::Rtt(HeapType::kExn, 3), module1, module2));
+ EXPECT_TRUE(IsSubtypeOf(ValueType::Rtt(5, 3), ValueType::Rtt(5, 3), module1,
+ module2));
+ EXPECT_TRUE(
+ IsSubtypeOf(ValueType::Rtt(5), ValueType::Rtt(5), module1, module2));
+ // Rtts of unrelated types are unrelated.
+ EXPECT_TRUE(!IsSubtypeOf(ValueType::Rtt(1, 1), ValueType::Rtt(2, 1),
+ module1, module2));
+ EXPECT_TRUE(
+ !IsSubtypeOf(ValueType::Rtt(1), ValueType::Rtt(2), module1, module2));
+ EXPECT_TRUE(!IsSubtypeOf(ValueType::Rtt(1, 0), ValueType::Rtt(2), module1,
+ module2));
// Rtts of different depth are unrelated.
- CHECK(!IsSubtypeOf(ValueType::Rtt(5, 1), ValueType::Rtt(5, 3), module1,
- module2));
- CHECK(!IsSubtypeOf(ValueType::Rtt(5, 8), ValueType::Rtt(5, 3), module1,
- module2));
+ EXPECT_TRUE(!IsSubtypeOf(ValueType::Rtt(5, 1), ValueType::Rtt(5, 3),
+ module1, module2));
+ EXPECT_TRUE(!IsSubtypeOf(ValueType::Rtt(5, 8), ValueType::Rtt(5, 3),
+ module1, module2));
// Rtts of identical types are subtype-related.
- CHECK(IsSubtypeOf(ValueType::Rtt(8, 1), ValueType::Rtt(9, 1), module1,
- module));
+ EXPECT_TRUE(IsSubtypeOf(ValueType::Rtt(8, 1), ValueType::Rtt(9, 1), module1,
+ module));
+ EXPECT_TRUE(
+ IsSubtypeOf(ValueType::Rtt(8), ValueType::Rtt(9), module1, module));
// Rtts of subtypes are not related.
- CHECK(!IsSubtypeOf(ValueType::Rtt(1, 1), ValueType::Rtt(0, 1), module1,
- module));
+ EXPECT_TRUE(!IsSubtypeOf(ValueType::Rtt(1, 1), ValueType::Rtt(0, 1),
+ module1, module));
+ EXPECT_TRUE(
+ !IsSubtypeOf(ValueType::Rtt(1), ValueType::Rtt(0), module1, module));
+ // rtt(t, d) <: rtt(t)
+ for (uint8_t depth : {0, 1, 5}) {
+ EXPECT_TRUE(IsSubtypeOf(ValueType::Rtt(1, depth), ValueType::Rtt(1),
+ module1, module));
+ }
}
}
diff --git a/deps/v8/test/unittests/wasm/trap-handler-posix-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-posix-unittest.cc
index 604d2adfb2..b2eb70c247 100644
--- a/deps/v8/test/unittests/wasm/trap-handler-posix-unittest.cc
+++ b/deps/v8/test/unittests/wasm/trap-handler-posix-unittest.cc
@@ -53,7 +53,8 @@ TEST_F(SignalHandlerFallbackTest, DoTest) {
const int save_sigs = 1;
if (!sigsetjmp(continuation_, save_sigs)) {
constexpr bool use_default_signal_handler = true;
- CHECK(v8::V8::EnableWebAssemblyTrapHandler(use_default_signal_handler));
+ EXPECT_TRUE(
+ v8::V8::EnableWebAssemblyTrapHandler(use_default_signal_handler));
CrashOnPurpose();
FAIL();
} else {
diff --git a/deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc
index 006f1344ba..8ad753469e 100644
--- a/deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc
+++ b/deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc
@@ -52,7 +52,7 @@ class ExceptionHandlerFallbackTest : public ::testing::Test {
void TearDown() override {
// be a good citizen and remove the exception handler.
ULONG result = RemoveVectoredExceptionHandler(registered_handler_);
- CHECK(result);
+ EXPECT_TRUE(result);
}
private:
@@ -61,9 +61,9 @@ class ExceptionHandlerFallbackTest : public ::testing::Test {
v8::PageAllocator* page_allocator = i::GetPlatformPageAllocator();
// Make the allocated memory accessible so that from now on memory accesses
// do not cause an exception anymore.
- CHECK(i::SetPermissions(page_allocator, g_start_address,
- page_allocator->AllocatePageSize(),
- v8::PageAllocator::kReadWrite));
+ EXPECT_TRUE(i::SetPermissions(page_allocator, g_start_address,
+ page_allocator->AllocatePageSize(),
+ v8::PageAllocator::kReadWrite));
// The memory access should work now, we can continue execution.
return EXCEPTION_CONTINUE_EXECUTION;
}
@@ -74,7 +74,7 @@ class ExceptionHandlerFallbackTest : public ::testing::Test {
TEST_F(ExceptionHandlerFallbackTest, DoTest) {
constexpr bool use_default_handler = true;
- CHECK(v8::V8::EnableWebAssemblyTrapHandler(use_default_handler));
+ EXPECT_TRUE(v8::V8::EnableWebAssemblyTrapHandler(use_default_handler));
// In the original test setup the test memory is protected against any kind of
// access. Therefore the access here causes an access violation exception,
// which should be caught by the exception handler we install above. In the
@@ -83,8 +83,8 @@ TEST_F(ExceptionHandlerFallbackTest, DoTest) {
// memory access again. This time we expect the memory access to work.
constexpr int test_value = 42;
WriteToTestMemory(test_value);
- CHECK_EQ(test_value, ReadFromTestMemory());
- CHECK(g_handler_got_executed);
+ EXPECT_EQ(test_value, ReadFromTestMemory());
+ EXPECT_TRUE(g_handler_got_executed);
v8::internal::trap_handler::RemoveTrapHandler();
}
diff --git a/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
index 478ee45aee..1d8efdae75 100644
--- a/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
+++ b/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
@@ -85,7 +85,7 @@ class TrapHandlerTest : public TestWithIsolate,
backing_store_ = BackingStore::AllocateWasmMemory(i_isolate(), 1, 1,
SharedFlag::kNotShared);
CHECK(backing_store_);
- CHECK(backing_store_->has_guard_regions());
+ EXPECT_TRUE(backing_store_->has_guard_regions());
// The allocated backing store ends with a guard page.
crash_address_ = reinterpret_cast<Address>(backing_store_->buffer_start()) +
backing_store_->byte_length() + 32;
@@ -104,11 +104,11 @@ class TrapHandlerTest : public TestWithIsolate,
sigemptyset(&action.sa_mask);
action.sa_flags = SA_SIGINFO;
// SIGSEGV happens for wasm oob memory accesses on Linux.
- CHECK_EQ(0, sigaction(SIGSEGV, &action, &g_old_segv_action));
+ EXPECT_EQ(0, sigaction(SIGSEGV, &action, &g_old_segv_action));
// SIGBUS happens for wasm oob memory accesses on macOS.
- CHECK_EQ(0, sigaction(SIGBUS, &action, &g_old_bus_action));
+ EXPECT_EQ(0, sigaction(SIGBUS, &action, &g_old_bus_action));
// SIGFPE to simulate crashes which are not handled by the trap handler.
- CHECK_EQ(0, sigaction(SIGFPE, &action, &g_old_fpe_action));
+ EXPECT_EQ(0, sigaction(SIGFPE, &action, &g_old_fpe_action));
#elif V8_OS_WIN
g_registered_handler =
AddVectoredExceptionHandler(/*first=*/0, TestHandler);
@@ -117,7 +117,7 @@ class TrapHandlerTest : public TestWithIsolate,
void TearDown() override {
// We should always have left wasm code.
- CHECK(!GetThreadInWasmFlag());
+ EXPECT_TRUE(!GetThreadInWasmFlag());
buffer_.reset();
recovery_buffer_.reset();
backing_store_.reset();
@@ -128,9 +128,9 @@ class TrapHandlerTest : public TestWithIsolate,
#if V8_OS_LINUX || V8_OS_MACOSX || V8_OS_FREEBSD
// The test handler cleans up the signal handler setup in the test. If the
// test handler was not called, we have to do the cleanup ourselves.
- CHECK_EQ(0, sigaction(SIGSEGV, &g_old_segv_action, nullptr));
- CHECK_EQ(0, sigaction(SIGFPE, &g_old_fpe_action, nullptr));
- CHECK_EQ(0, sigaction(SIGBUS, &g_old_bus_action, nullptr));
+ EXPECT_EQ(0, sigaction(SIGSEGV, &g_old_segv_action, nullptr));
+ EXPECT_EQ(0, sigaction(SIGFPE, &g_old_fpe_action, nullptr));
+ EXPECT_EQ(0, sigaction(SIGBUS, &g_old_bus_action, nullptr));
#elif V8_OS_WIN
RemoveVectoredExceptionHandler(g_registered_handler);
g_registered_handler = nullptr;
@@ -232,21 +232,21 @@ class TrapHandlerTest : public TestWithIsolate,
GeneratedCode<void>::FromAddress(
i_isolate(), reinterpret_cast<Address>(buffer_->start()))
.Call();
- CHECK(!g_test_handler_executed);
+ EXPECT_FALSE(g_test_handler_executed);
}
// Execute the code in buffer. We expect a crash which we recover from in the
// test handler.
void ExecuteExpectCrash(TestingAssemblerBuffer* buffer,
bool check_wasm_flag = true) {
- CHECK(!g_test_handler_executed);
+ EXPECT_FALSE(g_test_handler_executed);
buffer->MakeExecutable();
GeneratedCode<void>::FromAddress(i_isolate(),
reinterpret_cast<Address>(buffer->start()))
.Call();
- CHECK(g_test_handler_executed);
+ EXPECT_TRUE(g_test_handler_executed);
g_test_handler_executed = false;
- if (check_wasm_flag) CHECK(!GetThreadInWasmFlag());
+ if (check_wasm_flag) EXPECT_FALSE(GetThreadInWasmFlag());
}
bool test_handler_executed() { return g_test_handler_executed; }
@@ -457,12 +457,12 @@ TEST_P(TrapHandlerTest, TestCrashInOtherThread) {
desc.instr_size, 1, &protected_instruction);
CodeRunner runner(this, buffer_.get());
- CHECK(!GetThreadInWasmFlag());
+ EXPECT_FALSE(GetThreadInWasmFlag());
// Set the thread-in-wasm flag manually in this thread.
*trap_handler::GetThreadInWasmThreadLocalAddress() = 1;
- CHECK(runner.Start());
+ EXPECT_TRUE(runner.Start());
runner.Join();
- CHECK(GetThreadInWasmFlag());
+ EXPECT_TRUE(GetThreadInWasmFlag());
// Reset the thread-in-wasm flag.
*trap_handler::GetThreadInWasmThreadLocalAddress() = 0;
}
diff --git a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
index 7311ad39e8..f71b881b5a 100644
--- a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
@@ -28,16 +28,16 @@ void DisjointAllocationPoolTest::CheckPool(
const DisjointAllocationPool& mem,
std::initializer_list<base::AddressRegion> expected_regions) {
const auto& regions = mem.regions();
- CHECK_EQ(regions.size(), expected_regions.size());
+ EXPECT_EQ(regions.size(), expected_regions.size());
auto iter = expected_regions.begin();
for (auto it = regions.begin(), e = regions.end(); it != e; ++it, ++iter) {
- CHECK_EQ(*it, *iter);
+ EXPECT_EQ(*it, *iter);
}
}
void DisjointAllocationPoolTest::CheckRange(base::AddressRegion region1,
base::AddressRegion region2) {
- CHECK_EQ(region1, region2);
+ EXPECT_EQ(region1, region2);
}
DisjointAllocationPool DisjointAllocationPoolTest::Make(
@@ -51,7 +51,7 @@ DisjointAllocationPool DisjointAllocationPoolTest::Make(
TEST_F(DisjointAllocationPoolTest, ConstructEmpty) {
DisjointAllocationPool a;
- CHECK(a.IsEmpty());
+ EXPECT_TRUE(a.IsEmpty());
CheckPool(a, {});
a.Merge({1, 4});
CheckPool(a, {{1, 4}});
@@ -59,7 +59,7 @@ TEST_F(DisjointAllocationPoolTest, ConstructEmpty) {
TEST_F(DisjointAllocationPoolTest, ConstructWithRange) {
DisjointAllocationPool a({1, 4});
- CHECK(!a.IsEmpty());
+ EXPECT_FALSE(a.IsEmpty());
CheckPool(a, {{1, 4}});
}
@@ -70,16 +70,16 @@ TEST_F(DisjointAllocationPoolTest, SimpleExtract) {
CheckRange(b, {1, 2});
a.Merge(b);
CheckPool(a, {{1, 4}});
- CHECK_EQ(a.regions().size(), 1);
- CHECK_EQ(a.regions().begin()->begin(), 1);
- CHECK_EQ(a.regions().begin()->end(), 5);
+ EXPECT_EQ(a.regions().size(), uint32_t{1});
+ EXPECT_EQ(a.regions().begin()->begin(), uint32_t{1});
+ EXPECT_EQ(a.regions().begin()->end(), uint32_t{5});
}
TEST_F(DisjointAllocationPoolTest, ExtractAll) {
DisjointAllocationPool a({1, 4});
base::AddressRegion b = a.Allocate(4);
CheckRange(b, {1, 4});
- CHECK(a.IsEmpty());
+ EXPECT_TRUE(a.IsEmpty());
a.Merge(b);
CheckPool(a, {{1, 4}});
}
@@ -88,14 +88,14 @@ TEST_F(DisjointAllocationPoolTest, FailToExtract) {
DisjointAllocationPool a = Make({{1, 4}});
base::AddressRegion b = a.Allocate(5);
CheckPool(a, {{1, 4}});
- CHECK(b.is_empty());
+ EXPECT_TRUE(b.is_empty());
}
TEST_F(DisjointAllocationPoolTest, FailToExtractExact) {
DisjointAllocationPool a = Make({{1, 4}, {10, 4}});
base::AddressRegion b = a.Allocate(5);
CheckPool(a, {{1, 4}, {10, 4}});
- CHECK(b.is_empty());
+ EXPECT_TRUE(b.is_empty());
}
TEST_F(DisjointAllocationPoolTest, ExtractExact) {
diff --git a/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc b/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc
index 708ff9b030..9689a15eb4 100644
--- a/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc
@@ -22,7 +22,7 @@ TEST_F(WasmCallDescriptorTest, TestExternRefIsGrouped) {
for (size_t i = 0; i < kMaxCount; i += 2) {
params[i] = kWasmExternRef;
- CHECK_LT(i + 1, kMaxCount);
+ EXPECT_TRUE(i + 1 < kMaxCount);
params[i + 1] = kWasmI32;
}
@@ -32,7 +32,7 @@ TEST_F(WasmCallDescriptorTest, TestExternRefIsGrouped) {
compiler::GetWasmCallDescriptor(zone(), &sig);
// The WasmInstance is the implicit first parameter.
- CHECK_EQ(count + 1, desc->ParameterCount());
+ EXPECT_EQ(count + 1, desc->ParameterCount());
bool has_untagged_stack_param = false;
bool has_tagged_register_param = false;
@@ -45,7 +45,7 @@ TEST_F(WasmCallDescriptorTest, TestExternRefIsGrouped) {
if (location.IsRegister()) {
has_tagged_register_param = true;
} else {
- CHECK(location.IsCallerFrameSlot());
+ EXPECT_TRUE(location.IsCallerFrameSlot());
max_tagged_stack_location =
std::max(max_tagged_stack_location, location.AsCallerFrameSlot());
}
@@ -55,14 +55,14 @@ TEST_F(WasmCallDescriptorTest, TestExternRefIsGrouped) {
min_untagged_stack_location = std::min(min_untagged_stack_location,
location.AsCallerFrameSlot());
} else {
- CHECK(location.IsRegister());
+ EXPECT_TRUE(location.IsRegister());
}
}
}
// There should never be a tagged parameter in a register and an untagged
// parameter on the stack at the same time.
- CHECK_EQ(false, has_tagged_register_param && has_untagged_stack_param);
- CHECK_LT(max_tagged_stack_location, min_untagged_stack_location);
+ EXPECT_EQ(false, has_tagged_register_param && has_untagged_stack_param);
+ EXPECT_TRUE(max_tagged_stack_location < min_untagged_stack_location);
}
}
diff --git a/deps/v8/test/wasm-js/testcfg.py b/deps/v8/test/wasm-js/testcfg.py
index 2a7843430b..2788ed5fce 100644
--- a/deps/v8/test/wasm-js/testcfg.py
+++ b/deps/v8/test/wasm-js/testcfg.py
@@ -39,6 +39,11 @@ proposal_flags = [{
'flags': ['--experimental-wasm-simd',
'--wasm-staging']
},
+ {
+ 'name': 'memory64',
+ 'flags': ['--experimental-wasm-memory64',
+ '--wasm-staging']
+ },
]
diff --git a/deps/v8/test/wasm-js/tests.tar.gz.sha1 b/deps/v8/test/wasm-js/tests.tar.gz.sha1
index 05f7ebb2b7..287917ecda 100644
--- a/deps/v8/test/wasm-js/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-js/tests.tar.gz.sha1
@@ -1 +1 @@
-31c11a41026c56be3c6d6470755d476840ce0132 \ No newline at end of file
+ef30002bb06bd09b91b62d3fa152d1af94b28eaf \ No newline at end of file
diff --git a/deps/v8/test/wasm-js/wasm-js.status b/deps/v8/test/wasm-js/wasm-js.status
index 7d05f23747..9f8d54442d 100644
--- a/deps/v8/test/wasm-js/wasm-js.status
+++ b/deps/v8/test/wasm-js/wasm-js.status
@@ -42,10 +42,10 @@
}], # mode == debug or simulator_run or variant != default or arch == arm or tsan or msan or asan
##############################################################################
-['lite_mode or variant == jitless', {
- # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+# TODO(v8:7777): Change this once wasm is supported in jitless mode.
+['not has_webassembly or variant == jitless', {
'*': [SKIP],
-}], # lite_mode or variant == jitless
+}], # not has_webassembly or variant == jitless
################################################################################
['variant == stress_snapshot', {
diff --git a/deps/v8/test/wasm-spec-tests/testcfg.py b/deps/v8/test/wasm-spec-tests/testcfg.py
index 6d4f139a2e..908ce09d4e 100644
--- a/deps/v8/test/wasm-spec-tests/testcfg.py
+++ b/deps/v8/test/wasm-spec-tests/testcfg.py
@@ -32,6 +32,11 @@ proposal_flags = [{
'flags': ['--experimental-wasm-simd',
'--wasm-staging']
},
+ {
+ 'name': 'memory64',
+ 'flags': ['--experimental-wasm-memory64',
+ '--wasm-staging']
+ },
]
class TestLoader(testsuite.JSTestLoader):
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
index dd94a6715a..ee7a50fdd9 100644
--- a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -1 +1 @@
-38edac624024750d4d35619df1bbdc7902f9cb7c \ No newline at end of file
+4db01ba8549a087ae9adaa8540cec2689c7dad64 \ No newline at end of file
diff --git a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
index 85850dd967..38ac495bea 100644
--- a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
+++ b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
@@ -5,14 +5,10 @@
[
[ALWAYS, {
'skip-stack-guard-page': [PASS, ['((arch == ppc or arch == ppc64 or arch == s390 or arch == s390x) and simulator_run)', SKIP]],
- # TODO(wasm) Investigate failing spec tests after update.
- 'binary': [FAIL],
- 'proposals/bulk-memory-operations/binary': [FAIL],
# TODO(v8:10994): Failing spec test after update.
- 'proposals/simd/binary': [FAIL],
+ 'proposals/simd/imports': [FAIL],
'proposals/simd/data': [FAIL],
'proposals/js-types/data': [FAIL],
- 'proposals/reference-types/binary': [FAIL],
# TODO(v8:9144): The MVP behavior when bounds-checking segments changed in
# the bulk-memory proposal. Since we've enabled bulk-memory by default, we
@@ -22,8 +18,9 @@
'data': [FAIL],
# TODO(wasm): Roll newest tests into "js-types" repository.
- 'proposals/js-types/exports': [FAIL],
+ 'proposals/js-types/elem': [FAIL],
'proposals/js-types/globals': [FAIL],
+ 'proposals/js-types/imports': [FAIL],
'proposals/js-types/linking': [FAIL],
# TODO(wasm): Roll newest tests into "tail-call" repository.
@@ -39,6 +36,18 @@
# This test requires the reftypes flag to be disabled.
'proposals/bulk-memory-operations/imports': [FAIL],
+
+ # TODO(v8:11401): Fix memory64 spec tests / the v8 implementation (whatever
+ # is broken).
+ 'proposals/memory64/address64': [FAIL],
+ 'proposals/memory64/data': [FAIL],
+ 'proposals/memory64/elem': [FAIL],
+ 'proposals/memory64/float_memory64': [FAIL],
+ 'proposals/memory64/imports': [FAIL],
+ 'proposals/memory64/load64': [FAIL],
+ 'proposals/memory64/memory64': [FAIL],
+ 'proposals/memory64/memory_grow64': [FAIL],
+ 'proposals/memory64/memory_trap64': [FAIL],
}], # ALWAYS
['arch == arm and not simulator_run', {
@@ -96,6 +105,57 @@
'proposals/tail-call/skip-stack-guard-page': '--sim-stack-size=8192',
}], # '(arch == mipsel or arch == mips64el) and simulator_run'
+['arch == riscv64', {
+ 'conversions': [SKIP],
+ 'proposals/JS-BigInt-integration/conversions': [SKIP],
+ 'proposals/bulk-memory-operations/conversions': [SKIP],
+ 'proposals/js-types/conversions': [SKIP],
+ 'proposals/multi-value/conversions': [SKIP],
+ 'proposals/reference-types/conversions': [SKIP],
+
+ 'f32': [SKIP],
+ 'f64': [SKIP],
+ 'proposals/multi-value/f64': [SKIP],
+ 'proposals/JS-BigInt-integration/f64': [SKIP],
+ 'proposals/JS-BigInt-integration/f32': [SKIP],
+ 'proposals/bulk-memory-operations/f32': [SKIP],
+ 'proposals/bulk-memory-operations/f64': [SKIP],
+ 'proposals/js-types/f32': [SKIP],
+ 'proposals/js-types/f64': [SKIP],
+ 'proposals/multi-value/f32': [SKIP],
+ 'proposals/reference-types/f32': [SKIP],
+ 'proposals/reference-types/f64': [SKIP],
+
+  # the following all fail w/ symptoms captured in issue #166
+ 'float_exprs': [SKIP],
+ 'proposals/tail-call/conversions': [SKIP],
+ 'proposals/tail-call/float_exprs': [SKIP],
+ 'proposals/tail-call/f64': [SKIP],
+ 'proposals/tail-call/f32': [SKIP],
+
+ # These tests need larger stack size on simulator.
+ 'skip-stack-guard-page': '--sim-stack-size=8192',
+ 'proposals/tail-call/skip-stack-guard-page': '--sim-stack-size=8192',
+
+ # SIMD is not fully implemented yet.
+ 'proposals/simd/*': [SKIP],
+
+ # See issue #403
+ 'proposals/js-types/exports': [SKIP],
+ 'proposals/js-types/imports': [SKIP],
+
+  # riscv64 has not implemented some wasm atomic functions
+ 'left-to-right': [SKIP],
+ 'float_misc': [SKIP],
+ 'f64_bitwise': [SKIP],
+ 'f32_bitwise': [SKIP],
+ 'proposals/tail-call/f32_bitwise': [SKIP],
+ 'proposals/tail-call/f64_bitwise': [SKIP],
+ 'proposals/tail-call/float_misc': [SKIP],
+ 'proposals/tail-call/left-to-right': [SKIP],
+
+}], # 'arch == riscv64'
+
['arch == ppc or arch == ppc64', {
# These tests fail because ppc float min and max doesn't convert sNaN to qNaN.
'f32': [SKIP],
@@ -131,14 +191,19 @@
}], # 'arch == s390 or arch == s390x'
##############################################################################
-['lite_mode or variant == jitless', {
- # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+# TODO(v8:7777): Change this once wasm is supported in jitless mode.
+['not has_webassembly or variant == jitless', {
'*': [SKIP],
-}], # lite_mode or variant == jitless
+}], # not has_webassembly or variant == jitless
################################################################################
['variant == stress_snapshot', {
'*': [SKIP], # only relevant for mjsunit tests.
}],
+##############################################################################
+['no_simd_sse == True', {
+ 'proposals/simd/*': [SKIP],
+}], # no_simd_sse == True
+
]
diff --git a/deps/v8/third_party/v8/builtins/OWNERS b/deps/v8/third_party/v8/builtins/OWNERS
index 255508218e..2abd973305 100644
--- a/deps/v8/third_party/v8/builtins/OWNERS
+++ b/deps/v8/third_party/v8/builtins/OWNERS
@@ -1,3 +1,2 @@
jgruber@chromium.org
szuend@chromium.org
-tebbi@chromium.org
diff --git a/deps/v8/third_party/v8/builtins/array-sort.tq b/deps/v8/third_party/v8/builtins/array-sort.tq
index ea7c0e7dc9..7737ab78e3 100644
--- a/deps/v8/third_party/v8/builtins/array-sort.tq
+++ b/deps/v8/third_party/v8/builtins/array-sort.tq
@@ -276,7 +276,7 @@ Store<FastSmiElements>(
const object = UnsafeCast<JSObject>(sortState.receiver);
const elements = UnsafeCast<FixedArray>(object.elements);
const value = UnsafeCast<Smi>(value);
- StoreFixedArrayElement(elements, index, value, SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(elements, index, value);
return kSuccess;
}
diff --git a/deps/v8/third_party/zlib/adler32.c b/deps/v8/third_party/zlib/adler32.c
index 696773a09d..8f8fbb9048 100644
--- a/deps/v8/third_party/zlib/adler32.c
+++ b/deps/v8/third_party/zlib/adler32.c
@@ -74,10 +74,10 @@ uLong ZEXPORT adler32_z(adler, buf, len)
unsigned n;
#if defined(ADLER32_SIMD_SSSE3)
- if (x86_cpu_enable_ssse3 && buf && len >= 64)
+ if (buf != Z_NULL && len >= 64 && x86_cpu_enable_ssse3)
return adler32_simd_(adler, buf, len);
#elif defined(ADLER32_SIMD_NEON)
- if (buf && len >= 64)
+ if (buf != Z_NULL && len >= 64)
return adler32_simd_(adler, buf, len);
#endif
diff --git a/deps/v8/third_party/zlib/google/zip_internal.cc b/deps/v8/third_party/zlib/google/zip_internal.cc
index 9cbb78cb58..354fbf8c8d 100644
--- a/deps/v8/third_party/zlib/google/zip_internal.cc
+++ b/deps/v8/third_party/zlib/google/zip_internal.cc
@@ -56,10 +56,10 @@ void* ZipOpenFunc(void *opaque, const char* filename, int mode) {
creation_disposition = CREATE_ALWAYS;
}
- base::string16 filename16 = base::UTF8ToUTF16(filename);
+ std::wstring filenamew = base::UTF8ToWide(filename);
if ((filename != NULL) && (desired_access != 0)) {
- file = CreateFile(filename16.c_str(), desired_access, share_mode,
- NULL, creation_disposition, flags_and_attributes, NULL);
+ file = CreateFile(filenamew.c_str(), desired_access, share_mode, NULL,
+ creation_disposition, flags_and_attributes, NULL);
}
if (file == INVALID_HANDLE_VALUE)
diff --git a/deps/v8/third_party/zlib/google/zip_reader.cc b/deps/v8/third_party/zlib/google/zip_reader.cc
index 96e9ff07fb..1e86afe77d 100644
--- a/deps/v8/third_party/zlib/google/zip_reader.cc
+++ b/deps/v8/third_party/zlib/google/zip_reader.cc
@@ -334,9 +334,9 @@ void ZipReader::ExtractCurrentEntryToFilePathAsync(
base::SequencedTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::BindOnce(&ZipReader::ExtractChunk, weak_ptr_factory_.GetWeakPtr(),
- Passed(std::move(output_file)),
- std::move(success_callback), std::move(failure_callback),
- progress_callback, 0 /* initial offset */));
+ std::move(output_file), std::move(success_callback),
+ std::move(failure_callback), progress_callback,
+ 0 /* initial offset */));
}
bool ZipReader::ExtractCurrentEntryToString(uint64_t max_read_bytes,
@@ -436,9 +436,9 @@ void ZipReader::ExtractChunk(base::File output_file,
base::SequencedTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::BindOnce(&ZipReader::ExtractChunk, weak_ptr_factory_.GetWeakPtr(),
- Passed(std::move(output_file)),
- std::move(success_callback), std::move(failure_callback),
- progress_callback, current_progress));
+ std::move(output_file), std::move(success_callback),
+ std::move(failure_callback), progress_callback,
+ current_progress));
}
}
diff --git a/deps/v8/tools/SourceMap.js b/deps/v8/tools/SourceMap.js
deleted file mode 100644
index 46354506ec..0000000000
--- a/deps/v8/tools/SourceMap.js
+++ /dev/null
@@ -1,382 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This is a copy from blink dev tools, see:
-// http://src.chromium.org/viewvc/blink/trunk/Source/devtools/front_end/SourceMap.js
-// revision: 153407
-
-// Added to make the file work without dev tools
-WebInspector = {};
-WebInspector.ParsedURL = {};
-WebInspector.ParsedURL.completeURL = function(){};
-// start of original file content
-
-/*
- * Copyright (C) 2012 Google Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Implements Source Map V3 model. See http://code.google.com/p/closure-compiler/wiki/SourceMaps
- * for format description.
- * @constructor
- * @param {string} sourceMappingURL
- * @param {SourceMapV3} payload
- */
-WebInspector.SourceMap = function(sourceMappingURL, payload)
-{
- if (!WebInspector.SourceMap.prototype._base64Map) {
- const base64Digits = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
- WebInspector.SourceMap.prototype._base64Map = {};
- for (var i = 0; i < base64Digits.length; ++i)
- WebInspector.SourceMap.prototype._base64Map[base64Digits.charAt(i)] = i;
- }
-
- this._sourceMappingURL = sourceMappingURL;
- this._reverseMappingsBySourceURL = {};
- this._mappings = [];
- this._sources = {};
- this._sourceContentByURL = {};
- this._parseMappingPayload(payload);
-}
-
-/**
- * @param {string} sourceMapURL
- * @param {string} compiledURL
- * @param {function(WebInspector.SourceMap)} callback
- */
-WebInspector.SourceMap.load = function(sourceMapURL, compiledURL, callback)
-{
- NetworkAgent.loadResourceForFrontend(WebInspector.resourceTreeModel.mainFrame.id, sourceMapURL, undefined, contentLoaded.bind(this));
-
- /**
- * @param {?Protocol.Error} error
- * @param {number} statusCode
- * @param {NetworkAgent.Headers} headers
- * @param {string} content
- */
- function contentLoaded(error, statusCode, headers, content)
- {
- if (error || !content || statusCode >= 400) {
- console.error("Could not load content for " + sourceMapURL + " : " + (error || ("HTTP status code: " + statusCode)));
- callback(null);
- return;
- }
-
- if (content.slice(0, 3) === ")]}")
- content = content.substring(content.indexOf('\n'));
- try {
- var payload = /** @type {SourceMapV3} */ (JSON.parse(content));
- var baseURL = sourceMapURL.startsWith("data:") ? compiledURL : sourceMapURL;
- callback(new WebInspector.SourceMap(baseURL, payload));
- } catch(e) {
- console.error(e.message);
- callback(null);
- }
- }
-}
-
-WebInspector.SourceMap.prototype = {
- /**
- * @return {Array.<string>}
- */
- sources: function()
- {
- return Object.keys(this._sources);
- },
-
- /**
- * @param {string} sourceURL
- * @return {string|undefined}
- */
- sourceContent: function(sourceURL)
- {
- return this._sourceContentByURL[sourceURL];
- },
-
- /**
- * @param {string} sourceURL
- * @param {WebInspector.ResourceType} contentType
- * @return {WebInspector.ContentProvider}
- */
- sourceContentProvider: function(sourceURL, contentType)
- {
- var lastIndexOfDot = sourceURL.lastIndexOf(".");
- var extension = lastIndexOfDot !== -1 ? sourceURL.substr(lastIndexOfDot + 1) : "";
- var mimeType = WebInspector.ResourceType.mimeTypesForExtensions[extension.toLowerCase()];
- var sourceContent = this.sourceContent(sourceURL);
- if (sourceContent)
- return new WebInspector.StaticContentProvider(contentType, sourceContent, mimeType);
- return new WebInspector.CompilerSourceMappingContentProvider(sourceURL, contentType, mimeType);
- },
-
- /**
- * @param {SourceMapV3} mappingPayload
- */
- _parseMappingPayload: function(mappingPayload)
- {
- if (mappingPayload.sections)
- this._parseSections(mappingPayload.sections);
- else
- this._parseMap(mappingPayload, 0, 0);
- },
-
- /**
- * @param {Array.<SourceMapV3.Section>} sections
- */
- _parseSections: function(sections)
- {
- for (var i = 0; i < sections.length; ++i) {
- var section = sections[i];
- this._parseMap(section.map, section.offset.line, section.offset.column);
- }
- },
-
- /**
- * @param {number} lineNumber in compiled resource
- * @param {number} columnNumber in compiled resource
- * @return {?Array}
- */
- findEntry: function(lineNumber, columnNumber)
- {
- var first = 0;
- var count = this._mappings.length;
- while (count > 1) {
- var step = count >> 1;
- var middle = first + step;
- var mapping = this._mappings[middle];
- if (lineNumber < mapping[0] || (lineNumber === mapping[0] && columnNumber < mapping[1]))
- count = step;
- else {
- first = middle;
- count -= step;
- }
- }
- var entry = this._mappings[first];
- if (!first && entry && (lineNumber < entry[0] || (lineNumber === entry[0] && columnNumber < entry[1])))
- return null;
- return entry;
- },
-
- /**
- * @param {string} sourceURL of the originating resource
- * @param {number} lineNumber in the originating resource
- * @return {Array}
- */
- findEntryReversed: function(sourceURL, lineNumber)
- {
- var mappings = this._reverseMappingsBySourceURL[sourceURL];
- for ( ; lineNumber < mappings.length; ++lineNumber) {
- var mapping = mappings[lineNumber];
- if (mapping)
- return mapping;
- }
- return this._mappings[0];
- },
-
- /**
- * @override
- */
- _parseMap: function(map, lineNumber, columnNumber)
- {
- var sourceIndex = 0;
- var sourceLineNumber = 0;
- var sourceColumnNumber = 0;
- var nameIndex = 0;
-
- var sources = [];
- var originalToCanonicalURLMap = {};
- for (var i = 0; i < map.sources.length; ++i) {
- var originalSourceURL = map.sources[i];
- var sourceRoot = map.sourceRoot || "";
- if (sourceRoot && !sourceRoot.endsWith("/"))
- sourceRoot += "/";
- var href = sourceRoot + originalSourceURL;
- var url = WebInspector.ParsedURL.completeURL(this._sourceMappingURL, href) || href;
- originalToCanonicalURLMap[originalSourceURL] = url;
- sources.push(url);
- this._sources[url] = true;
-
- if (map.sourcesContent && map.sourcesContent[i])
- this._sourceContentByURL[url] = map.sourcesContent[i];
- }
-
- var stringCharIterator = new WebInspector.SourceMap.StringCharIterator(map.mappings);
- var sourceURL = sources[sourceIndex];
-
- while (true) {
- if (stringCharIterator.peek() === ",")
- stringCharIterator.next();
- else {
- while (stringCharIterator.peek() === ";") {
- lineNumber += 1;
- columnNumber = 0;
- stringCharIterator.next();
- }
- if (!stringCharIterator.hasNext())
- break;
- }
-
- columnNumber += this._decodeVLQ(stringCharIterator);
- if (this._isSeparator(stringCharIterator.peek())) {
- this._mappings.push([lineNumber, columnNumber]);
- continue;
- }
-
- var sourceIndexDelta = this._decodeVLQ(stringCharIterator);
- if (sourceIndexDelta) {
- sourceIndex += sourceIndexDelta;
- sourceURL = sources[sourceIndex];
- }
- sourceLineNumber += this._decodeVLQ(stringCharIterator);
- sourceColumnNumber += this._decodeVLQ(stringCharIterator);
- if (!this._isSeparator(stringCharIterator.peek()))
- nameIndex += this._decodeVLQ(stringCharIterator);
-
- this._mappings.push([lineNumber, columnNumber, sourceURL, sourceLineNumber, sourceColumnNumber]);
- }
-
- for (var i = 0; i < this._mappings.length; ++i) {
- var mapping = this._mappings[i];
- var url = mapping[2];
- if (!url)
- continue;
- if (!this._reverseMappingsBySourceURL[url])
- this._reverseMappingsBySourceURL[url] = [];
- var reverseMappings = this._reverseMappingsBySourceURL[url];
- var sourceLine = mapping[3];
- if (!reverseMappings[sourceLine])
- reverseMappings[sourceLine] = [mapping[0], mapping[1]];
- }
- },
-
- /**
- * @param {string} char
- * @return {boolean}
- */
- _isSeparator: function(char)
- {
- return char === "," || char === ";";
- },
-
- /**
- * @param {WebInspector.SourceMap.StringCharIterator} stringCharIterator
- * @return {number}
- */
- _decodeVLQ: function(stringCharIterator)
- {
- // Read unsigned value.
- var result = 0;
- var shift = 0;
- do {
- var digit = this._base64Map[stringCharIterator.next()];
- result += (digit & this._VLQ_BASE_MASK) << shift;
- shift += this._VLQ_BASE_SHIFT;
- } while (digit & this._VLQ_CONTINUATION_MASK);
-
- // Fix the sign.
- var negative = result & 1;
- // Use unsigned right shift, so that the 32nd bit is properly shifted
- // to the 31st, and the 32nd becomes unset.
- result >>>= 1;
- if (negate) {
- // We need to OR 0x80000000 here to ensure the 32nd bit (the sign bit
- // in a 32bit int) is always set for negative numbers. If `result`
- // were 1, (meaning `negate` is true and all other bits were zeros),
- // `result` would now be 0. But -0 doesn't flip the 32nd bit as
- // intended. All other numbers will successfully set the 32nd bit
- // without issue, so doing this is a noop for them.
- return -result | 0x80000000;
- }
- return result;
- },
-
- _VLQ_BASE_SHIFT: 5,
- _VLQ_BASE_MASK: (1 << 5) - 1,
- _VLQ_CONTINUATION_MASK: 1 << 5
-}
-
-/**
- * @constructor
- * @param {string} string
- */
-WebInspector.SourceMap.StringCharIterator = function(string)
-{
- this._string = string;
- this._position = 0;
-}
-
-WebInspector.SourceMap.StringCharIterator.prototype = {
- /**
- * @return {string}
- */
- next: function()
- {
- return this._string.charAt(this._position++);
- },
-
- /**
- * @return {string}
- */
- peek: function()
- {
- return this._string.charAt(this._position);
- },
-
- /**
- * @return {boolean}
- */
- hasNext: function()
- {
- return this._position < this._string.length;
- }
-}
diff --git a/deps/v8/tools/arguments.js b/deps/v8/tools/arguments.js
deleted file mode 100644
index c2b3d1bfdb..0000000000
--- a/deps/v8/tools/arguments.js
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-class BaseArgumentsProcessor {
- constructor(args) {
- this.args_ = args;
- this.result_ = this.getDefaultResults();
- console.assert(this.result_ !== undefined)
- console.assert(this.result_.logFileName !== undefined);
- this.argsDispatch_ = this.getArgsDispatch();
- console.assert(this.argsDispatch_ !== undefined);
- }
-
- getDefaultResults() {
- throw "Implement in getDefaultResults in subclass";
- }
-
- getArgsDispatch() {
- throw "Implement getArgsDispatch in subclass";
- }
-
- result() { return this.result_ }
-
- printUsageAndExit() {
- print('Cmdline args: [options] [log-file-name]\n' +
- 'Default log file name is "' +
- this.result_.logFileName + '".\n');
- print('Options:');
- for (var arg in this.argsDispatch_) {
- var synonyms = [arg];
- var dispatch = this.argsDispatch_[arg];
- for (var synArg in this.argsDispatch_) {
- if (arg !== synArg && dispatch === this.argsDispatch_[synArg]) {
- synonyms.push(synArg);
- delete this.argsDispatch_[synArg];
- }
- }
- print(' ' + synonyms.join(', ').padEnd(20) + " " + dispatch[2]);
- }
- quit(2);
- }
-
- parse() {
- while (this.args_.length) {
- var arg = this.args_.shift();
- if (arg.charAt(0) != '-') {
- this.result_.logFileName = arg;
- continue;
- }
- var userValue = null;
- var eqPos = arg.indexOf('=');
- if (eqPos != -1) {
- userValue = arg.substr(eqPos + 1);
- arg = arg.substr(0, eqPos);
- }
- if (arg in this.argsDispatch_) {
- var dispatch = this.argsDispatch_[arg];
- var property = dispatch[0];
- var defaultValue = dispatch[1];
- if (typeof defaultValue == "function") {
- userValue = defaultValue(userValue);
- } else if (userValue == null) {
- userValue = defaultValue;
- }
- this.result_[property] = userValue;
- } else {
- return false;
- }
- }
- return true;
- }
-}
-
-function parseBool(str) {
- if (str == "true" || str == "1") return true;
- return false;
-}
diff --git a/deps/v8/tools/callstats-from-telemetry.sh b/deps/v8/tools/callstats-from-telemetry.sh
index ead482a3ae..cea471cde8 100755
--- a/deps/v8/tools/callstats-from-telemetry.sh
+++ b/deps/v8/tools/callstats-from-telemetry.sh
@@ -3,7 +3,7 @@ set -e
usage() {
cat << EOF
-usage: $0 OPTIONS RESULTS_DIR
+usage: $0 OPTIONS RESULTS_DIR | TRACE_JSON
Convert telemetry json trace result to callstats.html compatible
versions ot ./out.json
@@ -11,6 +11,7 @@ versions ot ./out.json
OPTIONS:
-h Show this message.
RESULTS_DIR tools/perf/artifacts/run_XXX
+ TRACE_JSON .json trace files
EOF
}
@@ -29,9 +30,13 @@ done
# =======================================================================
-RESULTS_DIR=$1
-
-if [[ ! -e "$RESULTS_DIR" ]]; then
+if [[ "$1" == *.json ]]; then
+ echo "Converting json files"
+ JSON=$1
+elif [[ -e "$1" ]]; then
+ echo "Converting reults dir"
+ RESULTS_DIR=$1
+else
echo "RESULTS_DIR '$RESULTS_DIR' not found";
usage;
exit 1;
@@ -39,23 +44,34 @@ fi
OUT=out.json
-
if [[ -e $OUT ]]; then
+ echo "# Creating backup for $OUT"
cp --backup=numbered $OUT $OUT.bak
fi
+echo "# Writing to $OUT"
-echo '{ "telemetry-results": { "placeholder":{}' > $OUT
-
-for PAGE_DIR in $RESULTS_DIR/*_1; do
- PAGE=`basename $PAGE_DIR`;
- JSON="$PAGE_DIR/trace/traceEvents/*_converted.json";
+function convert {
+ NAME=$1
+ JSON=$2
du -sh $JSON;
- echo "Converting PAGE=$PAGE";
+ echo "Converting NAME=$NAME";
echo "," >> $OUT;
- echo "\"$PAGE\": " >> $OUT;
+ echo "\"$NAME\": " >> $OUT;
jq '[.traceEvents[].args | select(."runtime-call-stats" != null) | ."runtime-call-stats"]' $JSON >> $OUT;
-done
+}
+echo '{ "telemetry-results": { "placeholder":{}' > $OUT
+if [[ $RESULTS_DIR ]]; then
+ for PAGE_DIR in $RESULTS_DIR/*_1; do
+ NAME=`basename $PAGE_DIR`;
+ JSON="$PAGE_DIR/trace/traceEvents/*_converted.json";
+ convert $NAME $JSON
+ done
+else
+ for JSON in $@; do
+ convert $JSON $JSON
+ done
+fi
echo '}}' >> $OUT
diff --git a/deps/v8/tools/callstats.html b/deps/v8/tools/callstats.html
index 5e691ed5c6..cc4260d280 100644
--- a/deps/v8/tools/callstats.html
+++ b/deps/v8/tools/callstats.html
@@ -710,128 +710,168 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
function showGraphs(page) {
- let groups = page.groups.filter(each => each.enabled);
+ let groups = page.groups.filter(each => each.enabled && !each.isTotal);
// Sort groups by the biggest impact
- groups.sort((a, b) => {
- return b.getTimeImpact() - a.getTimeImpact();
- });
+ groups.sort((a, b) => b.getTimeImpact() - a.getTimeImpact());
if (selectedGroup == undefined) {
selectedGroup = groups[0];
} else {
groups = groups.filter(each => each.name != selectedGroup.name);
- groups.unshift(selectedGroup);
+ if (!selectedGroup.isTotal && selectedGroup.enabled) {
+ groups.unshift(selectedGroup);
+ }
}
- showPageGraph(groups, page);
- showVersionGraph(groups, page);
- showPageVersionGraph(groups, page);
+ // Display graphs delayed for a snappier UI.
+ setTimeout(() => {
+ showPageVersionGraph(groups, page);
+ showPageGraph(groups, page);
+ showVersionGraph(groups, page)
+ }, 10);
}
- function getGraphDataTable(groups) {
+ function getGraphDataTable(groups, page) {
let dataTable = new google.visualization.DataTable();
dataTable.addColumn('string', 'Name');
groups.forEach(group => {
let column = dataTable.addColumn('number', group.name.substring(6));
dataTable.setColumnProperty(column, 'group', group);
+ column = dataTable.addColumn({role: "annotation"});
+ dataTable.setColumnProperty(column, 'group', group);
});
+ let column = dataTable.addColumn('number', 'Chart Total');
+ dataTable.setColumnProperty(column, 'group', page.total);
+ column = dataTable.addColumn({role: "annotation"});
+ dataTable.setColumnProperty(column, 'group', page.total);
return dataTable;
}
let selectedGroup;
+
+ class ChartRow {
+ static kSortFirstValueRelative(chartRow) {
+ if (selectedGroup?.isTotal) return chartRow.total
+ return chartRow.data[0] / chartRow.total;
+ }
+
+ static kSortByFirstValue(chartRow) {
+ if (selectedGroup?.isTotal) return chartRow.total
+ return chartRow.data[0];
+ }
+
+ constructor(linkedPage, label, sortValue_fn, data,
+ excludeFromAverage=false) {
+ this.linkedPage = linkedPage;
+ this.label = label;
+ if (!Array.isArray(data)) {
+ throw new Error("Provide an Array for data");
+ }
+ this.data = data;
+ this.total = 0;
+ for (let i = 0; i < data.length; i++) this.total += data[i];
+ this.sortValue = sortValue_fn(this);
+ this.excludeFromAverage = excludeFromAverage;
+ }
+
+ forDataTable(maxRowsTotal) {
+ // row = [label, entry1, annotation1, entry2, annotation2, ...]
+ const rowData = [this.label];
+ const kShowLabelLimit = 0.1;
+ const kMinLabelWidth = 80;
+ const chartWidth = window.innerWidth - 400;
+ // Add value,label pairs
+ for (let i = 0; i < this.data.length; i++) {
+ const value = this.data[i];
+ let label = '';
+        // Only show labels for entries that are large enough.
+ if (Math.abs(value / maxRowsTotal) * chartWidth > kMinLabelWidth) {
+ label = ms(value);
+ }
+ rowData.push(value, label);
+ }
+ // Add the total row, with very small negative dummy entry for correct
+ // placement of labels in diff view.
+ rowData.push(this.total >= 0 ? 0 : -0.000000001, ms(this.total));
+ return rowData;
+ }
+ }
+
+ function setDataTableRows(dataTable, rows) {
+ let skippedRows = 0;
+ // Always sort by the selected entry (first column after the label)
+ rows.sort((a,b) => b.sortValue - a.sortValue);
+ // Aggregate row data for Average/SUM chart entry:
+ const aggregateData = rows[0].data.slice().fill(0);
+ let maxTotal = 0;
+ for (let i = 0; i < rows.length; i++) {
+ const row = rows[i];
+ let total = Math.abs(row.total);
+ if (total > maxTotal) maxTotal = total;
+ if (row.excludeFromAverage) {
+ skippedRows++;
+ continue
+ }
+ const chartRowData = row.data;
+ for (let j = 0; j < chartRowData.length; j++) {
+ aggregateData[j] += chartRowData[j];
+ }
+ }
+ const length = rows.length - skippedRows;
+ for (let i = 0; i < aggregateData.length; i++) {
+ aggregateData[i] /= rows.length;
+ }
+ const averageRow = new ChartRow(undefined, 'Average',
+ ChartRow.kSortByFirstValue, aggregateData);
+ dataTable.addRow(averageRow.forDataTable());
+
+ rows.forEach(chartRow => {
+ let rowIndex = dataTable.addRow(chartRow.forDataTable(maxTotal));
+ dataTable.setRowProperty(rowIndex, 'page', chartRow.linkedPage);
+ });
+ }
+
+ function showPageVersionGraph(groups, page) {
+ let dataTable = getGraphDataTable(groups, page);
+ let vs = versions.getPageVersions(page);
+ // Calculate the entries for the versions
+ const rows = vs.map(page => new ChartRow(
+ page, page.version.name, ChartRow.kSortByFirstValue,
+ groups.map(group => page.getEntry(group).time),
+ page.version === baselineVersion));
+ renderGraph(`Versions for ${page.name}`, groups, dataTable, rows,
+ 'pageVersionGraph', true);
+ }
+
function showPageGraph(groups, page) {
let isDiffView = baselineVersion !== undefined;
- let dataTable = getGraphDataTable(groups);
+ let dataTable = getGraphDataTable(groups, page);
// Calculate the average row
- let row = ['Average'];
- groups.forEach((group) => {
- if (isDiffView) {
- row.push(group.isTotal ? 0 : group.getAverageTimeImpact());
- } else {
- row.push(group.isTotal ? 0 : group.getTimeImpact());
- }
- });
- dataTable.addRow(row);
// Sort the pages by the selected group.
let pages = page.version.pages.filter(page => page.enabled);
- function sumDiff(page) {
- let sum = 0;
- groups.forEach(group => {
- let value = group.getTimePercentImpact() -
- page.getEntry(group).timePercent;
- sum += value * value;
- });
- return sum;
- }
- if (isDiffView) {
- pages.sort((a, b) => {
- return b.getEntry(selectedGroup).time-
- a.getEntry(selectedGroup).time;
- });
- } else {
- pages.sort((a, b) => {
- return b.getEntry(selectedGroup).timePercent -
- a.getEntry(selectedGroup).timePercent;
- });
- }
- // Sort by sum of squared distance to the average.
- // pages.sort((a, b) => {
- // return a.distanceFromTotalPercent() - b.distanceFromTotalPercent();
- // });
// Calculate the entries for the pages
- pages.forEach((page) => {
- row = [page.name];
- groups.forEach((group) => {
- row.push(group.isTotal ? 0 : page.getEntry(group).time);
- });
- let rowIndex = dataTable.addRow(row);
- dataTable.setRowProperty(rowIndex, 'page', page);
- });
- renderGraph('Pages for ' + page.version.name, groups, dataTable,
+ const rows = pages.map(page => new ChartRow(
+ page, page.name,
+ isDiffView ?
+ ChartRow.kSortByFirstValue : ChartRow.kSortFirstValueRelative,
+ groups.map(group => page.getEntry(group).time)));
+ renderGraph(`Pages for ${page.version.name}`, groups, dataTable, rows,
'pageGraph', isDiffView ? true : 'percent');
}
function showVersionGraph(groups, page) {
- let dataTable = getGraphDataTable(groups);
- let row;
+ let dataTable = getGraphDataTable(groups, page);
let vs = versions.versions.filter(version => version.enabled);
- vs.sort((a, b) => {
- return b.getEntry(selectedGroup).getTimeImpact() -
- a.getEntry(selectedGroup).getTimeImpact();
- });
// Calculate the entries for the versions
- vs.forEach((version) => {
- row = [version.name];
- groups.forEach((group) => {
- row.push(group.isTotal ? 0 : version.getEntry(group).getTimeImpact());
- });
- let rowIndex = dataTable.addRow(row);
- dataTable.setRowProperty(rowIndex, 'page', page);
- });
- renderGraph('Versions Total Time over all Pages', groups, dataTable,
+ const rows = vs.map((version) => new ChartRow(
+ version.get(page), version.name, ChartRow.kSortByFirstValue,
+ groups.map(group => version.getEntry(group).getTimeImpact()),
+ version === baselineVersion));
+ renderGraph('Versions Total Time over all Pages', groups, dataTable, rows,
'versionGraph', true);
}
- function showPageVersionGraph(groups, page) {
- let dataTable = getGraphDataTable(groups);
- let row;
- let vs = versions.getPageVersions(page);
- vs.sort((a, b) => {
- return b.getEntry(selectedGroup).time - a.getEntry(selectedGroup).time;
- });
- // Calculate the entries for the versions
- vs.forEach((page) => {
- row = [page.version.name];
- groups.forEach((group) => {
- row.push(group.isTotal ? 0 : page.getEntry(group).time);
- });
- let rowIndex = dataTable.addRow(row);
- dataTable.setRowProperty(rowIndex, 'page', page);
- });
- renderGraph('Versions for ' + page.name, groups, dataTable,
- 'pageVersionGraph', true);
- }
-
- function renderGraph(title, groups, dataTable, id, isStacked) {
+ function renderGraph(title, groups, dataTable, rows, id, isStacked) {
let isDiffView = baselineVersion !== undefined;
+ setDataTableRows(dataTable, rows);
let formatter = new google.visualization.NumberFormat({
suffix: (isDiffView ? 'msΔ' : 'ms'),
negativeColor: 'red',
@@ -848,21 +888,18 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
minValue: 0,
textStyle: { fontSize: 14 }
},
- animation:{
- duration: dataTable.getNumberOfRows() > 50 ? 0 : 500 ,
- easing: 'out',
- },
vAxis: {
textStyle: { fontSize: 14 }
},
tooltip: { textStyle: { fontSize: 14 }},
+ annotations: { textStyle: { fontSize: 8 }},
explorer: {
actions: ['dragToZoom', 'rightClickToReset'],
maxZoomIn: 0.01
},
- legend: {position:'top', maxLines: 1, textStyle: { fontSize: 14 }},
- chartArea: {left:200, top:50, width:'98%', height:'80%'},
- colors: groups.map(each => each.color)
+ legend: {position:'top', maxLines: 3, textStyle: { fontSize: 12 }},
+ chartArea: {left:200, top:50 },
+ colors: [...groups.map(each => each.color), /* Chart Total */ "#000000"]
};
let parentNode = $(id);
parentNode.querySelector('h2>span, h3>span').textContent = title;
@@ -886,7 +923,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
if (!page) return selectedGroup;
return page.getEntry(selectedGroup);
}
- function selectHandler() {
+ function selectHandler(e) {
selectedGroup = getChartEntry(chart.getSelection()[0])
if (!selectedGroup) return;
selectEntry(selectedGroup, true);
@@ -1066,27 +1103,45 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
// EventHandlers
function handleBodyLoad() {
$('uploadInput').focus();
- if (defaultData) {
- handleLoadJSON(defaultData);
- } else if (window.location.protocol !== 'file:') {
- tryLoadDefaultResults();
- }
+ if (defaultData) return handleLoadJSON(defaultData);
+ if (tryLoadFromURLParams()) return;
+ if (window.location.protocol !== 'file:') return tryLoadDefaultResults();
}
- function tryLoadDefaultResults() {
+ async function tryLoadDefaultResults() {
// Try to load a results.json file adjacent to this day.
- let xhr = new XMLHttpRequest();
// The markers on the following line can be used to replace the url easily
// with scripts.
- xhr.open('GET', /*results-url-start*/'results.json'/*results-url-end*/, true);
- xhr.onreadystatechange = function(e) {
- if(this.readyState !== XMLHttpRequest.DONE || this.status !== 200) return;
- handleLoadText(this.responseText);
- };
- xhr.send();
+ const url = /*results-url-start*/'results.json'/*results-url-end*/;
+ tryLoadFile(url);
+ }
+
+ async function tryLoadFile(url, append=false) {
+ if (!url.startsWith('http')) {
+ // hack to get relative urls
+ let location = window.location;
+ let parts = location.pathname.split("/").slice(0, -1);
+ url = location.origin + parts.join('/') + '/' + url;
+ }
+ let response = await fetch(url);
+ if (!response.ok) return false;
+ let filename = url.split('/');
+ filename = filename[filename.length-1];
+ handleLoadText(await response.text(), append, filename);
+ }
+
+ async function tryLoadFromURLParams() {
+ let params = new URLSearchParams(document.location.search);
+ let hasFile = false;
+ params.forEach((value, key) => {
+ if (key !== 'file') return;
+ hasFile = true;
+ tryLoadFile(value, true);
+ });
+ return hasFile;
}
- function handleAppendFile() {
+ function handleAppendFiles() {
let files = document.getElementById("appendInput").files;
loadFiles(files, true);
}
@@ -1096,25 +1151,30 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
loadFiles(files, false)
}
- function loadFiles(files, append) {
- let file = files[0];
- let reader = new FileReader();
-
- reader.onload = function(evt) {
- handleLoadText(this.result, append, file.name);
+ async function loadFiles(files, append) {
+ for (let i = 0; i < files.length; i++) {
+ const file = files[i];
+ console.log(file.name);
+ let text = await new Promise((resolve, reject) => {
+ const reader = new FileReader();
+ reader.onload = () => resolve(reader.result)
+ reader.readAsText(file);
+ });
+ handleLoadText(text, append, file.name);
}
- reader.readAsText(file);
}
function handleLoadText(text, append, fileName) {
- try {
+ if (fileName.endsWith('.json')) {
handleLoadJSON(JSON.parse(text), append, fileName);
- } catch(e) {
- if (!fileName.endsWith('.txt')) {
- alert(`Error parsing "${fileName}"`);
- console.error(e);
- }
+ } else if (fileName.endsWith('.csv') ||
+ fileName.endsWith('.output') || fileName.endsWith('.output.txt')) {
+ handleLoadCSV(text, append, fileName);
+ } else if (fileName.endsWith('.txt')) {
handleLoadTXT(text, append, fileName);
+ } else {
+ alert(`Error parsing "${fileName}"`);
+ console.error(e);
}
}
@@ -1130,10 +1190,10 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
function handleLoadJSON(json, append, fileName) {
- let isFirstLoad = pages === undefined;
json = fixClusterTelemetryResults(json);
json = fixTraceImportJSON(json);
json = fixSingleVersionJSON(json, fileName);
+ let isFirstLoad = pages === undefined;
if (append && !isFirstLoad) {
json = createUniqueVersions(json)
}
@@ -1146,6 +1206,162 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
displayResultsAfterLoading(isFirstLoad)
}
+ function handleLoadCSV(csv, append, fileName) {
+ let isFirstLoad = pages === undefined;
+ if (!append || isFirstLoad) {
+ pages = new Pages();
+ versions = new Versions();
+ }
+ const lines = csv.split(/\r?\n/);
+ // The first line contains only the field names.
+ const fields = new Map();
+ csvSplit(lines[0]).forEach((name, index) => {
+ fields.set(name, index);
+ });
+ if (fields.has('displayLabel') && fields.has('stories')) {
+ handleLoadResultCSV(fields, lines, fileName)
+ } else if (fields.has('page_name')) {
+ handleLoadClusterTelemetryCSV(fields, lines, fileName)
+ } else {
+ return alert("Unknown CSV format");
+ }
+ displayResultsAfterLoading(isFirstLoad)
+ }
+
+
+ function csvSplit(line) {
+ let fields = [];
+ let index = 0;
+ while (index < line.length) {
+ let lastIndex = index;
+ if (line[lastIndex] == '"') {
+ index = line.indexOf('"', lastIndex+1);
+ if (index < 0) index = line.length;
+ fields.push(line.substring(lastIndex+1, index));
+ // Consume ','
+ index++;
+ } else {
+ index = line.indexOf(',', lastIndex);
+ if (index === -1) index = line.length;
+ fields.push(line.substring(lastIndex, index))
+ }
+ // Consume ','
+ index++;
+ }
+ return fields;
+ }
+
+ function handleLoadClusterTelemetryCSV(fields, lines, fileName) {
+ const rscFields = Array.from(fields.keys())
+ .filter(field => field.endsWith(':duration (ms)'))
+ .map(field => {
+ let name = field.split(':')[0];
+ return [name, fields.get(field), fields.get(`${name}:count`)];
+ })
+ const page_name_i = fields.get('page_name');
+ const version = versions.getOrCreate(fileName);
+ for (let i=1; i<lines.length; i++) {
+ const line = csvSplit(lines[i]);
+ if (line.length == 0) continue;
+ let page_name = line[page_name_i];
+ if (page_name === undefined) continue;
+ page_name = page_name.split(' ')[0];
+ const pageVersion = version.getOrCreate(page_name);
+ for (let [fieldName, duration_i, count_i] of rscFields) {
+ const duration = Number.parseFloat(line[duration_i]);
+ const count = Number.parseFloat(line[count_i]);
+ // Skip over entries without metrics (most likely crashes)
+ if (Number.isNaN(count)|| Number.isNaN(duration)) {
+ console.warn(`BROKEN ${page_name}`, lines[i])
+ break;
+ }
+ pageVersion.add(new Entry(0, fieldName, duration, 0, 0, count, 0 ,0))
+ }
+ }
+ }
+
+ function handleLoadResultCSV(fields, lines, fileName) {
+ const version_i = fields.get('displayLabel');
+ const page_i = fields.get('stories');
+ const category_i = fields.get('name');
+ const value_i = fields.get('avg');
+ // Ignore the following categories as they are aggregated values and are
+ // created by callstats.html on the fly.
+ const skip_categories = new Set([
+ 'V8-Only', 'V8-Only-Main-Thread', 'Total-Main-Thread', 'Blink_Total'])
+ const tempEntriesCache = new Map();
+ for (let i=1; i<lines.length; i++) {
+ const line = csvSplit(lines[i]);
+ if (line.length == 0) continue;
+ const raw_category = line[category_i];
+ if (!raw_category.endsWith(':duration') &&
+ !raw_category.endsWith(':count')) {
+ continue;
+ }
+ let [category, type] = raw_category.split(':');
+ if (skip_categories.has(category)) continue;
+ const version = versions.getOrCreate(line[version_i]);
+ const pageVersion = version.getOrCreate(line[page_i]);
+ const value = Number.parseFloat(line[value_i]);
+ const entry = TempEntry.get(tempEntriesCache, pageVersion, category);
+ if (type == 'duration') {
+ entry.durations.push(value)
+ } else {
+ entry.counts.push(value)
+ }
+ }
+
+ tempEntriesCache.forEach((tempEntries, pageVersion) => {
+ tempEntries.forEach(tmpEntry => {
+ pageVersion.add(tmpEntry.toEntry())
+ })
+ });
+ }
+
+ class TempEntry {
+ constructor(category) {
+ this.category = category;
+ this.durations = [];
+ this.counts = [];
+ }
+
+ static get(cache, pageVersion, category) {
+ let tempEntries = cache.get(pageVersion);
+ if (tempEntries === undefined) {
+ tempEntries = new Map();
+ cache.set(pageVersion, tempEntries);
+ }
+ let tempEntry = tempEntries.get(category);
+ if (tempEntry === undefined) {
+ tempEntry = new TempEntry(category);
+ tempEntries.set(category, tempEntry);
+ }
+ return tempEntry;
+ }
+
+ toEntry() {
+ const [duration, durationStddev] = this.stats(this.durations);
+      const [count, countStddev] = this.stats(this.counts);
+ return new Entry(0, this.category,
+ duration, durationStddev, 0, count, countStddev, 0)
+ }
+
+ stats(values) {
+ let sum = 0;
+ for (let i = 0; i < values.length; i++) {
+ sum += values[i];
+ }
+ const avg = sum / values.length;
+ let stddevSquared = 0;
+ for (let i = 0; i < values.length; i++) {
+ const delta = values[i] - avg;
+ stddevSquared += delta * delta;
+ }
+ const stddev = Math.sqrt(stddevSquared / values.length);
+ return [avg, stddev];
+ }
+ }
+
function handleLoadTXT(txt, append, fileName) {
let isFirstLoad = pages === undefined;
// Load raw RCS output which contains a single page
@@ -1153,21 +1369,22 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
pages = new Pages();
versions = new Versions()
}
- versions.add(Version.fromTXT(fileName, txt))
- displayResultsAfterLoading()
+ versions.add(Version.fromTXT(fileName, txt));
+ displayResultsAfterLoading(isFirstLoad);
}
- function displayResultsAfterLoading(isFirstLoad) {
+ function displayResultsAfterLoading(isFirstLoad=true) {
let state = getStateFromParams();
initialize()
if (isFirstLoad && !popHistoryState(state) && selectedPage) {
showEntry(selectedPage.total);
return;
}
- selectedPage = versions.versions[0].pages[0]
- if (selectedPage == undefined) return;
- showPage(selectedPage);
+ const page = versions.versions[0].pages[0]
+ if (page == undefined) return;
+ showPage(page);
+ showEntry(page.total);
}
function fixClusterTelemetryResults(json) {
@@ -1193,7 +1410,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
let count = entry.count;
let time = entry.time;
entries.push([name, time, 0, 0, count, 0, 0]);
- }
+ }
let domain = file_name.split("/").slice(-1)[0];
result[domain] = entries;
}
@@ -1236,7 +1453,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
let total = page_data['Total'];
total.duration.average += metric_duration * kMicroToMilli;
total.count.average += metric_count;
- }
+ }
}
version_data[page_name] = page_data;
}
@@ -1279,6 +1496,24 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
return result
}
+ function handleCopyToClipboard(event) {
+    const names = ["Group", ...versions.versions.map(e => e.name)];
+    let result = [names.join("\t")];
+ let groups = Array.from(Group.groups.values());
+ // Move the total group to the end.
+ groups.push(groups.shift())
+ groups.forEach(group => {
+ let row = [group.name];
+ versions.forEach(v => {
+ const time = v.pages[0].get("Group-"+group.name)?._time ?? 0;
+ row.push(time)
+ })
+ result.push(row.join("\t"));
+ });
+ result = result.join("\n");
+ navigator.clipboard.writeText(result)
+ }
+
function handleToggleGroup(event) {
let group = event.target.parentNode.parentNode.entry;
toggleGroup(selectedPage.get(group.name));
@@ -1397,7 +1632,8 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
this.versions = [];
}
add(version) {
- this.versions.push(version)
+ this.versions.push(version);
+ return version;
}
getPageVersions(page) {
let result = [];
@@ -1417,6 +1653,9 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
getByName(name) {
return this.versions.find((each) => each.name == name);
}
+ getOrCreate(name) {
+ return this.getByName(name) ?? this.add(new Version(name))
+ }
forEach(f) {
this.versions.forEach(f);
}
@@ -1450,6 +1689,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
add(page) {
this.pages.push(page);
+ return page;
}
indexOf(name) {
for (let i = 0; i < this.pages.length; i++) {
@@ -1466,6 +1706,10 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
if (0 <= index) return this.pages[index];
return undefined
}
+ getOrCreate(name) {
+ return this.get(name) ??
+ this.add(new PageVersion(this, pages.getOrCreate(name)));
+ }
get length() {
return this.pages.length
}
@@ -1577,6 +1821,9 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
return super.get(name);
}
+ getOrCreate(name) {
+ return this.get(name);
+ }
}
class Page {
@@ -1585,8 +1832,9 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
this.enabled = true;
this.versions = [];
}
- add(page) {
- this.versions.push(page);
+ add(pageVersion) {
+ this.versions.push(pageVersion);
+ return pageVersion;
}
}
@@ -1609,6 +1857,8 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
Group.groups.get('blink').entry(),
Group.groups.get('callback').entry(),
Group.groups.get('api').entry(),
+ Group.groups.get('gc-custom').entry(),
+ Group.groups.get('gc-background').entry(),
Group.groups.get('gc').entry(),
Group.groups.get('javascript').entry(),
Group.groups.get('runtime').entry(),
@@ -1629,13 +1879,26 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
add(entry) {
// Ignore accidentally added Group entries.
- if (entry.name.startsWith(GroupedEntry.prefix)) return;
- entry.page = this;
- this.entryDict.set(entry.name, entry);
- for (let group of this.groups) {
- if (group.add(entry)) return;
+ if (entry.name.startsWith(GroupedEntry.prefix)) {
+ console.warn("Skipping accidentally added Group entry:", entry, this);
+ return;
+ }
+ let existingEntry = this.entryDict.get(entry.name);
+ if (existingEntry !== undefined) {
+ // Duplicate entries happen when multiple runs are combined into a
+ // single file.
+ existingEntry.add(entry);
+ for (let group of this.groups) {
+ if (group.addTimeAndCount(entry)) return;
+ }
+ } else {
+ entry.page = this;
+ this.entryDict.set(entry.name, entry);
+ for (let group of this.groups) {
+ if (group.add(entry)) return;
+ }
}
- console.error("Sould not get here", entry);
+ console.error("Should not get here", entry);
}
get(name) {
return this.entryDict.get(name)
@@ -1718,6 +1981,9 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
// Skip the first two lines (HEADER and SEPARATOR)
for (let i = 2; i < lines.length; i++) {
let line = lines[i].trim().split(split)
+ // Skip header lines
+ if (lines[i].startsWith("======")) continue;
+ if (lines[i+1]?.startsWith("======")) continue;
if (line.length != 5) continue;
let position = i-2;
pageVersion.add(Entry.fromTXT(position, line));
@@ -1729,20 +1995,35 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
class Entry {
constructor(position, name, time, timeVariance, timeVariancePercent,
- count,
- countVariance, countVariancePercent) {
+ count, countVariance, countVariancePercent) {
this.position = position;
this.name = name;
this._time = time;
this._timeVariance = timeVariance;
- this._timeVariancePercent = timeVariancePercent;
+ this._timeVariancePercent =
+ this._variancePercent(time, timeVariance, timeVariancePercent);
this._count = count;
this.countVariance = countVariance;
- this.countVariancePercent = countVariancePercent;
+ this.countVariancePercent =
+ this._variancePercent(count, countVariance, countVariancePercent);
this.page = undefined;
this.parent = undefined;
this.isTotal = false;
}
+ _variancePercent(value, valueVariance, valueVariancePercent) {
+ if (valueVariancePercent) return valueVariancePercent;
+ if (!valueVariance) return 0;
+ return valueVariance / value * 100;
+ }
+
+ add(entry) {
+ if (this.name != entry.name) {
+ console.error("Should not combine entries with different names");
+ return;
+ }
+ this._time += entry._time;
+ this._count += entry._count;
+ }
urlParams() {
let params = this.page.urlParams();
params.entry = this.name;
@@ -1841,7 +2122,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
this.regexp = regexp;
this.color = color;
this.enabled = enabled;
- this.addsToTotal = addsToTotal;
+ this.addsToTotal = addsToTotal;
}
entry() { return new GroupedEntry(this) };
}
@@ -1851,27 +2132,32 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
return group;
}
Group.add('total', new Group('Total', /.*Total.*/, '#BBB', true, false));
- Group.add('ic', new Group('IC', /.*IC_.*/, "#3366CC"));
+ Group.add('ic', new Group('IC', /(.*IC_.*)|IC/, "#3366CC"));
Group.add('optimize-background', new Group('Optimize-Background',
- /(.*OptimizeBackground.*)/, "#702000"));
+ /(.*Optimize-?Background.*)/, "#702000"));
Group.add('optimize', new Group('Optimize',
/StackGuard|.*Optimize.*|.*Deoptimize.*|Recompile.*/, "#DC3912"));
Group.add('compile-background', new Group('Compile-Background',
- /(.*CompileBackground.*)/, "#b08000"));
+ /(.*Compile-?Background.*)/, "#b08000"));
Group.add('compile', new Group('Compile',
/(^Compile.*)|(.*_Compile.*)/, "#FFAA00"));
Group.add('parse-background',
- new Group('Parse-Background', /.*ParseBackground.*/, "#c05000"));
+ new Group('Parse-Background', /.*Parse-?Background.*/, "#c05000"));
Group.add('parse', new Group('Parse', /.*Parse.*/, "#FF6600"));
- Group.add('callback', new Group('Blink C++', /.*Callback*/, "#109618"));
+ Group.add('callback',
+ new Group('Blink C++', /.*(Callback)|(Blink C\+\+).*/, "#109618"));
Group.add('api', new Group('API', /.*API.*/, "#990099"));
Group.add('gc-custom', new Group('GC-Custom', /GC_Custom_.*/, "#0099C6"));
Group.add('gc-background',
- new Group('GC-Background', /.*GC.*BACKGROUND.*/, "#00597c"));
- Group.add('gc', new Group('GC', /GC_.*|AllocateInTargetSpace/, "#00799c"));
- Group.add('javascript', new Group('JavaScript', /JS_Execution/, "#DD4477"));
+ new Group(
+ 'GC-Background', /.*GC.*(BACKGROUND|Background).*/, "#00597c"));
+ Group.add('gc',
+ new Group('GC', /GC_.*|AllocateInTargetSpace|GC/, "#00799c"));
+ Group.add('javascript',
+ new Group('JavaScript', /JS_Execution|JavaScript/, "#DD4477"));
Group.add('runtime', new Group('V8 C++', /.*/, "#88BB00"));
- Group.add('blink', new Group('Blink RCS', /.*Blink_.*/, "#006600", false, false));
+ Group.add('blink',
+ new Group('Blink RCS', /.*Blink_.*/, "#006600", false, false));
Group.add('unclassified', new Group('Unclassified', /.*/, "#000", false));
class GroupedEntry extends Entry {
@@ -1886,18 +2172,23 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
get color() { return this.group.color }
get enabled() { return this.group.enabled }
add(entry) {
- if (!this.regexp.test(entry.name)) return false;
- this._time += entry.time;
- this._count += entry.count;
+ if (!this.addTimeAndCount(entry)) return;
// TODO: sum up variance
this.entries.push(entry);
entry.parent = this;
return true;
}
+ addTimeAndCount(entry) {
+ if (!this.regexp.test(entry.name)) return false;
+ this._time += entry.time;
+ this._count += entry.count;
+ return true;
+ }
_initializeMissingEntries() {
let dummyEntryNames = new Set();
versions.forEach((version) => {
- let groupEntry = version.getEntry(this);
+ let page = version.getOrCreate(this.page.name);
+ let groupEntry = page.get(this.name);
if (groupEntry != this) {
for (let entry of groupEntry.entries) {
if (this.page.get(entry.name) == undefined) {
@@ -1913,7 +2204,6 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
this.missingEntries.push(tmpEntry);
};
}
-
forEach(fun) {
// Show also all entries which are in at least one version.
// Concatenate our real entries.
@@ -1942,9 +2232,10 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
getVarianceForProperty(property) {
let sum = 0;
+ const key = property + 'Variance';
this.entries.forEach((entry) => {
- sum += entry[property + 'Variance'] * entry[property +
- 'Variance'];
+ const value = entry[key];
+ sum += value * value;
});
return Math.sqrt(sum);
}
@@ -2009,13 +2300,16 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
<form name="fileForm">
<p>
<label for="uploadInput">Load File:</label>
- <input id="uploadInput" type="file" name="files" onchange="handleLoadFile();" accept=".json,.txt">
+ <input id="uploadInput" type="file" name="files" onchange="handleLoadFile();" accept=".json,.txt,.csv,.output">
</p>
<p>
- <label for="appendInput">Append File:</label>
- <input id="appendInput" type="file" name="files" onchange="handleAppendFile();" accept=".json,.txt">
+ <label for="appendInput">Append Files:</label>
+ <input id="appendInput" type="file" name="files" onchange="handleAppendFiles();" multiple accept=".json,.txt,.csv,.output">
</p>
</form>
+ <p>
+ <button onclick="handleCopyToClipboard()">Copy Table to Clipboard</button>
+ </p>
</div>
<div class="inline hidden">
@@ -2167,7 +2461,13 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
</li>
<li>Load the generated <code>out.json</code></li>
</ol>
- <h3>Raw approach</h3>
+ <h3>Merged CSV from results.html</h3>
+ <ol>
+ <li>Open a results.html page for RCS-enabled benchmarks</li>
+ <li>Select "Export merged CSV" in the toolbar</li>
+      <li>Load the downloaded .csv file normally in callstats.html</li>
+ </ol>
+ <h3>Aggregated raw txt output</h3>
<ol>
<li>Install scipy, e.g. <code>sudo aptitude install python-scipy</code>
<li>Check out a known working version of webpagereply:
diff --git a/deps/v8/tools/check-static-initializers.sh b/deps/v8/tools/check-static-initializers.sh
index fdd1e8417d..e59b5c8735 100755
--- a/deps/v8/tools/check-static-initializers.sh
+++ b/deps/v8/tools/check-static-initializers.sh
@@ -30,9 +30,12 @@
# initializer in d8 matches the one defined below.
# Allow:
-# _GLOBAL__sub_I_d8.cc
+# _GLOBAL__I_000101
# _GLOBAL__sub_I_iostream.cpp
-expected_static_init_count=2
+# _GLOBAL__sub_I_d8.cc
+# The first two are needed to set up std::cin/cout/cerr before main() runs.
+# See https://crbug.com/1177324 for more.
+expected_static_init_count=3
v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
diff --git a/deps/v8/tools/clusterfuzz/v8_fuzz_flags.json b/deps/v8/tools/clusterfuzz/v8_fuzz_flags.json
index e3a6ef4066..8d52aeb2fe 100644
--- a/deps/v8/tools/clusterfuzz/v8_fuzz_flags.json
+++ b/deps/v8/tools/clusterfuzz/v8_fuzz_flags.json
@@ -17,9 +17,6 @@
[0.1, "--regexp-tier-up-ticks=100"],
[0.1, "--turbo-instruction-scheduling"],
[0.1, "--turbo-stress-instruction-scheduling"],
- [0.1, "--no-enable-sse3"],
- [0.1, "--no-enable-ssse3"],
- [0.1, "--no-enable-sse4_1"],
[0.1, "--no-enable-sse4_2"],
[0.1, "--no-enable-sahf"],
[0.1, "--no-enable-avx"],
diff --git a/deps/v8/tools/codemap.js b/deps/v8/tools/codemap.js
deleted file mode 100644
index 71e3e6a5d8..0000000000
--- a/deps/v8/tools/codemap.js
+++ /dev/null
@@ -1,320 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-/**
- * Constructs a mapper that maps addresses into code entries.
- *
- * @constructor
- */
-function CodeMap() {
- /**
- * Dynamic code entries. Used for JIT compiled code.
- */
- this.dynamics_ = new SplayTree();
-
- /**
- * Name generator for entries having duplicate names.
- */
- this.dynamicsNameGen_ = new CodeMap.NameGenerator();
-
- /**
- * Static code entries. Used for statically compiled code.
- */
- this.statics_ = new SplayTree();
-
- /**
- * Libraries entries. Used for the whole static code libraries.
- */
- this.libraries_ = new SplayTree();
-
- /**
- * Map of memory pages occupied with static code.
- */
- this.pages_ = [];
-};
-
-
-/**
- * The number of alignment bits in a page address.
- */
-CodeMap.PAGE_ALIGNMENT = 12;
-
-
-/**
- * Page size in bytes.
- */
-CodeMap.PAGE_SIZE =
- 1 << CodeMap.PAGE_ALIGNMENT;
-
-
-/**
- * Adds a dynamic (i.e. moveable and discardable) code entry.
- *
- * @param {number} start The starting address.
- * @param {CodeMap.CodeEntry} codeEntry Code entry object.
- */
-CodeMap.prototype.addCode = function(start, codeEntry) {
- this.deleteAllCoveredNodes_(this.dynamics_, start, start + codeEntry.size);
- this.dynamics_.insert(start, codeEntry);
-};
-
-
-/**
- * Moves a dynamic code entry. Throws an exception if there is no dynamic
- * code entry with the specified starting address.
- *
- * @param {number} from The starting address of the entry being moved.
- * @param {number} to The destination address.
- */
-CodeMap.prototype.moveCode = function(from, to) {
- var removedNode = this.dynamics_.remove(from);
- this.deleteAllCoveredNodes_(this.dynamics_, to, to + removedNode.value.size);
- this.dynamics_.insert(to, removedNode.value);
-};
-
-
-/**
- * Discards a dynamic code entry. Throws an exception if there is no dynamic
- * code entry with the specified starting address.
- *
- * @param {number} start The starting address of the entry being deleted.
- */
-CodeMap.prototype.deleteCode = function(start) {
- var removedNode = this.dynamics_.remove(start);
-};
-
-
-/**
- * Adds a library entry.
- *
- * @param {number} start The starting address.
- * @param {CodeMap.CodeEntry} codeEntry Code entry object.
- */
-CodeMap.prototype.addLibrary = function(
- start, codeEntry) {
- this.markPages_(start, start + codeEntry.size);
- this.libraries_.insert(start, codeEntry);
-};
-
-
-/**
- * Adds a static code entry.
- *
- * @param {number} start The starting address.
- * @param {CodeMap.CodeEntry} codeEntry Code entry object.
- */
-CodeMap.prototype.addStaticCode = function(
- start, codeEntry) {
- this.statics_.insert(start, codeEntry);
-};
-
-
-/**
- * @private
- */
-CodeMap.prototype.markPages_ = function(start, end) {
- for (var addr = start; addr <= end;
- addr += CodeMap.PAGE_SIZE) {
- this.pages_[(addr / CodeMap.PAGE_SIZE)|0] = 1;
- }
-};
-
-
-/**
- * @private
- */
-CodeMap.prototype.deleteAllCoveredNodes_ = function(tree, start, end) {
- var to_delete = [];
- var addr = end - 1;
- while (addr >= start) {
- var node = tree.findGreatestLessThan(addr);
- if (!node) break;
- var start2 = node.key, end2 = start2 + node.value.size;
- if (start2 < end && start < end2) to_delete.push(start2);
- addr = start2 - 1;
- }
- for (var i = 0, l = to_delete.length; i < l; ++i) tree.remove(to_delete[i]);
-};
-
-
-/**
- * @private
- */
-CodeMap.prototype.isAddressBelongsTo_ = function(addr, node) {
- return addr >= node.key && addr < (node.key + node.value.size);
-};
-
-
-/**
- * @private
- */
-CodeMap.prototype.findInTree_ = function(tree, addr) {
- var node = tree.findGreatestLessThan(addr);
- return node && this.isAddressBelongsTo_(addr, node) ? node : null;
-};
-
-
-/**
- * Finds a code entry that contains the specified address. Both static and
- * dynamic code entries are considered. Returns the code entry and the offset
- * within the entry.
- *
- * @param {number} addr Address.
- */
-CodeMap.prototype.findAddress = function(addr) {
- var pageAddr = (addr / CodeMap.PAGE_SIZE)|0;
- if (pageAddr in this.pages_) {
- // Static code entries can contain "holes" of unnamed code.
- // In this case, the whole library is assigned to this address.
- var result = this.findInTree_(this.statics_, addr);
- if (!result) {
- result = this.findInTree_(this.libraries_, addr);
- if (!result) return null;
- }
- return { entry : result.value, offset : addr - result.key };
- }
- var min = this.dynamics_.findMin();
- var max = this.dynamics_.findMax();
- if (max != null && addr < (max.key + max.value.size) && addr >= min.key) {
- var dynaEntry = this.findInTree_(this.dynamics_, addr);
- if (dynaEntry == null) return null;
- // Dedupe entry name.
- var entry = dynaEntry.value;
- if (!entry.nameUpdated_) {
- entry.name = this.dynamicsNameGen_.getName(entry.name);
- entry.nameUpdated_ = true;
- }
- return { entry : entry, offset : addr - dynaEntry.key };
- }
- return null;
-};
-
-
-/**
- * Finds a code entry that contains the specified address. Both static and
- * dynamic code entries are considered.
- *
- * @param {number} addr Address.
- */
-CodeMap.prototype.findEntry = function(addr) {
- var result = this.findAddress(addr);
- return result ? result.entry : null;
-};
-
-
-/**
- * Returns a dynamic code entry using its starting address.
- *
- * @param {number} addr Address.
- */
-CodeMap.prototype.findDynamicEntryByStartAddress =
- function(addr) {
- var node = this.dynamics_.find(addr);
- return node ? node.value : null;
-};
-
-
-/**
- * Returns an array of all dynamic code entries.
- */
-CodeMap.prototype.getAllDynamicEntries = function() {
- return this.dynamics_.exportValues();
-};
-
-
-/**
- * Returns an array of pairs of all dynamic code entries and their addresses.
- */
-CodeMap.prototype.getAllDynamicEntriesWithAddresses = function() {
- return this.dynamics_.exportKeysAndValues();
-};
-
-
-/**
- * Returns an array of all static code entries.
- */
-CodeMap.prototype.getAllStaticEntries = function() {
- return this.statics_.exportValues();
-};
-
-
-/**
- * Returns an array of pairs of all static code entries and their addresses.
- */
-CodeMap.prototype.getAllStaticEntriesWithAddresses = function() {
- return this.statics_.exportKeysAndValues();
-};
-
-
-/**
- * Returns an array of all libraries entries.
- */
-CodeMap.prototype.getAllLibrariesEntries = function() {
- return this.libraries_.exportValues();
-};
-
-
-/**
- * Creates a code entry object.
- *
- * @param {number} size Code entry size in bytes.
- * @param {string} opt_name Code entry name.
- * @param {string} opt_type Code entry type, e.g. SHARED_LIB, CPP.
- * @constructor
- */
-CodeMap.CodeEntry = function(size, opt_name, opt_type) {
- this.size = size;
- this.name = opt_name || '';
- this.type = opt_type || '';
- this.nameUpdated_ = false;
-};
-
-
-CodeMap.CodeEntry.prototype.getName = function() {
- return this.name;
-};
-
-
-CodeMap.CodeEntry.prototype.toString = function() {
- return this.name + ': ' + this.size.toString(16);
-};
-
-
-CodeMap.NameGenerator = function() {
- this.knownNames_ = {};
-};
-
-
-CodeMap.NameGenerator.prototype.getName = function(name) {
- if (!(name in this.knownNames_)) {
- this.knownNames_[name] = 0;
- return name;
- }
- var count = ++this.knownNames_[name];
- return name + ' {' + count + '}';
-};
diff --git a/deps/v8/tools/consarray.js b/deps/v8/tools/consarray.js
deleted file mode 100644
index dbce1de298..0000000000
--- a/deps/v8/tools/consarray.js
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-/**
- * Constructs a ConsArray object. It is used mainly for tree traversal.
- * In this use case we have lots of arrays that we need to iterate
- * sequentally. The internal Array implementation is horribly slow
- * when concatenating on large (10K items) arrays due to memory copying.
- * That's why we avoid copying memory and insead build a linked list
- * of arrays to iterate through.
- *
- * @constructor
- */
-function ConsArray() {
- this.tail_ = new ConsArray.Cell(null, null);
- this.currCell_ = this.tail_;
- this.currCellPos_ = 0;
-};
-
-
-/**
- * Concatenates another array for iterating. Empty arrays are ignored.
- * This operation can be safely performed during ongoing ConsArray
- * iteration.
- *
- * @param {Array} arr Array to concatenate.
- */
-ConsArray.prototype.concat = function(arr) {
- if (arr.length > 0) {
- this.tail_.data = arr;
- this.tail_ = this.tail_.next = new ConsArray.Cell(null, null);
- }
-};
-
-
-/**
- * Whether the end of iteration is reached.
- */
-ConsArray.prototype.atEnd = function() {
- return this.currCell_ === null ||
- this.currCell_.data === null ||
- this.currCellPos_ >= this.currCell_.data.length;
-};
-
-
-/**
- * Returns the current item, moves to the next one.
- */
-ConsArray.prototype.next = function() {
- var result = this.currCell_.data[this.currCellPos_++];
- if (this.currCellPos_ >= this.currCell_.data.length) {
- this.currCell_ = this.currCell_.next;
- this.currCellPos_ = 0;
- }
- return result;
-};
-
-
-/**
- * A cell object used for constructing a list in ConsArray.
- *
- * @constructor
- */
-ConsArray.Cell = function(data, next) {
- this.data = data;
- this.next = next;
-};
diff --git a/deps/v8/tools/csvparser.js b/deps/v8/tools/csvparser.js
deleted file mode 100644
index a4d030441a..0000000000
--- a/deps/v8/tools/csvparser.js
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-/**
- * Creates a CSV lines parser.
- */
-class CsvParser {
- /**
- * Converts \x00 and \u0000 escape sequences in the given string.
- *
- * @param {string} input field.
- **/
- escapeField(string) {
- let nextPos = string.indexOf("\\");
- if (nextPos === -1) return string;
-
- let result = string.substring(0, nextPos);
- // Escape sequences of the form \x00 and \u0000;
- let endPos = string.length;
- let pos = 0;
- while (nextPos !== -1) {
- let escapeIdentifier = string.charAt(nextPos + 1);
- pos = nextPos + 2;
- if (escapeIdentifier === 'n') {
- result += '\n';
- nextPos = pos;
- } else if (escapeIdentifier === '\\') {
- result += '\\';
- nextPos = pos;
- } else {
- if (escapeIdentifier === 'x') {
- // \x00 ascii range escapes consume 2 chars.
- nextPos = pos + 2;
- } else {
- // \u0000 unicode range escapes consume 4 chars.
- nextPos = pos + 4;
- }
- // Convert the selected escape sequence to a single character.
- let escapeChars = string.substring(pos, nextPos);
- result += String.fromCharCode(parseInt(escapeChars, 16));
- }
-
- // Continue looking for the next escape sequence.
- pos = nextPos;
- nextPos = string.indexOf("\\", pos);
- // If there are no more escape sequences consume the rest of the string.
- if (nextPos === -1) {
- result += string.substr(pos);
- } else if (pos !== nextPos) {
- result += string.substring(pos, nextPos);
- }
- }
- return result;
- }
-
- /**
- * Parses a line of CSV-encoded values. Returns an array of fields.
- *
- * @param {string} line Input line.
- */
- parseLine(line) {
- var pos = 0;
- var endPos = line.length;
- var fields = [];
- if (endPos == 0) return fields;
- let nextPos = 0;
- while(nextPos !== -1) {
- nextPos = line.indexOf(',', pos);
- let field;
- if (nextPos === -1) {
- field = line.substr(pos);
- } else {
- field = line.substring(pos, nextPos);
- }
- fields.push(this.escapeField(field));
- pos = nextPos + 1;
- };
- return fields
- }
-}
diff --git a/deps/v8/tools/debug_helper/BUILD.gn b/deps/v8/tools/debug_helper/BUILD.gn
index 064bc32260..54cd3b7a4c 100644
--- a/deps/v8/tools/debug_helper/BUILD.gn
+++ b/deps/v8/tools/debug_helper/BUILD.gn
@@ -77,6 +77,8 @@ v8_component("v8_debug_helper") {
sources = [
"$target_gen_dir/../../torque-generated/class-debug-readers.cc",
"$target_gen_dir/../../torque-generated/class-debug-readers.h",
+ "$target_gen_dir/../../torque-generated/debug-macros.cc",
+ "$target_gen_dir/../../torque-generated/debug-macros.h",
"$target_gen_dir/../../torque-generated/instance-types.h",
"$target_gen_dir/heap-constants-gen.cc",
"compiler-types.cc",
@@ -95,6 +97,8 @@ v8_component("v8_debug_helper") {
"../..:run_torque",
"../..:v8_headers",
"../..:v8_libbase",
+ "../..:v8_shared_internal_headers",
+ "../..:v8_tracing",
]
configs = [ ":internal_config" ]
diff --git a/deps/v8/tools/debug_helper/debug-macro-shims.h b/deps/v8/tools/debug_helper/debug-macro-shims.h
new file mode 100644
index 0000000000..948482810b
--- /dev/null
+++ b/deps/v8/tools/debug_helper/debug-macro-shims.h
@@ -0,0 +1,103 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains implementations of a few macros that are defined
+// as external in Torque, so that generated debug code can work.
+
+#ifndef V8_TORQUE_DEBUG_MACRO_SHIMS_H_
+#define V8_TORQUE_DEBUG_MACRO_SHIMS_H_
+
+#include "src/objects/smi.h"
+#include "tools/debug_helper/debug-helper-internal.h"
+
+// For Object::ReadField<T>.
+#define READ_FIELD_OR_FAIL(Type, destination, accessor, object, offset) \
+ do { \
+ Type value{}; \
+ d::MemoryAccessResult validity = \
+ accessor(object - kHeapObjectTag + offset, \
+ reinterpret_cast<Type*>(&value), sizeof(value)); \
+ if (validity != d::MemoryAccessResult::kOk) return {validity, {}}; \
+ destination = value; \
+ } while (false)
+
+// For TaggedField<T>::load.
+#define READ_TAGGED_FIELD_OR_FAIL(destination, accessor, object, offset) \
+ do { \
+ Tagged_t value{}; \
+ d::MemoryAccessResult validity = \
+ accessor(object - kHeapObjectTag + offset, \
+ reinterpret_cast<uint8_t*>(&value), sizeof(value)); \
+ if (validity != d::MemoryAccessResult::kOk) return {validity, {}}; \
+ destination = EnsureDecompressed(value, object); \
+ } while (false)
+
+// Process Value struct.
+#define ASSIGN_OR_RETURN(dest, val) \
+ do { \
+ if ((val).validity != d::MemoryAccessResult::kOk) \
+ return {(val).validity, {}}; \
+ dest = (val).value; \
+ } while (false)
+
+namespace v8 {
+namespace internal {
+namespace debug_helper_internal {
+namespace TorqueDebugMacroShims {
+namespace CodeStubAssembler {
+
+inline Value<bool> BoolConstant(d::MemoryAccessor accessor, bool b) {
+ return {d::MemoryAccessResult::kOk, b};
+}
+inline Value<intptr_t> ChangeInt32ToIntPtr(d::MemoryAccessor accessor,
+ int32_t i) {
+ return {d::MemoryAccessResult::kOk, i};
+}
+inline Value<uintptr_t> ChangeUint32ToWord(d::MemoryAccessor accessor,
+ uint32_t u) {
+ return {d::MemoryAccessResult::kOk, u};
+}
+inline Value<intptr_t> IntPtrAdd(d::MemoryAccessor accessor, intptr_t a,
+ intptr_t b) {
+ return {d::MemoryAccessResult::kOk, a + b};
+}
+inline Value<intptr_t> IntPtrMul(d::MemoryAccessor accessor, intptr_t a,
+ intptr_t b) {
+ return {d::MemoryAccessResult::kOk, a * b};
+}
+inline Value<intptr_t> Signed(d::MemoryAccessor accessor, uintptr_t u) {
+ return {d::MemoryAccessResult::kOk, static_cast<intptr_t>(u)};
+}
+inline Value<int32_t> SmiUntag(d::MemoryAccessor accessor, uintptr_t s_t) {
+ Smi s(s_t);
+ return {d::MemoryAccessResult::kOk, s.value()};
+}
+inline Value<bool> UintPtrLessThan(d::MemoryAccessor accessor, uintptr_t a,
+ uintptr_t b) {
+ return {d::MemoryAccessResult::kOk, a < b};
+}
+inline Value<uint32_t> Unsigned(d::MemoryAccessor accessor, int32_t s) {
+ return {d::MemoryAccessResult::kOk, static_cast<uint32_t>(s)};
+}
+#if V8_HOST_ARCH_64_BIT
+inline Value<uintptr_t> Unsigned(d::MemoryAccessor accessor, intptr_t s) {
+ return {d::MemoryAccessResult::kOk, static_cast<uintptr_t>(s)};
+}
+#endif
+inline Value<bool> Word32Equal(d::MemoryAccessor accessor, uint32_t a,
+ uint32_t b) {
+ return {d::MemoryAccessResult::kOk, a == b};
+}
+inline Value<bool> Word32NotEqual(d::MemoryAccessor accessor, uint32_t a,
+ uint32_t b) {
+ return {d::MemoryAccessResult::kOk, a != b};
+}
+
+} // namespace CodeStubAssembler
+} // namespace TorqueDebugMacroShims
+} // namespace debug_helper_internal
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_DEBUG_MACRO_SHIMS_H_
diff --git a/deps/v8/tools/dev/gm.py b/deps/v8/tools/dev/gm.py
index a211a990b0..c5ae178c18 100755
--- a/deps/v8/tools/dev/gm.py
+++ b/deps/v8/tools/dev/gm.py
@@ -42,7 +42,7 @@ BUILD_TARGETS_ALL = ["all"]
# All arches that this script understands.
ARCHES = ["ia32", "x64", "arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
- "s390", "s390x", "android_arm", "android_arm64"]
+ "riscv64", "s390", "s390x", "android_arm", "android_arm64"]
# Arches that get built/run when you don't specify any.
DEFAULT_ARCHES = ["ia32", "x64", "arm", "arm64"]
# Modes that this script understands.
@@ -250,7 +250,7 @@ class Config(object):
if self.arch == "android_arm": return "\nv8_target_cpu = \"arm\""
if self.arch == "android_arm64": return "\nv8_target_cpu = \"arm64\""
if self.arch in ("arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
- "s390", "s390x"):
+ "riscv64", "s390", "s390x"):
return "\nv8_target_cpu = \"%s\"" % self.arch
return ""
diff --git a/deps/v8/tools/dev/v8gen.py b/deps/v8/tools/dev/v8gen.py
index 0b6e1d1cdc..18abf8aa25 100755
--- a/deps/v8/tools/dev/v8gen.py
+++ b/deps/v8/tools/dev/v8gen.py
@@ -146,7 +146,7 @@ class GenerateGnArgs(object):
self._options.builder = self._options.outdir
# Check for builder/config in mb config.
- if self._options.builder not in self._mbw.masters[self._options.master]:
+ if self._options.builder not in self._mbw.builder_groups[self._options.master]:
print('%s does not exist in %s for %s' % (
self._options.builder, CONFIG, self._options.master))
return 1
@@ -189,7 +189,7 @@ class GenerateGnArgs(object):
return 0
def cmd_list(self):
- print('\n'.join(sorted(self._mbw.masters[self._options.master])))
+ print('\n'.join(sorted(self._mbw.builder_groups[self._options.master])))
return 0
def verbose_print_1(self, text):
@@ -292,10 +292,10 @@ class GenerateGnArgs(object):
self._mbw.ParseArgs(['lookup', '-f', CONFIG])
self._mbw.ReadConfigFile()
- if not self._options.master in self._mbw.masters:
+ if not self._options.master in self._mbw.builder_groups:
print('%s not found in %s\n' % (self._options.master, CONFIG))
print('Choose one of:\n%s\n' % (
- '\n'.join(sorted(self._mbw.masters.keys()))))
+ '\n'.join(sorted(self._mbw.builder_groups.keys()))))
return 1
return self._options.func()
diff --git a/deps/v8/tools/gcmole/bootstrap.sh b/deps/v8/tools/gcmole/bootstrap.sh
index a0386a8054..7f41be47a7 100755
--- a/deps/v8/tools/gcmole/bootstrap.sh
+++ b/deps/v8/tools/gcmole/bootstrap.sh
@@ -102,7 +102,7 @@ fi
# -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_PROJECTS=clang \
# -DLLVM_ENABLE_Z3_SOLVER=OFF "${LLVM_PROJECT_DIR}/llvm"
# MACOSX_DEPLOYMENT_TARGET=10.5 ninja -j"${NUM_JOBS}"
-#
+#
# # Strip the clang binary.
# STRIP_FLAGS=
# if [ "${OS}" = "Darwin" ]; then
diff --git a/deps/v8/tools/gdbinit b/deps/v8/tools/gdbinit
index f2d4a263cd..b4f85cd396 100644
--- a/deps/v8/tools/gdbinit
+++ b/deps/v8/tools/gdbinit
@@ -38,15 +38,6 @@ Print a v8 Code object from an internal code address
Usage: jco pc
end
-# Print LayoutDescriptor.
-define jld
-call (void) _v8_internal_Print_LayoutDescriptor((void*)($arg0))
-end
-document jld
-Print a v8 LayoutDescriptor object
-Usage: jld tagged_ptr
-end
-
# Print TransitionTree.
define jtt
call (void) _v8_internal_Print_TransitionTree((void*)($arg0))
@@ -86,6 +77,24 @@ Skip the jitted stack on x64 to where we entered JS last.
Usage: jss
end
+# Execute a simulator command.
+python
+import gdb
+
+class SimCommand(gdb.Command):
+ """Sim the current program."""
+
+ def __init__ (self):
+ super (SimCommand, self).__init__ ("sim", gdb.COMMAND_SUPPORT)
+
+ def invoke (self, arg, from_tty):
+ arg_c_string = gdb.Value(arg)
+ cmd_func = gdb.selected_frame().read_var("_v8_internal_Simulator_ExecDebugCommand")
+ cmd_func(arg_c_string)
+
+SimCommand()
+end
+
# Print stack trace with assertion scopes.
define bta
python
diff --git a/deps/v8/tools/gen-v8-gn.py b/deps/v8/tools/gen-v8-gn.py
new file mode 100755
index 0000000000..21d9f3a3a2
--- /dev/null
+++ b/deps/v8/tools/gen-v8-gn.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+# Copyright 2021 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import os
+import sys
+
+if (sys.version_info >= (3, 0)):
+ from io import StringIO
+else:
+ from io import BytesIO as StringIO
+
+
+def parse_args():
+ global args
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-o', '--output', type=str, action='store',
+ help='Location of header file to generate')
+ parser.add_argument('-p', '--positive-define', type=str, action='append',
+                      help='Externally visible positive definition')
+ parser.add_argument('-n', '--negative-define', type=str, action='append',
+                      help='Externally visible negative definition')
+ args = parser.parse_args()
+
+def generate_positive_definition(out, define):
+ out.write('''
+#ifndef {define}
+#define {define} 1
+#else
+#if {define} != 1
+#error "{define} defined but not set to 1"
+#endif
+#endif // {define}
+'''.format(define=define))
+
+def generate_negative_definition(out, define):
+ out.write('''
+#ifdef {define}
+#error "{define} is defined but is disabled by V8's GN build arguments"
+#endif // {define}
+'''.format(define=define))
+
+def generate_header(out):
+ out.write('''// AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// The following definitions were used when V8 itself was built, but also appear
+// in the externally-visible header files and so must be included by any
+// embedder. This will be done automatically if V8_GN_HEADER is defined.
+// Ready-compiled distributions of V8 will need to provide this generated header
+// along with the other headers in include.
+
+// This header must be stand-alone because it is used across targets without
+// introducing dependencies. It should only be included via v8config.h.
+''')
+ if args.positive_define:
+ for define in args.positive_define:
+ generate_positive_definition(out, define)
+
+ if args.negative_define:
+ for define in args.negative_define:
+ generate_negative_definition(out, define)
+
+def main():
+ parse_args()
+ header_stream = StringIO("")
+ generate_header(header_stream)
+ contents = header_stream.getvalue()
+ if args.output:
+ # Check if the contents has changed before writing so we don't cause build
+ # churn.
+ old_contents = None
+ if os.path.exists(args.output):
+ with open(args.output, 'r') as f:
+ old_contents = f.read()
+ if old_contents != contents:
+ with open(args.output, 'w') as f:
+ f.write(contents)
+ else:
+ print(contents)
+
+if __name__ == '__main__':
+ main()
diff --git a/deps/v8/tools/generate-header-include-checks.py b/deps/v8/tools/generate-header-include-checks.py
index 909dafe74d..4e58a492de 100755
--- a/deps/v8/tools/generate-header-include-checks.py
+++ b/deps/v8/tools/generate-header-include-checks.py
@@ -40,7 +40,7 @@ AUTO_EXCLUDE_PATTERNS = [
# platform-specific headers
'\\b{}\\b'.format(p) for p in
('win', 'win32', 'ia32', 'x64', 'arm', 'arm64', 'mips', 'mips64', 's390',
- 'ppc')]
+     'ppc', 'riscv64')]
args = None
def parse_args():
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index 1edfd353b5..368580f0c3 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -1235,12 +1235,7 @@ class Map(HeapObject):
def DescriptorsOffset(self):
return self.TransitionsOrPrototypeInfoOffset() + self.heap.PointerSize()
- def LayoutDescriptorOffset(self):
- return self.DescriptorsOffset() + self.heap.PointerSize()
-
def CodeCacheOffset(self):
- if (self.heap.reader.Is64()):
- return self.LayoutDescriptorOffset() + self.heap.PointerSize()
return self.DescriptorsOffset() + self.heap.PointerSize()
def DependentCodeOffset(self):
diff --git a/deps/v8/tools/heap-stats/categories.js b/deps/v8/tools/heap-stats/categories.js
index d3aa8480dc..2bd08fad02 100644
--- a/deps/v8/tools/heap-stats/categories.js
+++ b/deps/v8/tools/heap-stats/categories.js
@@ -94,6 +94,8 @@ export const CATEGORIES = new Map([
'SYMBOL_TYPE',
'THIN_ONE_BYTE_STRING_TYPE',
'THIN_STRING_TYPE',
+ 'UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE',
+ 'UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE',
'UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE',
'UNCACHED_EXTERNAL_STRING_TYPE',
'WASM_INSTANCE_OBJECT_TYPE',
diff --git a/deps/v8/tools/inspect-d8.js b/deps/v8/tools/inspect-d8.js
deleted file mode 100644
index b87a7586b2..0000000000
--- a/deps/v8/tools/inspect-d8.js
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This helper allows to debug d8 using Chrome DevTools.
-//
-// It runs a simple REPL for inspector messages and relies on
-// websocketd (https://github.com/joewalnes/websocketd) for the WebSocket
-// communication.
-//
-// You can start a session with a debug build of d8 like:
-//
-// $ websocketd out/x64.debug/d8 YOUR_SCRIPT.js tools/inspect-d8.js
-//
-// After that, copy the URL from console and pass it as `ws=` parameter to
-// the Chrome DevTools frontend like:
-//
-// chrome-devtools://devtools/bundled/js_app.html?ws=localhost:80
-
-function receive(msg) {
- print(msg);
-}
-
-function handleInspectorMessage() {
- send(readline());
-}
-
-while (true) {
- handleInspectorMessage();
-}
diff --git a/deps/v8/tools/lldb_commands.py b/deps/v8/tools/lldb_commands.py
index dc96e5747d..f77d0a8bbd 100644
--- a/deps/v8/tools/lldb_commands.py
+++ b/deps/v8/tools/lldb_commands.py
@@ -58,11 +58,6 @@ def jco(debugger, param, *args):
param = str(current_frame(debugger).FindRegister("pc").value)
ptr_arg_cmd(debugger, 'jco', param, "_v8_internal_Print_Code({})")
-def jld(debugger, param, *args):
- """Print a v8 LayoutDescriptor object"""
- ptr_arg_cmd(debugger, 'jld', param,
- "_v8_internal_Print_LayoutDescriptor({})")
-
def jtt(debugger, param, *args):
"""Print the transition tree of a v8 Map"""
ptr_arg_cmd(debugger, 'jtt', param, "_v8_internal_Print_TransitionTree({})")
diff --git a/deps/v8/tools/logreader.js b/deps/v8/tools/logreader.js
deleted file mode 100644
index ff0a71a393..0000000000
--- a/deps/v8/tools/logreader.js
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/**
- * @fileoverview Log Reader is used to process log file produced by V8.
- */
-
-
-/**
- * Base class for processing log files.
- *
- * @param {Array.<Object>} dispatchTable A table used for parsing and processing
- * log records.
- * @param {boolean} timedRange Ignore ticks outside timed range.
- * @param {boolean} pairwiseTimedRange Ignore ticks outside pairs of timer
- * markers.
- * @constructor
- */
-function LogReader(dispatchTable, timedRange, pairwiseTimedRange) {
- /**
- * @type {Array.<Object>}
- */
- this.dispatchTable_ = dispatchTable;
-
- /**
- * @type {boolean}
- */
- this.timedRange_ = timedRange;
-
- /**
- * @type {boolean}
- */
- this.pairwiseTimedRange_ = pairwiseTimedRange;
- if (pairwiseTimedRange) {
- this.timedRange_ = true;
- }
-
- /**
- * Current line.
- * @type {number}
- */
- this.lineNum_ = 0;
-
- /**
- * CSV lines parser.
- * @type {CsvParser}
- */
- this.csvParser_ = new CsvParser();
-
- /**
- * Keeps track of whether we've seen a "current-time" tick yet.
- * @type {boolean}
- */
- this.hasSeenTimerMarker_ = false;
-
- /**
- * List of log lines seen since last "current-time" tick.
- * @type {Array.<String>}
- */
- this.logLinesSinceLastTimerMarker_ = [];
-};
-
-
-/**
- * Used for printing error messages.
- *
- * @param {string} str Error message.
- */
-LogReader.prototype.printError = function(str) {
- // Do nothing.
-};
-
-
-/**
- * Processes a portion of V8 profiler event log.
- *
- * @param {string} chunk A portion of log.
- */
-LogReader.prototype.processLogChunk = function(chunk) {
- this.processLog_(chunk.split('\n'));
-};
-
-
-/**
- * Processes a line of V8 profiler event log.
- *
- * @param {string} line A line of log.
- */
-LogReader.prototype.processLogLine = function(line) {
- if (!this.timedRange_) {
- this.processLogLine_(line);
- return;
- }
- if (line.startsWith("current-time")) {
- if (this.hasSeenTimerMarker_) {
- this.processLog_(this.logLinesSinceLastTimerMarker_);
- this.logLinesSinceLastTimerMarker_ = [];
- // In pairwise mode, a "current-time" line ends the timed range.
- if (this.pairwiseTimedRange_) {
- this.hasSeenTimerMarker_ = false;
- }
- } else {
- this.hasSeenTimerMarker_ = true;
- }
- } else {
- if (this.hasSeenTimerMarker_) {
- this.logLinesSinceLastTimerMarker_.push(line);
- } else if (!line.startsWith("tick")) {
- this.processLogLine_(line);
- }
- }
-};
-
-
-/**
- * Processes stack record.
- *
- * @param {number} pc Program counter.
- * @param {number} func JS Function.
- * @param {Array.<string>} stack String representation of a stack.
- * @return {Array.<number>} Processed stack.
- */
-LogReader.prototype.processStack = function(pc, func, stack) {
- var fullStack = func ? [pc, func] : [pc];
- var prevFrame = pc;
- for (var i = 0, n = stack.length; i < n; ++i) {
- var frame = stack[i];
- var firstChar = frame.charAt(0);
- if (firstChar == '+' || firstChar == '-') {
- // An offset from the previous frame.
- prevFrame += parseInt(frame, 16);
- fullStack.push(prevFrame);
- // Filter out possible 'overflow' string.
- } else if (firstChar != 'o') {
- fullStack.push(parseInt(frame, 16));
- } else {
- this.printError("dropping: " + frame);
- }
- }
- return fullStack;
-};
-
-
-/**
- * Returns whether a particular dispatch must be skipped.
- *
- * @param {!Object} dispatch Dispatch record.
- * @return {boolean} True if dispatch must be skipped.
- */
-LogReader.prototype.skipDispatch = function(dispatch) {
- return false;
-};
-
-// Parses dummy variable for readability;
-const parseString = 'parse-string';
-const parseVarArgs = 'parse-var-args';
-
-/**
- * Does a dispatch of a log record.
- *
- * @param {Array.<string>} fields Log record.
- * @private
- */
-LogReader.prototype.dispatchLogRow_ = function(fields) {
- // Obtain the dispatch.
- var command = fields[0];
- var dispatch = this.dispatchTable_[command];
- if (dispatch === undefined) return;
- if (dispatch === null || this.skipDispatch(dispatch)) {
- return;
- }
-
- // Parse fields.
- var parsedFields = [];
- for (var i = 0; i < dispatch.parsers.length; ++i) {
- var parser = dispatch.parsers[i];
- if (parser === parseString) {
- parsedFields.push(fields[1 + i]);
- } else if (typeof parser == 'function') {
- parsedFields.push(parser(fields[1 + i]));
- } else if (parser === parseVarArgs) {
- // var-args
- parsedFields.push(fields.slice(1 + i));
- break;
- } else {
- throw new Error("Invalid log field parser: " + parser);
- }
- }
-
- // Run the processor.
- dispatch.processor.apply(this, parsedFields);
-};
-
-
-/**
- * Processes log lines.
- *
- * @param {Array.<string>} lines Log lines.
- * @private
- */
-LogReader.prototype.processLog_ = function(lines) {
- for (var i = 0, n = lines.length; i < n; ++i) {
- this.processLogLine_(lines[i]);
- }
-}
-
-/**
- * Processes a single log line.
- *
- * @param {String} a log line
- * @private
- */
-LogReader.prototype.processLogLine_ = function(line) {
- if (line.length > 0) {
- try {
- var fields = this.csvParser_.parseLine(line);
- this.dispatchLogRow_(fields);
- } catch (e) {
- this.printError('line ' + (this.lineNum_ + 1) + ': ' + (e.message || e) + '\n' + e.stack);
- }
- }
- this.lineNum_++;
-};
diff --git a/deps/v8/tools/mb/mb.py b/deps/v8/tools/mb/mb.py
index f3e46158b9..8ca9089944 100755
--- a/deps/v8/tools/mb/mb.py
+++ b/deps/v8/tools/mb/mb.py
@@ -62,7 +62,7 @@ class MetaBuildWrapper(object):
self.args = argparse.Namespace()
self.configs = {}
self.luci_tryservers = {}
- self.masters = {}
+ self.builder_groups = {}
self.mixins = {}
self.isolate_exe = 'isolate.exe' if self.platform.startswith(
'win') else 'isolate'
@@ -88,8 +88,9 @@ class MetaBuildWrapper(object):
def AddCommonOptions(subp):
subp.add_argument('-b', '--builder',
help='builder name to look up config from')
- subp.add_argument('-m', '--master',
- help='master name to look up config from')
+ subp.add_argument(
+ '-m', '--builder-group',
+ help='builder group name to look up config from')
subp.add_argument('-c', '--config',
help='configuration to analyze')
subp.add_argument('--phase',
@@ -276,10 +277,10 @@ class MetaBuildWrapper(object):
def CmdExport(self):
self.ReadConfigFile()
obj = {}
- for master, builders in self.masters.items():
- obj[master] = {}
+ for builder_group, builders in self.builder_groups.items():
+ obj[builder_group] = {}
for builder in builders:
- config = self.masters[master][builder]
+ config = self.builder_groups[builder_group][builder]
if not config:
continue
@@ -293,7 +294,7 @@ class MetaBuildWrapper(object):
if 'error' in args:
continue
- obj[master][builder] = args
+ obj[builder_group][builder] = args
# Dump object and trim trailing whitespace.
s = '\n'.join(l.rstrip() for l in
@@ -358,6 +359,9 @@ class MetaBuildWrapper(object):
# swarming parameters, if possible.
#
# TODO(dpranke): Also, add support for sharding and merging results.
+ # TODO(liviurau): While this seems to not be used in V8 yet, we need to add
+ # a switch for internal try-bots, since they need to use 'chrome-swarming'
+ cas_instance = 'chromium-swarm'
dimensions = []
for k, v in self._DefaultDimensions() + self.args.dimensions:
dimensions += ['-d', k, v]
@@ -370,9 +374,7 @@ class MetaBuildWrapper(object):
'archive',
'-i',
self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
- '-s',
- self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)),
- '-I', 'isolateserver.appspot.com',
+ '-cas-instance', cas_instance,
'-dump-json',
archive_json_path,
]
@@ -387,7 +389,7 @@ class MetaBuildWrapper(object):
'Failed to read JSON file "%s"' % archive_json_path, file=sys.stderr)
return 1
try:
- isolated_hash = archive_hashes[target]
+ cas_digest = archive_hashes[target]
except Exception:
self.Print(
'Cannot find hash for "%s" in "%s", file content: %s' %
@@ -399,8 +401,7 @@ class MetaBuildWrapper(object):
self.executable,
self.PathJoin('tools', 'swarming_client', 'swarming.py'),
'run',
- '-s', isolated_hash,
- '-I', 'isolateserver.appspot.com',
+ '-digests', cas_digest,
'-S', 'chromium-swarm.appspot.com',
] + dimensions
if self.args.extra_args:
@@ -450,10 +451,10 @@ class MetaBuildWrapper(object):
for bot in sorted(self.luci_tryservers[luci_tryserver]):
self.Print('\tbuilder = %s' % bot)
- for master in sorted(self.masters):
- if master.startswith('tryserver.'):
- self.Print('[bucket "master.%s"]' % master)
- for bot in sorted(self.masters[master]):
+ for builder_group in sorted(self.builder_groups):
+ if builder_group.startswith('tryserver.'):
+ self.Print('[bucket "builder_group.%s"]' % builder_group)
+ for bot in sorted(self.builder_groups[builder_group]):
self.Print('\tbuilder = %s' % bot)
return 0
@@ -466,13 +467,13 @@ class MetaBuildWrapper(object):
# Build a list of all of the configs referenced by builders.
all_configs = {}
- for master in self.masters:
- for config in self.masters[master].values():
+ for builder_group in self.builder_groups:
+ for config in self.builder_groups[builder_group].values():
if isinstance(config, dict):
for c in config.values():
- all_configs[c] = master
+ all_configs[c] = builder_group
else:
- all_configs[config] = master
+ all_configs[config] = builder_group
# Check that every referenced args file or config actually exists.
for config, loc in all_configs.items():
@@ -523,7 +524,7 @@ class MetaBuildWrapper(object):
build_dir = self.args.path[0]
vals = self.DefaultVals()
- if self.args.builder or self.args.master or self.args.config:
+ if self.args.builder or self.args.builder_group or self.args.config:
vals = self.Lookup()
# Re-run gn gen in order to ensure the config is consistent with the
# build dir.
@@ -573,10 +574,10 @@ class MetaBuildWrapper(object):
return vals
def ReadIOSBotConfig(self):
- if not self.args.master or not self.args.builder:
+ if not self.args.builder_group or not self.args.builder:
return {}
path = self.PathJoin(self.chromium_src_dir, 'ios', 'build', 'bots',
- self.args.master, self.args.builder + '.json')
+ self.args.builder_group, self.args.builder + '.json')
if not self.Exists(path):
return {}
@@ -599,7 +600,7 @@ class MetaBuildWrapper(object):
self.configs = contents['configs']
self.luci_tryservers = contents.get('luci_tryservers', {})
- self.masters = contents['masters']
+ self.builder_groups = contents['builder_groups']
self.mixins = contents['mixins']
def ReadIsolateMap(self):
@@ -626,38 +627,40 @@ class MetaBuildWrapper(object):
def ConfigFromArgs(self):
if self.args.config:
- if self.args.master or self.args.builder:
- raise MBErr('Can not specific both -c/--config and -m/--master or '
- '-b/--builder')
+ if self.args.builder_group or self.args.builder:
+ raise MBErr(
+            'Cannot specify both -c/--config and -m/--builder-group or '
+ '-b/--builder')
return self.args.config
- if not self.args.master or not self.args.builder:
+ if not self.args.builder_group or not self.args.builder:
raise MBErr('Must specify either -c/--config or '
- '(-m/--master and -b/--builder)')
+ '(-m/--builder-group and -b/--builder)')
- if not self.args.master in self.masters:
- raise MBErr('Master name "%s" not found in "%s"' %
- (self.args.master, self.args.config_file))
+ if not self.args.builder_group in self.builder_groups:
+      raise MBErr('Builder group name "%s" not found in "%s"' %
+ (self.args.builder_group, self.args.config_file))
- if not self.args.builder in self.masters[self.args.master]:
- raise MBErr('Builder name "%s" not found under masters[%s] in "%s"' %
- (self.args.builder, self.args.master, self.args.config_file))
+ if not self.args.builder in self.builder_groups[self.args.builder_group]:
+ raise MBErr(
+ 'Builder name "%s" not found under builder_groups[%s] in "%s"' %
+ (self.args.builder, self.args.builder_group, self.args.config_file))
- config = self.masters[self.args.master][self.args.builder]
+ config = self.builder_groups[self.args.builder_group][self.args.builder]
if isinstance(config, dict):
if self.args.phase is None:
raise MBErr('Must specify a build --phase for %s on %s' %
- (self.args.builder, self.args.master))
+ (self.args.builder, self.args.builder_group))
phase = str(self.args.phase)
if phase not in config:
raise MBErr('Phase %s doesn\'t exist for %s on %s' %
- (phase, self.args.builder, self.args.master))
+ (phase, self.args.builder, self.args.builder_group))
return config[phase]
if self.args.phase is not None:
raise MBErr('Must not specify a build --phase for %s on %s' %
- (self.args.builder, self.args.master))
+ (self.args.builder, self.args.builder_group))
return config
def FlattenConfig(self, config):
@@ -689,7 +692,8 @@ class MetaBuildWrapper(object):
if 'args_file' in mixin_vals:
if vals['args_file']:
raise MBErr('args_file specified multiple times in mixins '
- 'for %s on %s' % (self.args.builder, self.args.master))
+ 'for %s on %s' %
+ (self.args.builder, self.args.builder_group))
vals['args_file'] = mixin_vals['args_file']
if 'gn_args' in mixin_vals:
if vals['gn_args']:
@@ -1103,10 +1107,11 @@ class MetaBuildWrapper(object):
raise MBErr('Error %s writing to the output path "%s"' %
(e, path))
- def CheckCompile(self, master, builder):
+ def CheckCompile(self, builder_group, builder):
url_template = self.args.url_template + '/{builder}/builds/_all?as_text=1'
- url = urllib2.quote(url_template.format(master=master, builder=builder),
- safe=':/()?=')
+ url = urllib2.quote(
+ url_template.format(builder_group=builder_group, builder=builder),
+ safe=':/()?=')
try:
builds = json.loads(self.Fetch(url))
except Exception as e:
diff --git a/deps/v8/tools/mb/mb_unittest.py b/deps/v8/tools/mb/mb_unittest.py
index 765cacbc58..4c67495de4 100755
--- a/deps/v8/tools/mb/mb_unittest.py
+++ b/deps/v8/tools/mb/mb_unittest.py
@@ -107,12 +107,12 @@ class FakeFile(object):
TEST_CONFIG = """\
{
- 'masters': {
+ 'builder_groups': {
'chromium': {},
- 'fake_master': {
+ 'fake_builder_group': {
'fake_builder': 'rel_bot',
'fake_debug_builder': 'debug_goma',
- 'fake_args_bot': '//build/args/bots/fake_master/fake_args_bot.gn',
+ 'fake_args_bot': '//build/args/bots/fake_builder_group/fake_args_bot.gn',
'fake_multi_phase': { 'phase_1': 'phase_1', 'phase_2': 'phase_2'},
'fake_args_file': 'args_file_goma',
'fake_args_file_twice': 'args_file_twice',
@@ -155,7 +155,7 @@ TEST_CONFIG = """\
TRYSERVER_CONFIG = """\
{
- 'masters': {
+ 'builder_groups': {
'not_a_tryserver': {
'fake_builder': 'fake_config',
},
@@ -190,7 +190,7 @@ class UnitTest(unittest.TestCase):
},
}''')
mbw.files.setdefault(
- mbw.ToAbsPath('//build/args/bots/fake_master/fake_args_bot.gn'),
+ mbw.ToAbsPath('//build/args/bots/fake_builder_group/fake_args_bot.gn'),
'is_debug = false\n')
if files:
for path, contents in files.items():
@@ -334,18 +334,18 @@ class UnitTest(unittest.TestCase):
'--check\n', mbw.out)
mbw = self.fake_mbw()
- self.check(['gen', '-m', 'fake_master', '-b', 'fake_args_bot',
+ self.check(['gen', '-m', 'fake_builder_group', '-b', 'fake_args_bot',
'//out/Debug'],
mbw=mbw, ret=0)
# TODO(almuthanna): disable test temporarily to
# solve this issue https://crbug.com/v8/11102
# self.assertEqual(
# mbw.files['/fake_src/out/Debug/args.gn'],
- # 'import("//build/args/bots/fake_master/fake_args_bot.gn")\n')
+ # 'import("//build/args/bots/fake_builder_group/fake_args_bot.gn")\n')
def test_gen_args_file_mixins(self):
mbw = self.fake_mbw()
- self.check(['gen', '-m', 'fake_master', '-b', 'fake_args_file',
+ self.check(['gen', '-m', 'fake_builder_group', '-b', 'fake_args_file',
'//out/Debug'], mbw=mbw, ret=0)
self.assertEqual(
@@ -354,7 +354,7 @@ class UnitTest(unittest.TestCase):
'use_goma = true\n'))
mbw = self.fake_mbw()
- self.check(['gen', '-m', 'fake_master', '-b', 'fake_args_file_twice',
+ self.check(['gen', '-m', 'fake_builder_group', '-b', 'fake_args_file_twice',
'//out/Debug'], mbw=mbw, ret=1)
def test_gen_fails(self):
@@ -582,26 +582,31 @@ class UnitTest(unittest.TestCase):
def test_multiple_phases(self):
# Check that not passing a --phase to a multi-phase builder fails.
- mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase'],
+ mbw = self.check(['lookup', '-m', 'fake_builder_group',
+ '-b', 'fake_multi_phase'],
ret=1)
self.assertIn('Must specify a build --phase', mbw.out)
# Check that passing a --phase to a single-phase builder fails.
- mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_builder',
+ mbw = self.check(['lookup', '-m', 'fake_builder_group',
+ '-b', 'fake_builder',
'--phase', 'phase_1'], ret=1)
self.assertIn('Must not specify a build --phase', mbw.out)
# Check that passing a wrong phase key to a multi-phase builder fails.
- mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase',
+ mbw = self.check(['lookup', '-m', 'fake_builder_group',
+ '-b', 'fake_multi_phase',
'--phase', 'wrong_phase'], ret=1)
self.assertIn('Phase wrong_phase doesn\'t exist', mbw.out)
# Check that passing a correct phase key to a multi-phase builder passes.
- mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase',
+ mbw = self.check(['lookup', '-m', 'fake_builder_group',
+ '-b', 'fake_multi_phase',
'--phase', 'phase_1'], ret=0)
self.assertIn('phase = 1', mbw.out)
- mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase',
+ mbw = self.check(['lookup', '-m', 'fake_builder_group',
+ '-b', 'fake_multi_phase',
'--phase', 'phase_2'], ret=0)
self.assertIn('phase = 2', mbw.out)
@@ -612,7 +617,7 @@ class UnitTest(unittest.TestCase):
'enable_antidoom_banana = true\n'
)
}
- self.check(['lookup', '-m', 'fake_master', '-b', 'fake_args_file',
+ self.check(['lookup', '-m', 'fake_builder_group', '-b', 'fake_args_file',
'--recursive'], files=files, ret=0,
out=('enable_antidoom_banana = true\n'
'enable_doom_melon = true\n'
@@ -633,9 +638,9 @@ class UnitTest(unittest.TestCase):
'\tbuilder = luci_builder1\n'
'[bucket "luci.luci_tryserver2"]\n'
'\tbuilder = luci_builder2\n'
- '[bucket "master.tryserver.chromium.linux"]\n'
+ '[bucket "builder_group.tryserver.chromium.linux"]\n'
'\tbuilder = try_builder\n'
- '[bucket "master.tryserver.chromium.mac"]\n'
+ '[bucket "builder_group.tryserver.chromium.mac"]\n'
'\tbuilder = try_builder2\n'))
diff --git a/deps/v8/tools/parse-processor.html b/deps/v8/tools/parse-processor.html
index 9d78bbff2a..fa7033f43c 100644
--- a/deps/v8/tools/parse-processor.html
+++ b/deps/v8/tools/parse-processor.html
@@ -78,6 +78,12 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
overflow: hidden;
text-decoration: none;
color: white;
+ transition: auto ease-in-out 0.8s;
+ max-width: 500px;
+ }
+ .script-size:hover {
+ max-width: 100000px !important;
+ transition: auto ease-in-out 0.8s;
}
.script-size.eval {
background-color: #ee6300fc;
@@ -106,7 +112,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
<script src="https://www.gstatic.com/charts/loader.js"></script>
<script type="module">
-import { ParseProcessor, kSecondsToMillis } from "./parse-processor.mjs";
+import { ParseProcessor, kSecondsToMillis, BYTES, PERCENT } from "./parse-processor.mjs";
google.charts.load('current', {packages: ['corechart']});
@@ -231,10 +237,10 @@ function renderScriptSizes(parseProcessor) {
let scriptsDiv = $('#scripts');
parseProcessor.scripts.forEach(
script => {
- let scriptDiv = a('#script'+script.id, '', 'script-size');
+ let scriptDiv = a(`#script${script.id}`, '', 'script-size');
let scriptId = div('script-details');
scriptId.classList.add('id');
- scriptId.innerText = script.id;
+ scriptId.innerText = `id=${script.id}`;
scriptDiv.appendChild(scriptId);
let scriptSize = div('script-details');
scriptSize.innerText = BYTES(script.bytesTotal);
@@ -252,7 +258,7 @@ function renderScriptSizes(parseProcessor) {
scriptDiv.classList.add('deserialized');
}
scriptDiv.appendChild(scriptUrl);
- scriptDiv.style.width = script.bytesTotal * 0.001;
+ scriptDiv.style.maxWidth = `${script.bytesTotal * 0.001}px`;
scriptsDiv.appendChild(scriptDiv);
});
}
@@ -262,19 +268,33 @@ const kMaxTime = 120 * kSecondsToMillis;
const kTimeIncrement = 1;
const kSelectionTimespan = 2;
// TODO(cbruni): support compilation cache hit.
+class Series {
+ constructor(metricName, description, color, lineStyle, isArea=false) {
+ this.metricName = metricName;
+ this.description = description;
+ this.color = color;
+ this.lineStyle = lineStyle;
+ this.isArea = isArea;
+ }
+}
const series = [
- ['firstParseEvent', 'Any Parse', 'area'],
- ['execution', '1st Exec', 'area'],
- ['firstCompileEvent', 'Any Compile', 'area'],
- ['compile', 'Eager Compile'],
- ['lazyCompile', 'Lazy Compile'],
- ['parse', 'Parsing'],
- ['preparse', 'Preparse'],
- ['resolution', 'Preparse with Var. Resolution'],
- ['deserialization', 'Deserialization'],
- ['optimization', 'Optimize'],
+ new Series('firstParseEvent', 'Any Parse', '#4D4D4D', undefined, true),
+  new Series('execution', '1st Exec', '#fff700', undefined, true),
+ new Series('firstCompileEvent', 'Any Compile', '#5DA5DA', undefined, true),
+
+ new Series('compile', 'Eager Compile', '#FAA43A'),
+  new Series('lazyCompile', 'Lazy Compile', '#FAA43A', 'dash'),
+
+ new Series('parse', 'Parsing', '#F17CB0'),
+ new Series('preparse', 'Preparse', '#B2912F'),
+ new Series('resolution', 'Preparse with Var. Resolution', '#B276B2'),
+
+ new Series('deserialization', 'Deserialization', '#DECF3F'),
+
+ new Series('baseline', 'Baseline', '#606611', 'dash'),
+ new Series('optimize', 'Optimize', '#F15854'),
];
-const metricNames = series.map(each => each[0]);
+const metricNames = series.map(each => each.metricName);
 // Display cumulative values (useful for bytes).
const kCumulative = true;
// Include durations in the graphs.
@@ -291,20 +311,18 @@ function appendGraph(script, parentNode, start, end) {
   // The series interleave bytes processed and time spent, and thus have two
// different vAxes.
let seriesOptions = [];
- let colors = ['#4D4D4D', '#fff700', '#5DA5DA', '#FAA43A', '#60BD68',
- '#F17CB0', '#B2912F', '#B276B2', '#DECF3F', '#F15854'];
- series.forEach(([metric, description, type]) => {
- let color = colors.shift();
+ series.forEach(series => {
// Add the bytes column.
- data.addColumn('number', description);
- let options = {targetAxisIndex: 0, color: color};
- if (type == 'area') options.type = 'area';
+ data.addColumn('number', series.description);
+ let options = {targetAxisIndex: 0, color: series.color};
+ if (series.isArea) options.type = 'area';
+ if (series.lineStyle === 'dash') options.lineDashStyle = [4, 4];
seriesOptions.push(options)
// Add the time column.
if (kUseDuration) {
- data.addColumn('number', description + ' Duration');
+ data.addColumn('number', series.description + ' Duration');
seriesOptions.push(
- {targetAxisIndex: 1, color: color, lineDashStyle: [3, 2]});
+ {targetAxisIndex: 1, color: series.color, lineDashStyle: [3, 2]});
}
});
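
Each Series above now carries its own color and line style, so the per-series Google Charts column options follow directly from its fields. A minimal sketch of that mapping, using only the fields defined in the hunk (the helper name is illustrative, not part of the tool):

  // Illustrative helper: derive the Google Charts column options for one
  // Series, mirroring the forEach in the hunk above.
  function columnOptionsFor(series) {
    const options = { targetAxisIndex: 0, color: series.color };
    if (series.isArea) options.type = 'area';                         // filled area series
    if (series.lineStyle === 'dash') options.lineDashStyle = [4, 4];  // dashed line
    return options;
  }
  // columnOptionsFor(new Series('lazyCompile', 'Lazy Compile', '#FAA43A', 'dash'))
  // => { targetAxisIndex: 0, color: '#FAA43A', lineDashStyle: [4, 4] }
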
diff --git a/deps/v8/tools/parse-processor.mjs b/deps/v8/tools/parse-processor.mjs
index debde68665..974b9e2178 100644
--- a/deps/v8/tools/parse-processor.mjs
+++ b/deps/v8/tools/parse-processor.mjs
@@ -31,7 +31,7 @@ function formatNumber(value) {
return numberFormat.format(value);
}
-function BYTES(bytes, total) {
+export function BYTES(bytes, total) {
let units = ['B ', 'kB', 'mB', 'gB'];
let unitIndex = 0;
let value = bytes;
@@ -46,7 +46,7 @@ function BYTES(bytes, total) {
return result;
}
-function PERCENT(value, total) {
+export function PERCENT(value, total) {
return Math.round(value / total * 100) + "%";
}
@@ -63,6 +63,7 @@ const kNoTimeMetrics = {
class CompilationUnit {
constructor() {
this.isEval = false;
+ this.isDeserialized = false;
// Lazily computed properties.
this.firstEventTimestamp = -1;
@@ -79,7 +80,8 @@ class CompilationUnit {
this.compileTimestamp = -1;
this.lazyCompileTimestamp = -1;
this.executionTimestamp = -1;
- this.optimizationTimestamp = -1;
+ this.baselineTimestamp = -1;
+ this.optimizeTimestamp = -1;
this.deserializationDuration = -0.0;
this.preparseDuration = -0.0;
@@ -89,6 +91,7 @@ class CompilationUnit {
this.scopeResolutionDuration = -0.0;
this.lazyCompileDuration = -0.0;
this.compileDuration = -0.0;
+ this.baselineDuration = -0.0;
this.optimizeDuration = -0.0;
this.ownBytes = -1;
@@ -107,7 +110,7 @@ class CompilationUnit {
this.firstCompileEventTimestamp = this.rawTimestampMin(
this.deserializationTimestamp, this.compileTimestamp,
- this.lazyCompileTimestamp);
+ this.baselineTimestamp, this.lazyCompileTimestamp);
     // Any executed script needs to be compiled.
if (this.hasBeenExecuted() &&
(this.firstCompileEventTimestamp <= 0 ||
@@ -377,20 +380,23 @@ class Script extends CompilationUnit {
info("lazy compiled", all.filter(each => each.lazyCompileTimestamp > 0));
info("eager compiled", all.filter(each => each.compileTimestamp > 0));
- let parsingCost =
- new ExecutionCost('parse', all, each => each.parseDuration);
- parsingCost.setMetrics(this.metrics);
- log(parsingCost.toString());
-
- let preParsingCost =
- new ExecutionCost('preparse', all, each => each.preparseDuration);
- preParsingCost.setMetrics(this.metrics);
- log(preParsingCost.toString());
-
- let resolutionCost =
- new ExecutionCost('resolution', all, each => each.resolutionDuration);
- resolutionCost.setMetrics(this.metrics);
- log(resolutionCost.toString());
+ info("baseline", all.filter(each => each.baselineTimestamp > 0));
+ info("optimized", all.filter(each => each.optimizeTimestamp > 0));
+
+ const costs = [
+ ['parse', each => each.parseDuration],
+ ['preparse', each => each.preparseDuration],
+ ['resolution', each => each.resolutionDuration],
+ ['compile-eager', each => each.compileDuration],
+ ['compile-lazy', each => each.lazyCompileDuration],
+ ['baseline', each => each.baselineDuration],
+ ['optimize', each => each.optimizeDuration],
+ ];
+ for (let [name, fn] of costs) {
+ const executionCost = new ExecutionCost(name, all, fn);
+ executionCost.setMetrics(this.metrics);
+ log(executionCost.toString());
+ }
let nesting = new NestingDistribution(all);
nesting.setMetrics(this.metrics);
@@ -590,14 +596,18 @@ class ExecutionCost {
this.executedCost = 0
// Time spent on not executed functions.
this.nonExecutedCost = 0;
+ this.maxDuration = 0;
- this.executedCost = funktions.reduce((sum, each) => {
- return sum + (each.hasBeenExecuted() ? time_fn(each) : 0)
- }, 0);
- this.nonExecutedCost = funktions.reduce((sum, each) => {
- return sum + (each.hasBeenExecuted() ? 0 : time_fn(each))
- }, 0);
-
+ for (let i = 0; i < funktions.length; i++) {
+ const funktion = funktions[i];
+ const value = time_fn(funktion);
+ if (funktion.hasBeenExecuted()) {
+ this.executedCost += value;
+ } else {
+ this.nonExecutedCost += value;
+ }
+ this.maxDuration = Math.max(this.maxDuration, value);
+ }
}
print() {
@@ -605,9 +615,10 @@ class ExecutionCost {
}
toString() {
- return (` - ${this.prefix}-time:`).padEnd(24) +
- (` executed=${formatNumber(this.executedCost)}ms`).padEnd(20) +
- " non-executed=" + formatNumber(this.nonExecutedCost) + 'ms';
+ return ` - ${this.prefix}-time:`.padEnd(24) +
+ ` executed=${formatNumber(this.executedCost)}ms`.padEnd(20) +
+ ` non-executed=${formatNumber(this.nonExecutedCost)}ms`.padEnd(24) +
+ ` max=${formatNumber(this.maxDuration)}ms`;
}
setMetrics(dict) {
@@ -799,6 +810,8 @@ export class ParseProcessor extends LogReader {
'compile-lazy': this.processCompileLazy.bind(this),
'compile': this.processCompile.bind(this),
'compile-eval': this.processCompileEval.bind(this),
+ 'baseline': this.processBaselineLazy.bind(this),
+ 'baseline-lazy': this.processBaselineLazy.bind(this),
'optimize-lazy': this.processOptimizeLazy.bind(this),
'deserialize': this.processDeserialize.bind(this),
};
@@ -1088,6 +1101,22 @@ export class ParseProcessor extends LogReader {
compilationUnit.isEval = true;
}
+ processBaselineLazy(
+ scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+ let compilationUnit = this.lookupScript(scriptId);
+ if (startPosition > 0) {
+ compilationUnit =
+ compilationUnit.getFunktionAtStartPosition(startPosition);
+ if (compilationUnit === undefined) {
+ // This should not happen since any funktion has to be parsed first.
+ console.error('processBaselineLazy funktion not found', ...arguments);
+ return;
+ }
+ }
+ compilationUnit.baselineTimestamp = startOf(timestamp, duration);
+ compilationUnit.baselineDuration = duration;
+ }
+
processOptimizeLazy(
scriptId, startPosition, endPosition, duration, timestamp, functionName) {
let compilationUnit = this.lookupScript(scriptId);
@@ -1100,8 +1129,8 @@ export class ParseProcessor extends LogReader {
return;
}
}
- compilationUnit.optimizationTimestamp = startOf(timestamp, duration);
- compilationUnit.optimizationDuration = duration;
+ compilationUnit.optimizeTimestamp = startOf(timestamp, duration);
+ compilationUnit.optimizeDuration = duration;
}
processDeserialize(
@@ -1114,6 +1143,7 @@ export class ParseProcessor extends LogReader {
}
compilationUnit.deserializationTimestamp = startOf(timestamp, duration);
compilationUnit.deserializationDuration = duration;
+ compilationUnit.isDeserialized = true;
}
processCompilationCacheEvent(
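
The ExecutionCost constructor above replaces the two reduce() passes with a single loop that also records the longest individual duration, which surfaces as the new max= column in toString(). A standalone sketch of that accumulation (function and parameter names are illustrative):

  // Single-pass cost accumulation, as in the ExecutionCost hunk above
  // (standalone sketch; timeFn selects one duration field per funktion).
  function accumulateCosts(funktions, timeFn) {
    let executedCost = 0, nonExecutedCost = 0, maxDuration = 0;
    for (const funktion of funktions) {
      const value = timeFn(funktion);
      if (funktion.hasBeenExecuted()) {
        executedCost += value;      // time spent on code that actually ran
      } else {
        nonExecutedCost += value;   // time spent on code that never ran
      }
      maxDuration = Math.max(maxDuration, value);
    }
    return { executedCost, nonExecutedCost, maxDuration };
  }
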
diff --git a/deps/v8/tools/profile.js b/deps/v8/tools/profile.js
deleted file mode 100644
index 50076825ad..0000000000
--- a/deps/v8/tools/profile.js
+++ /dev/null
@@ -1,1172 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// TODO: move to separate modules
-class SourcePosition {
- constructor(script, line, column) {
- this.script = script;
- this.line = line;
- this.column = column;
- this.entries = [];
- }
- addEntry(entry) {
- this.entries.push(entry);
- }
-}
-
-class Script {
-
- constructor(id, name, source) {
- this.id = id;
- this.name = name;
- this.source = source;
- this.sourcePositions = [];
- // Map<line, Map<column, SourcePosition>>
- this.lineToColumn = new Map();
- }
-
- addSourcePosition(line, column, entry) {
- let sourcePosition = this.lineToColumn.get(line)?.get(column);
- if (sourcePosition === undefined) {
- sourcePosition = new SourcePosition(this, line, column, )
- this.#addSourcePosition(line, column, sourcePosition);
- }
- sourcePosition.addEntry(entry);
- return sourcePosition;
- }
-
- #addSourcePosition(line, column, sourcePosition) {
- let columnToSourcePosition;
- if (this.lineToColumn.has(line)) {
- columnToSourcePosition = this.lineToColumn.get(line);
- } else {
- columnToSourcePosition = new Map();
- this.lineToColumn.set(line, columnToSourcePosition);
- }
- this.sourcePositions.push(sourcePosition);
- columnToSourcePosition.set(column, sourcePosition);
- }
-}
-
-/**
- * Creates a profile object for processing profiling-related events
- * and calculating function execution times.
- *
- * @constructor
- */
-function Profile() {
- this.codeMap_ = new CodeMap();
- this.topDownTree_ = new CallTree();
- this.bottomUpTree_ = new CallTree();
- this.c_entries_ = {};
- this.ticks_ = [];
- this.scripts_ = [];
- this.urlToScript_ = new Map();
-};
-
-
-/**
- * Returns whether a function with the specified name must be skipped.
- * Should be overriden by subclasses.
- *
- * @param {string} name Function name.
- */
-Profile.prototype.skipThisFunction = function (name) {
- return false;
-};
-
-
-/**
- * Enum for profiler operations that involve looking up existing
- * code entries.
- *
- * @enum {number}
- */
-Profile.Operation = {
- MOVE: 0,
- DELETE: 1,
- TICK: 2
-};
-
-
-/**
- * Enum for code state regarding its dynamic optimization.
- *
- * @enum {number}
- */
-Profile.CodeState = {
- COMPILED: 0,
- OPTIMIZABLE: 1,
- OPTIMIZED: 2
-};
-
-
-/**
- * Called whenever the specified operation has failed finding a function
- * containing the specified address. Should be overriden by subclasses.
- * See the Profile.Operation enum for the list of
- * possible operations.
- *
- * @param {number} operation Operation.
- * @param {number} addr Address of the unknown code.
- * @param {number} opt_stackPos If an unknown address is encountered
- * during stack strace processing, specifies a position of the frame
- * containing the address.
- */
-Profile.prototype.handleUnknownCode = function (
- operation, addr, opt_stackPos) {
-};
-
-
-/**
- * Registers a library.
- *
- * @param {string} name Code entry name.
- * @param {number} startAddr Starting address.
- * @param {number} endAddr Ending address.
- */
-Profile.prototype.addLibrary = function (
- name, startAddr, endAddr) {
- var entry = new CodeMap.CodeEntry(
- endAddr - startAddr, name, 'SHARED_LIB');
- this.codeMap_.addLibrary(startAddr, entry);
- return entry;
-};
-
-
-/**
- * Registers statically compiled code entry.
- *
- * @param {string} name Code entry name.
- * @param {number} startAddr Starting address.
- * @param {number} endAddr Ending address.
- */
-Profile.prototype.addStaticCode = function (
- name, startAddr, endAddr) {
- var entry = new CodeMap.CodeEntry(
- endAddr - startAddr, name, 'CPP');
- this.codeMap_.addStaticCode(startAddr, entry);
- return entry;
-};
-
-
-/**
- * Registers dynamic (JIT-compiled) code entry.
- *
- * @param {string} type Code entry type.
- * @param {string} name Code entry name.
- * @param {number} start Starting address.
- * @param {number} size Code entry size.
- */
-Profile.prototype.addCode = function (
- type, name, timestamp, start, size) {
- var entry = new Profile.DynamicCodeEntry(size, type, name);
- this.codeMap_.addCode(start, entry);
- return entry;
-};
-
-
-/**
- * Registers dynamic (JIT-compiled) code entry.
- *
- * @param {string} type Code entry type.
- * @param {string} name Code entry name.
- * @param {number} start Starting address.
- * @param {number} size Code entry size.
- * @param {number} funcAddr Shared function object address.
- * @param {Profile.CodeState} state Optimization state.
- */
-Profile.prototype.addFuncCode = function (
- type, name, timestamp, start, size, funcAddr, state) {
- // As code and functions are in the same address space,
- // it is safe to put them in a single code map.
- var func = this.codeMap_.findDynamicEntryByStartAddress(funcAddr);
- if (!func) {
- func = new Profile.FunctionEntry(name);
- this.codeMap_.addCode(funcAddr, func);
- } else if (func.name !== name) {
- // Function object has been overwritten with a new one.
- func.name = name;
- }
- var entry = this.codeMap_.findDynamicEntryByStartAddress(start);
- if (entry) {
- if (entry.size === size && entry.func === func) {
- // Entry state has changed.
- entry.state = state;
- } else {
- this.codeMap_.deleteCode(start);
- entry = null;
- }
- }
- if (!entry) {
- entry = new Profile.DynamicFuncCodeEntry(size, type, func, state);
- this.codeMap_.addCode(start, entry);
- }
- return entry;
-};
-
-
-/**
- * Reports about moving of a dynamic code entry.
- *
- * @param {number} from Current code entry address.
- * @param {number} to New code entry address.
- */
-Profile.prototype.moveCode = function (from, to) {
- try {
- this.codeMap_.moveCode(from, to);
- } catch (e) {
- this.handleUnknownCode(Profile.Operation.MOVE, from);
- }
-};
-
-Profile.prototype.deoptCode = function (
- timestamp, code, inliningId, scriptOffset, bailoutType,
- sourcePositionText, deoptReasonText) {
-};
-
-/**
- * Reports about deletion of a dynamic code entry.
- *
- * @param {number} start Starting address.
- */
-Profile.prototype.deleteCode = function (start) {
- try {
- this.codeMap_.deleteCode(start);
- } catch (e) {
- this.handleUnknownCode(Profile.Operation.DELETE, start);
- }
-};
-
-/**
- * Adds source positions for given code.
- */
-Profile.prototype.addSourcePositions = function (
- start, script, startPos, endPos, sourcePositions, inliningPositions,
- inlinedFunctions) {
- // CLI does not need source code => ignore.
-};
-
-/**
- * Adds script source code.
- */
-Profile.prototype.addScriptSource = function (id, url, source) {
- const script = new Script(id, url, source);
- this.scripts_[id] = script;
- this.urlToScript_.set(url, script);
-};
-
-
-/**
- * Adds script source code.
- */
-Profile.prototype.getScript = function (url) {
- return this.urlToScript_.get(url);
-};
-
-/**
- * Reports about moving of a dynamic code entry.
- *
- * @param {number} from Current code entry address.
- * @param {number} to New code entry address.
- */
-Profile.prototype.moveFunc = function (from, to) {
- if (this.codeMap_.findDynamicEntryByStartAddress(from)) {
- this.codeMap_.moveCode(from, to);
- }
-};
-
-
-/**
- * Retrieves a code entry by an address.
- *
- * @param {number} addr Entry address.
- */
-Profile.prototype.findEntry = function (addr) {
- return this.codeMap_.findEntry(addr);
-};
-
-
-/**
- * Records a tick event. Stack must contain a sequence of
- * addresses starting with the program counter value.
- *
- * @param {Array<number>} stack Stack sample.
- */
-Profile.prototype.recordTick = function (time_ns, vmState, stack) {
- var processedStack = this.resolveAndFilterFuncs_(stack);
- this.bottomUpTree_.addPath(processedStack);
- processedStack.reverse();
- this.topDownTree_.addPath(processedStack);
-};
-
-
-/**
- * Translates addresses into function names and filters unneeded
- * functions.
- *
- * @param {Array<number>} stack Stack sample.
- */
-Profile.prototype.resolveAndFilterFuncs_ = function (stack) {
- var result = [];
- var last_seen_c_function = '';
- var look_for_first_c_function = false;
- for (var i = 0; i < stack.length; ++i) {
- var entry = this.codeMap_.findEntry(stack[i]);
- if (entry) {
- var name = entry.getName();
- if (i === 0 && (entry.type === 'CPP' || entry.type === 'SHARED_LIB')) {
- look_for_first_c_function = true;
- }
- if (look_for_first_c_function && entry.type === 'CPP') {
- last_seen_c_function = name;
- }
- if (!this.skipThisFunction(name)) {
- result.push(name);
- }
- } else {
- this.handleUnknownCode(Profile.Operation.TICK, stack[i], i);
- if (i === 0) result.push("UNKNOWN");
- }
- if (look_for_first_c_function &&
- i > 0 &&
- (!entry || entry.type !== 'CPP') &&
- last_seen_c_function !== '') {
- if (this.c_entries_[last_seen_c_function] === undefined) {
- this.c_entries_[last_seen_c_function] = 0;
- }
- this.c_entries_[last_seen_c_function]++;
- look_for_first_c_function = false; // Found it, we're done.
- }
- }
- return result;
-};
-
-
-/**
- * Performs a BF traversal of the top down call graph.
- *
- * @param {function(CallTree.Node)} f Visitor function.
- */
-Profile.prototype.traverseTopDownTree = function (f) {
- this.topDownTree_.traverse(f);
-};
-
-
-/**
- * Performs a BF traversal of the bottom up call graph.
- *
- * @param {function(CallTree.Node)} f Visitor function.
- */
-Profile.prototype.traverseBottomUpTree = function (f) {
- this.bottomUpTree_.traverse(f);
-};
-
-
-/**
- * Calculates a top down profile for a node with the specified label.
- * If no name specified, returns the whole top down calls tree.
- *
- * @param {string} opt_label Node label.
- */
-Profile.prototype.getTopDownProfile = function (opt_label) {
- return this.getTreeProfile_(this.topDownTree_, opt_label);
-};
-
-
-/**
- * Calculates a bottom up profile for a node with the specified label.
- * If no name specified, returns the whole bottom up calls tree.
- *
- * @param {string} opt_label Node label.
- */
-Profile.prototype.getBottomUpProfile = function (opt_label) {
- return this.getTreeProfile_(this.bottomUpTree_, opt_label);
-};
-
-
-/**
- * Helper function for calculating a tree profile.
- *
- * @param {Profile.CallTree} tree Call tree.
- * @param {string} opt_label Node label.
- */
-Profile.prototype.getTreeProfile_ = function (tree, opt_label) {
- if (!opt_label) {
- tree.computeTotalWeights();
- return tree;
- } else {
- var subTree = tree.cloneSubtree(opt_label);
- subTree.computeTotalWeights();
- return subTree;
- }
-};
-
-
-/**
- * Calculates a flat profile of callees starting from a node with
- * the specified label. If no name specified, starts from the root.
- *
- * @param {string} opt_label Starting node label.
- */
-Profile.prototype.getFlatProfile = function (opt_label) {
- var counters = new CallTree();
- var rootLabel = opt_label || CallTree.ROOT_NODE_LABEL;
- var precs = {};
- precs[rootLabel] = 0;
- var root = counters.findOrAddChild(rootLabel);
-
- this.topDownTree_.computeTotalWeights();
- this.topDownTree_.traverseInDepth(
- function onEnter(node) {
- if (!(node.label in precs)) {
- precs[node.label] = 0;
- }
- var nodeLabelIsRootLabel = node.label == rootLabel;
- if (nodeLabelIsRootLabel || precs[rootLabel] > 0) {
- if (precs[rootLabel] == 0) {
- root.selfWeight += node.selfWeight;
- root.totalWeight += node.totalWeight;
- } else {
- var rec = root.findOrAddChild(node.label);
- rec.selfWeight += node.selfWeight;
- if (nodeLabelIsRootLabel || precs[node.label] == 0) {
- rec.totalWeight += node.totalWeight;
- }
- }
- precs[node.label]++;
- }
- },
- function onExit(node) {
- if (node.label == rootLabel || precs[rootLabel] > 0) {
- precs[node.label]--;
- }
- },
- null);
-
- if (!opt_label) {
- // If we have created a flat profile for the whole program, we don't
- // need an explicit root in it. Thus, replace the counters tree
- // root with the node corresponding to the whole program.
- counters.root_ = root;
- } else {
- // Propagate weights so percents can be calculated correctly.
- counters.getRoot().selfWeight = root.selfWeight;
- counters.getRoot().totalWeight = root.totalWeight;
- }
- return counters;
-};
-
-
-Profile.CEntryNode = function (name, ticks) {
- this.name = name;
- this.ticks = ticks;
-}
-
-
-Profile.prototype.getCEntryProfile = function () {
- var result = [new Profile.CEntryNode("TOTAL", 0)];
- var total_ticks = 0;
- for (var f in this.c_entries_) {
- var ticks = this.c_entries_[f];
- total_ticks += ticks;
- result.push(new Profile.CEntryNode(f, ticks));
- }
- result[0].ticks = total_ticks; // Sorting will keep this at index 0.
- result.sort(function (n1, n2) {
- return n2.ticks - n1.ticks || (n2.name < n1.name ? -1 : 1)
- });
- return result;
-}
-
-
-/**
- * Cleans up function entries that are not referenced by code entries.
- */
-Profile.prototype.cleanUpFuncEntries = function () {
- var referencedFuncEntries = [];
- var entries = this.codeMap_.getAllDynamicEntriesWithAddresses();
- for (var i = 0, l = entries.length; i < l; ++i) {
- if (entries[i][1].constructor === Profile.FunctionEntry) {
- entries[i][1].used = false;
- }
- }
- for (var i = 0, l = entries.length; i < l; ++i) {
- if ("func" in entries[i][1]) {
- entries[i][1].func.used = true;
- }
- }
- for (var i = 0, l = entries.length; i < l; ++i) {
- if (entries[i][1].constructor === Profile.FunctionEntry &&
- !entries[i][1].used) {
- this.codeMap_.deleteCode(entries[i][0]);
- }
- }
-};
-
-
-/**
- * Creates a dynamic code entry.
- *
- * @param {number} size Code size.
- * @param {string} type Code type.
- * @param {string} name Function name.
- * @constructor
- */
-Profile.DynamicCodeEntry = function (size, type, name) {
- CodeMap.CodeEntry.call(this, size, name, type);
-};
-
-
-/**
- * Returns node name.
- */
-Profile.DynamicCodeEntry.prototype.getName = function () {
- return this.type + ': ' + this.name;
-};
-
-
-/**
- * Returns raw node name (without type decoration).
- */
-Profile.DynamicCodeEntry.prototype.getRawName = function () {
- return this.name;
-};
-
-
-Profile.DynamicCodeEntry.prototype.isJSFunction = function () {
- return false;
-};
-
-
-Profile.DynamicCodeEntry.prototype.toString = function () {
- return this.getName() + ': ' + this.size.toString(16);
-};
-
-
-/**
- * Creates a dynamic code entry.
- *
- * @param {number} size Code size.
- * @param {string} type Code type.
- * @param {Profile.FunctionEntry} func Shared function entry.
- * @param {Profile.CodeState} state Code optimization state.
- * @constructor
- */
-Profile.DynamicFuncCodeEntry = function (size, type, func, state) {
- CodeMap.CodeEntry.call(this, size, '', type);
- this.func = func;
- this.state = state;
-};
-
-Profile.DynamicFuncCodeEntry.STATE_PREFIX = ["", "~", "*"];
-
-/**
- * Returns state.
- */
-Profile.DynamicFuncCodeEntry.prototype.getState = function () {
- return Profile.DynamicFuncCodeEntry.STATE_PREFIX[this.state];
-};
-
-/**
- * Returns node name.
- */
-Profile.DynamicFuncCodeEntry.prototype.getName = function () {
- var name = this.func.getName();
- return this.type + ': ' + this.getState() + name;
-};
-
-
-/**
- * Returns raw node name (without type decoration).
- */
-Profile.DynamicFuncCodeEntry.prototype.getRawName = function () {
- return this.func.getName();
-};
-
-
-Profile.DynamicFuncCodeEntry.prototype.isJSFunction = function () {
- return true;
-};
-
-
-Profile.DynamicFuncCodeEntry.prototype.toString = function () {
- return this.getName() + ': ' + this.size.toString(16);
-};
-
-
-/**
- * Creates a shared function object entry.
- *
- * @param {string} name Function name.
- * @constructor
- */
-Profile.FunctionEntry = function (name) {
- CodeMap.CodeEntry.call(this, 0, name);
-};
-
-
-/**
- * Returns node name.
- */
-Profile.FunctionEntry.prototype.getName = function () {
- var name = this.name;
- if (name.length == 0) {
- name = '<anonymous>';
- } else if (name.charAt(0) == ' ') {
- // An anonymous function with location: " aaa.js:10".
- name = '<anonymous>' + name;
- }
- return name;
-};
-
-Profile.FunctionEntry.prototype.toString = CodeMap.CodeEntry.prototype.toString;
-
-/**
- * Constructs a call graph.
- *
- * @constructor
- */
-function CallTree() {
- this.root_ = new CallTree.Node(
- CallTree.ROOT_NODE_LABEL);
-};
-
-
-/**
- * The label of the root node.
- */
-CallTree.ROOT_NODE_LABEL = '';
-
-
-/**
- * @private
- */
-CallTree.prototype.totalsComputed_ = false;
-
-
-/**
- * Returns the tree root.
- */
-CallTree.prototype.getRoot = function () {
- return this.root_;
-};
-
-
-/**
- * Adds the specified call path, constructing nodes as necessary.
- *
- * @param {Array<string>} path Call path.
- */
-CallTree.prototype.addPath = function (path) {
- if (path.length == 0) {
- return;
- }
- var curr = this.root_;
- for (var i = 0; i < path.length; ++i) {
- curr = curr.findOrAddChild(path[i]);
- }
- curr.selfWeight++;
- this.totalsComputed_ = false;
-};
-
-
-/**
- * Finds an immediate child of the specified parent with the specified
- * label, creates a child node if necessary. If a parent node isn't
- * specified, uses tree root.
- *
- * @param {string} label Child node label.
- */
-CallTree.prototype.findOrAddChild = function (label) {
- return this.root_.findOrAddChild(label);
-};
-
-
-/**
- * Creates a subtree by cloning and merging all subtrees rooted at nodes
- * with a given label. E.g. cloning the following call tree on label 'A'
- * will give the following result:
- *
- * <A>--<B> <B>
- * / /
- * <root> == clone on 'A' ==> <root>--<A>
- * \ \
- * <C>--<A>--<D> <D>
- *
- * And <A>'s selfWeight will be the sum of selfWeights of <A>'s from the
- * source call tree.
- *
- * @param {string} label The label of the new root node.
- */
-CallTree.prototype.cloneSubtree = function (label) {
- var subTree = new CallTree();
- this.traverse(function (node, parent) {
- if (!parent && node.label != label) {
- return null;
- }
- var child = (parent ? parent : subTree).findOrAddChild(node.label);
- child.selfWeight += node.selfWeight;
- return child;
- });
- return subTree;
-};
-
-
-/**
- * Computes total weights in the call graph.
- */
-CallTree.prototype.computeTotalWeights = function () {
- if (this.totalsComputed_) {
- return;
- }
- this.root_.computeTotalWeight();
- this.totalsComputed_ = true;
-};
-
-
-/**
- * Traverses the call graph in preorder. This function can be used for
- * building optionally modified tree clones. This is the boilerplate code
- * for this scenario:
- *
- * callTree.traverse(function(node, parentClone) {
- * var nodeClone = cloneNode(node);
- * if (parentClone)
- * parentClone.addChild(nodeClone);
- * return nodeClone;
- * });
- *
- * @param {function(CallTree.Node, *)} f Visitor function.
- * The second parameter is the result of calling 'f' on the parent node.
- */
-CallTree.prototype.traverse = function (f) {
- var pairsToProcess = new ConsArray();
- pairsToProcess.concat([{ node: this.root_, param: null }]);
- while (!pairsToProcess.atEnd()) {
- var pair = pairsToProcess.next();
- var node = pair.node;
- var newParam = f(node, pair.param);
- var morePairsToProcess = [];
- node.forEachChild(function (child) {
- morePairsToProcess.push({ node: child, param: newParam });
- });
- pairsToProcess.concat(morePairsToProcess);
- }
-};
-
-
-/**
- * Performs an indepth call graph traversal.
- *
- * @param {function(CallTree.Node)} enter A function called
- * prior to visiting node's children.
- * @param {function(CallTree.Node)} exit A function called
- * after visiting node's children.
- */
-CallTree.prototype.traverseInDepth = function (enter, exit) {
- function traverse(node) {
- enter(node);
- node.forEachChild(traverse);
- exit(node);
- }
- traverse(this.root_);
-};
-
-
-/**
- * Constructs a call graph node.
- *
- * @param {string} label Node label.
- * @param {CallTree.Node} opt_parent Node parent.
- */
-CallTree.Node = function (label, opt_parent) {
- this.label = label;
- this.parent = opt_parent;
- this.children = {};
-};
-
-
-/**
- * Node self weight (how many times this node was the last node in
- * a call path).
- * @type {number}
- */
-CallTree.Node.prototype.selfWeight = 0;
-
-
-/**
- * Node total weight (includes weights of all children).
- * @type {number}
- */
-CallTree.Node.prototype.totalWeight = 0;
-
-
-/**
- * Adds a child node.
- *
- * @param {string} label Child node label.
- */
-CallTree.Node.prototype.addChild = function (label) {
- var child = new CallTree.Node(label, this);
- this.children[label] = child;
- return child;
-};
-
-
-/**
- * Computes node's total weight.
- */
-CallTree.Node.prototype.computeTotalWeight =
- function () {
- var totalWeight = this.selfWeight;
- this.forEachChild(function (child) {
- totalWeight += child.computeTotalWeight();
- });
- return this.totalWeight = totalWeight;
- };
-
-
-/**
- * Returns all node's children as an array.
- */
-CallTree.Node.prototype.exportChildren = function () {
- var result = [];
- this.forEachChild(function (node) { result.push(node); });
- return result;
-};
-
-
-/**
- * Finds an immediate child with the specified label.
- *
- * @param {string} label Child node label.
- */
-CallTree.Node.prototype.findChild = function (label) {
- return this.children[label] || null;
-};
-
-
-/**
- * Finds an immediate child with the specified label, creates a child
- * node if necessary.
- *
- * @param {string} label Child node label.
- */
-CallTree.Node.prototype.findOrAddChild = function (label) {
- return this.findChild(label) || this.addChild(label);
-};
-
-
-/**
- * Calls the specified function for every child.
- *
- * @param {function(CallTree.Node)} f Visitor function.
- */
-CallTree.Node.prototype.forEachChild = function (f) {
- for (var c in this.children) {
- f(this.children[c]);
- }
-};
-
-
-/**
- * Walks up from the current node up to the call tree root.
- *
- * @param {function(CallTree.Node)} f Visitor function.
- */
-CallTree.Node.prototype.walkUpToRoot = function (f) {
- for (var curr = this; curr != null; curr = curr.parent) {
- f(curr);
- }
-};
-
-
-/**
- * Tries to find a node with the specified path.
- *
- * @param {Array<string>} labels The path.
- * @param {function(CallTree.Node)} opt_f Visitor function.
- */
-CallTree.Node.prototype.descendToChild = function (
- labels, opt_f) {
- for (var pos = 0, curr = this; pos < labels.length && curr != null; pos++) {
- var child = curr.findChild(labels[pos]);
- if (opt_f) {
- opt_f(child, pos);
- }
- curr = child;
- }
- return curr;
-};
-
-function JsonProfile() {
- this.codeMap_ = new CodeMap();
- this.codeEntries_ = [];
- this.functionEntries_ = [];
- this.ticks_ = [];
- this.scripts_ = [];
-}
-
-JsonProfile.prototype.addLibrary = function (
- name, startAddr, endAddr) {
- var entry = new CodeMap.CodeEntry(
- endAddr - startAddr, name, 'SHARED_LIB');
- this.codeMap_.addLibrary(startAddr, entry);
-
- entry.codeId = this.codeEntries_.length;
- this.codeEntries_.push({ name: entry.name, type: entry.type });
- return entry;
-};
-
-JsonProfile.prototype.addStaticCode = function (
- name, startAddr, endAddr) {
- var entry = new CodeMap.CodeEntry(
- endAddr - startAddr, name, 'CPP');
- this.codeMap_.addStaticCode(startAddr, entry);
-
- entry.codeId = this.codeEntries_.length;
- this.codeEntries_.push({ name: entry.name, type: entry.type });
- return entry;
-};
-
-JsonProfile.prototype.addCode = function (
- kind, name, timestamp, start, size) {
- let codeId = this.codeEntries_.length;
- // Find out if we have a static code entry for the code. If yes, we will
- // make sure it is written to the JSON file just once.
- let staticEntry = this.codeMap_.findAddress(start);
- if (staticEntry && staticEntry.entry.type === 'CPP') {
- codeId = staticEntry.entry.codeId;
- }
-
- var entry = new CodeMap.CodeEntry(size, name, 'CODE');
- this.codeMap_.addCode(start, entry);
-
- entry.codeId = codeId;
- this.codeEntries_[codeId] = {
- name: entry.name,
- timestamp: timestamp,
- type: entry.type,
- kind: kind
- };
-
- return entry;
-};
-
-JsonProfile.prototype.addFuncCode = function (
- kind, name, timestamp, start, size, funcAddr, state) {
- // As code and functions are in the same address space,
- // it is safe to put them in a single code map.
- var func = this.codeMap_.findDynamicEntryByStartAddress(funcAddr);
- if (!func) {
- var func = new CodeMap.CodeEntry(0, name, 'SFI');
- this.codeMap_.addCode(funcAddr, func);
-
- func.funcId = this.functionEntries_.length;
- this.functionEntries_.push({ name: name, codes: [] });
- } else if (func.name !== name) {
- // Function object has been overwritten with a new one.
- func.name = name;
-
- func.funcId = this.functionEntries_.length;
- this.functionEntries_.push({ name: name, codes: [] });
- }
- // TODO(jarin): Insert the code object into the SFI's code list.
- var entry = this.codeMap_.findDynamicEntryByStartAddress(start);
- if (entry) {
- if (entry.size === size && entry.func === func) {
- // Entry state has changed.
- entry.state = state;
- } else {
- this.codeMap_.deleteCode(start);
- entry = null;
- }
- }
- if (!entry) {
- entry = new CodeMap.CodeEntry(size, name, 'JS');
- this.codeMap_.addCode(start, entry);
-
- entry.codeId = this.codeEntries_.length;
-
- this.functionEntries_[func.funcId].codes.push(entry.codeId);
-
- if (state === 0) {
- kind = "Builtin";
- } else if (state === 1) {
- kind = "Unopt";
- } else if (state === 2) {
- kind = "Opt";
- }
-
- this.codeEntries_.push({
- name: entry.name,
- type: entry.type,
- kind: kind,
- func: func.funcId,
- tm: timestamp
- });
- }
- return entry;
-};
-
-JsonProfile.prototype.moveCode = function (from, to) {
- try {
- this.codeMap_.moveCode(from, to);
- } catch (e) {
- printErr("Move: unknown source " + from);
- }
-};
-
-JsonProfile.prototype.addSourcePositions = function (
- start, script, startPos, endPos, sourcePositions, inliningPositions,
- inlinedFunctions) {
- var entry = this.codeMap_.findDynamicEntryByStartAddress(start);
- if (!entry) return;
- var codeId = entry.codeId;
-
- // Resolve the inlined functions list.
- if (inlinedFunctions.length > 0) {
- inlinedFunctions = inlinedFunctions.substring(1).split("S");
- for (var i = 0; i < inlinedFunctions.length; i++) {
- var funcAddr = parseInt(inlinedFunctions[i]);
- var func = this.codeMap_.findDynamicEntryByStartAddress(funcAddr);
- if (!func || func.funcId === undefined) {
- printErr("Could not find function " + inlinedFunctions[i]);
- inlinedFunctions[i] = null;
- } else {
- inlinedFunctions[i] = func.funcId;
- }
- }
- } else {
- inlinedFunctions = [];
- }
-
- this.codeEntries_[entry.codeId].source = {
- script: script,
- start: startPos,
- end: endPos,
- positions: sourcePositions,
- inlined: inliningPositions,
- fns: inlinedFunctions
- };
-};
-
-JsonProfile.prototype.addScriptSource = function (id, url, source) {
- this.scripts_[id] = new Script(id, url, source);
-};
-
-
-JsonProfile.prototype.deoptCode = function (
- timestamp, code, inliningId, scriptOffset, bailoutType,
- sourcePositionText, deoptReasonText) {
- let entry = this.codeMap_.findDynamicEntryByStartAddress(code);
- if (entry) {
- let codeId = entry.codeId;
- if (!this.codeEntries_[codeId].deopt) {
- // Only add the deopt if there was no deopt before.
- // The subsequent deoptimizations should be lazy deopts for
- // other on-stack activations.
- this.codeEntries_[codeId].deopt = {
- tm: timestamp,
- inliningId: inliningId,
- scriptOffset: scriptOffset,
- posText: sourcePositionText,
- reason: deoptReasonText,
- bailoutType: bailoutType
- };
- }
- }
-};
-
-JsonProfile.prototype.deleteCode = function (start) {
- try {
- this.codeMap_.deleteCode(start);
- } catch (e) {
- printErr("Delete: unknown address " + start);
- }
-};
-
-JsonProfile.prototype.moveFunc = function (from, to) {
- if (this.codeMap_.findDynamicEntryByStartAddress(from)) {
- this.codeMap_.moveCode(from, to);
- }
-};
-
-JsonProfile.prototype.findEntry = function (addr) {
- return this.codeMap_.findEntry(addr);
-};
-
-JsonProfile.prototype.recordTick = function (time_ns, vmState, stack) {
- // TODO(jarin) Resolve the frame-less case (when top of stack is
- // known code).
- var processedStack = [];
- for (var i = 0; i < stack.length; i++) {
- var resolved = this.codeMap_.findAddress(stack[i]);
- if (resolved) {
- processedStack.push(resolved.entry.codeId, resolved.offset);
- } else {
- processedStack.push(-1, stack[i]);
- }
- }
- this.ticks_.push({ tm: time_ns, vm: vmState, s: processedStack });
-};
-
-function writeJson(s) {
- write(JSON.stringify(s, null, 2));
-}
-
-JsonProfile.prototype.writeJson = function () {
- // Write out the JSON in a partially manual way to avoid creating too-large
- // strings in one JSON.stringify call when there are a lot of ticks.
- write('{\n')
-
- write(' "code": ');
- writeJson(this.codeEntries_);
- write(',\n');
-
- write(' "functions": ');
- writeJson(this.functionEntries_);
- write(',\n');
-
- write(' "ticks": [\n');
- for (var i = 0; i < this.ticks_.length; i++) {
- write(' ');
- writeJson(this.ticks_[i]);
- if (i < this.ticks_.length - 1) {
- write(',\n');
- } else {
- write('\n');
- }
- }
- write(' ],\n');
-
- write(' "scripts": ');
- writeJson(this.scripts_);
-
- write('}\n');
-};
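
tools/profile.js is dropped here; the ES-module version in tools/profile.mjs (updated below) keeps the same call-tree model. The deleted comments describe it as: every tick adds one path of resolved frame names, the last node on the path receives the self weight, and total weights are summed over subtrees. A compact sketch of that bookkeeping (illustrative only, not the module's API):

  // Call-tree bookkeeping as described by the deleted CallTree comments above
  // (minimal sketch; class and function names are illustrative).
  class TreeNode {
    constructor(label) {
      this.label = label;
      this.selfWeight = 0;        // ticks for which this node was the last node in the path
      this.totalWeight = 0;       // selfWeight plus the weights of all descendants
      this.children = new Map();
    }
    findOrAddChild(label) {
      if (!this.children.has(label)) this.children.set(label, new TreeNode(label));
      return this.children.get(label);
    }
    computeTotalWeight() {
      let total = this.selfWeight;
      for (const child of this.children.values()) total += child.computeTotalWeight();
      return this.totalWeight = total;
    }
  }
  function addPath(root, path) {
    let node = root;
    for (const label of path) node = node.findOrAddChild(label);
    node.selfWeight++;            // one tick landed at the end of this path
  }
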
diff --git a/deps/v8/tools/profile.mjs b/deps/v8/tools/profile.mjs
index f443740324..f4be41e2da 100644
--- a/deps/v8/tools/profile.mjs
+++ b/deps/v8/tools/profile.mjs
@@ -43,7 +43,7 @@ export class SourcePosition {
}
toString() {
- return `${this.script.name}:${this.line}:${this.column}`;
+ return `${this.script.name}:${this.line}:${this.column}`;
}
toStringLong() {
@@ -79,7 +79,7 @@ export class Script {
addSourcePosition(line, column, entry) {
let sourcePosition = this.lineToColumn.get(line)?.get(column);
if (sourcePosition === undefined) {
- sourcePosition = new SourcePosition(this, line, column, )
+ sourcePosition = new SourcePosition(this, line, column,)
this._addSourcePosition(line, column, sourcePosition);
}
sourcePosition.addEntry(entry);
@@ -110,13 +110,13 @@ export class Script {
class SourceInfo {
- script;
- start;
- end;
- positions;
- inlined ;
- fns;
- disassemble;
+ script;
+ start;
+ end;
+ positions;
+ inlined;
+ fns;
+ disassemble;
setSourcePositionInfo(script, startPos, endPos, sourcePositionTable, inliningPositions, inlinedFunctions) {
this.script = script;
@@ -181,9 +181,10 @@ export class Profile {
static CodeState = {
COMPILED: 0,
IGNITION: 1,
- NATIVE_CONTEXT_INDEPENDENT: 2,
- TURBOPROP: 3,
- TURBOFAN: 4,
+ BASELINE: 2,
+ NATIVE_CONTEXT_INDEPENDENT: 3,
+ TURBOPROP: 4,
+ TURBOFAN: 5,
}
/**
@@ -195,6 +196,8 @@ export class Profile {
return this.CodeState.COMPILED;
case '~':
return this.CodeState.IGNITION;
+ case '^':
+ return this.CodeState.BASELINE;
case '-':
return this.CodeState.NATIVE_CONTEXT_INDEPENDENT;
case '+':
@@ -210,6 +213,8 @@ export class Profile {
return "Builtin";
} else if (state === this.CodeState.IGNITION) {
return "Unopt";
+ } else if (state === this.CodeState.BASELINE) {
+ return "Baseline";
} else if (state === this.CodeState.NATIVE_CONTEXT_INDEPENDENT) {
return "NCI";
} else if (state === this.CodeState.TURBOPROP) {
@@ -232,7 +237,7 @@ export class Profile {
   * during stack trace processing, specifies a position of the frame
* containing the address.
*/
- handleUnknownCode(operation, addr, opt_stackPos) {}
+ handleUnknownCode(operation, addr, opt_stackPos) { }
/**
* Registers a library.
@@ -255,7 +260,7 @@ export class Profile {
* @param {number} endAddr Ending address.
*/
addStaticCode(name, startAddr, endAddr) {
- const entry = new CodeEntry(endAddr - startAddr, name, 'CPP');
+ const entry = new CodeEntry(endAddr - startAddr, name, 'CPP');
this.codeMap_.addStaticCode(startAddr, entry);
return entry;
}
@@ -370,8 +375,8 @@ export class Profile {
}
this.getOrCreateSourceInfo(entry).setSourcePositionInfo(
- script, startPos, endPos, sourcePositionTable, inliningPositions,
- inlinedFunctions);
+ script, startPos, endPos, sourcePositionTable, inliningPositions,
+ inlinedFunctions);
}
addDisassemble(start, kind, disassemble) {
@@ -646,7 +651,7 @@ class DynamicCodeEntry extends CodeEntry {
constructor(size, type, name) {
super(size, name, type);
}
-
+
getName() {
return this.type + ': ' + this.name;
}
@@ -693,7 +698,7 @@ class DynamicFuncCodeEntry extends CodeEntry {
return this.source?.getSourceCode();
}
- static STATE_PREFIX = ["", "~", "-", "+", "*"];
+ static STATE_PREFIX = ["", "~", "^", "-", "+", "*"];
getState() {
return DynamicFuncCodeEntry.STATE_PREFIX[this.state];
}
@@ -726,7 +731,7 @@ class DynamicFuncCodeEntry extends CodeEntry {
* @constructor
*/
class FunctionEntry extends CodeEntry {
-
+
// Contains the list of generated code for this function.
_codeEntries = new Set();
@@ -754,7 +759,7 @@ class FunctionEntry extends CodeEntry {
getName() {
let name = this.name;
if (name.length == 0) {
- return '<anonymous>';
+ return '<anonymous>';
} else if (name.charAt(0) == ' ') {
// An anonymous function with location: " aaa.js:10".
return `<anonymous>${name}`;
@@ -888,7 +893,7 @@ class CallTree {
* @param {function(CallTreeNode)} exit A function called
* after visiting node's children.
*/
- traverseInDepth(enter, exit) {
+ traverseInDepth(enter, exit) {
function traverse(node) {
enter(node);
node.forEachChild(traverse);
@@ -905,7 +910,7 @@ class CallTree {
* @param {string} label Node label.
* @param {CallTreeNode} opt_parent Node parent.
*/
- class CallTreeNode {
+class CallTreeNode {
/**
* Node self weight (how many times this node was the last node in
* a call path).
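
Above, profile.mjs gains a third tier between interpreted and optimized code: code-creation log lines whose name carries the '^' marker are now classified as baseline code, and the enum values after it shift up by one. A self-contained sketch of how the marker and the CodeState value line up (sketch only, not the module's API):

  // Marker/state mapping after this change (values taken from the hunks above;
  // the parse helper is an illustrative sketch).
  const CodeState = { COMPILED: 0, IGNITION: 1, BASELINE: 2,
                      NATIVE_CONTEXT_INDEPENDENT: 3, TURBOPROP: 4, TURBOFAN: 5 };
  const STATE_PREFIX = ['', '~', '^', '-', '+', '*'];  // as in DynamicFuncCodeEntry

  function parseState(marker) {
    const state = STATE_PREFIX.indexOf(marker);
    if (state < 0) throw new Error(`unknown code state marker: ${marker}`);
    return state;
  }
  // parseState('^') === CodeState.BASELINE, so '^'-prefixed names in the log
  // are reported with kind "Baseline".
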
diff --git a/deps/v8/tools/splaytree.js b/deps/v8/tools/splaytree.js
deleted file mode 100644
index d272a9e182..0000000000
--- a/deps/v8/tools/splaytree.js
+++ /dev/null
@@ -1,327 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-/**
- * Constructs a Splay tree. A splay tree is a self-balancing binary
- * search tree with the additional property that recently accessed
- * elements are quick to access again. It performs basic operations
- * such as insertion, look-up and removal in O(log(n)) amortized time.
- *
- * @constructor
- */
-function SplayTree() {
-};
-
-
-/**
- * Pointer to the root node of the tree.
- *
- * @type {SplayTree.Node}
- * @private
- */
-SplayTree.prototype.root_ = null;
-
-
-/**
- * @return {boolean} Whether the tree is empty.
- */
-SplayTree.prototype.isEmpty = function() {
- return !this.root_;
-};
-
-
-
-/**
- * Inserts a node into the tree with the specified key and value if
- * the tree does not already contain a node with the specified key. If
- * the value is inserted, it becomes the root of the tree.
- *
- * @param {number} key Key to insert into the tree.
- * @param {*} value Value to insert into the tree.
- */
-SplayTree.prototype.insert = function(key, value) {
- if (this.isEmpty()) {
- this.root_ = new SplayTree.Node(key, value);
- return;
- }
- // Splay on the key to move the last node on the search path for
- // the key to the root of the tree.
- this.splay_(key);
- if (this.root_.key == key) {
- return;
- }
- var node = new SplayTree.Node(key, value);
- if (key > this.root_.key) {
- node.left = this.root_;
- node.right = this.root_.right;
- this.root_.right = null;
- } else {
- node.right = this.root_;
- node.left = this.root_.left;
- this.root_.left = null;
- }
- this.root_ = node;
-};
-
-
-/**
- * Removes a node with the specified key from the tree if the tree
- * contains a node with this key. The removed node is returned. If the
- * key is not found, an exception is thrown.
- *
- * @param {number} key Key to find and remove from the tree.
- * @return {SplayTree.Node} The removed node.
- */
-SplayTree.prototype.remove = function(key) {
- if (this.isEmpty()) {
- throw Error('Key not found: ' + key);
- }
- this.splay_(key);
- if (this.root_.key != key) {
- throw Error('Key not found: ' + key);
- }
- var removed = this.root_;
- if (!this.root_.left) {
- this.root_ = this.root_.right;
- } else {
- var right = this.root_.right;
- this.root_ = this.root_.left;
- // Splay to make sure that the new root has an empty right child.
- this.splay_(key);
- // Insert the original right child as the right child of the new
- // root.
- this.root_.right = right;
- }
- return removed;
-};
-
-
-/**
- * Returns the node having the specified key or null if the tree doesn't contain
- * a node with the specified key.
- *
- * @param {number} key Key to find in the tree.
- * @return {SplayTree.Node} Node having the specified key.
- */
-SplayTree.prototype.find = function(key) {
- if (this.isEmpty()) {
- return null;
- }
- this.splay_(key);
- return this.root_.key == key ? this.root_ : null;
-};
-
-
-/**
- * @return {SplayTree.Node} Node having the minimum key value.
- */
-SplayTree.prototype.findMin = function() {
- if (this.isEmpty()) {
- return null;
- }
- var current = this.root_;
- while (current.left) {
- current = current.left;
- }
- return current;
-};
-
-
-/**
- * @return {SplayTree.Node} Node having the maximum key value.
- */
-SplayTree.prototype.findMax = function(opt_startNode) {
- if (this.isEmpty()) {
- return null;
- }
- var current = opt_startNode || this.root_;
- while (current.right) {
- current = current.right;
- }
- return current;
-};
-
-
-/**
- * @return {SplayTree.Node} Node having the maximum key value that
- * is less or equal to the specified key value.
- */
-SplayTree.prototype.findGreatestLessThan = function(key) {
- if (this.isEmpty()) {
- return null;
- }
- // Splay on the key to move the node with the given key or the last
- // node on the search path to the top of the tree.
- this.splay_(key);
- // Now the result is either the root node or the greatest node in
- // the left subtree.
- if (this.root_.key <= key) {
- return this.root_;
- } else if (this.root_.left) {
- return this.findMax(this.root_.left);
- } else {
- return null;
- }
-};
-
-
-/**
- * @return {Array<*>} An array containing all the values of tree's nodes paired
- * with keys.
- */
-SplayTree.prototype.exportKeysAndValues = function() {
- var result = [];
- this.traverse_(function(node) { result.push([node.key, node.value]); });
- return result;
-};
-
-
-/**
- * @return {Array<*>} An array containing all the values of tree's nodes.
- */
-SplayTree.prototype.exportValues = function() {
- var result = [];
- this.traverse_(function(node) { result.push(node.value); });
- return result;
-};
-
-
-/**
- * Perform the splay operation for the given key. Moves the node with
- * the given key to the top of the tree. If no node has the given
- * key, the last node on the search path is moved to the top of the
- * tree. This is the simplified top-down splaying algorithm from:
- * "Self-adjusting Binary Search Trees" by Sleator and Tarjan
- *
- * @param {number} key Key to splay the tree on.
- * @private
- */
-SplayTree.prototype.splay_ = function(key) {
- if (this.isEmpty()) {
- return;
- }
- // Create a dummy node. The use of the dummy node is a bit
- // counter-intuitive: The right child of the dummy node will hold
- // the L tree of the algorithm. The left child of the dummy node
- // will hold the R tree of the algorithm. Using a dummy node, left
- // and right will always be nodes and we avoid special cases.
- var dummy, left, right;
- dummy = left = right = new SplayTree.Node(null, null);
- var current = this.root_;
- while (true) {
- if (key < current.key) {
- if (!current.left) {
- break;
- }
- if (key < current.left.key) {
- // Rotate right.
- var tmp = current.left;
- current.left = tmp.right;
- tmp.right = current;
- current = tmp;
- if (!current.left) {
- break;
- }
- }
- // Link right.
- right.left = current;
- right = current;
- current = current.left;
- } else if (key > current.key) {
- if (!current.right) {
- break;
- }
- if (key > current.right.key) {
- // Rotate left.
- var tmp = current.right;
- current.right = tmp.left;
- tmp.left = current;
- current = tmp;
- if (!current.right) {
- break;
- }
- }
- // Link left.
- left.right = current;
- left = current;
- current = current.right;
- } else {
- break;
- }
- }
- // Assemble.
- left.right = current.left;
- right.left = current.right;
- current.left = dummy.right;
- current.right = dummy.left;
- this.root_ = current;
-};
-
-
-/**
- * Performs a preorder traversal of the tree.
- *
- * @param {function(SplayTree.Node)} f Visitor function.
- * @private
- */
-SplayTree.prototype.traverse_ = function(f) {
- var nodesToVisit = [this.root_];
- while (nodesToVisit.length > 0) {
- var node = nodesToVisit.shift();
- if (node == null) {
- continue;
- }
- f(node);
- nodesToVisit.push(node.left);
- nodesToVisit.push(node.right);
- }
-};
-
-
-/**
- * Constructs a Splay tree node.
- *
- * @param {number} key Key.
- * @param {*} value Value.
- */
-SplayTree.Node = function(key, value) {
- this.key = key;
- this.value = value;
-};
-
-
-/**
- * @type {SplayTree.Node}
- */
-SplayTree.Node.prototype.left = null;
-
-
-/**
- * @type {SplayTree.Node}
- */
-SplayTree.Node.prototype.right = null;
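 The file removed above implemented the self-adjusting binary search tree the
 log-processing tools use to look up code entries by address; an ES-module
 equivalent is assumed to cover the remaining consumers. A short sketch of the
 API as defined in the deleted code (the keys are illustrative addresses):

   const tree = new SplayTree();
   tree.insert(0x1000, 'libv8.so');
   tree.insert(0x2000, 'd8');
   tree.find(0x2000).value;            // 'd8'; find() splays the hit to the root
   tree.findGreatestLessThan(0x1fff);  // node for 0x1000: nearest key at or below
   tree.exportValues();                // all stored values, in traversal order
   tree.remove(0x1000);                // returns the removed node; throws if absent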
diff --git a/deps/v8/tools/system-analyzer/index.html b/deps/v8/tools/system-analyzer/index.html
index fa09830240..849e72b363 100644
--- a/deps/v8/tools/system-analyzer/index.html
+++ b/deps/v8/tools/system-analyzer/index.html
@@ -175,7 +175,7 @@ found in the LICENSE file. -->
<dd>Enable all V8 logging options.</dd>
<dt>
<a href="https://source.chromium.org/search?q=FLAG_trace_maps">
- <code>--trace-maps</code>
+ <code>--log-maps</code>
</a>
</dt>
<dd>
@@ -183,7 +183,7 @@ found in the LICENSE file. -->
</dd>
<dt>
<a href="https://source.chromium.org/search?q=FLAG_trace_ic">
- <code>--trace-ic</code>
+ <code>--log-ic</code>
</a>
</dt>
<dd>
@@ -206,6 +206,12 @@ found in the LICENSE file. -->
<code>--log-api</code>
</a>
</dt>
 <dd>Log various API uses.</dd>
+ <dt>
+ <a href="https://source.chromium.org/search?q=FLAG_log_deopt">
+ <code>--log-deopt</code>
+ </a>
+ </dt>
+ <dd>Log details about deoptimized code</dd>
</dl>
diff --git a/deps/v8/tools/system-analyzer/index.mjs b/deps/v8/tools/system-analyzer/index.mjs
index 531ae79138..4b0ed9f9c5 100644
--- a/deps/v8/tools/system-analyzer/index.mjs
+++ b/deps/v8/tools/system-analyzer/index.mjs
@@ -248,7 +248,7 @@ class App {
focusSourcePosition(sourcePosition) {
if (!sourcePosition) return;
- this._view.sourcePanel.focusedSourcePositions = [sourcePosition];
+ this._view.scriptPanel.focusedSourcePositions = [sourcePosition];
}
handleToolTip(event) {
diff --git a/deps/v8/tools/system-analyzer/view/log-file-reader-template.html b/deps/v8/tools/system-analyzer/view/log-file-reader-template.html
index e54d45990a..478e08129c 100644
--- a/deps/v8/tools/system-analyzer/view/log-file-reader-template.html
+++ b/deps/v8/tools/system-analyzer/view/log-file-reader-template.html
@@ -44,6 +44,7 @@ found in the LICENSE file. -->
.loading #loader {
display: block;
position: fixed;
+ z-index: 9999;
top: 0px;
left: 0px;
width: 100%;
diff --git a/deps/v8/tools/system-analyzer/view/timeline/timeline-track-template.html b/deps/v8/tools/system-analyzer/view/timeline/timeline-track-template.html
index b27ad66b59..bcab2d5349 100644
--- a/deps/v8/tools/system-analyzer/view/timeline/timeline-track-template.html
+++ b/deps/v8/tools/system-analyzer/view/timeline/timeline-track-template.html
@@ -76,7 +76,7 @@ found in the LICENSE file. -->
font-weight: 400;
}
- .panelCloserInput:checked ~ h3 {
+ .panelCloserInput:checked ~ h3 {
display: inherit;
flex: 1;
writing-mode: unset;
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index d3674a4f8b..c040912fc9 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -113,6 +113,7 @@ SLOW_ARCHS = [
"mips64el",
"s390",
"s390x",
+ "riscv64"
]
@@ -169,6 +170,7 @@ class BuildConfig(object):
self.asan = build_config['is_asan']
self.cfi_vptr = build_config['is_cfi']
+ self.control_flow_integrity = build_config['v8_control_flow_integrity']
self.concurrent_marking = build_config['v8_enable_concurrent_marking']
self.dcheck_always_on = build_config['dcheck_always_on']
self.gcov_coverage = build_config['is_gcov_coverage']
@@ -187,6 +189,7 @@ class BuildConfig(object):
self.verify_csa = build_config['v8_enable_verify_csa']
self.lite_mode = build_config['v8_enable_lite_mode']
self.pointer_compression = build_config['v8_enable_pointer_compression']
+ self.webassembly = build_config['v8_enable_webassembly']
# Export only for MIPS target
if self.arch in ['mips', 'mipsel', 'mips64', 'mips64el']:
self.mips_arch_variant = build_config['mips_arch_variant']
@@ -204,6 +207,8 @@ class BuildConfig(object):
detected_options.append('asan')
if self.cfi_vptr:
detected_options.append('cfi_vptr')
+ if self.control_flow_integrity:
+ detected_options.append('control_flow_integrity')
if self.dcheck_always_on:
detected_options.append('dcheck_always_on')
if self.gcov_coverage:
@@ -224,6 +229,8 @@ class BuildConfig(object):
detected_options.append('lite_mode')
if self.pointer_compression:
detected_options.append('pointer_compression')
+ if self.webassembly:
+ detected_options.append('webassembly')
return '\n'.join(detected_options)
@@ -351,9 +358,6 @@ class BaseTestRunner(object):
help="Path to a file for storing json results.")
parser.add_option('--slow-tests-cutoff', type="int", default=100,
help='Collect N slowest tests')
- parser.add_option("--junitout", help="File name of the JUnit output")
- parser.add_option("--junittestsuite", default="v8tests",
- help="The testsuite name in the JUnit output file")
parser.add_option("--exit-after-n-failures", type="int", default=100,
help="Exit after the first N failures instead of "
"running all tests. Pass 0 to disable this feature.")
@@ -634,11 +638,24 @@ class BaseTestRunner(object):
self.build_config.arch in ['mipsel', 'mips', 'mips64', 'mips64el'] and
self.build_config.mips_arch_variant)
+ no_simd_sse = any(
+ i in options.extra_flags for i in ['--noenable-sse3',
+ '--no-enable-sse3',
+ '--noenable-ssse3',
+ '--no-enable-ssse3',
+ '--noenable-sse4-1',
+ '--no-enable-sse4_1'])
+
+ # Set no_simd_sse on architectures without SIMD enabled.
+ if self.build_config.arch == 'ppc64':
+ no_simd_sse = True
+
return {
"arch": self.build_config.arch,
"asan": self.build_config.asan,
"byteorder": sys.byteorder,
"cfi_vptr": self.build_config.cfi_vptr,
+ "control_flow_integrity": self.build_config.control_flow_integrity,
"concurrent_marking": self.build_config.concurrent_marking,
"dcheck_always_on": self.build_config.dcheck_always_on,
"deopt_fuzzer": False,
@@ -646,6 +663,7 @@ class BaseTestRunner(object):
"gc_fuzzer": False,
"gc_stress": False,
"gcov_coverage": self.build_config.gcov_coverage,
+ "has_webassembly": self.build_config.webassembly,
"isolates": options.isolates,
"is_clang": self.build_config.is_clang,
"is_full_debug": self.build_config.is_full_debug,
@@ -654,6 +672,7 @@ class BaseTestRunner(object):
"msan": self.build_config.msan,
"no_harness": options.no_harness,
"no_i18n": self.build_config.no_i18n,
+ "no_simd_sse": no_simd_sse,
"novfp3": False,
"optimize_for_size": "--optimize-for-size" in options.extra_flags,
"predictable": self.build_config.predictable,
@@ -760,9 +779,6 @@ class BaseTestRunner(object):
def _create_progress_indicators(self, test_count, options):
procs = [PROGRESS_INDICATORS[options.progress]()]
- if options.junitout:
- procs.append(progress.JUnitTestProgressIndicator(options.junitout,
- options.junittestsuite))
if options.json_test_results:
procs.append(progress.JsonTestProgressIndicator(self.framework_name))
diff --git a/deps/v8/tools/testrunner/local/junit_output.py b/deps/v8/tools/testrunner/local/junit_output.py
deleted file mode 100644
index 52f31ec422..0000000000
--- a/deps/v8/tools/testrunner/local/junit_output.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import xml.etree.ElementTree as xml
-
-
-class JUnitTestOutput:
- def __init__(self, test_suite_name):
- self.root = xml.Element("testsuite")
- self.root.attrib["name"] = test_suite_name
-
- def HasRunTest(self, test_name, test_cmd, test_duration, test_failure):
- testCaseElement = xml.Element("testcase")
- testCaseElement.attrib["name"] = test_name
- testCaseElement.attrib["cmd"] = test_cmd
- testCaseElement.attrib["time"] = str(round(test_duration, 3))
- if len(test_failure):
- failureElement = xml.Element("failure")
- failureElement.text = test_failure
- testCaseElement.append(failureElement)
- self.root.append(testCaseElement)
-
- def FinishAndWrite(self, f):
- xml.ElementTree(self.root).write(f, "UTF-8")
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index 854abc6655..6c2cc01fb8 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -63,7 +63,7 @@ VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "big", "little", "android",
"arm", "arm64", "ia32", "mips", "mipsel", "mips64", "mips64el",
"x64", "ppc", "ppc64", "s390", "s390x", "macos", "windows",
- "linux", "aix", "r1", "r2", "r3", "r5", "r6"]:
+ "linux", "aix", "r1", "r2", "r3", "r5", "r6", "riscv64"]:
VARIABLES[var] = var
# Allow using variants as keywords.
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index 69ca853de3..595f7e27f4 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -14,14 +14,9 @@ ALL_VARIANT_FLAGS = {
"interpreted_regexp": [["--regexp-interpret-all"]],
"experimental_regexp": [["--default-to-experimental-regexp-engine"]],
"jitless": [["--jitless"]],
+ "sparkplug": [["--sparkplug"]],
"minor_mc": [["--minor-mc"]],
- "nci": [["--turbo-nci"]],
- "nci_as_midtier": [["--turbo-nci-as-midtier"]],
"no_lfa": [["--no-lazy-feedback-allocation"]],
- "no_local_heaps": [[
- "--no-local-heaps",
- "--no-turbo-direct-heap-access",
- "--no-finalize-streaming-on-background"]],
# No optimization means disable all optimizations. OptimizeFunctionOnNextCall
# would not force optimization too. It turns into a Nop. Please see
# https://chromium-review.googlesource.com/c/452620/ for more discussion.
@@ -33,6 +28,7 @@ ALL_VARIANT_FLAGS = {
"slow_path": [["--force-slow-path"]],
"stress": [["--stress-opt", "--no-liftoff", "--stress-lazy-source-positions"]],
"stress_concurrent_allocation": [["--stress-concurrent-allocation"]],
+ "stress_concurrent_inlining": [["--stress-concurrent-inlining"]],
"stress_js_bg_compile_wasm_code_gc": [["--stress-background-compile",
"--finalize-streaming-on-background",
"--stress-wasm-code-gc"]],
@@ -43,6 +39,7 @@ ALL_VARIANT_FLAGS = {
"trusted": [["--no-untrusted-code-mitigations"]],
"no_wasm_traps": [["--no-wasm-trap-handler"]],
"turboprop": [["--turboprop"]],
+ "turboprop_as_toptier": [["--turboprop-as-toptier"]],
"instruction_scheduling": [["--turbo-instruction-scheduling"]],
"stress_instruction_scheduling": [["--turbo-stress-instruction-scheduling"]],
"top_level_await": [["--harmony-top-level-await"]],
@@ -53,17 +50,20 @@ ALL_VARIANT_FLAGS = {
# implications defined in flag-definitions.h.
INCOMPATIBLE_FLAGS_PER_VARIANT = {
"assert_types": ["--no-assert-types"],
- "jitless": ["--opt", "--always-opt", "--liftoff", "--track-field-types", "--validate-asm"],
+ "jitless": ["--opt", "--always-opt", "--liftoff", "--track-field-types", "--validate-asm", "--sparkplug", "--always-sparkplug"],
"no_wasm_traps": ["--wasm-trap-handler"],
"nooptimization": ["--opt", "--always-opt", "--no-liftoff", "--wasm-tier-up"],
"slow_path": ["--no-force-slow-path"],
"stress_concurrent_allocation": ["--single-threaded-gc", "--predictable"],
+ "stress_concurrent_inlining": ["--single-threaded", "--predictable", "--no-turbo-direct-heap-access"],
"stress_incremental_marking": ["--no-stress-incremental-marking"],
- "future": ["--parallel-compile-tasks"],
+ "future": ["--parallel-compile-tasks", "--no-turbo-direct-heap-access"],
"stress_js_bg_compile_wasm_code_gc": ["--no-stress-background-compile", "--parallel-compile-tasks"],
"stress": ["--no-stress-opt", "--always-opt", "--no-always-opt", "--liftoff", "--max-inlined-bytecode-size=*",
"--max-inlined-bytecode-size-cumulative=*", "--stress-inline"],
- "turboprop": ["--interrupt-budget=*", "--no-turboprop"],
+ "sparkplug": ["--jitless"],
+ "turboprop": ["--interrupt-budget=*", "--no-turbo-direct-heap-access", "--no-turboprop"],
+ "turboprop_as_toptier": ["--interrupt-budget=*", "--no-turbo-direct-heap-access", "--no-turboprop", "--no-turboprop-as-toptier"],
"code_serializer": ["--cache=after-execute", "--cache=full-code-cache", "--cache=none"],
"no_local_heaps": ["--concurrent-inlining", "--turboprop"],
"experimental_regexp": ["--no-enable-experimental-regexp-engine", "--no-default-to-experimental-regexp-engine"],
@@ -96,8 +96,9 @@ INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG = {
"--no-enable-sse4-1": ["--enable-sse4-1"],
"--optimize-for-size": ["--max-semi-space-size=*"],
"--stress_concurrent_allocation": ["--single-threaded-gc", "--predictable"],
+ "--stress_concurrent_inlining": ["--single-threaded", "--predictable"],
"--stress-flush-bytecode": ["--no-stress-flush-bytecode"],
- "--future": ["--parallel-compile-tasks"],
+ "--future": ["--parallel-compile-tasks", "--no-turbo-direct-heap-access"],
"--stress-incremental-marking": INCOMPATIBLE_FLAGS_PER_VARIANT["stress_incremental_marking"],
}
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
index ff58391110..f3551d01b8 100755
--- a/deps/v8/tools/testrunner/standard_runner.py
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -46,7 +46,7 @@ VARIANT_ALIASES = {
'exhaustive': MORE_VARIANTS + VARIANTS,
# Additional variants, run on a subset of bots.
'extra': ['nooptimization', 'future', 'no_wasm_traps', 'turboprop',
- 'instruction_scheduling'],
+ 'instruction_scheduling', 'turboprop_as_toptier'],
}
# Extra flags passed to all tests using the standard test runner.
diff --git a/deps/v8/tools/testrunner/testproc/fuzzer.py b/deps/v8/tools/testrunner/testproc/fuzzer.py
index b802368183..965ba23d04 100644
--- a/deps/v8/tools/testrunner/testproc/fuzzer.py
+++ b/deps/v8/tools/testrunner/testproc/fuzzer.py
@@ -21,9 +21,11 @@ EXTRA_FLAGS = [
(0.1, '--interrupt-budget=100'),
(0.1, '--liftoff'),
(0.2, '--no-analyze-environment-liveness'),
- (0.1, '--no-enable-sse3'),
- (0.1, '--no-enable-ssse3'),
- (0.1, '--no-enable-sse4_1'),
+ # TODO(machenbach): Enable when it doesn't collide with crashing on missing
+ # simd features.
+ #(0.1, '--no-enable-sse3'),
+ #(0.1, '--no-enable-ssse3'),
+ #(0.1, '--no-enable-sse4_1'),
(0.1, '--no-enable-sse4_2'),
(0.1, '--no-enable-sahf'),
(0.1, '--no-enable-avx'),
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index 634ef7c2f2..9ff943a5c2 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -15,7 +15,6 @@ import time
from . import base
from . import util
-from ..local import junit_output
def print_failure_header(test):
@@ -349,45 +348,6 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
print(("\r" + (" " * last_length) + "\r"), end='')
-class JUnitTestProgressIndicator(ProgressIndicator):
- def __init__(self, junitout, junittestsuite):
- super(JUnitTestProgressIndicator, self).__init__()
- self._requirement = base.DROP_PASS_STDOUT
-
- self.outputter = junit_output.JUnitTestOutput(junittestsuite)
- if junitout:
- self.outfile = open(junitout, "w")
- else:
- self.outfile = sys.stdout
-
- def _on_result_for(self, test, result):
- # TODO(majeski): Support for dummy/grouped results
- fail_text = ""
- output = result.output
- if result.has_unexpected_output:
- stdout = output.stdout.strip()
- if len(stdout):
- fail_text += "stdout:\n%s\n" % stdout
- stderr = output.stderr.strip()
- if len(stderr):
- fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % result.cmd.to_string()
- if output.HasCrashed():
- fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
- if output.HasTimedOut():
- fail_text += "--- TIMEOUT ---"
- self.outputter.HasRunTest(
- test_name=str(test),
- test_cmd=result.cmd.to_string(relative=True),
- test_duration=output.duration,
- test_failure=fail_text)
-
- def finished(self):
- self.outputter.FinishAndWrite(self.outfile)
- if self.outfile != sys.stdout:
- self.outfile.close()
-
-
class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, framework_name):
super(JsonTestProgressIndicator, self).__init__()
diff --git a/deps/v8/tools/tickprocessor-driver.js b/deps/v8/tools/tickprocessor-driver.js
deleted file mode 100644
index d0e21785ce..0000000000
--- a/deps/v8/tools/tickprocessor-driver.js
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// Tick Processor's code flow.
-
-function processArguments(args) {
- var processor = new ArgumentsProcessor(args);
- if (processor.parse()) {
- return processor.result();
- } else {
- processor.printUsageAndExit();
- }
-}
-
-function initSourceMapSupport() {
- // Pull dev tools source maps into our name space.
- SourceMap = WebInspector.SourceMap;
-
- // Overwrite the load function to load scripts synchronously.
- SourceMap.load = function(sourceMapURL) {
- var content = readFile(sourceMapURL);
- var sourceMapObject = (JSON.parse(content));
- return new SourceMap(sourceMapURL, sourceMapObject);
- };
-}
-
-var entriesProviders = {
- 'unix': UnixCppEntriesProvider,
- 'windows': WindowsCppEntriesProvider,
- 'mac': MacCppEntriesProvider
-};
-
-var params = processArguments(arguments);
-var sourceMap = null;
-if (params.sourceMap) {
- initSourceMapSupport();
- sourceMap = SourceMap.load(params.sourceMap);
-}
-var tickProcessor = new TickProcessor(
- new (entriesProviders[params.platform])(params.nm, params.objdump, params.targetRootFS,
- params.apkEmbeddedLibrary),
- params.separateIc,
- params.separateBytecodes,
- params.separateBuiltins,
- params.separateStubs,
- params.callGraphSize,
- params.ignoreUnknown,
- params.stateFilter,
- params.distortion,
- params.range,
- sourceMap,
- params.timedRange,
- params.pairwiseTimedRange,
- params.onlySummary,
- params.runtimeTimerFilter,
- params.preprocessJson);
-tickProcessor.processLogFile(params.logFileName);
-tickProcessor.printStatistics();
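 The deleted driver above was the glue that turned command-line arguments into a
 configured TickProcessor; the processor itself carries on in tickprocessor.mjs
 below. A condensed sketch of the symbol pipeline it set up (identifiers are the
 ones defined in the removed files; params is the object returned by
 processArguments, and libStart, libEnd and profile stand in for values taken
 from the shared-library log line):

   const Provider = { unix: UnixCppEntriesProvider,
                      windows: WindowsCppEntriesProvider,
                      mac: MacCppEntriesProvider }[params.platform];
   const provider = new Provider(params.nm, params.objdump,
                                 params.targetRootFS, params.apkEmbeddedLibrary);
   // Every C++ symbol found by nm/objdump (or a .map file on Windows) is added
   // to the profile as static code, so ticks landing in it can be attributed.
   provider.parseVmSymbols('/usr/lib/libv8.so', libStart, libEnd, 0 /* slide */,
       (name, start, end) => profile.addStaticCode(name, start, end));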
diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js
deleted file mode 100644
index 2a5b9af83c..0000000000
--- a/deps/v8/tools/tickprocessor.js
+++ /dev/null
@@ -1,977 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-function inherits(childCtor, parentCtor) {
- childCtor.prototype.__proto__ = parentCtor.prototype;
-};
-
-
-function V8Profile(separateIc, separateBytecodes, separateBuiltins,
- separateStubs) {
- Profile.call(this);
- var regexps = [];
- if (!separateIc) regexps.push(V8Profile.IC_RE);
- if (!separateBytecodes) regexps.push(V8Profile.BYTECODES_RE);
- if (!separateBuiltins) regexps.push(V8Profile.BUILTINS_RE);
- if (!separateStubs) regexps.push(V8Profile.STUBS_RE);
- if (regexps.length > 0) {
- this.skipThisFunction = function(name) {
- for (var i=0; i<regexps.length; i++) {
- if (regexps[i].test(name)) return true;
- }
- return false;
- };
- }
-};
-inherits(V8Profile, Profile);
-
-
-V8Profile.IC_RE =
- /^(LoadGlobalIC: )|(Handler: )|(?:CallIC|LoadIC|StoreIC)|(?:Builtin: (?:Keyed)?(?:Load|Store)IC_)/;
-V8Profile.BYTECODES_RE = /^(BytecodeHandler: )/
-V8Profile.BUILTINS_RE = /^(Builtin: )/
-V8Profile.STUBS_RE = /^(Stub: )/
-
-
-/**
- * A thin wrapper around shell's 'read' function showing a file name on error.
- */
-function readFile(fileName) {
- try {
- return read(fileName);
- } catch (e) {
- printErr(fileName + ': ' + (e.message || e));
- throw e;
- }
-}
-
-
-/**
- * Parser for dynamic code optimization state.
- */
-function parseState(s) {
- switch (s) {
- case "": return Profile.CodeState.COMPILED;
- case "~": return Profile.CodeState.OPTIMIZABLE;
- case "*": return Profile.CodeState.OPTIMIZED;
- }
- throw new Error("unknown code state: " + s);
-}
-
-
-function TickProcessor(
- cppEntriesProvider,
- separateIc,
- separateBytecodes,
- separateBuiltins,
- separateStubs,
- callGraphSize,
- ignoreUnknown,
- stateFilter,
- distortion,
- range,
- sourceMap,
- timedRange,
- pairwiseTimedRange,
- onlySummary,
- runtimeTimerFilter,
- preprocessJson) {
- this.preprocessJson = preprocessJson;
- LogReader.call(this, {
- 'shared-library': { parsers: [parseString, parseInt, parseInt, parseInt],
- processor: this.processSharedLibrary },
- 'code-creation': {
- parsers: [parseString, parseInt, parseInt, parseInt, parseInt,
- parseString, parseVarArgs],
- processor: this.processCodeCreation },
- 'code-deopt': {
- parsers: [parseInt, parseInt, parseInt, parseInt, parseInt,
- parseString, parseString, parseString],
- processor: this.processCodeDeopt },
- 'code-move': { parsers: [parseInt, parseInt, ],
- processor: this.processCodeMove },
- 'code-delete': { parsers: [parseInt],
- processor: this.processCodeDelete },
- 'code-source-info': {
- parsers: [parseInt, parseInt, parseInt, parseInt, parseString,
- parseString, parseString],
- processor: this.processCodeSourceInfo },
- 'script-source': {
- parsers: [parseInt, parseString, parseString],
- processor: this.processScriptSource },
- 'sfi-move': { parsers: [parseInt, parseInt],
- processor: this.processFunctionMove },
- 'active-runtime-timer': {
- parsers: [parseString],
- processor: this.processRuntimeTimerEvent },
- 'tick': {
- parsers: [parseInt, parseInt, parseInt,
- parseInt, parseInt, parseVarArgs],
- processor: this.processTick },
- 'heap-sample-begin': { parsers: [parseString, parseString, parseInt],
- processor: this.processHeapSampleBegin },
- 'heap-sample-end': { parsers: [parseString, parseString],
- processor: this.processHeapSampleEnd },
- 'timer-event-start' : { parsers: [parseString, parseString, parseString],
- processor: this.advanceDistortion },
- 'timer-event-end' : { parsers: [parseString, parseString, parseString],
- processor: this.advanceDistortion },
- // Ignored events.
- 'profiler': null,
- 'function-creation': null,
- 'function-move': null,
- 'function-delete': null,
- 'heap-sample-item': null,
- 'current-time': null, // Handled specially, not parsed.
- // Obsolete row types.
- 'code-allocate': null,
- 'begin-code-region': null,
- 'end-code-region': null },
- timedRange,
- pairwiseTimedRange);
-
- this.cppEntriesProvider_ = cppEntriesProvider;
- this.callGraphSize_ = callGraphSize;
- this.ignoreUnknown_ = ignoreUnknown;
- this.stateFilter_ = stateFilter;
- this.runtimeTimerFilter_ = runtimeTimerFilter;
- this.sourceMap = sourceMap;
- var ticks = this.ticks_ =
- { total: 0, unaccounted: 0, excluded: 0, gc: 0 };
-
- distortion = parseInt(distortion);
- // Convert picoseconds to nanoseconds.
- this.distortion_per_entry = isNaN(distortion) ? 0 : (distortion / 1000);
- this.distortion = 0;
- var rangelimits = range ? range.split(",") : [];
- var range_start = parseInt(rangelimits[0]);
- var range_end = parseInt(rangelimits[1]);
- // Convert milliseconds to nanoseconds.
- this.range_start = isNaN(range_start) ? -Infinity : (range_start * 1000);
- this.range_end = isNaN(range_end) ? Infinity : (range_end * 1000)
-
- V8Profile.prototype.handleUnknownCode = function(
- operation, addr, opt_stackPos) {
- var op = Profile.Operation;
- switch (operation) {
- case op.MOVE:
- printErr('Code move event for unknown code: 0x' + addr.toString(16));
- break;
- case op.DELETE:
- printErr('Code delete event for unknown code: 0x' + addr.toString(16));
- break;
- case op.TICK:
- // Only unknown PCs (the first frame) are reported as unaccounted,
- // otherwise tick balance will be corrupted (this behavior is compatible
- // with the original tickprocessor.py script.)
- if (opt_stackPos == 0) {
- ticks.unaccounted++;
- }
- break;
- }
- };
-
- if (preprocessJson) {
- this.profile_ = new JsonProfile();
- } else {
- this.profile_ = new V8Profile(separateIc, separateBytecodes,
- separateBuiltins, separateStubs);
- }
- this.codeTypes_ = {};
- // Count each tick as a time unit.
- this.viewBuilder_ = new ViewBuilder(1);
- this.lastLogFileName_ = null;
-
- this.generation_ = 1;
- this.currentProducerProfile_ = null;
- this.onlySummary_ = onlySummary;
-};
-inherits(TickProcessor, LogReader);
-
-
-TickProcessor.VmStates = {
- JS: 0,
- GC: 1,
- PARSER: 2,
- BYTECODE_COMPILER: 3,
- COMPILER: 4,
- OTHER: 5,
- EXTERNAL: 6,
- IDLE: 7,
-};
-
-
-TickProcessor.CodeTypes = {
- CPP: 0,
- SHARED_LIB: 1
-};
-// Otherwise, this is JS-related code. We are not adding it to
-// codeTypes_ map because there can be zillions of them.
-
-
-TickProcessor.CALL_PROFILE_CUTOFF_PCT = 1.0;
-
-TickProcessor.CALL_GRAPH_SIZE = 5;
-
-/**
- * @override
- */
-TickProcessor.prototype.printError = function(str) {
- printErr(str);
-};
-
-
-TickProcessor.prototype.setCodeType = function(name, type) {
- this.codeTypes_[name] = TickProcessor.CodeTypes[type];
-};
-
-
-TickProcessor.prototype.isSharedLibrary = function(name) {
- return this.codeTypes_[name] == TickProcessor.CodeTypes.SHARED_LIB;
-};
-
-
-TickProcessor.prototype.isCppCode = function(name) {
- return this.codeTypes_[name] == TickProcessor.CodeTypes.CPP;
-};
-
-
-TickProcessor.prototype.isJsCode = function(name) {
- return name !== "UNKNOWN" && !(name in this.codeTypes_);
-};
-
-
-TickProcessor.prototype.processLogFile = function(fileName) {
- this.lastLogFileName_ = fileName;
- var line;
- while (line = readline()) {
- this.processLogLine(line);
- }
-};
-
-
-TickProcessor.prototype.processLogFileInTest = function(fileName) {
- // Hack file name to avoid dealing with platform specifics.
- this.lastLogFileName_ = 'v8.log';
- var contents = readFile(fileName);
- this.processLogChunk(contents);
-};
-
-
-TickProcessor.prototype.processSharedLibrary = function(
- name, startAddr, endAddr, aslrSlide) {
- var entry = this.profile_.addLibrary(name, startAddr, endAddr, aslrSlide);
- this.setCodeType(entry.getName(), 'SHARED_LIB');
-
- var self = this;
- var libFuncs = this.cppEntriesProvider_.parseVmSymbols(
- name, startAddr, endAddr, aslrSlide, function(fName, fStart, fEnd) {
- self.profile_.addStaticCode(fName, fStart, fEnd);
- self.setCodeType(fName, 'CPP');
- });
-};
-
-
-TickProcessor.prototype.processCodeCreation = function(
- type, kind, timestamp, start, size, name, maybe_func) {
- if (maybe_func.length) {
- var funcAddr = parseInt(maybe_func[0]);
- var state = parseState(maybe_func[1]);
- this.profile_.addFuncCode(type, name, timestamp, start, size, funcAddr, state);
- } else {
- this.profile_.addCode(type, name, timestamp, start, size);
- }
-};
-
-
-TickProcessor.prototype.processCodeDeopt = function(
- timestamp, size, code, inliningId, scriptOffset, bailoutType,
- sourcePositionText, deoptReasonText) {
- this.profile_.deoptCode(timestamp, code, inliningId, scriptOffset,
- bailoutType, sourcePositionText, deoptReasonText);
-};
-
-
-TickProcessor.prototype.processCodeMove = function(from, to) {
- this.profile_.moveCode(from, to);
-};
-
-TickProcessor.prototype.processCodeDelete = function(start) {
- this.profile_.deleteCode(start);
-};
-
-TickProcessor.prototype.processCodeSourceInfo = function(
- start, script, startPos, endPos, sourcePositions, inliningPositions,
- inlinedFunctions) {
- this.profile_.addSourcePositions(start, script, startPos,
- endPos, sourcePositions, inliningPositions, inlinedFunctions);
-};
-
-TickProcessor.prototype.processScriptSource = function(script, url, source) {
- this.profile_.addScriptSource(script, url, source);
-};
-
-TickProcessor.prototype.processFunctionMove = function(from, to) {
- this.profile_.moveFunc(from, to);
-};
-
-
-TickProcessor.prototype.includeTick = function(vmState) {
- if (this.stateFilter_ !== null) {
- return this.stateFilter_ == vmState;
- } else if (this.runtimeTimerFilter_ !== null) {
- return this.currentRuntimeTimer == this.runtimeTimerFilter_;
- }
- return true;
-};
-
-TickProcessor.prototype.processRuntimeTimerEvent = function(name) {
- this.currentRuntimeTimer = name;
-}
-
-TickProcessor.prototype.processTick = function(pc,
- ns_since_start,
- is_external_callback,
- tos_or_external_callback,
- vmState,
- stack) {
- this.distortion += this.distortion_per_entry;
- ns_since_start -= this.distortion;
- if (ns_since_start < this.range_start || ns_since_start > this.range_end) {
- return;
- }
- this.ticks_.total++;
- if (vmState == TickProcessor.VmStates.GC) this.ticks_.gc++;
- if (!this.includeTick(vmState)) {
- this.ticks_.excluded++;
- return;
- }
- if (is_external_callback) {
- // Don't use PC when in external callback code, as it can point
- // inside callback's code, and we will erroneously report
- // that a callback calls itself. Instead we use tos_or_external_callback,
- // as simply resetting PC will produce unaccounted ticks.
- pc = tos_or_external_callback;
- tos_or_external_callback = 0;
- } else if (tos_or_external_callback) {
- // Find out, if top of stack was pointing inside a JS function
- // meaning that we have encountered a frameless invocation.
- var funcEntry = this.profile_.findEntry(tos_or_external_callback);
- if (!funcEntry || !funcEntry.isJSFunction || !funcEntry.isJSFunction()) {
- tos_or_external_callback = 0;
- }
- }
-
- this.profile_.recordTick(
- ns_since_start, vmState,
- this.processStack(pc, tos_or_external_callback, stack));
-};
-
-
-TickProcessor.prototype.advanceDistortion = function() {
- this.distortion += this.distortion_per_entry;
-}
-
-
-TickProcessor.prototype.processHeapSampleBegin = function(space, state, ticks) {
- if (space != 'Heap') return;
- this.currentProducerProfile_ = new CallTree();
-};
-
-
-TickProcessor.prototype.processHeapSampleEnd = function(space, state) {
- if (space != 'Heap' || !this.currentProducerProfile_) return;
-
- print('Generation ' + this.generation_ + ':');
- var tree = this.currentProducerProfile_;
- tree.computeTotalWeights();
- var producersView = this.viewBuilder_.buildView(tree);
- // Sort by total time, desc, then by name, desc.
- producersView.sort(function(rec1, rec2) {
- return rec2.totalTime - rec1.totalTime ||
- (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1); });
- this.printHeavyProfile(producersView.head.children);
-
- this.currentProducerProfile_ = null;
- this.generation_++;
-};
-
-
-TickProcessor.prototype.printStatistics = function() {
- if (this.preprocessJson) {
- this.profile_.writeJson();
- return;
- }
-
- print('Statistical profiling result from ' + this.lastLogFileName_ +
- ', (' + this.ticks_.total +
- ' ticks, ' + this.ticks_.unaccounted + ' unaccounted, ' +
- this.ticks_.excluded + ' excluded).');
-
- if (this.ticks_.total == 0) return;
-
- var flatProfile = this.profile_.getFlatProfile();
- var flatView = this.viewBuilder_.buildView(flatProfile);
- // Sort by self time, desc, then by name, desc.
- flatView.sort(function(rec1, rec2) {
- return rec2.selfTime - rec1.selfTime ||
- (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1); });
- var totalTicks = this.ticks_.total;
- if (this.ignoreUnknown_) {
- totalTicks -= this.ticks_.unaccounted;
- }
- var printAllTicks = !this.onlySummary_;
-
- // Count library ticks
- var flatViewNodes = flatView.head.children;
- var self = this;
-
- var libraryTicks = 0;
- if(printAllTicks) this.printHeader('Shared libraries');
- this.printEntries(flatViewNodes, totalTicks, null,
- function(name) { return self.isSharedLibrary(name); },
- function(rec) { libraryTicks += rec.selfTime; }, printAllTicks);
- var nonLibraryTicks = totalTicks - libraryTicks;
-
- var jsTicks = 0;
- if(printAllTicks) this.printHeader('JavaScript');
- this.printEntries(flatViewNodes, totalTicks, nonLibraryTicks,
- function(name) { return self.isJsCode(name); },
- function(rec) { jsTicks += rec.selfTime; }, printAllTicks);
-
- var cppTicks = 0;
- if(printAllTicks) this.printHeader('C++');
- this.printEntries(flatViewNodes, totalTicks, nonLibraryTicks,
- function(name) { return self.isCppCode(name); },
- function(rec) { cppTicks += rec.selfTime; }, printAllTicks);
-
- this.printHeader('Summary');
- this.printLine('JavaScript', jsTicks, totalTicks, nonLibraryTicks);
- this.printLine('C++', cppTicks, totalTicks, nonLibraryTicks);
- this.printLine('GC', this.ticks_.gc, totalTicks, nonLibraryTicks);
- this.printLine('Shared libraries', libraryTicks, totalTicks, null);
- if (!this.ignoreUnknown_ && this.ticks_.unaccounted > 0) {
- this.printLine('Unaccounted', this.ticks_.unaccounted,
- this.ticks_.total, null);
- }
-
- if(printAllTicks) {
- print('\n [C++ entry points]:');
- print(' ticks cpp total name');
- var c_entry_functions = this.profile_.getCEntryProfile();
- var total_c_entry = c_entry_functions[0].ticks;
- for (var i = 1; i < c_entry_functions.length; i++) {
- c = c_entry_functions[i];
- this.printLine(c.name, c.ticks, total_c_entry, totalTicks);
- }
-
- this.printHeavyProfHeader();
- var heavyProfile = this.profile_.getBottomUpProfile();
- var heavyView = this.viewBuilder_.buildView(heavyProfile);
- // To show the same percentages as in the flat profile.
- heavyView.head.totalTime = totalTicks;
- // Sort by total time, desc, then by name, desc.
- heavyView.sort(function(rec1, rec2) {
- return rec2.totalTime - rec1.totalTime ||
- (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1); });
- this.printHeavyProfile(heavyView.head.children);
- }
-};
-
-
-function padLeft(s, len) {
- s = s.toString();
- if (s.length < len) {
- var padLength = len - s.length;
- if (!(padLength in padLeft)) {
- padLeft[padLength] = new Array(padLength + 1).join(' ');
- }
- s = padLeft[padLength] + s;
- }
- return s;
-};
-
-
-TickProcessor.prototype.printHeader = function(headerTitle) {
- print('\n [' + headerTitle + ']:');
- print(' ticks total nonlib name');
-};
-
-
-TickProcessor.prototype.printLine = function(
- entry, ticks, totalTicks, nonLibTicks) {
- var pct = ticks * 100 / totalTicks;
- var nonLibPct = nonLibTicks != null
- ? padLeft((ticks * 100 / nonLibTicks).toFixed(1), 5) + '% '
- : ' ';
- print(' ' + padLeft(ticks, 5) + ' ' +
- padLeft(pct.toFixed(1), 5) + '% ' +
- nonLibPct +
- entry);
-}
-
-TickProcessor.prototype.printHeavyProfHeader = function() {
- print('\n [Bottom up (heavy) profile]:');
- print(' Note: percentage shows a share of a particular caller in the ' +
- 'total\n' +
- ' amount of its parent calls.');
- print(' Callers occupying less than ' +
- TickProcessor.CALL_PROFILE_CUTOFF_PCT.toFixed(1) +
- '% are not shown.\n');
- print(' ticks parent name');
-};
-
-
-TickProcessor.prototype.processProfile = function(
- profile, filterP, func) {
- for (var i = 0, n = profile.length; i < n; ++i) {
- var rec = profile[i];
- if (!filterP(rec.internalFuncName)) {
- continue;
- }
- func(rec);
- }
-};
-
-TickProcessor.prototype.getLineAndColumn = function(name) {
- var re = /:([0-9]+):([0-9]+)$/;
- var array = re.exec(name);
- if (!array) {
- return null;
- }
- return {line: array[1], column: array[2]};
-}
-
-TickProcessor.prototype.hasSourceMap = function() {
- return this.sourceMap != null;
-};
-
-
-TickProcessor.prototype.formatFunctionName = function(funcName) {
- if (!this.hasSourceMap()) {
- return funcName;
- }
- var lc = this.getLineAndColumn(funcName);
- if (lc == null) {
- return funcName;
- }
- // in source maps lines and columns are zero based
- var lineNumber = lc.line - 1;
- var column = lc.column - 1;
- var entry = this.sourceMap.findEntry(lineNumber, column);
- var sourceFile = entry[2];
- var sourceLine = entry[3] + 1;
- var sourceColumn = entry[4] + 1;
-
- return sourceFile + ':' + sourceLine + ':' + sourceColumn + ' -> ' + funcName;
-};
-
-TickProcessor.prototype.printEntries = function(
- profile, totalTicks, nonLibTicks, filterP, callback, printAllTicks) {
- var that = this;
- this.processProfile(profile, filterP, function (rec) {
- if (rec.selfTime == 0) return;
- callback(rec);
- var funcName = that.formatFunctionName(rec.internalFuncName);
- if(printAllTicks) {
- that.printLine(funcName, rec.selfTime, totalTicks, nonLibTicks);
- }
- });
-};
-
-
-TickProcessor.prototype.printHeavyProfile = function(profile, opt_indent) {
- var self = this;
- var indent = opt_indent || 0;
- var indentStr = padLeft('', indent);
- this.processProfile(profile, function() { return true; }, function (rec) {
- // Cut off too infrequent callers.
- if (rec.parentTotalPercent < TickProcessor.CALL_PROFILE_CUTOFF_PCT) return;
- var funcName = self.formatFunctionName(rec.internalFuncName);
- print(' ' + padLeft(rec.totalTime, 5) + ' ' +
- padLeft(rec.parentTotalPercent.toFixed(1), 5) + '% ' +
- indentStr + funcName);
- // Limit backtrace depth.
- if (indent < 2 * self.callGraphSize_) {
- self.printHeavyProfile(rec.children, indent + 2);
- }
- // Delimit top-level functions.
- if (indent == 0) {
- print('');
- }
- });
-};
-
-
-function CppEntriesProvider() {
-};
-
-
-CppEntriesProvider.prototype.parseVmSymbols = function(
- libName, libStart, libEnd, libASLRSlide, processorFunc) {
- this.loadSymbols(libName);
-
- var lastUnknownSize;
- var lastAdded;
-
- function inRange(funcInfo, start, end) {
- return funcInfo.start >= start && funcInfo.end <= end;
- }
-
- function addEntry(funcInfo) {
- // Several functions can be mapped onto the same address. To avoid
- // creating zero-sized entries, skip such duplicates.
- // Also double-check that function belongs to the library address space.
-
- if (lastUnknownSize &&
- lastUnknownSize.start < funcInfo.start) {
- // Try to update lastUnknownSize based on new entries start position.
- lastUnknownSize.end = funcInfo.start;
- if ((!lastAdded || !inRange(lastUnknownSize, lastAdded.start,
- lastAdded.end)) &&
- inRange(lastUnknownSize, libStart, libEnd)) {
- processorFunc(lastUnknownSize.name, lastUnknownSize.start,
- lastUnknownSize.end);
- lastAdded = lastUnknownSize;
- }
- }
- lastUnknownSize = undefined;
-
- if (funcInfo.end) {
- // Skip duplicates that have the same start address as the last added.
- if ((!lastAdded || lastAdded.start != funcInfo.start) &&
- inRange(funcInfo, libStart, libEnd)) {
- processorFunc(funcInfo.name, funcInfo.start, funcInfo.end);
- lastAdded = funcInfo;
- }
- } else {
- // If a funcInfo doesn't have an end, try to match it up with then next
- // entry.
- lastUnknownSize = funcInfo;
- }
- }
-
- while (true) {
- var funcInfo = this.parseNextLine();
- if (funcInfo === null) {
- continue;
- } else if (funcInfo === false) {
- break;
- }
- if (funcInfo.start < libStart - libASLRSlide &&
- funcInfo.start < libEnd - libStart) {
- funcInfo.start += libStart;
- } else {
- funcInfo.start += libASLRSlide;
- }
- if (funcInfo.size) {
- funcInfo.end = funcInfo.start + funcInfo.size;
- }
- addEntry(funcInfo);
- }
- addEntry({name: '', start: libEnd});
-};
-
-
-CppEntriesProvider.prototype.loadSymbols = function(libName) {
-};
-
-
-CppEntriesProvider.prototype.parseNextLine = function() {
- return false;
-};
-
-
-function UnixCppEntriesProvider(nmExec, objdumpExec, targetRootFS, apkEmbeddedLibrary) {
- this.symbols = [];
- // File offset of a symbol minus the virtual address of a symbol found in
- // the symbol table.
- this.fileOffsetMinusVma = 0;
- this.parsePos = 0;
- this.nmExec = nmExec;
- this.objdumpExec = objdumpExec;
- this.targetRootFS = targetRootFS;
- this.apkEmbeddedLibrary = apkEmbeddedLibrary;
- this.FUNC_RE = /^([0-9a-fA-F]{8,16}) ([0-9a-fA-F]{8,16} )?[tTwW] (.*)$/;
-};
-inherits(UnixCppEntriesProvider, CppEntriesProvider);
-
-
-UnixCppEntriesProvider.prototype.loadSymbols = function(libName) {
- this.parsePos = 0;
- if (this.apkEmbeddedLibrary && libName.endsWith('.apk')) {
- libName = this.apkEmbeddedLibrary;
- }
- if (this.targetRootFS) {
- libName = libName.substring(libName.lastIndexOf('/') + 1);
- libName = this.targetRootFS + libName;
- }
- try {
- this.symbols = [
- os.system(this.nmExec, ['-C', '-n', '-S', libName], -1, -1),
- os.system(this.nmExec, ['-C', '-n', '-S', '-D', libName], -1, -1)
- ];
-
- const objdumpOutput = os.system(this.objdumpExec, ['-h', libName], -1, -1);
- for (const line of objdumpOutput.split('\n')) {
- const [,sectionName,,vma,,fileOffset] = line.trim().split(/\s+/);
- if (sectionName === ".text") {
- this.fileOffsetMinusVma = parseInt(fileOffset, 16) - parseInt(vma, 16);
- }
- }
- } catch (e) {
- // If the library cannot be found on this system let's not panic.
- this.symbols = ['', ''];
- }
-};
-
-
-UnixCppEntriesProvider.prototype.parseNextLine = function() {
- if (this.symbols.length == 0) {
- return false;
- }
- var lineEndPos = this.symbols[0].indexOf('\n', this.parsePos);
- if (lineEndPos == -1) {
- this.symbols.shift();
- this.parsePos = 0;
- return this.parseNextLine();
- }
-
- var line = this.symbols[0].substring(this.parsePos, lineEndPos);
- this.parsePos = lineEndPos + 1;
- var fields = line.match(this.FUNC_RE);
- var funcInfo = null;
- if (fields) {
- funcInfo = { name: fields[3], start: parseInt(fields[1], 16) + this.fileOffsetMinusVma };
- if (fields[2]) {
- funcInfo.size = parseInt(fields[2], 16);
- }
- }
- return funcInfo;
-};
-
-
-function MacCppEntriesProvider(nmExec, objdumpExec, targetRootFS, apkEmbeddedLibrary) {
- UnixCppEntriesProvider.call(this, nmExec, objdumpExec, targetRootFS, apkEmbeddedLibrary);
- // Note an empty group. It is required, as UnixCppEntriesProvider expects 3 groups.
- this.FUNC_RE = /^([0-9a-fA-F]{8,16})() (.*)$/;
-};
-inherits(MacCppEntriesProvider, UnixCppEntriesProvider);
-
-
-MacCppEntriesProvider.prototype.loadSymbols = function(libName) {
- this.parsePos = 0;
- libName = this.targetRootFS + libName;
-
- // It seems that in OS X `nm` thinks that `-f` is a format option, not a
- // "flat" display option flag.
- try {
- this.symbols = [os.system(this.nmExec, ['-n', libName], -1, -1), ''];
- } catch (e) {
- // If the library cannot be found on this system let's not panic.
- this.symbols = '';
- }
-};
-
-
-function WindowsCppEntriesProvider(_ignored_nmExec, _ignored_objdumpExec, targetRootFS,
- _ignored_apkEmbeddedLibrary) {
- this.targetRootFS = targetRootFS;
- this.symbols = '';
- this.parsePos = 0;
-};
-inherits(WindowsCppEntriesProvider, CppEntriesProvider);
-
-
-WindowsCppEntriesProvider.FILENAME_RE = /^(.*)\.([^.]+)$/;
-
-
-WindowsCppEntriesProvider.FUNC_RE =
- /^\s+0001:[0-9a-fA-F]{8}\s+([_\?@$0-9a-zA-Z]+)\s+([0-9a-fA-F]{8}).*$/;
-
-
-WindowsCppEntriesProvider.IMAGE_BASE_RE =
- /^\s+0000:00000000\s+___ImageBase\s+([0-9a-fA-F]{8}).*$/;
-
-
-// This is almost a constant on Windows.
-WindowsCppEntriesProvider.EXE_IMAGE_BASE = 0x00400000;
-
-
-WindowsCppEntriesProvider.prototype.loadSymbols = function(libName) {
- libName = this.targetRootFS + libName;
- var fileNameFields = libName.match(WindowsCppEntriesProvider.FILENAME_RE);
- if (!fileNameFields) return;
- var mapFileName = fileNameFields[1] + '.map';
- this.moduleType_ = fileNameFields[2].toLowerCase();
- try {
- this.symbols = read(mapFileName);
- } catch (e) {
- // If .map file cannot be found let's not panic.
- this.symbols = '';
- }
-};
-
-
-WindowsCppEntriesProvider.prototype.parseNextLine = function() {
- var lineEndPos = this.symbols.indexOf('\r\n', this.parsePos);
- if (lineEndPos == -1) {
- return false;
- }
-
- var line = this.symbols.substring(this.parsePos, lineEndPos);
- this.parsePos = lineEndPos + 2;
-
- // Image base entry is above all other symbols, so we can just
- // terminate parsing.
- var imageBaseFields = line.match(WindowsCppEntriesProvider.IMAGE_BASE_RE);
- if (imageBaseFields) {
- var imageBase = parseInt(imageBaseFields[1], 16);
- if ((this.moduleType_ == 'exe') !=
- (imageBase == WindowsCppEntriesProvider.EXE_IMAGE_BASE)) {
- return false;
- }
- }
-
- var fields = line.match(WindowsCppEntriesProvider.FUNC_RE);
- return fields ?
- { name: this.unmangleName(fields[1]), start: parseInt(fields[2], 16) } :
- null;
-};
-
-
-/**
- * Performs very simple unmangling of C++ names.
- *
- * Does not handle arguments and template arguments. The mangled names have
- * the form:
- *
- * ?LookupInDescriptor@JSObject@internal@v8@@...arguments info...
- */
-WindowsCppEntriesProvider.prototype.unmangleName = function(name) {
- // Empty or non-mangled name.
- if (name.length < 1 || name.charAt(0) != '?') return name;
- var nameEndPos = name.indexOf('@@');
- var components = name.substring(1, nameEndPos).split('@');
- components.reverse();
- return components.join('::');
-};
-
-
-class ArgumentsProcessor extends BaseArgumentsProcessor {
- getArgsDispatch() {
- let dispatch = {
- '-j': ['stateFilter', TickProcessor.VmStates.JS,
- 'Show only ticks from JS VM state'],
- '-g': ['stateFilter', TickProcessor.VmStates.GC,
- 'Show only ticks from GC VM state'],
- '-p': ['stateFilter', TickProcessor.VmStates.PARSER,
- 'Show only ticks from PARSER VM state'],
- '-b': ['stateFilter', TickProcessor.VmStates.BYTECODE_COMPILER,
- 'Show only ticks from BYTECODE_COMPILER VM state'],
- '-c': ['stateFilter', TickProcessor.VmStates.COMPILER,
- 'Show only ticks from COMPILER VM state'],
- '-o': ['stateFilter', TickProcessor.VmStates.OTHER,
- 'Show only ticks from OTHER VM state'],
- '-e': ['stateFilter', TickProcessor.VmStates.EXTERNAL,
- 'Show only ticks from EXTERNAL VM state'],
- '--filter-runtime-timer': ['runtimeTimerFilter', null,
- 'Show only ticks matching the given runtime timer scope'],
- '--call-graph-size': ['callGraphSize', TickProcessor.CALL_GRAPH_SIZE,
- 'Set the call graph size'],
- '--ignore-unknown': ['ignoreUnknown', true,
- 'Exclude ticks of unknown code entries from processing'],
- '--separate-ic': ['separateIc', parseBool,
- 'Separate IC entries'],
- '--separate-bytecodes': ['separateBytecodes', parseBool,
- 'Separate Bytecode entries'],
- '--separate-builtins': ['separateBuiltins', parseBool,
- 'Separate Builtin entries'],
- '--separate-stubs': ['separateStubs', parseBool,
- 'Separate Stub entries'],
- '--unix': ['platform', 'unix',
- 'Specify that we are running on *nix platform'],
- '--windows': ['platform', 'windows',
- 'Specify that we are running on Windows platform'],
- '--mac': ['platform', 'mac',
- 'Specify that we are running on Mac OS X platform'],
- '--nm': ['nm', 'nm',
- 'Specify the \'nm\' executable to use (e.g. --nm=/my_dir/nm)'],
- '--objdump': ['objdump', 'objdump',
- 'Specify the \'objdump\' executable to use (e.g. --objdump=/my_dir/objdump)'],
- '--target': ['targetRootFS', '',
- 'Specify the target root directory for cross environment'],
- '--apk-embedded-library': ['apkEmbeddedLibrary', '',
- 'Specify the path of the embedded library for Android traces'],
- '--range': ['range', 'auto,auto',
- 'Specify the range limit as [start],[end]'],
- '--distortion': ['distortion', 0,
- 'Specify the logging overhead in picoseconds'],
- '--source-map': ['sourceMap', null,
- 'Specify the source map that should be used for output'],
- '--timed-range': ['timedRange', true,
- 'Ignore ticks before first and after last Date.now() call'],
- '--pairwise-timed-range': ['pairwiseTimedRange', true,
- 'Ignore ticks outside pairs of Date.now() calls'],
- '--only-summary': ['onlySummary', true,
- 'Print only tick summary, exclude other information'],
- '--preprocess': ['preprocessJson', true,
- 'Preprocess for consumption with web interface']
- };
- dispatch['--js'] = dispatch['-j'];
- dispatch['--gc'] = dispatch['-g'];
- dispatch['--compiler'] = dispatch['-c'];
- dispatch['--other'] = dispatch['-o'];
- dispatch['--external'] = dispatch['-e'];
- dispatch['--ptr'] = dispatch['--pairwise-timed-range'];
- return dispatch;
- }
-
- getDefaultResults() {
- return {
- logFileName: 'v8.log',
- platform: 'unix',
- stateFilter: null,
- callGraphSize: 5,
- ignoreUnknown: false,
- separateIc: true,
- separateBytecodes: false,
- separateBuiltins: true,
- separateStubs: true,
- preprocessJson: null,
- targetRootFS: '',
- nm: 'nm',
- objdump: 'objdump',
- range: 'auto,auto',
- distortion: 0,
- timedRange: false,
- pairwiseTimedRange: false,
- onlySummary: false,
- runtimeTimerFilter: null,
- };
- }
-}
diff --git a/deps/v8/tools/tickprocessor.mjs b/deps/v8/tools/tickprocessor.mjs
index 54c37e68e0..3041a0cddd 100644
--- a/deps/v8/tools/tickprocessor.mjs
+++ b/deps/v8/tools/tickprocessor.mjs
@@ -46,8 +46,8 @@ class V8Profile extends Profile {
if (!separateBuiltins) regexps.push(V8Profile.BUILTINS_RE);
if (!separateStubs) regexps.push(V8Profile.STUBS_RE);
if (regexps.length > 0) {
- this.skipThisFunction = function(name) {
- for (let i=0; i<regexps.length; i++) {
+ this.skipThisFunction = function (name) {
+ for (let i = 0; i < regexps.length; i++) {
if (regexps[i].test(name)) return true;
}
return false;
@@ -64,14 +64,14 @@ export function readFile(fileName) {
try {
return read(fileName);
} catch (e) {
- printErr(fileName + ': ' + (e.message || e));
+ printErr(`${fileName}: ${e.message || e}`);
throw e;
}
}
export class TickProcessor extends LogReader {
- constructor(
+ constructor(
cppEntriesProvider,
separateIc,
separateBytecodes,
@@ -88,48 +88,70 @@ export class TickProcessor extends LogReader {
onlySummary,
runtimeTimerFilter,
preprocessJson) {
- super({},
+ super({},
timedRange,
pairwiseTimedRange);
- this.dispatchTable_ = {
- 'shared-library': { parsers: [parseString, parseInt, parseInt, parseInt],
- processor: this.processSharedLibrary },
+ this.dispatchTable_ = {
+ 'shared-library': {
+ parsers: [parseString, parseInt, parseInt, parseInt],
+ processor: this.processSharedLibrary
+ },
'code-creation': {
- parsers: [parseString, parseInt, parseInt, parseInt, parseInt,
- parseString, parseVarArgs],
- processor: this.processCodeCreation },
+ parsers: [parseString, parseInt, parseInt, parseInt, parseInt,
+ parseString, parseVarArgs],
+ processor: this.processCodeCreation
+ },
'code-deopt': {
- parsers: [parseInt, parseInt, parseInt, parseInt, parseInt,
- parseString, parseString, parseString],
- processor: this.processCodeDeopt },
- 'code-move': { parsers: [parseInt, parseInt, ],
- processor: this.processCodeMove },
- 'code-delete': { parsers: [parseInt],
- processor: this.processCodeDelete },
+ parsers: [parseInt, parseInt, parseInt, parseInt, parseInt,
+ parseString, parseString, parseString],
+ processor: this.processCodeDeopt
+ },
+ 'code-move': {
+        parsers: [parseInt, parseInt],
+ processor: this.processCodeMove
+ },
+ 'code-delete': {
+ parsers: [parseInt],
+ processor: this.processCodeDelete
+ },
'code-source-info': {
- parsers: [parseInt, parseInt, parseInt, parseInt, parseString,
- parseString, parseString],
- processor: this.processCodeSourceInfo },
+ parsers: [parseInt, parseInt, parseInt, parseInt, parseString,
+ parseString, parseString],
+ processor: this.processCodeSourceInfo
+ },
'script-source': {
- parsers: [parseInt, parseString, parseString],
- processor: this.processScriptSource },
- 'sfi-move': { parsers: [parseInt, parseInt],
- processor: this.processFunctionMove },
+ parsers: [parseInt, parseString, parseString],
+ processor: this.processScriptSource
+ },
+ 'sfi-move': {
+ parsers: [parseInt, parseInt],
+ processor: this.processFunctionMove
+ },
'active-runtime-timer': {
parsers: [parseString],
- processor: this.processRuntimeTimerEvent },
+ processor: this.processRuntimeTimerEvent
+ },
'tick': {
- parsers: [parseInt, parseInt, parseInt,
- parseInt, parseInt, parseVarArgs],
- processor: this.processTick },
- 'heap-sample-begin': { parsers: [parseString, parseString, parseInt],
- processor: this.processHeapSampleBegin },
- 'heap-sample-end': { parsers: [parseString, parseString],
- processor: this.processHeapSampleEnd },
- 'timer-event-start' : { parsers: [parseString, parseString, parseString],
- processor: this.advanceDistortion },
- 'timer-event-end' : { parsers: [parseString, parseString, parseString],
- processor: this.advanceDistortion },
+ parsers: [parseInt, parseInt, parseInt,
+ parseInt, parseInt, parseVarArgs],
+ processor: this.processTick
+ },
+ 'heap-sample-begin': {
+ parsers: [parseString, parseString, parseInt],
+ processor: this.processHeapSampleBegin
+ },
+ 'heap-sample-end': {
+ parsers: [parseString, parseString],
+ processor: this.processHeapSampleEnd
+ },
+ 'timer-event-start': {
+ parsers: [parseString, parseString, parseString],
+ processor: this.advanceDistortion
+ },
+ 'timer-event-end': {
+ parsers: [parseString, parseString, parseString],
+ processor: this.advanceDistortion
+ },
// Ignored events.
'profiler': null,
'function-creation': null,
@@ -143,733 +165,661 @@ export class TickProcessor extends LogReader {
'end-code-region': null
};
- this.preprocessJson = preprocessJson;
- this.cppEntriesProvider_ = cppEntriesProvider;
- this.callGraphSize_ = callGraphSize;
- this.ignoreUnknown_ = ignoreUnknown;
- this.stateFilter_ = stateFilter;
- this.runtimeTimerFilter_ = runtimeTimerFilter;
- this.sourceMap = sourceMap;
- const ticks = this.ticks_ =
- { total: 0, unaccounted: 0, excluded: 0, gc: 0 };
-
- distortion = parseInt(distortion);
- // Convert picoseconds to nanoseconds.
- this.distortion_per_entry = isNaN(distortion) ? 0 : (distortion / 1000);
- this.distortion = 0;
- const rangelimits = range ? range.split(",") : [];
- const range_start = parseInt(rangelimits[0]);
- const range_end = parseInt(rangelimits[1]);
- // Convert milliseconds to nanoseconds.
- this.range_start = isNaN(range_start) ? -Infinity : (range_start * 1000);
- this.range_end = isNaN(range_end) ? Infinity : (range_end * 1000)
-
- V8Profile.prototype.handleUnknownCode = function(
+ this.preprocessJson = preprocessJson;
+ this.cppEntriesProvider_ = cppEntriesProvider;
+ this.callGraphSize_ = callGraphSize;
+ this.ignoreUnknown_ = ignoreUnknown;
+ this.stateFilter_ = stateFilter;
+ this.runtimeTimerFilter_ = runtimeTimerFilter;
+ this.sourceMap = sourceMap;
+ const ticks = this.ticks_ =
+ { total: 0, unaccounted: 0, excluded: 0, gc: 0 };
+
+ distortion = parseInt(distortion);
+ // Convert picoseconds to nanoseconds.
+ this.distortion_per_entry = isNaN(distortion) ? 0 : (distortion / 1000);
+ this.distortion = 0;
+ const rangelimits = range ? range.split(",") : [];
+ const range_start = parseInt(rangelimits[0]);
+ const range_end = parseInt(rangelimits[1]);
+ // Convert milliseconds to nanoseconds.
+ this.range_start = isNaN(range_start) ? -Infinity : (range_start * 1000);
+    this.range_end = isNaN(range_end) ? Infinity : (range_end * 1000);
+
+ V8Profile.prototype.handleUnknownCode = function (
operation, addr, opt_stackPos) {
- const op = Profile.Operation;
- switch (operation) {
- case op.MOVE:
- printErr(`Code move event for unknown code: 0x${addr.toString(16)}`);
- break;
- case op.DELETE:
- printErr(`Code delete event for unknown code: 0x${addr.toString(16)}`);
- break;
- case op.TICK:
- // Only unknown PCs (the first frame) are reported as unaccounted,
- // otherwise tick balance will be corrupted (this behavior is compatible
- // with the original tickprocessor.py script.)
- if (opt_stackPos == 0) {
- ticks.unaccounted++;
- }
- break;
- }
- };
+ const op = Profile.Operation;
+ switch (operation) {
+ case op.MOVE:
+ printErr(`Code move event for unknown code: 0x${addr.toString(16)}`);
+ break;
+ case op.DELETE:
+ printErr(`Code delete event for unknown code: 0x${addr.toString(16)}`);
+ break;
+ case op.TICK:
+ // Only unknown PCs (the first frame) are reported as unaccounted,
+ // otherwise tick balance will be corrupted (this behavior is compatible
+ // with the original tickprocessor.py script.)
+ if (opt_stackPos == 0) {
+ ticks.unaccounted++;
+ }
+ break;
+ }
+ };
- if (preprocessJson) {
- this.profile_ = new JsonProfile();
- } else {
- this.profile_ = new V8Profile(separateIc, separateBytecodes,
+ if (preprocessJson) {
+ this.profile_ = new JsonProfile();
+ } else {
+ this.profile_ = new V8Profile(separateIc, separateBytecodes,
separateBuiltins, separateStubs);
- }
- this.codeTypes_ = {};
- // Count each tick as a time unit.
- this.viewBuilder_ = new ViewBuilder(1);
- this.lastLogFileName_ = null;
-
- this.generation_ = 1;
- this.currentProducerProfile_ = null;
- this.onlySummary_ = onlySummary;
-}
-
-
-static VmStates = {
- JS: 0,
- GC: 1,
- PARSER: 2,
- BYTECODE_COMPILER: 3,
- COMPILER: 4,
- OTHER: 5,
- EXTERNAL: 6,
- IDLE: 7,
-};
-
-
-static CodeTypes = {
- CPP: 0,
- SHARED_LIB: 1
-};
-// Otherwise, this is JS-related code. We are not adding it to
-// codeTypes_ map because there can be zillions of them.
-
-
-static CALL_PROFILE_CUTOFF_PCT = 1.0;
-
-static CALL_GRAPH_SIZE = 5;
-
-/**
- * @override
- */
-printError(str) {
- printErr(str);
-}
-
-
-setCodeType(name, type) {
- this.codeTypes_[name] = TickProcessor.CodeTypes[type];
-}
-
-
-isSharedLibrary(name) {
- return this.codeTypes_[name] == TickProcessor.CodeTypes.SHARED_LIB;
-}
-
-
-isCppCode(name) {
- return this.codeTypes_[name] == TickProcessor.CodeTypes.CPP;
-}
-
+ }
+ this.codeTypes_ = {};
+ // Count each tick as a time unit.
+ this.viewBuilder_ = new ViewBuilder(1);
+ this.lastLogFileName_ = null;
+
+ this.generation_ = 1;
+ this.currentProducerProfile_ = null;
+ this.onlySummary_ = onlySummary;
+ }
+
+ static VmStates = {
+ JS: 0,
+ GC: 1,
+ PARSER: 2,
+ BYTECODE_COMPILER: 3,
+ COMPILER: 4,
+ OTHER: 5,
+ EXTERNAL: 6,
+ IDLE: 7,
+ };
-isJsCode(name) {
- return name !== "UNKNOWN" && !(name in this.codeTypes_);
-}
+ static CodeTypes = {
+ CPP: 0,
+ SHARED_LIB: 1
+ };
+ // Otherwise, this is JS-related code. We are not adding it to
+ // codeTypes_ map because there can be zillions of them.
+ static CALL_PROFILE_CUTOFF_PCT = 1.0;
+ static CALL_GRAPH_SIZE = 5;
-processLogFile(fileName) {
- this.lastLogFileName_ = fileName;
- let line;
- while (line = readline()) {
- this.processLogLine(line);
+ /**
+ * @override
+ */
+ printError(str) {
+ printErr(str);
}
-}
+ setCodeType(name, type) {
+ this.codeTypes_[name] = TickProcessor.CodeTypes[type];
+ }
-processLogFileInTest(fileName) {
- // Hack file name to avoid dealing with platform specifics.
- this.lastLogFileName_ = 'v8.log';
- const contents = readFile(fileName);
- this.processLogChunk(contents);
-}
+ isSharedLibrary(name) {
+ return this.codeTypes_[name] == TickProcessor.CodeTypes.SHARED_LIB;
+ }
+ isCppCode(name) {
+ return this.codeTypes_[name] == TickProcessor.CodeTypes.CPP;
+ }
-processSharedLibrary(
- name, startAddr, endAddr, aslrSlide) {
- const entry = this.profile_.addLibrary(name, startAddr, endAddr, aslrSlide);
- this.setCodeType(entry.getName(), 'SHARED_LIB');
+ isJsCode(name) {
+ return name !== "UNKNOWN" && !(name in this.codeTypes_);
+ }
- const self = this;
- const libFuncs = this.cppEntriesProvider_.parseVmSymbols(
- name, startAddr, endAddr, aslrSlide, function(fName, fStart, fEnd) {
- self.profile_.addStaticCode(fName, fStart, fEnd);
- self.setCodeType(fName, 'CPP');
- });
-}
+ processLogFile(fileName) {
+ this.lastLogFileName_ = fileName;
+ let line;
+ while (line = readline()) {
+ this.processLogLine(line);
+ }
+ }
+ processLogFileInTest(fileName) {
+ // Hack file name to avoid dealing with platform specifics.
+ this.lastLogFileName_ = 'v8.log';
+ const contents = readFile(fileName);
+ this.processLogChunk(contents);
+ }
-processCodeCreation(
- type, kind, timestamp, start, size, name, maybe_func) {
- if (maybe_func.length) {
- const funcAddr = parseInt(maybe_func[0]);
- const state = Profile.parseState(maybe_func[1]);
- this.profile_.addFuncCode(type, name, timestamp, start, size, funcAddr, state);
- } else {
- this.profile_.addCode(type, name, timestamp, start, size);
+ processSharedLibrary(name, startAddr, endAddr, aslrSlide) {
+ const entry = this.profile_.addLibrary(name, startAddr, endAddr, aslrSlide);
+ this.setCodeType(entry.getName(), 'SHARED_LIB');
+ const libFuncs = this.cppEntriesProvider_.parseVmSymbols(
+ name, startAddr, endAddr, aslrSlide, (fName, fStart, fEnd) => {
+ this.profile_.addStaticCode(fName, fStart, fEnd);
+ this.setCodeType(fName, 'CPP');
+ });
}
-}
+ processCodeCreation(type, kind, timestamp, start, size, name, maybe_func) {
+ if (maybe_func.length) {
+ const funcAddr = parseInt(maybe_func[0]);
+ const state = Profile.parseState(maybe_func[1]);
+ this.profile_.addFuncCode(type, name, timestamp, start, size, funcAddr, state);
+ } else {
+ this.profile_.addCode(type, name, timestamp, start, size);
+ }
+ }
-processCodeDeopt(
- timestamp, size, code, inliningId, scriptOffset, bailoutType,
- sourcePositionText, deoptReasonText) {
- this.profile_.deoptCode(timestamp, code, inliningId, scriptOffset,
+ processCodeDeopt(
+ timestamp, size, code, inliningId, scriptOffset, bailoutType,
+ sourcePositionText, deoptReasonText) {
+ this.profile_.deoptCode(timestamp, code, inliningId, scriptOffset,
bailoutType, sourcePositionText, deoptReasonText);
-}
-
-
-processCodeMove(from, to) {
- this.profile_.moveCode(from, to);
-}
+ }
-processCodeDelete(start) {
- this.profile_.deleteCode(start);
-}
+ processCodeMove(from, to) {
+ this.profile_.moveCode(from, to);
+ }
-processCodeSourceInfo(
- start, script, startPos, endPos, sourcePositions, inliningPositions,
- inlinedFunctions) {
- this.profile_.addSourcePositions(start, script, startPos,
- endPos, sourcePositions, inliningPositions, inlinedFunctions);
-}
+ processCodeDelete(start) {
+ this.profile_.deleteCode(start);
+ }
-processScriptSource(script, url, source) {
- this.profile_.addScriptSource(script, url, source);
-}
+ processCodeSourceInfo(
+ start, script, startPos, endPos, sourcePositions, inliningPositions,
+ inlinedFunctions) {
+ this.profile_.addSourcePositions(start, script, startPos,
+ endPos, sourcePositions, inliningPositions, inlinedFunctions);
+ }
-processFunctionMove(from, to) {
- this.profile_.moveFunc(from, to);
-}
+ processScriptSource(script, url, source) {
+ this.profile_.addScriptSource(script, url, source);
+ }
+ processFunctionMove(from, to) {
+ this.profile_.moveFunc(from, to);
+ }
-includeTick(vmState) {
- if (this.stateFilter_ !== null) {
- return this.stateFilter_ == vmState;
- } else if (this.runtimeTimerFilter_ !== null) {
- return this.currentRuntimeTimer == this.runtimeTimerFilter_;
+ includeTick(vmState) {
+ if (this.stateFilter_ !== null) {
+ return this.stateFilter_ == vmState;
+ } else if (this.runtimeTimerFilter_ !== null) {
+ return this.currentRuntimeTimer == this.runtimeTimerFilter_;
+ }
+ return true;
}
- return true;
-}
-processRuntimeTimerEvent(name) {
- this.currentRuntimeTimer = name;
-}
+ processRuntimeTimerEvent(name) {
+ this.currentRuntimeTimer = name;
+ }
-processTick(pc,
- ns_since_start,
- is_external_callback,
- tos_or_external_callback,
- vmState,
- stack) {
- this.distortion += this.distortion_per_entry;
- ns_since_start -= this.distortion;
- if (ns_since_start < this.range_start || ns_since_start > this.range_end) {
- return;
- }
- this.ticks_.total++;
- if (vmState == TickProcessor.VmStates.GC) this.ticks_.gc++;
- if (!this.includeTick(vmState)) {
- this.ticks_.excluded++;
- return;
- }
- if (is_external_callback) {
- // Don't use PC when in external callback code, as it can point
- // inside callback's code, and we will erroneously report
- // that a callback calls itself. Instead we use tos_or_external_callback,
- // as simply resetting PC will produce unaccounted ticks.
- pc = tos_or_external_callback;
- tos_or_external_callback = 0;
- } else if (tos_or_external_callback) {
- // Find out, if top of stack was pointing inside a JS function
- // meaning that we have encountered a frameless invocation.
- const funcEntry = this.profile_.findEntry(tos_or_external_callback);
- if (!funcEntry || !funcEntry.isJSFunction || !funcEntry.isJSFunction()) {
+ processTick(pc,
+ ns_since_start,
+ is_external_callback,
+ tos_or_external_callback,
+ vmState,
+ stack) {
+ this.distortion += this.distortion_per_entry;
+ ns_since_start -= this.distortion;
+ if (ns_since_start < this.range_start || ns_since_start > this.range_end) {
+ return;
+ }
+ this.ticks_.total++;
+ if (vmState == TickProcessor.VmStates.GC) this.ticks_.gc++;
+ if (!this.includeTick(vmState)) {
+ this.ticks_.excluded++;
+ return;
+ }
+ if (is_external_callback) {
+ // Don't use PC when in external callback code, as it can point
+ // inside callback's code, and we will erroneously report
+ // that a callback calls itself. Instead we use tos_or_external_callback,
+ // as simply resetting PC will produce unaccounted ticks.
+ pc = tos_or_external_callback;
tos_or_external_callback = 0;
+ } else if (tos_or_external_callback) {
+      // Find out if the top of stack was pointing inside a JS function,
+      // meaning that we have encountered a frameless invocation.
+ const funcEntry = this.profile_.findEntry(tos_or_external_callback);
+ if (!funcEntry || !funcEntry.isJSFunction || !funcEntry.isJSFunction()) {
+ tos_or_external_callback = 0;
+ }
}
- }
- this.profile_.recordTick(
+ this.profile_.recordTick(
ns_since_start, vmState,
this.processStack(pc, tos_or_external_callback, stack));
-}
-
-
-advanceDistortion() {
- this.distortion += this.distortion_per_entry;
-}
-
+ }
-processHeapSampleBegin(space, state, ticks) {
- if (space != 'Heap') return;
- this.currentProducerProfile_ = new CallTree();
-}
+ advanceDistortion() {
+ this.distortion += this.distortion_per_entry;
+ }
+ processHeapSampleBegin(space, state, ticks) {
+ if (space != 'Heap') return;
+ this.currentProducerProfile_ = new CallTree();
+ }
-processHeapSampleEnd(space, state) {
- if (space != 'Heap' || !this.currentProducerProfile_) return;
+ processHeapSampleEnd(space, state) {
+ if (space != 'Heap' || !this.currentProducerProfile_) return;
- print(`Generation ${this.generation_}:`);
- const tree = this.currentProducerProfile_;
- tree.computeTotalWeights();
- const producersView = this.viewBuilder_.buildView(tree);
- // Sort by total time, desc, then by name, desc.
- producersView.sort((rec1, rec2) =>
+ print(`Generation ${this.generation_}:`);
+ const tree = this.currentProducerProfile_;
+ tree.computeTotalWeights();
+ const producersView = this.viewBuilder_.buildView(tree);
+ // Sort by total time, desc, then by name, desc.
+ producersView.sort((rec1, rec2) =>
rec2.totalTime - rec1.totalTime ||
- (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1) );
- this.printHeavyProfile(producersView.head.children);
+ (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1));
+ this.printHeavyProfile(producersView.head.children);
- this.currentProducerProfile_ = null;
- this.generation_++;
-}
+ this.currentProducerProfile_ = null;
+ this.generation_++;
+ }
+ printStatistics() {
+ if (this.preprocessJson) {
+ this.profile_.writeJson();
+ return;
+ }
-printStatistics() {
- if (this.preprocessJson) {
- this.profile_.writeJson();
- return;
- }
+ print(`Statistical profiling result from ${this.lastLogFileName_}` +
+ `, (${this.ticks_.total} ticks, ${this.ticks_.unaccounted} unaccounted, ` +
+ `${this.ticks_.excluded} excluded).`);
- print(`Statistical profiling result from ${this.lastLogFileName_}` +
- ', (' + this.ticks_.total +
- ' ticks, ' + this.ticks_.unaccounted + ' unaccounted, ' +
- this.ticks_.excluded + ' excluded).');
- if (this.ticks_.total == 0) return;
+ if (this.ticks_.total == 0) return;
- const flatProfile = this.profile_.getFlatProfile();
- const flatView = this.viewBuilder_.buildView(flatProfile);
- // Sort by self time, desc, then by name, desc.
- flatView.sort((rec1, rec2) =>
+ const flatProfile = this.profile_.getFlatProfile();
+ const flatView = this.viewBuilder_.buildView(flatProfile);
+ // Sort by self time, desc, then by name, desc.
+ flatView.sort((rec1, rec2) =>
rec2.selfTime - rec1.selfTime ||
- (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1) );
- let totalTicks = this.ticks_.total;
- if (this.ignoreUnknown_) {
- totalTicks -= this.ticks_.unaccounted;
- }
- const printAllTicks = !this.onlySummary_;
-
- // Count library ticks
- const flatViewNodes = flatView.head.children;
- const self = this;
-
- let libraryTicks = 0;
- if(printAllTicks) this.printHeader('Shared libraries');
- this.printEntries(flatViewNodes, totalTicks, null,
- name => self.isSharedLibrary(name),
- function(rec) { libraryTicks += rec.selfTime; }, printAllTicks);
- const nonLibraryTicks = totalTicks - libraryTicks;
-
- let jsTicks = 0;
- if(printAllTicks) this.printHeader('JavaScript');
- this.printEntries(flatViewNodes, totalTicks, nonLibraryTicks,
- name => self.isJsCode(name),
- function(rec) { jsTicks += rec.selfTime; }, printAllTicks);
-
- let cppTicks = 0;
- if(printAllTicks) this.printHeader('C++');
- this.printEntries(flatViewNodes, totalTicks, nonLibraryTicks,
- name => self.isCppCode(name),
- function(rec) { cppTicks += rec.selfTime; }, printAllTicks);
-
- this.printHeader('Summary');
- this.printLine('JavaScript', jsTicks, totalTicks, nonLibraryTicks);
- this.printLine('C++', cppTicks, totalTicks, nonLibraryTicks);
- this.printLine('GC', this.ticks_.gc, totalTicks, nonLibraryTicks);
- this.printLine('Shared libraries', libraryTicks, totalTicks, null);
- if (!this.ignoreUnknown_ && this.ticks_.unaccounted > 0) {
- this.printLine('Unaccounted', this.ticks_.unaccounted,
- this.ticks_.total, null);
- }
-
- if(printAllTicks) {
- print('\n [C++ entry points]:');
- print(' ticks cpp total name');
- const c_entry_functions = this.profile_.getCEntryProfile();
- const total_c_entry = c_entry_functions[0].ticks;
- for (let i = 1; i < c_entry_functions.length; i++) {
- const c = c_entry_functions[i];
- this.printLine(c.name, c.ticks, total_c_entry, totalTicks);
+ (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1));
+ let totalTicks = this.ticks_.total;
+ if (this.ignoreUnknown_) {
+ totalTicks -= this.ticks_.unaccounted;
+ }
+ const printAllTicks = !this.onlySummary_;
+
+ // Count library ticks
+ const flatViewNodes = flatView.head.children;
+
+ let libraryTicks = 0;
+ if (printAllTicks) this.printHeader('Shared libraries');
+ this.printEntries(flatViewNodes, totalTicks, null,
+ name => this.isSharedLibrary(name),
+ (rec) => { libraryTicks += rec.selfTime; }, printAllTicks);
+ const nonLibraryTicks = totalTicks - libraryTicks;
+
+ let jsTicks = 0;
+ if (printAllTicks) this.printHeader('JavaScript');
+ this.printEntries(flatViewNodes, totalTicks, nonLibraryTicks,
+ name => this.isJsCode(name),
+ (rec) => { jsTicks += rec.selfTime; }, printAllTicks);
+
+ let cppTicks = 0;
+ if (printAllTicks) this.printHeader('C++');
+ this.printEntries(flatViewNodes, totalTicks, nonLibraryTicks,
+ name => this.isCppCode(name),
+ (rec) => { cppTicks += rec.selfTime; }, printAllTicks);
+
+ this.printHeader('Summary');
+ this.printLine('JavaScript', jsTicks, totalTicks, nonLibraryTicks);
+ this.printLine('C++', cppTicks, totalTicks, nonLibraryTicks);
+ this.printLine('GC', this.ticks_.gc, totalTicks, nonLibraryTicks);
+ this.printLine('Shared libraries', libraryTicks, totalTicks, null);
+ if (!this.ignoreUnknown_ && this.ticks_.unaccounted > 0) {
+ this.printLine('Unaccounted', this.ticks_.unaccounted,
+ this.ticks_.total, null);
}
- this.printHeavyProfHeader();
- const heavyProfile = this.profile_.getBottomUpProfile();
- const heavyView = this.viewBuilder_.buildView(heavyProfile);
- // To show the same percentages as in the flat profile.
- heavyView.head.totalTime = totalTicks;
- // Sort by total time, desc, then by name, desc.
- heavyView.sort((rec1, rec2) =>
+ if (printAllTicks) {
+ print('\n [C++ entry points]:');
+ print(' ticks cpp total name');
+ const c_entry_functions = this.profile_.getCEntryProfile();
+ const total_c_entry = c_entry_functions[0].ticks;
+ for (let i = 1; i < c_entry_functions.length; i++) {
+ const c = c_entry_functions[i];
+ this.printLine(c.name, c.ticks, total_c_entry, totalTicks);
+ }
+
+ this.printHeavyProfHeader();
+ const heavyProfile = this.profile_.getBottomUpProfile();
+ const heavyView = this.viewBuilder_.buildView(heavyProfile);
+ // To show the same percentages as in the flat profile.
+ heavyView.head.totalTime = totalTicks;
+ // Sort by total time, desc, then by name, desc.
+ heavyView.sort((rec1, rec2) =>
rec2.totalTime - rec1.totalTime ||
- (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1) );
- this.printHeavyProfile(heavyView.head.children);
+ (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1));
+ this.printHeavyProfile(heavyView.head.children);
+ }
}
-}
-
-
-printHeader(headerTitle) {
- print(`\n [${headerTitle}]:`);
- print(' ticks total nonlib name');
-}
+ printHeader(headerTitle) {
+ print(`\n [${headerTitle}]:`);
+ print(' ticks total nonlib name');
+ }
-printLine(
+ printLine(
entry, ticks, totalTicks, nonLibTicks) {
- const pct = ticks * 100 / totalTicks;
- const nonLibPct = nonLibTicks != null
- ? padLeft((ticks * 100 / nonLibTicks).toFixed(1), 5) + '% '
+ const pct = ticks * 100 / totalTicks;
+ const nonLibPct = nonLibTicks != null
+ ? `${(ticks * 100 / nonLibTicks).toFixed(1).toString().padStart(5)}% `
: ' ';
- print(` ${padLeft(ticks, 5)} ` +
- padLeft(pct.toFixed(1), 5) + '% ' +
- nonLibPct +
- entry);
-}
-
-printHeavyProfHeader() {
- print('\n [Bottom up (heavy) profile]:');
- print(' Note: percentage shows a share of a particular caller in the ' +
- 'total\n' +
- ' amount of its parent calls.');
- print(' Callers occupying less than ' +
- TickProcessor.CALL_PROFILE_CUTOFF_PCT.toFixed(1) +
- '% are not shown.\n');
- print(' ticks parent name');
-}
+ print(`${` ${ticks.toString().padStart(5)} ` +
+ pct.toFixed(1).toString().padStart(5)}% ${nonLibPct}${entry}`);
+ }
+ printHeavyProfHeader() {
+ print('\n [Bottom up (heavy) profile]:');
+ print(' Note: percentage shows a share of a particular caller in the ' +
+ 'total\n' +
+ ' amount of its parent calls.');
+ print(` Callers occupying less than ${TickProcessor.CALL_PROFILE_CUTOFF_PCT.toFixed(1)}% are not shown.\n`);
+ print(' ticks parent name');
+ }
-processProfile(
- profile, filterP, func) {
- for (let i = 0, n = profile.length; i < n; ++i) {
- const rec = profile[i];
- if (!filterP(rec.internalFuncName)) {
- continue;
+ processProfile(profile, filterP, func) {
+ for (let i = 0, n = profile.length; i < n; ++i) {
+ const rec = profile[i];
+ if (!filterP(rec.internalFuncName)) {
+ continue;
+ }
+ func(rec);
}
- func(rec);
}
-};
-getLineAndColumn(name) {
- const re = /:([0-9]+):([0-9]+)$/;
- const array = re.exec(name);
- if (!array) {
- return null;
+ getLineAndColumn(name) {
+ const re = /:([0-9]+):([0-9]+)$/;
+ const array = re.exec(name);
+ if (!array) {
+ return null;
+ }
+ return { line: array[1], column: array[2] };
}
- return {line: array[1], column: array[2]};
-}
-hasSourceMap() {
- return this.sourceMap != null;
-}
-
-
-formatFunctionName(funcName) {
- if (!this.hasSourceMap()) {
- return funcName;
- }
- const lc = this.getLineAndColumn(funcName);
- if (lc == null) {
- return funcName;
+ hasSourceMap() {
+ return this.sourceMap != null;
}
- // in source maps lines and columns are zero based
- const lineNumber = lc.line - 1;
- const column = lc.column - 1;
- const entry = this.sourceMap.findEntry(lineNumber, column);
- const sourceFile = entry[2];
- const sourceLine = entry[3] + 1;
- const sourceColumn = entry[4] + 1;
- return sourceFile + ':' + sourceLine + ':' + sourceColumn + ' -> ' + funcName;
-}
-
-printEntries(
- profile, totalTicks, nonLibTicks, filterP, callback, printAllTicks) {
- const that = this;
- this.processProfile(profile, filterP, function (rec) {
- if (rec.selfTime == 0) return;
- callback(rec);
- const funcName = that.formatFunctionName(rec.internalFuncName);
- if(printAllTicks) {
- that.printLine(funcName, rec.selfTime, totalTicks, nonLibTicks);
+ formatFunctionName(funcName) {
+ if (!this.hasSourceMap()) {
+ return funcName;
}
- });
-}
-
- printHeavyProfile(profile, opt_indent) {
- const self = this;
- const indent = opt_indent || 0;
- const indentStr = padLeft('', indent);
- this.processProfile(profile, () => true, function (rec) {
- // Cut off too infrequent callers.
- if (rec.parentTotalPercent < TickProcessor.CALL_PROFILE_CUTOFF_PCT) return;
- const funcName = self.formatFunctionName(rec.internalFuncName);
- print(` ${padLeft(rec.totalTime, 5)} ` +
- padLeft(rec.parentTotalPercent.toFixed(1), 5) + '% ' +
- indentStr + funcName);
- // Limit backtrace depth.
- if (indent < 2 * self.callGraphSize_) {
- self.printHeavyProfile(rec.children, indent + 2);
- }
- // Delimit top-level functions.
- if (indent == 0) {
- print('');
+ const lc = this.getLineAndColumn(funcName);
+ if (lc == null) {
+ return funcName;
}
- });
-}
-}
-
-
+ // in source maps lines and columns are zero based
+ const lineNumber = lc.line - 1;
+ const column = lc.column - 1;
+ const entry = this.sourceMap.findEntry(lineNumber, column);
+ const sourceFile = entry[2];
+ const sourceLine = entry[3] + 1;
+ const sourceColumn = entry[4] + 1;
+
+ return `${sourceFile}:${sourceLine}:${sourceColumn} -> ${funcName}`;
+ }
+
+ printEntries(
+ profile, totalTicks, nonLibTicks, filterP, callback, printAllTicks) {
+ this.processProfile(profile, filterP, (rec) => {
+ if (rec.selfTime == 0) return;
+ callback(rec);
+ const funcName = this.formatFunctionName(rec.internalFuncName);
+ if (printAllTicks) {
+ this.printLine(funcName, rec.selfTime, totalTicks, nonLibTicks);
+ }
+ });
+ }
-function padLeft(s, len) {
- s = s.toString();
- if (s.length < len) {
- const padLength = len - s.length;
- if (!(padLength in padLeft)) {
- padLeft[padLength] = new Array(padLength + 1).join(' ');
- }
- s = padLeft[padLength] + s;
+ printHeavyProfile(profile, opt_indent) {
+ const indent = opt_indent || 0;
+ const indentStr = ''.padStart(indent);
+ this.processProfile(profile, () => true, (rec) => {
+ // Cut off too infrequent callers.
+ if (rec.parentTotalPercent < TickProcessor.CALL_PROFILE_CUTOFF_PCT) return;
+ const funcName = this.formatFunctionName(rec.internalFuncName);
+ print(`${` ${rec.totalTime.toString().padStart(5)} ` +
+ rec.parentTotalPercent.toFixed(1).toString().padStart(5)}% ${indentStr}${funcName}`);
+ // Limit backtrace depth.
+ if (indent < 2 * this.callGraphSize_) {
+ this.printHeavyProfile(rec.children, indent + 2);
+ }
+ // Delimit top-level functions.
+ if (indent == 0) print('');
+ });
}
- return s;
-};
+}
class CppEntriesProvider {
-
-parseVmSymbols(
- libName, libStart, libEnd, libASLRSlide, processorFunc) {
- this.loadSymbols(libName);
-
- let lastUnknownSize;
- let lastAdded;
-
- function inRange(funcInfo, start, end) {
+ inRange(funcInfo, start, end) {
return funcInfo.start >= start && funcInfo.end <= end;
}
- function addEntry(funcInfo) {
- // Several functions can be mapped onto the same address. To avoid
- // creating zero-sized entries, skip such duplicates.
- // Also double-check that function belongs to the library address space.
+ parseVmSymbols(libName, libStart, libEnd, libASLRSlide, processorFunc) {
+ this.loadSymbols(libName);
- if (lastUnknownSize &&
+ let lastUnknownSize;
+ let lastAdded;
+
+ let addEntry = (funcInfo) => {
+ // Several functions can be mapped onto the same address. To avoid
+ // creating zero-sized entries, skip such duplicates.
+ // Also double-check that function belongs to the library address space.
+
+ if (lastUnknownSize &&
lastUnknownSize.start < funcInfo.start) {
- // Try to update lastUnknownSize based on new entries start position.
- lastUnknownSize.end = funcInfo.start;
- if ((!lastAdded || !inRange(lastUnknownSize, lastAdded.start,
- lastAdded.end)) &&
- inRange(lastUnknownSize, libStart, libEnd)) {
- processorFunc(lastUnknownSize.name, lastUnknownSize.start,
- lastUnknownSize.end);
- lastAdded = lastUnknownSize;
+ // Try to update lastUnknownSize based on new entries start position.
+ lastUnknownSize.end = funcInfo.start;
+ if ((!lastAdded ||
+ !this.inRange(lastUnknownSize, lastAdded.start, lastAdded.end)) &&
+ this.inRange(lastUnknownSize, libStart, libEnd)) {
+ processorFunc(
+ lastUnknownSize.name, lastUnknownSize.start, lastUnknownSize.end);
+ lastAdded = lastUnknownSize;
+ }
}
- }
- lastUnknownSize = undefined;
-
- if (funcInfo.end) {
- // Skip duplicates that have the same start address as the last added.
- if ((!lastAdded || lastAdded.start != funcInfo.start) &&
- inRange(funcInfo, libStart, libEnd)) {
- processorFunc(funcInfo.name, funcInfo.start, funcInfo.end);
- lastAdded = funcInfo;
+ lastUnknownSize = undefined;
+
+ if (funcInfo.end) {
+ // Skip duplicates that have the same start address as the last added.
+ if ((!lastAdded || lastAdded.start != funcInfo.start) &&
+ this.inRange(funcInfo, libStart, libEnd)) {
+ processorFunc(funcInfo.name, funcInfo.start, funcInfo.end);
+ lastAdded = funcInfo;
+ }
+ } else {
+        // If a funcInfo doesn't have an end, try to match it up with the next
+ // entry.
+ lastUnknownSize = funcInfo;
}
- } else {
- // If a funcInfo doesn't have an end, try to match it up with then next
- // entry.
- lastUnknownSize = funcInfo;
}
- }
- while (true) {
- const funcInfo = this.parseNextLine();
- if (funcInfo === null) {
- continue;
- } else if (funcInfo === false) {
- break;
- }
- if (funcInfo.start < libStart - libASLRSlide &&
+ while (true) {
+ const funcInfo = this.parseNextLine();
+ if (funcInfo === null) continue;
+ if (funcInfo === false) break;
+ if (funcInfo.start < libStart - libASLRSlide &&
funcInfo.start < libEnd - libStart) {
- funcInfo.start += libStart;
- } else {
- funcInfo.start += libASLRSlide;
- }
- if (funcInfo.size) {
- funcInfo.end = funcInfo.start + funcInfo.size;
+ funcInfo.start += libStart;
+ } else {
+ funcInfo.start += libASLRSlide;
+ }
+ if (funcInfo.size) {
+ funcInfo.end = funcInfo.start + funcInfo.size;
+ }
+ addEntry(funcInfo);
}
- addEntry(funcInfo);
+ addEntry({ name: '', start: libEnd });
}
- addEntry({name: '', start: libEnd});
-}
-
-
-loadSymbols(libName) {
-}
-parseNextLine() { return false }
+ loadSymbols(libName) {}
+ parseNextLine() { return false }
}
export class UnixCppEntriesProvider extends CppEntriesProvider {
constructor(nmExec, objdumpExec, targetRootFS, apkEmbeddedLibrary) {
super();
- this.symbols = [];
- // File offset of a symbol minus the virtual address of a symbol found in
- // the symbol table.
- this.fileOffsetMinusVma = 0;
- this.parsePos = 0;
- this.nmExec = nmExec;
- this.objdumpExec = objdumpExec;
- this.targetRootFS = targetRootFS;
- this.apkEmbeddedLibrary = apkEmbeddedLibrary;
- this.FUNC_RE = /^([0-9a-fA-F]{8,16}) ([0-9a-fA-F]{8,16} )?[tTwW] (.*)$/;
-}
+ this.symbols = [];
+ // File offset of a symbol minus the virtual address of a symbol found in
+ // the symbol table.
+ this.fileOffsetMinusVma = 0;
+ this.parsePos = 0;
+ this.nmExec = nmExec;
+ this.objdumpExec = objdumpExec;
+ this.targetRootFS = targetRootFS;
+ this.apkEmbeddedLibrary = apkEmbeddedLibrary;
+ this.FUNC_RE = /^([0-9a-fA-F]{8,16}) ([0-9a-fA-F]{8,16} )?[tTwW] (.*)$/;
+ }
-loadSymbols(libName) {
- this.parsePos = 0;
- if (this.apkEmbeddedLibrary && libName.endsWith('.apk')) {
- libName = this.apkEmbeddedLibrary;
- }
- if (this.targetRootFS) {
- libName = libName.substring(libName.lastIndexOf('/') + 1);
- libName = this.targetRootFS + libName;
- }
- try {
- this.symbols = [
- os.system(this.nmExec, ['-C', '-n', '-S', libName], -1, -1),
- os.system(this.nmExec, ['-C', '-n', '-S', '-D', libName], -1, -1)
- ];
-
- const objdumpOutput = os.system(this.objdumpExec, ['-h', libName], -1, -1);
- for (const line of objdumpOutput.split('\n')) {
- const [,sectionName,,vma,,fileOffset] = line.trim().split(/\s+/);
- if (sectionName === ".text") {
- this.fileOffsetMinusVma = parseInt(fileOffset, 16) - parseInt(vma, 16);
+ loadSymbols(libName) {
+ this.parsePos = 0;
+ if (this.apkEmbeddedLibrary && libName.endsWith('.apk')) {
+ libName = this.apkEmbeddedLibrary;
+ }
+ if (this.targetRootFS) {
+ libName = libName.substring(libName.lastIndexOf('/') + 1);
+ libName = this.targetRootFS + libName;
+ }
+ try {
+ this.symbols = [
+ os.system(this.nmExec, ['-C', '-n', '-S', libName], -1, -1),
+ os.system(this.nmExec, ['-C', '-n', '-S', '-D', libName], -1, -1)
+ ];
+
+ const objdumpOutput = os.system(this.objdumpExec, ['-h', libName], -1, -1);
+ for (const line of objdumpOutput.split('\n')) {
+ const [, sectionName, , vma, , fileOffset] = line.trim().split(/\s+/);
+ if (sectionName === ".text") {
+ this.fileOffsetMinusVma = parseInt(fileOffset, 16) - parseInt(vma, 16);
+ }
}
+ } catch (e) {
+ // If the library cannot be found on this system let's not panic.
+ this.symbols = ['', ''];
}
- } catch (e) {
- // If the library cannot be found on this system let's not panic.
- this.symbols = ['', ''];
}
-}
-
-parseNextLine() {
- if (this.symbols.length == 0) {
- return false;
- }
- const lineEndPos = this.symbols[0].indexOf('\n', this.parsePos);
- if (lineEndPos == -1) {
- this.symbols.shift();
- this.parsePos = 0;
- return this.parseNextLine();
- }
+ parseNextLine() {
+ if (this.symbols.length == 0) {
+ return false;
+ }
+ const lineEndPos = this.symbols[0].indexOf('\n', this.parsePos);
+ if (lineEndPos == -1) {
+ this.symbols.shift();
+ this.parsePos = 0;
+ return this.parseNextLine();
+ }
- const line = this.symbols[0].substring(this.parsePos, lineEndPos);
- this.parsePos = lineEndPos + 1;
- const fields = line.match(this.FUNC_RE);
- let funcInfo = null;
- if (fields) {
- funcInfo = { name: fields[3], start: parseInt(fields[1], 16) + this.fileOffsetMinusVma };
- if (fields[2]) {
- funcInfo.size = parseInt(fields[2], 16);
+ const line = this.symbols[0].substring(this.parsePos, lineEndPos);
+ this.parsePos = lineEndPos + 1;
+ const fields = line.match(this.FUNC_RE);
+ let funcInfo = null;
+ if (fields) {
+ funcInfo = { name: fields[3], start: parseInt(fields[1], 16) + this.fileOffsetMinusVma };
+ if (fields[2]) {
+ funcInfo.size = parseInt(fields[2], 16);
+ }
}
+ return funcInfo;
}
- return funcInfo;
-}
}
export class MacCppEntriesProvider extends UnixCppEntriesProvider {
constructor(nmExec, objdumpExec, targetRootFS, apkEmbeddedLibrary) {
- super(nmExec, objdumpExec, targetRootFS, apkEmbeddedLibrary);
- // Note an empty group. It is required, as UnixCppEntriesProvider expects 3 groups.
- this.FUNC_RE = /^([0-9a-fA-F]{8,16})() (.*)$/;
-}
-
+ super(nmExec, objdumpExec, targetRootFS, apkEmbeddedLibrary);
+ // Note an empty group. It is required, as UnixCppEntriesProvider expects 3 groups.
+ this.FUNC_RE = /^([0-9a-fA-F]{8,16})() (.*)$/;
+ }
-loadSymbols(libName) {
- this.parsePos = 0;
- libName = this.targetRootFS + libName;
+ loadSymbols(libName) {
+ this.parsePos = 0;
+ libName = this.targetRootFS + libName;
- // It seems that in OS X `nm` thinks that `-f` is a format option, not a
- // "flat" display option flag.
- try {
- this.symbols = [os.system(this.nmExec, ['-n', libName], -1, -1), ''];
- } catch (e) {
- // If the library cannot be found on this system let's not panic.
- this.symbols = '';
+ // It seems that in OS X `nm` thinks that `-f` is a format option, not a
+ // "flat" display option flag.
+ try {
+ this.symbols = [os.system(this.nmExec, ['-n', libName], -1, -1), ''];
+ } catch (e) {
+ // If the library cannot be found on this system let's not panic.
+ this.symbols = '';
+ }
}
}
-}
export class WindowsCppEntriesProvider extends CppEntriesProvider {
constructor(_ignored_nmExec, _ignored_objdumpExec, targetRootFS,
- _ignored_apkEmbeddedLibrary) {
- super();
- this.targetRootFS = targetRootFS;
- this.symbols = '';
- this.parsePos = 0;
-};
-
-
-static FILENAME_RE = /^(.*)\.([^.]+)$/;
-
+ _ignored_apkEmbeddedLibrary) {
+ super();
+ this.targetRootFS = targetRootFS;
+ this.symbols = '';
+ this.parsePos = 0;
+ }
-static FUNC_RE =
+ static FILENAME_RE = /^(.*)\.([^.]+)$/;
+ static FUNC_RE =
/^\s+0001:[0-9a-fA-F]{8}\s+([_\?@$0-9a-zA-Z]+)\s+([0-9a-fA-F]{8}).*$/;
-
-
-static IMAGE_BASE_RE =
+ static IMAGE_BASE_RE =
/^\s+0000:00000000\s+___ImageBase\s+([0-9a-fA-F]{8}).*$/;
+ // This is almost a constant on Windows.
+ static EXE_IMAGE_BASE = 0x00400000;
-
-// This is almost a constant on Windows.
-static EXE_IMAGE_BASE = 0x00400000;
-
-
-loadSymbols(libName) {
- libName = this.targetRootFS + libName;
- const fileNameFields = libName.match(WindowsCppEntriesProvider.FILENAME_RE);
- if (!fileNameFields) return;
- const mapFileName = fileNameFields[1] + '.map';
- this.moduleType_ = fileNameFields[2].toLowerCase();
- try {
- this.symbols = read(mapFileName);
- } catch (e) {
- // If .map file cannot be found let's not panic.
- this.symbols = '';
+ loadSymbols(libName) {
+ libName = this.targetRootFS + libName;
+ const fileNameFields = libName.match(WindowsCppEntriesProvider.FILENAME_RE);
+ if (!fileNameFields) return;
+ const mapFileName = `${fileNameFields[1]}.map`;
+ this.moduleType_ = fileNameFields[2].toLowerCase();
+ try {
+ this.symbols = read(mapFileName);
+ } catch (e) {
+ // If .map file cannot be found let's not panic.
+ this.symbols = '';
+ }
}
-};
-
-parseNextLine() {
- const lineEndPos = this.symbols.indexOf('\r\n', this.parsePos);
- if (lineEndPos == -1) {
- return false;
- }
+ parseNextLine() {
+ const lineEndPos = this.symbols.indexOf('\r\n', this.parsePos);
+ if (lineEndPos == -1) {
+ return false;
+ }
- const line = this.symbols.substring(this.parsePos, lineEndPos);
- this.parsePos = lineEndPos + 2;
+ const line = this.symbols.substring(this.parsePos, lineEndPos);
+ this.parsePos = lineEndPos + 2;
- // Image base entry is above all other symbols, so we can just
- // terminate parsing.
- const imageBaseFields = line.match(WindowsCppEntriesProvider.IMAGE_BASE_RE);
- if (imageBaseFields) {
- const imageBase = parseInt(imageBaseFields[1], 16);
- if ((this.moduleType_ == 'exe') !=
+ // Image base entry is above all other symbols, so we can just
+ // terminate parsing.
+ const imageBaseFields = line.match(WindowsCppEntriesProvider.IMAGE_BASE_RE);
+ if (imageBaseFields) {
+ const imageBase = parseInt(imageBaseFields[1], 16);
+ if ((this.moduleType_ == 'exe') !=
(imageBase == WindowsCppEntriesProvider.EXE_IMAGE_BASE)) {
- return false;
+ return false;
+ }
}
- }
- const fields = line.match(WindowsCppEntriesProvider.FUNC_RE);
- return fields ?
+ const fields = line.match(WindowsCppEntriesProvider.FUNC_RE);
+ return fields ?
{ name: this.unmangleName(fields[1]), start: parseInt(fields[2], 16) } :
null;
-};
-
+ }
-/**
- * Performs very simple unmangling of C++ names.
- *
- * Does not handle arguments and template arguments. The mangled names have
- * the form:
- *
- * ?LookupInDescriptor@JSObject@internal@v8@@...arguments info...
- */
+ /**
+ * Performs very simple unmangling of C++ names.
+ *
+ * Does not handle arguments and template arguments. The mangled names have
+ * the form:
+ *
+ * ?LookupInDescriptor@JSObject@internal@v8@@...arguments info...
+ */
unmangleName(name) {
- // Empty or non-mangled name.
- if (name.length < 1 || name.charAt(0) != '?') return name;
- const nameEndPos = name.indexOf('@@');
- const components = name.substring(1, nameEndPos).split('@');
- components.reverse();
- return components.join('::');
-}
+ // Empty or non-mangled name.
+ if (name.length < 1 || name.charAt(0) != '?') return name;
+ const nameEndPos = name.indexOf('@@');
+ const components = name.substring(1, nameEndPos).split('@');
+ components.reverse();
+ return components.join('::');
+ }
}
@@ -877,61 +827,61 @@ export class ArgumentsProcessor extends BaseArgumentsProcessor {
getArgsDispatch() {
let dispatch = {
'-j': ['stateFilter', TickProcessor.VmStates.JS,
- 'Show only ticks from JS VM state'],
+ 'Show only ticks from JS VM state'],
'-g': ['stateFilter', TickProcessor.VmStates.GC,
- 'Show only ticks from GC VM state'],
+ 'Show only ticks from GC VM state'],
'-p': ['stateFilter', TickProcessor.VmStates.PARSER,
- 'Show only ticks from PARSER VM state'],
+ 'Show only ticks from PARSER VM state'],
'-b': ['stateFilter', TickProcessor.VmStates.BYTECODE_COMPILER,
- 'Show only ticks from BYTECODE_COMPILER VM state'],
+ 'Show only ticks from BYTECODE_COMPILER VM state'],
'-c': ['stateFilter', TickProcessor.VmStates.COMPILER,
- 'Show only ticks from COMPILER VM state'],
+ 'Show only ticks from COMPILER VM state'],
'-o': ['stateFilter', TickProcessor.VmStates.OTHER,
- 'Show only ticks from OTHER VM state'],
+ 'Show only ticks from OTHER VM state'],
'-e': ['stateFilter', TickProcessor.VmStates.EXTERNAL,
- 'Show only ticks from EXTERNAL VM state'],
+ 'Show only ticks from EXTERNAL VM state'],
'--filter-runtime-timer': ['runtimeTimerFilter', null,
- 'Show only ticks matching the given runtime timer scope'],
+ 'Show only ticks matching the given runtime timer scope'],
'--call-graph-size': ['callGraphSize', TickProcessor.CALL_GRAPH_SIZE,
- 'Set the call graph size'],
+ 'Set the call graph size'],
'--ignore-unknown': ['ignoreUnknown', true,
- 'Exclude ticks of unknown code entries from processing'],
+ 'Exclude ticks of unknown code entries from processing'],
'--separate-ic': ['separateIc', parseBool,
- 'Separate IC entries'],
+ 'Separate IC entries'],
'--separate-bytecodes': ['separateBytecodes', parseBool,
- 'Separate Bytecode entries'],
+ 'Separate Bytecode entries'],
'--separate-builtins': ['separateBuiltins', parseBool,
- 'Separate Builtin entries'],
+ 'Separate Builtin entries'],
'--separate-stubs': ['separateStubs', parseBool,
- 'Separate Stub entries'],
+ 'Separate Stub entries'],
'--unix': ['platform', 'unix',
- 'Specify that we are running on *nix platform'],
+ 'Specify that we are running on *nix platform'],
'--windows': ['platform', 'windows',
- 'Specify that we are running on Windows platform'],
+ 'Specify that we are running on Windows platform'],
'--mac': ['platform', 'mac',
- 'Specify that we are running on Mac OS X platform'],
+ 'Specify that we are running on Mac OS X platform'],
'--nm': ['nm', 'nm',
- 'Specify the \'nm\' executable to use (e.g. --nm=/my_dir/nm)'],
+ 'Specify the \'nm\' executable to use (e.g. --nm=/my_dir/nm)'],
'--objdump': ['objdump', 'objdump',
- 'Specify the \'objdump\' executable to use (e.g. --objdump=/my_dir/objdump)'],
+ 'Specify the \'objdump\' executable to use (e.g. --objdump=/my_dir/objdump)'],
'--target': ['targetRootFS', '',
- 'Specify the target root directory for cross environment'],
+ 'Specify the target root directory for cross environment'],
'--apk-embedded-library': ['apkEmbeddedLibrary', '',
- 'Specify the path of the embedded library for Android traces'],
+ 'Specify the path of the embedded library for Android traces'],
'--range': ['range', 'auto,auto',
- 'Specify the range limit as [start],[end]'],
+ 'Specify the range limit as [start],[end]'],
'--distortion': ['distortion', 0,
- 'Specify the logging overhead in picoseconds'],
+ 'Specify the logging overhead in picoseconds'],
'--source-map': ['sourceMap', null,
- 'Specify the source map that should be used for output'],
+ 'Specify the source map that should be used for output'],
'--timed-range': ['timedRange', true,
- 'Ignore ticks before first and after last Date.now() call'],
+ 'Ignore ticks before first and after last Date.now() call'],
'--pairwise-timed-range': ['pairwiseTimedRange', true,
- 'Ignore ticks outside pairs of Date.now() calls'],
+ 'Ignore ticks outside pairs of Date.now() calls'],
'--only-summary': ['onlySummary', true,
- 'Print only tick summary, exclude other information'],
+ 'Print only tick summary, exclude other information'],
'--preprocess': ['preprocessJson', true,
- 'Preprocess for consumption with web interface']
+ 'Preprocess for consumption with web interface']
};
dispatch['--js'] = dispatch['-j'];
dispatch['--gc'] = dispatch['-g'];
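Aside: the WindowsCppEntriesProvider.unmangleName logic carried over in the hunk above is compact enough to read in isolation. Below is a minimal standalone sketch of the same transformation; the helper name demangleMsvcName is illustrative only and is not part of the tool.

    // Strip the leading '?', take everything before the first '@@',
    // split the remainder on '@', reverse the namespace components and
    // join them with '::' -- the same steps unmangleName() performs above.
    function demangleMsvcName(name) {
      if (name.length < 1 || name.charAt(0) != '?') return name;  // not mangled
      const nameEndPos = name.indexOf('@@');
      const components = name.substring(1, nameEndPos).split('@');
      components.reverse();
      return components.join('::');
    }

    // Example, using the mangled form quoted in the doc comment above:
    // demangleMsvcName('?LookupInDescriptor@JSObject@internal@v8@@')
    //   -> 'v8::internal::JSObject::LookupInDescriptor'
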
diff --git a/deps/v8/tools/unittests/run_tests_test.py b/deps/v8/tools/unittests/run_tests_test.py
index 4cd2bdefd5..f174a239bf 100755
--- a/deps/v8/tools/unittests/run_tests_test.py
+++ b/deps/v8/tools/unittests/run_tests_test.py
@@ -350,6 +350,7 @@ class SystemTest(unittest.TestCase):
'no_i18n\n'
'tsan\n'
'ubsan_vptr\n'
+ 'webassembly\n'
'>>> Running tests for ia32.release')
self.assertIn(expect_text, result.stdout, result)
self.assertEqual(0, result.returncode, result)
diff --git a/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
index 8f8efc4f58..eb30c4d28f 100644
--- a/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
+++ b/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
@@ -20,5 +20,7 @@
"v8_enable_concurrent_marking": true,
"v8_enable_verify_csa": false,
"v8_enable_lite_mode": false,
- "v8_enable_pointer_compression": true
+ "v8_enable_pointer_compression": true,
+ "v8_control_flow_integrity": false,
+ "v8_enable_webassembly": true
}
diff --git a/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
index 7134998e1f..e6d5b52942 100644
--- a/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
+++ b/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
@@ -20,5 +20,7 @@
"v8_enable_concurrent_marking": true,
"v8_enable_verify_csa": false,
"v8_enable_lite_mode": false,
- "v8_enable_pointer_compression": false
+ "v8_enable_pointer_compression": false,
+ "v8_control_flow_integrity": false,
+ "v8_enable_webassembly": true
}
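Aside: both build-config fixtures above gain the same two keys. A hedged sketch of how one might sanity-check such a fixture outside the test harness follows; readBuildConfig is a hypothetical helper, not part of run_tests.

    // Load a v8_build_config.json fixture and verify the flags added in this
    // change are present before relying on them.
    import { readFileSync } from 'fs';

    function readBuildConfig(path) {
      const config = JSON.parse(readFileSync(path, 'utf8'));
      for (const key of ['v8_control_flow_integrity', 'v8_enable_webassembly']) {
        if (!(key in config)) throw new Error(`missing build flag: ${key}`);
      }
      return config;
    }

    // Usage:
    // readBuildConfig('deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json');
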
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index cfd41e6bbd..dba01c8839 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -11,6 +11,8 @@ INSTANCE_TYPES = {
2: "EXTERNAL_INTERNALIZED_STRING_TYPE",
8: "ONE_BYTE_INTERNALIZED_STRING_TYPE",
10: "EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
+ 18: "UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE",
+ 26: "UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
32: "STRING_TYPE",
33: "CONS_STRING_TYPE",
34: "EXTERNAL_STRING_TYPE",
@@ -49,105 +51,107 @@ INSTANCE_TYPES = {
87: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
88: "ASM_WASM_DATA_TYPE",
89: "ASYNC_GENERATOR_REQUEST_TYPE",
- 90: "BREAK_POINT_TYPE",
- 91: "BREAK_POINT_INFO_TYPE",
- 92: "CACHED_TEMPLATE_OBJECT_TYPE",
- 93: "CALL_HANDLER_INFO_TYPE",
- 94: "CLASS_POSITIONS_TYPE",
- 95: "DEBUG_INFO_TYPE",
- 96: "ENUM_CACHE_TYPE",
- 97: "FEEDBACK_CELL_TYPE",
- 98: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
- 99: "INTERCEPTOR_INFO_TYPE",
- 100: "INTERPRETER_DATA_TYPE",
- 101: "MODULE_REQUEST_TYPE",
- 102: "PROMISE_CAPABILITY_TYPE",
- 103: "PROMISE_REACTION_TYPE",
- 104: "PROPERTY_DESCRIPTOR_OBJECT_TYPE",
- 105: "PROTOTYPE_INFO_TYPE",
- 106: "SCRIPT_TYPE",
- 107: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
- 108: "STACK_FRAME_INFO_TYPE",
- 109: "STACK_TRACE_FRAME_TYPE",
- 110: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
- 111: "TUPLE2_TYPE",
- 112: "WASM_EXCEPTION_TAG_TYPE",
- 113: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
- 114: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
- 115: "WASM_JS_FUNCTION_DATA_TYPE",
- 116: "FIXED_ARRAY_TYPE",
- 117: "HASH_TABLE_TYPE",
- 118: "EPHEMERON_HASH_TABLE_TYPE",
- 119: "GLOBAL_DICTIONARY_TYPE",
- 120: "NAME_DICTIONARY_TYPE",
- 121: "NUMBER_DICTIONARY_TYPE",
- 122: "ORDERED_HASH_MAP_TYPE",
- 123: "ORDERED_HASH_SET_TYPE",
- 124: "ORDERED_NAME_DICTIONARY_TYPE",
- 125: "SIMPLE_NUMBER_DICTIONARY_TYPE",
- 126: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
- 127: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
- 128: "SCOPE_INFO_TYPE",
+ 90: "BASELINE_DATA_TYPE",
+ 91: "BREAK_POINT_TYPE",
+ 92: "BREAK_POINT_INFO_TYPE",
+ 93: "CACHED_TEMPLATE_OBJECT_TYPE",
+ 94: "CALL_HANDLER_INFO_TYPE",
+ 95: "CLASS_POSITIONS_TYPE",
+ 96: "DEBUG_INFO_TYPE",
+ 97: "ENUM_CACHE_TYPE",
+ 98: "FEEDBACK_CELL_TYPE",
+ 99: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
+ 100: "INTERCEPTOR_INFO_TYPE",
+ 101: "INTERPRETER_DATA_TYPE",
+ 102: "MODULE_REQUEST_TYPE",
+ 103: "PROMISE_CAPABILITY_TYPE",
+ 104: "PROMISE_REACTION_TYPE",
+ 105: "PROPERTY_DESCRIPTOR_OBJECT_TYPE",
+ 106: "PROTOTYPE_INFO_TYPE",
+ 107: "REG_EXP_BOILERPLATE_DESCRIPTION_TYPE",
+ 108: "SCRIPT_TYPE",
+ 109: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
+ 110: "STACK_FRAME_INFO_TYPE",
+ 111: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
+ 112: "TUPLE2_TYPE",
+ 113: "WASM_EXCEPTION_TAG_TYPE",
+ 114: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
+ 115: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
+ 116: "WASM_JS_FUNCTION_DATA_TYPE",
+ 117: "FIXED_ARRAY_TYPE",
+ 118: "HASH_TABLE_TYPE",
+ 119: "EPHEMERON_HASH_TABLE_TYPE",
+ 120: "GLOBAL_DICTIONARY_TYPE",
+ 121: "NAME_DICTIONARY_TYPE",
+ 122: "NUMBER_DICTIONARY_TYPE",
+ 123: "ORDERED_HASH_MAP_TYPE",
+ 124: "ORDERED_HASH_SET_TYPE",
+ 125: "ORDERED_NAME_DICTIONARY_TYPE",
+ 126: "SIMPLE_NUMBER_DICTIONARY_TYPE",
+ 127: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
+ 128: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
129: "SCRIPT_CONTEXT_TABLE_TYPE",
130: "BYTE_ARRAY_TYPE",
131: "BYTECODE_ARRAY_TYPE",
132: "FIXED_DOUBLE_ARRAY_TYPE",
133: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE",
- 134: "SLOPPY_ARGUMENTS_ELEMENTS_TYPE",
- 135: "AWAIT_CONTEXT_TYPE",
- 136: "BLOCK_CONTEXT_TYPE",
- 137: "CATCH_CONTEXT_TYPE",
- 138: "DEBUG_EVALUATE_CONTEXT_TYPE",
- 139: "EVAL_CONTEXT_TYPE",
- 140: "FUNCTION_CONTEXT_TYPE",
- 141: "MODULE_CONTEXT_TYPE",
- 142: "NATIVE_CONTEXT_TYPE",
- 143: "SCRIPT_CONTEXT_TYPE",
- 144: "WITH_CONTEXT_TYPE",
- 145: "EXPORTED_SUB_CLASS_BASE_TYPE",
- 146: "EXPORTED_SUB_CLASS_TYPE",
- 147: "EXPORTED_SUB_CLASS2_TYPE",
- 148: "SMALL_ORDERED_HASH_MAP_TYPE",
- 149: "SMALL_ORDERED_HASH_SET_TYPE",
- 150: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
- 151: "DESCRIPTOR_ARRAY_TYPE",
- 152: "STRONG_DESCRIPTOR_ARRAY_TYPE",
- 153: "SOURCE_TEXT_MODULE_TYPE",
- 154: "SYNTHETIC_MODULE_TYPE",
- 155: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
- 156: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
- 157: "WEAK_FIXED_ARRAY_TYPE",
- 158: "TRANSITION_ARRAY_TYPE",
- 159: "CELL_TYPE",
- 160: "CODE_TYPE",
- 161: "CODE_DATA_CONTAINER_TYPE",
- 162: "COVERAGE_INFO_TYPE",
- 163: "EMBEDDER_DATA_ARRAY_TYPE",
- 164: "FEEDBACK_METADATA_TYPE",
- 165: "FEEDBACK_VECTOR_TYPE",
- 166: "FILLER_TYPE",
- 167: "FREE_SPACE_TYPE",
- 168: "INTERNAL_CLASS_TYPE",
- 169: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
- 170: "MAP_TYPE",
- 171: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
- 172: "PREPARSE_DATA_TYPE",
- 173: "PROPERTY_ARRAY_TYPE",
- 174: "PROPERTY_CELL_TYPE",
- 175: "SHARED_FUNCTION_INFO_TYPE",
- 176: "SMI_BOX_TYPE",
- 177: "SMI_PAIR_TYPE",
- 178: "SORT_STATE_TYPE",
- 179: "WASM_ARRAY_TYPE",
- 180: "WASM_CAPI_FUNCTION_DATA_TYPE",
- 181: "WASM_STRUCT_TYPE",
- 182: "WEAK_ARRAY_LIST_TYPE",
- 183: "WEAK_CELL_TYPE",
- 184: "JS_PROXY_TYPE",
+ 134: "SCOPE_INFO_TYPE",
+ 135: "SLOPPY_ARGUMENTS_ELEMENTS_TYPE",
+ 136: "AWAIT_CONTEXT_TYPE",
+ 137: "BLOCK_CONTEXT_TYPE",
+ 138: "CATCH_CONTEXT_TYPE",
+ 139: "DEBUG_EVALUATE_CONTEXT_TYPE",
+ 140: "EVAL_CONTEXT_TYPE",
+ 141: "FUNCTION_CONTEXT_TYPE",
+ 142: "MODULE_CONTEXT_TYPE",
+ 143: "NATIVE_CONTEXT_TYPE",
+ 144: "SCRIPT_CONTEXT_TYPE",
+ 145: "WITH_CONTEXT_TYPE",
+ 146: "EXPORTED_SUB_CLASS_BASE_TYPE",
+ 147: "EXPORTED_SUB_CLASS_TYPE",
+ 148: "EXPORTED_SUB_CLASS2_TYPE",
+ 149: "SMALL_ORDERED_HASH_MAP_TYPE",
+ 150: "SMALL_ORDERED_HASH_SET_TYPE",
+ 151: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
+ 152: "DESCRIPTOR_ARRAY_TYPE",
+ 153: "STRONG_DESCRIPTOR_ARRAY_TYPE",
+ 154: "SOURCE_TEXT_MODULE_TYPE",
+ 155: "SYNTHETIC_MODULE_TYPE",
+ 156: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
+ 157: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
+ 158: "WEAK_FIXED_ARRAY_TYPE",
+ 159: "TRANSITION_ARRAY_TYPE",
+ 160: "CELL_TYPE",
+ 161: "CODE_TYPE",
+ 162: "CODE_DATA_CONTAINER_TYPE",
+ 163: "COVERAGE_INFO_TYPE",
+ 164: "EMBEDDER_DATA_ARRAY_TYPE",
+ 165: "FEEDBACK_METADATA_TYPE",
+ 166: "FEEDBACK_VECTOR_TYPE",
+ 167: "FILLER_TYPE",
+ 168: "FREE_SPACE_TYPE",
+ 169: "INTERNAL_CLASS_TYPE",
+ 170: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
+ 171: "MAP_TYPE",
+ 172: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
+ 173: "PREPARSE_DATA_TYPE",
+ 174: "PROPERTY_ARRAY_TYPE",
+ 175: "PROPERTY_CELL_TYPE",
+ 176: "SHARED_FUNCTION_INFO_TYPE",
+ 177: "SMI_BOX_TYPE",
+ 178: "SMI_PAIR_TYPE",
+ 179: "SORT_STATE_TYPE",
+ 180: "SWISS_NAME_DICTIONARY_TYPE",
+ 181: "WASM_ARRAY_TYPE",
+ 182: "WASM_CAPI_FUNCTION_DATA_TYPE",
+ 183: "WASM_STRUCT_TYPE",
+ 184: "WEAK_ARRAY_LIST_TYPE",
+ 185: "WEAK_CELL_TYPE",
+ 186: "JS_PROXY_TYPE",
1057: "JS_OBJECT_TYPE",
- 185: "JS_GLOBAL_OBJECT_TYPE",
- 186: "JS_GLOBAL_PROXY_TYPE",
- 187: "JS_MODULE_NAMESPACE_TYPE",
+ 187: "JS_GLOBAL_OBJECT_TYPE",
+ 188: "JS_GLOBAL_PROXY_TYPE",
+ 189: "JS_MODULE_NAMESPACE_TYPE",
1040: "JS_SPECIAL_API_OBJECT_TYPE",
1041: "JS_PRIMITIVE_WRAPPER_TYPE",
1042: "JS_ARRAY_ITERATOR_PROTOTYPE_TYPE",
@@ -165,316 +169,329 @@ INSTANCE_TYPES = {
1054: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
1055: "JS_ARGUMENTS_OBJECT_TYPE",
1056: "JS_API_OBJECT_TYPE",
- 1058: "JS_MAP_KEY_ITERATOR_TYPE",
- 1059: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
- 1060: "JS_MAP_VALUE_ITERATOR_TYPE",
- 1061: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
- 1062: "JS_SET_VALUE_ITERATOR_TYPE",
- 1063: "JS_DATA_VIEW_TYPE",
- 1064: "JS_TYPED_ARRAY_TYPE",
- 1065: "JS_MAP_TYPE",
- 1066: "JS_SET_TYPE",
- 1067: "JS_BOUND_FUNCTION_TYPE",
- 1068: "JS_FUNCTION_TYPE",
- 1069: "JS_WEAK_MAP_TYPE",
- 1070: "JS_WEAK_SET_TYPE",
- 1071: "JS_ARRAY_TYPE",
- 1072: "JS_ARRAY_BUFFER_TYPE",
- 1073: "JS_ARRAY_ITERATOR_TYPE",
- 1074: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
- 1075: "JS_COLLATOR_TYPE",
- 1076: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 1077: "JS_DATE_TYPE",
- 1078: "JS_DATE_TIME_FORMAT_TYPE",
- 1079: "JS_DISPLAY_NAMES_TYPE",
- 1080: "JS_ERROR_TYPE",
- 1081: "JS_FINALIZATION_REGISTRY_TYPE",
- 1082: "JS_LIST_FORMAT_TYPE",
- 1083: "JS_LOCALE_TYPE",
- 1084: "JS_MESSAGE_OBJECT_TYPE",
- 1085: "JS_NUMBER_FORMAT_TYPE",
- 1086: "JS_PLURAL_RULES_TYPE",
- 1087: "JS_PROMISE_TYPE",
- 1088: "JS_REG_EXP_TYPE",
- 1089: "JS_REG_EXP_STRING_ITERATOR_TYPE",
- 1090: "JS_RELATIVE_TIME_FORMAT_TYPE",
- 1091: "JS_SEGMENT_ITERATOR_TYPE",
- 1092: "JS_SEGMENTER_TYPE",
- 1093: "JS_SEGMENTS_TYPE",
- 1094: "JS_STRING_ITERATOR_TYPE",
- 1095: "JS_V8_BREAK_ITERATOR_TYPE",
- 1096: "JS_WEAK_REF_TYPE",
- 1097: "WASM_EXCEPTION_OBJECT_TYPE",
- 1098: "WASM_GLOBAL_OBJECT_TYPE",
- 1099: "WASM_INSTANCE_OBJECT_TYPE",
- 1100: "WASM_MEMORY_OBJECT_TYPE",
- 1101: "WASM_MODULE_OBJECT_TYPE",
- 1102: "WASM_TABLE_OBJECT_TYPE",
+ 1058: "JS_BOUND_FUNCTION_TYPE",
+ 1059: "JS_FUNCTION_TYPE",
+ 1060: "BIGINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 1061: "BIGUINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 1062: "FLOAT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 1063: "FLOAT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 1064: "INT16_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 1065: "INT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 1066: "INT8_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 1067: "UINT16_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 1068: "UINT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 1069: "UINT8_CLAMPED_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 1070: "UINT8_TYPED_ARRAY_CONSTRUCTOR_TYPE",
+ 1071: "JS_ARRAY_CONSTRUCTOR_TYPE",
+ 1072: "JS_PROMISE_CONSTRUCTOR_TYPE",
+ 1073: "JS_REG_EXP_CONSTRUCTOR_TYPE",
+ 1074: "JS_MAP_KEY_ITERATOR_TYPE",
+ 1075: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
+ 1076: "JS_MAP_VALUE_ITERATOR_TYPE",
+ 1077: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
+ 1078: "JS_SET_VALUE_ITERATOR_TYPE",
+ 1079: "JS_DATA_VIEW_TYPE",
+ 1080: "JS_TYPED_ARRAY_TYPE",
+ 1081: "JS_MAP_TYPE",
+ 1082: "JS_SET_TYPE",
+ 1083: "JS_WEAK_MAP_TYPE",
+ 1084: "JS_WEAK_SET_TYPE",
+ 1085: "JS_ARRAY_TYPE",
+ 1086: "JS_ARRAY_BUFFER_TYPE",
+ 1087: "JS_ARRAY_ITERATOR_TYPE",
+ 1088: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
+ 1089: "JS_COLLATOR_TYPE",
+ 1090: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 1091: "JS_DATE_TYPE",
+ 1092: "JS_DATE_TIME_FORMAT_TYPE",
+ 1093: "JS_DISPLAY_NAMES_TYPE",
+ 1094: "JS_ERROR_TYPE",
+ 1095: "JS_FINALIZATION_REGISTRY_TYPE",
+ 1096: "JS_LIST_FORMAT_TYPE",
+ 1097: "JS_LOCALE_TYPE",
+ 1098: "JS_MESSAGE_OBJECT_TYPE",
+ 1099: "JS_NUMBER_FORMAT_TYPE",
+ 1100: "JS_PLURAL_RULES_TYPE",
+ 1101: "JS_PROMISE_TYPE",
+ 1102: "JS_REG_EXP_TYPE",
+ 1103: "JS_REG_EXP_STRING_ITERATOR_TYPE",
+ 1104: "JS_RELATIVE_TIME_FORMAT_TYPE",
+ 1105: "JS_SEGMENT_ITERATOR_TYPE",
+ 1106: "JS_SEGMENTER_TYPE",
+ 1107: "JS_SEGMENTS_TYPE",
+ 1108: "JS_STRING_ITERATOR_TYPE",
+ 1109: "JS_V8_BREAK_ITERATOR_TYPE",
+ 1110: "JS_WEAK_REF_TYPE",
+ 1111: "WASM_EXCEPTION_OBJECT_TYPE",
+ 1112: "WASM_GLOBAL_OBJECT_TYPE",
+ 1113: "WASM_INSTANCE_OBJECT_TYPE",
+ 1114: "WASM_MEMORY_OBJECT_TYPE",
+ 1115: "WASM_MODULE_OBJECT_TYPE",
+ 1116: "WASM_TABLE_OBJECT_TYPE",
+ 1117: "WASM_VALUE_OBJECT_TYPE",
}
# List of known V8 maps.
KNOWN_MAPS = {
- ("read_only_space", 0x02115): (170, "MetaMap"),
- ("read_only_space", 0x0213d): (67, "NullMap"),
- ("read_only_space", 0x02165): (152, "StrongDescriptorArrayMap"),
- ("read_only_space", 0x0218d): (157, "WeakFixedArrayMap"),
- ("read_only_space", 0x021cd): (96, "EnumCacheMap"),
- ("read_only_space", 0x02201): (116, "FixedArrayMap"),
- ("read_only_space", 0x0224d): (8, "OneByteInternalizedStringMap"),
- ("read_only_space", 0x02299): (167, "FreeSpaceMap"),
- ("read_only_space", 0x022c1): (166, "OnePointerFillerMap"),
- ("read_only_space", 0x022e9): (166, "TwoPointerFillerMap"),
- ("read_only_space", 0x02311): (67, "UninitializedMap"),
- ("read_only_space", 0x02389): (67, "UndefinedMap"),
- ("read_only_space", 0x023cd): (66, "HeapNumberMap"),
- ("read_only_space", 0x02401): (67, "TheHoleMap"),
- ("read_only_space", 0x02461): (67, "BooleanMap"),
- ("read_only_space", 0x02505): (130, "ByteArrayMap"),
- ("read_only_space", 0x0252d): (116, "FixedCOWArrayMap"),
- ("read_only_space", 0x02555): (117, "HashTableMap"),
- ("read_only_space", 0x0257d): (64, "SymbolMap"),
- ("read_only_space", 0x025a5): (40, "OneByteStringMap"),
- ("read_only_space", 0x025cd): (128, "ScopeInfoMap"),
- ("read_only_space", 0x025f5): (175, "SharedFunctionInfoMap"),
- ("read_only_space", 0x0261d): (160, "CodeMap"),
- ("read_only_space", 0x02645): (159, "CellMap"),
- ("read_only_space", 0x0266d): (174, "GlobalPropertyCellMap"),
- ("read_only_space", 0x02695): (70, "ForeignMap"),
- ("read_only_space", 0x026bd): (158, "TransitionArrayMap"),
- ("read_only_space", 0x026e5): (45, "ThinOneByteStringMap"),
- ("read_only_space", 0x0270d): (165, "FeedbackVectorMap"),
- ("read_only_space", 0x02749): (67, "ArgumentsMarkerMap"),
- ("read_only_space", 0x027a9): (67, "ExceptionMap"),
- ("read_only_space", 0x02805): (67, "TerminationExceptionMap"),
- ("read_only_space", 0x0286d): (67, "OptimizedOutMap"),
- ("read_only_space", 0x028cd): (67, "StaleRegisterMap"),
- ("read_only_space", 0x0292d): (129, "ScriptContextTableMap"),
- ("read_only_space", 0x02955): (126, "ClosureFeedbackCellArrayMap"),
- ("read_only_space", 0x0297d): (164, "FeedbackMetadataArrayMap"),
- ("read_only_space", 0x029a5): (116, "ArrayListMap"),
- ("read_only_space", 0x029cd): (65, "BigIntMap"),
- ("read_only_space", 0x029f5): (127, "ObjectBoilerplateDescriptionMap"),
- ("read_only_space", 0x02a1d): (131, "BytecodeArrayMap"),
- ("read_only_space", 0x02a45): (161, "CodeDataContainerMap"),
- ("read_only_space", 0x02a6d): (162, "CoverageInfoMap"),
- ("read_only_space", 0x02a95): (132, "FixedDoubleArrayMap"),
- ("read_only_space", 0x02abd): (119, "GlobalDictionaryMap"),
- ("read_only_space", 0x02ae5): (97, "ManyClosuresCellMap"),
- ("read_only_space", 0x02b0d): (116, "ModuleInfoMap"),
- ("read_only_space", 0x02b35): (120, "NameDictionaryMap"),
- ("read_only_space", 0x02b5d): (97, "NoClosuresCellMap"),
- ("read_only_space", 0x02b85): (121, "NumberDictionaryMap"),
- ("read_only_space", 0x02bad): (97, "OneClosureCellMap"),
- ("read_only_space", 0x02bd5): (122, "OrderedHashMapMap"),
- ("read_only_space", 0x02bfd): (123, "OrderedHashSetMap"),
- ("read_only_space", 0x02c25): (124, "OrderedNameDictionaryMap"),
- ("read_only_space", 0x02c4d): (172, "PreparseDataMap"),
- ("read_only_space", 0x02c75): (173, "PropertyArrayMap"),
- ("read_only_space", 0x02c9d): (93, "SideEffectCallHandlerInfoMap"),
- ("read_only_space", 0x02cc5): (93, "SideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x02ced): (93, "NextCallSideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x02d15): (125, "SimpleNumberDictionaryMap"),
- ("read_only_space", 0x02d3d): (148, "SmallOrderedHashMapMap"),
- ("read_only_space", 0x02d65): (149, "SmallOrderedHashSetMap"),
- ("read_only_space", 0x02d8d): (150, "SmallOrderedNameDictionaryMap"),
- ("read_only_space", 0x02db5): (153, "SourceTextModuleMap"),
- ("read_only_space", 0x02ddd): (154, "SyntheticModuleMap"),
- ("read_only_space", 0x02e05): (71, "WasmTypeInfoMap"),
- ("read_only_space", 0x02e2d): (182, "WeakArrayListMap"),
- ("read_only_space", 0x02e55): (118, "EphemeronHashTableMap"),
- ("read_only_space", 0x02e7d): (163, "EmbedderDataArrayMap"),
- ("read_only_space", 0x02ea5): (183, "WeakCellMap"),
- ("read_only_space", 0x02ecd): (32, "StringMap"),
- ("read_only_space", 0x02ef5): (41, "ConsOneByteStringMap"),
- ("read_only_space", 0x02f1d): (33, "ConsStringMap"),
- ("read_only_space", 0x02f45): (37, "ThinStringMap"),
- ("read_only_space", 0x02f6d): (35, "SlicedStringMap"),
- ("read_only_space", 0x02f95): (43, "SlicedOneByteStringMap"),
- ("read_only_space", 0x02fbd): (34, "ExternalStringMap"),
- ("read_only_space", 0x02fe5): (42, "ExternalOneByteStringMap"),
- ("read_only_space", 0x0300d): (50, "UncachedExternalStringMap"),
- ("read_only_space", 0x03035): (0, "InternalizedStringMap"),
- ("read_only_space", 0x0305d): (2, "ExternalInternalizedStringMap"),
- ("read_only_space", 0x03085): (10, "ExternalOneByteInternalizedStringMap"),
- ("read_only_space", 0x030ad): (58, "UncachedExternalOneByteStringMap"),
- ("read_only_space", 0x030d5): (67, "SelfReferenceMarkerMap"),
- ("read_only_space", 0x030fd): (67, "BasicBlockCountersMarkerMap"),
- ("read_only_space", 0x03141): (87, "ArrayBoilerplateDescriptionMap"),
- ("read_only_space", 0x03229): (99, "InterceptorInfoMap"),
- ("read_only_space", 0x05355): (72, "PromiseFulfillReactionJobTaskMap"),
- ("read_only_space", 0x0537d): (73, "PromiseRejectReactionJobTaskMap"),
- ("read_only_space", 0x053a5): (74, "CallableTaskMap"),
- ("read_only_space", 0x053cd): (75, "CallbackTaskMap"),
- ("read_only_space", 0x053f5): (76, "PromiseResolveThenableJobTaskMap"),
- ("read_only_space", 0x0541d): (79, "FunctionTemplateInfoMap"),
- ("read_only_space", 0x05445): (80, "ObjectTemplateInfoMap"),
- ("read_only_space", 0x0546d): (81, "AccessCheckInfoMap"),
- ("read_only_space", 0x05495): (82, "AccessorInfoMap"),
- ("read_only_space", 0x054bd): (83, "AccessorPairMap"),
- ("read_only_space", 0x054e5): (84, "AliasedArgumentsEntryMap"),
- ("read_only_space", 0x0550d): (85, "AllocationMementoMap"),
- ("read_only_space", 0x05535): (88, "AsmWasmDataMap"),
- ("read_only_space", 0x0555d): (89, "AsyncGeneratorRequestMap"),
- ("read_only_space", 0x05585): (90, "BreakPointMap"),
- ("read_only_space", 0x055ad): (91, "BreakPointInfoMap"),
- ("read_only_space", 0x055d5): (92, "CachedTemplateObjectMap"),
- ("read_only_space", 0x055fd): (94, "ClassPositionsMap"),
- ("read_only_space", 0x05625): (95, "DebugInfoMap"),
- ("read_only_space", 0x0564d): (98, "FunctionTemplateRareDataMap"),
- ("read_only_space", 0x05675): (100, "InterpreterDataMap"),
- ("read_only_space", 0x0569d): (101, "ModuleRequestMap"),
- ("read_only_space", 0x056c5): (102, "PromiseCapabilityMap"),
- ("read_only_space", 0x056ed): (103, "PromiseReactionMap"),
- ("read_only_space", 0x05715): (104, "PropertyDescriptorObjectMap"),
- ("read_only_space", 0x0573d): (105, "PrototypeInfoMap"),
- ("read_only_space", 0x05765): (106, "ScriptMap"),
- ("read_only_space", 0x0578d): (107, "SourceTextModuleInfoEntryMap"),
- ("read_only_space", 0x057b5): (108, "StackFrameInfoMap"),
- ("read_only_space", 0x057dd): (109, "StackTraceFrameMap"),
- ("read_only_space", 0x05805): (110, "TemplateObjectDescriptionMap"),
- ("read_only_space", 0x0582d): (111, "Tuple2Map"),
- ("read_only_space", 0x05855): (112, "WasmExceptionTagMap"),
- ("read_only_space", 0x0587d): (113, "WasmExportedFunctionDataMap"),
- ("read_only_space", 0x058a5): (114, "WasmIndirectFunctionTableMap"),
- ("read_only_space", 0x058cd): (115, "WasmJSFunctionDataMap"),
- ("read_only_space", 0x058f5): (134, "SloppyArgumentsElementsMap"),
- ("read_only_space", 0x0591d): (151, "DescriptorArrayMap"),
- ("read_only_space", 0x05945): (156, "UncompiledDataWithoutPreparseDataMap"),
- ("read_only_space", 0x0596d): (155, "UncompiledDataWithPreparseDataMap"),
- ("read_only_space", 0x05995): (171, "OnHeapBasicBlockProfilerDataMap"),
- ("read_only_space", 0x059bd): (180, "WasmCapiFunctionDataMap"),
- ("read_only_space", 0x059e5): (168, "InternalClassMap"),
- ("read_only_space", 0x05a0d): (177, "SmiPairMap"),
- ("read_only_space", 0x05a35): (176, "SmiBoxMap"),
- ("read_only_space", 0x05a5d): (145, "ExportedSubClassBaseMap"),
- ("read_only_space", 0x05a85): (146, "ExportedSubClassMap"),
- ("read_only_space", 0x05aad): (68, "AbstractInternalClassSubclass1Map"),
- ("read_only_space", 0x05ad5): (69, "AbstractInternalClassSubclass2Map"),
- ("read_only_space", 0x05afd): (133, "InternalClassWithSmiElementsMap"),
- ("read_only_space", 0x05b25): (169, "InternalClassWithStructElementsMap"),
- ("read_only_space", 0x05b4d): (147, "ExportedSubClass2Map"),
- ("read_only_space", 0x05b75): (178, "SortStateMap"),
- ("read_only_space", 0x05b9d): (86, "AllocationSiteWithWeakNextMap"),
- ("read_only_space", 0x05bc5): (86, "AllocationSiteWithoutWeakNextMap"),
- ("read_only_space", 0x05bed): (77, "LoadHandler1Map"),
- ("read_only_space", 0x05c15): (77, "LoadHandler2Map"),
- ("read_only_space", 0x05c3d): (77, "LoadHandler3Map"),
- ("read_only_space", 0x05c65): (78, "StoreHandler0Map"),
- ("read_only_space", 0x05c8d): (78, "StoreHandler1Map"),
- ("read_only_space", 0x05cb5): (78, "StoreHandler2Map"),
- ("read_only_space", 0x05cdd): (78, "StoreHandler3Map"),
- ("map_space", 0x02115): (1057, "ExternalMap"),
- ("map_space", 0x0213d): (1084, "JSMessageObjectMap"),
- ("map_space", 0x02165): (181, "WasmRttEqrefMap"),
- ("map_space", 0x0218d): (181, "WasmRttAnyrefMap"),
- ("map_space", 0x021b5): (181, "WasmRttExternrefMap"),
- ("map_space", 0x021dd): (181, "WasmRttFuncrefMap"),
- ("map_space", 0x02205): (181, "WasmRttI31refMap"),
+ ("read_only_space", 0x02119): (171, "MetaMap"),
+ ("read_only_space", 0x02141): (67, "NullMap"),
+ ("read_only_space", 0x02169): (153, "StrongDescriptorArrayMap"),
+ ("read_only_space", 0x02191): (158, "WeakFixedArrayMap"),
+ ("read_only_space", 0x021d1): (97, "EnumCacheMap"),
+ ("read_only_space", 0x02205): (117, "FixedArrayMap"),
+ ("read_only_space", 0x02251): (8, "OneByteInternalizedStringMap"),
+ ("read_only_space", 0x0229d): (168, "FreeSpaceMap"),
+ ("read_only_space", 0x022c5): (167, "OnePointerFillerMap"),
+ ("read_only_space", 0x022ed): (167, "TwoPointerFillerMap"),
+ ("read_only_space", 0x02315): (67, "UninitializedMap"),
+ ("read_only_space", 0x0238d): (67, "UndefinedMap"),
+ ("read_only_space", 0x023d1): (66, "HeapNumberMap"),
+ ("read_only_space", 0x02405): (67, "TheHoleMap"),
+ ("read_only_space", 0x02465): (67, "BooleanMap"),
+ ("read_only_space", 0x02509): (130, "ByteArrayMap"),
+ ("read_only_space", 0x02531): (117, "FixedCOWArrayMap"),
+ ("read_only_space", 0x02559): (118, "HashTableMap"),
+ ("read_only_space", 0x02581): (64, "SymbolMap"),
+ ("read_only_space", 0x025a9): (40, "OneByteStringMap"),
+ ("read_only_space", 0x025d1): (134, "ScopeInfoMap"),
+ ("read_only_space", 0x025f9): (176, "SharedFunctionInfoMap"),
+ ("read_only_space", 0x02621): (161, "CodeMap"),
+ ("read_only_space", 0x02649): (160, "CellMap"),
+ ("read_only_space", 0x02671): (175, "GlobalPropertyCellMap"),
+ ("read_only_space", 0x02699): (70, "ForeignMap"),
+ ("read_only_space", 0x026c1): (159, "TransitionArrayMap"),
+ ("read_only_space", 0x026e9): (45, "ThinOneByteStringMap"),
+ ("read_only_space", 0x02711): (166, "FeedbackVectorMap"),
+ ("read_only_space", 0x0274d): (67, "ArgumentsMarkerMap"),
+ ("read_only_space", 0x027ad): (67, "ExceptionMap"),
+ ("read_only_space", 0x02809): (67, "TerminationExceptionMap"),
+ ("read_only_space", 0x02871): (67, "OptimizedOutMap"),
+ ("read_only_space", 0x028d1): (67, "StaleRegisterMap"),
+ ("read_only_space", 0x02931): (129, "ScriptContextTableMap"),
+ ("read_only_space", 0x02959): (127, "ClosureFeedbackCellArrayMap"),
+ ("read_only_space", 0x02981): (165, "FeedbackMetadataArrayMap"),
+ ("read_only_space", 0x029a9): (117, "ArrayListMap"),
+ ("read_only_space", 0x029d1): (65, "BigIntMap"),
+ ("read_only_space", 0x029f9): (128, "ObjectBoilerplateDescriptionMap"),
+ ("read_only_space", 0x02a21): (131, "BytecodeArrayMap"),
+ ("read_only_space", 0x02a49): (162, "CodeDataContainerMap"),
+ ("read_only_space", 0x02a71): (163, "CoverageInfoMap"),
+ ("read_only_space", 0x02a99): (132, "FixedDoubleArrayMap"),
+ ("read_only_space", 0x02ac1): (120, "GlobalDictionaryMap"),
+ ("read_only_space", 0x02ae9): (98, "ManyClosuresCellMap"),
+ ("read_only_space", 0x02b11): (117, "ModuleInfoMap"),
+ ("read_only_space", 0x02b39): (121, "NameDictionaryMap"),
+ ("read_only_space", 0x02b61): (98, "NoClosuresCellMap"),
+ ("read_only_space", 0x02b89): (122, "NumberDictionaryMap"),
+ ("read_only_space", 0x02bb1): (98, "OneClosureCellMap"),
+ ("read_only_space", 0x02bd9): (123, "OrderedHashMapMap"),
+ ("read_only_space", 0x02c01): (124, "OrderedHashSetMap"),
+ ("read_only_space", 0x02c29): (125, "OrderedNameDictionaryMap"),
+ ("read_only_space", 0x02c51): (173, "PreparseDataMap"),
+ ("read_only_space", 0x02c79): (174, "PropertyArrayMap"),
+ ("read_only_space", 0x02ca1): (94, "SideEffectCallHandlerInfoMap"),
+ ("read_only_space", 0x02cc9): (94, "SideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x02cf1): (94, "NextCallSideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x02d19): (126, "SimpleNumberDictionaryMap"),
+ ("read_only_space", 0x02d41): (149, "SmallOrderedHashMapMap"),
+ ("read_only_space", 0x02d69): (150, "SmallOrderedHashSetMap"),
+ ("read_only_space", 0x02d91): (151, "SmallOrderedNameDictionaryMap"),
+ ("read_only_space", 0x02db9): (154, "SourceTextModuleMap"),
+ ("read_only_space", 0x02de1): (180, "SwissNameDictionaryMap"),
+ ("read_only_space", 0x02e09): (155, "SyntheticModuleMap"),
+ ("read_only_space", 0x02e31): (71, "WasmTypeInfoMap"),
+ ("read_only_space", 0x02e59): (184, "WeakArrayListMap"),
+ ("read_only_space", 0x02e81): (119, "EphemeronHashTableMap"),
+ ("read_only_space", 0x02ea9): (164, "EmbedderDataArrayMap"),
+ ("read_only_space", 0x02ed1): (185, "WeakCellMap"),
+ ("read_only_space", 0x02ef9): (32, "StringMap"),
+ ("read_only_space", 0x02f21): (41, "ConsOneByteStringMap"),
+ ("read_only_space", 0x02f49): (33, "ConsStringMap"),
+ ("read_only_space", 0x02f71): (37, "ThinStringMap"),
+ ("read_only_space", 0x02f99): (35, "SlicedStringMap"),
+ ("read_only_space", 0x02fc1): (43, "SlicedOneByteStringMap"),
+ ("read_only_space", 0x02fe9): (34, "ExternalStringMap"),
+ ("read_only_space", 0x03011): (42, "ExternalOneByteStringMap"),
+ ("read_only_space", 0x03039): (50, "UncachedExternalStringMap"),
+ ("read_only_space", 0x03061): (0, "InternalizedStringMap"),
+ ("read_only_space", 0x03089): (2, "ExternalInternalizedStringMap"),
+ ("read_only_space", 0x030b1): (10, "ExternalOneByteInternalizedStringMap"),
+ ("read_only_space", 0x030d9): (18, "UncachedExternalInternalizedStringMap"),
+ ("read_only_space", 0x03101): (26, "UncachedExternalOneByteInternalizedStringMap"),
+ ("read_only_space", 0x03129): (58, "UncachedExternalOneByteStringMap"),
+ ("read_only_space", 0x03151): (67, "SelfReferenceMarkerMap"),
+ ("read_only_space", 0x03179): (67, "BasicBlockCountersMarkerMap"),
+ ("read_only_space", 0x031bd): (87, "ArrayBoilerplateDescriptionMap"),
+ ("read_only_space", 0x032bd): (100, "InterceptorInfoMap"),
+ ("read_only_space", 0x05411): (72, "PromiseFulfillReactionJobTaskMap"),
+ ("read_only_space", 0x05439): (73, "PromiseRejectReactionJobTaskMap"),
+ ("read_only_space", 0x05461): (74, "CallableTaskMap"),
+ ("read_only_space", 0x05489): (75, "CallbackTaskMap"),
+ ("read_only_space", 0x054b1): (76, "PromiseResolveThenableJobTaskMap"),
+ ("read_only_space", 0x054d9): (79, "FunctionTemplateInfoMap"),
+ ("read_only_space", 0x05501): (80, "ObjectTemplateInfoMap"),
+ ("read_only_space", 0x05529): (81, "AccessCheckInfoMap"),
+ ("read_only_space", 0x05551): (82, "AccessorInfoMap"),
+ ("read_only_space", 0x05579): (83, "AccessorPairMap"),
+ ("read_only_space", 0x055a1): (84, "AliasedArgumentsEntryMap"),
+ ("read_only_space", 0x055c9): (85, "AllocationMementoMap"),
+ ("read_only_space", 0x055f1): (88, "AsmWasmDataMap"),
+ ("read_only_space", 0x05619): (89, "AsyncGeneratorRequestMap"),
+ ("read_only_space", 0x05641): (90, "BaselineDataMap"),
+ ("read_only_space", 0x05669): (91, "BreakPointMap"),
+ ("read_only_space", 0x05691): (92, "BreakPointInfoMap"),
+ ("read_only_space", 0x056b9): (93, "CachedTemplateObjectMap"),
+ ("read_only_space", 0x056e1): (95, "ClassPositionsMap"),
+ ("read_only_space", 0x05709): (96, "DebugInfoMap"),
+ ("read_only_space", 0x05731): (99, "FunctionTemplateRareDataMap"),
+ ("read_only_space", 0x05759): (101, "InterpreterDataMap"),
+ ("read_only_space", 0x05781): (102, "ModuleRequestMap"),
+ ("read_only_space", 0x057a9): (103, "PromiseCapabilityMap"),
+ ("read_only_space", 0x057d1): (104, "PromiseReactionMap"),
+ ("read_only_space", 0x057f9): (105, "PropertyDescriptorObjectMap"),
+ ("read_only_space", 0x05821): (106, "PrototypeInfoMap"),
+ ("read_only_space", 0x05849): (107, "RegExpBoilerplateDescriptionMap"),
+ ("read_only_space", 0x05871): (108, "ScriptMap"),
+ ("read_only_space", 0x05899): (109, "SourceTextModuleInfoEntryMap"),
+ ("read_only_space", 0x058c1): (110, "StackFrameInfoMap"),
+ ("read_only_space", 0x058e9): (111, "TemplateObjectDescriptionMap"),
+ ("read_only_space", 0x05911): (112, "Tuple2Map"),
+ ("read_only_space", 0x05939): (113, "WasmExceptionTagMap"),
+ ("read_only_space", 0x05961): (114, "WasmExportedFunctionDataMap"),
+ ("read_only_space", 0x05989): (115, "WasmIndirectFunctionTableMap"),
+ ("read_only_space", 0x059b1): (116, "WasmJSFunctionDataMap"),
+ ("read_only_space", 0x059d9): (135, "SloppyArgumentsElementsMap"),
+ ("read_only_space", 0x05a01): (152, "DescriptorArrayMap"),
+ ("read_only_space", 0x05a29): (157, "UncompiledDataWithoutPreparseDataMap"),
+ ("read_only_space", 0x05a51): (156, "UncompiledDataWithPreparseDataMap"),
+ ("read_only_space", 0x05a79): (172, "OnHeapBasicBlockProfilerDataMap"),
+ ("read_only_space", 0x05aa1): (182, "WasmCapiFunctionDataMap"),
+ ("read_only_space", 0x05ac9): (169, "InternalClassMap"),
+ ("read_only_space", 0x05af1): (178, "SmiPairMap"),
+ ("read_only_space", 0x05b19): (177, "SmiBoxMap"),
+ ("read_only_space", 0x05b41): (146, "ExportedSubClassBaseMap"),
+ ("read_only_space", 0x05b69): (147, "ExportedSubClassMap"),
+ ("read_only_space", 0x05b91): (68, "AbstractInternalClassSubclass1Map"),
+ ("read_only_space", 0x05bb9): (69, "AbstractInternalClassSubclass2Map"),
+ ("read_only_space", 0x05be1): (133, "InternalClassWithSmiElementsMap"),
+ ("read_only_space", 0x05c09): (170, "InternalClassWithStructElementsMap"),
+ ("read_only_space", 0x05c31): (148, "ExportedSubClass2Map"),
+ ("read_only_space", 0x05c59): (179, "SortStateMap"),
+ ("read_only_space", 0x05c81): (86, "AllocationSiteWithWeakNextMap"),
+ ("read_only_space", 0x05ca9): (86, "AllocationSiteWithoutWeakNextMap"),
+ ("read_only_space", 0x05cd1): (77, "LoadHandler1Map"),
+ ("read_only_space", 0x05cf9): (77, "LoadHandler2Map"),
+ ("read_only_space", 0x05d21): (77, "LoadHandler3Map"),
+ ("read_only_space", 0x05d49): (78, "StoreHandler0Map"),
+ ("read_only_space", 0x05d71): (78, "StoreHandler1Map"),
+ ("read_only_space", 0x05d99): (78, "StoreHandler2Map"),
+ ("read_only_space", 0x05dc1): (78, "StoreHandler3Map"),
+ ("map_space", 0x02119): (1057, "ExternalMap"),
+ ("map_space", 0x02141): (1098, "JSMessageObjectMap"),
}
# List of known V8 objects.
KNOWN_OBJECTS = {
- ("read_only_space", 0x021b5): "EmptyWeakFixedArray",
- ("read_only_space", 0x021bd): "EmptyDescriptorArray",
- ("read_only_space", 0x021f5): "EmptyEnumCache",
- ("read_only_space", 0x02229): "EmptyFixedArray",
- ("read_only_space", 0x02231): "NullValue",
- ("read_only_space", 0x02339): "UninitializedValue",
- ("read_only_space", 0x023b1): "UndefinedValue",
- ("read_only_space", 0x023f5): "NanValue",
- ("read_only_space", 0x02429): "TheHoleValue",
- ("read_only_space", 0x02455): "HoleNanValue",
- ("read_only_space", 0x02489): "TrueValue",
- ("read_only_space", 0x024c9): "FalseValue",
- ("read_only_space", 0x024f9): "empty_string",
- ("read_only_space", 0x02735): "EmptyScopeInfo",
- ("read_only_space", 0x02771): "ArgumentsMarker",
- ("read_only_space", 0x027d1): "Exception",
- ("read_only_space", 0x0282d): "TerminationException",
- ("read_only_space", 0x02895): "OptimizedOut",
- ("read_only_space", 0x028f5): "StaleRegister",
- ("read_only_space", 0x03125): "EmptyPropertyArray",
- ("read_only_space", 0x0312d): "EmptyByteArray",
- ("read_only_space", 0x03135): "EmptyObjectBoilerplateDescription",
- ("read_only_space", 0x03169): "EmptyArrayBoilerplateDescription",
- ("read_only_space", 0x03175): "EmptyClosureFeedbackCellArray",
- ("read_only_space", 0x0317d): "EmptySlowElementDictionary",
- ("read_only_space", 0x031a1): "EmptyOrderedHashMap",
- ("read_only_space", 0x031b5): "EmptyOrderedHashSet",
- ("read_only_space", 0x031c9): "EmptyFeedbackMetadata",
- ("read_only_space", 0x031d5): "EmptyPropertyCell",
- ("read_only_space", 0x031e9): "EmptyPropertyDictionary",
- ("read_only_space", 0x03211): "EmptyOrderedPropertyDictionary",
- ("read_only_space", 0x03251): "NoOpInterceptorInfo",
- ("read_only_space", 0x03279): "EmptyWeakArrayList",
- ("read_only_space", 0x03285): "InfinityValue",
- ("read_only_space", 0x03291): "MinusZeroValue",
- ("read_only_space", 0x0329d): "MinusInfinityValue",
- ("read_only_space", 0x032a9): "SelfReferenceMarker",
- ("read_only_space", 0x032e9): "BasicBlockCountersMarker",
- ("read_only_space", 0x0332d): "OffHeapTrampolineRelocationInfo",
- ("read_only_space", 0x03339): "TrampolineTrivialCodeDataContainer",
- ("read_only_space", 0x03345): "TrampolinePromiseRejectionCodeDataContainer",
- ("read_only_space", 0x03351): "GlobalThisBindingScopeInfo",
- ("read_only_space", 0x03389): "EmptyFunctionScopeInfo",
- ("read_only_space", 0x033b1): "NativeScopeInfo",
- ("read_only_space", 0x033cd): "HashSeed",
- ("old_space", 0x02115): "ArgumentsIteratorAccessor",
- ("old_space", 0x02159): "ArrayLengthAccessor",
- ("old_space", 0x0219d): "BoundFunctionLengthAccessor",
- ("old_space", 0x021e1): "BoundFunctionNameAccessor",
- ("old_space", 0x02225): "ErrorStackAccessor",
- ("old_space", 0x02269): "FunctionArgumentsAccessor",
- ("old_space", 0x022ad): "FunctionCallerAccessor",
- ("old_space", 0x022f1): "FunctionNameAccessor",
- ("old_space", 0x02335): "FunctionLengthAccessor",
- ("old_space", 0x02379): "FunctionPrototypeAccessor",
- ("old_space", 0x023bd): "RegExpResultIndicesAccessor",
- ("old_space", 0x02401): "StringLengthAccessor",
- ("old_space", 0x02445): "InvalidPrototypeValidityCell",
- ("old_space", 0x02531): "EmptyScript",
- ("old_space", 0x02571): "ManyClosuresCell",
- ("old_space", 0x0257d): "ArrayConstructorProtector",
- ("old_space", 0x02591): "NoElementsProtector",
- ("old_space", 0x025a5): "IsConcatSpreadableProtector",
- ("old_space", 0x025b9): "ArraySpeciesProtector",
- ("old_space", 0x025cd): "TypedArraySpeciesProtector",
- ("old_space", 0x025e1): "PromiseSpeciesProtector",
- ("old_space", 0x025f5): "RegExpSpeciesProtector",
- ("old_space", 0x02609): "StringLengthProtector",
- ("old_space", 0x0261d): "ArrayIteratorProtector",
- ("old_space", 0x02631): "ArrayBufferDetachingProtector",
- ("old_space", 0x02645): "PromiseHookProtector",
- ("old_space", 0x02659): "PromiseResolveProtector",
- ("old_space", 0x0266d): "MapIteratorProtector",
- ("old_space", 0x02681): "PromiseThenProtector",
- ("old_space", 0x02695): "SetIteratorProtector",
- ("old_space", 0x026a9): "StringIteratorProtector",
- ("old_space", 0x026bd): "SingleCharacterStringCache",
- ("old_space", 0x02ac5): "StringSplitCache",
- ("old_space", 0x02ecd): "RegExpMultipleCache",
- ("old_space", 0x032d5): "BuiltinsConstantsTable",
- ("old_space", 0x036bd): "AsyncFunctionAwaitRejectSharedFun",
- ("old_space", 0x036e1): "AsyncFunctionAwaitResolveSharedFun",
- ("old_space", 0x03705): "AsyncGeneratorAwaitRejectSharedFun",
- ("old_space", 0x03729): "AsyncGeneratorAwaitResolveSharedFun",
- ("old_space", 0x0374d): "AsyncGeneratorYieldResolveSharedFun",
- ("old_space", 0x03771): "AsyncGeneratorReturnResolveSharedFun",
- ("old_space", 0x03795): "AsyncGeneratorReturnClosedRejectSharedFun",
- ("old_space", 0x037b9): "AsyncGeneratorReturnClosedResolveSharedFun",
- ("old_space", 0x037dd): "AsyncIteratorValueUnwrapSharedFun",
- ("old_space", 0x03801): "PromiseAllResolveElementSharedFun",
- ("old_space", 0x03825): "PromiseAllSettledResolveElementSharedFun",
- ("old_space", 0x03849): "PromiseAllSettledRejectElementSharedFun",
- ("old_space", 0x0386d): "PromiseAnyRejectElementSharedFun",
- ("old_space", 0x03891): "PromiseCapabilityDefaultRejectSharedFun",
- ("old_space", 0x038b5): "PromiseCapabilityDefaultResolveSharedFun",
- ("old_space", 0x038d9): "PromiseCatchFinallySharedFun",
- ("old_space", 0x038fd): "PromiseGetCapabilitiesExecutorSharedFun",
- ("old_space", 0x03921): "PromiseThenFinallySharedFun",
- ("old_space", 0x03945): "PromiseThrowerFinallySharedFun",
- ("old_space", 0x03969): "PromiseValueThunkFinallySharedFun",
- ("old_space", 0x0398d): "ProxyRevokeSharedFun",
+ ("read_only_space", 0x021b9): "EmptyWeakFixedArray",
+ ("read_only_space", 0x021c1): "EmptyDescriptorArray",
+ ("read_only_space", 0x021f9): "EmptyEnumCache",
+ ("read_only_space", 0x0222d): "EmptyFixedArray",
+ ("read_only_space", 0x02235): "NullValue",
+ ("read_only_space", 0x0233d): "UninitializedValue",
+ ("read_only_space", 0x023b5): "UndefinedValue",
+ ("read_only_space", 0x023f9): "NanValue",
+ ("read_only_space", 0x0242d): "TheHoleValue",
+ ("read_only_space", 0x02459): "HoleNanValue",
+ ("read_only_space", 0x0248d): "TrueValue",
+ ("read_only_space", 0x024cd): "FalseValue",
+ ("read_only_space", 0x024fd): "empty_string",
+ ("read_only_space", 0x02739): "EmptyScopeInfo",
+ ("read_only_space", 0x02775): "ArgumentsMarker",
+ ("read_only_space", 0x027d5): "Exception",
+ ("read_only_space", 0x02831): "TerminationException",
+ ("read_only_space", 0x02899): "OptimizedOut",
+ ("read_only_space", 0x028f9): "StaleRegister",
+ ("read_only_space", 0x031a1): "EmptyPropertyArray",
+ ("read_only_space", 0x031a9): "EmptyByteArray",
+ ("read_only_space", 0x031b1): "EmptyObjectBoilerplateDescription",
+ ("read_only_space", 0x031e5): "EmptyArrayBoilerplateDescription",
+ ("read_only_space", 0x031f1): "EmptyClosureFeedbackCellArray",
+ ("read_only_space", 0x031f9): "EmptySlowElementDictionary",
+ ("read_only_space", 0x0321d): "EmptyOrderedHashMap",
+ ("read_only_space", 0x03231): "EmptyOrderedHashSet",
+ ("read_only_space", 0x03245): "EmptyFeedbackMetadata",
+ ("read_only_space", 0x03251): "EmptyPropertyDictionary",
+ ("read_only_space", 0x03279): "EmptyOrderedPropertyDictionary",
+ ("read_only_space", 0x03291): "EmptySwissPropertyDictionary",
+ ("read_only_space", 0x032e5): "NoOpInterceptorInfo",
+ ("read_only_space", 0x0330d): "EmptyWeakArrayList",
+ ("read_only_space", 0x03319): "InfinityValue",
+ ("read_only_space", 0x03325): "MinusZeroValue",
+ ("read_only_space", 0x03331): "MinusInfinityValue",
+ ("read_only_space", 0x0333d): "SelfReferenceMarker",
+ ("read_only_space", 0x0337d): "BasicBlockCountersMarker",
+ ("read_only_space", 0x033c1): "OffHeapTrampolineRelocationInfo",
+ ("read_only_space", 0x033cd): "TrampolineTrivialCodeDataContainer",
+ ("read_only_space", 0x033d9): "TrampolinePromiseRejectionCodeDataContainer",
+ ("read_only_space", 0x033e5): "GlobalThisBindingScopeInfo",
+ ("read_only_space", 0x0341d): "EmptyFunctionScopeInfo",
+ ("read_only_space", 0x03445): "NativeScopeInfo",
+ ("read_only_space", 0x03461): "HashSeed",
+ ("old_space", 0x02119): "ArgumentsIteratorAccessor",
+ ("old_space", 0x0215d): "ArrayLengthAccessor",
+ ("old_space", 0x021a1): "BoundFunctionLengthAccessor",
+ ("old_space", 0x021e5): "BoundFunctionNameAccessor",
+ ("old_space", 0x02229): "ErrorStackAccessor",
+ ("old_space", 0x0226d): "FunctionArgumentsAccessor",
+ ("old_space", 0x022b1): "FunctionCallerAccessor",
+ ("old_space", 0x022f5): "FunctionNameAccessor",
+ ("old_space", 0x02339): "FunctionLengthAccessor",
+ ("old_space", 0x0237d): "FunctionPrototypeAccessor",
+ ("old_space", 0x023c1): "StringLengthAccessor",
+ ("old_space", 0x02405): "InvalidPrototypeValidityCell",
+ ("old_space", 0x0240d): "EmptyScript",
+ ("old_space", 0x0244d): "ManyClosuresCell",
+ ("old_space", 0x02459): "ArrayConstructorProtector",
+ ("old_space", 0x0246d): "NoElementsProtector",
+ ("old_space", 0x02481): "IsConcatSpreadableProtector",
+ ("old_space", 0x02495): "ArraySpeciesProtector",
+ ("old_space", 0x024a9): "TypedArraySpeciesProtector",
+ ("old_space", 0x024bd): "PromiseSpeciesProtector",
+ ("old_space", 0x024d1): "RegExpSpeciesProtector",
+ ("old_space", 0x024e5): "StringLengthProtector",
+ ("old_space", 0x024f9): "ArrayIteratorProtector",
+ ("old_space", 0x0250d): "ArrayBufferDetachingProtector",
+ ("old_space", 0x02521): "PromiseHookProtector",
+ ("old_space", 0x02535): "PromiseResolveProtector",
+ ("old_space", 0x02549): "MapIteratorProtector",
+ ("old_space", 0x0255d): "PromiseThenProtector",
+ ("old_space", 0x02571): "SetIteratorProtector",
+ ("old_space", 0x02585): "StringIteratorProtector",
+ ("old_space", 0x02599): "SingleCharacterStringCache",
+ ("old_space", 0x029a1): "StringSplitCache",
+ ("old_space", 0x02da9): "RegExpMultipleCache",
+ ("old_space", 0x031b1): "BuiltinsConstantsTable",
+ ("old_space", 0x035b1): "AsyncFunctionAwaitRejectSharedFun",
+ ("old_space", 0x035d5): "AsyncFunctionAwaitResolveSharedFun",
+ ("old_space", 0x035f9): "AsyncGeneratorAwaitRejectSharedFun",
+ ("old_space", 0x0361d): "AsyncGeneratorAwaitResolveSharedFun",
+ ("old_space", 0x03641): "AsyncGeneratorYieldResolveSharedFun",
+ ("old_space", 0x03665): "AsyncGeneratorReturnResolveSharedFun",
+ ("old_space", 0x03689): "AsyncGeneratorReturnClosedRejectSharedFun",
+ ("old_space", 0x036ad): "AsyncGeneratorReturnClosedResolveSharedFun",
+ ("old_space", 0x036d1): "AsyncIteratorValueUnwrapSharedFun",
+ ("old_space", 0x036f5): "PromiseAllResolveElementSharedFun",
+ ("old_space", 0x03719): "PromiseAllSettledResolveElementSharedFun",
+ ("old_space", 0x0373d): "PromiseAllSettledRejectElementSharedFun",
+ ("old_space", 0x03761): "PromiseAnyRejectElementSharedFun",
+ ("old_space", 0x03785): "PromiseCapabilityDefaultRejectSharedFun",
+ ("old_space", 0x037a9): "PromiseCapabilityDefaultResolveSharedFun",
+ ("old_space", 0x037cd): "PromiseCatchFinallySharedFun",
+ ("old_space", 0x037f1): "PromiseGetCapabilitiesExecutorSharedFun",
+ ("old_space", 0x03815): "PromiseThenFinallySharedFun",
+ ("old_space", 0x03839): "PromiseThrowerFinallySharedFun",
+ ("old_space", 0x0385d): "PromiseValueThunkFinallySharedFun",
+ ("old_space", 0x03881): "ProxyRevokeSharedFun",
}
# Lower 32 bits of first page addresses for various heap spaces.
@@ -489,7 +506,6 @@ FRAME_MARKERS = (
"ENTRY",
"CONSTRUCT_ENTRY",
"EXIT",
- "OPTIMIZED",
"WASM",
"WASM_TO_JS",
"JS_TO_WASM",
@@ -498,13 +514,14 @@ FRAME_MARKERS = (
"WASM_EXIT",
"WASM_COMPILE_LAZY",
"INTERPRETED",
+ "BASELINE",
+ "OPTIMIZED",
"STUB",
"BUILTIN_CONTINUATION",
"JAVA_SCRIPT_BUILTIN_CONTINUATION",
"JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH",
"INTERNAL",
"CONSTRUCT",
- "ARGUMENTS_ADAPTOR",
"BUILTIN",
"BUILTIN_EXIT",
"NATIVE",
diff --git a/deps/v8/tools/wasm/update-wasm-spec-tests.sh b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
index dc9621a83b..4d352754b7 100755
--- a/deps/v8/tools/wasm/update-wasm-spec-tests.sh
+++ b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
@@ -71,7 +71,7 @@ log_and_run cp -r ${TMP_DIR}/spec/test/js-api/* ${JS_API_TEST_DIR}/tests
# Generate the proposal tests.
###############################################################################
-repos='bulk-memory-operations reference-types js-types tail-call simd'
+repos='bulk-memory-operations reference-types js-types tail-call simd memory64'
for repo in ${repos}; do
echo "Process ${repo}"
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 8fcce8dda7..3332f7805b 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -7,7 +7,7 @@ A Smi balks into a war and says:
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up...
The autoroller bought a round of Himbeerbrause. Suddenly.....
-The bartender starts to shake the bottles........................
+The bartender starts to shake the bottles..........................
I can't add trailing whitespaces, so I'm adding this line...........
I'm starting to think that just adding trailing whitespaces might not be bad.
@@ -15,3 +15,4 @@ Because whitespaces are not that funny.....
Today's answer to life the universe and everything is 12950!
Today's answer to life the universe and everything is 6728!
Today's answer to life the universe and everything is 6728!!
+.